class AntennaPredictor(nn.Module):
    """Antenna performance prediction network (surrogate model).

    Maps a vector of antenna design parameters to predicted performance
    metrics: a 201-point S11 curve plus scalar gain, bandwidth, and
    efficiency, produced by task-specific heads on a shared MLP trunk.

    Args:
        input_dim: Number of antenna design parameters (default 5).
        hidden_dims: Widths of the hidden layers in the shared trunk.
            NOTE: default is a tuple, not a list — a mutable default
            argument would be shared across all calls.
    """

    def __init__(self, input_dim=5, hidden_dims=(128, 256, 256, 128)):
        super().__init__()
        layers = []
        prev_dim = input_dim
        # Shared trunk: Linear -> BatchNorm -> ReLU -> Dropout per stage.
        for hidden_dim in hidden_dims:
            layers.extend([
                nn.Linear(prev_dim, hidden_dim),
                nn.BatchNorm1d(hidden_dim),
                nn.ReLU(),
                nn.Dropout(0.2),
            ])
            prev_dim = hidden_dim
        self.feature_extractor = nn.Sequential(*layers)
        # Task-specific output heads on top of the shared features.
        self.s11_head = nn.Linear(hidden_dims[-1], 201)  # S11 at 201 frequency points
        self.gain_head = nn.Linear(hidden_dims[-1], 1)
        self.bandwidth_head = nn.Linear(hidden_dims[-1], 1)
        self.efficiency_head = nn.Linear(hidden_dims[-1], 1)

    def forward(self, x):
        """Predict all performance metrics for a batch of parameter vectors.

        Args:
            x: Float tensor of shape (batch, input_dim).

        Returns:
            Dict with keys "s11" (batch, 201), and "gain", "bandwidth",
            "efficiency" each of shape (batch, 1).
        """
        features = self.feature_extractor(x)
        return {
            "s11": self.s11_head(features),
            "gain": self.gain_head(features),
            "bandwidth": self.bandwidth_head(features),
            "efficiency": self.efficiency_head(features),
        }
class SurrogateTrainer:
    """Trainer for the antenna surrogate model.

    Handles data preparation, the multi-task training loop, validation,
    and best-model checkpointing to "best_model.pth".

    Fix over the original: validation now uses the same weighted
    multi-task objective as training. Previously ``validate`` scored only
    the S11 term, so LR scheduling and best-model selection were driven by
    a different objective than the one being optimized.
    """

    # Relative weights of the four prediction tasks in the total loss.
    LOSS_WEIGHTS = {"s11": 0.3, "gain": 0.3, "bandwidth": 0.2, "efficiency": 0.2}

    def __init__(self, model, device="cuda"):
        # Fall back to CPU when CUDA is requested but unavailable instead
        # of crashing on GPU-less machines.
        if device == "cuda" and not torch.cuda.is_available():
            device = "cpu"
        self.model = model.to(device)
        self.device = device

    def prepare_data(self, params: np.ndarray, s11_data: np.ndarray,
                     labels: dict) -> Tuple[torch.Tensor, dict]:
        """Convert numpy training data into device tensors.

        Args:
            params: (N, input_dim) antenna parameter matrix.
            s11_data: (N, 201) S11 curves.
            labels: Dict with 1-D arrays under "gain", "bandwidth",
                "efficiency".

        Returns:
            (X, y): X is a float tensor of parameters; y maps each target
            name to a float tensor, with scalar targets shaped (N, 1) to
            match the model heads.
        """
        X = torch.FloatTensor(params).to(self.device)
        y = {
            "s11": torch.FloatTensor(s11_data).to(self.device),
            "gain": torch.FloatTensor(labels["gain"]).unsqueeze(1).to(self.device),
            "bandwidth": torch.FloatTensor(labels["bandwidth"]).unsqueeze(1).to(self.device),
            "efficiency": torch.FloatTensor(labels["efficiency"]).unsqueeze(1).to(self.device),
        }
        return X, y

    def _multitask_loss(self, pred: dict, y: dict, criterion) -> torch.Tensor:
        """Weighted sum of per-task losses; shared by train and validate."""
        return sum(criterion(pred[k], y[k]) * w for k, w in self.LOSS_WEIGHTS.items())

    def train(self, train_loader, val_loader, epochs=100):
        """Train the surrogate model.

        Uses Adam (lr=1e-3) with ReduceLROnPlateau scheduling on the
        validation loss and saves the best checkpoint to "best_model.pth".

        Args:
            train_loader: Iterable of (X, y) batches (y as in prepare_data).
            val_loader: Same format, used for scheduling/checkpointing.
            epochs: Number of training epochs.
        """
        optimizer = torch.optim.Adam(self.model.parameters(), lr=1e-3)
        scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
            optimizer, patience=10, factor=0.5
        )
        criterion = nn.MSELoss()
        best_val_loss = float('inf')

        for epoch in range(epochs):
            self.model.train()
            train_loss = 0
            for X, y in train_loader:
                optimizer.zero_grad()
                pred = self.model(X)
                loss = self._multitask_loss(pred, y, criterion)
                loss.backward()
                optimizer.step()
                train_loss += loss.item()

            val_loss = self.validate(val_loader, criterion)
            scheduler.step(val_loss)

            # Checkpoint on improvement of the (full multi-task) val loss.
            if val_loss < best_val_loss:
                best_val_loss = val_loss
                torch.save(self.model.state_dict(), "best_model.pth")

            if epoch % 10 == 0:
                print(f"Epoch {epoch}, Train Loss: {train_loss/len(train_loader):.6f}, Val Loss: {val_loss:.6f}")

    def validate(self, val_loader, criterion):
        """Return the mean validation loss over val_loader.

        Scores the same weighted multi-task objective used in training so
        scheduler steps and best-model selection stay consistent.
        """
        self.model.eval()
        val_loss = 0
        with torch.no_grad():
            for X, y in val_loader:
                pred = self.model(X)
                val_loss += self._multitask_loss(pred, y, criterion).item()
        return val_loss / len(val_loader)