# pytorch - Using the Dependencies tool to analyze a failing DLL's dependencies

- [pytorch | Fixing the "fbgemm.dll not found" error](https://blog.csdn.net/Changxing_J/article/details/140489278)
- [Download for the missing libomp140.x86\_64.dll](https://blog.csdn.net/Enexj/article/details/140870389)

> - pip install torch torchvision nltk -i https://pypi.tuna.tsinghua.edu.cn/simple
> - pip install scikit-learn -i https://pypi.tuna.tsinghua.edu.cn/simple

~~~
import torch
import torch.nn as nn
import torch.optim as optim
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.preprocessing import LabelEncoder
import numpy as np

# import nltk
# nltk.download('punkt')

# Prepare the training data: (customer question, intent label) pairs.
# The text is kept in Chinese, as in the original example.
data = [
    ("订单状态", "订单查询"),
    ("我的订单什么时候到?", "订单查询"),
    ("退货政策是什么?", "退货"),
    ("我想取消订单", "取消订单"),
    ("如何申请退款?", "退款")
]

questions, labels = zip(*data)

# Bag-of-words features. Note: CountVectorizer's default tokenizer does not
# segment Chinese text, so each question is effectively treated as one token;
# for real data you would plug in a word segmenter via the tokenizer argument.
vectorizer = CountVectorizer()
X = vectorizer.fit_transform(questions).toarray()

# Encode the string labels as integer class ids
label_encoder = LabelEncoder()
y = label_encoder.fit_transform(labels)

# Define a simple two-layer classifier
class SimpleClassifier(nn.Module):
    def __init__(self, input_size, num_classes):
        super(SimpleClassifier, self).__init__()
        self.fc1 = nn.Linear(input_size, 50)
        self.fc2 = nn.Linear(50, num_classes)

    def forward(self, x):
        x = torch.relu(self.fc1(x))
        x = self.fc2(x)
        return x

input_size = X.shape[1]
num_classes = len(label_encoder.classes_)
model = SimpleClassifier(input_size, num_classes)

# Define the loss function and optimizer
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(model.parameters(), lr=0.01)

# Convert the features and labels to tensors
X_tensor = torch.tensor(X, dtype=torch.float32)
y_tensor = torch.tensor(y, dtype=torch.long)

# Train the model (full-batch gradient descent over all samples)
epochs = 100
for epoch in range(epochs):
    outputs = model(X_tensor)
    loss = criterion(outputs, y_tensor)

    optimizer.zero_grad()
    loss.backward()
    optimizer.step()

# Save the model's state dict
torch.save(model.state_dict(), 'model_state_dict.pth')

# Load the state dict into a fresh model instance
loaded_model = SimpleClassifier(input_size, num_classes)
loaded_model.load_state_dict(torch.load('model_state_dict.pth', weights_only=True))
loaded_model.eval()

# Test the loaded model
def predict(question):
    question_vec = vectorizer.transform([question]).toarray()
    with torch.no_grad():
        question_tensor = torch.tensor(question_vec, dtype=torch.float32)
        output = loaded_model(question_tensor)
        _, predicted = torch.max(output, 1)
        return label_encoder.inverse_transform(predicted.numpy())[0]

test_question = "我的订单状态是怎样"
response = predict(test_question)
print(f'预测的客服回复类型: {response}')  # prints the predicted reply category
~~~
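
The fbgemm.dll / libomp140.x86\_64.dll problems from the links above surface the moment `import torch` runs, so before training anything it is worth confirming that the fix took. Below is a minimal sanity-check sketch (the file name `check_torch.py` is just an example, not part of the original post): a clean import plus a tiny matmul is usually enough to show the DLL dependencies now resolve.

~~~
# check_torch.py - minimal check that the Windows DLL issues are resolved.
# The problematic DLLs are loaded when torch is imported, so a successful
# import plus a small CPU op confirms the dependency chain is intact.
import torch

print(torch.__version__)            # installed torch version
x = torch.rand(4, 4)
print(x @ x.T)                      # small matmul exercises the CPU backend
print(torch.cuda.is_available())    # False on a CPU-only install; that is fine
~~~

If the import still fails, open the failing DLL (e.g. fbgemm.dll under `site-packages/torch/lib`) in the Dependencies tool to see which dependent DLL is actually missing.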