// Complete backpropagation neural network with gradient descent (2023-08-01).
// This file contains the "main" function; program execution begins and ends there.
#include <cmath>
#include <cstdlib>
#include <ctime>
#include <iostream>
#include <vector>
// A tiny fully-connected feedforward network with a fixed 3-4-2 topology
// (3 inputs, 4 hidden sigmoid units, 2 sigmoid outputs), trained by
// stochastic gradient descent on mean squared error.
class NeuralNetwork {
private:
    std::vector<std::vector<float>> inputWeights;   // [hidden][input] weights, input -> hidden
    std::vector<float> inputBiases;                 // hidden-layer biases
    std::vector<std::vector<float>> hiddenWeights;  // [output][hidden] weights, hidden -> output
    std::vector<float> hiddenBiases;                // output-layer biases
    std::vector<float> hiddenLayer;                 // last forward pass: hidden activations
    std::vector<float> outputLayer;                 // last forward pass: output activations
    float learningRate = 0.01f;

    // Logistic activation function.
    float sigmoid(float x) {
        return 1.0f / (1.0f + std::exp(-x));
    }

    // Derivative of the logistic function expressed in terms of its
    // OUTPUT value y = sigmoid(x): dy/dx = y * (1 - y).
    float sigmoidDerivative(float y) {
        return y * (1.0f - y);
    }

    // Mean squared error between targets and network outputs.
    float mse_loss(const std::vector<float>& targets, const std::vector<float>& outputs) {
        float totalError = 0.0f;
        for (size_t i = 0; i < targets.size(); i++) {
            const float diff = targets[i] - outputs[i];
            totalError += diff * diff;
        }
        return totalError / targets.size();
    }

public:
    // Initializes every weight and bias uniformly at random in [-0.5, 0.5).
    NeuralNetwork() {
        srand((unsigned int)time(0));
        inputWeights.resize(4, std::vector<float>(3));
        inputBiases.resize(4);
        hiddenWeights.resize(2, std::vector<float>(4));
        hiddenBiases.resize(2);
        for (int i = 0; i < 4; i++) {
            inputBiases[i] = (rand() % 1000 - 500) / 1000.0f;
            for (int j = 0; j < 3; j++) {
                inputWeights[i][j] = (rand() % 1000 - 500) / 1000.0f;
            }
        }
        for (int i = 0; i < 2; i++) {
            hiddenBiases[i] = (rand() % 1000 - 500) / 1000.0f;
            for (int j = 0; j < 4; j++) {
                hiddenWeights[i][j] = (rand() % 1000 - 500) / 1000.0f;
            }
        }
    }

    // Forward pass: computes and caches hidden/output activations for the
    // given 3-element input, and returns the 2-element output vector.
    std::vector<float> predict(const std::vector<float>& input) {
        hiddenLayer.resize(4);
        outputLayer.resize(2);
        for (int i = 0; i < 4; i++) {
            hiddenLayer[i] = inputBiases[i];
            for (int j = 0; j < 3; j++) {
                hiddenLayer[i] += input[j] * inputWeights[i][j];
            }
            hiddenLayer[i] = sigmoid(hiddenLayer[i]);
        }
        for (int i = 0; i < 2; i++) {
            outputLayer[i] = hiddenBiases[i];
            for (int j = 0; j < 4; j++) {
                outputLayer[i] += hiddenLayer[j] * hiddenWeights[i][j];
            }
            outputLayer[i] = sigmoid(outputLayer[i]);
        }
        return outputLayer;
    }

    // One SGD step on a single (input, target) pair; returns the MSE loss
    // measured against the pre-update forward pass.
    float train(const std::vector<float>& input, const std::vector<float>& target) {
        predict(input);
        // Output-layer deltas: error * f'(activation). Computing the delta
        // once here both avoids recomputing the derivative inside the update
        // loops and fixes the hidden-layer gradient: the original code
        // backpropagated the raw error (without the output derivative),
        // which is not the true MSE gradient.
        std::vector<float> outputDeltas(2);
        for (int i = 0; i < 2; i++) {
            outputDeltas[i] = (target[i] - outputLayer[i]) * sigmoidDerivative(outputLayer[i]);
        }
        // Hidden-layer deltas: backpropagate the output deltas through the
        // (pre-update) hidden->output weights, then apply the hidden derivative.
        std::vector<float> hiddenDeltas(4, 0.0f);
        for (int i = 0; i < 4; i++) {
            float propagated = 0.0f;
            for (int j = 0; j < 2; j++) {
                propagated += outputDeltas[j] * hiddenWeights[j][i];
            }
            hiddenDeltas[i] = propagated * sigmoidDerivative(hiddenLayer[i]);
        }
        // Gradient step: w += lr * delta * upstream_activation; b += lr * delta.
        for (int i = 0; i < 2; i++) {
            for (int j = 0; j < 4; j++) {
                hiddenWeights[i][j] += learningRate * outputDeltas[i] * hiddenLayer[j];
            }
            hiddenBiases[i] += learningRate * outputDeltas[i];
        }
        for (int i = 0; i < 4; i++) {
            for (int j = 0; j < 3; j++) {
                inputWeights[i][j] += learningRate * hiddenDeltas[i] * input[j];
            }
            inputBiases[i] += learningRate * hiddenDeltas[i];
        }
        return mse_loss(target, outputLayer);
    }
};
int main() {
    NeuralNetwork nn;

    // Single demo sample: three input features mapped to two targets.
    const std::vector<float> input = { 0.5, 0.6, 0.7 };
    const std::vector<float> target = { 0.1, 0.9 };

    // Fit the network to the one sample, logging the loss every 1000 steps.
    for (int iter = 0; iter < 10000; ++iter) {
        const float loss = nn.train(input, target);
        if (iter % 1000 == 0) {
            std::cout << "Iteration " << iter << ", Loss: " << loss << std::endl;
        }
    }

    // Report the fitted outputs for the training sample.
    const std::vector<float> prediction = nn.predict(input);
    std::cout << "Predictions: " << prediction[0] << ", " << prediction[1] << std::endl;
    return 0;
}