```
// neural21ConApp1.cpp : This file contains the 'main' function. Program execution begins and ends there.
//
#include <iostream>
#include <cmath>   // exp()
#include <cstdlib> // rand(), srand(), RAND_MAX
float x[4][2] = { {-2,-1},{25,6},{17,4},{-15,-6} }; // 4 training samples, 2 features each
float all_y_trues[4] = { 1,0,0,1 };                 // target output for each sample
//
double ww[3][2]; // ww[0], ww[1]: weights of hidden neurons h1, h2; ww[2]: weights of output neuron o1
double b[3];     // b[0], b[1]: hidden biases; b[2]: output bias
//
double sigmoid01(double x)
{
    // Sigmoid activation: f(x) = 1 / (1 + e^(-x))
    return 1.0 / (1.0 + exp(-x));
}
double deriv_sigmoid(double x)
{
    // Derivative of the sigmoid: f'(x) = f(x) * (1 - f(x))
    double fx = sigmoid01(x);
    return fx * (1 - fx);
}
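// A quick numeric sketch of the identity above: the centered difference
// (sigmoid01(x + e) - sigmoid01(x - e)) / (2 * e) with a small e (e.g. 1e-5)
// should closely match deriv_sigmoid(x) at any x.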
float learn_rate = 0.1f;
int epochs = 1000; // number of times to loop through the entire dataset
double feedforward(const float x[])
{
    // x is one sample with 2 features (cf. the numpy original's feedforward(self, x)).
    double h1 = sigmoid01(ww[0][0] * x[0] + ww[0][1] * x[1] + b[0]);
    double h2 = sigmoid01(ww[1][0] * x[0] + ww[1][1] * x[1] + b[1]);
    double o1 = sigmoid01(ww[2][0] * h1 + ww[2][1] * h2 + b[2]);
    return o1;
}
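// Network layout used throughout this file: 2 inputs -> 2 hidden sigmoid
// neurons h1, h2 (weights ww[0], ww[1]) -> 1 sigmoid output o1 (weights
// ww[2]); the same 2-2-1 network as the numpy version this file ports.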
int main()
{
    srand(21); // fixed seed so runs are reproducible; any seed works
    double sum_h[2];
    double h[2];
    // Initialize all weights and biases to random values in [0, 1)
    // (raw rand() values would saturate the sigmoids immediately).
    for (int i = 0; i < 3; ++i) {
        for (int j = 0; j < 2; ++j) { ww[i][j] = rand() / (double)RAND_MAX; }
    }
    for (int i = 0; i < 3; ++i) { b[i] = rand() / (double)RAND_MAX; }
    //
    for (int epoch = 0; epoch < epochs; ++epoch) {
        for (int kk = 0; kk < 4; ++kk) { // loop over the 4 training samples
            // Forward pass: hidden layer
            for (int i = 0; i < 2; ++i) {
                sum_h[i] = 0;
                for (int j = 0; j < 2; ++j) {
                    sum_h[i] += ww[i][j] * x[kk][j];
                }
                sum_h[i] += b[i];
                h[i] = sigmoid01(sum_h[i]);
            }
            // Forward pass: output neuron
            double sum_o1 = ww[2][0] * h[0] + ww[2][1] * h[1] + b[2];
            double o1 = sigmoid01(sum_o1);
            double y_pred = o1;
            // dL/dy_pred for the squared-error loss L = (y_true - y_pred)^2
            double d_L_d_ypred = -2 * (all_y_trues[kk] - y_pred);
            // Backward pass: partial derivatives via the chain rule
            // Neuron o1
            double d_ypred_d_w5 = h[0] * deriv_sigmoid(sum_o1);
            double d_ypred_d_w6 = h[1] * deriv_sigmoid(sum_o1);
            double d_ypred_d_b3 = deriv_sigmoid(sum_o1);
            double d_ypred_d_h1 = ww[2][0] * deriv_sigmoid(sum_o1);
            double d_ypred_d_h2 = ww[2][1] * deriv_sigmoid(sum_o1);
            // Neuron h1
            double d_h1_d_w1 = x[kk][0] * deriv_sigmoid(sum_h[0]);
            double d_h1_d_w2 = x[kk][1] * deriv_sigmoid(sum_h[0]);
            double d_h1_d_b1 = deriv_sigmoid(sum_h[0]);
            // Neuron h2
            double d_h2_d_w3 = x[kk][0] * deriv_sigmoid(sum_h[1]);
            double d_h2_d_w4 = x[kk][1] * deriv_sigmoid(sum_h[1]);
            double d_h2_d_b2 = deriv_sigmoid(sum_h[1]);
            // --- Update weights and biases (backpropagation: parameter adjustment)
            // Neuron h1
            ww[0][0] -= learn_rate * d_L_d_ypred * d_ypred_d_h1 * d_h1_d_w1;
            ww[0][1] -= learn_rate * d_L_d_ypred * d_ypred_d_h1 * d_h1_d_w2;
            b[0] -= learn_rate * d_L_d_ypred * d_ypred_d_h1 * d_h1_d_b1;
            // Neuron h2
            ww[1][0] -= learn_rate * d_L_d_ypred * d_ypred_d_h2 * d_h2_d_w3;
            ww[1][1] -= learn_rate * d_L_d_ypred * d_ypred_d_h2 * d_h2_d_w4;
            b[1] -= learn_rate * d_L_d_ypred * d_ypred_d_h2 * d_h2_d_b2;
            // Neuron o1
            ww[2][0] -= learn_rate * d_L_d_ypred * d_ypred_d_w5;
            ww[2][1] -= learn_rate * d_L_d_ypred * d_ypred_d_w6;
            b[2] -= learn_rate * d_L_d_ypred * d_ypred_d_b3;
        } // end sample loop (kk)
        if (0 == (epoch % 10)) {
            // Report MSE loss over the whole dataset every 10 epochs
            // (numpy original: y_preds = np.apply_along_axis(self.feedforward, 1, data)).
            double loss = 0;
            for (int k = 0; k < 4; ++k) { double d = all_y_trues[k] - feedforward(x[k]); loss += d * d; }
            std::cout << "Epoch " << epoch << " loss: " << loss / 4 << "\n";
        }
    } // end epoch loop
} // main
```
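A quick way to see whether training converged is to run the trained network back over the four samples. The following is a minimal sketch against the listing above; it would sit at the end of main(), after the epoch loop. The expectation that predictions approach the targets {1, 0, 0, 1} follows the numpy original, and exact values depend on the random initialization.

```
// Post-training sanity check: append at the end of main(), after the epoch loop.
for (int k = 0; k < 4; ++k) {
    std::cout << "sample " << k
              << "  y_true = " << all_y_trues[k]
              << "  y_pred = " << feedforward(x[k]) << "\n";
}
```

The listing only uses `<iostream>`, `<cmath>`, and `<cstdlib>`, so a plain `g++ neural21ConApp1.cpp` is enough to build it.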
- BP neural networks and their C++ implementation, etc. -- a machine learning "dumbed-down" tutorial
- Training a BP (neural) network to learn "multiplication" -- training an anti-aircraft gun with "mosquitoes"
- ANN computing XOR & feedforward neural networks 20200302
- Representing neural networks (ANN) 20200312
- The backpropagation (BP) algorithm for a simple neural network
- Newton's iteration for a local optimum (solution) 20200310
- Installing numpy, pip3, etc. on Ubuntu
- Implementing a neural network from scratch - numpy part 01
- Improving and translating Victor Zhou's (Princeton) celebrated neural network article 20200311
- C language - Princeton Victor Zhou neural network implementation 210301
- A C implementation of XOR with a BP network 202102
- BP network for XOR - inputs entered automatically (hard-coded) 20210202
- MNIST with TensorFlow 2.0 on Python 3.6, a pitfall at every step 20210210
- Handwritten digit recognition with numpy - recognizing directly with a BP network 210201