企业🤖AI智能体构建引擎,智能编排和调试,一键部署,支持私有化部署方案 广告
// neural21ConApp1.cpp : Minimal 2-2-1 feed-forward neural network trained with
// per-sample stochastic gradient descent (hand-rolled backpropagation).
// Program execution begins and ends in main().

#include <iostream>
#include <cmath>
#include <cstdlib>

// Training data: 4 samples, 2 features each (mean-shifted measurements).
float x[4][2] = { {-2,-1},{25,6},{17,4},{-15,-6} };
// Target output for each sample.
float all_y_trues[4] = { 1,0,0,1 };

// Weights: ww[0]/ww[1] feed hidden neurons h1/h2; ww[2] feeds output o1.
double ww[3][2];
// Biases: b[0]/b[1] for the hidden neurons, b[2] for the output neuron.
double b[4];

// Logistic sigmoid: f(x) = 1 / (1 + e^-x).
// BUG FIX: the original returned (1.0 / 1.0 + exp(-x)) == 1 + e^-x because
// of missing parentheses — not a sigmoid at all.
double sigmoid01(double x)
{
    return 1.0 / (1.0 + std::exp(-x));
}

// Derivative of sigmoid: f'(x) = f(x) * (1 - f(x)).
double deriv_sigmoid(double x)
{
    double fx = sigmoid01(x);
    return fx * (1 - fx);
}

float learn_rate = 0.1f;  // SGD step size
int epochs = 1000;        // number of passes over the entire dataset

// Forward pass. x is an array with 2 elements; returns network output o1.
double feedforward(double x[])
{
    double h1 = sigmoid01(ww[0][0] * x[0] + ww[0][1] * x[1] + b[0]);
    double h2 = sigmoid01(ww[1][0] * x[0] + ww[1][1] * x[1] + b[1]);
    double o1 = sigmoid01(ww[2][0] * h1 + ww[2][1] * h2 + b[2]);
    return o1;
}

int main()
{
    std::cout << "Hello World!\n";

    double sum_h[2];  // pre-activation values of the hidden neurons
    double h[2];      // activations of the hidden neurons

    // BUG FIX: initialize parameters to small values in [-0.5, 0.5].
    // The original stored raw rand() (up to RAND_MAX), which saturates the
    // sigmoid and makes every gradient vanish.
    for (int i = 0; i < 3; ++i) {
        for (int j = 0; j < 2; ++j) {
            ww[i][j] = (double)std::rand() / RAND_MAX - 0.5;
        }
    }
    // BUG FIX: removed the local `double b[4];` that shadowed the global
    // biases — feedforward() was reading biases training never updated.
    for (int i = 0; i < 3; ++i) {
        b[i] = (double)std::rand() / RAND_MAX - 0.5;
    }

    // BUG FIX: honor the declared `epochs` (the original hard-coded 100).
    for (int epoch = 0; epoch < epochs; ++epoch) {
        for (int kk = 0; kk < 4; ++kk) {
            // --- Forward pass (kept explicit so intermediates are reusable
            // in the backward pass) --------------------------------------
            for (int i = 0; i < 2; ++i) {
                // BUG FIX: start the accumulator from the bias instead of
                // `+=` into uninitialized memory, and index the input with
                // j (the original multiplied by x[kk][i]).
                sum_h[i] = b[i];
                for (int j = 0; j < 2; ++j) {
                    sum_h[i] += ww[i][j] * x[kk][j];
                }
                h[i] = sigmoid01(sum_h[i]);
            }
            // BUG FIX: use b[2] as the output bias so the training loop
            // updates the same parameter feedforward() reads (the original
            // trained b[3] but evaluated with b[2]).
            double sum_o1 = ww[2][0] * h[0] + ww[2][1] * h[1] + b[2];
            double o1 = sigmoid01(sum_o1);
            double y_pred = o1;

            // --- Backward pass: dL/dy for L = (y_true - y_pred)^2 -------
            double d_L_d_ypred = -2 * (all_y_trues[kk] - y_pred);

            // Neuron o1
            double d_ypred_d_w5 = h[0] * deriv_sigmoid(sum_o1);
            double d_ypred_d_w6 = h[1] * deriv_sigmoid(sum_o1);
            double d_ypred_d_b3 = deriv_sigmoid(sum_o1);
            double d_ypred_d_h1 = ww[2][0] * deriv_sigmoid(sum_o1);
            double d_ypred_d_h2 = ww[2][1] * deriv_sigmoid(sum_o1);

            // Neuron h1
            double d_h1_d_w1 = x[kk][0] * deriv_sigmoid(sum_h[0]);
            double d_h1_d_w2 = x[kk][1] * deriv_sigmoid(sum_h[0]);
            double d_h1_d_b1 = deriv_sigmoid(sum_h[0]);

            // Neuron h2
            double d_h2_d_w3 = x[kk][0] * deriv_sigmoid(sum_h[1]);
            double d_h2_d_w4 = x[kk][1] * deriv_sigmoid(sum_h[1]);
            double d_h2_d_b2 = deriv_sigmoid(sum_h[1]);

            // --- SGD update: w -= lr * dL/dw (chain rule) ---------------
            // Neuron h1
            ww[0][0] -= learn_rate * d_L_d_ypred * d_ypred_d_h1 * d_h1_d_w1;
            ww[0][1] -= learn_rate * d_L_d_ypred * d_ypred_d_h1 * d_h1_d_w2;
            b[0]     -= learn_rate * d_L_d_ypred * d_ypred_d_h1 * d_h1_d_b1;
            // Neuron h2
            ww[1][0] -= learn_rate * d_L_d_ypred * d_ypred_d_h2 * d_h2_d_w3;
            ww[1][1] -= learn_rate * d_L_d_ypred * d_ypred_d_h2 * d_h2_d_w4;
            b[1]     -= learn_rate * d_L_d_ypred * d_ypred_d_h2 * d_h2_d_b2;
            // Neuron o1 (bias index fixed to b[2], see forward pass)
            ww[2][0] -= learn_rate * d_L_d_ypred * d_ypred_d_w5;
            ww[2][1] -= learn_rate * d_L_d_ypred * d_ypred_d_w6;
            b[2]     -= learn_rate * d_L_d_ypred * d_ypred_d_b3;
        }

        // Report mean-squared-error loss every 10 epochs (the original
        // declared `loss` but never computed or printed it).
        if (epoch % 10 == 0) {
            double loss = 0;
            for (int kk = 0; kk < 4; ++kk) {
                double xin[2] = { x[kk][0], x[kk][1] };
                double diff = all_y_trues[kk] - feedforward(xin);
                loss += diff * diff;
            }
            loss /= 4;
            std::cout << "Epoch " << epoch << " loss: " << loss << "\n";
        }
    }
    return 0;
}