Improved version 3 (250305):
Added saving of the model.
On the next run, the saved model and its parameters are loaded and inference runs directly.
Choosing Y continues training...
and the trained model and parameters are then saved again (a standalone loader is also sketched after the code below):
#include <iostream>
#include <fstream>
#include <filesystem> // C++17 filesystem check
#include "tiny_dnn/tiny_dnn.h"

using namespace tiny_dnn;
using namespace tiny_dnn::activation;

#pragma warning(disable : 4996)

namespace fs = std::filesystem;

int main() {
    // Network structure: 2 inputs -> 5 hidden (tanh) -> 1 output (sigmoid)
    network<sequential> nn;
    nn << fully_connected_layer(2, 5)
       << tanh_layer()
       << fully_connected_layer(5, 1)
       << sigmoid_layer();

    // Input and target data (XOR truth table)
    std::vector<vec_t> input_data  = { {0.0, 0.0}, {0.0, 1.0}, {1.0, 0.0}, {1.0, 1.0} };
    std::vector<vec_t> target_data = { {0.0}, {1.0}, {1.0}, {0.0} };

    std::string model_filename = "NeuralNet25_full.json";

    // If the model file exists, load it (structure + weights) and run inference.
    if (fs::exists(model_filename)) {
        std::cout << "Found existing model: " << model_filename << std::endl;
        nn.load(model_filename, content_type::weights_and_model, file_format::json);
        std::cout << "Model loaded. Running inference:\n";
        for (size_t i = 0; i < input_data.size(); ++i) {
            auto prediction = nn.predict(input_data[i]);
            std::cout << input_data[i][0] << " XOR " << input_data[i][1]
                      << " = " << prediction[0] << std::endl;
        }
        std::cout << "\nDo you want to continue training? (Y/N): ";
        char choice;
        std::cin >> choice;
        if (choice != 'y' && choice != 'Y') {
            return 0;
        }
    }

    // Optimizer setup
    adam optimizer;
    optimizer.alpha = 0.1;

    const int total_epochs = 110; // 150;
    const int batch_size   = 4;

    // Training loop. Note: train() returns a bool, not the loss,
    // so the loss is queried separately via get_loss().
    for (int epoch = 1; epoch <= total_epochs; ++epoch) {
        nn.train<mse>(optimizer, input_data, target_data, batch_size, 1);
        if (epoch % 10 == 0) {
            float_t loss = nn.get_loss<mse>(input_data, target_data);
            std::cout << "Epoch " << epoch << " - Loss: " << loss << std::endl;
        }
        // Save the model structure and parameters periodically and at the end.
        if (epoch % 100 == 0 || epoch == total_epochs) {
            nn.save(model_filename, content_type::weights_and_model, file_format::json);
            std::cout << "Model saved (structure + weights) at epoch " << epoch
                      << " to " << model_filename << "\n";
        }
    }

    // Final inference
    std::cout << "\nFinal inference results:\n";
    for (size_t i = 0; i < input_data.size(); ++i) {
        auto prediction = nn.predict(input_data[i]);
        std::cout << input_data[i][0] << " XOR " << input_data[i][1]
                  << " = " << prediction[0] << std::endl;
    }
    return 0;
} // main
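Because content_type::weights_and_model stores the network structure alongside the weights, the saved JSON can be reloaded into an empty network in a separate program, with no layer definitions repeated. Below is a minimal inference-only loader sketch, assuming the NeuralNet25_full.json file produced by the program above:

#include <iostream>
#include "tiny_dnn/tiny_dnn.h"

using namespace tiny_dnn;

int main() {
    // No layers declared here: the structure comes from the saved file.
    network<sequential> nn;
    nn.load("NeuralNet25_full.json", content_type::weights_and_model, file_format::json);

    std::vector<vec_t> input_data = { {0.0, 0.0}, {0.0, 1.0}, {1.0, 0.0}, {1.0, 1.0} };
    for (const auto& in : input_data) {
        auto prediction = nn.predict(in);
        std::cout << in[0] << " XOR " << in[1] << " = " << prediction[0] << std::endl;
    }
    return 0;
}

Loading into a default-constructed network<sequential> works because tiny_dnn reconstructs the layers from the JSON; had only content_type::weights been saved, the layers would have to be declared first, exactly matching the saved topology.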
Improved version 2 (250303):
Hidden-layer neurons increased from 4 to 5.
Optimizer: adam.
#include <iostream>
#include "tiny_dnn/tiny_dnn.h"

using namespace tiny_dnn;
using namespace tiny_dnn::activation;

int main() {
    // 1. Build the network: 2 inputs -> 5 hidden -> 1 output
    network<sequential> nn;
    nn << fully_connected_layer(2, 5) // connect the 2 inputs to 5 hidden units (key change: was 4)
       << tanh_layer()
       << fully_connected_layer(5, 1)
       << sigmoid_layer();

    // 2. Training data (XOR truth table)
    std::vector<vec_t> input_data  = { {0.0, 0.0}, {0.0, 1.0}, {1.0, 0.0}, {1.0, 1.0} };
    std::vector<vec_t> target_data = { {0.0}, {1.0}, {1.0}, {0.0} };

    // 3. Optimizer setup
    // adagrad optimizer; // adagrad is better suited to sparse data
    adam optimizer;        // Adam chosen here
    optimizer.alpha = 0.1; // 0.05; // learning rate, for finer control

    // 4. Train the model
    nn.train<mse>(optimizer, input_data, target_data,
                  /*batch_size=*/4,
                  /*epochs=*/500); // 1000);

    // 5. Print test results
    for (size_t i = 0; i < input_data.size(); ++i) {
        auto prediction = nn.predict(input_data[i]);
        std::cout << input_data[i][0] << " XOR " << input_data[i][1]
                  << " = " << prediction[0] << std::endl;
    }
    return 0;
}
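This version trains for a fixed 500 epochs but never reports whether the loss actually converged. tiny_dnn's network::get_loss can be called after train() returns to check; here is a minimal sketch to place right after the train() call above (the 0.01 threshold is an arbitrary assumption, not part of the original):

    // Query the mean-squared error over the training set after training.
    float_t final_loss = nn.get_loss<mse>(input_data, target_data);
    std::cout << "Final MSE loss: " << final_loss << std::endl;
    if (final_loss > 0.01) { // arbitrary convergence threshold (assumption)
        std::cout << "Warning: loss is still high; consider more epochs or a different alpha.\n";
    }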
Improved version 1 (250202):
#include <iostream>
#include "tiny_dnn/tiny_dnn.h"

using namespace tiny_dnn;
using namespace tiny_dnn::activation;

int main() {
    // 1. Build the network: 2 inputs -> 4 hidden -> 3 hidden -> 1 output
    network<sequential> nn;
    nn << fully_connected_layer(2, 4) << tanh_layer()
       << fully_connected_layer(4, 3) << sigmoid_layer()
       << fully_connected_layer(3, 1) << sigmoid_layer();

    // 2. Training data (XOR truth table)
    std::vector<vec_t> input_data  = { {0.0, 0.0}, {0.0, 1.0}, {1.0, 0.0}, {1.0, 1.0} };
    std::vector<vec_t> target_data = { {0.0}, {1.0}, {1.0}, {0.0} };

    // 3. Optimizer setup
    adagrad optimizer;

    // 4. Train the model
    nn.train<mse>(optimizer, input_data, target_data,
                  /*batch_size=*/4,
                  /*epochs=*/5000); // 1000);

    // 5. Print test results
    for (size_t i = 0; i < input_data.size(); ++i) {
        auto prediction = nn.predict(input_data[i]);
        std::cout << input_data[i][0] << " XOR " << input_data[i][1]
                  << " = " << prediction[0] << std::endl;
    }
    return 0;
}
Original version: 250101
#include <iostream> // required for std::cout
#include <tiny_dnn/tiny_dnn.h>

int main() {
    tiny_dnn::network<tiny_dnn::sequential> net;
    net << tiny_dnn::fully_connected_layer(2, 4)
        << tiny_dnn::fully_connected_layer(4, 3)
        << tiny_dnn::tanh_layer()
        << tiny_dnn::fully_connected_layer(3, 1)
        << tiny_dnn::sigmoid_layer();

    std::vector<tiny_dnn::vec_t> inputs  = { {0.0, 0.0}, {1.0, 0.0}, {0.0, 1.0}, {1.0, 1.0} };
    std::vector<tiny_dnn::vec_t> outputs = { {0.0}, {1.0}, {1.0}, {0.0} }; // XOR

    tiny_dnn::adam optimizer;
    net.train<tiny_dnn::mse>(optimizer, inputs, outputs, 4, 2250); // batch size 4, 2250 epochs

    for (auto& input : inputs) {
        auto result = net.predict(input);
        std::cout << input[0] << " XOR " << input[1] << " = " << result[0] << std::endl;
    }
    return 0;
}
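Note that the sigmoid output is a real value in (0, 1), not an exact 0 or 1, so the printed results only approximate the XOR table. Here is a minimal sketch of thresholding the output into a hard answer, reusing net, inputs, and outputs from the program above and the conventional 0.5 cut-off (an assumption, not part of the original):

    for (size_t i = 0; i < inputs.size(); ++i) {
        auto result = net.predict(inputs[i]);
        int bit = result[0] > 0.5 ? 1 : 0; // threshold the sigmoid activation
        bool correct = (bit == static_cast<int>(outputs[i][0]));
        std::cout << inputs[i][0] << " XOR " << inputs[i][1] << " -> " << bit
                  << (correct ? " (correct)" : " (wrong)") << std::endl;
    }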
