A Convolutional Neural Network in C++

This article, which makes a useful reference, walks through a convolutional neural network implemented in C++. I hope it helps; if anything is wrong or incomplete, corrections and suggestions are welcome.

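Before the listing, a quick note on the shapes the code hard-codes: a 28×28 MNIST image is convolved with 20 valid 9×9 kernels (W1), giving 20 feature maps of 20×20; these pass through ReLU, are mean-pooled 2×2 down to 10×10, flattened into a 2000-vector, and fed through a 100-unit ReLU hidden layer (W5, 100×2000) and a 10-way softmax output (Wo, 10×100). The minimal standalone sketch below (not part of the original source) just verifies that arithmetic:

#include <cstdio>

int main()
{
	int in = 28, kernel = 9, maps = 20;
	int conv = in - kernel + 1;          // valid convolution: 28 - 9 + 1 = 20
	int pooled = conv / 2;               // 2x2 mean pooling: 10
	int flat = maps * pooled * pooled;   // flattened length fed to W5: 2000
	std::printf("conv=%d pooled=%d flat=%d\n", conv, pooled, flat);
	return 0;
}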

#include"TP_NNW.h"
#include<iostream>
#pragma warning(disable:4996)
using namespace std;
using namespace mnist;

float* SGD(Weight* W1, Weight& W5, Weight& Wo, float** X)
{
	Vector2 ve(28, 28);
	float* temp = apply1(10);                   // malloc'd so callers can free() it
	Vector2 Cout;
	float*** y1 = Conv(X, ve, Cout, W1, 20);    // convolution layer
	for (int i = 0; i < 20; i++)
		for (int n = 0; n < Cout.height; n++)
			for (int m = 0; m < Cout.width; m++)
				y1[i][n][m] = ReLU(y1[i][n][m]);
	Vector2 Cout2;
	float*** y3 = Pool(y1, Cout, 20, Cout2);    // 2x2 mean pooling
	float* y4 = reshape(y3, Cout2, 20, true);   // flatten (also frees y3)
	float* v5 = dot(W5, y4);                    // hidden layer
	float* y5 = ReLU(v5, W5);
	float* v = dot(Wo, y5);                     // output layer
	float* y = Softmax(v, Wo);
	for (int i = 0; i < Wo.len.height; i++)
		temp[i] = y[i];
	Free3(y1, 20, Cout.height);                 // release the intermediates
	free(y4);
	free(v5);
	free(y5);
	free(v);
	free(y);
	return temp;
}
void trainSGD(Weight* W1, Weight& W5, Weight& Wo, FILE* fp, FILE* tp)
{
	Vector2 ve(28, 28);
	unsigned char* reader = new unsigned char[ve.height * ve.width];
	float** X = apply2(ve.height, ve.width);
	unsigned char hao;
	hot_one<char> D(10);

	Weight* momentum1 = new Weight[20];//momentum terms
	Weight momentum5;
	Weight momentumo;
	Weight* dW1 = new Weight[20];//gradient accumulators
	Weight dW5;
	Weight dWo;
	for (int i = 0; i < 20; i++)
		W1[0] >> momentum1[i];
	W5 >> momentum5;
	Wo >> momentumo;
	int N = 8000;//use the first 8000 training samples
	int bsize = 100;//update the weights once every 100 samples
	int b_len;
	int* blist = bList(bsize, N, &b_len);
	for (int batch = 0; batch < b_len; batch++)
	{
		for (int i = 0; i < 20; i++)
			W1[0] >> dW1[i];
		W5 >> dW5;
		Wo >> dWo;
		int begins = blist[batch];
		for (int k = begins; k < begins + bsize && k < N; k++)
		{
			::fread(reader, sizeof(unsigned char), ve.height * ve.width, fp);//read one image
			Toshape2(X, reader, ve);//reassemble into a 2-D array
			Vector2 Cout;//size of the feature maps after convolution (20x20)
			float*** y1 = Conv(X, ve, Cout, W1, 20);//convolution
			for (int i = 0; i < 20; i++)
				for (int n = 0; n < Cout.height; n++)
				{
					for (int m = 0; m < Cout.width; m++)
					{
						y1[i][n][m] = ReLU(y1[i][n][m]);//apply ReLU
					}
				}
			float*** y2 = y1;//keep a reference for the backward pass
			Vector2 Cout2;//size after pooling (10x10)
			float*** y3 = Pool(y1, Cout, 20, Cout2);//pooling layer
			float* y4 = reshape(y3, Cout2, 20, true);//flatten as input to the fully connected layers
			float* v5 = dot(W5, y4);//matrix-vector product
			float* y5 = ReLU(v5, W5);//ReLU
			float* v = dot(Wo, y5);//matrix-vector product
			float* y = Softmax(v, Wo);//softmax classification
			::fread(&hao, sizeof(unsigned char), 1, tp);//read the label
			D.re(hao);
			float* e = new float[10];
			for (int i = 0; i < 10; i++)
				e[i] = ((float)D.one[i]) - y[i];
			float* delta = e;
			float* e5 = FXCB_err(Wo, delta);
			float* delta5 = Delta2(y5, e5, W5);
			float* e4 = FXCB_err(W5, delta5);
			float*** e3 = Toshape3(e4, 20, Cout2);
			float*** e2 = apply3(20, Cout.height, Cout.width);
			Weight one(2, 2, ones);
			/*for (int i = 0; i < 20; i++)
			{
				::printf("layer %d\n", i);
				for (int n = 0; n < Cout2.height; n++)
				{
					for (int m = 0; m < Cout2.width; m++)
						::printf("%0.3f ", e3[i][n][m]);
					puts("");
				}
			}
			getchar();*/
			for (int i = 0; i < 20; i++)//upsample the pooled error back to conv size
				kron(e2[i], Cout, e3[i], Cout2, one.WG, one.len);

			/*for (int i = 0; i < 20; i++)
			{
				::printf("layer %d\n", i);
				for (int n = 0; n < Cout.height; n++)
				{
					for (int m = 0; m < Cout.width; m++)
						::printf("%f ", e2[i][n][m]);
					puts("");
				}
			}
			getchar();*/

			float*** delta2 = apply3(20, Cout.height, Cout.width);
			for (int i = 0; i < 20; i++)
				for (int n = 0; n < Cout.height; n++)
					for (int m = 0; m < Cout.width; m++)
						delta2[i][n][m] = (y2[i][n][m] > 0) * e2[i][n][m];
			float*** delta_x = (float***)malloc(sizeof(float**) * 20);
			Vector2 t1;
			for (int i = 0; i < 20; i++)
				delta_x[i] = conv2(X, ve, delta2[i], Cout, &t1);
			for (int i = 0; i < 20; i++)
				for (int n = 0; n < t1.height; n++)
					for (int m = 0; m < t1.width; m++)
						dW1[i].WG[n][m] += delta_x[i][n][m];
			dW5.re(delta5, y4, 1);
			dWo.re(delta, y5, 1);

			Free3(delta_x, 20, t1.height);
			Free3(delta2, 20, Cout.height);
			one.release();
			Free3(e2, 20, Cout.height);
			Free3(e3, 20, Cout2.height);
			free(e4);
			free(delta5);
			free(e5);
			free(v5);
			delete[] e;
			free(y5);
			free(v);
			free(y);
			Free3(y1, 20, Cout.height);
			free(y4);
		}
		for (int i = 0; i < 20; i++)
			dW1[i] /= (bsize);
		dW5 /= (bsize);
		dWo /= (bsize);
		for (int i = 0; i < 20; i++)
			for (int n = 0; n < W1[0].len.height; n++)
				for (int m = 0; m < W1[0].len.width; m++)
				{
					momentum1[i].WG[n][m] = ALPHA * dW1[i].WG[n][m] + BETA * momentum1[i].WG[n][m];
					W1[i].WG[n][m] += momentum1[i].WG[n][m];
				}
		for (int n = 0; n < W5.len.height; n++)
			for (int m = 0; m < W5.len.width; m++)
				momentum5.WG[n][m] = ALPHA * dW5.WG[n][m] + BETA * momentum5.WG[n][m];
		W5 += momentum5;
		for (int n = 0; n < Wo.len.height; n++)
			for (int m = 0; m < Wo.len.width; m++)
				momentumo.WG[n][m] = ALPHA * dWo.WG[n][m] + BETA * momentumo.WG[n][m];
		Wo += momentumo;

	}
	for (int i = 0; i < 20; i++)
	{
		momentum1[i].release();
		dW1[i].release();
	}
	delete[] momentum1;
	delete[] dW1;
	momentum5.release();
	momentumo.release();
	Free2(X, ve.height);
	free(blist);
	delete[] reader;
	D.release();
	dW5.release();
	dWo.release();
	return;
}
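For reference, the update applied at the end of each mini-batch above is plain momentum SGD: each momentum term becomes ALPHA * dW + BETA * momentum and is then added to the weights, with ALPHA = 0.01 (the learning rate) and BETA = 0.95 (the momentum coefficient) as defined in TP_NNW.h. trainSGD1 below is the same routine with smaller constants (N = 108, bsize = 12), apparently for the smaller custom data set used by train1().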
void trainSGD1(Weight* W1, Weight& W5, Weight& Wo, FILE* fp, FILE* tp)
{
	Vector2 ve(28, 28);
	unsigned char* reader = new unsigned char[ve.height * ve.width];
	float** X = apply2(ve.height, ve.width);
	unsigned char hao;
	hot_one<char> D(10);

	Weight* momentum1 = new Weight[20];//momentum terms
	Weight momentum5;
	Weight momentumo;
	Weight* dW1 = new Weight[20];//gradient accumulators
	Weight dW5;
	Weight dWo;
	for (int i = 0; i < 20; i++)
		W1[0] >> momentum1[i];
	W5 >> momentum5;
	Wo >> momentumo;
	int N = 108;//use the first 108 training samples
	int bsize = 12;//update the weights once every 12 samples
	int b_len;
	int* blist = bList(bsize, N, &b_len);
	for (int batch = 0; batch < b_len; batch++)
	{
		for (int i = 0; i < 20; i++)
			W1[0] >> dW1[i];
		W5 >> dW5;
		Wo >> dWo;
		int begins = blist[batch];
		for (int k = begins; k < begins + bsize && k < N; k++)
		{
			::fread(reader, sizeof(unsigned char), ve.height * ve.width, fp);//read one image
			Toshape2(X, reader, ve);//reassemble into a 2-D array
			Vector2 Cout;//size of the feature maps after convolution (20x20)
			float*** y1 = Conv(X, ve, Cout, W1, 20);//convolution
			for (int i = 0; i < 20; i++)
				for (int n = 0; n < Cout.height; n++)
				{
					for (int m = 0; m < Cout.width; m++)
					{
						y1[i][n][m] = ReLU(y1[i][n][m]);//apply ReLU
					}
				}
			float*** y2 = y1;//keep a reference for the backward pass
			Vector2 Cout2;//size after pooling (10x10)
			float*** y3 = Pool(y1, Cout, 20, Cout2);//pooling layer
			float* y4 = reshape(y3, Cout2, 20, true);//flatten as input to the fully connected layers
			float* v5 = dot(W5, y4);//matrix-vector product
			float* y5 = ReLU(v5, W5);//ReLU
			float* v = dot(Wo, y5);//matrix-vector product
			float* y = Softmax(v, Wo);//softmax classification
			::fread(&hao, sizeof(unsigned char), 1, tp);//read the label
			D.re(hao);
			float* e = new float[10];
			for (int i = 0; i < 10; i++)
				e[i] = ((float)D.one[i]) - y[i];
			float* delta = e;
			float* e5 = FXCB_err(Wo, delta);
			float* delta5 = Delta2(y5, e5, W5);
			float* e4 = FXCB_err(W5, delta5);
			float*** e3 = Toshape3(e4, 20, Cout2);
			float*** e2 = apply3(20, Cout.height, Cout.width);
			Weight one(2, 2, ones);
			/*for (int i = 0; i < 20; i++)
			{
			::printf("layer %d\n", i);
			for (int n = 0; n < Cout2.height; n++)
			{
			for (int m = 0; m < Cout2.width; m++)
			::printf("%0.3f ", e3[i][n][m]);
			puts("");
			}
			}
			getchar();*/
			for (int i = 0; i < 20; i++)//upsample the pooled error back to conv size
				kron(e2[i], Cout, e3[i], Cout2, one.WG, one.len);

			/*for (int i = 0; i < 20; i++)
			{
			::printf("layer %d\n", i);
			for (int n = 0; n < Cout.height; n++)
			{
			for (int m = 0; m < Cout.width; m++)
			::printf("%f ", e2[i][n][m]);
			puts("");
			}
			}
			getchar();*/

			float*** delta2 = apply3(20, Cout.height, Cout.width);
			for (int i = 0; i < 20; i++)
				for (int n = 0; n < Cout.height; n++)
					for (int m = 0; m < Cout.width; m++)
						delta2[i][n][m] = (y2[i][n][m] > 0) * e2[i][n][m];
			float*** delta_x = (float***)malloc(sizeof(float**) * 20);
			Vector2 t1;
			for (int i = 0; i < 20; i++)
				delta_x[i] = conv2(X, ve, delta2[i], Cout, &t1);
			for (int i = 0; i < 20; i++)
				for (int n = 0; n < t1.height; n++)
					for (int m = 0; m < t1.width; m++)
						dW1[i].WG[n][m] += delta_x[i][n][m];
			dW5.re(delta5, y4, 1);
			dWo.re(delta, y5, 1);

			Free3(delta_x, 20, t1.height);
			Free3(delta2, 20, Cout.height);
			one.release();
			Free3(e2, 20, Cout.height);
			Free3(e3, 20, Cout2.height);
			free(e4);
			free(delta5);
			free(e5);
			free(v5);
			delete[] e;
			free(y5);
			free(v);
			free(y);
			Free3(y1, 20, Cout.height);
			free(y4);
		}
		for (int i = 0; i < 20; i++)
			dW1[i] /= (bsize);
		dW5 /= (bsize);
		dWo /= (bsize);
		for (int i = 0; i < 20; i++)
			for (int n = 0; n < W1[0].len.height; n++)
				for (int m = 0; m < W1[0].len.width; m++)
				{
					momentum1[i].WG[n][m] = ALPHA * dW1[i].WG[n][m] + BETA * momentum1[i].WG[n][m];
					W1[i].WG[n][m] += momentum1[i].WG[n][m];
				}
		for (int n = 0; n < W5.len.height; n++)
			for (int m = 0; m < W5.len.width; m++)
				momentum5.WG[n][m] = ALPHA * dW5.WG[n][m] + BETA * momentum5.WG[n][m];
		W5 += momentum5;
		for (int n = 0; n < Wo.len.height; n++)
			for (int m = 0; m < Wo.len.width; m++)
				momentumo.WG[n][m] = ALPHA * dWo.WG[n][m] + BETA * momentumo.WG[n][m];
		Wo += momentumo;

	}
	for (int i = 0; i < 20; i++)
	{
		momentum1[i].release();
		dW1[i].release();
	}
	delete[] momentum1;
	delete[] dW1;
	momentum5.release();
	momentumo.release();
	Free2(X, ve.height);
	free(blist);
	delete[] reader;
	D.release();
	dW5.release();
	dWo.release();
	return;
}
float rand1()
{
	float temp = (rand() % 20) / (float)10;
	if (temp < 0.0001)
		temp = 0.07;
	temp *= (rand() % 2 == 0) ? -1 : 1;
	return temp * 0.01;
}
float rand2()
{
	float temp = (rand() % 10) / (float)10;
	float ret = (2 * temp - 1) * sqrt(6) / sqrt(360 + 2000);
	if (ret < 0.0001 && ret>-0.0001)
		ret = 0.07;
	return ret;
}
float rand3()
{
	float temp = (rand() % 10) / (float)10;
	float ret = (2 * temp - 1) * sqrt(6) / sqrt(10 + 100);
	if (ret < 0.0001 && ret>-0.0001)
		ret = 0.07;
	return ret;
}
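rand2 and rand3 resemble Xavier (Glorot) uniform initialization, which samples weights uniformly from ±sqrt(6 / (fan_in + fan_out)); rand3's 10 + 100 matches Wo's 10×100 shape. A generic version might look like the sketch below (the function name and the use of RAND_MAX are illustrative, not from the original source):

#include <cstdlib>
#include <cmath>

// Hypothetical generic Xavier-uniform initializer; fan_in and fan_out are the
// layer's input and output sizes, which rand2/rand3 above hard-code.
float xavier_uniform(int fan_in, int fan_out)
{
	float limit = std::sqrt(6.0f / (float)(fan_in + fan_out));
	float u = (float)rand() / (float)RAND_MAX;   // uniform in [0, 1]
	return (2.0f * u - 1.0f) * limit;            // uniform in [-limit, limit]
}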

void train()
{
	FILE* fp = fopen("t10k-images.idx3-ubyte", "rb");
	FILE* tp = fopen("t10k-labels.idx1-ubyte", "rb");
	int rdint;
	::fread(&rdint, sizeof(int), 1, fp);
	::printf("training set magic number: %d\n", ReverseInt(rdint));
	::fread(&rdint, sizeof(int), 1, fp);
	::printf("training set size: %d\n", ReverseInt(rdint));
	::fread(&rdint, sizeof(int), 1, fp);
	::printf("training image height: %d\n", ReverseInt(rdint));
	::fread(&rdint, sizeof(int), 1, fp);
	::printf("training image width: %d\n", ReverseInt(rdint));
	int start1 = ftell(fp);
	::fread(&rdint, sizeof(int), 1, tp);
	::printf("label magic number: %d\n", ReverseInt(rdint));
	::fread(&rdint, sizeof(int), 1, tp);
	::printf("label count: %d\n", ReverseInt(rdint));
	int start2 = ftell(tp);
	Weight* W1 = new Weight[20];
	WD(W1, 9, 9, 20, rand1);
	Weight W5(100, 2000, rand2);
	Weight Wo(10, W5.len.height, rand3);
	for (int k = 0; k < 3; k++)
	{
		trainSGD(W1, W5, Wo, fp, tp);
		fseek(fp, start1, SEEK_SET);
		fseek(tp, start2, SEEK_SET);
		::printf("epoch %d finished\n", k + 1);
	}
	fclose(fp);
	fclose(tp);
	fp = fopen("mnist_Weight.acp", "wb");
	for (int i = 0; i < 20; i++)
		W1[i].save(fp);
	W5.save(fp);
	Wo.save(fp);
	fclose(fp);
	::printf("训练完成");
	getchar();
}
void train1()
{
	FILE* fp = fopen("out_img.acp", "rb");
	FILE* tp = fopen("out_label.acp", "rb");
	int rdint;
	::fread(&rdint, sizeof(int), 1, fp);
	::printf("training set magic number: %d\n", ReverseInt(rdint));
	::fread(&rdint, sizeof(int), 1, fp);
	::printf("training set size: %d\n", ReverseInt(rdint));
	::fread(&rdint, sizeof(int), 1, fp);
	::printf("training image height: %d\n", ReverseInt(rdint));
	::fread(&rdint, sizeof(int), 1, fp);
	::printf("training image width: %d\n", ReverseInt(rdint));
	int start1 = ftell(fp);
	::fread(&rdint, sizeof(int), 1, tp);
	::printf("label magic number: %d\n", ReverseInt(rdint));
	::fread(&rdint, sizeof(int), 1, tp);
	::printf("label count: %d\n", ReverseInt(rdint));
	int start2 = ftell(tp);
	Weight* W1 = new Weight[20];
	WD(W1, 9, 9, 20, rand1);
	Weight W5(100, 2000, rand2);
	Weight Wo(10, W5.len.height, rand3);
	for (int k = 0; k < 1000; k++)
	{
		trainSGD1(W1, W5, Wo, fp, tp);
		fseek(fp, start1, SEEK_SET);
		fseek(tp, start2, SEEK_SET);
		::printf("epoch %d finished\n", k + 1);
	}
	}
	fclose(fp);
	fclose(tp);
	fp = fopen("mnist_Weight.acp", "wb");
	for (int i = 0; i < 20; i++)
		W1[i].save(fp);
	W5.save(fp);
	Wo.save(fp);
	fclose(fp);
	::printf("训练完成");
	getchar();
}
void test()
{
	FILE* fp = fopen("mnist_Weight.acp", "rb");
	Weight* W1 = new Weight[20];
	WD(W1, 9, 9, 20, rand1);
	Weight W5(100, 2000, rand1);
	Weight Wo(10, W5.len.height, rand1);
	for (int i = 0; i < 20; i++)
		W1[i].load(fp);
	W5.load(fp);
	Wo.load(fp);
	fclose(fp);
	fp = fopen("t10k-images.idx3-ubyte", "rb");
	FILE* tp = fopen("t10k-labels.idx1-ubyte", "rb");
	int rdint;
	::fread(&rdint, sizeof(int), 1, fp);
	::printf("test set magic number: %d\n", ReverseInt(rdint));
	::fread(&rdint, sizeof(int), 1, fp);
	::printf("test set size: %d\n", ReverseInt(rdint));
	::fread(&rdint, sizeof(int), 1, fp);
	::printf("test image height: %d\n", ReverseInt(rdint));
	::fread(&rdint, sizeof(int), 1, fp);
	::printf("test image width: %d\n", ReverseInt(rdint));
	::fread(&rdint, sizeof(int), 1, tp);
	::printf("label magic number: %d\n", ReverseInt(rdint));
	::fread(&rdint, sizeof(int), 1, tp);
	::printf("label count: %d\n", ReverseInt(rdint));
	unsigned char* res = new unsigned char[28 * 28];
	float** X = apply2(28, 28);
	unsigned char biaoqian;
	Vector2 t2828 = Vector2(28, 28);
	for (int i = 0; i < 50; i++)
	{
		::fread(res, sizeof(unsigned char), 28 * 28, fp);
		Toshape2(X, res, 28, 28);
		print(X, t2828);
		float* h = SGD(W1, W5, Wo, X);//run the network
		int c = -1;
		for (int j = 0; j < 10; j++)
		{
			if (h[j] > 0.85)
			{
				c = j;
				break;
			}
		}
		free(h);
		::fread(&biaoqian, sizeof(unsigned char), 1, tp);
		::printf("expected \"%d\", the network recognized \"%d\"\n", biaoqian, c);
	}
}
}
void sb()
{
	Weight* W1 = new Weight[20];
	Weight W5(100, 2000, rand2);
	Weight Wo(10, W5.len.height, rand3);
	WD(W1, 9, 9, 20, rand1);
	FILE* wp = fopen("mnist_Weight.acp", "rb");//load the trained weights
	for (int i = 0; i < 20; i++)
		W1[i].load(wp);
	W5.load(wp);
	Wo.load(wp);
	fclose(wp);
	//::printf("weights loaded\n");
	Vector2 out;
	char path[256];
	for (int r = 0; r < 4; r++)
	{
		sprintf(path, "acp%d.png", r);
		float** img = Get_data_by_Mat(path, out);
		//print(img, out);
		float* h = SGD(W1, W5, Wo, img);//run the network
		int c = -1;
		float x = 0;
		for (int i = 0; i < 10; i++)
		{
			if (h[i] > 0.85 && h[i] > x)
			{
				x = h[i];
				c = i;
			}
		}
		::printf("%d ", c);
		Free2(img, out.height);
		free(h);
		remove(path);
	}
	puts("");
}

void sb(char* path)
{
	Weight* W1 = new Weight[20];
	Weight W5(100, 2000, rand2);
	Weight Wo(10, W5.len.height, rand3);
	FILE* fp = fopen("mnist_Weight.acp", "rb");
	puts("开始加载权重");
	WD(W1, 9, 9, 20, rand1);
	for (int i = 0; i < 20; i++)
		W1[i].load(fp);
	W5.load(fp);
	Wo.load(fp);
	fclose(fp);
	::printf("加载权重完毕\n");
	Vector2 out;
	float** img = Get_data_by_Mat(path, out);
	printf("图像载入完毕");
	//print(img, out);
	float* h = SGD(W1, W5, Wo, img);//带入神经网络
	int c = -1;
	float max = -1;
	for (int i = 0; i < 10; i++)
	{
		::printf("%f\n", h[i]);
		/*if (h[i] > 0.65 && h[i] > x)
		{
			x = h[i];
			c = i;
		}*/
		if (max< h[i])
		{
			max = h[i];
			c = i;
		}
	}
	::printf("神经网络认为它是数字-->%d   相似度为:%f", c, max);
	Free2(img, out.height);
	free(h);

}
bool thank(int x1, int x2, int y1, int y2, int z1, int z2)//true when the two RGB colours are within Euclidean distance 100
{
	int dis = 0;
	int xx = (x1 - x2);
	dis += xx * xx;
	xx = (y1 - y2);
	dis += xx * xx;
	xx = (z1 - z2);
	dis += xx * xx;
	dis = (int)sqrt(dis);
	if (dis < 100)
		return true;
	return false;
}
void qg(char* path)
{
	::printf("%s", path);
	::printf(" recognized as: ");
	//Mat img = imread(path);
	CImage img;
	img.Load(path);
	//Vec3b yes = Vec3b(204, 198, 204);
	CImage sav;// = Mat(120, 80, CV_8UC3);
	sav.Create(120, 80, 24);
	ResizeCImage(img, img.GetWidth() * 10, img.GetHeight() * 10);
	int XS = img.GetBPP() / 8;
	int pitch = img.GetPitch();
	//resize(img, img, Size(img.cols * 10, img.rows * 10));
	unsigned char* rgb = (unsigned char*)img.GetBits();
	for (int i = 0; i < img.GetHeight(); i++)
		for (int j = 0; j < img.GetWidth(); j++)
		{
			//Vec3b rgb = img.at<Vec3b>(i, j);
			int x1= *(rgb + (j * XS) + (i * pitch) + 0);
			int y1 = *(rgb + (j * XS) + (i * pitch) + 1);
			int z1 = *(rgb + (j * XS) + (i * pitch) + 2);
			if (thank(x1, 204, y1, 198, z1, 204))
			{
				*(rgb + (j * XS) + (i * pitch) + 0) = 255;
				*(rgb + (j * XS) + (i * pitch) + 1) = 255;
				*(rgb + (j * XS) + (i * pitch) + 2) = 255;
				//img.at<Vec3b>(i, j) = Vec3b(255, 255, 255);
			}
			/*else
				img.at<Vec3b>(i, j) = Vec3b(0, 0, 0);*/
		}
	/*char p[256];
	for (int k = 0; k < 4; k++)
	{
		sprintf(p, "acp%d.png", k);
		for (int i = 35 + (k * 80); i < 115 + (k * 80); i++)
			for (int j = 30; j < 150; j++)
				sav.at<Vec3b>(j - 30, i - (35 + (k * 80))) = img.at<Vec3b>(j, i);
		imwrite(p, sav);
	}
	img.release();
	sav.release();*/
	sb();
}
void test1()
{
	FILE* fp = fopen("mnist_Weight.acp", "rb");
	Weight* W1 = new Weight[20];
	WD(W1, 9, 9, 20, rand1);
	Weight W5(100, 2000, rand1);
	Weight Wo(10, W5.len.height, rand1);
	for (int i = 0; i < 20; i++)
		W1[i].load(fp);
	W5.load(fp);
	Wo.load(fp);
	fclose(fp);
	fp = fopen("out_img.acp", "rb");
	FILE* tp = fopen("out_label.acp", "rb");
	int rdint;
	::fread(&rdint, sizeof(int), 1, fp);
	::printf("training set magic number: %d\n", ReverseInt(rdint));
	::fread(&rdint, sizeof(int), 1, fp);
	::printf("training set size: %d\n", ReverseInt(rdint));
	::fread(&rdint, sizeof(int), 1, fp);
	::printf("training image height: %d\n", ReverseInt(rdint));
	::fread(&rdint, sizeof(int), 1, fp);
	::printf("training image width: %d\n", ReverseInt(rdint));
	::fread(&rdint, sizeof(int), 1, tp);
	::printf("label magic number: %d\n", ReverseInt(rdint));
	::fread(&rdint, sizeof(int), 1, tp);
	::printf("label count: %d\n", ReverseInt(rdint));
	unsigned char* res = new unsigned char[28 * 28];
	float** X = apply2(28, 28);
	unsigned char biaoqian;
	Vector2 t2828 = Vector2(28, 28);
	for (int i = 0; i < 10; i++)
	{
		::fread(res, sizeof(unsigned char), 28 * 28, fp);
		Toshape2(X, res, 28, 28);
		print(X, t2828);
		float* h = SGD(W1, W5, Wo, X);//run the network
		int c = -1;
		for (int j = 0; j < 10; j++)
		{
			if (h[j] > 0.85)
			{
				c = j;
				break;
			}
		}
		free(h);
		::fread(&biaoqian, sizeof(unsigned char), 1, tp);
		::printf("expected \"%d\", the network recognized \"%d\"\n", biaoqian, c);
	}
}
int main(int argc, char** argv)
{
	//train();//call this first to train; once training finishes the saved weights can be loaded directly
	if (argc > 1)
	{
		sb(argv[1]);
		getchar();
	}
	return 0;
}
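A note on building and running (inferred from the includes, not stated in the original): TP_NNW.h pulls in <Windows.h> and <atlimage.h>, so this is meant for MSVC with ATL on Windows. Judging from main(), the intended workflow is to uncomment train() once to produce mnist_Weight.acp from the MNIST files, then run the program with an image path as its single argument (e.g. cnn.exe digit.png, name illustrative) to classify it via sb(argv[1]).

The implementation file (TP_NNW.cpp, name assumed from the include) follows.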


#include"TP_NNW.h"
#include<iostream>
#pragma warning(disable:4996)
void Weight::apply(int H, int W)
{
	fz = true;
	this->len.height = H;
	this->len.width = W;
	this->WG = apply2(H, W);//allocate memory
	for (int i = 0; i < H; i++)
		for (int j = 0; j < W; j++)
			this->WG[i][j] = Get_rand();//random initial value
}

void Weight::apply(int H, int W, float(*def)())
{
	fz = true;
	this->len.height = H;
	this->len.width = W;
	this->WG = apply2(H, W);
	for (int i = 0; i < H; i++)
		for (int j = 0; j < W; j++)
			this->WG[i][j] = def();
}

Weight::~Weight()
{
	this->release();
}

Weight::Weight(int H/*height*/, int W/*width*/)
{
	W = W <= 0 ? 1 : W;//guard against zero or negative sizes
	H = H <= 0 ? 1 : H;//guard against zero or negative sizes
	fz = true;
	this->apply(H, W);
}

Weight::Weight(int H/*height*/, int W/*width*/, float(*def)())
{
	W = W <= 0 ? 1 : W;
	H = H <= 0 ? 1 : H;
	fz = true;
	this->apply(H, W, def);
}

void Weight::re(float* delta, float* inp, float alpha)
{
	for (int i = 0; i < this->len.height; i++)
	{
		for (int j = 0; j < this->len.width; j++)
			this->WG[i][j] += alpha * delta[i] * inp[j];
	}
}

void Weight::save(FILE* fp)
{
	for (int i = 0; i < this->len.height; i++)
		for (int j = 0; j < this->len.width; j++)
			fwrite(&this->WG[i][j], sizeof(float), 1, fp);
}

void Weight::load(FILE* fp)
{
	for (int i = 0; i < this->len.height; i++)
		for (int j = 0; j < this->len.width; j++)
			fread(&this->WG[i][j], sizeof(float), 1, fp);
}

void Weight::release()
{
	if (this->fz)
	{
		Free2(this->WG, this->len.height);
		//free(this->WG);
	}
	this->fz = false;
}
void Weight::operator >> (Weight& temp)
{
	temp.release();
	//free(temp.WG);
	temp.apply(this->len.height, this->len.width, zeros);
}
void Weight::operator+=(Weight& temp)
{
	for (int i = 0; i < this->len.height; i++)
		for (int j = 0; j < this->len.width; j++)
			this->WG[i][j] += temp.WG[i][j];
}

//void Weight::operator/=(int & temp)
//{
//	for (int i = 0; i < this->len.height; i++)
//		for (int j = 0; j < this->len.width; j++)
//			this->WG[i][j] /= temp;
//}

void Weight::operator/=(int temp)
{
	for (int i = 0; i < this->len.height; i++)
		for (int j = 0; j < this->len.width; j++)
			this->WG[i][j] /= temp;
}

void Weight::operator<<(Weight& temp)
{
	Free2(this->WG, this->len.height);
	this->len.height = temp.len.height;
	this->len.width = temp.len.width;
	this->WG = temp.WG;
}

void WD(Weight* WGS, int H, int W, int len)
{
	for (int i = 0; i < len; i++)
	{
		WGS[i].apply(H, W);
	}
}
void WD(Weight* WGS, int H, int W, int len, float(*def)())
{
	for (int i = 0; i < len; i++)
	{
		WGS[i].apply(H, W, def);
	}
}
float zeros()
{
	return 0;
}


void print(float* y, int y_len)
{
	for (int i = 0; i < y_len; i++)
	{
		printf("%0.2f ", y[i]);
		//printf("%d ", y[i]>0?1:0);
	}
	puts("");
}

void print(float* y, Vector2& vec)
{
	print(y, vec.height);
}

void print(float** y, Vector2& vec)
{
	for (int i = 0; i < vec.height; i++)
		print(y[i], vec.width);
}

void print(char* y, int y_len)
{
	for (int i = 0; i < y_len; i++)
	{
		printf("%d ", y[i]);
	}
	puts("");
}

void print(char** y, Vector2& vec)
{
	for (int i = 0; i < vec.height; i++)
		print(y[i], vec.width);
}

void print(Weight& w)
{
	print(w.WG, w.len);
}

void print(Weight* w, int len)
{
	for (int i = 0; i < len; i++)
	{
		printf("\n第%d层\n", i + 1);
		print(w[i]);
	}
}


float** apply2(int H, int W)
{
	float** temp = (float**)malloc(sizeof(float*) * H);
	for (int i = 0; i < H; i++)
		temp[i] = (float*)malloc(sizeof(float) * W);
	return temp;
}

float*** apply3(int P, int H/*height*/, int W/*width*/)
{
	float*** temp = (float***)malloc(sizeof(float**) * P);
	for (int i = 0; i < P; i++)
		temp[i] = apply2(H, W);
	return temp;
}

char** apply2_char(int H, int W)
{
	char** temp = (char**)malloc(sizeof(char*) * H);
	for (int i = 0; i < H; i++)
		temp[i] = (char*)malloc(sizeof(char) * W);
	return temp;
}
float ones()
{
	return 1;
}
float*** Conv(float** X, Vector2& inp, Vector2& out, Weight* W, int W_len)
{
	out.height = inp.height - W[0].len.height + 1;
	out.width = inp.width - W[0].len.width + 1;
	float*** temp = (float***)malloc(sizeof(float**) * W_len);
	for (int k = 0; k < W_len; k++)
		temp[k] = conv2(X, inp, W[k].WG, W[0].len);
	return temp;
}
float*** Pool(float*** y, Vector2& inp, int P, Vector2& out)
{
	int h = inp.height / 2, w = inp.width / 2;
	out.height = h;
	out.width = w;
	float*** temp = apply3(P, h, w);
	float** filter = apply2(2, 2);
	for (int i = 0; i < 2; i++)
		for (int j = 0; j < 2; j++)
			filter[i][j] = 0.25;
	for (int k = 0; k < P; k++)
	{
		Vector2 len;
		Vector2 t22 = Vector2(2, 2);
		float** img = conv2(y[k], inp, filter, t22, &len);
		for (int i = 0; i < h; i++)
			for (int j = 0; j < w; j++)
				temp[k][i][j] = img[i * 2][j * 2];
		Free2(img, len.height);
	}
	Free2(filter, 2);
	return temp;
}
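Pool implements 2×2 mean pooling indirectly: it convolves each feature map with a 2×2 filter of 0.25s and then keeps every second row and column of the result, which is equivalent to averaging disjoint 2×2 blocks.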
float* apply1(int H)
{
	float* temp = (float*)malloc(sizeof(float) * H);
	return temp;
}

char* apply1_char(int H)
{
	char* temp = (char*)malloc(sizeof(char) * H);
	return temp;
}

float Get_rand()
{
	float temp = (float)(rand() % 10) / (float)10;
	return rand() % 2 == 0 ? temp : -temp;
}

float Sigmoid(float x)
{
	return 1 / (1 + exp(-x));
}

float* Sigmoid(float* x, Weight& w)
{
	return Sigmoid(x, w.len.height);
}

float* Sigmoid(float* x, int height)
{
	float* y = (float*)malloc(sizeof(float) * height);
	for (int i = 0; i < height; i++)
		y[i] = Sigmoid(x[i]);
	return y;
}

float ReLU(float x)
{
	return x > 0 ? x : 0;
}

float* ReLU(float* x, Weight& w)
{
	return ReLU(x, w.len.height);
}

float* ReLU(float* x, int height)
{
	float* y = (float*)malloc(sizeof(float) * height);
	for (int i = 0; i < height; i++)
		y[i] = ReLU(x[i]);
	return y;
}

float* Softmax(float* x, Weight& w)
{
	return Softmax(x, w.len.height);
}

float dsigmoid(float x)
{
	return x * (1 - x);
}

float* Softmax(float* x, int height)
{
	float* t = apply1(height);   // malloc'd via apply1 so callers can free() it
	float* ex = apply1(height);
	float maxx = x[0];
	for (int i = 1; i < height; i++)//subtract the max for numerical stability
		if (x[i] > maxx) maxx = x[i];
	float sum = 0;
	for (int i = 0; i < height; i++)
	{
		ex[i] = exp(x[i] - maxx);
		sum += ex[i];
	}
	for (int i = 0; i < height; i++)
	{
		t[i] = ex[i] / sum;
	}
	free(ex);
	return t;
}

float* FXCB_err(Weight& w, float* delta)
{
	//back-propagate the error: temp = W^T * delta
	float* temp = (float*)malloc(sizeof(float) * w.len.width);
	for (int i = 0; i < w.len.width; i++)
		temp[i] = 0;
	for (int i = 0; i < w.len.width; i++)
		for (int j = 0; j < w.len.height; j++)
			temp[i] += w.WG[j][i] * delta[j];
	return temp;
}

float* Delta1(float* y, float* e, Weight& w)
{
	//delta for a sigmoid layer: y * (1 - y) is the sigmoid derivative
	float* temp = (float*)malloc(sizeof(float) * w.len.height);
	for (int i = 0; i < w.len.height; i++)
		temp[i] = y[i] * (1 - y[i]) * e[i];
	return temp;
}

float* Delta2(float* v, float* e, Weight& w)
{
	//delta for a ReLU layer: pass the error through only where the activation was positive
	float* temp = (float*)malloc(sizeof(float) * w.len.height);
	for (int i = 0; i < w.len.height; i++)
		temp[i] = v[i] > 0 ? e[i] : 0;
	return temp;
}

float* dot(Weight& W, float* inp, int* len)
{
	float* temp = (float*)malloc(sizeof(float) * W.len.height);
	for (int i = 0; i < W.len.height; i++)
		temp[i] = 0;
	for (int i = 0; i < W.len.height; i++)
	{
		for (int j = 0; j < W.len.width; j++)
			temp[i] += (W.WG[i][j] * inp[j]);
	}
	if (len != NULL)
		*len = W.len.height;
	return temp;
}

char* randperm(int max, int count)
{
	char* temp = new char[count] {0};
	for (int i = 0; i < count; i++)
	{
		while (1)
		{
			char t = rand() % max;
			bool nothave = true;
			for (int j = 0; j < i; j++)
				if (t == temp[j])
				{
					nothave = false;
					break;
				}
			if (nothave)
			{
				temp[i] = t;
				break;
			}
		}
	}
	return temp;
}

void Dropout(float* y, float ratio, Weight& w)
{
	float* ym = new float[w.len.height] {0};
	float round = w.len.height * (1 - ratio);
	int num = (round - (float)(int)round >= 0.5f ? (int)round + 1 : (int)round);
	char* idx = randperm(w.len.height, num);
	for (int i = 0; i < num; i++)
	{
		ym[idx[i]] = (1 / (1 - ratio));
	}
	for (int i = 0; i < w.len.height; i++)
	{
		y[i] *= ym[i];
	}
	delete[] idx;
	delete[] ym;
}
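This is the "inverted" dropout variant: roughly a (1 - ratio) fraction of the units are kept, each kept activation is scaled by 1 / (1 - ratio) so the expected output is unchanged, and the rest are zeroed, which means no rescaling is needed at inference time. (Dropout is defined and declared in the header but never actually called in this listing.)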

float** conv2(float** x, Vector2& x_len, float** fiter, Vector2& fiter_len, Vector2* out_len, int flag, int distance, int fill)
{
	switch (flag)
	{
	case Valid:return VALID(x, x_len.height, x_len.width, fiter, fiter_len.height, fiter_len.width, distance, out_len);
	case Same:return SAME(x, x_len.height, x_len.width, fiter, fiter_len.height, fiter_len.width, distance, fill, out_len);
	}
	return nullptr;
}

float** VALID(float** x, int x_h, int x_w, float** fiter, int fiter_h, int fiter_w, int distance, Vector2* out_len)
{
	int h = VALID_out_len(x_h, fiter_h, distance);
	int w = VALID_out_len(x_w, fiter_w, distance);
	float** temp = apply2(h, w);
	float** t = fiter;
	if (out_len != NULL)
	{
		out_len->height = h;
		out_len->width = w;
	}
	for (int i = 0; i < x_h + 1 - fiter_h; i += distance)
		for (int j = 0; j < x_w + 1 - fiter_w; j += distance)
		{
			float count = 0;
			for (int n = i; n < i + fiter_h; n++)
				for (int m = j; m < j + fiter_w; m++)
				{
					if (n >= x_h || m >= x_w)
						continue;
					count += (x[n][m] * t[n - i][m - j]);
				}
			temp[(i / distance)][(j / distance)] = count;
		}
	//free(t);
	return temp;
}

float** SAME(float** x, int x_h, int x_w, float** fiter, int fiter_h, int fiter_w, int distance, int fill, Vector2* out_len)
{
	return nullptr;//SAME-mode (zero-padded) convolution is declared but not implemented
}

int VALID_out_len(int x_len, int fiter_len, int distance)
{
	float temp = (float)(x_len - fiter_len) / (float)distance;
	int t = temp - (int)((float)temp) >= 0.5 ? (int)temp + 1 : (int)temp;
	t++;
	return t;
}

void show_Weight(Weight& W)
{
	for (int i = 0; i < W.len.height; i++)
	{
		for (int j = 0; j < W.len.width; j++)
		{
			printf("%0.3f ", W.WG[i][j]);
		}
		puts("");
	}
}
void rot90(Weight& x)
{
	int h = x.len.width, w = x.len.height;
	x.WG = rot90(x.WG, x.len, true);
	x.len.width = w;
	x.len.height = h;
}
float** rot90(float** x, Vector2& x_len, bool release)
{
	float** temp = apply2(x_len.width, x_len.height);
	for (int i = 0; i < x_len.height; i++)
		for (int j = 0; j < x_len.width; j++)
		{
			temp[x_len.width - 1 - j][i] = x[i][j];
		}
	if (release)
	{
		Free2(x, x_len.height);
		//free(x);
	}
	return temp;
}

float** rot180(float** x, Vector2& x_len, bool release)
{
	float** temp = apply2(x_len.height, x_len.width);
	for (int i = 0; i < x_len.height; i++)
	{
		for (int j = 0; j < x_len.width; j++)
		{
			temp[x_len.height - 1 - i][x_len.width - 1 - j] = x[i][j];
		}
	}
	if (release)
	{
		Free2(x, x_len.height);
		//free(x);
	}
	return temp;
}
void ResizeCImage(CImage& image, int newWidth, int newHeight) {
	// create a new CImage of the target size
	CImage resizedImage;
	resizedImage.Create(newWidth, newHeight, image.GetBPP());

	// stretch-blit the original image onto the new one (GetDC/ReleaseDC must pair up)
	HDC hdc = resizedImage.GetDC();
	SetStretchBltMode(hdc, HALFTONE);
	image.StretchBlt(hdc, 0, 0, newWidth, newHeight);

	// release the new image's device context once drawing is done
	resizedImage.ReleaseDC();

	// move the result back into the original CImage object
	image.Destroy();
	image.Attach(resizedImage.Detach());
	resizedImage.Destroy();
}
float** Get_data_by_Mat(char* filepath, Vector2& out_len)
{
	CImage mat;// = cv::imread(filepath, 0);
	//cv::resize(mat, mat, cv::Size(28, 28));
	mat.Load(filepath);
	ResizeCImage(mat, 28, 28);
	/*cv::imshow("tt", mat);
	cv::waitKey(0);*/
	out_len.height = mat.GetHeight();
	out_len.width = mat.GetWidth();
	float** temp = apply2(mat.GetHeight(), mat.GetWidth());
	unsigned char* rgb = (unsigned char*)mat.GetBits();
	int pitch = mat.GetPitch();
	int hui = 0;
	int XS = mat.GetBPP()/8;
	for (int i = 0; i < out_len.height; i++)
		for (int j = 0; j < out_len.width; j++)
		{
			hui = 0;
			for (int kkk = 0; kkk < 3; kkk++)
			{
				hui += *(rgb + (j * XS) + (i * pitch) + kkk);
			}
			hui /= 3;
			temp[i][j] = ((float)hui / (float)255);
			//temp[i][j] = 1 - temp[i][j];
		}
	mat.Destroy();
	return temp;
}

char** Get_data_by_Mat_char(char* filepath, Vector2& out_len, int threshold)
{
	CImage mat;
	mat.Load(filepath);
	//cv::Mat mat = cv::imread(filepath, 0);
	out_len.height = mat.GetHeight();
	out_len.width = mat.GetWidth();
	char** temp = apply2_char(out_len.height, out_len.width);
	unsigned char* rgb = (unsigned char*)mat.GetBits();
	int pitch = mat.GetPitch();
	int hui = 0;
	int XS = mat.GetBPP() / 8;
	for (int i = 0; i < out_len.height; i++)
		for (int j = 0; j < out_len.width; j++)
		{
			hui = 0;
			for (int kkk = 0; kkk < 3; kkk++)
			{
				hui += *(rgb + (j * XS) + (i * pitch) + kkk);
			}
			hui /= 3;
			temp[i][j] = hui > threshold ? 0 : 1;
		}
	mat.Destroy();
	return temp;
}

void Get_data_by_Mat(char* filepath, Weight& w)
{
	w.WG = Get_data_by_Mat(filepath, w.len);
}

Weight Get_data_by_Mat(char* filepath)
{
	Weight temp;
	Get_data_by_Mat(filepath, temp);
	return temp;
}

Vector2::Vector2()
{
	this->height = 0;
	this->width = 0;
}

Vector2::Vector2(int height, int width)
{
	this->height = height;
	this->width = width;
}

XML::XML(FILE* fp, char* name, int layer)
{
	this->fp = fp;
	this->name = name;
	this->layer = layer;
}

void XML::showchild()
{
	char reader[500];
	while (fgets(reader, 500, this->fp))
	{
		int len = strlen(reader);
		int lay = 0;
		for (; lay < len; lay++)
		{
			if (reader[lay] != '\t')break;
		}
		if (lay == this->layer)
		{
			if (reader[lay + 1] == '/')continue;
			char show[500];
			memset(show, 0, 500);
			for (int i = lay + 1; i < len - 2; i++)
			{
				if (reader[i] == '>')break;
				show[i - lay - 1] = reader[i];
			}
			puts(show);
		}
	}
	fseek(this->fp, 0, 0);
}

void bit::operator=(int x)
{
	this->B = x;
}

float* reshape(float** x, int h, int w)
{
	float* temp = (float*)malloc(sizeof(float) * w * h);
	int count = 0;
	for (int i = 0; i < h; i++)
		for (int j = 0; j < w; j++)
		{
			temp[count++] = x[i][j];
		}
	return temp;
}

float* reshape(float** x, Vector2& x_len)
{
	return reshape(x, x_len.height, x_len.width);
}

float* reshape(float*** x, Vector2& x_len, int P, bool release)
{
	float* temp = apply1(x_len.height * x_len.width * P);
	int c = 0;
	for (int i = 0; i < P; i++)
		for (int n = 0; n < x_len.height; n++)
			for (int m = 0; m < x_len.width; m++)
				temp[c++] = x[i][n][m];
	if (release)
		Free3(x, P, x_len.height);
	//free(x);
	return temp;
}

int* bList(int distance, int max, int* out_len)
{
	int num = (max % distance != 0);
	int t = (int)(max / distance);
	t += num;
	if (out_len != NULL)
		*out_len = t;
	int* out = (int*)malloc(sizeof(int) * t);
	for (int i = 0; i < t; i++)
	{
		out[i] = i * distance;
	}
	return out;
}

void Free2(float** x, int h)
{
	for (int i = 0; i < h; i++)
		free(x[i]);
	free(x);
}

void Free3(float*** x, int p, int h)
{
	for (int i = 0; i < p; i++)
		for (int j = 0; j < h; j++)
			free(x[i][j]);
	for (int i = 0; i < p; i++)
		free(x[i]);
	free(x);
}

void kron(float** out, Vector2& out_len, float** inp, Vector2& inp_len, float** filter, Vector2& filter_len)
{
	for (int i = 0; i < inp_len.height; i++)
		for (int j = 0; j < inp_len.width; j++)
		{
			for (int n = i * 2; n < out_len.height && n < ((i * 2) + filter_len.height); n++)
				for (int m = (j * 2); m < ((j * 2) + filter_len.width) && m < out_len.width; m++)
				{
					out[n][m] = inp[i][j] * filter[n - (i * 2)][m - (j * 2)] * 0.25;
				}
		}
}
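kron here is the backward pass of the 2×2 mean pool: each pooled error value is spread over its 2×2 window and scaled by 0.25, the coefficient of the averaging filter used in Pool. A minimal standalone sketch of the same idea (the 2×2-to-4×4 sizes are hypothetical):

#include <cstdio>

int main()
{
	float pooled[2][2] = { {1.0f, 2.0f}, {3.0f, 4.0f} };   // error from the pooled layer
	float up[4][4];                                        // upsampled back to conv size
	for (int i = 0; i < 2; i++)
		for (int j = 0; j < 2; j++)
			for (int n = 0; n < 2; n++)
				for (int m = 0; m < 2; m++)
					up[i * 2 + n][j * 2 + m] = pooled[i][j] * 0.25f; // spread and rescale
	for (int r = 0; r < 4; r++, std::puts(""))
		for (int c = 0; c < 4; c++)
			std::printf("%.2f ", up[r][c]);
	return 0;
}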

char** mnist::Toshape2(char* x, int h, int w)
{
	char** temp = apply2_char(h, w);
	int c = 0;
	for (int i = 0; i < h; i++)
		for (int j = 0; j < w; j++)
			temp[i][j] = x[c++];
	return temp;
}

char** mnist::Toshape2(char* x, Vector2& x_len)
{
	return mnist::Toshape2(x, x_len.height, x_len.width);
}

void mnist::Toshape2(char** out, char* x, int h, int w)
{
	int c = 0;
	for (int i = 0; i < h; i++)
		for (int j = 0; j < w; j++)
			out[i][j] = x[c++];
}

void mnist::Toshape2(char** out, char* x, Vector2& x_len)
{
	mnist::Toshape2(out, x, x_len.height, x_len.width);
}

float** mnist::Toshape2_F(char* x, int h, int w)
{
	float** temp = apply2(h, w);
	int c = 0;
	for (int i = 0; i < h; i++)
		for (int j = 0; j < w; j++)
			temp[i][j] = ((float)x[c++] / (float)255);
	return temp;
}

float** mnist::Toshape2_F(char* x, Vector2& x_len)
{
	return mnist::Toshape2_F(x, x_len.height, x_len.width);
}

void mnist::Toshape2(float** out, char* x, int h, int w)
{
	int c = 0;
	for (int i = 0; i < h; i++)
		for (int j = 0; j < w; j++)
			out[i][j] = ((float)x[c++] / (float)255);
}

void mnist::Toshape2(float** out, char* x, Vector2& x_len)
{
	mnist::Toshape2(out, x, x_len.height, x_len.width);
}

void mnist::Toshape2(float** out, unsigned char* x, int h, int w)
{
	int c = 0;
	for (int i = 0; i < h; i++)
		for (int j = 0; j < w; j++)
		{
			out[i][j] = ((float)x[c++] / (float)255);
		}
}

void mnist::Toshape2(float** out, unsigned char* x, Vector2& x_len)
{
	mnist::Toshape2(out, x, x_len.height, x_len.width);
}

float*** mnist::Toshape3(float* x, int P, Vector2& x_len)
{
	float*** temp = apply3(P, x_len.height, x_len.width);
	int c = 0;
	for (int i = 0; i < P; i++)
		for (int j = 0; j < x_len.height; j++)
			for (int n = 0; n < x_len.width; n++)
				temp[i][j][n] = x[c++];
	return temp;
}

int mnist::ReverseInt(int i)
{
	unsigned char ch1, ch2, ch3, ch4;
	ch1 = i & 255;
	ch2 = (i >> 8) & 255;
	ch3 = (i >> 16) & 255;
	ch4 = (i >> 24) & 255;
	return((int)ch1 << 24) + ((int)ch2 << 16) + ((int)ch3 << 8) + ch4;
}
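ReverseInt exists because the MNIST/IDX file header stores its 32-bit integers big-endian, so on a little-endian x86 machine the raw bytes must be reversed. A standalone sketch (with a local copy of the function for illustration):

#include <cstdio>

static int ReverseInt(int i)   // local copy of mnist::ReverseInt above
{
	unsigned char ch1 = i & 255, ch2 = (i >> 8) & 255;
	unsigned char ch3 = (i >> 16) & 255, ch4 = (i >> 24) & 255;
	return ((int)ch1 << 24) + ((int)ch2 << 16) + ((int)ch3 << 8) + ch4;
}

int main()
{
	// 2051 (0x00000803) is the IDX magic number for image files; stored
	// big-endian it reads back as 0x03080000 on a little-endian machine.
	std::printf("%d\n", ReverseInt(0x03080000));   // prints 2051
	return 0;
}

The header file, TP_NNW.h, follows.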


#pragma once
#include<Windows.h>
#include<atlimage.h>
#define ALPHA 0.01
#define BETA 0.95
#define RATIO 0.2
void ResizeCImage(CImage& image, int newWidth, int newHeight);
struct bit
{
	unsigned B : 1;
	void operator=(int x);
};
enum Conv_flag
{
	Valid = 0,
	Same = 1
};
struct Vector2 {
	int height, width;
	Vector2();
	Vector2(int height, int width);
};
class Weight
{
private:
	void apply(int H/*height*/, int W/*width*/);
	void apply(int H/*height*/, int W/*width*/, float(*def)());
public:
	bool fz;
	Vector2 len;
	float** WG;
	~Weight();
	Weight() { fz = false; }
	Weight(int H/*height*/, int W/*width*/);
	Weight(int H/*height*/, int W/*width*/, float (*def)());
	void re(float* delta, float* inp, float alpha = ALPHA);
	void save(FILE* fp);
	void load(FILE* fp);
	void release();
	void operator>>(Weight& temp);
	void operator+=(Weight& temp);
	//void operator/=(int &temp);
	void operator/=(int temp);
	void operator<<(Weight& temp);
	void friend WD(Weight* WGS, int H, int W, int len);
	void friend WD(Weight* WGS, int H, int W, int len, float(*def)());
};
float zeros();
float ones();
float*** Pool(float*** y, Vector2& inp, int P, Vector2& out);//pooling
float*** Conv(float** X, Vector2& inp, Vector2& out, Weight* W, int W_len);//convolution
void print(float* y, int y_len = 1);
void print(float* y, Vector2& vec);
void print(float** y, Vector2& vec);
void print(char* y, int y_len = 1);
void print(char** y, Vector2& vec);
void print(Weight& w);
void print(Weight* w, int len = 1);
float** apply2(int H/*height*/, int W/*width*/);
float*** apply3(int P, int H/*height*/, int W/*width*/);
char** apply2_char(int H/*height*/, int W/*width*/);
float* apply1(int H);
char* apply1_char(int H);
float Get_rand();
float Sigmoid(float x);
float* Sigmoid(float* x, Weight& w);
float* Sigmoid(float* x, int height);
float ReLU(float x);
float* ReLU(float* x, Weight& w);
float* ReLU(float* x, int height);
float* Softmax(float* x, Weight& w);
float dsigmoid(float x);
float* Softmax(float* x, int height);
float* FXCB_err(Weight& w, float* delta);
float* Delta1(float* y, float* e, Weight& w);
float* Delta2(float* v, float* e, Weight& w);
float* dot(Weight& W/*weights*/, float* inp/*input vector*/, int* len = NULL);
char* randperm(int max, int count);
void Dropout(float* y, float ratio, Weight& w);
float** conv2(float** x, Vector2& x_len, float** fiter, Vector2& fiter_len,
	Vector2* out_len = NULL, int flag = Valid, int distance = 1, int fill = 0);
float** VALID(float** x, int x_h, int x_w, float** fiter, int fiter_h,
	int fiter_w, int distance, Vector2* out_len = NULL);
float** SAME(float** x, int x_h, int x_w, float** fiter, int fiter_h,
	int fiter_w, int distance, int fill, Vector2* out_len = NULL);
int VALID_out_len(int x_len, int fiter_len, int distance);
void show_Weight(Weight& W);
void rot90(Weight& x);
float** rot90(float** x, Vector2& x_len, bool release = false);
float** rot180(float** x, Vector2& x_len, bool release = false);
float** Get_data_by_Mat(char* filepath, Vector2& out_len);
char** Get_data_by_Mat_char(char* filepath, Vector2& out_len, int threshold = 127);
void Get_data_by_Mat(char* filepath, Weight& w);
Weight Get_data_by_Mat(char* filepath);
float* reshape(float** x, int h, int w);
float* reshape(float** x, Vector2& x_len);
float* reshape(float*** x, Vector2& x_len, int P, bool release = false);
namespace mnist
{
	char** Toshape2(char* x, int h, int w);
	char** Toshape2(char* x, Vector2& x_len);
	void Toshape2(char** out, char* x, int h, int w);
	void Toshape2(char** out, char* x, Vector2& x_len);
	float** Toshape2_F(char* x, int h, int w);
	float** Toshape2_F(char* x, Vector2& x_len);
	void Toshape2(float** out, char* x, int h, int w);
	void Toshape2(float** out, char* x, Vector2& x_len);
	void Toshape2(float** out, unsigned char* x, int h, int w);
	void Toshape2(float** out, unsigned char* x, Vector2& x_len);
	float*** Toshape3(float* x, int P, Vector2& x_len);
	int ReverseInt(int i);
}
struct XML
{
	char* name;
	FILE* fp;
	int layer;
	XML(FILE* fp, char* name, int layer);
	void showchild();
};
template<class T>
class hot_one
{
	bool fz;
public:
	T* one;
	int num;
	int count;
	hot_one() { this->fz = false; }
	hot_one(int type_num, int set_num = 0)
	{
		type_num = type_num <= 0 ? 1 : type_num;
		if (set_num >= type_num)
			set_num = 0;
		this->count = type_num;
		this->fz = true;
		this->num = set_num;
		this->one = new T[type_num]{ 0 };
		this->one[set_num] = 1;
	}
	void re(int set_num)
	{
		this->one[num] = 0;
		this->num = set_num;
		this->one[this->num] = 1;
	}
	void release()
	{
		if (this->fz)delete[] one;
		this->fz = false;
	}
	~hot_one()
	{
		this->release();
	}
};
int* bList(int distance, int max, int* out_len);
void Free2(float** x, int h);
void Free3(float*** x, int p, int h);
void kron(float** out, Vector2& out_len, float** inp, Vector2& inp_len, float** filter,
	Vector2& filter_len);

That concludes this article on a convolutional neural network in C++.
