A Hand-Rolled Neural Network in C Using Dynamic Arrays (with Weight-Matrix Visualization), 230902


Architectural principles: variables are memory, accessed through pointers.

1. Use a struct to record the network architecture, e.g. float*** ws as the pointer (address) of the weight matrices;

2. Use = (float*)malloc(Num * sizeof(float)) to allocate memory for the actual values;

3. Use = (float**)malloc(Num * sizeof(float*)) to allocate the arrays of pointers that point at those one-dimensional value arrays, i.e. memory whose contents are themselves pointers.

See the code below; a distilled sketch of the pattern comes first.
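Before the full listings, here is a minimal, self-contained sketch of the three allocation levels just described, plus the matching deallocation in reverse order (which the first listing below omits). The sizes[] table is illustrative, not taken from the article:

#include <stdlib.h>

int main(void) {
    int sizes[] = { 2, 3, 1 };   /* illustrative layer widths */
    int n_layers = 3;

    /* level 3: one float** per weight matrix (one matrix per layer pair) */
    float*** ws = (float***)malloc((n_layers - 1) * sizeof(float**));
    for (int i = 0; i < n_layers - 1; ++i) {
        /* level 2: one row pointer per source neuron */
        ws[i] = (float**)malloc(sizes[i] * sizeof(float*));
        for (int j = 0; j < sizes[i]; ++j) {
            /* level 1: the actual float storage */
            ws[i][j] = (float*)malloc(sizes[i + 1] * sizeof(float));
            for (int k = 0; k < sizes[i + 1]; ++k) {
                ws[i][j][k] = 0.0f;  /* use the memory */
            }
        }
    }

    /* free in the reverse order of allocation */
    for (int i = 0; i < n_layers - 1; ++i) {
        for (int j = 0; j < sizes[i]; ++j) { free(ws[i][j]); }
        free(ws[i]);
    }
    free(ws);
    return 0;
}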

// test22动态数组22多维数组23三维随机数230101.cpp : This file contains the "main" function. Program execution begins and ends there.

#include <iostream>
#include <cstdlib>   // malloc, free, rand, srand, RAND_MAX
#include <ctime>     // time
using namespace std;

typedef struct {
    float*** ws;            // ws[layer][from][to]: the weight matrices
    int num1;               // number of layers
    float** layer_outputs;  // unused in this first sketch
} NeuralN;

// Initialize the neural network's weight matrices, etc.
NeuralN init(int* t01, int num02) {
    NeuralN nn;
    nn.num1 = num02;

    nn.ws = (float***)malloc((num02 - 1) * sizeof(float**) );

    srand(time(NULL));

    cout << " [num02:" << num02 << endl;

    for (int i = 0; i <(num02 - 1); ++i) {
        nn.ws[i] = (float**)malloc( t01[i] * sizeof(float*) );  //为指针分配内存
        for (int j = 0; j < t01[i]; ++j) {
            nn.ws[i][j] = (float*)malloc( t01[i  + 1  ] * sizeof(float) ); //为变量 分配内存
            for (int k = 0; k < t01[i + 1]; k++) {
                //下一句 使用变量、即使用内存!(使用变量的内存)
                nn.ws[i][j][k] = (float)rand() / RAND_MAX;
            }//for330k
        }//for220j

    }//for110i

    return nn;

}//init

int main()
{
    int t001[] = { 2,8, 7,6, 1 ,2,1};

    // count the dynamic array's length with a range-for loop
    int Len_t001 = 0; for (int ii : t001) { ++Len_t001; }

    int Numm = Len_t001;
    cout << "Numm:" << Numm << endl;

    NeuralN nn = init(t001, Numm);

    // Display the 3-D tensor (i.e. the contents of the 3-D array):
    for (int i = 0; i < Numm - 1; ++i) {
        printf("_{ i%d_", i);
        for (int j = 0; j < t001[i + 1]; ++j) {
            printf("[j%d", j);
            for (int k = 0; k < t001[i]; ++k) {
                printf("(k%d(%.1f,", k, nn.ws[i][k][j]);
            }//for330k
            printf("_} \n");
        }//for220j
        printf("\n");
    }//for110i

    // free in the reverse order of allocation
    for (int i = 0; i < Numm - 1; ++i) {
        for (int j = 0; j < t001[i]; ++j) { free(nn.ws[i][j]); }
        free(nn.ws[i]);
    }
    free(nn.ws);

    std::cout << "Hello World!\n";
}//main
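An aside on the length-counting trick in main above: the range-for loop works, but since t001 has not decayed to a pointer at that point, the conventional idiom is sizeof division. A tiny sketch:

#include <stdio.h>

int main(void) {
    int t001[] = { 2, 8, 7, 6, 1, 2, 1 };
    /* valid only while t001 is an array, not a function parameter */
    int Len_t001 = (int)(sizeof(t001) / sizeof(t001[0]));
    printf("Len_t001: %d\n", Len_t001);  /* prints 7 */
    return 0;
}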

Second version, 231001. This adds feedforward, backpropagation training, and the color-coded weight-matrix display:

#include <stdio.h>
#include <stdlib.h>    // malloc, free, rand, srand
#include <stdbool.h>   // bool, when compiled as C
#include <windows.h>   // console colors via SetConsoleTextAttribute
#include <math.h>
#include <time.h>

#define LEARNING_RATE  0.05

// Sigmoid and its derivative
float sigmoid(float x) { return 1 / (1 + exp(-x));}

// Note: backpropagate below passes post-activation outputs into this function,
// so it computes sigmoid(sigmoid(z)) * (1 - sigmoid(sigmoid(z))) rather than the
// textbook out * (1 - out); the network nevertheless converges on XOR.
float sigmoid_derivative(float x) {
    float sig = 1.0 / (exp(-x) + 1);
    return sig * (1 - sig);
}

typedef struct {
    float*** weights;       // weights[layer][from][to]
    int num_layers;
    int* layer_sizes;       // aliases the caller's topology array
    float** layer_outputs;  // per-layer activations (buffers allocated in feedforward)
    float** deltas;         // per-layer error terms
} NeuralNetwork;

NeuralNetwork initialize_nn(int* topology, int num_layers) {
    NeuralNetwork nn;
    nn.num_layers = num_layers;
    nn.layer_sizes = topology;

    // Allocate memory for weights, layer outputs, and deltas
    nn.weights = (float***)malloc((num_layers - 1) * sizeof(float**));
    nn.layer_outputs = (float**)malloc(num_layers * sizeof(float*));
    nn.deltas = (float**)malloc((num_layers - 1) * sizeof(float*));

    srand(time(NULL));
    for (int i = 0; i < num_layers - 1; i++) {
        nn.weights[i] = (float**)malloc(topology[i] * sizeof(float*));
        nn.deltas[i] = (float*)malloc(topology[i + 1] * sizeof(float));
        for (int j = 0; j < topology[i]; j++) {
            nn.weights[i][j] = (float*)malloc(topology[i + 1] * sizeof(float));
            for (int k = 0; k < topology[i + 1]; k++) {
                nn.weights[i][j][k] = ((float)rand() / RAND_MAX) * 2.0f - 1.0f;  // Random weights between -1 and 1
            }
        }//for220j
    }//for110i
    return nn;
}//NeuralNetwork initialize_nn
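/* Unlike the 230902 sketch above, which drew weights from [0,1], these are
   drawn from [-1,1]; having both signs is what makes the sign-colored
   display in feedLoss meaningful. */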

float* feedforward(NeuralNetwork* nn, float* input) {
    nn->layer_outputs[0] = input;
    for (int i = 0; i < nn->num_layers - 1; i++) {
        // Note: fresh buffers are allocated on every call and the previous ones
        // are never freed, so long training runs leak memory; allocating once
        // in initialize_nn would avoid this.
        nn->layer_outputs[i + 1] = (float*)malloc(nn->layer_sizes[i + 1] * sizeof(float));
        for (int j = 0; j < nn->layer_sizes[i + 1]; j++) {
            nn->layer_outputs[i + 1][j] = 0;
            for (int k = 0; k < nn->layer_sizes[i]; k++) {
                nn->layer_outputs[i + 1][j] += nn->layer_outputs[i][k] * nn->weights[i][k][j];
            }//for330k
            nn->layer_outputs[i + 1][j] = sigmoid(nn->layer_outputs[i + 1][j]);
        }//for220j
    }//for110i
    return nn->layer_outputs[nn->num_layers - 1];
}//feedforward
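/* Each neuron j of layer i+1 computes sigmoid( sum_k out[i][k] * w[i][k][j] );
   note there is no bias term anywhere in this network. */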


void feedLoss(NeuralNetwork* nn, float* target) {

    // Display the weight matrices, color-coded by sign:
    for (int i = 0; i < nn->num_layers - 1; i++) {
        nn->layer_outputs[i + 1] = (float*)malloc(nn->layer_sizes[i + 1] * sizeof(float));
        for (int j = 0; j < nn->layer_sizes[i + 1]; j++) {
            nn->layer_outputs[i + 1][j] = 0;
            for (int k = 0; k < nn->layer_sizes[i]; k++) {
                // blue for positive weights, red for negative
                if (0 < nn->weights[i][k][j]) {
                    SetConsoleTextAttribute(GetStdHandle(STD_OUTPUT_HANDLE), FOREGROUND_BLUE);
                }
                else {
                    SetConsoleTextAttribute(GetStdHandle(STD_OUTPUT_HANDLE), FOREGROUND_RED);
                }
                printf("(%.4f,", nn->weights[i][k][j]);
            }
            printf("] \n");
            // No accumulation is performed here, so this stores sigmoid(0) = 0.5,
            // replacing the activations just computed by feedforward; that is why
            // the errors printed below always come out as +/-0.5.
            nn->layer_outputs[i + 1][j] = sigmoid(nn->layer_outputs[i + 1][j]);
        }//for220j
        SetConsoleTextAttribute(GetStdHandle(STD_OUTPUT_HANDLE), FOREGROUND_RED | FOREGROUND_GREEN | FOREGROUND_BLUE);
        printf("};\n");
    }//for110i
    printf("_]};\n \n");

    int Last01 = nn->num_layers - 1;
    // Print the output-layer error
    for (int i = 0; i < nn->layer_sizes[Last01]; ++i) {
        float error = target[i] - nn->layer_outputs[Last01][i];
        printf("[i%d:%f]  ", i, error);
    }
}//feedLoss


void backpropagate(NeuralNetwork* nn, float* target) {
    int Last01 = nn->num_layers - 1;

    // Calculate the output-layer deltas
    for (int i = 0; i < nn->layer_sizes[Last01]; i++) {
        float error = target[i] - nn->layer_outputs[Last01][i];
        nn->deltas[Last01 - 1][i] = error * sigmoid_derivative(nn->layer_outputs[Last01][i]);
    }

    // Calculate the hidden-layer deltas
    for (int i = Last01 - 1; i > 0; i--) {
        for (int j = 0; j < nn->layer_sizes[i]; j++) {
            float sum = 0;
            for (int k = 0; k < nn->layer_sizes[i + 1]; k++) {
                sum += nn->weights[i][j][k] * nn->deltas[i][k];
            }
            nn->deltas[i - 1][j] = sum * sigmoid_derivative(nn->layer_outputs[i][j]);
        }
    }

    // Adjust weights
    for (int i = 0; i < Last01; i++) {
        for (int j = 0; j < nn->layer_sizes[i]; j++) {
            for (int k = 0; k < nn->layer_sizes[i + 1]; k++) {
                nn->weights[i][j][k] += LEARNING_RATE * nn->deltas[i][k] * nn->layer_outputs[i][j];
            }
        }
    }//
}//backpropagate
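/* For reference, the update rule implemented above is the standard delta rule:
     output layer:  delta = (target - out) * sigmoid_derivative(out)
     hidden layers: delta_j = ( sum_k w[j][k] * delta_k ) * sigmoid_derivative(out_j)
     weights:       w[j][k] += LEARNING_RATE * delta_k * out_j
   (see the note at sigmoid_derivative about it receiving post-activation values). */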

void train(NeuralNetwork* nn, float inputs[][2], float* targets, int num_samples, int num_epochs) {
    bool whetherOutputLoss = 0;
#define Num10000 100000   // sampling interval (the macro name is historical)
    for (int epoch = 0; epoch < num_epochs; epoch++) {
        if (0 == (epoch % Num10000)) { whetherOutputLoss = 1; }
        for (int i = 0; i < num_samples; i++) {
            feedforward(nn, inputs[i]);
            // only display when the sampling epoch comes around
            if (whetherOutputLoss) { feedLoss(nn, &targets[i]); }
            backpropagate(nn, &targets[i]);
        }
        if (whetherOutputLoss) {
            printf("\n");
            whetherOutputLoss = 0;
        }
    }//for110i
}//train

int main() {
#define numLayer5   4
    int topology[] = { 2, /*128,*/ /*64,*/ /*32,*/ /*16,*/ /*8,*/ 3, 2, 1 };
    NeuralNetwork nn = initialize_nn(topology, numLayer5);

#define Num4 4
    float inputs[Num4][2] = { {1, 1}, {0, 0}, {1, 0}, {0, 1} };
    float targets[Num4] = { 0, 0, 1, 1 };

#define Num200000 200000
    train(&nn, inputs, targets, Num4, Num200000);

    float test_inputs[Num4][2] = { {0,0}, {1, 0}, {1, 1}, {0, 1} };
    for (int i = 0; i < Num4; i++) {
        float* output = feedforward(&nn, test_inputs[i]);
        printf("Output for [%f, %f]: %f\n", test_inputs[i][0], test_inputs[i][1], output[0]);
        free(output);   // frees the last layer's buffer; intermediate buffers from feedforward still leak
    }

    // Free memory
    for (int i = 0; i < nn.num_layers - 1; i++) {
        for (int j = 0; j < nn.layer_sizes[i]; j++) {
            free(nn.weights[i][j]);
        }
        free(nn.weights[i]);
        free(nn.deltas[i]);
    }
    free(nn.weights);
    free(nn.deltas);
    free(nn.layer_outputs);

    return 0;
}//main
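The color display above depends on windows.h and SetConsoleTextAttribute, so it only builds on Windows. On Linux or macOS terminals the same sign coloring can be done with ANSI escape sequences; a minimal sketch (the helper name print_weight_colored is mine, not from the original):

#include <stdio.h>

/* Hypothetical helper: print one weight, blue if positive, red otherwise,
   using ANSI escape codes instead of SetConsoleTextAttribute. */
static void print_weight_colored(float w) {
    if (w > 0) { printf("\x1b[34m"); }  /* blue */
    else       { printf("\x1b[31m"); }  /* red  */
    printf("(%.4f,", w);
    printf("\x1b[0m");                  /* reset color */
}

int main(void) {
    float demo[] = { 0.78f, -0.63f, 0.51f };
    for (int i = 0; i < 3; ++i) { print_weight_colored(demo[i]); }
    printf("]\n");
    return 0;
}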

First version, 230901. Original article: https://www.toymoban.com/news/detail-727548.html

#include <stdio.h>
#include <stdlib.h>    // malloc, free, rand, srand
#include <stdbool.h>   // bool, when compiled as C
#include <windows.h>
#include <math.h>
#include <time.h>

#define LEARNING_RATE  0.05

// Sigmoid and its derivative
float sigmoid(float x) { return 1 / (1 + exp(-x));}

float sigmoid_derivative(float x) {
    float sig = 1.0 / (exp(-x) + 1);
    return sig * (1 - sig);
}

typedef struct {
    float*** weights;
    int num_layers;
    int* layer_sizes;
    float** layer_outputs;
    float** deltas;
} NeuralNetwork;

NeuralNetwork initialize_nn(int* topology, int num_layers) {
    NeuralNetwork nn;
    nn.num_layers = num_layers;
    nn.layer_sizes = topology;

    // Allocate memory for weights, layer outputs, and deltas
    nn.weights = (float***)malloc((num_layers - 1) * sizeof(float**));
    nn.layer_outputs = (float**)malloc(num_layers * sizeof(float*));
    nn.deltas = (float**)malloc((num_layers - 1) * sizeof(float*));

    srand(time(NULL));
    for (int i = 0; i < num_layers - 1; i++) {
        nn.weights[i] = (float**)malloc(topology[i] * sizeof(float*));
        nn.deltas[i] = (float*)malloc(topology[i + 1] * sizeof(float));
        for (int j = 0; j < topology[i]; j++) {
            nn.weights[i][j] = (float*)malloc(topology[i + 1] * sizeof(float));
            for (int k = 0; k < topology[i + 1]; k++) {
                nn.weights[i][j][k] = ((float)rand() / RAND_MAX) * 2.0f - 1.0f;  // Random weights between -1 and 1
            }
        }//for220j
    }//for110i
    return nn;
}//NeuralNetwork initialize_nn

float* feedforward(NeuralNetwork* nn, float* input) {
    nn->layer_outputs[0] = input;
    for (int i = 0; i < nn->num_layers - 1; i++) {
        nn->layer_outputs[i + 1] = (float*)malloc(nn->layer_sizes[i + 1] * sizeof(float));
        for (int j = 0; j < nn->layer_sizes[i + 1]; j++) {
            nn->layer_outputs[i + 1][j] = 0;
            for (int k = 0; k < nn->layer_sizes[i]; k++) {
                nn->layer_outputs[i + 1][j] += nn->layer_outputs[i][k] * nn->weights[i][k][j];
            }
            nn->layer_outputs[i + 1][j] = sigmoid(nn->layer_outputs[i + 1][j]);
        }//for220j
    }//for110i
    return nn->layer_outputs[nn->num_layers - 1];
}//feedforward


void feedLoss(NeuralNetwork* nn, float* target) {

    // Display the weight matrices, color-coded by sign:
    for (int i = 0; i < nn->num_layers - 1; i++) {
        nn->layer_outputs[i + 1] = (float*)malloc(nn->layer_sizes[i + 1] * sizeof(float));
        for (int j = 0; j < nn->layer_sizes[i + 1]; j++) {
            nn->layer_outputs[i + 1][j] = 0;
            for (int k = 0; k < nn->layer_sizes[i]; k++) {
                if (0 < nn->weights[i][k][j]) {
                    SetConsoleTextAttribute(GetStdHandle(STD_OUTPUT_HANDLE), FOREGROUND_BLUE);
                }
                else {
                    SetConsoleTextAttribute(GetStdHandle(STD_OUTPUT_HANDLE), FOREGROUND_RED);
                }
                printf("(%.4f,", nn->weights[i][k][j]);
            }
            printf("] \n");
            nn->layer_outputs[i + 1][j] = sigmoid(nn->layer_outputs[i + 1][j]);  // stores sigmoid(0) = 0.5
        }//for220j
        SetConsoleTextAttribute(GetStdHandle(STD_OUTPUT_HANDLE), FOREGROUND_RED | FOREGROUND_GREEN | FOREGROUND_BLUE);
        printf("};\n");
    }//for110i
    printf("_]};\n");

    int Last01 = nn->num_layers - 1;
    // Print the output-layer error
    for (int i = 0; i < nn->layer_sizes[Last01]; ++i) {
        float error = target[i] - nn->layer_outputs[Last01][i];
        printf("[i%d:%f]  ", i, error);
    }
}//feedLoss


void backpropagate(NeuralNetwork* nn, float* target) {
    int Last01 = nn->num_layers - 1;

    // Calculate the output-layer deltas
    for (int i = 0; i < nn->layer_sizes[Last01]; i++) {
        float error = target[i] - nn->layer_outputs[Last01][i];
        nn->deltas[Last01 - 1][i] = error * sigmoid_derivative(nn->layer_outputs[Last01][i]);
    }

    // Calculate the hidden-layer deltas
    for (int i = Last01 - 1; i > 0; i--) {
        for (int j = 0; j < nn->layer_sizes[i]; j++) {
            float sum = 0;
            for (int k = 0; k < nn->layer_sizes[i + 1]; k++) {
                sum += nn->weights[i][j][k] * nn->deltas[i][k];
            }
            nn->deltas[i - 1][j] = sum * sigmoid_derivative(nn->layer_outputs[i][j]);
        }
    }

    // Adjust weights
    for (int i = 0; i < Last01; i++) {
        for (int j = 0; j < nn->layer_sizes[i]; j++) {
            for (int k = 0; k < nn->layer_sizes[i + 1]; k++) {
                nn->weights[i][j][k] += LEARNING_RATE * nn->deltas[i][k] * nn->layer_outputs[i][j];
            }
        }
    }//
}//backpropagate

void train(NeuralNetwork* nn, float inputs[][2], float* targets, int num_samples, int num_epochs) {
    bool whetherOutputLoss = 0;
#define Num10000 50000   // sampling interval (the macro name is historical)
    for (int epoch = 0; epoch < num_epochs; epoch++) {
        if (0 == (epoch % Num10000)) { whetherOutputLoss = 1; }
        for (int i = 0; i < num_samples; i++) {
            feedforward(nn, inputs[i]);
            if (whetherOutputLoss) { feedLoss(nn, &targets[i]); }
            backpropagate(nn, &targets[i]);
        }
        if (whetherOutputLoss) {
            printf("\n");
            whetherOutputLoss = 0;
        }
    }//for110i
}//train

int main() {
#define numLayer5   4
    int topology[] = { 2, /*128,*/ /*64,*/ /*32,*/ /*16,*/ /*8,*/ 3, 2, 1 };
    NeuralNetwork nn = initialize_nn(topology, numLayer5);

#define Num4 4
    float inputs[Num4][2] = { {1, 1}, {0, 0}, {1, 0}, {0, 1} };
    float targets[Num4] = { 0, 0, 1, 1 };

#define Num200000 200000
    train(&nn, inputs, targets, Num4, Num200000);

    float test_inputs[Num4][2] = { {0,0}, {1, 0}, {1, 1}, {0, 1} };
    for (int i = 0; i < Num4; i++) {
        float* output = feedforward(&nn, test_inputs[i]);
        printf("Output for [%f, %f]: %f\n", test_inputs[i][0], test_inputs[i][1], output[0]);
        free(output);
    }

    // Free memory
    for (int i = 0; i < nn.num_layers - 1; i++) {
        for (int j = 0; j < nn.layer_sizes[i]; j++) {
            free(nn.weights[i][j]);
        }
        free(nn.weights[i]);
        free(nn.deltas[i]);
    }
    free(nn.weights);
    free(nn.deltas);
    free(nn.layer_outputs);

    return 0;
}//main
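Sample console output from a training run follows. Each block prints the weight matrices layer by layer (rows end in "]", layers in "};"), followed by the output-layer error as [i0:...]. The error always prints as +/-0.500000 because feedLoss stores sigmoid(0) = 0.5 as the output before computing it; what the dump really shows is the weights growing in magnitude as training progresses: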
(-0.1291,(0.7803,]
(-0.6326,(0.5078,]
};
(-0.1854,(-0.5262,(0.8464,]
(0.4913,(0.0774,(0.1000,]
};
(0.7582,(-0.7756,]
};
_]};
[i0:-0.500000]  (0.5459,(0.0427,]
(-0.1289,(0.7804,]
(-0.6327,(0.5076,]
};
(-0.1859,(-0.5268,(0.8458,]
(0.4919,(0.0780,(0.1005,]
};
(0.7553,(-0.7786,]
};
_]};
[i0:-0.500000]  (0.5459,(0.0427,]
(-0.1289,(0.7804,]
(-0.6327,(0.5076,]
};
(-0.1864,(-0.5273,(0.8453,]
(0.4924,(0.0785,(0.1011,]
};
(0.7524,(-0.7815,]
};
_]};
[i0:0.500000]  (0.5458,(0.0427,]
(-0.1291,(0.7804,]
(-0.6326,(0.5076,]
};
(-0.1859,(-0.5268,(0.8458,]
(0.4919,(0.0780,(0.1005,]
};
(0.7553,(-0.7786,]
};
_]};
[i0:0.500000]
(0.5679,(-0.3593,]
(-0.8321,(1.1025,]
(-0.5647,(0.1703,]
};
(-0.5384,(-1.1479,(0.8445,]
(0.2658,(0.1725,(-0.1653,]
};
(1.1137,(-0.7693,]
};
_]};
[i0:-0.500000]  (0.5682,(-0.3590,]
(-0.8317,(1.1029,]
(-0.5651,(0.1699,]
};
(-0.5391,(-1.1487,(0.8437,]
(0.2663,(0.1730,(-0.1647,]
};
(1.1107,(-0.7722,]
};
_]};
[i0:-0.500000]  (0.5682,(-0.3590,]
(-0.8317,(1.1029,]
(-0.5651,(0.1699,]
};
(-0.5399,(-1.1495,(0.8429,]
(0.2668,(0.1735,(-0.1642,]
};
(1.1078,(-0.7751,]
};
_]};
[i0:0.500000]  (0.5679,(-0.3590,]
(-0.8321,(1.1029,]
(-0.5647,(0.1699,]
};
(-0.5391,(-1.1487,(0.8437,]
(0.2663,(0.1730,(-0.1647,]
};
(1.1107,(-0.7722,]
};
_]};
[i0:0.500000]
(6.5241,(-6.2462,]
(-6.5361,(6.8406,]
(0.2226,(0.6834,]
};
(-3.2613,(-3.6355,(2.0290,]
(0.8144,(0.6639,(-0.7503,]
};
(4.2499,(-0.6959,]
};
_]};
[i0:-0.500000]  (6.5288,(-6.2415,]
(-6.5309,(6.8458,]
(0.2196,(0.6804,]
};
(-3.2642,(-3.6385,(2.0261,]
(0.8149,(0.6644,(-0.7498,]
};
(4.2469,(-0.6989,]
};
_]};
[i0:-0.500000]  (6.5288,(-6.2415,]
(-6.5309,(6.8458,]
(0.2196,(0.6804,]
};
(-3.2671,(-3.6414,(2.0231,]
(0.8154,(0.6649,(-0.7494,]
};
(4.2440,(-0.7018,]
};
_]};
[i0:0.500000]  (6.5241,(-6.2415,]
(-6.5361,(6.8458,]
(0.2226,(0.6804,]
};
(-3.2642,(-3.6385,(2.0260,]
(0.8149,(0.6644,(-0.7498,]
};
(4.2469,(-0.6989,]
};
_]};
[i0:0.500000]
(114.9971,(-113.4876,]
(-112.8603,(114.3747,]
(0.6990,(0.7116,]
};
(-31.6319,(-31.7725,(45.2379,]
(11.9645,(11.6226,(-25.5372,]
};
(22.2722,(-15.6809,]
};
_]};
[i0:-0.500000]  (115.2866,(-113.1981,]
(-112.5715,(114.6635,]
(0.2422,(0.2548,]
};
(-31.6473,(-31.7879,(45.2226,]
(11.9753,(11.6335,(-25.5264,]
};
(22.2693,(-15.6838,]
};
_]};
[i0:-0.500000]  (115.2866,(-113.1981,]
(-112.5715,(114.6635,]
(0.2422,(0.2548,]
};
(-31.6626,(-31.8033,(45.2072,]
(11.9861,(11.6443,(-25.5155,]
};
(22.2663,(-15.6867,]
};
_]};
[i0:0.500000]  (114.9968,(-113.1981,]
(-112.8605,(114.6635,]
(0.6987,(0.2548,]
};
(-31.6473,(-31.7879,(45.2226,]
(11.9753,(11.6335,(-25.5264,]
};
(22.2693,(-15.6838,]
};
_]};
[i0:0.500000]
Output for [0.000000, 0.000000]: 0.005787
Output for [1.000000, 0.000000]: 0.993864
Output for [1.000000, 1.000000]: 0.011066
Output for [0.000000, 1.000000]: 0.993822
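The final four lines show that the trained network has learned XOR: inputs whose two bits differ produce outputs near 1, and inputs whose bits match produce outputs near 0.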
