1, 2, 3, & 4-layer neural networks in C
//compile: gcc test.c -o test -lm
//run as script: tcc -lm -run test.c

/* 00numc.h * * * * * * * * * * * * * * * * * * * * * * * * * * * * *\
 *                                                                   *
 * Copyright (C) 2016 by Henry Kroll III, www.thenerdshow.com        *
 *                                                                   *
 * This program is free software: you can redistribute it and/or     *
 * modify it under the terms of the GNU General Public License as    *
 * published by the Free Software Foundation, either version 3 of    *
 * the License, or (at your option) any later version.               *
 *                                                                   *
 * This program is distributed in the hope that it will be useful,   *
 * but WITHOUT ANY WARRANTY; without even the implied warranty of    *
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the      *
 * GNU General Public License for more details.                      *
 *                                                                   *
 * You should have received a copy of the GNU General Public License *
 * along with this program. If not, see http://www.gnu.org/licenses/ *
\* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

#ifndef _NUMC
#define _NUMC
#include <stdio.h>
#include <stdlib.h> // exit(), EXIT_SUCCESS
#include <stdarg.h> // va_args
#include <string.h> // strcmp()
#include <limits.h> // INT_MAX
#include <float.h>  // DBL_MAX
#include <math.h>

//define constants
#define TYPE_MAX 20
#if !defined(MAX_ROWS) || !defined(MAX_COLS)
#define MAX_ROWS 1024
#define MAX_COLS 1024
#endif

//cross-compile
#if defined(_WIN32) || defined(_WIN64)   // if windows
#define DLL_EXPORT __declspec(dllexport) // make DLL
#else
#define DLL_EXPORT
#endif

//handle errors
#define _STR(a) #a
#define STR(a) _STR(a)
#define TRY(a) if (!(a)){ \
    perror(__FILE__":"STR(__LINE__));exit(1);}
#define ERR(...) fprintf (stderr, STR(__FILE__)" "STR(__LINE__) \
    ": " __VA_ARGS__);exit (1)
#define INFO(...) fprintf (stderr, STR(__FILE__)" "STR(__LINE__) \
    ": " __VA_ARGS__);

//operations
#define FORMULA_LIST \
F(addn, +=n) /* array + number operations */ \
F(subn, -=n) \
F(divn, /=n) \
F(muln, *=n) \
F(setn, =n) \
F(square, =x*x) /* one-argument operations */ \
F(sigmoid, =1/(1+exp(-x))) \
F(derivative, =x*(1-x)) \
F(roundx, =round(x)) \
F(addx, +=n) /* array + array operations */ \
F(subx, -=n) \
F(divx, /=n) \
F(mulx, *=n) \
F(dupx, =n)

#define F(a, b) a,
typedef enum {FORMULA_LIST} e_ops;
#undef F
#define F(a, b) #b,
char *operations[]={FORMULA_LIST};
#undef F

//random double min, max
#define getrandom(min, max) \
    ((rand()%(int)(((max) + 1)-(min)))+ (min))
#define forEach(s, t) for (int index=0;(t = s[index]);index++)
#define split(b,c,ci) for(ci=strtok(b,c);ci;(ci=strtok(NULL,c)))

//cast data pointer to array of rows, cols
#define CAST2D(NAME) \
    (*(double(*)[NAME->rows][NAME->cols])NAME->data)

// typedefs
typedef void (*umfunc)(int, va_list);
typedef struct dm {
    size_t sz, rows, cols;
    char *name;
    double *data; // pointer to mem
    double mem;   // label for memory area
} dm, *pdm;

// forward declarations
pdm _create (char *name, int r, int c, ...);
pdm _trans (pdm Y, pdm X);
pdm _dup (pdm Y, pdm X);
pdm _dot (pdm C, pdm A, pdm B);
#define print_r(...) _print_r(__VA_ARGS__, NULL)
void _print_r (pdm X, ...);
#define fn(...) _fn(__VA_ARGS__, 99)
pdm _fn (e_ops f, ...);
double fRand (double, double);
#define randomize_arrays(...) \
    _randomize_arrays(__VA_ARGS__, NULL)
void _randomize_arrays (pdm X, ...);

// check for bad array
#define CHECK_pdm(X) \
    if (X->rows > MAX_ROWS || X->cols > MAX_COLS \
    || X->rows < 1 || X->cols < 1) { \
        ERR ("Array dimensions corrupted or wrong &p dereference."); \
    } if (!X->name || strlen(X->name) > TYPE_MAX) { \
        ERR ("Array name corrupted or wrong &p dereference.");}

//convenience macros
//create NEW arrays in high level scope
//pass via reference to functions using the & operator
#define NEW(NAME, ROWS, COLS, ...) \
    double data_##NAME[ROWS][COLS] = { __VA_ARGS__ }; \
    dm NAME; NAME.data = (double*)data_##NAME; \
    NAME.rows = ROWS; NAME.cols = COLS; NAME.name = #NAME;
#define DOT(C, A, B) \
    double data_##C[(A).rows][(B).cols]; \
    dm C; C.data = (double*)data_##C; \
    C.rows = (A).rows; C.cols = (B).cols; C.name = #C; \
    _dot(&C, &(A), &(B));
#define DUP(A, B) \
    double data_##A[(B).rows][(B).cols]; \
    dm A; A.data = (double*)data_##A; \
    A.rows = (B).rows; A.cols = (B).cols; A.name = #A; \
    fn(dupx, &A, &(B));
#define TRN(A, B) \
    double data_##A[(B).cols][(B).rows]; \
    dm A; A.data = (double*)data_##A; \
    A.rows = (B).cols; A.cols = (B).rows; A.name = #A; \
    _trans(&A, &(B));
#endif //_NUMC
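//added usage sketch (not part of the original files): shows how the
//convenience macros compose. NEW, DOT, DUP, and TRN declare stack-backed
//(VLA) matrices in the current scope, so results live only until the
//enclosing block ends. Change #if 0 to #if 1 and call numc_demo() from
//main to try it.
#if 0
static void numc_demo (void)
{
    NEW (a, 2, 3, {1,2,3},{4,5,6}); // 2x3 matrix, row-major initializers
    TRN (aT, a);                    // aT = transpose(a), a 3x2 matrix
    DOT (p, a, aT);                 // p = a . aT, a 2x2 product
    fn (muln, &p, 0.5);             // elementwise: p *= 0.5
    print_r (&p);                   // prints p [2][2] = { ... }
}
#endif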
//usr/bin/chmod -x "$0"; exit //make shared

/* numc.c * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *\
 *                                                                   *
 * Copyright (C) 2016 by Henry Kroll III, www.thenerdshow.com        *
 *                                                                   *
\* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
//#include "00numc.h"
//#include <string.h>

double fRand (double fMin, double fMax)
{
    double f = (double)rand() / RAND_MAX;
    return fMin + f * (fMax - fMin);
}

//fill array with random values
void _randomize_arrays (pdm X, ...)
{
    va_list ap;
    va_start (ap, X);
    do {
        CHECK_pdm(X);
        size_t cr = X->rows * X->cols; // get size
        for (size_t i = cr; i--;) {
            X->data[i] = fRand(-3.0, 3.0);
        }
    } while ((X = va_arg(ap, pdm)));
    va_end (ap);
    return;
}

//function selector
pdm _fn (e_ops f, ...)
{
    va_list ap;
    va_start (ap, f);
    double x, n = x = 0.0; //initialize variables
    pdm A, B = NULL;       //signal that B doesn't exist
    do {
        A = va_arg (ap, pdm); if (A) CHECK_pdm (A);
        //get more args for operations that need them
        if (f < square) { //array + number operations
            n = va_arg (ap, double);
        }
        if (f >= addx) {  //array + array operations
            B = va_arg (ap, pdm); CHECK_pdm (B);
            if (B->rows != A->rows || B->cols != A->cols) {
                INFO ("%s %zux%zu != %s %zux%zu\n", A->name,
                    A->rows, A->cols, B->name, B->rows, B->cols);
                ERR ("Can't work on different size arrays.\n");
            }
        }
        //else one-argument functions (default)
        size_t rc = A->rows * A->cols;
        //apply the chosen operation f across A->data
        switch (f) {
        //expand each formula in its own loop, for speed!
#define F(OPCODE, FORMULA) \
        case OPCODE: \
            for (;rc--;) { \
                if (B) n = B->data[rc]; \
                x = A->data[rc]; A->data[rc] FORMULA; \
            } break;
        FORMULA_LIST;
        }
#undef F
        n = x = 0.0; B = NULL;
        f = va_arg (ap, e_ops);
    } while (f <= dupx);
    va_end (ap);
    return A;
}

//transpose array X into Y
pdm _trans (pdm Y, pdm X)
{
    CHECK_pdm (X);
    if (Y->rows != X->cols || Y->cols != X->rows) {
        ERR ("Arrays must be compatible. Use TRN macro.\n");
    }
    for (int r=X->rows;r--;) for (int c=X->cols;c--;) {
        CAST2D(Y)[c][r] = CAST2D(X)[r][c];
    }
    return Y;
}

//do dot product of A and B, store in C
pdm _dot (pdm C, pdm A, pdm B)
{
    int r, c;
    CHECK_pdm (A); CHECK_pdm (B);
    //new array has the rows of A, and the cols of B
    if (C->rows != A->rows || C->cols != B->cols) {
        ERR ("Arrays must be compatible. Use DOT macro.\n");
    }
    for (r=0; r < A->rows; r++) {     //for each row of A
        for (c=0; c < B->cols; c++) { //for each col of B
            double sum = 0.0;
            for (int i=0; i < A->cols; i++) { //for each col of A
                sum += CAST2D(A)[r][i] * CAST2D(B)[i][c];
            }
            CAST2D(C)[r][c] = sum;
        }
    }
    return C;
}
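/* Added note (commentary): the fn() wrapper appends a 99 sentinel, and
 * _fn keeps reading (op, array, extra-arg) groups off the va_list until
 * the op code passes dupx, so several elementwise operations chain in one
 * call. For example (with some hypothetical matrix a):
 *
 *     fn (muln, &a, 2.0, addn, &a, 1.0); // a = a*2 + 1, elementwise
 *     fn (sigmoid, &a, derivative, &a);  // a = s(a), then a = a*(1-a)
 *
 * Number arguments must be written as doubles (2.0, not 2), because they
 * are pulled from the va_list as double. */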
//print array
void _print_r (pdm X, ...)
{
    va_list ap;
    va_start (ap, X);
    int r, c;
    do {
        CHECK_pdm (X);
        printf ("%s [%zu][%zu] =\n{", X->name, X->rows, X->cols);
        for (r=0;r < X->rows;r++) {     //for each row
            printf ("%s{", r?" ":"");   //indent rows after the first
            for (c=0;c < X->cols;c++) { //for each col
                printf ("%6.3f%s ", CAST2D(X)[r][c],
                    c < X->cols-1 ? "," : "");
            }
            printf ("}%c\n", r < X->rows - 1?',':'}');
        }
    } while ((X = va_arg(ap, pdm)));
    va_end (ap);
    return;
}

//usr/local/bin/anch -std=c99 -Llibs -lnumc -lm -keep -run "$0" "$@" ; exit 0

/* neural.c * * * * * * * * * * * * * * * * * * * * * * * * * * * * *\
 *                                                                   *
 * Copyright (C) 2016 by Henry Kroll III, www.thenerdshow.com        *
 *                                                                   *
\* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
//#include "00numc.h"
#define FOR(i, n) for (int i=n;i--;)

#if 1
//return the mean of absolute values in array
double mean_abs (pdm X)
{
    CHECK_pdm(X);
    size_t cr = X->rows * X->cols; // get size
    double ret = 0.0;
    for (size_t i = cr; i--;) {
        ret += fabs(X->data[i]);
    }
    return ret / cr;
}
#endif

#if 1
//one-layer neural network batch trainer
double one_layer (dm *in, dm *goal, dm *weights1)
{
    //DOT product in . weights ; normalize with sigmoid
    DOT (layer1, *in, *weights1); fn (sigmoid, &layer1);
    //compute the layer1_delta (amount to change)
    DUP (layer1_delta, *goal); fn (subx, &layer1_delta, &layer1);
    //compute layer 1 adjustment toward target
    fn (mulx, &layer1_delta, fn (derivative, &layer1));
    //~ print_r &layer1_delta
    //compute weight shift
    TRN (in_T, *in); DOT (ws1, in_T, layer1_delta);
    fn (addx, weights1, &ws1);
    return mean_abs(&layer1_delta);
}
#endif
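/* Added note (commentary): fn(derivative, ...) evaluates x*(1-x) on the
 * layer *activations*: with s(z) = 1/(1+exp(-z)) the slope is
 * s'(z) = s(z)*(1-s(z)), so the activation alone suffices. one_layer()
 * is thus the classic delta rule,
 *
 *     delta = (goal - out) * out * (1 - out),  W += in^T . delta
 *
 * with the whole training set processed as one batch per call. */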
#if 1
//two-layer neural network batch trainer
double two_layer (dm *in, dm *goal, dm *weights1, dm *weights2)
{
    //DOT product in . weights ; normalize with sigmoid
    DOT (layer1, *in, *weights1); fn (sigmoid, &layer1);
    DOT (layer2, layer1, *weights2); fn (sigmoid, &layer2);
    //compute the layer2_delta (amount to change)
    DUP (layer2_delta, *goal); fn (subx, &layer2_delta, &layer2);
    //compute layer 2 adjustment toward target
    fn (mulx, &layer2_delta, fn (derivative, &layer2));
    //back propagate layer1_miss distances from layer2_delta
    TRN (wT, *weights2); DOT (layer1_miss, layer2_delta, wT);
    //compute layer 1 adjustment toward target
    DUP (layer1_delta, layer1); fn (derivative, &layer1_delta);
    fn (mulx, &layer1_delta, &layer1_miss);
    //compute weight shift ws
    TRN (in_T, *in); DOT (ws1, in_T, layer1_delta);
    fn (addx, weights1, &ws1);
    TRN (layer1_T, layer1); DOT (ws2, layer1_T, layer2_delta);
    fn (addx, weights2, &ws2);
    return mean_abs(&layer2_delta);
}
#endif

#if 1
//three-layer neural network batch trainer
double three_layer (dm *in, dm *goal, dm *weights1, dm *weights2,
    dm *weights3)
{
    //DOT product in . weights ; normalize with sigmoid
    DOT (layer1, *in, *weights1); fn (sigmoid, &layer1);
    DOT (layer2, layer1, *weights2); fn (sigmoid, &layer2);
    DOT (layer3, layer2, *weights3); fn (sigmoid, &layer3);
    if (!goal) {
        TRN (predict3, layer3); print_r (&predict3); return 0;
    }
    //compute the layer3_delta (amount to change)
    DUP (layer3_delta, *goal); fn (subx, &layer3_delta, &layer3);
    //compute layer 3 adjustment toward target
    fn (mulx, &layer3_delta, fn (derivative, &layer3));
    //back propagate layer2_miss distances from layer3_delta
    TRN (w3T, *weights3); DOT (layer2_miss, layer3_delta, w3T);
    //compute layer 2 adjustment toward target
    DUP (layer2_delta, layer2); fn (derivative, &layer2_delta);
    fn (mulx, &layer2_delta, &layer2_miss);
    //back propagate layer1_miss distances from layer2_delta
    TRN (w2T, *weights2); DOT (layer1_miss, layer2_delta, w2T);
    //compute layer 1 adjustment toward target
    DUP (layer1_delta, layer1); fn (derivative, &layer1_delta);
    fn (mulx, &layer1_delta, &layer1_miss);
    //compute weight shift ws
    TRN (in_T, *in); DOT (ws1, in_T, layer1_delta);
    fn (addx, weights1, &ws1);
    TRN (layer1_T, layer1); DOT (ws2, layer1_T, layer2_delta);
    fn (addx, weights2, &ws2);
    TRN (layer2_T, layer2); DOT (ws3, layer2_T, layer3_delta);
    fn (addx, weights3, &ws3);
    return mean_abs(&layer3_delta);
}
#endif

#if 1
//four-layer neural network batch trainer
double four_layer (dm *in, dm *goal, dm *weights1, dm *weights2,
    dm *weights3, dm *weights4)
{
    //DOT product in . weights ; normalize with sigmoid
    DOT (layer1, *in, *weights1); fn (sigmoid, &layer1);
    DOT (layer2, layer1, *weights2); fn (sigmoid, &layer2);
    DOT (layer3, layer2, *weights3); fn (sigmoid, &layer3);
    DOT (layer4, layer3, *weights4); fn (sigmoid, &layer4);
    if (!goal) {
        TRN (predict4, layer4); print_r (&predict4); return 0;
    }
    //compute the layer4_delta (amount to change)
    DUP (layer4_delta, *goal); fn (subx, &layer4_delta, &layer4);
    //compute layer 4 adjustment toward target
    fn (mulx, &layer4_delta, fn (derivative, &layer4));
    //back propagate layer3_miss distances from layer4_delta
    TRN (w4T, *weights4); DOT (layer3_miss, layer4_delta, w4T);
    //compute layer 3 adjustment toward target
    DUP (layer3_delta, layer3); fn (derivative, &layer3_delta);
    fn (mulx, &layer3_delta, &layer3_miss);
    //back propagate layer2_miss distances from layer3_delta
    TRN (w3T, *weights3); DOT (layer2_miss, layer3_delta, w3T);
    //compute layer 2 adjustment toward target
    DUP (layer2_delta, layer2); fn (derivative, &layer2_delta);
    fn (mulx, &layer2_delta, &layer2_miss);
    //back propagate layer1_miss distances from layer2_delta
    TRN (w2T, *weights2); DOT (layer1_miss, layer2_delta, w2T);
    //compute layer 1 adjustment toward target
    DUP (layer1_delta, layer1); fn (derivative, &layer1_delta);
    fn (mulx, &layer1_delta, &layer1_miss);
    //compute weight shift ws
    TRN (in_T, *in); DOT (ws1, in_T, layer1_delta);
    fn (addx, weights1, &ws1);
    TRN (layer1_T, layer1); DOT (ws2, layer1_T, layer2_delta);
    fn (addx, weights2, &ws2);
    TRN (layer2_T, layer2); DOT (ws3, layer2_T, layer3_delta);
    fn (addx, weights3, &ws3);
    TRN (layer3_T, layer3); DOT (ws4, layer3_T, layer4_delta);
    fn (addx, weights4, &ws4);
    return mean_abs(&layer4_delta);
}
#endif
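/* Added note (commentary): each deeper trainer repeats the same pattern
 * per hidden layer k, working down from the output:
 *
 *     miss_k  = delta_(k+1) . W_(k+1)^T    // error pushed back through
 *     delta_k = miss_k * a_k * (1 - a_k)   // scaled by the sigmoid slope
 *     W_k    += (inputs to layer k)^T . delta_k
 *
 * which is exactly what the TRN / DOT / fn(derivative) / fn(mulx)
 * sequences above implement with stack-allocated temporaries. */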
#if 1
//create some neural networks
int main (void)
{
#define INPUTS  4 //input layer1 neurons (receptors)
#define L2WIDTH 7 //hidden layer2 neurons (synapses)
#define L3WIDTH 9 //hidden layer3 neurons
#define L4WIDTH 7 //hidden layer4 neurons
#define OBJECTS 5 //# of objects (patterns) to train and recognize
#define OUTPUTS 2 //# of output neurons per object (pattern)

    //lazy initialization with {0}
    NEW (l1w1, INPUTS,  OUTPUTS, {0}); // 1-layer net weights
    NEW (l2w1, INPUTS,  L2WIDTH, {0}); // 2-layer net weights
    NEW (l2w2, L2WIDTH, OUTPUTS, {0});
    NEW (l3w1, INPUTS,  L2WIDTH, {0}); // 3-layer net weights
    NEW (l3w2, L2WIDTH, L3WIDTH, {0});
    NEW (l3w3, L3WIDTH, OUTPUTS, {0});
    NEW (l4w1, INPUTS,  L2WIDTH, {0}); // 4-layer net weights
    NEW (l4w2, L2WIDTH, L3WIDTH, {0});
    NEW (l4w3, L3WIDTH, L4WIDTH, {0});
    NEW (l4w4, L4WIDTH, OUTPUTS, {0});

    //training inputs: one 4-bit pattern (object) per row
    NEW (in, OBJECTS, INPUTS,
        {0,0,1,0},
        {0,1,1,0},
        {1,0,1,0},
        {1,1,1,0},
        {0,0,0,0});
    //training goals: one 2-bit target per object
    NEW (goal, OBJECTS, OUTPUTS,
        {1, 0},{0, 1},{0, 0},{0, 0},{1, 1});
    randomize_arrays (&l1w1, &l2w1, &l2w2, &l3w1, &l3w2, &l3w3,
        &l4w1, &l4w2, &l4w3, &l4w4);

    //train and test 1-layer network
    double err;
    FOR (j, 600) {
        err = one_layer (&in, &goal, &l1w1);
    }
    printf ("Mean error of 1-layer network: %f\n", err);
    DOT (layera1, in, l1w1); fn (sigmoid, &layera1);
    TRN (predict1, layera1);
    print_r (&predict1);

    //train and test 2-layer network
    FOR (j, 500) {
        err = two_layer (&in, &goal, &l2w1, &l2w2);
    }
    printf ("Mean error of 2-layer network: %f\n", err);
    DOT (layerb1, in, l2w1); fn (sigmoid, &layerb1);
    DOT (layerb2, layerb1, l2w2); fn (sigmoid, &layerb2);
    TRN (predict2, layerb2);
    print_r (&predict2);

    //train and test 3-layer network
    FOR (j, 500) {
        err = three_layer (&in, &goal, &l3w1, &l3w2, &l3w3);
    }
    printf ("Mean error of 3-layer network: %f\n", err);
    three_layer (&in, NULL, &l3w1, &l3w2, &l3w3);

    //train and test 4-layer network
    FOR (j, 500) {
        err = four_layer (&in, &goal, &l4w1, &l4w2, &l4w3, &l4w4);
    }
    printf ("Mean error of 4-layer network: %f\n", err);
    four_layer (&in, NULL, &l4w1, &l4w2, &l4w3, &l4w4);
    return 0;
}
#endif
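/* Build sketch (assumption: the three sections above are saved together
 * as one file, test.c, as the top comment suggests):
 *
 *     gcc test.c -o test -lm && ./test   # keep -lm after the source file
 *     tcc -lm -run test.c                # or run directly with TinyCC
 *
 * Each network prints its final mean error and a prediction matrix
 * (transposed, so it should approach the transpose of goal). */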