Commit 6c200736 authored by AMKalinin

create

Iris.csv
mnist_train.csv
# Neural Network
## A library for building neural networks
**Getting started:**
1. Import the following modules in your main file:
```python
import nn
from functions import *
from layer import *
from manager import *
```
2. Create a model (the network to which we will add layers):
```python
model = nn.NeuralNetwork()
```
3. Build the network.
Layers take the following parameters: the layer type and its size (the number of neurons).
Hidden and output layers also take an activation function (currently only `sigmoida`, `ReLU`, `tanh`, and `softmax` for the output layer are available). If no activation function is needed, pass `nonFunc()` as the layer parameter.
```python
model.add_layers(input_layer('input', 4))
model.add_layers(hide_layer('hide', 10, sigmoida()))
model.add_layers(output_layer('output', 3, softmax()))
```
4. Compile the model (create the connections between neurons and initialize the weights):
```python
model.compile()
```
5. Hand the model to a `manager`. Also choose the *learning rate* and the loss function (SSE - sum of squared errors, SoftMaxCrossEntropy - cross-entropy):
```python
mg = manager(model, 0.01, SSE())
```
6. Start training:
```python
mg.fit(x_train, y_train, x_test, y_test, 100)
```
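Once training finishes, predictions are made with the model's `forward` method. A minimal sketch continuing the example above (the input values are hypothetical; the list length must match the input layer size):
```python
# Hypothetical 4-feature input, matching input_layer('input', 4) above.
sample = [5.1, 3.5, 1.4, 0.2]
# forward returns one value per output neuron; with softmax these
# are class probabilities.
print(model.forward(sample))
```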
from math import exp
from math import log

class function_activation():
    def activate(self, neu):
        pass
    def derivative(self, neu):
        pass

class ReLU(function_activation):
    def __init__(self):
        self.type = 'RELU'
    def activate(self, neu):
        if neu.sum < 0:
            neu.out = 0
        else:
            neu.out = neu.sum
    def derivative(self, neu):
        if neu.sum < 0:
            return 0
        else:
            return 1

class sigmoida(function_activation):
    def __init__(self):
        self.type = 'sigmoida'
    def activate(self, neu):
        neu.out = 1 / (1 + exp(-neu.sum))
    def derivative(self, neu):
        return neu.out * (1 - neu.out)

class tanh(function_activation):
    def __init__(self):
        self.type = 'tanh'
    def activate(self, neu):
        a = exp(neu.sum)
        b = exp(-neu.sum)
        neu.out = (a - b) / (a + b)
    def derivative(self, neu):
        return 1 - neu.out * neu.out

class softmax(function_activation):
    def __init__(self):
        self.type = 'softmax'
    # Softmax depends on the whole layer, so it is computed in
    # output_layer.activate; the per-neuron hooks are no-ops.
    def activate(self, neu):
        pass
    def derivative(self, neu):
        pass

class nonFunc(function_activation):
    def __init__(self) -> None:
        self.type = 'nonFunctions'
    # Identity activation for layers that need no nonlinearity.
    def activate(self, neu):
        neu.out = neu.sum
    def derivative(self, neu):
        return 1

class Loss:
    def loss(self, predict, target):
        pass
    def gradient(self, predict, target):
        pass

class SSE(Loss):
    def loss(self, predict, target):
        squared_errors = 0
        for i in range(len(predict)):
            squared_errors += (predict[i] - target[i])**2
        return squared_errors
    def gradient(self, predict, target):
        grad = []
        for i in range(len(predict)):
            grad.append(2 * (predict[i] - target[i]))
        return grad

class SoftMaxCrossEntropy(Loss):
    def loss(self, predict, target):
        loss = 0
        for i in range(len(predict)):
            # The small constant guards against log(0).
            loss += log(predict[i] + 1e-30) * target[i]
        return -loss
    def gradient(self, predict, target):
        # Combined softmax + cross-entropy gradient: predict - target.
        grad = []
        for i in range(len(predict)):
            grad.append(predict[i] - target[i])
        return grad
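For reference, a quick sanity check of the two loss classes on made-up vectors (a minimal sketch; for the cross-entropy case `predict` is assumed to already be a softmax output):
```python
from functions import SSE, SoftMaxCrossEntropy

# Hypothetical toy vectors, for illustration only.
predict = [0.7, 0.2, 0.1]
target = [1, 0, 0]

sse = SSE()
print(sse.loss(predict, target))       # 0.09 + 0.04 + 0.01 ~ 0.14
print(sse.gradient(predict, target))   # ~[-0.6, 0.4, 0.2]

xent = SoftMaxCrossEntropy()
print(xent.loss(predict, target))      # -log(0.7) ~ 0.357
print(xent.gradient(predict, target))  # ~[-0.3, 0.2, 0.1]
```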
img/exp.png (14.7 KB)
img/sin.png (27.8 KB)
img/x^2.png (20.2 KB)
from math import exp
from neuron import *

class layer:
    def __init__(self, type, size):
        self.size = size
        self.neurons = []
        self.type = type

class input_layer(layer):
    def __init__(self, type, size):
        super().__init__(type, size)
        self.add_neurons()
    def add_neurons(self):
        for i in range(self.size):
            self.neurons.append(neuron())

class hide_layer(layer):
    def __init__(self, type, size, func_activation):
        super().__init__(type, size)
        self.add_neurons(func_activation)
    def add_neurons(self, func):
        for i in range(self.size):
            self.neurons.append(hide_neuron(func_activation=func))
    def activate(self):
        for i in range(self.size):
            self.neurons[i].activate()
    def derivative(self):
        der = []
        for i in range(self.size):
            der.append(self.neurons[i].derivative())
        return der

class output_layer(layer):
    def __init__(self, type, size, func_activation):
        super().__init__(type, size)
        self.add_neurons(func_activation)
        self.func_type = func_activation.type
    def add_neurons(self, func):
        if func.type != 'softmax':
            for i in range(self.size):
                self.neurons.append(output_neuron(func_activation=func))
        else:
            for i in range(self.size):
                self.neurons.append(output_neuron())
    def activate(self):
        if self.func_type != 'softmax':
            for i in range(self.size):
                self.neurons[i].activate()
        else:
            # Numerically stable softmax: subtract the maximum input
            # before exponentiating, in the sum and in the numerator.
            total = 0
            max_sum = self.neurons[0].sum
            for i in range(1, self.size):
                if max_sum < self.neurons[i].sum:
                    max_sum = self.neurons[i].sum
            for i in range(self.size):
                total += exp(self.neurons[i].sum - max_sum)
            for i in range(self.size):
                self.neurons[i].out = exp(self.neurons[i].sum - max_sum) / total
    def derivative(self):
        der = []
        if self.func_type != 'softmax':
            for i in range(self.size):
                der.append(self.neurons[i].derivative())
        else:
            # The softmax derivative is folded into the
            # SoftMaxCrossEntropy gradient, so use 1 here.
            for i in range(self.size):
                der.append(1)
        return der
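A minimal sketch exercising the softmax branch of `output_layer` on hypothetical pre-activation sums; the outputs should be positive, increasing, and sum to 1:
```python
from functions import softmax
from layer import output_layer

# Build a 3-neuron softmax output layer and feed it fixed sums.
out = output_layer('output', 3, softmax())
for neu, s in zip(out.neurons, [1.0, 2.0, 3.0]):
    neu.sum = s
out.activate()
probs = [neu.out for neu in out.neurons]
print(probs)       # roughly [0.09, 0.24, 0.67]
print(sum(probs))  # ~1.0
```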
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
import nn
from functions import *
from layer import *
from manager import *
df = pd.read_csv('mnist_train.csv', header=None)
num = range(1,785)
numeric_data = df[df.columns[num]]
categorial_data = df[df.columns[0]]
dummy_features = pd.get_dummies(categorial_data)
num = range(0,784)
x = numeric_data[numeric_data.columns[num]]
y = dummy_features[dummy_features.columns[[0,1,2,3,4,5,6,7,8,9]]]
x_train, x_test, y_train, y_test = train_test_split(x.values, y.values, train_size=0.8, random_state=42)
md = nn.NeuralNetwork()
md.add_layers(input_layer('input', 784))
# md.add_layers(hide_layer('hide', 512, sigmoida()))
# md.add_layers(hide_layer('hide', 128, sigmoida()))
# md.add_layers(hide_layer('hide', 32, sigmoida()))
md.add_layers(output_layer('output', 10, softmax()))
md.compile()
# md = nn.NeuralNetwork()
# md.add_layers(input_layer( 'input', 3))
# md.add_layers(output_layer('output', 1, softmax()))
# md.compile()
# md.layers[1].neurons[0].w = [-0.16595599, 0.44064899,-0.99977125]
manag = manager(md, 0.01, SoftMaxCrossEntropy())
# x_train = [[0,0,1],
# [1,1,1],
# [1,0,1],
# [0,1,1]]
# y_train = [[0],[1],[1],[0]]
# y_train = np.array(y_train)
# x_test = [[1,0,0]]
# y_test = [[1]]
# y_test = np.array(y_test)
manag.fit(x_train/255, y_train, x_test/255, y_test, 10)
print(md.forward(x_train[0]/255))
print(md.forward(x_train[1]/255))
print(md.forward(x_train[3]/255))
import nn
from functions import *
from layer import *
from manager import *
import math
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt

# Sample sin(x) on [-pi, pi) with step 0.001.
x = -math.pi
xds = []
yds = []
x_l = []
y_l = []
while x < math.pi:
    xds.append([x])
    x_l.append(x)
    a = math.sin(x)
    yds.append([a])
    y_l.append(a)
    x += 0.001

x_train, x_test, y_train, y_test = train_test_split(xds, yds, train_size=0.8, random_state=42)
md = nn.NeuralNetwork()
md.add_layers(input_layer('input', 1))
md.add_layers(hide_layer('hide', 10, sigmoida()))
md.add_layers(output_layer('output', 1, nonFunc()))
md.compile()
manag = manager(md, 0.01, SSE())
manag.fit(x_train, y_train, x_test, y_test, 100)
print(md.forward([0]))

# Plot the learned function against sin(x).
predict_func = []
for i in range(len(x_l)):
    predict_func.append(md.forward(xds[i]))
plt.plot(x_l, predict_func, x_l, y_l)
plt.show()
import nn
from functions import *
from layer import *
from manager import *
import pandas as pd
from sklearn.model_selection import train_test_split
df = pd.read_csv('Iris.csv')
numeric_data = df[df.columns[[1,2,3,4]]]
categorial_data = df[df.columns[5]]
dummy_features = pd.get_dummies(categorial_data)
data = pd.concat([numeric_data, dummy_features], axis=1)
X = data[data.columns[[0,1,2,3]]]
y = data[data.columns[[4,5,6]]]
x_train, x_test, y_train, y_test = train_test_split(X.values, y.values, train_size=0.8, random_state=42)
md = nn.NeuralNetwork()
md.add_layers(input_layer('input', 4))
md.add_layers(hide_layer('hide', 10, sigmoida()))
md.add_layers(output_layer('output', 3, softmax()))
md.compile()
manag = manager(md, 0.01, SoftMaxCrossEntropy())
manag.fit(x_train, y_train, x_test, y_test, 1000)
print('1 0 0 - setosa; 0 1 0 - versicolor; 0 0 1 - virginica')
print(md.forward([4.8, 3.1, 1.6, 0.2]))  # setosa
print(md.forward([5.6, 3.0, 4.1, 1.3]))  # versicolor
print(md.forward([5.9, 3.0, 5.1, 1.8]))  # virginica
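To report a species name instead of raw probabilities, take the index of the largest output. A minimal sketch, assuming the label order matches the one-hot columns above:
```python
# Hypothetical helper mapping the softmax output to a species name.
labels = ['setosa', 'versicolor', 'virginica']
probs = md.forward([4.8, 3.1, 1.6, 0.2])
print(labels[probs.index(max(probs))])  # expected: setosa
```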
from tqdm import tqdm

class manager:
    def __init__(self, model, learning_rate, loss_function):
        self.model = model
        self.model.lr = learning_rate
        self.loss = loss_function
        self.loss_train_list = []
        self.loss_test_list = []
        self.accuracy_train = []
        self.accuracy_test = []
    def train(self, x_train, y_train):
        loss = 0
        correct = 0
        for i in tqdm(range(len(x_train)), desc='train', leave=False):
            predict = self.model.forward(x_train[i])
            correct += self.accuracy(predict, y_train[i])
            loss += self.loss.loss(predict, y_train[i])
            gradient = self.loss.gradient(predict, y_train[i])
            self.model.backward(gradient)
        total_loss = loss/len(x_train)
        acc = correct/len(x_train)
        self.loss_train_list.append(total_loss)
        self.accuracy_train.append(acc)
        print('train loss: ', total_loss)
        print(' accuracy:', acc)
    def test(self, x_test, y_test):
        loss = 0
        correct = 0
        for i in tqdm(range(len(x_test)), desc='test', leave=False):
            predict = self.model.forward(x_test[i])
            correct += self.accuracy(predict, y_test[i])
            loss += self.loss.loss(predict, y_test[i])
        total_loss = loss/len(x_test)
        self.loss_test_list.append(total_loss)
        acc = correct/len(x_test)
        self.accuracy_test.append(acc)
        print('test loss: ', total_loss)
        print(' accuracy:', acc)
        print()
    def accuracy(self, predict, target):
        # Compare the argmax of the prediction with the argmax of the target.
        max_ind1 = 0
        max_ind2 = 0
        max1 = predict[0]
        max2 = target[0]
        for i in range(1, len(predict)):
            if max1 < predict[i]:
                max1 = predict[i]
                max_ind1 = i
            if max2 < target[i]:
                max2 = target[i]
                max_ind2 = i
        if max_ind1 == max_ind2:
            return 1
        else:
            return 0
    def fit(self, x_train, y_train, x_test, y_test, epoch):
        for i in range(1, epoch+1):
            print('Epoch: ', i, '/', epoch)
            self.train(x_train, y_train)
            self.test(x_test, y_test)
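The manager also records per-epoch history in `loss_train_list`, `loss_test_list`, `accuracy_train`, and `accuracy_test`. A minimal sketch for plotting the loss curves after `fit` (assumes matplotlib and a trained `manag`, as in the examples above):
```python
import matplotlib.pyplot as plt

# Plot the loss curves recorded during manag.fit(...).
plt.plot(manag.loss_train_list, label='train loss')
plt.plot(manag.loss_test_list, label='test loss')
plt.xlabel('epoch')
plt.ylabel('loss')
plt.legend()
plt.show()
```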
from functions import *

class neuron:
    def __init__(self):
        self.out = 0
    def activate(self):
        pass

class input_neuron(neuron):
    def __init__(self):
        super().__init__()
    def activate(self):
        pass

class hide_neuron(neuron):
    def __init__(self, b=0, w=None, func_activation=None):
        super().__init__()
        self.b = b                   # bias
        self.w = w                   # input weights, set in model.compile()
        self.sum = 0                 # weighted input sum
        self.func = func_activation
    def activate(self):
        self.func.activate(self)
    def derivative(self):
        return self.func.derivative(self)

class output_neuron(hide_neuron):
    # func_activation stays None for softmax outputs: the activation is
    # then computed over the whole layer in output_layer.activate.
    def __init__(self, b=0, w=None, func_activation=None):
        super().__init__(b=b, w=w, func_activation=func_activation)
    def activate(self):
        self.func.activate(self)
    def derivative(self):
        return self.func.derivative(self)
from random import random

class model():
    def __init__(self):
        pass
    def add_layers(self):
        pass
    def compile(self):
        pass
    def forward(self):
        pass
    def backward(self):
        pass
    def fit(self):
        pass
    def init_w(self):
        pass

class NeuralNetwork(model):
    def __init__(self):
        self.layers = []
        self.lr = 1
    def add_layers(self, layer):
        self.layers.append(layer)
    def init_w(self, size):
        # Uniform initialization in [-0.5, 0.5).
        w = []
        for i in range(size):
            w.append(random() - 0.5)
        return w
    def compile(self):
        # Give every non-input neuron one weight per neuron
        # in the previous layer.
        self.size = len(self.layers)
        for i in range(1, self.size):
            for neuron in self.layers[i].neurons:
                neuron.w = self.init_w(self.layers[i-1].size)
    def forward(self, X):
        out = []
        for i in range(len(X)):
            self.layers[0].neurons[i].out = X[i]
        for i in range(1, self.size):
            for j in range(self.layers[i].size):
                self.layers[i].neurons[j].sum = 0
                for k in range(self.layers[i-1].size):
                    self.layers[i].neurons[j].sum += self.layers[i-1].neurons[k].out * self.layers[i].neurons[j].w[k]
                self.layers[i].neurons[j].sum += self.layers[i].neurons[j].b
            self.layers[i].activate()
        for i in range(self.layers[-1].size):
            out.append(self.layers[-1].neurons[i].out)
        return out
    def backward(self, gradient):
        for k in reversed(range(len(self.layers)-1)):
            gradientNext = []
            der = self.layers[k+1].derivative()
            for i in range(self.layers[k+1].size):
                gradient[i] *= der[i] * self.lr
            for i in range(self.layers[k].size):