AESmolina / Deep learning · Commits · 4be0b2ec

Commit 4be0b2ec authored Dec 02, 2021 by AESmolina

Initial commit

Changes: 1

Neu.py 0 → 100644
import numpy as np
import math
import tqdm
from scipy.special import softmax

def apply_func(input, f=lambda x: x):
    # Apply f elementwise to a flat list or, recursively, to a nested list of lists.
    if isinstance(input[0], list):
        return [apply_func(input_i, f) for input_i in input]
    else:
        return list(map(f, input))

def combine_apply_func(input1, input2, f=lambda x, y: x + y):
    # Combine two (possibly nested) lists of the same shape elementwise with f.
    if isinstance(input1[0], list):
        return [combine_apply_func(input1_i, input2_i, f)
                for input1_i, input2_i in zip(input1, input2)]
    else:
        return [f(x, y) for x, y in zip(input1, input2)]


def dot(v, w):
    # Dot product of two equal-length vectors given as plain lists.
    return sum(v_i * w_i for v_i, w_i in zip(v, w))
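
# A quick, illustrative sanity check for the helpers above; kept inside a
# string literal, like the XOR demo further down, so it does not run on import.
# The expected values in the comments are easy to verify by hand.
'''
print(apply_func([[1, 2], [3, 4]], lambda x: x * 10))  # [[10, 20], [30, 40]]
print(combine_apply_func([1, 2, 3], [4, 5, 6]))        # [5, 7, 9] (default f is addition)
print(dot([1, 2, 3], [4, 5, 6]))                       # 32
'''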

class Layer:
    # Base class: every layer exposes forward/backward plus its params and grads.
    def forward(self, input):
        pass

    def backward(self, gradient):
        pass

    def params(self):
        return ()

    def grads(self):
        return ()

class Sigmoid(Layer):
    def forward(self, input):
        self.sigmoids = apply_func(input, lambda x: 1 / (1 + math.exp(-x)))
        return self.sigmoids

    def backward(self, gradient):
        # sig * (1 - sig) is the derivative of the sigmoid
        back = combine_apply_func(self.sigmoids, gradient,
                                  lambda sig, grad: sig * (1 - sig) * grad)
        return back
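
# Illustrative check of the Sigmoid layer (commented out): sigmoid(0) = 0.5,
# and its derivative there is 0.5 * (1 - 0.5) = 0.25.
'''
sig = Sigmoid()
print(sig.forward([0.0]))    # [0.5]
print(sig.backward([1.0]))   # [0.25]
'''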

class Relu(Layer):
    def forward(self, input):
        self.relus = apply_func(input, lambda x: max(x, 0))
        return self.relus

    def backward(self, gradient):
        # The gradient passes through only where the forward output was positive.
        back = combine_apply_func(self.relus, gradient,
                                  lambda relu, grad: grad if relu > 0 else 0)
        return back

class Tanh(Layer):
    def forward(self, input):
        self.tanhs = apply_func(input, math.tanh)
        return self.tanhs

    def backward(self, gradient):
        self.back = combine_apply_func(self.tanhs, gradient,
                                       lambda tanh, grad: (1 - tanh ** 2) * grad)
        return self.back

class Linear(Layer):
    def __init__(self, input_count, output_count):
        self.input_count = input_count
        self.output_count = output_count
        # Weights and biases are drawn from a standard normal and kept as plain lists.
        self.w = np.random.normal(size=(output_count, input_count)).tolist()
        self.b = np.random.normal(size=output_count).tolist()

    def forward(self, input):
        self.input = input
        return [dot(input, self.w[i]) + self.b[i] for i in range(self.output_count)]

    def backward(self, gradient):
        self.b_grad = gradient
        self.w_grad = [[self.input[i] * gradient[j] for i in range(self.input_count)]
                       for j in range(self.output_count)]
        # Return the gradient with respect to the layer's input.
        return [sum(self.w[i][j] * gradient[i] for i in range(self.output_count))
                for j in range(self.input_count)]

    def params(self):
        return [self.w, self.b]

    def grads(self):
        return [self.w_grad, self.b_grad]
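
# Shape sketch for Linear (commented out, weights are random): a Linear(2, 3)
# maps a length-2 input to a length-3 output, and backward maps a length-3
# gradient back to a length-2 gradient with respect to the input.
'''
lin = Linear(2, 3)
print(len(lin.forward([1.0, -1.0])))        # 3
print(len(lin.backward([1.0, 1.0, 1.0])))   # 2
'''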

class Sequential(Layer):
    def __init__(self, layers):
        self.layers = layers

    def forward(self, input):
        for layer in self.layers:
            input = layer.forward(input)
        return input

    def backward(self, gradient):
        for layer in reversed(self.layers):
            gradient = layer.backward(gradient)
        return gradient

    def params(self):
        return [param for layer in self.layers for param in layer.params()]

    def grads(self):
        return [grad for layer in self.layers for grad in layer.grads()]

class Loss:
    def loss(self, pred, actual):
        pass

    def gradient(self, pred, actual):
        pass

class SSE(Loss):
    def loss(self, pred, actual):
        squared_errors = combine_apply_func(pred, actual,
                                            lambda pred, actual: (pred - actual) ** 2)
        return np.sum(squared_errors)

    def gradient(self, pred, actual):
        self.grad = combine_apply_func(pred, actual,
                                       lambda pred, actual: 2 * (pred - actual))
        return self.grad

class SoftmaxCrossEntropy(Loss):
    def loss(self, pred, actual):
        probabilities = softmax(pred).tolist()
        # 1e-30 guards against log(0) when a probability underflows to zero.
        likelihoods = combine_apply_func(probabilities, actual,
                                         lambda p, act: math.log(p + 1e-30) * act)
        return -np.sum(likelihoods)

    def gradient(self, pred, actual):
        probabilities = softmax(pred).tolist()
        return combine_apply_func(probabilities, actual, lambda p, act: p - act)
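
# Illustrative check of SoftmaxCrossEntropy (commented out): with a one-hot
# target the loss is -log of the probability of the true class, and the
# gradient is "softmax probabilities minus target", whose entries sum to ~0.
'''
ce = SoftmaxCrossEntropy()
print(ce.loss([2.0, 1.0, 0.1], [1, 0, 0]))      # about 0.417
print(ce.gradient([2.0, 1.0, 0.1], [1, 0, 0]))  # entries sum to roughly zero
'''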

class Optimizer:
    def step(self, layer):
        pass

class GradientDescent(Optimizer):
    def __init__(self, learning_rate):
        self.lr = learning_rate

    def step(self, layer):
        for param, grad in zip(layer.params(), layer.grads()):
            # Update the parameter lists in place so the layers see the new values.
            param[:] = combine_apply_func(param, grad,
                                          lambda param, grad: param - grad * self.lr)
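
# Note on the in-place update above (illustrative, commented out): assigning
# through param[:] rewrites the contents of the very lists returned by
# layer.params(), so self.w and self.b change without any copying.
'''
layer = Linear(1, 1)
w_before = layer.w
layer.forward([1.0])
layer.backward([1.0])
GradientDescent(learning_rate=0.1).step(layer)
print(layer.w is w_before)   # True: same list object, values updated in place
'''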

'''xs = [[0, 0],
      [0, 1],
      [1, 0],
      [1, 1]]
ys = [[0], [1], [1], [0]]
net = Sequential([
    Linear(2, 2),
    Sigmoid(),
    Linear(2, 1)
])
loss = SSE()
optim = GradientDescent(learning_rate=0.1)
with tqdm.trange(3000) as t:
    for epoch in t:
        epoch_loss = 0.0
        for x, y in zip(xs, ys):
            pred = net.forward(x)
            epoch_loss += loss.loss(pred, y)
            gradient = loss.gradient(pred, y)
            net.backward(gradient)
            optim.step(net)
        t.set_description(f"xor loss {epoch_loss:.3f}")
for param in net.params():
    print(param)'''

def fizz_buzz_encode(x):
    if x % 15 == 0:
        return [0, 0, 0, 1]
    elif x % 5 == 0:
        return [0, 0, 1, 0]
    elif x % 3 == 0:
        return [0, 1, 0, 0]
    else:
        return [1, 0, 0, 0]

def binary_encode(x):
    binary = []
    for i in range(10):
        binary.append(x % 2)
        x = x // 2
    return binary
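
# Encoding sketch (commented out): binary_encode is least-significant-bit
# first and always 10 bits long, and fizz_buzz_encode is a 4-way one-hot label.
'''
print(binary_encode(6))      # [0, 1, 1, 0, 0, 0, 0, 0, 0, 0]
print(fizz_buzz_encode(15))  # [0, 0, 0, 1] -> "fizzbuzz"
'''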

def fizzbuzz_accuracy(low, hi, net):
    num_correct = 0
    for n in range(low, hi):
        x = binary_encode(n)
        predicted = np.argmax(net.forward(x))
        actual = np.argmax(fizz_buzz_encode(n))
        labels = [str(n), "fizz", "buzz", "fizzbuzz"]
        print(n, labels[predicted], labels[actual])
        if predicted == actual:
            num_correct += 1
    return num_correct / (hi - low)

xs = [binary_encode(n) for n in range(101, 1024)]
ys = [fizz_buzz_encode(n) for n in range(101, 1024)]

net = Sequential([
    Linear(10, 25),
    Tanh(),
    Linear(25, 4)
])

optimizer = GradientDescent(learning_rate=0.1)
loss = SoftmaxCrossEntropy()

with tqdm.trange(1000) as t:
    for epoch in t:
        epoch_loss = 0.0
        for x, y in zip(xs, ys):
            predicted = net.forward(x)
            epoch_loss += loss.loss(predicted, y)
            gradient = loss.gradient(predicted, y)
            net.backward(gradient)
            optimizer.step(net)
        t.set_description(f"fb loss: {epoch_loss:.3f}")

print("test results", fizzbuzz_accuracy(1, 101, net))