@author: rbodo
"""

-import numpy as np
 import tensorflow as tf
 from tensorflow.keras.layers import Dense, Flatten, AveragePooling2D, Layer, \
     MaxPooling2D, Conv2D, Concatenate, DepthwiseConv2D, Reshape, ZeroPadding2D
@@ -80,31 +79,37 @@ def spike_call(self, x, call):
         self._a = tf.Variable(lambda: tf.zeros_like(x), name='activation',
                               trainable=False)

-        # In case of centered input layer, some x values could be negative.
+        # If not using ReLU, some x values could be negative.
         # Remove and store signs to apply after binarization.
         signs = tf.sign(x)
         x = tf.abs(x)

-        # Make sure x is normalized before binarization.
-        x_max = tf.reduce_max(x)
-        x = tf.divide(x, x_max)
+        # Make sure input is normalized before binarization. Hidden layers are
+        # normalized during parsing.
+        if self.is_first_spiking:
+            x_max = tf.reduce_max(x)
+            x = tf.divide(x, x_max)
+        else:
+            x_max = 1
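+            # Parsing already normalized the hidden layers, so the rescale
+            # by x_max below is a no-op.
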
         # Transform x into binary format here. Effective batch_size increases
         # from 1 to num_bits.
-        x_b = self.to_binary(x)
+        x = self.to_binary(x)

         # Apply signs and rescale back to original range.
-        x_b = tf.multiply(x_b, signs * x_max)
+        x = tf.multiply(x, signs * x_max)

         # Perform layer operation, e.g. convolution, on every power of 2.
-        x_b = call(self, x_b)
+        y = call(self, x)

         # Add up the weighted powers of 2 to recover the activation values.
-        y = tf.reduce_sum(x_b, 0, keepdims=True)
+        y = tf.reduce_sum(y, 0, keepdims=True)

         # Apply non-linearity.
-        y = tf.nn.softmax(y) if self.activation_str == 'softmax' \
-            else tf.nn.relu(y)
+        if self.activation_str == 'softmax':
+            y = tf.nn.softmax(y)
+        elif self.activation_str == 'relu':
+            y = tf.nn.relu(y)

         self.spikerates.assign(y)

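# A minimal NumPy sketch of the scheme above, assuming a bias-free linear
# layer: the input is decomposed into num_bits planes already weighted by
# their powers of 2, the layer op runs once per plane, and summing over the
# bit dimension recovers the result on the quantized input, since
# op(sum_i p_i) = sum_i op(p_i). The shapes and the stand-in ``weights``
# matrix are assumptions for illustration.
import numpy as np

num_bits = 4
rng = np.random.default_rng(0)
x = rng.uniform(0, 1, size=(1, 8))   # normalized input, shape (1, features)
weights = rng.normal(size=(8, 3))    # stand-in for a Dense kernel

# Greedy decomposition as in ``to_binary``: plane i carries 2**-(i + 1)
# wherever that power still fits into the remainder.
powers = [2.0 ** -(i + 1) for i in range(num_bits)]
planes = np.zeros((num_bits,) + x.shape)
a = x.copy()
for i, p in enumerate(powers):
    planes[i] = (a > p) * p
    a -= planes[i]

y = (planes @ weights).sum(0)        # layer op per plane, then sum
y_direct = planes.sum(0) @ weights   # layer op on the quantized input
assert np.allclose(y, y_direct)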
@@ -130,7 +135,8 @@ def to_binary(self, x):
         ``x`` is distributed across the first dimension of ``x_binary``.
         """

-        self._a.assign(x)
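+        # Quantize x to multiples of 1 / n (n = 2**num_bits - 1) before
+        # extracting the bit planes.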
+        n = 2 ** self.num_bits - 1
+        self._a.assign(tf.divide(tf.round(tf.multiply(x, n)), n))

         for i in tf.range(self.num_bits):
             mask = tf.cast(tf.greater(self._a, self.powers[i]), tf.float32)
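# End-to-end sketch of ``to_binary`` in NumPy, under the same assumptions
# (powers[i] = 2**-(i + 1), strict greater-than test). The helper name
# ``bit_planes`` is hypothetical; the ``to_binary_numpy`` function removed
# below implemented the same loop, minus the quantization step.
import numpy as np

def bit_planes(x, num_bits):
    n = 2 ** num_bits - 1
    a = np.round(np.asarray(x, dtype=float) * n) / n   # quantize as above
    planes = np.zeros((num_bits,) + a.shape)
    for i in range(num_bits):
        p = 2.0 ** -(i + 1)
        planes[i] = a > p          # emit bit i where the power still fits
        a = a - planes[i] * p      # remove its contribution
    return planes

print(bit_planes([0.9], 4).ravel())
# -> [1. 1. 1. 0.]: 0.9 snaps to 14/15, greedily covered by 0.5 + 0.25 + 0.125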
@@ -143,41 +149,6 @@ def to_binary(self, x):
         return self._x_binary


-def to_binary_numpy(x, num_bits):
-    """Transform an array of floats into binary representation.
-
-    Parameters
-    ----------
-
-    x: ndarray
-        Input array containing float values. The first dimension has to be of
-        length 1.
-    num_bits: int
-        The fixed point precision to be used when converting to binary.
-
-    Returns
-    -------
-
-    binary_array: ndarray
-        Output boolean array. The first dimension of x is expanded to length
-        ``bits``. The binary representation of each value in ``x`` is
-        distributed across the first dimension of ``binary_array``.
-    """
-
-    x_binary = np.zeros([num_bits] + list(x.shape[1:]))
-
-    powers = [2 ** -(i + 1) for i in range(num_bits)]
-
-    a = np.copy(x)
-
-    for i in range(num_bits):
-        mask = np.greater(a, powers[i])
-        x_binary[i] = mask
-        a -= mask * powers[i]
-
-    return x_binary
-
-
 class SpikeConcatenate(Concatenate):
     """Spike merge layer"""