URL
https://opencores.org/ocsvn/artificial_neural_network/artificial_neural_network/trunk
Subversion Repositories artificial_neural_network
Compare Revisions
- This comparison shows the changes necessary to convert path
/artificial_neural_network/trunk/ANN_kernel
- from Rev 7 to Rev 8
- ↔ Reverse comparison
Rev 7 → Rev 8
/RTL_VHDL_files/activation_function.vhd
25,6 → 25,7
entity activation_function is |
generic |
( |
lsbit : natural := 10; |
f_type : string := "linear"; -- Activation function type |
Nbit : natural := 8 -- Bit width |
); |
41,6 → 42,22
|
architecture Structural of activation_function is |
|
component af_sigmoid is |
generic |
( |
Nbit : natural := 8 |
); |
port |
( |
reset : in std_logic; |
clk : in std_logic; |
run_in : in std_logic; -- Start and input data validation |
inputs : in std_logic_vector(Nbit-1 downto 0); -- Input data |
run_out : out std_logic; -- Output data validation, run_in for the next layer |
outputs : out std_logic_vector(Nbit-1 downto 0) -- Output data |
); |
end component; |
|
begin |
|
-- Linear activation function. It is a direct assignment: |
53,7 → 70,7
-- Example 1: sigmoid activation function implemented as a Look-Up-Table (LUT): |
Sigmoid_f: |
if (f_type = "siglut") generate |
siglut_inst: entity work.af_sigmoid |
siglut_inst: af_sigmoid |
generic map |
( |
Nbit => Nbit |
75,7 → 92,8
siglut_inst: entity work.af_sigmoid2 |
generic map |
( |
Nbit => Nbit |
Nbit => Nbit, |
lsbit => lsbit |
) |
port map |
( |
/RTL_VHDL_files/adder_tree.vhd
46,6 → 46,29
|
architecture Behavioral of adder_tree is |
|
|
|
component adder_tree is |
generic |
( |
NumIn : integer := 9; -- Number of inputs |
Nbit : integer := 12 -- Bit width of the input data |
); |
|
port |
( |
-- Input ports |
reset : in std_logic; |
clk : in std_logic; |
en : in std_logic; -- Enable |
inputs : in std_logic_vector((Nbit*NumIn)-1 downto 0); -- Input data |
|
-- Output ports |
en_out : out std_logic; -- Output enable (output data validation) |
output : out std_logic_vector(Nbit-1 downto 0) -- Output of the tree adder |
); |
end component; |
|
constant NumIn2 : integer := NumIn/2; -- Number of inputs of the next adder tree layer                               |
|
signal next_en : std_logic := '0'; -- Next adder tree layer enable |
107,7 → 130,7
recursion: |
if (NumIn > 2) generate |
|
sub_adder_tree: entity work.adder_tree |
sub_adder_tree: adder_tree |
generic map |
( |
NumIn => (NumIn2)+(NumIn mod 2), |
/RTL_VHDL_files/af_sigmoid.vhd
1,99 → 1,99
---------------------------------------------------------------------------------- |
-- Company: CEI |
-- Engineer: Enrique Herrero |
-- |
-- Create Date: |
-- Design Name: Configurable ANN |
-- Module Name: af_sigmoid - Behavioral |
-- Project Name: |
-- Target Devices: |
-- Tool versions: |
-- Description: Sigmoid activation function implemented as a Look-Up-Table (LUT). |
-- |
-- Dependencies: |
-- |
-- Revision: |
-- Revision 0.01 - File Created |
-- Revision 1 - David Aledo |
-- Additional Comments: |
-- |
---------------------------------------------------------------------------------- |
library IEEE; |
use IEEE.STD_LOGIC_1164.ALL; |
use ieee.numeric_std.ALL; |
use ieee.math_real.all; |
|
|
-- Sigmoid activation function implemented as a Look-Up-Table (LUT).
-- Registered: outputs and run_out update on the rising clock edge after run_in.
entity af_sigmoid is
generic
(
  Nbit : natural := 8 -- Bit width of input and output data
);
port
(
  reset : in std_logic; -- Synchronous reset: clears run_out and outputs
  clk : in std_logic; -- Clock (rising edge active)
  run_in : in std_logic; -- Start and input data validation
  inputs : in std_logic_vector(Nbit-1 downto 0); -- Input data
  run_out : out std_logic; -- Output data validation, run_in for the next layer
  outputs : out std_logic_vector(Nbit-1 downto 0) -- Output data
);
end af_sigmoid;
|
|
architecture Behavioral of af_sigmoid is

  -- Definition of internal modules, constants, signals, etc...

  -- Sigmoid parameters:
  constant f0 : real := 2.0; -- Slope at the origin
  constant fr : real := 2.0; -- fr = fmax - fmin (output range of the sigmoid)

  -- LUT read index. The table is addressed by the raw (two's-complement) bit
  -- pattern of the input, so the index is always in 0 .. 2**Nbit-1:
  signal dataIn : integer range (2**Nbit)-1 downto 0;

  type table_t is array(0 to (2**Nbit)-1) of std_logic_vector(Nbit-1 downto 0); -- LUT type

  -- Function Sigmoidal: generates the Look-Up-Table for the sigmoid activation function.
  --   margin : maximum absolute value of the input x
  --   Nbit   : bit width of input and output data
  -- Entry (idx mod 2**Nbit) holds f(x) for the signed input code idx, so a read
  -- addressed with unsigned(inputs) returns f() of the signed input value.
  function Sigmoidal(margin : real; Nbit : natural) return table_t is
    variable scale, x, y, w, t : real;
    variable u : integer;
    variable fbits : std_logic_vector(Nbit-1 downto 0);
    variable table : table_t;
  begin
    scale := (2.0*margin)/(2.0**Nbit); -- Gap between two consecutive sample points
    x := -margin;
    for idx in -(2**(Nbit-1)) to (2**(Nbit-1))-1 loop
      y := (fr/(1.0+exp(((-4.0*f0)/fr)*x)))-(fr/2.0); -- Sigmoid centered at the origin
      w := y*(2.0**(Nbit-1)); -- Fixed-point scaling (shift Nbit-1 bits to the left)
      t := round(w);
      u := integer(t);
      fbits := std_logic_vector(to_signed(u,Nbit));
      -- Store at the two's-complement (unsigned) address of idx. VHDL "mod"
      -- returns a non-negative result, so the index is always in range.
      -- (Clearer than the former to_unsigned(idx+2**Nbit, Nbit) truncation trick.)
      table(idx mod (2**Nbit)) := fbits;
      x := x+scale;
    end loop;
    return table;
  end Sigmoidal;

  signal Table : table_t := Sigmoidal(1.0,Nbit); -- Generation of the LUT (at elaboration/synthesis time)

begin

  -- Bug fix: read the input as its raw (unsigned) bit pattern. The previous
  -- signed conversion produced negative indexes, which are outside dataIn's
  -- declared range and do not match how the LUT was filled.
  dataIn <= to_integer(unsigned(inputs));

  -- Registered LUT look-up with synchronous reset (reset sampled on clk edge,
  -- so it does not belong in the sensitivity list):
  Activation: process(clk)
  begin
    if clk'event and clk = '1' then
      if reset = '1' then
        run_out <= '0';
        outputs <= (others => '0');
      else
        if run_in = '1' then
          run_out <= '1';
          outputs <= Table(dataIn); -- Assigns output value from the LUT
        else
          run_out <= '0';
        end if;
      end if;
    end if;
  end process;

end Behavioral;
---------------------------------------------------------------------------------- |
-- Company: CEI |
-- Engineer: Enrique Herrero |
-- |
-- Create Date: |
-- Design Name: Configurable ANN |
-- Module Name: af_sigmoid - Behavioral |
-- Project Name: |
-- Target Devices: |
-- Tool versions: |
-- Description: Sigmoid activation function implemented as a Look-Up-Table (LUT). |
-- |
-- Dependencies: |
-- |
-- Revision: |
-- Revision 0.01 - File Created |
-- Revision 1 - David Aledo |
-- Additional Comments: |
-- |
---------------------------------------------------------------------------------- |
library IEEE; |
use IEEE.STD_LOGIC_1164.ALL; |
use ieee.numeric_std.ALL; |
use ieee.math_real.all; |
|
|
-- Sigmoid activation function implemented as a Look-Up-Table (LUT).
-- Registered: outputs and run_out update on the rising clock edge after run_in.
entity af_sigmoid is
generic
(
  Nbit : natural := 8 -- Bit width of input and output data
);
port
(
  reset : in std_logic; -- Synchronous reset: clears run_out and outputs
  clk : in std_logic; -- Clock (rising edge active)
  run_in : in std_logic; -- Start and input data validation
  inputs : in std_logic_vector(Nbit-1 downto 0); -- Input data
  run_out : out std_logic; -- Output data validation, run_in for the next layer
  outputs : out std_logic_vector(Nbit-1 downto 0) -- Output data
);
end af_sigmoid;
|
|
architecture Behavioral of af_sigmoid is

  -- Definition of internal modules, constants, signals, etc...

  -- Sigmoid parameters:
  constant f0 : real := 1.0; -- Slope at the origin
  constant fr : real := 2.0; -- fr = fmax - fmin

  -- LUT read index: the raw (two's-complement) bit pattern of the input,
  -- always in 0 .. 2**Nbit-1:
  signal dataIn: integer range (2**Nbit-1) downto 0;
  type table_t is array(0 to (2**Nbit)-1) of std_logic_vector(Nbit-1 downto 0); -- LUT type

  -- Function Sigmoidal: generates the Look-Up-Table for the sigmoid activation function.
  -- margin: maximum absolute value of x.
  -- Entry (idx mod 2**Nbit) holds f(x) for the signed input code idx, so a read
  -- addressed with unsigned(inputs) returns f() of the signed input value.
  function Sigmoidal(margin:real;Nbit:natural) return table_t is
    variable scale,x,y,w,t: real;
    variable u: integer;
    variable fbits: std_logic_vector(Nbit-1 downto 0);
    variable table: table_t;
  begin
    scale := (2.0*margin)/(2.0**Nbit); -- Calculates gap between two points
    x := -margin;
    for idx in -(2**(Nbit-1)) to (2**(Nbit-1))-1 loop
      y := ( fr / (1.0+exp(((-4.0*f0)/fr)*x)) ) - (fr/2.0);
      w := y*(2.0**(Nbit-1)); -- Shifts bits to the left (fixed-point scaling)
      t := round(w);
      u := integer(t);
      fbits := std_logic_vector(to_signed(u,Nbit));
      -- to_unsigned truncates to Nbit bits, i.e. the index is idx mod 2**Nbit
      -- (the two's-complement address of idx):
      table(to_integer(to_unsigned(idx+(2**Nbit),Nbit))):= fbits;
      x := x+scale;
    end loop;
    return table;
  end Sigmoidal;
  signal Table: table_t := Sigmoidal(1.0,Nbit); -- Generation of the LUT (at synthesis time)

begin

  -- Read the input as its raw bit pattern; this matches how the LUT was filled.
  dataIn <= to_integer(unsigned(inputs));

  -- Registered LUT look-up with synchronous reset:
  Activation: process(clk,reset)
  begin
    if clk'event and clk = '1' then
      if reset = '1' then
        run_out <= '0';
        outputs <= (others => '0');
      else
        if run_in = '1' then
          run_out <='1';
          outputs <=Table(dataIn); -- Assigns output value from the LUT
        else
          run_out <='0';
        end if;
      end if;
    end if;
  end process;
end Behavioral;
/RTL_VHDL_files/af_sigmoid2.vhd
28,7 → 28,8
entity af_sigmoid2 is |
generic |
( |
Nbit : natural := 8 |
Nbit : natural := 8; |
lsbit : natural := 10 |
); |
port |
( |
47,7 → 48,7
-- Definition of internal modules, constants, signals, etc... |
|
-- Sigmoid parameters: |
constant f0 : real := 0.5; -- Slope at the origin |
constant f0 : real := 1.0; -- Slope at the origin |
constant fr : real := 2.0; -- fr = fmax - fmin |
|
signal dataIn: integer range (2**Nbit-1) downto 0; -- To convert std_logic_vector input to integer index for the LUT |
54,8 → 55,8
type table_t is array(0 to (2**Nbit)-1) of std_logic_vector(Nbit-1 downto 0); -- LUT type |
|
-- Function Sigmoidal: generates the Look-Up-Table for the sigmoid activation function: |
-- margin: maximun value of x. |
function Sigmoidal(margin:real;Nbit:natural) return table_t is |
-- margin: maximum value of input |
function Sigmoidal(margin:real;Nbit:natural;lsbit:natural) return table_t is |
variable scale,x,y,w,t: real; |
variable u: integer; |
variable fbits: std_logic_vector(Nbit-1 downto 0); |
62,10 → 63,10
variable table: table_t; |
begin |
scale := (2.0*margin)/(2.0**Nbit); -- Calculates gap between to points |
x := -margin; |
x := -margin; |
for idx in -(2**(Nbit-1)) to (2**(Nbit-1))-1 loop |
y := ( fr / (1.0+exp(((-4.0*f0)/fr)*x)) ) - (fr/2.0); |
w := y*(2.0**(Nbit-1)); -- Shifts bits to the left |
w := y*(2.0**(lsbit)); -- Shifts bits to the left |
t := round(w); |
u := integer(t); |
fbits := std_logic_vector(to_signed(u,Nbit)); |
74,12 → 75,12
end loop; |
return table; |
end Sigmoidal; |
signal Table: table_t := Sigmoidal(1.0,Nbit); -- Generation of the LUT (at synthesis time) |
signal Table: table_t := Sigmoidal(2.0**(Nbit-lsbit-1),Nbit,lsbit); -- Generation of the LUT (at synthesis time) |
|
begin |
|
-- Description of the activation function |
dataIn <= to_integer(signed(inputs)); |
dataIn <= to_integer(unsigned(inputs)); |
|
Activacion: process(clk,reset) |
begin |
/RTL_VHDL_files/ann.vhd
28,6 → 28,7
entity ann is |
generic |
( |
WBinit : boolean := false; |
Nlayer : integer := 2; ---- Number of layers |
NbitW : natural := 16; ---- Bit width of weights and biases |
NumIn : natural := 64; ---- Number of inputs to the network |
117,6 → 118,8
first_layerSP_top_inst: entity work.layerSP_top |
generic map |
( |
WBinit => WBinit , |
LNum => 0 , |
NumN => NumN(0), -- Number of neurons in the first layer |
NumIn => NumIn, ---- Number of inputs of the first layer |
NbitIn => NbitIn, --- Bit width of the input data |
178,7 → 181,8
generic map |
( |
f_type => ftype_v(i-1), -- Activation function type of the previous layer (i-1) |
Nbit => NbitO(i-1) --- Bit width of the outputs of the previous layer (i-1) |
Nbit => NbitO(i-1), -- Bit width of the outputs of the previous layer (i-1) |
lsbit => LSbit(i-1) -- least significant bit of activation function |
) |
port map |
( |
202,7 → 206,8
generic map |
( |
f_type => ftype_v(i-1), -- Activation function type of the previous layer (i-1) |
Nbit => NbitO(i-1) --- Bit width of the outputs of the previous layer (i-1) |
Nbit => NbitO(i-1), -- Bit width of the outputs of the previous layer (i-1) |
lsbit => LSbit(i-1) -- least significant bit of activation function |
) |
port map |
( |
226,7 → 231,8
generic map |
( |
f_type => ftype_v(i-1), -- Activation function type of the previous layer (i-1) |
Nbit => NbitO(i-1) --- Bit width of the outputs of the previous layer (i-1) |
Nbit => NbitO(i-1), -- Bit width of the outputs of the previous layer (i-1) |
lsbit => LSbit(i-1) -- least significant bit of activation function |
) |
port map |
( |
245,7 → 251,8
generic map |
( |
f_type => ftype_v(i-1), -- Activation function type of the previous layer (i-1) |
Nbit => NbitO(i-1) --- Bit width of the outputs of the previous layer (i-1) |
Nbit => NbitO(i-1) , -- Bit width of the outputs of the previous layer (i-1) |
lsbit => LSbit(i-1) -- least significant bit of activation function |
) |
port map |
( |
270,7 → 277,8
generic map |
( |
f_type => ftype_v(i-1), |
Nbit => NbitO(i-1) |
Nbit => NbitO(i-1), |
lsbit => LSbit(i-1) -- least significant bit of activation function |
) |
port map |
( |
309,6 → 317,8
layerSP_top_inst: entity work.layerSP_top |
generic map |
( |
WBinit => WBinit , |
LNum => i , |
NumN => NumN(i), --- Number of neurons in layer (i) |
NumIn => NumN(i-1), -- Number of inputs, is the number of neurons in previous layer (i-1) |
NbitIn => NbitO(i-1), -- Bit width of the input data, is the bit width of output data of layer (i-1) |
344,7 → 354,9
if ltype_v(i) = "PS" generate |
layerPS_top_inst: entity work.layerPS_top |
generic map |
( |
( |
WBinit => WBinit , |
LNum => i , |
NumN => NumN(i), --- Number of neurons in layer (i) |
NumIn => NumN(i-1), -- Number of inputs, is the number of neurons in previous layer (i-1) |
NbitIn => NbitO(i-1), -- Bit width of the input data, is the bit width of output data of layer (i-1) |
380,9 → 392,9
if ltype_v(i) = "PP" generate |
-- TODO: instance a full parallel layer. At current version this layer type has not been developed. |
-- synthesis translate_off |
assert l_type(i) /= "PP" |
report "Current version does not accept parallel-input parallel-output (PP) layer type." |
severity failure; |
--assert l_type(i) /= "PP" |
-- report "Current version does not accept parallel-input parallel-output (PP) layer type." |
-- severity failure; |
-- synthesis translate_on |
-- TODO: delete above lines when instantiate the parallel-input parallel-output layer. |
end generate; |
414,7 → 426,8
generic map |
( |
f_type => ftype_v(Nlayer-1), -- Activation function type of the last layer (Nlayer-1) |
Nbit => NbitO(Nlayer-1) --- Bit width of the outputs of the last layer (Nlayer-1) |
Nbit => NbitO(Nlayer-1), --- Bit width of the outputs of the last layer (Nlayer-1) |
lsbit => LSbit(Nlayer-1) -- least significant bit of activation function |
) |
port map |
( |
435,7 → 448,8
generic map |
( |
f_type => ftype_v(Nlayer-1), -- Activation function type of the last layer (Nlayer-1) |
Nbit => NbitO(Nlayer-1) --- Bit width of the outputs of the last layer (Nlayer-1) |
Nbit => NbitO(Nlayer-1), -- Bit width of the outputs of the last layer (Nlayer-1) |
lsbit => LSbit(Nlayer-1) -- least significant bit of activation function |
) |
port map |
( |
/RTL_VHDL_files/layerPS_top.vhd
22,6 → 22,9
use IEEE.STD_LOGIC_1164.ALL; |
use ieee.numeric_std.all; |
|
library work; |
use work.wb_init.all; -- initialization package, comment out when not used |
|
-- Deprecated XPS library: |
--library proc_common_v3_00_a; |
--use proc_common_v3_00_a.proc_common_pkg.all; -- Only for simulation ( pad_power2() ) |
30,15 → 33,17
|
generic |
( |
NumN : natural := 64; ------- Number of neurons of the layer |
NumIn : natural := 8; ------- Number of inputs of each neuron |
NbitIn : natural := 12; ------- Bit width of the input data |
NbitW : natural := 8; ------- Bit width of weights and biases |
WBinit : boolean := false; |
LNum : natural := 0; ------- layer number (needed for initialization) |
NumN : natural := 34; ------- Number of neurons of the layer |
NumIn : natural := 27; ------- Number of inputs of each neuron |
NbitIn : natural := 8; ------- Bit width of the input data |
NbitW : natural := 1; ------- Bit width of weights and biases |
NbitOut : natural := 8; ------- Bit width of the output data |
lra_l : natural := 10; ------- Layer RAM address length. It should value log2(NumN)+log2(NumIn) |
wra_l : natural := 3; ------- Weight RAM address length. It should value log2(NumIn) |
lra_l : natural := 11; ------- Layer RAM address length. It should value log2(NumN)+log2(NumIn) |
wra_l : natural := 5; ------- Weight RAM address length. It should value log2(NumIn) |
bra_l : natural := 6; ------- Bias RAM address length. It should value log2(NumN) |
LSbit : natural := 4 ------- Less significant bit of the outputs |
LSbit : natural := 6 ------- Less significant bit of the outputs |
); |
|
port |
64,14 → 69,44
|
architecture Behavioral of layerPS_top is |
|
--type ramd_type is array (pad_power2(NumN)-1 downto 0) of std_logic_vector(NbitW-1 downto 0); -- Optimal: 32 or 64 spaces -- pad_power2() only for simulation |
--type layer_ram is array (pad_power2(NumIn)-1 downto 0) of ramd_type; |
type ramd_type is array (NumN-1 downto 0) of std_logic_vector(NbitW-1 downto 0); -- Optimal: 32 or 64 spaces |
type layer_ram is array (NumIn-1 downto 0) of ramd_type; |
type outm_type is array (NumIn-1 downto 0) of std_logic_vector(NbitW-1 downto 0); |
|
signal lram : layer_ram; -- Layer RAM. One RAM per input. It stores the weights |
signal breg : ramd_type; -- Bias RAM. They can be RAM because they are not accessed simultaneously |
function fw_init(LNum : natural) return layer_ram is |
variable tmp_arr : layer_ram := (others =>(others => (others => '0'))); |
begin |
if WBinit = true then |
for i in 0 to NumIn-1 loop |
for j in 0 to NumN-1 loop |
tmp_arr(i)(j) := w_init(LNum)(i)(j); |
end loop; |
end loop; |
end if; |
return tmp_arr ; |
end fw_init; |
|
|
|
|
function fb_init(LNum : natural) return ramd_type is |
variable tmp_arr : ramd_type := (others => (others => '0')) ; |
begin |
if WBinit = true then |
for i in 0 to NumN-1 loop |
tmp_arr(i) := b_init(LNum)(i); |
end loop; |
end if; |
return tmp_arr; |
end fb_init; |
|
--function fb_init(LNum : natural) return ramd_type is |
--begin |
-- return ramd_type(b_init(LNum)); |
--end fb_init; |
|
signal lram : layer_ram := fw_init(LNum); -- Layer RAM. One RAM per input. It stores the weights |
signal breg : ramd_type := fb_init(LNum); -- Bias RAM. They can be RAM because they are not accessed simultaneously |
signal outm : outm_type; -- RAM outputs to be multiplexed into rdata |
signal m_sel : std_logic_vector(NumIn-1 downto 0); --------- RAM select |
signal Wyb : std_logic_vector((NbitW*NumIn)-1 downto 0); -- Weight vectors |
79,7 → 114,7
signal Nouts : std_logic_vector(NbitOut-1 downto 0); ------ Outputs from neurons |
signal uaddr : unsigned(lra_l-1 downto 0); -- Unsigned address of weight and bias memories |
|
|
-- Control signals                                                                                                    |
signal cont : integer range 0 to NumN-1; -- Neuron counter |
signal cntb : integer range 0 to NumN-1; -- Delayed counter for biases |
signal st : bit; ------- State |
88,6 → 123,9
signal en3 : std_logic; -- Shift register enable |
signal en_out : std_logic; |
|
signal input_aux1 : std_logic_vector((NbitIn*NumIn)-1 downto 0); |
signal input_aux2 : std_logic_vector((NbitIn*NumIn)-1 downto 0); |
signal input_aux3 : std_logic_vector((NbitIn*NumIn)-1 downto 0); |
begin |
|
layerPS_inst: entity work.layerPS |
108,7 → 146,7
en => en1, |
en2 => en2, |
en_r => en3, |
inputs => inputs, |
inputs => input_aux2, |
Wyb => Wyb, |
bias => bias, |
|
227,6 → 265,10
en2 <= '0'; |
run_out <= '0'; |
else |
input_aux1 <= inputs; |
input_aux2 <= input_aux1; |
--input_aux3 <=input_aux3 input_aux2; |
|
cntb <= cont; -- Bias counter is delayed to assure correctness of pipeline data |
case st is |
when '0' => |
238,13 → 280,12
end case; |
when '1' => |
en1 <= '1'; -- en1 is delayed 1 cycle in order to insert a register for Wyb |
case cont is |
when (NumN-1) => |
cont <= 0; |
st <= '0'; |
when others => |
cont <= cont +1; |
end case; |
if cont = NumN-1 then |
cont <= 0; |
st <= '0'; |
else |
cont <= cont +1; |
end if; |
end case; |
|
en2 <= en1; |
/RTL_VHDL_files/layerSP_top.vhd
22,6 → 22,9
use IEEE.STD_LOGIC_1164.ALL; |
use ieee.numeric_std.all; |
|
library work; |
use work.wb_init.all; -- initialization package, comment out when not used |
|
-- Deprecated XPS library: |
--library proc_common_v3_00_a; |
--use proc_common_v3_00_a.proc_common_pkg.all; -- Only for simulation ( pad_power2() ) |
30,15 → 33,17
|
generic |
( |
NumN : natural := 8; ------- Number of neurons of the layer |
NumIn : natural := 64; ------- Number of inputs of each neuron |
WBinit : boolean := false; |
LNum : natural := 0; ------- layer number (needed for initialization) |
NumN : natural := 34; ------- Number of neurons of the layer |
NumIn : natural := 27; ------- Number of inputs of each neuron |
NbitIn : natural := 8; ------- Bit width of the input data |
NbitW : natural := 8; ------- Bit width of weights and biases |
NbitOut : natural := 12; ------- Bit width of the output data |
lra_l : natural := 10; ------- Layer RAM address length. It should value log2(NumN)+log2(NumIn) |
wra_l : natural := 6; ------- Weight RAM address length. It should value log2(NumIn) |
bra_l : natural := 3; ------- Bias RAM address length. It should value log2(NumN) |
LSbit : natural := 4 ------- Less significant bit of the outputs |
NbitW : natural := 32; ------- Bit width of weights and biases |
NbitOut : natural := 8; ------- Bit width of the output data |
lra_l : natural := 11; ------- Layer RAM address length. It should value log2(NumN)+log2(NumIn) |
wra_l : natural := 5; ------- Weight RAM address length. It should value log2(NumIn) |
bra_l : natural := 6; ------- Bias RAM address length. It should value log2(NumN) |
LSbit : natural := 6 ------- Less significant bit of the outputs |
); |
|
port |
64,14 → 69,39
|
architecture Behavioral of layerSP_top is |
|
--type ramd_type is array (pad_power2(NumIn)-1 downto 0) of std_logic_vector(NbitW-1 downto 0); -- Optimal: 32 or 64 spaces |
--type layer_ram is array (pad_power2(NumN)-1 downto 0) of ramd_type; |
type ramd_type is array (NumIn-1 downto 0) of std_logic_vector(NbitW-1 downto 0); -- Optimal: 32 or 64 spaces |
type layer_ram is array (NumN-1 downto 0) of ramd_type; |
type outm_type is array (NumN-1 downto 0) of std_logic_vector(NbitW-1 downto 0); |
|
|
signal lram : layer_ram; -- Layer RAM. One RAM per neuron. It stores the weights |
signal breg : outm_type; -- Bias registers. They can not be RAM because they are accessed simultaneously |
function fw_init(LNum : natural) return layer_ram is |
variable tmp_arr : layer_ram := (others => (others => (others => '0'))) ; |
begin |
if WBinit = true then |
for i in 0 to NumIn-1 loop |
for j in 0 to NumN-1 loop |
tmp_arr(j)(i) := w_init(LNum)(i)(j); |
end loop; |
end loop; |
end if; |
return tmp_arr ; |
end fw_init; |
|
function fb_init(LNum : natural) return outm_type is |
variable tmp_arr : outm_type := (others => (others => '0')) ; |
begin |
if WBinit = true then |
for i in 0 to NumN-1 loop |
tmp_arr(i) := b_init(LNum)(i); |
end loop; |
end if; |
return tmp_arr; |
end fb_init; |
|
|
|
signal lram : layer_ram := fw_init(LNum); -- Layer RAM. One RAM per neuron. It stores the weights |
signal breg : outm_type := fb_init(LNum); -- Bias registers. They can not be RAM because they are accessed simultaneously |
signal outm : outm_type; -- RAM outputs to be multiplexed into rdata |
signal m_sel : std_logic_vector(NumN-1 downto 0); -------- RAM select |
signal Wyb : std_logic_vector((NbitW*NumN)-1 downto 0); --- Weight vectors |
166,7 → 196,14
end if; |
end if; |
end process; |
outm(i) <= lram(i)(to_integer(uaddr(wra_l-1 downto 0))); -- Read all RAM |
outm(i) <= lram(i)(to_integer(uaddr(wra_l-1 downto 0))) when (uaddr(wra_l-1 downto 0) <= NumIn-1) else |
(others => '0') ; -- Read all RAM |
-- In my case I have 27 inputs and 34 neurons in the first layer. When I address |
-- the 1 layer's inputs for the second neuron the layer which acccepts a 6 bit wide |
-- input address (layer 2) sees the ..1 00100 (34) number and interprets it as an input |
-- address (which goes only up to 33) hence the bound check failure |
-- fix: I've changed the assignment to a conditional one to check if we are not |
-- trying to read a weight of an input higher than the number of this layer's inputs. |
end generate; |
|
-- Synchronous read including breg: |
173,6 → 210,8
process (clk) |
begin |
if (clk'event and clk = '1') then |
--report "addr: " & integer'image(wra_l-1); |
--report "addr: " & integer'image(to_integer(uaddr(wra_l-1 downto 0)) ); |
if (m_en = '1') then |
if (b_sel = '1') then |
rdata <= breg(to_integer(uaddr(bra_l-1 downto 0))); -- Bias registers selected |
246,6 → 285,9
else |
cont <= cont +1; |
end if; |
--elsif (cont = NumIn-1) then -- for layers with more that |
-- cont <= 0; -- 1 neuron uncommenting this |
-- aux2_en3 <= '1'; -- solved a problem with cont resetting |
end if; |
en2 <= en1; |
if (cont = 0 and run_in = '1') then |
/RTL_VHDL_files/layers_pkg.vhd
1,301 → 1,304
---------------------------------------------------------------------------------- |
-- Company: CEI - UPM |
-- Engineer: David Aledo |
-- |
-- Create Date: 01.10.2015 |
-- Design Name: Configurable ANN |
-- Pakage Name: layers_pkg |
-- Project Name: |
-- Target Devices: |
-- Tool Versions: |
-- Description: define array types for generics, functions to give them values from |
-- string generics, and other help functions |
-- Dependencies: |
-- |
-- Revision: |
-- Revision 0.01 - File Created |
-- Additional Comments: |
-- |
---------------------------------------------------------------------------------- |
|
library IEEE; |
use IEEE.STD_LOGIC_1164.all; |
|
--library proc_common_v3_00_a; -- Deprecated libray from XPS tool |
--use proc_common_v3_00_a.proc_common_pkg.all; |
|
package layers_pkg is

-- Array types for generics:
type int_vector is array (natural range <>) of integer; -- Generic integer vector
type ltype_vector is array (integer range <>) of string(1 to 2); -- Layer type vector
type ftype_vector is array (integer range <>) of string(1 to 6); -- Activation function type vector
-- Note: these strings cannot be unconstrained

-- Functions to assign values to vector types from string generics:
-- Arguments:
-- str_v : string to be converted
-- n : number of elements of the vector
-- Return: assigned vector
function assign_ints(str_v : string; n : integer) return int_vector;
function assign_ltype(str_v : string; n : integer) return ltype_vector;
function assign_ftype(str_v : string; n : integer) return ftype_vector;

-- Other functions:

-- Argument: c : character to be checked
-- Return: TRUE if c is 0, 1, 2, 3, 4, 5, 6, 7, 8 or 9
function is_digit(c : character) return boolean;

-- Base two logarithm for int_vector:
-- Arguments:
-- v : integer vector
-- n : number of elements of the vector
-- Return : integer vector of the base two logarithms of each element of v
function log2(v : int_vector; n : integer) return int_vector;

-- Calculate the total weight and bias memory address length:
-- Arguments:
-- NumIn : number of inputs of the network
-- NumN : number of neurons of each layer
-- n : number of layers (number of elements of NumN)
-- Return: total weight and bias memory address length (integer)
function calculate_addr_l(NumIn : integer; NumN : int_vector; n : integer) return integer;

-- Assign the weight and bias memory address length of each layer:
-- Arguments:
-- NumIn : number of inputs of the network
-- NumN : number of neurons of each layer
-- n : number of layers (number of elements of NumN and the return integer vector)
-- Return: weight and bias memory address length of each layer (integer vector)
function assign_addrl(NumIn : integer; NumN : int_vector; n : integer) return int_vector;

-- Calculate the maximum of the multiplications of two vectors element by element
-- Arguments:
-- v1 : input vector 1
-- v2 : input vector 2
-- Return: maximum of the multiplications of two vectors element by element
function calculate_max_mul(v1 : int_vector; v2 : int_vector) return integer;

-- Returns the max value of the input integer vector:
function calculate_max(v : int_vector) return integer;

-- Adding needed functions from the deprecated library proc_common_v3_00_a:
function max2 (num1, num2 : integer) return integer;
function log2(x : natural) return integer;

end layers_pkg;
|
package body layers_pkg is |
|
-- Returns the larger of the two integers (helper kept from the deprecated
-- proc_common_v3_00_a library interface).
function max2 (num1, num2 : integer) return integer is
begin
  if num1 < num2 then
    return num2;
  end if;
  return num1;
end function max2;
|
-- Function log2 -- returns number of bits needed to encode x choices |
-- x = 0 returns 0 |
-- x = 1 returns 0 |
-- x = 2 returns 1 |
-- x = 4 returns 2, etc. |
function log2(x : natural) return integer is
variable i : integer := 0;
variable val: integer := 1;
begin
if x = 0 then
return 0;
else
-- Fixed 30-iteration loop (instead of a while loop) so XST can unroll it;
-- iterations after val >= x are no-ops:
for j in 0 to 29 loop -- for loop for XST
if val >= x then null;
else
i := i+1;
val := val*2;
end if;
end loop;
-- Fix per CR520627 XST was ignoring this anyway and printing a
-- Warning in SRP file. This will get rid of the warning and not
-- impact simulation.
-- synthesis translate_off
assert val >= x
report "Function log2 received argument larger" &
" than its capability of 2^30. "
severity failure;
-- synthesis translate_on
return i;
end if;
end function log2;
|
|
-- Returns TRUE when c is a decimal digit ('0' to '9').
-- The CHARACTER enumeration orders the digits contiguously, so a range
-- comparison is equivalent to enumerating the ten digit literals.
function is_digit(c : character) return boolean is
begin
  return (c >= '0') and (c <= '9');
end is_digit;
|
-- Assign values to a integer vector from a string: |
-- Arguments: |
-- str_v : string to be converted |
-- n : number of elements of the vector |
-- Return: assigned integer vector |
function assign_ints(str_v : string; n : integer) return int_vector is
variable i : integer := n-1; ---- element counter (filled right to left)
variable d_power : integer := 1; -- decimal power (weight of the next digit)
variable ret : int_vector(n-1 downto 0) := (others => 0); -- return value
begin
-- The string is scanned right to left, building each element digit by digit
-- with increasing decimal weight.
-- NOTE(review): exactly one space between fields is assumed; leading,
-- trailing or repeated spaces would miscount elements -- confirm callers.
for c in str_v'length downto 1 loop -- read every character in str_v
if str_v(c) = ' ' then -- a space separates a new element
assert i > 0
report "Error in assign_ints: number of elements in string is greater than n."
severity error;
i := i -1; -- decrease element counter to start calculate a new element
d_power := 1; -- reset the decimal power to 1
else
assert is_digit(str_v(c)) -- assert the new character is a digit
report "Error in assign_ints: character " & str_v(c) & " is not a digit."
severity error;
-- add the value of the new character to the element calculation ( + ("<new_digit>" - "0") * d_power):
ret(i) := ret(i) + (character'pos(str_v(c))-character'pos('0'))*d_power;
d_power := d_power*10; -- increase the decimal power for the next digit
end if;
end loop;
assert i = 0
report "Error in assign_ints: number of elements in string is less than n."
severity error;
return ret;
end assign_ints;
|
-- Assign values to an activation function type vector from a string: |
-- Arguments: |
-- str_v : string to be converted |
-- n : number of elements of the vector |
-- Return: assigned activation function type vector |
function assign_ftype(str_v : string; n : integer) return ftype_vector is
variable i : integer := 0; -- element counter
variable l : integer := 1; -- element length counter (1-based index into the 6-char name)
variable ret : ftype_vector(n-1 downto 0) := (others => "linear"); -- return value
begin
-- NOTE(review): names shorter than 6 characters keep the trailing characters
-- of the "linear" default; names longer than 6 characters would index past
-- the 6-character string -- confirm the generic string is well formed.
for c in 1 to str_v'length loop -- read every character in str_v
if str_v(c) = ' ' then -- a space separates a new element
i := i +1; -- increase element counter to start calculate a new element
l := 1; -- reset element length counter
else
ret(i)(l) := str_v(c);
l := l +1; -- increase element length counter
end if;
end loop;
assert i = n-1
report "Error in assign_ftype: number of elements in string is less than n."
severity error;
return ret;
end assign_ftype;
|
-- Assign values to a layer type vector from a string:
-- Arguments:
--   str_v : string to be converted (2-character tags separated by spaces)
--   n : number of elements of the vector
-- Return: assigned layer type vector
function assign_ltype(str_v : string; n : integer) return ltype_vector is
   variable elem : integer := 0; -- index of the element currently being filled
   variable pos  : integer := 1; -- next character position within that element
   variable ret  : ltype_vector(n-1 downto 0) := (others => "SP"); -- return value
begin
   for c in 1 to str_v'length loop -- scan every character of str_v
      if str_v(c) /= ' ' then
         -- only 'P' (parallel) and 'S' (serial) are legal layer-type characters:
         assert str_v(c) = 'P' or str_v(c) = 'S'
            report "Error in assign_ltype: character " & str_v(c) & " is not 'P' (parallel) or 'S' (serial)."
            severity error;
         ret(elem)(pos) := str_v(c);
         pos := pos +1;
      else -- a space terminates the current element
         elem := elem +1;
         pos := 1;
      end if;
   end loop;
   assert elem = n-1
      report "Error in assign_ltype: number of elements do not coincide with number of introduced elements."
      severity error;
   return ret;
end assign_ltype;
|
-- Calculate the total weight and bias memory address length:
-- Arguments:
--   NumIn : number of inputs of the network
--   NumN : number of neurons of each layer
--   n : number of layers (number of elements of NumN)
-- Return: total weight and bias memory address length (integer)
function calculate_addr_l(NumIn : integer; NumN : int_vector; n : integer) return integer is -- matrix + b_sel
   -- Seed with the first layer's weight memory length (inputs x neurons):
   variable widest : integer := log2(NumIn)+log2(NumN(0));
begin
   -- Keep the widest weight memory length over the remaining layers:
   for layer in 1 to n-1 loop
      widest := max2( widest, log2(NumN(layer-1)) + log2(NumN(layer)) );
   end loop;
   return widest + 1; -- one extra bit selects between weights and bias
end calculate_addr_l;
|
-- Base two logarithm for int_vector:
-- Arguments:
--   v : integer vector
--   n : number of elements of the vector
-- Return : integer vector of the base two logarithms of each element of v
function log2(v : int_vector; n : integer) return int_vector is
   variable ret : int_vector(n-1 downto 0); -- per-element results
begin
   for idx in ret'range loop -- apply the scalar log2 element-wise
      ret(idx) := log2(v(idx));
   end loop;
   return ret;
end log2;
|
-- Assign the weight and bias memory address length of each layer:
-- Arguments:
--   NumIn : number of inputs of the network
--   NumN : number of neurons of each layer
--   n : number of layers (number of elements of NumN and the return integer vector)
-- Return: weight and bias memory address length of each layer (integer vector)
function assign_addrl(NumIn : integer; NumN : int_vector; n : integer) return int_vector is
   variable ret : int_vector(n-1 downto 0); -- return value
   variable fan_in : integer := NumIn;      -- fan-in of the current layer
begin
   for i in 0 to n-1 loop
      ret(i) := log2(fan_in) + log2(NumN(i)); -- weight memory length of layer i
      fan_in := NumN(i); -- this layer's outputs feed the next layer
   end loop;
   return ret;
end assign_addrl;
|
-- Returns the max value of the input integer vector:
function calculate_max(v : int_vector) return integer is
   variable best : integer := 0; -- running maximum (starts at 0, as in max2 accumulation)
begin
   for i in 0 to v'length-1 loop
      if v(i) > best then
         best := v(i);
      end if;
   end loop;
   return best;
end calculate_max;
|
-- Calculate the maximum of the multiplications of two vectors element by element
-- Arguments:
--   v1 : input vector 1
--   v2 : input vector 2
-- Return: maximum of the multiplications of two vectors element by element
function calculate_max_mul(v1 : int_vector; v2 : int_vector) return integer is
   variable best : integer := 0; -- running maximum of the products
begin
   assert v1'length = v2'length
      report "Error in calculate_max_mul: vector's length do not coincide."
      severity error;
   for i in 0 to v1'length-1 loop
      best := max2(best, v1(i)*v2(i));
   end loop;
   return best;
end calculate_max_mul;
|
end layers_pkg; |
---------------------------------------------------------------------------------- |
-- Company: CEI - UPM |
-- Engineer: David Aledo |
-- |
-- Create Date: 01.10.2015 |
-- Design Name: Configurable ANN |
-- Package Name: layers_pkg
-- Project Name: |
-- Target Devices: |
-- Tool Versions: |
-- Description: define array types for generics, functions to give them values from |
-- string generics, and other help functions |
-- Dependencies: |
-- |
-- Revision: |
-- Revision 0.01 - File Created |
-- Additional Comments: |
-- |
---------------------------------------------------------------------------------- |
|
library IEEE; |
use IEEE.STD_LOGIC_1164.all; |
use IEEE.numeric_std.all; |
|
--library proc_common_v3_00_a; -- Deprecated library from XPS tool
--use proc_common_v3_00_a.proc_common_pkg.all; |
|
package layers_pkg is

   -- Array types for generics:
   type int_vector is array (natural range <>) of integer; -- Generic integer vector
   type ltype_vector is array (integer range <>) of string(1 to 2); -- Layer type vector (2-character tags, e.g. "SP")
   type ftype_vector is array (integer range <>) of string(1 to 6); -- Activation function type vector (6-character tags, e.g. "linear")
   -- Note: these strings cannot be unconstrained, so every layer-type tag is
   -- exactly 2 characters and every activation-function tag exactly 6 characters.

   -- Functions to assign values to vector types from string generics:
   -- Arguments:
   --   str_v : string to be converted (elements separated by single spaces)
   --   n : number of elements of the vector
   -- Return: assigned vector
   function assign_ints(str_v : string; n : integer) return int_vector;
   function assign_ltype(str_v : string; n : integer) return ltype_vector;
   function assign_ftype(str_v : string; n : integer) return ftype_vector;

   -- Other functions:

   -- Argument: c : character to be checked
   -- Return: TRUE if c is 0, 1, 2, 3, 4, 5, 6, 7, 8 or 9
   function is_digit(c : character) return boolean;


   -- Base two logarithm for int_vector:
   -- Arguments:
   --   v : integer vector
   --   n : number of elements of the vector
   -- Return : integer vector of the base two logarithms of each element of v
   function log2(v : int_vector; n : integer) return int_vector;

   -- Calculate the total weight and bias memory address length:
   -- Arguments:
   --   NumIn : number of inputs of the network
   --   NumN : number of neurons of each layer
   --   n : number of layers (number of elements of NumN)
   -- Return: total weight and bias memory address length (integer)
   function calculate_addr_l(NumIn : integer; NumN : int_vector; n : integer) return integer;

   -- Assign the weight and bias memory address length of each layer:
   -- Arguments:
   --   NumIn : number of inputs of the network
   --   NumN : number of neurons of each layer
   --   n : number of layers (number of elements of NumN and the return integer vector)
   -- Return: weight and bias memory address length of each layer (integer vector)
   function assign_addrl(NumIn : integer; NumN : int_vector; n : integer) return int_vector;

   -- Calculate the maximum of the multiplications of two vectors element by element
   -- Arguments:
   --   v1 : input vector 1
   --   v2 : input vector 2
   -- Return: maximum of the multiplications of two vectors element by element
   function calculate_max_mul(v1 : int_vector; v2 : int_vector) return integer;

   -- Returns the max value of the input integer vector:
   function calculate_max(v : int_vector) return integer;

   -- Functions re-implemented here from the deprecated library proc_common_v3_00_a:
   function max2 (num1, num2 : integer) return integer;
   function log2(x : natural) return integer;

end layers_pkg;
|
package body layers_pkg is |
|
-- Return the larger of two integers (num1 is returned on a tie).
function max2 (num1, num2 : integer) return integer is
begin
   if num2 > num1 then
      return num2;
   end if;
   return num1;
end function max2;
|
-- Function log2 -- returns number of bits needed to encode x choices
--   x = 0  returns 0
--   x = 1  returns 0
--   x = 2  returns 1
--   x = 4  returns 2, etc.
-- i.e. ceiling(log2(x)) for x >= 1. The loop has a fixed bound of 30
-- iterations so XST can unroll it; arguments above 2**30 are only
-- rejected in simulation (see the translate_off region below).
function log2(x : natural) return integer is
   variable i : integer := 0;   -- candidate result (number of bits so far)
   variable val: integer := 1;  -- 2**i, grown alongside i
begin
   if x = 0 then
      return 0; -- by convention zero choices need no bits
   else
      for j in 0 to 29 loop -- for loop for XST (fixed bound, no while loop)
         if val >= x then null; -- 2**i already covers x: stop growing
         else
            i := i+1;
            val := val*2;
         end if;
      end loop;
      -- Fix per CR520627 XST was ignoring this anyway and printing a
      -- Warning in SRP file. This will get rid of the warning and not
      -- impact simulation.
      -- synthesis translate_off
      assert val >= x
         report "Function log2 received argument larger" &
         " than its capability of 2^30. "
         severity failure;
      -- synthesis translate_on
      return i;
   end if;
end function log2;
|
|
-- Return TRUE when c is a decimal digit ('0' to '9').
-- The digit characters are contiguous in VHDL's predefined character
-- type, so a simple range comparison is equivalent to the case list.
function is_digit(c : character) return boolean is
begin
   return (c >= '0') and (c <= '9');
end is_digit;
|
|
-- Assign values to an integer vector from a string:
-- Arguments:
--   str_v : string to be converted (decimal numbers separated by spaces)
--   n : number of elements of the vector
-- Return: assigned integer vector
-- The string is scanned from its last character towards the first, so each
-- element is accumulated digit by digit from least to most significant.
function assign_ints(str_v : string; n : integer) return int_vector is
   variable elem   : integer := n-1; -- index of the element being built (last element first)
   variable weight : integer := 1;   -- decimal weight of the next digit
   variable ret    : int_vector(n-1 downto 0) := (others => 0); -- return value
begin
   for pos in str_v'length downto 1 loop -- walk the string backwards
      if str_v(pos) /= ' ' then
         assert is_digit(str_v(pos)) -- every non-space character must be a digit
            report "Error in assign_ints: character " & str_v(pos) & " is not a digit."
            severity error;
         -- accumulate the digit's value at its decimal weight:
         ret(elem) := ret(elem) + (character'pos(str_v(pos))-character'pos('0'))*weight;
         weight := weight*10;
      else -- a space closes the current element
         assert elem > 0
            report "Error in assign_ints: number of elements in string is greater than n."
            severity error;
         elem := elem -1;
         weight := 1;
      end if;
   end loop;
   assert elem = 0
      report "Error in assign_ints: number of elements in string is less than n."
      severity error;
   return ret;
end assign_ints;
|
-- Assign values to an activation function type vector from a string:
-- Arguments:
--   str_v : string to be converted (6-character tags separated by spaces)
--   n : number of elements of the vector
-- Return: assigned activation function type vector
function assign_ftype(str_v : string; n : integer) return ftype_vector is
   variable idx : integer := 0; -- element currently being filled
   variable col : integer := 1; -- write position inside that element
   variable ret : ftype_vector(n-1 downto 0) := (others => "linear"); -- return value
begin
   for k in 1 to str_v'length loop -- scan every character of str_v
      if str_v(k) = ' ' then -- a space closes the current element
         idx := idx +1;
         col := 1;
      else
         ret(idx)(col) := str_v(k); -- copy one tag character
         col := col +1;
      end if;
   end loop;
   assert idx = n-1
      report "Error in assign_ftype: number of elements in string is less than n."
      severity error;
   return ret;
end assign_ftype;
|
-- Assign values to a layer type vector from a string:
-- Arguments:
--   str_v : string to be converted (2-character tags separated by spaces)
--   n : number of elements of the vector
-- Return: assigned layer type vector
function assign_ltype(str_v : string; n : integer) return ltype_vector is
   variable idx : integer := 0; -- element currently being filled
   variable col : integer := 1; -- write position inside that element
   variable ret : ltype_vector(n-1 downto 0) := (others => "SP"); -- return value
begin
   for k in 1 to str_v'length loop -- scan every character of str_v
      if str_v(k) = ' ' then -- a space closes the current element
         idx := idx +1;
         col := 1;
      else
         -- only 'P' (parallel) and 'S' (serial) are legal layer-type characters:
         assert str_v(k) = 'P' or str_v(k) = 'S'
            report "Error in assign_ltype: character " & str_v(k) & " is not 'P' (parallel) or 'S' (serial)."
            severity error;
         ret(idx)(col) := str_v(k);
         col := col +1;
      end if;
   end loop;
   assert idx = n-1
      report "Error in assign_ltype: number of elements do not coincide with number of introduced elements."
      severity error;
   return ret;
end assign_ltype;
|
-- Calculate the total weight and bias memory address length:
-- Arguments:
--   NumIn : number of inputs of the network
--   NumN : number of neurons of each layer
--   n : number of layers (number of elements of NumN)
-- Return: total weight and bias memory address length (integer)
function calculate_addr_l(NumIn : integer; NumN : int_vector; n : integer) return integer is -- matrix + b_sel
   variable addr_l : integer := log2(NumIn)+log2(NumN(0)); -- return value. Initialized with the weight memory length of the first layer
begin
   -- Calculate the maximum of the weight memory lengths over all layers:
   for i in 1 to n-1 loop
      -- Bug fix: the closing parenthesis was misplaced; the previous code read
      -- log2(NumN(i-1)+log2(NumN(i))), i.e. the logarithm of a sum instead of
      -- the sum of logarithms, which under-sizes the address bus and disagrees
      -- with assign_addrl (and with the earlier revision of this function).
      addr_l := max2( addr_l, log2(NumN(i-1))+log2(NumN(i)) );
   end loop;
   addr_l := addr_l +1; -- add the bias select bit
   return addr_l;
end calculate_addr_l;
|
-- Base two logarithm for int_vector:
-- Arguments:
--   v : integer vector
--   n : number of elements of the vector
-- Return : integer vector of the base two logarithms of each element of v
function log2(v : int_vector; n : integer) return int_vector is
   variable ret : int_vector(n-1 downto 0); -- return value
begin
   for i in ret'range loop -- apply the scalar log2 element-wise
      ret(i) := log2(v(i));
   end loop;
   return ret;
end log2;
|
-- Assign the weight and bias memory address length of each layer:
-- Arguments:
--   NumIn : number of inputs of the network
--   NumN : number of neurons of each layer
--   n : number of layers (number of elements of NumN and the return integer vector)
-- Return: weight and bias memory address length of each layer (integer vector)
function assign_addrl(NumIn : integer; NumN : int_vector; n : integer) return int_vector is
   variable ret : int_vector(n-1 downto 0); -- return value
begin
   for i in 0 to n-1 loop
      if i = 0 then
         ret(i) := log2(NumIn)+log2(NumN(0)); -- first layer is fed by the network inputs
      else
         ret(i) := log2(NumN(i-1))+log2(NumN(i)); -- later layers are fed by the previous layer
      end if;
   end loop;
   return ret;
end assign_addrl;
|
-- Returns the max value of the input integer vector:
function calculate_max(v : int_vector) return integer is
   variable ac_max : integer := 0; -- running maximum (starts at 0, as in max2 accumulation)
begin
   for i in 0 to v'length-1 loop
      if v(i) > ac_max then
         ac_max := v(i);
      end if;
   end loop;
   return ac_max;
end calculate_max;
|
-- Calculate the maximum of the multiplications of two vectors element by element
-- Arguments:
--   v1 : input vector 1
--   v2 : input vector 2
-- Return: maximum of the multiplications of two vectors element by element
function calculate_max_mul(v1 : int_vector; v2 : int_vector) return integer is
   variable prod   : integer;      -- element-wise product
   variable ac_max : integer := 0; -- running maximum of the products
begin
   assert v1'length = v2'length
      report "Error in calculate_max_mul: vector's length do not coincide."
      severity error;
   for i in 0 to v1'length-1 loop
      prod := v1(i)*v2(i);
      if prod > ac_max then
         ac_max := prod;
      end if;
   end loop;
   return ac_max;
end calculate_max_mul;
|
end layers_pkg; |
/RTL_VHDL_files/support_pkg.vhd
0,0 → 1,37
library IEEE; |
use IEEE.STD_LOGIC_1164.all; |
use IEEE.numeric_std.all; |
use work.layers_pkg.all; |
package support_pkg is

   -- generic constants:

   constant NbitIn : natural := 12;  -- network input bit width
   constant LSB_In : natural := 8;   -- fractional bits of the inputs
   constant Nbit : natural := 12;    -- internal data bit width
   constant NbitW : natural := 24;   -- weight/bias memory word width
   constant LSB_OUT : natural := 8;  -- fractional bits of the outputs (also used by real2stdlv)
   constant Nlayer : natural := 3;   -- number of layers

   constant NbitOut : integer := 12 ; -- network output bit width
   constant NumIn : integer := 1;     -- number of network inputs
   constant NumN : int_vector(Nlayer-1 downto 0) := assign_ints("2 3 1",Nlayer);    -- neurons per layer
   constant LSbit : int_vector(Nlayer-1 downto 0) := assign_ints("8 8 8",Nlayer);   -- fractional bits per layer
   constant NbitO : int_vector(Nlayer-1 downto 0) := assign_ints("12 12 12",Nlayer); -- output bit width per layer
   constant l_type : string := "SP PS SP"; -- Layer type of each layer
   constant f_type : string := "siglu2 siglu2 siglu2"; -- Activation function type of each layer
   -- NOTE(review): ftype_vector tags are exactly 6 characters; confirm "siglu2"
   -- matches the tag string compared inside activation_function's generate
   -- conditions, since a mismatched tag silently selects no activation instance.

   -- Convert a real value to a fixed-point std_logic_vector of width bitW.
   function real2stdlv (bitW : natural; din : real) return std_logic_vector;

end support_pkg;
|
package body support_pkg is

   -- Convert a real value into a signed fixed-point std_logic_vector of
   -- width bitW with LSB_OUT fractional bits: the value is scaled by
   -- 2**LSB_OUT, rounded to the nearest integer, and sign-extended/truncated
   -- to bitW bits by to_signed.
   function real2stdlv (bitW : natural; din : real) return std_logic_vector is
      constant scale : real := 2.0**LSB_OUT; -- fixed-point scaling factor
   begin -- real2stdlv
      return std_logic_vector(to_signed(integer(din*scale), bitW));
   end real2stdlv;

end support_pkg;
/RTL_VHDL_files/wb_init.vhd
0,0 → 1,72
library ieee; |
use ieee.std_logic_1164.all; |
use ieee.numeric_std.all; |
library work; |
use work.support_pkg.all; |
use work.layers_pkg.all; |
package wb_init is
   -- Initialization constants for the ANN's weight and bias memories.
   -- Array bounds (3 downto 0) give room for up to 4 words per column and
   -- 4 columns per layer, which covers the largest layer of NumN = "2 3 1".
   type ramd_type is array (3 downto 0) of std_logic_vector(NbitW-1 downto 0); -- one memory column of NbitW-bit words
   type layer_ram is array (3 downto 0) of ramd_type; -- all weight columns of one layer
   type w_ram is array (integer range <>) of layer_ram; -- one layer_ram per layer
   type b_type is array (integer range <>) of ramd_type; -- one bias column per layer
   -- Weight initialization, indexed as w_init(layer)(column)(word).
   -- Values are converted to fixed point by real2stdlv (scaled by 2**LSB_OUT).
   constant w_init : w_ram :=
   (
   0 => ( -- layer 0: 1 input feeding 2 neurons
   0 => (
   0 => real2stdlv(NbitW,-0.8964),
   1 => real2stdlv(NbitW,-2.6600),
   others =>(others => '0')
   ),
   others=>(others =>(others => '0'))
   ),
   1 => ( -- layer 1: 2 inputs feeding 3 neurons
   0 => (
   0 => real2stdlv(NbitW,-5.6056),
   1 => real2stdlv(NbitW,-1.5274),
   2 => real2stdlv(NbitW,-8.4909),
   others =>(others => '0')
   ),
   1 => (
   0 => real2stdlv(NbitW,1.0885),
   1 => real2stdlv(NbitW,0.7244),
   2 => real2stdlv(NbitW,3.8977),
   others =>(others => '0')
   ),
   others=>(others =>(others => '0'))
   ),
   2 => ( -- layer 2: 3 inputs feeding 1 neuron
   0 => (
   0 => real2stdlv(NbitW,6.0449),
   others =>(others => '0')
   ),
   1 => (
   0 => real2stdlv(NbitW,-2.8724),
   others =>(others => '0')
   ),
   2 => (
   0 => real2stdlv(NbitW,-5.0188),
   others =>(others => '0')
   ),
   others=>(others =>(others => '0'))
   )
   );

   -- Bias initialization, indexed as b_init(layer)(neuron).
   -- NOTE(review): each bias is pre-multiplied by 2.0**LSB_OUT before being
   -- passed to real2stdlv, which scales by 2.0**LSB_OUT again — so biases
   -- carry twice the fractional shift of the weights. This may be intentional
   -- (the bias is added to weight*input products, which carry both scalings);
   -- confirm against the accumulator's fixed-point format.
   constant b_init : b_type :=
   (
   0 => (
   0 => real2stdlv(NbitW,(2.0**LSB_OUT)*(0.3704)),
   1 => real2stdlv(NbitW,(2.0**LSB_OUT)*(0.7149)),
   others =>(others => '0')
   ),
   1 => (
   0 => real2stdlv(NbitW,(2.0**LSB_OUT)*(2.8121)),
   1 => real2stdlv(NbitW,(2.0**LSB_OUT)*(0.3690)),
   2 => real2stdlv(NbitW,(2.0**LSB_OUT)*(2.4685)),
   others =>(others => '0')
   ),
   2 => (
   0 => real2stdlv(NbitW,(2.0**LSB_OUT)*(0.0784)),
   others =>(others => '0')
   )
   );
end wb_init;