home *** CD-ROM | disk | FTP | other *** search
- inst4.0 ! 02-Jun-88 (Sample.ins) Sample 2-input Xor (backprop)
- !****************************************************************
- !* *
- !* "Sample" Back-Propagation Network Builder *
- !* *
- !****************************************************************
- !
- ! This script builds a back-propagation network designed
- ! to solve the exclusive "OR" problem with two inputs.
-
- ! *** Load the Control Strategy and LRS if needed
-
- @LdCS "backprop" !load backprop control strategy from disk
- @LdLR "backprop" !load backprop learn/recall (L/R) schedule
-
- =netn "InstaNet (tm) 'Sample' Back-Propagation Network version 1.00 20-Jun-88"
- !=DLnF 1 !learn re-display on (commented out: learn display stays off)
- =DLnN 53 !learn, re-display every 53 presentations
- =DRcF 1 !recall re-display on
- =DRcN 1 !show each recall
-
- ! *** Build the Input Layer ***
-
- @LLdf !load default layer to mi_layer structure
- =LDln "In" !layer name
- =Lpes 2 !2 PEs: one per XOR input
- =Ltrn "Linear" !linear transfer — input acts as a pass-through buffer
- =LDsp 6 !spacing
- =x 100 !place to put layer on screen
- =y 80
- #Incl "stdnwgtf.iif" !standard # weight fields
- @LAdd !add the input layer
- =x 130 !position hidden & output over center
-
- ! *** Build the first hidden Layer ***
-
- @LLdf !start with default layer again
- =LDln "H" !layer name
- =Lpes 1 !one hidden PE (minimal net for XOR with shortcut connections)
- =Ltrn "Sigmoid" !transfer function
- =Llrn "Delta-Rule" !Generalized Delta learning rule
- +y 60 !up higher on display (above input layer)
- #Incl "stdnwgtf.iif" !standard # weight fields
- @LAdd !add the hidden layer (becomes layer 1)
-
- ! *** Connect Hidden Layer to Bias & Input Layers ***
-
- =SPEl 1 !current layer (hidden layer)
- @SlPE !select it as destination (sb already)
- =NPEl -1 !near to bias term (source)
- @NrPE
- =cnwt 1.0 !connection weight
- =cnty WVar !variable weight type — presumed adjustable during learning
- =cnsc WAbs !absolute weight scheme — TODO confirm against InstaNet manual
- @LCFl !fully connect to bias element
- =NPEl 0 !input layer
- @NrPE
- @LCFl !fully connect to input layer
-
- ! *** Build the output layer & connect it to prior, input & bias term ***
-
- @LLdf !load default layer to mi_layer structure
- =LDln "Out" !layer name
- =Lpes 1 !single output PE (XOR has one output)
- =Ltrn "Sigmoid" !transfer function
- =Llrn "Delta-Rule" !Generalized Delta learning rule
- +y 60
- #Incl "stdnwgtf.iif" !standard # weight fields
- @LAdd !add the output layer (becomes layer 2)
- =n7 LayN !save output layer number for the stdprobe include below
-
- =SPEl 2 !current layer (output layer)
- @SlPE !select it as destination (sb already)
- =NPEl -1 !near to bias term (source)
- @NrPE
- @LCFl !fully connect to bias element
- =NPEl 1 !previous (hidden) layer
- @NrPE
- @LCFl !fully connect to hidden layer
- =NPEl 0 !input layer (shortcut connections past the hidden layer)
- @NrPE
- @LCFl !fully connect to input layer
-
- ! *** Select Control Strategy & L/R Schedule ***
-
- =LrnN 2000 !learn counter for LearnN (presentations per learn run)
- @LLsl !load super layer
- =Lctl "backprop" !backprop control strategy
- =Llrs "backprop" !backprop L/R Schedule
- =Llnn "sample" !name of learn input file
- =Lrcn "sample" !name of recall output file
- =Llio 321 !binary,sequential,load (0x0141)
- =Lrio 1137 !binary,seq,inp, des out, header (0x0471)
- =Lscl 0 !input low-value
- =Loff 1 !input high-value
- =Llow 0 !output low-value
- =Lhgh 1 !output high-value
- @SVsl !save it back
- !
- =n5 0.0
- =n6 1.0
- =n4 1
- #Incl "stdprobe.iif" !Standard probe include file
- !
- =jogl -.1 !lower limit for jog
- =jogh +.1 !upper limit for jog
- =seed 257 !starting seed number
- @seed !set the seed
- @Nini !randomize the network
- @EOF
-