
kernel.c

00001 
00200 #include <math.h>
00201 #include <vector>
00202 //#include <unistd.h> // Temporary; for sync()
00203 
00204 #include "ind_types.h"
00205 #include "ipc.h"
00206 #include "cmdparam.h"
00207 #include "binarysave.h"
00208 #include "shuffledrand.h"
00209 #include "globals.h"
00210 #include "kernel.h"
00211 #include "kernelwrapper.h"
00212 #include "stringutils.h"
00213 #include "neuralregion.h"
00214 
00215 /* Currently multiprocessor support is unimplemented; the calls to
00216    ipc_put and ipc_get use dynamically-allocated data, whose address
00217    is not known on remote PEs.  They would need to be changed to
00218    broadcast addresses into known locations before broadcasting
00219    the data. */
00220 #if (NPES>1)
00221 #error "Multiprocessor support is not implemented for kernel.c"
00222 #endif
00223 
00224 
00225 
00226 /******************************************************************************/
00227 /* Defines and typedefs                                                       */
00228 /******************************************************************************/
00229 
00230 /*  For speed, define NUM_EYES_IS_CONSTANT=<num_eyes> to declare that
00231     num_eyes is constant and equal to the given value.  Skip this if
00232     flexibility is more important than speed.  However, if you can
00233     make this parameter constant, it speeds up certain inner loops
00234     considerably, because automatic loop unrolling can then eliminate
00235     the entire loop in many cases.  Historically, the speedup has been
00236     about 10-15%.  It is your responsibility to set num_eyes to match,
00237     although warnings should be generated if it does not.
00238 */
00239 /* #define NUM_EYES_IS_CONSTANT 1 */
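/* Illustrative sketch (not part of the original source): this flag is more
   conveniently supplied on the compiler command line than by editing the
   define above, e.g.

       cc -DNUM_EYES_IS_CONSTANT=2 -c kernel.c

   which would make num_aff a compile-time constant of 2 below.  The exact
   compiler invocation is an assumption for illustration; only the macro
   name comes from the comment above. */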
00240 
00241 
00242 
00255 #define ACTLIST_START(actlist) 1
00256 #define ACTLIST_COUNT(actlist) ((actlist)[0])
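/* Usage sketch (an illustration, not original code): an actlist stores its
   length in element 0 and its entries from element 1 onward, so a typical
   traversal of the active columns recorded for one row is

       const actlist& list = prev_activity_list[i];
       for (int idx = ACTLIST_START(list); idx <= ACTLIST_COUNT(list); idx++)
         process_column(list[idx]);   // process_column() is hypothetical

   which mirrors how present_inputs() and compute_responses() walk the
   lists below. */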
00257 
00258 /* 12-09-95 Added by J.A.B. -- Reload from file immediately after save.
00259    Allows ASCII saving in the middle of training by restoring the weights
00260    that the save destroyed; it can also be used for debugging.
00261 */
00262 /* #define RELOAD_AFTER_WEIGHT_SAVE */
00263 
00265 #define VERIFY_WEIGHT_SAVES_ERROR_LIMIT 10
00266 
00267 
00268 
00269 
00270 /* Macros to assist in handling circular receptive fields                      */
00271 
00273 #define WITHIN_RADIUS(xdistance,ydistance,radius_sq) \
00274 (((xdistance)*(xdistance) + (ydistance)*(ydistance)) <= (radius_sq))
00275 
00277 #define WITHIN_RADIUS_SQ(xdistance_sq,ydistance_sq,radius_sq) \
00278 ((xdistance_sq) + (ydistance_sq) <= (radius_sq))
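/* Worked example (sketch): for a unit at offset (xdistance,ydistance)=(3,4)
   from the center and radius 5, WITHIN_RADIUS(3,4,5*5) evaluates
   3*3 + 4*4 = 25 <= 25, so the unit lies exactly on the boundary and is
   kept.  Note that the third argument is the *squared* radius; callers are
   expected to pass radius*radius. */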
00279 
00280 
00281 
00282 /******************************************************************************/
00283 /* LissomMap class                                                            */
00284 /******************************************************************************/
00285 
00289 class LissomMap : public BasicLissomMap, public InternalNeuralRegion {
00290   typedef std::vector<int> actlist;
00291   
00292 public:
00293   LissomMap(const string& name_i, Dimensions dims, 
00294             bool init_weights_, ParamMap* params_);
00295 
00296   virtual ~LissomMap() {}
00297 
00298 
00299   virtual void activate(bool learn=false, bool settle=true, bool activatefn=true);
00300 
00301   /* For OBJ-LISSOM, but also useful for the backproject command */
00302   virtual void backproject();
00303   virtual void backproject(const string& name, const double gammaaff=1.0);
00304 #ifdef OBJ_LISSOM  
00305   virtual void resp_to_residual();
00306   virtual void resp_to_residual(const string& name, const double gammaaff=1.0);
00307   virtual void train_obj_weights();
00308 #endif /* OBJ_LISSOM */
00309   
00310 
00311 
00312   virtual const WeightMatrix get_weights(const string& name, int ui=0, int uj=0) const;
00313   virtual NeuralRegion* get_input(const string& name);
00314   virtual void prune();
00315   virtual void save_state(const string&);
00316   virtual bool restore_state(const int, const string&);
00317   virtual double size_connection_bytes()   const;
00318   virtual double size_unique_connections() const;
00319   virtual void   add_input(const string& name_, NeuralRegion& upstream_region,
00320                            WeightFunction& fn, Length size_scale);
00321   virtual Dimensions input_dimensions(WeightFunction& fn, Length size_scale=1.0);
00322   virtual WeightBounds* get_weights_bounds(const string& name, int ui=0, int uj=0) const;
00323   virtual int    num_aff_inputs() const
00324     {  return aff_inputs.size();  }
00325 
00326 protected:
00327   void   present_inputs(bool learn, bool settle=true, bool activate=true);
00328   void   init_weights(bool init_weights);
00329   void   collect_activation_data(int dest_pe=Uninitialized);
00330   double dist_lat_wt(double value, double amt_of_randomness);
00331   double lat_resp(int ui, int uj, int radius, int array_width, l_weight *weights, int weight_idx_bound);
00332   double lat_resp_rad1(int ui, int uj, int radius, int array_width, int array_width_2, l_weight *weights, int weight_idx_bound);
00333   double lat_resp_named(int ui, int uj, const string& name);
00334   double sigmoid(double activity, double sigdelta, double sigbeta);
00335   double truesigmoid(double activity, double beta);
00336   double truesigmoidprime(double sigmoidactivity, double beta);
00337   double positivesigmoid(double activity, double beta);
00338   double logsigmoid(double activity, double beta);
00339   double logsigmoidprime(double activity, double beta);
00340   int    binary_search(const actlist& list, int lower, int upper, int value);
00341   double change_lateral_exc_radius(double old_radius, double new_radius);
00342   void   clear_weights(void);
00343   void   randomize_lat_wts(void);
00344   void   compute_responses(int ts, bool activate, bool continuousdynamics);
00345   inline  void   crop_actlist(const actlist&, int lowval, int highval, int *lowidx, int *highidx);
00346   void   initialize_actlists(void);
00347   void   initialize_markers(void);
00348   void   modify_all_weights(void);
00349   void   modify_lat_wts(double alpha_exc, double alpha_inh);
00350   void   modify_latwt_loop(int i, int j, int radius, int array_width, l_weight *weights, int weight_idx_bound, double alpha);
00351   void   modify_latwt_loop_killed(int i, int j, int radius, int array_width, l_weight *weights, int weight_idx_bound, double alpha);
00352   void   modify_latwt_loop_rad1(int ui, int uj, int radius, int array_width, int array_width_2, l_weight *weights, int weight_idx_bound, double alpha);
00353   void   modify_weights(double alpha, bool normalize);
00354   void   normalize_afferent_wts(int lowk, int highk, int lowl, int highl, int row, int j, double length);
00355   void   normalize_lat_wts(int i, int j, int radius, int array_width, l_weight *weights);
00356   double prune_circular_aff_weights(int ui, int uj, int radius, AfferentWeightsList& weights,
00357                                     bool smooth, double radius_trim, double smooth_trim,
00358                                     double length);
00359   void   prune_circular_lat_weights(int ui, int uj, int radius_sq, int array_width, l_weight *weights,
00360                                     bool smooth, double radius_trim, double smooth_trim);
00361   void   receptor_normalize(void); 
00362   void   reduce_lateral_radius(int ui, int uj, double old_radius, double new_radius, int array_width, l_weight *weights,
00363                                bool circular, bool smooth, double smooth_trim);
00364   void   response_to_input(bool continuousdynamics);
00365   void   set_markers(int radius); 
00366   void   settle_responses(bool learn, int tsettle, bool activate, bool continuousdynamics);
00367   void   setup_afferent_weights(Neuron& neuron, AfferentWeightsList& awts,
00368                                 int ui, int uj, int radius, double radius_sq,
00369                                 int preset, double sigma, double amt_of_randomness,
00370                                 bool circular, bool smooth, 
00371                                 double radius_trim, double smooth_trim,
00372                                 double force_cx=-1, double force_cy=-1);
00373   void   setup_lateral_weights(int ui, int uj, int radius,
00374                                int array_width, l_weight *weights,
00375                                int preset, double sigma, double amt_of_randomness,
00376                                bool circular, bool smooth, double radius_trim,
00377                                double smooth_trim, int* con_killed_flag);
00378 //void   verify_actlist(const actlist& list);
00379 //void   verify_actlist_array(const actlist& lists[],int num);
00380   void   verify_saved_lateral_weights( int i, int j, int radius, int array_width, l_weight *old_weights, l_weight *new_weights, double tolerance, const char *description);
00381   void   verify_saved_weights( double tolerance );
00382 
00383 
00384   SETFNOBJ_DECLARE2(LissomMap,exc_rad_setfn2);
00385 
00386   OwningPointer<ParamMap> params;
00387 
00388 #ifndef NUM_EYES_IS_CONSTANT
00389   const int num_aff;
00390 #endif
00391   int    exc_connections_killed;          
00392   int    inh_connections_killed;          
00394   const int exc_array_width_2;            
00395   const int RN_sq;                        
00397   ActivityMatrix gather_activity;         
00398   ActivityMatrix init_activity;           
00399   ActivityMatrix map_activity;            
00400   ActivityMatrix resp_to_inp;             
00401   ActivityMatrix prev_map_activity;       
00403   /* Using a vector of vectors results in a slight performance
00404      decrease compared to an array of vectors (approximately 5%
00405      overall for small (30MB) networks), but is significantly easier
00406      to manage.  If the speedup is desired, arrays could be allocated
00407      for these variables instead.
00408   */
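  /* Sketch of the array-of-vectors alternative mentioned above (an
     assumption for illustration, not code used by this file):

         actlist* activity_list      = new actlist[perows];
         actlist* prev_activity_list = new actlist[N];

     with each element resized to N+1 and both arrays freed in ~LissomMap().
     The constructor below uses the std::vector form instead. */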
00409   typedef std::vector<actlist> ActivityListTable;
00410   ActivityListTable activity_list;        
00411   ActivityListTable prev_activity_list;   
00413   // Should be <bool> but for some reason that doesn't appear to work well with MTL (using mtl-2.1.2-15)
00414   typedef MatrixType<int>::rectangular  MarkerType;
00415   MarkerType activity_marker;             
00417   typedef std::vector<double> SumType;
00418   SumType aff_norm_sum;                   
00419   SumType afferent_sum;                   
00420   SumType temp_sum;                       
00422   typedef NeuralRegion::ActivityMatrix        aff_input_type;
00423   typedef std::vector<const aff_input_type*>  aff_inputs_type;
00424   aff_inputs_type aff_inputs;
00425   std::vector<string> aff_names;
00426 
00427   typedef Boundary::BoundingCircle<int,int> BoundingCircle;
00428   typedef Boundary::AARBoundingBox<int,int> BoundingRectangle;
00429 
00432   int array_radius_int(const ParamMap& parammap, const string& radius_param) const
00433     {  return array_radius_int(parammap.get_with_default(radius_param.c_str(),0.0));  }
00434 
00435   int array_radius_int(const double radius_float) const
00436     //Probably would be better, but would require extra work to support:
00437     //{  return Generic::round(radius_float);  }
00438     {  return int(radius_float);  }
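  /* Worked example (sketch): with radius_float = 6.7 this truncating
     version returns 6, whereas the Generic::round() variant sketched in the
     comment above would presumably return 7, so truncation slightly shrinks
     non-integral radii. */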
00439 
00440 
00441 
00442 /* yschoe: the following is an (almost) literal copy from fixedwtregion */
00443 protected: 
00444 
00446   struct Input {
00447     Input(const string& name_i, const WeightMatrix& kernel_i,
00448           NeuralRegion* input_i, const Length size_scale_i)
00449       : name_str(name_i), kernel(kernel_i), input(input_i), size_scale(size_scale_i) { }
00451     const string& name() const {  return name_str;  }
00452 
00453     const double size_bytes() const {  return mat::size(kernel)*sizeof(WeightMatrix::value_type);  }
00454     const double size_conns() const {  return mat::size(kernel);  }
00455 
00456     string name_str;              
00457     WeightMatrix kernel;          
00458     NeuralRegion* input;    
00459     Length size_scale;            
00460   };
00461 
00462   inline Subscript input_row(const Input& input, Subscript row) const
00463     {  return Subscript(row*(input.size_scale)+(input.kernel.nrows()/2.0));  }
00464 
00465   inline Subscript input_col(const Input& input, Subscript col) const
00466     {  return Subscript(col*(input.size_scale)+(input.kernel.ncols()/2.0));  }
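  /* Worked example (sketch, assuming Subscript is an integral index type):
     for an Input with size_scale = 1.0 and a 7x7 kernel,
     input_row(input,10) computes 10*1.0 + 7/2.0 = 13.5, which the Subscript
     conversion truncates to 13; i.e. destination row 10 samples input row
     13, offset by half the kernel height. */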
00467 
00469   typedef std::vector<Input*> inputs_type;
00470 
00472   const Activity xo,yo; 
00475   inputs_type            inputs;
00476 
00477 };
00478 
00479 
00480 
00482 LissomMap::LissomMap(const string& name_i, Dimensions dims,
00483                      bool init_weights_, ParamMap* params_) :
00484   BasicLissomMap(dims.height,dims.width,
00485                  params_->get_with_default("num_aff_inputs",0),
00486                  params_->get_with_default("RN",0),
00487                  array_radius_int(*params_,"rf_radius"),
00488                  array_radius_int(*params_,"exc_rad"  ),
00489                  array_radius_int(*params_,"inh_rad"  ),
00490                  NPEs),
00491   InternalNeuralRegion(name_i,dims),
00492   params(params_,true),
00493 #ifndef NUM_EYES_IS_CONSTANT
00494   num_aff(params_->get_with_default("num_aff_inputs",0)),
00495 #else
00496 #define num_aff NUM_EYES_IS_CONSTANT
00497 #endif
00498   exc_connections_killed(False), inh_connections_killed(False),
00499     
00500   /* The array widths set below don't change even if exc_radius is decreased. */
00501   exc_array_width_2(2*exc_array_width),
00502   RN_sq(RN*RN),
00503   
00504   // Should these arguments be swapped?  This order is correct
00505   // for a matrix, but does not match the swapped conventions that
00506   // this file is using.
00507   gather_activity(dims.height+1,dims.width+1), // Was [NMAX][NMAX+1]; just being safe here
00508   init_activity(dims.height,dims.width),
00509   map_activity(dims.height,dims.width),
00510   resp_to_inp(dims.height,dims.width),
00511   prev_map_activity(dims.height,dims.width),
00512   
00513   activity_list(perows,actlist(N+1)),
00514   prev_activity_list(N,actlist(N+1)),
00515     
00516   activity_marker(dims.height,dims.width),
00517   
00518   aff_norm_sum(num_aff*RN*RN),
00519   afferent_sum(num_aff*RN*RN),
00520   temp_sum(num_aff*RN*RN)
00521   
00522 {
00523   /* Make sure that our exc_rad will be changed when someone sets it in
00524      our parameter set */
00525   ParameterMap<>* pm = dynamic_cast< ParameterMap<>* >(params.valueptr());
00526   pm->set_local("exc_rad",params->get_with_default("exc_rad",0.0));
00527   pm->lookup("exc_rad").add_setfn(new setfnobj2_exc_rad_setfn2(this));
00528   
00529   /* Initialize activity in the map */
00530   for(int i=0; i< N; i++)   
00531     for(int j=0; j< N; j++)         
00532         prev_map_activity[i][j]=map_activity[i][j]=0.0;
00533 
00534   ipc_notify(IPC_ONE,IPC_SUMMARY,
00535              "%s %d-connection (%dMB%s) network",
00536              (init_weights_? "Initializing" : "Declaring"),
00537              int(size_unique_connections()),(int)(size_connection_bytes()/1024.0/1024.0+1),
00538              (NPEs>1 ? "/PE" : ""));
00539   
00540   init_weights(init_weights_);
00541 
00542   /* Register map for InputResponse */
00543   typedef mat::CArrayMatrixAdapter<double,ActivityMatrix>                       DoubleMatWrapper;
00544   typedef mat::MatrixInterfaceAdapter<DoubleMatWrapper,NeuralRegion::Magnitude> DoubleMatAdapter;
00545   DoubleMatWrapper* inpresp = new DoubleMatWrapper(init_activity, (unsigned int)N, (unsigned int)N);
00546   table_set("InputResponse",  new DoubleMatAdapter(inpresp)); // Swaps coord order for plotting
00547 }
00548 
00549 
00550 
00551 NeuralRegion* LissomMap_create(const string& name_i, NeuralRegion::Dimensions dims,
00552                                bool init_weights_, ParamMap* params)
00553 {  return new LissomMap(name_i,dims,init_weights_,params);  }
00554 
00555 
00556 
00558 cmdstat LissomMap::exc_rad_setfn2(const string& name, Typeless& existing, const Typeless& requested)
00559 {
00560 #ifdef NO_WEIGHTS
00561   (void)name;
00562   existing.set(requested);
00563   
00564 #else 
00565   double oldvalue       = (double)existing.numericvalue();
00566   double requestedvalue = (double)requested.numericvalue();
00567 
00568   if (!network_initialized)
00569     existing.set(requested);
00570   else if (oldvalue != requestedvalue) {
00571     ipc_notify(IPC_ONE,IPC_STD,"Renormalizing %s's weights to change %s from %g to %g",
00572                LissomMap::name().c_str(),name.c_str(),double(oldvalue),double(requestedvalue));
00573     double actualvalue = change_lateral_exc_radius(oldvalue, requestedvalue);
00574     existing.set(Polymorph<double>(actualvalue));
00575   }
00576   exc_rad=array_radius_int(*params,"exc_rad");
00577 #endif
00578   
00579   return CMD_NO_ERROR;
00580 }
00581 
00582 
00583 
00584 void LissomMap::activate(bool learn, bool settle, bool activatefn)
00585 {
00586   collect_activation_data();
00587   present_inputs(learn,settle,activatefn);
00588 
00589   /* Copy the activity to our local matrix to allow clean plotters to be developed */
00590   /* Swaps the index order to match that assumed by ppm_draw.c */
00591   for (Subscript i=0; i<output.nrows(); i++)
00592     for (Subscript j=0; j<output.ncols(); j++)
00593       output[i][j] = bp[i][j] = prev_map_activity[j][i];
00594   //mat::gnuplot(output, name());
00595 
00596 }
00597   
00598 
00600 #define assert_equals(x,y) \
00601 if (x!=y) {  failed=true; ipc_notify(IPC_ALL,IPC_ERROR,"Assertion failed: " #x "!=" #y " (%d!=%d)",x,y);  }
00602 #define assert_ge(x,y) \
00603 if (!(x>=y)) {  failed=true; ipc_notify(IPC_ALL,IPC_ERROR,"Assertion failed: !(" #x ">=" #y ") !(%d>=%d)",x,y);  }
00604 
00605 /* This implementation is essentially just a stub, but it does check that
00606    the values match what we already assume anyway */
00607 void LissomMap::add_input(const string& name_, NeuralRegion& source, WeightFunction& fn, Length size_scale)
00608 {
00609 
00610 
00611   /* yschoe: fixedwtregion style add_input to retain vector of upstream 
00612      regions (source): copied from fixedwtregion.h */
00613   WeightMatrix kernel = (fn)(size_scale);
00614   /* Kernel height and width must be odd */
00615   assert(kernel.nrows()%2==1);
00616   assert(kernel.ncols()%2==1);
00617     
00618   Generic::insert_named(inputs,name_,new Input(name_,kernel,&source,size_scale));
00619   /* end of fixedwtregion style add_input */
00620 
00621 
00622 
00623   const int      afferent_edge_buffer     = params->get_with_default("retina_edge_buffer",0);
00624 
00625   bool failed=false;
00626   if (String::non_numeric_basename_matches<string>(name_,"Afferent")) {
00627     WeightMatrix sampleweights = (fn)(size_scale);
00628     assert_equals(int(sampleweights.nrows()-1)/2,rf_radius);
00629     assert_equals(int(sampleweights.ncols()-1)/2,rf_radius);
00630     assert_equals(int(source.const_activity().nrows()),RN);
00631     assert_equals(int(source.const_activity().ncols()),RN);
00632     const int calculated_N = (int(size_scale*(RN-2*afferent_edge_buffer)+0.0001));
00633     if (calculated_N!=N)
00634       ipc_notify(IPC_ALL,IPC_WARNING,"Mismatched size_scale: %d!=%d",calculated_N,N);
00635     
00636     /* Does one useful thing: adds this input to the list of afferent regions. */
00637     /* Temporarily uses an afferent number of zero as the signal that the
00638        list of inputs needs to be replaced with a new one. */
00639     if (!failed) {
00640       const int aff   = String::numeric_extension<int>(name_);
00641       if (aff==0) {  aff_inputs.resize(0);  aff_names.resize(0);  }
00642       aff_inputs.push_back(&(source.const_activity()));
00643       aff_names.push_back(name_);
00644     }
00645   }
00646   else if (name_ == "LateralExcitatory") {
00647     WeightMatrix sampleweights = (fn)(size_scale);
00648     assert_equals(int(sampleweights.nrows()-1)/2,exc_rad);
00649     assert_equals(int(sampleweights.ncols()-1)/2,exc_rad);
00650     assert_equals(int(source.const_activity().nrows()),N);
00651     assert_equals(int(source.const_activity().ncols()),N);
00652     assert_equals(int(100*size_scale+0.0001),100); // size_scale==1.0
00653   }
00654   else if (name_ == "LateralInhibitory") {
00655     WeightMatrix sampleweights = (fn)(size_scale);
00656     assert_equals(int(sampleweights.nrows()-1)/2,inh_rad);
00657     assert_equals(int(sampleweights.ncols()-1)/2,inh_rad);
00658     assert_equals(int(source.const_activity().nrows()),N);
00659     assert_equals(int(source.const_activity().ncols()),N);
00660     assert_equals(int(100*size_scale+0.0001),100); // size_scale==1.0
00661   }
00662   else {
00663     ipc_notify(IPC_ALL,IPC_ERROR,"Could not add unsupported weight type (%s)",name_.c_str());    
00664   }
00665 
00666   if (failed)
00667     ipc_notify(IPC_ALL,IPC_ERROR,"Unable to add connection %s",name_.c_str());
00668 }
00669 
00670 
00671 NeuralRegion::Dimensions LissomMap::input_dimensions(WeightFunction&, Length size_scale)
00672 {
00673   const int      afferent_edge_buffer     = params->get_with_default("retina_edge_buffer",0);
00674   const int calculated_N = (int(size_scale*(RN-2*afferent_edge_buffer)+0.0001));
00675   if (calculated_N!=N)
00676     ipc_notify(IPC_ALL,IPC_WARNING,"Mismatched size_scale: %d!=%d",calculated_N,N);
00677   
00678   return NeuralRegion::Dimensions(Subscript(RN),Subscript(RN));
00679 }
00680 
00681 
00685 const LissomMap::WeightMatrix LissomMap::get_weights(const string& name, int ui, int uj) const
00686 {
00687   bool failed=false;
00688 
00689   const bool   allow_negative_wts = params->get_with_default("allow_negative_wts",False);
00690 
00691   /* Check assumptions */
00692   assert_ge(ui,0);
00693   assert_ge(uj,0);
00694   assert_ge(N,ui);
00695   assert_ge(N,uj);
00696 
00697   // Multiprocessor note:
00698   /*
00699     At present plotting weights on multiprocessor machines requires that
00700     the plot_pe correctly specify the particular PE on which the weights
00701     reside, which is not something that the user is ordinarily aware of.
00702     Instead we should just fetch the weights from the PE where they
00703     are stored and return them.
00704   */
00705   assert_equals(ROWISLOCAL(ui),true);
00706 
00707   if (failed) return WeightMatrix();
00708   
00709   /* The array indices are swapped to translate between the correct and the legacy formats. */
00710   const Neuron&              neuron = cortex_map[uj][ui];
00711   const AfferentWeightsList& awts   = *(neuron.weights);
00712   //const AfferentWeightsList& awts   = reference_wts.weights;
00713 
00714   // pre-allocate these matrices so that they do not get allocated
00715   // subsequently.
00716   /*
00717   if (aff_w == NULL) 
00718         aff_w = new WeightMatrix(2*rf_radius+1,2*rf_radius+1);
00719   
00720   if (lat_w == NULL) 
00721         lat_w = new WeightMatrix(2*MAX(inh_rad,exc_rad)+1,2*MAX(inh_rad,exc_rad)+1);
00722   */
00723 
00724   /* Select appropriate set of weights to copy */
00725   if (String::non_numeric_basename_matches<string>(name,"Afferent")) {
00726     const int aff   = std::find(ISEQ(aff_names),name) - aff_names.begin();
00727     assert (aff<num_aff);
00728 
00729     const int lowk  = MAX(neuron.centery-rf_radius,0   );
00730     const int highk = MIN(neuron.centery+rf_radius,RN-1);
00731     const int lowl  = MAX(neuron.centerx-rf_radius,0   );
00732     const int highl = MIN(neuron.centerx+rf_radius,RN-1);
00733     
00734     WeightMatrix w(highk-lowk+1,highl-lowl+1);
00735     // WeightMatrix& w=(*aff_w);
00736     
00737     for (int k=lowk; k <=highk; k++)
00738       for (int l=lowl; l<=highl; l++) {
00739         a_weight aw = awts[aff][l-lowl][k-lowk];
00740         w[k-lowk][l-lowl] = ((aw>(-DEATH_FLAG) || allow_negative_wts)?aw:0.0);
00741       }
00742     
00743     return w;
00744   }
00745   else if (name == "LateralExcitatory") {
00746     const int lowi  = MAX(uj-exc_rad,0);
00747     const int highi = MIN(uj+exc_rad,N-1);
00748     const int lowj  = MAX(ui-exc_rad,0);
00749     const int highj = MIN(ui+exc_rad,N-1);
00750     
00751     WeightMatrix w(highj-lowj+1,highi-lowi+1);
00752     // WeightMatrix& w=(*lat_w);
00753     
00754     for (int i=lowi; i<=highi; i++)
00755       for (int j=lowj; j<=highj; j++) {
00756         l_weight lw = neuron.lat_exc_wts[LAT_INDEX(uj,ui,i,j,
00757                                         exc_rad,exc_array_width)]; 
00758         w[j-lowj][i-lowi] = ((lw>(-DEATH_FLAG) || allow_negative_wts)?lw:0.0);
00759       }
00760     
00761     return w;
00762   }
00763   else if (name == "LateralInhibitory") {
00764     const int lowi  = MAX(uj-inh_rad,0);
00765     const int highi = MIN(uj+inh_rad,N-1);
00766     const int lowj  = MAX(ui-inh_rad,0);
00767     const int highj = MIN(ui+inh_rad,N-1);
00768     
00769     WeightMatrix w(highj-lowj+1,highi-lowi+1);
00770     // WeightMatrix& w=(*lat_w);
00771     
00772     for (int i=lowi; i<=highi; i++)
00773       for (int j=lowj; j<=highj; j++) {
00774         l_weight lw = neuron.lat_inh_wts[LAT_INDEX(uj,ui,i,j,
00775                                         inh_rad,inh_array_width)]; 
00776         w[j-lowj][i-lowi] = ((lw>(-DEATH_FLAG) || allow_negative_wts)?lw:0.0);
00777       }
00778       
00779     return w;
00780   }
00781 
00782   /* Unknown set of weights */
00783   return WeightMatrix();
00784 }
00785 
00786 
00787 
00788 NeuralRegion* LissomMap::get_input(const string& name)
00789 {
00790   Input* i = Generic::find_named<Input>(inputs,name);
00791 
00792   return (i? i->input : 0);
00793 
00794 }
00795 
00796 
00797 
00798 LissomMap::WeightBounds* LissomMap::get_weights_bounds(const string& name, int ui, int uj) const
00799 {
00800 
00801   /* The array indices are swapped to translate between the correct and the legacy formats. */
00802   int ci=ui;
00803   int cj=uj;
00804   string radparam = "rf_radius";
00805   NeuralRegion::Bounds b=bounds();
00806   const bool     circular_aff_wts    = params->get_with_default("circular_aff_wts",True);
00807   const bool     circular_lat_wts    = params->get_with_default("circular_lat_wts",True);
00808   bool circular=circular_lat_wts;
00809   bool failed=false;
00810 
00811   /* Check assumptions */
00812   assert_ge(ui,0);
00813   assert_ge(uj,0);
00814   assert_ge(N,ui);
00815   assert_ge(N,uj);
00816   assert_equals(ROWISLOCAL(ui),true);
00817   if (failed) return new NeuralRegion::Bounds();
00818   
00819   if      (name=="LateralExcitatory") radparam = "exc_rad";
00820   else if (name=="LateralInhibitory") radparam = "inh_rad";
00821   else if (String::non_numeric_basename_matches(name,string("Afferent"))) {
00822     circular=circular_aff_wts;
00823     radparam = "rf_radius";
00824     const Neuron& neuron = cortex_map[MAPROW(uj)][ui];    
00825     ci = neuron.centery;
00826     cj = neuron.centerx;
00827     b  = Bounds(0,0,RN-1,RN-1);
00828   }
00829   else ipc_notify(IPC_ALL,IPC_ERROR,"Don't know weight type: %s",name.c_str());
00830 
00831   if (circular) {
00832     const double radius = params->get_with_default(radparam.c_str(),0.0);
00833     const BoundingCircle e(ci,cj,radius);
00834     return new Boundary::Intersection<int,int,double,BoundingCircle,Bounds>(e,b);
00835   }
00836   else {
00837     const int rad = array_radius_int(*params,radparam);
00838     const BoundingRectangle e(ci-rad,cj-rad,ci+rad,cj+rad);
00839     return new Boundary::Intersection<int,int,double,BoundingRectangle,Bounds>(e,b);
00840   }
00841 }
00842 
00843 
00844 /******************************************************************************/
00845 /* Core routines                                                              */
00846 /******************************************************************************/
00847 
00868 void LissomMap::present_inputs(bool learn, bool settle, bool activatefn)
00869 {
00870   int i,pe,j,count;
00871   
00872   const bool continuousdynamics = params->get_with_default("dynamics",False);
00873   const int  tsettle            = params->get_with_default("tsettle",1);
00874   
00875   /* Present and settle activity into bubbles */
00876   settle_responses(learn,(settle? tsettle : 1),activatefn,continuousdynamics);
00877   
00878   for(pe=0; pe< NPEs; pe++)       /* Collect map_activity from each processor */
00879     for(i=0; i<perows; i++)
00880       ipc_get_to(&(prev_map_activity[ARBITRARY_MAPROW(i,pe)][0]),
00881                  &(map_activity[ARBITRARY_MAPROW(i,pe)][0]),IPC_DOUBLE,N,pe);
00882   
00883   /* gather_activity is indexed from [1..count] like the activity list */
00884   for (i=0; i<N; i++)
00885     if ((count=ACTLIST_COUNT(prev_activity_list[i])) > 0)
00886       for (j=ACTLIST_START(prev_activity_list[i]); j<=count; j++)
00887         gather_activity[i][j]=prev_map_activity[i][prev_activity_list[i][j]];
00888   
00889   if (learn && !continuousdynamics)      /* Otherwise, modify at every step */
00890     modify_all_weights();
00891 }
00892 
00893 
00894 
00896 void LissomMap::settle_responses(bool learn, int tsettle, bool activatefn, bool continuousdynamics)
00897 {
00898   int ts;
00899   const bool   allow_negative_wts = params->get_with_default("allow_negative_wts",False);
00900 
00901   response_to_input(continuousdynamics);
00902   initialize_markers();
00903   initialize_actlists();
00904 
00905   ipc_barrier(); /* To make sure dynamics = 0 init has taken place */
00906 
00907   for(ts=1; ts<=tsettle; ts++){ /* Let network settle into an activity bubble */
00908     compute_responses(ts,activatefn,continuousdynamics);  /* Add up input and lateral activations */
00909     ipc_barrier();          /* To ensure all activations have been computed  */
00910     
00911     if (learn && continuousdynamics) { /* Modify at each step   */
00912       (void)allow_negative_wts;
00913       assert(!allow_negative_wts);
00914       modify_all_weights();
00915     }
00916   }
00917 }
00918 
00919 
00920 
00924 void LissomMap::response_to_input(bool continuousdynamics)
00925 {
00926   const bool   divide_input_sum      = params->get_with_default("divide_input_sum",False);
00927   const bool   divide_reference_sum  = params->get_with_default("divide_reference_sum",False);
00928   const double inp_sum_scale         = params->get_with_default("divide_input_sum_scale",1.0);
00929   const double inp_sum_offset        = params->get_with_default("divide_input_sum_offset",0.0);
00930   const double gammasur              = params->get_with_default("gammasur",1.0);
00931 #ifdef OBJ_LISSOM
00932   const bool   l2_norm  = params->get_with_default("l2_norm",False);
00933 #endif /* OBJ_LISSOM */
00934 
00936   if (int(aff_inputs.size()) != num_aff) {
00937     ipc_notify(IPC_ALL,IPC_ERROR,"No response; the number of afferent inputs was declared as %d but is %d (see num_aff_inputs)",
00938                num_aff, int(aff_inputs.size()));
00939     return;
00940   }
00941       
00942   for(int i=0; i<perows; i++){
00943     const int row = MAPROW(i);                      /* calculate row index  */
00944     for(int j=0; j<N; j++)                              /* Init all activations */
00945       { 
00946         const Neuron&              neuron = cortex_map[row][j];
00947         const AfferentWeightsList& awts   = *(neuron.weights);
00948         const AfferentWeightsList& ref_awts = reference_wts.weights;
00949 
00950         const int lowk   = MAX(neuron.centerx-rf_radius,0   );
00951         const int highk  = MIN(neuron.centerx+rf_radius,RN-1);
00952         const int lowl   = MAX(neuron.centery-rf_radius,0   );
00953         const int highl  = MIN(neuron.centery+rf_radius,RN-1);
00954 
00955         const int rlowk  = neuron.centerx-rf_radius;
00956         const int rlowl  = neuron.centery-rf_radius;
00957 
00958         double resp =0;
00959         double resp1=0;
00960         double inp  =0;
00961         double inp1 =0;
00962         double ref  =0;
00963         double ref1 =0;
00964         double num_inp =0;
00965         
00966         /* Blank out previous activity in network if appropriate. */
00967         if (!continuousdynamics) prev_map_activity[row][j] = map_activity[row][j] =0.0;
00968 
00969 #ifdef OBJ_LISSOM
00970         /* calculate l2_norm if necessary */
00971         if (l2_norm) {
00972           for (inputs_type::iterator i=inputs.begin();i != inputs.end(); ++i) {
00973            if (String::non_numeric_basename_matches<string>((*i)->name(),"Afferent")){
00974             NeuralRegion* inp = dynamic_cast<NeuralRegion*>(get_input((*i)->name()));
00975             ActivityMatrix& in = inp->output;
00976 
00977             int nr = inp->output.nrows(), nc = inp->output.ncols();
00978             double l2_sum=0.0;
00979             /* gather sum sqr */
00980             for (int k=0; k<nr ; ++k) 
00981               for (int l=0; l<nc ; ++l) {
00982                   l2_sum += in[k][l]*in[k][l];
00983               }
00984 
00985             l2_sum = 1.0/sqrt(l2_sum);
00986 
00987             /* normalize input */
00988             for (int k=0; k<nr ; ++k) 
00989               for (int l=0; l<nc ; ++l) {
00990                   in[k][l] *= l2_sum;
00991               }
00992 
00993            }
00994           }
00995         }
00996 #endif /* OBJ_LISSOM */ 
00997         /* This is a time-critical loop; make sure that the compiler unrolls it */
00998 
00999         
01000 
01001 #ifndef NUM_EYES_IS_CONSTANT
01002         /* Use canonical but slower version of algorithm */
01003         
01004 #ifdef SPARSE_MATRICES
01005         /* Basic version */
01006         for (int k=lowk; k <=highk; k++) {
01007           for (int l=lowl; l<=highl; l++)
01008             for (int aff=0; aff<num_aff; aff++) {
01009               const aff_input_type::value_type& in = (*aff_inputs[aff])[l][k];
01010               resp  += in * awts[aff][k-lowk][l-lowl];
01011               ref   += in * ref_awts[aff][k-rlowk][l-rlowl];
01012               inp   += in;
01013             }
01014         }
01015         
01016 #else     
01017         /* Optimized assuming dense rectangular layout */
01018         const aff_inputs_type::value_type*     ap = &(aff_inputs[0]);
01019         const AfferentWeightsList::value_type* wp = &(awts[0]);
01020         const AfferentWeightsList::value_type* rwp= &(ref_awts[0]);
01021         for (int aff=0; aff<num_aff; aff++,ap++,wp++,rwp++) {
01022           for (int k=lowk; k <=highk; k++) {
01023             /* This could be further optimized if both arrays had the same index order... */
01024             //const aff_input_type::value_type*  al=&((**ap)[l][lowk]);
01025             const AfferentWeights::value_type* wl=&((*wp)[k-lowk][0]);
01026             const AfferentWeights::value_type* rwl=&((*rwp)[k-rlowk][lowl-rlowl]);
01027             for (int l=lowl; l<=highl; l++,wl++,rwl++) {
01028               const aff_input_type::value_type& in = (**ap)[l][k];
01029               //resp  += (*al) * (*wl);
01030               resp  += in * (*wl);
01031               ref   += in * (*rwl);
01032               inp   += in;
01033               num_inp++;
01034             }
01035           }
01036         }
01037 #endif
01038 
01039         
01040 #else /* num_aff is constant */
01041 #error divide_input_sum and divide_reference_sum have not yet been implemented when NUM_EYES_IS_CONSTANT
01042         // (Implementing them is trivial (see above) but has not been done.)
01043         
01044         for (int k=lowk; k <=highk; k++){
01045 
01046           /*
01047             This is an extremely time-critical loop, so it has been optimized
01048             by unrolling the inner loop by hand for the typical case of
01049             num_aff==1 and num_aff==2.
01050           */
01051           for (int l=lowl; l<=highl; l++){
01052             // Would need to update inp and ref here...
01053             resp  += (*aff_inputs[0])[l][k] * awts[0][k-lowk][l-lowl];
01054 #if   (num_aff>1)
01055             resp1 += (*aff_inputs[1])[l][k] * awts[1][k-lowk][l-lowl];
01056 #endif
01057 #if   (num_aff>2)
01058             {
01059               for (int aff=2; aff<num_aff; aff++)
01060                 resp  += (*aff_inputs[aff])[l][k] * awts[aff][k-lowk][l-lowl];
01061             }
01062 #endif    
01063           }
01064         }
01065 #endif /* num_aff is constant */
01066 
01067         const double response  = resp + resp1;
01068         const double reference = ref  + ref1;
01069         /* Effectively normalizes input_sum weights to 1.0, all equal */
01070         const double inp_avg  = (num_inp? (inp + inp1)/num_inp : 0.0);
01071         const double inp_resp = 
01072           (divide_input_sum?      (inp_avg==0   ? response : response/(inp_sum_scale*inp_avg+inp_sum_offset)) : 
01073            (divide_reference_sum? (reference==0 ? response : response/(inp_sum_scale*ref    +inp_sum_offset)) :
01074             response));
01075 
01076         resp_to_inp[row][j] = inp_resp - gammasur * inp_avg;
01077       }
01078   }
01079 }
01080 
01081 
01082 
01086 void LissomMap::backproject()
01087 {
01088   // NeuralRegion* inp = get_input("Afferent00");
01089   // cout << "- retrieved upstream region: " << inp->name() << endl;
01090 
01091   const double gammaaff = params->get_with_default("gammaaff",1.0);
01092   const double beta = params->get_with_default("beta",1.0);
01093   // Hack; should pass verbose through backproject() instead
01094   const bool   verbose = params->get_with_default("::cmd::backproject::verbose",False);
01095 
01096   // Set up the current bp matrix by passing it through the activation function.
01097   // We assume that bp[][] in its final stage did not go through the
01098   // activation function; this applies both to present_inputs() and
01099   // to resp_to_residual().
01100   int nr = bp.nrows(), nc=bp.ncols();
01101   for (int x=0; x<nr; ++x) 
01102      for (int y=0; y<nc; ++y) {
01103         // bp[x][y] = logsigmoid(bp[x][y],beta);
01104         bp[x][y] = truesigmoid(bp[x][y],beta);
01105         // These functions are not available on all platforms
01106         //assert(!isnan(bp[x][y]));
01107         //assert(!isinf(bp[x][y]));
01108      }
01109 
01110   // clean upstream bp and residual matrices
01111   for (inputs_type::iterator i=inputs.begin();i != inputs.end(); ++i) {
01112     if (String::non_numeric_basename_matches<string>((*i)->name(),"Afferent")){
01113       NeuralRegion* inp = dynamic_cast<NeuralRegion*>(get_input((*i)->name()));
01114       int nr = inp->bp.nrows(), nc = inp->bp.ncols();
01115       ActivityMatrix& r = inp->residual;
01116       ActivityMatrix& b = inp->bp;
01117       for (int x=0; x<nr; ++x) 
01118         for (int y=0; y<nc; ++y) {
01119           // do not touch inp->output[][] !
01120           b[x][y] = 0.0;
01121           r[x][y] = 0.0;
01122         }
01123     }
01124   }
01125 
01126   // backproject current bp to the upstream bp matrix (using addition)
01127   // and update residual
01128   for (inputs_type::iterator i=inputs.begin();i != inputs.end(); ++i) {
01129     if (String::non_numeric_basename_matches<string>((*i)->name(),"Afferent")){
01130 
01131       backproject((*i)->name(),gammaaff);
01132 
01133       NeuralRegion* inp = dynamic_cast<NeuralRegion*>(get_input((*i)->name()));
01134 
01135       double err = 0.0, err_tmp=0.0, bp_tmp=0.0, output_tmp=0.0;
01136       double v1_len = 0.0, v2_len = 0.0, dot_p = 0.0, vector_err = 0.0;
01137       int nr = inp->bp.nrows(), nc = inp->bp.ncols();
01138 
01139       ActivityMatrix& r = inp->residual;
01140       ActivityMatrix& b = inp->bp;
01141       ActivityMatrix& o = inp->output;
01142       for (int x=0; x<nr; ++x) 
01143         for (int y=0; y<nc; ++y) {
01144             bp_tmp      = b[x][y];
01145             output_tmp  = o[x][y];
01146 
01147             // sqrt(sum sqr err)
01148             err_tmp     = output_tmp - bp_tmp;
01149             err                += err_tmp * err_tmp;
01150             r[x][y]     += err_tmp;
01151 
01152             // vector error
01153             dot_p += bp_tmp * output_tmp;
01154             v1_len += bp_tmp*bp_tmp;
01155             v2_len += output_tmp*output_tmp;
01156         }
01157 
01158         vector_err = acos(dot_p/(sqrt(v1_len * v2_len)));
01159 
01160         if (verbose)
01161           ipc_notify(IPC_ALL,IPC_STD,"%s: sqrt(sum err sqr) = %f : vector error = %f\n",
01162                      ((*i)->name()).c_str(),sqrt(err), vector_err);
01163         else 
01164           ipc_log(IPC_ONE,"%s: sqrt(sum err sqr) = %f : vector error = %f\n",
01165                   ((*i)->name()).c_str(),sqrt(err),vector_err);
01166         //sync();
01167     }
01168   }
01169 
01170 }
01171 
01172 
01173 #ifdef OBJ_LISSOM
01174 
01176 void LissomMap::resp_to_residual()
01177 {
01178 
01179   // How the various matrices are used: the usage is pretty
01180   //    brittle and depends on a lot of conditions outside
01181   //    of this function: e.g., backproject() being called immediately
01182   //    before the call to the current function.
01183   //
01184   // precondition:
01185   //    output:                 a
01186   //    bp:                     f(a)    (backproj() should have applied f())
01187   //    residual (upstream):    r       (calculated by backproject)
01188   //    * backproject() should have been called prior to this function call!
01189   //    * repeated calls of backproject() and resp_to_residual() will
01190   //      update the activity 'a' (inner loop optimization).
01191   // 
01192   // intermediate:
01193   //    output:                 r*W
01194   //    resp_to_inp:            a
01195   //    prev_map_activity:      f(a)
01196   //    map_activity:           f'(a)
01197   //    residual (current):     -dE/da = alpha_settle * f'(a)[r*W+exc-inh]
01198   //
01199   // postcondition:
01200   //    output = prev_map_activity:     new a
01201   //    bp:                             new a (f will be applied by backproj)
01202   //
01203   // As a result, output[][] and bp[][] should contain the new a_i.
01204   // 
01205   //    * weight updates need to be based on the new 'f(a)' values
01206   //      and the to-be-calculated residual 'r' based on the current 'a'. 
01207   //      All these necessary steps can be taken care of by backproject() 
01208   //      which will put 'f(a)' in bp[][] and 'r' in the upstream residual[][].
01209 
01210   const double gammaaff = params->get_with_default("gammaaff",1.0);
01211   const double gammaexc = params->get_with_default("gammaexc",1.0);
01212   const double gammainh = params->get_with_default("gammainh",1.0);
01213   const double alpha_settle = params->get_with_default("alpha_settle",1.0);
01214   const double beta = params->get_with_default("beta",1.0);
01215   const bool obj_lateral_on = params->get_with_default("obj_lateral_on",True);
01216 
01217   // Initialize.
01218   int nr = output.nrows(), nc = output.ncols();
01219   for (int x=0; x<nr; ++x) 
01220     for (int y=0; y<nc; ++y) {
01221       resp_to_inp[y][x] = output[x][y];
01222       // prev_map_activity[y][x] = logsigmoid(resp_to_inp[y][x],beta);
01223       // map_activity[y][x] = logsigmoidprime(resp_to_inp[y][x],beta);
01224       prev_map_activity[y][x] = truesigmoid(resp_to_inp[y][x],beta);
01225       map_activity[y][x] = truesigmoidprime(prev_map_activity[y][x],beta);
01226       output[x][y] = 0.0;
01227     } // a, f(a), and f'(a) prepared. output (for r*W) initialized to 0.
01228 
01229   // calculate response to residual: r*W
01230   for (inputs_type::iterator i=inputs.begin();i != inputs.end(); ++i) {
01231     if (String::non_numeric_basename_matches<string>((*i)->name(),"Afferent")){
01232 
01233       // Calculate r*W, the projection of residual through afferent weights
01234       // - assume backproject() was called and all "bp" matrices hold
01235       //   updated values.
01236       // - write r*W to "output": each call is cumulative.
01237       resp_to_residual((*i)->name(),gammaaff);
01238     }
01239   } // r*W ready in output
01240  
01241   // calculate lateral response and update all matrices
01242   for (int x=0; x<nr; ++x) {
01243     for (int y=0; y<nc; ++y) {
01244 
01245       // lateral response should use prev_map_activity[][] as f(a)
01246 
01247       // update residual, which holds -dE/da
01248       residual[x][y] = alpha_settle 
01249                      * map_activity[y][x] 
01250                      * ( output[x][y] +
01251                          ((obj_lateral_on)
01252                           ?( gammaexc*lat_resp_named(x,y,"LateralExcitatory")
01253                             -gammainh*lat_resp_named(x,y,"LateralInhibitory") )
01254                           :0.0
01255                          ) 
01256                        );
01257 
01258       // update output (for plotting) and bp (as new a, for more back projection)
01259       output[x][y] = bp[x][y] = resp_to_inp[y][x] + residual[x][y];
01260 
01261     }
01262   }
01263 
01264 }
01265 
01266 
01274 void LissomMap::train_obj_weights() {
01275 
01276   const double alpha_aff     = params->get_with_default("alpha_input",0.0);
01277   const double alpha_exc = params->get_with_default("alpha_exc",0.0);
01278   const double alpha_inh = params->get_with_default("alpha_inh",0.0);
01279   const bool   crop_negative_wts = params->get_with_default("crop_negative_wts",False);
01280   const bool   allow_negative_wts = params->get_with_default("allow_negative_wts",False);
01281   const bool   l2_norm = params->get_with_default("l2_norm",False);
01282   
01283   // 0. Set up 'f(a)' and 'r' properly in bp[][] and upstream 
01284   //    residual[][] respectively.
01285   backproject(); 
01286 
01287   int nr = output.nrows();
01288   int nc = output.ncols();
01289 
01290   // 1. For all afferent regions, adapt weights
01291   for (inputs_type::iterator i=inputs.begin();i != inputs.end(); ++i) {
01292     if (String::non_numeric_basename_matches<string>((*i)->name(),"Afferent")){
01293   
01294       // calculate the afferent region number and get the input region
01295       string name = (*i)->name();
01296       int aff   = std::find(ISEQ(aff_names),name) - aff_names.begin();
01297       NeuralRegion* inp = dynamic_cast<NeuralRegion*>(get_input((*i)->name()));
01298       ActivityMatrix& r = inp->residual;
01299   
01300       // loop through target units
01301       for (int ui=0; ui<nr; ++ui) {
01302         for (int uj=0; uj<nc; ++uj) { 
01303   
01304           /* Array indices swapped to translate between the correct and legacy formats. */
01305           Neuron&              neuron = cortex_map[uj][ui];
01306           AfferentWeightsList& awts   = *(neuron.weights);
01307       
01308           int lowk  = MAX(neuron.centery-rf_radius,0   );
01309           int highk = MIN(neuron.centery+rf_radius,RN-1);
01310           int lowl  = MAX(neuron.centerx-rf_radius,0   );
01311           int highl = MIN(neuron.centerx+rf_radius,RN-1);
01312           
01313           if (l2_norm) {
01314                   // adapt weight
01315                   double sum=0.0; 
01316                   for (int k=lowk; k <=highk; k++)
01317                     for (int l=lowl; l<=highl; l++) {
01318                         a_weight* const wptr = &(awts[aff][l-lowl][k-lowk]);
01319                         if (*wptr > -DEATH_FLAG || allow_negative_wts) {
01320                           *wptr += alpha_aff * r[k][l] * bp[ui][uj];    
01321                           sum += (*wptr)*(*wptr);
01322                         }
01323                     }
01324 
01325                   // normalize
01326                   sum=1.0/sqrt(sum);
01327                   for (int k=lowk; k <=highk; k++)
01328                     for (int l=lowl; l<=highl; l++) {
01329                         a_weight* const wptr = &(awts[aff][l-lowl][k-lowk]);
01330                           *wptr *= sum;
01331                     }
01332 
01333           } else { // L1-norm 
01334 
01335                   // adapt weight
01336                   double sum=0.0; 
01337                   for (int k=lowk; k <=highk; k++)
01338                     for (int l=lowl; l<=highl; l++) {
01339                         a_weight* const wptr = &(awts[aff][l-lowl][k-lowk]);
01340                         if (((*wptr) > -DEATH_FLAG) || allow_negative_wts) {
01341                           (*wptr) += alpha_aff * r[k][l] * bp[ui][uj];  
01342                           if (crop_negative_wts && ((*wptr)<0.0)) *wptr=0.0;
01343 
01344                           // jbednar: Why fabs?  To avoid divide by zero when doing 1.0/sum,
01345                           // when crop_negative_wts==False?
01346                           sum += fabs((*wptr));
01347                         }
01348                     }
01349 
01350                   // normalize
01351                   assert(sum!=0.0); 
01352                   sum = 1.0/sum;
01353                   for (int k=lowk; k <=highk; k++)
01354                     for (int l=lowl; l<=highl; l++) {
01355                         a_weight* const wptr = &(awts[aff][l-lowl][k-lowk]);
01356                         if (((*wptr) > -DEATH_FLAG) || allow_negative_wts) {
01357                                 (*wptr) *= sum;
01358                         }
01359                     }
01360 
01361           } // end else
01362 
01363         } // uj loop
01364       } // ui loop
01365 
01366     } // end of afferent check
01367   } // end input regions loop
01368 
01369   // 2. Adapt excitatory lateral connections
01370   for (int ui=0; ui<nr; ++ui) {
01371     for (int uj=0; uj<nc; ++uj) {
01372 
01373         /* Array indices swapped to translate between the correct and legacy formats. */
01374         Neuron&              neuron = cortex_map[uj][ui];
01375       
01376         const int lowi  = MAX(uj-exc_rad,0);
01377         const int highi = MIN(uj+exc_rad,N-1);
01378         const int lowj  = MAX(ui-exc_rad,0);
01379         const int highj = MIN(ui+exc_rad,N-1);
01380         
01381         double sum=0.0;
01382         for (int i=lowi; i<=highi; i++)
01383           for (int j=lowj; j<=highj; j++) {
01384             l_weight* const wptr = &(neuron.lat_exc_wts[LAT_INDEX(uj,ui,i,j,exc_rad,exc_array_width)]);
01385             (*wptr) += alpha_exc * bp[j][i] * bp[ui][uj];
01386             sum += fabs((*wptr));
01387           }
01388 
01389         assert(sum!=0.0);
01390         sum = 1.0/sum;
01391         for (int i=lowi; i<=highi; i++)
01392           for (int j=lowj; j<=highj; j++) {
01393             l_weight* const wptr = &(neuron.lat_exc_wts[LAT_INDEX(uj,ui,i,j,exc_rad,exc_array_width)]);
01394             (*wptr) *= sum; 
01395           }
01396         
01397     }
01398   }
01399 
01400   // 3. Adapt inhibitory lateral connections
01401   for (int ui=0; ui<nr; ++ui) {
01402     for (int uj=0; uj<nc; ++uj) {
01403 
01404         /* Array indices swapped to translate between the correct and legacy formats. */
01405         Neuron&              neuron = cortex_map[uj][ui];
01406       
01407         const int lowi  = MAX(uj-inh_rad,0);
01408         const int highi = MIN(uj+inh_rad,N-1);
01409         const int lowj  = MAX(ui-inh_rad,0);
01410         const int highj = MIN(ui+inh_rad,N-1);
01411         
01412         double sum=0.0;
01413         for (int i=lowi; i<=highi; i++)
01414           for (int j=lowj; j<=highj; j++) {
01415             l_weight* const wptr = &(neuron.lat_inh_wts[LAT_INDEX(uj,ui,i,j,inh_rad,inh_array_width)]);
01416             (*wptr) -= alpha_inh * bp[j][i] * bp[ui][uj];
01417             sum += fabs((*wptr));
01418            
01419           }
01420         
01421         assert(sum!=0.0);
01422         sum=1.0/sum;
01423         for (int i=lowi; i<=highi; i++)
01424           for (int j=lowj; j<=highj; j++) {
01425             l_weight* const wptr = &(neuron.lat_inh_wts[LAT_INDEX(uj,ui,i,j,inh_rad,inh_array_width)]);
01426             (*wptr) *= sum;
01427             // this fails! : assert((*wptr)>0.0);
01428           }
01429     }
01430   }
01431 
01432 }
01433 #endif /* OBJ_LISSOM */
01434 
01435 
01439 void LissomMap::compute_responses(int ts, bool activatefn, bool continuousdynamics)
01440 {
01441   const double gammaaff = params->get_with_default("gammaaff",1.0);
01442   const double gammaexc = params->get_with_default("gammaexc",1.0);
01443   const double gammainh = params->get_with_default("gammainh",1.0);
01444   const Tristate ls_type= params->get_with_default("lateral_sign_type",Uninitialized);
01445   const double ls_scale = params->get_with_default("lateral_sign_scale",0.0);
01446   const double ls_offset= params->get_with_default("lateral_sign_offset",-1.0);
01447   const double delta    = params->get_with_default("delta",0.0);
01448   const double beta     = params->get_with_default("beta",1.0);
01449   const double noise    = params->get_with_default("noise",0.0);
01450   const bool   allow_negative_wts = params->get_with_default("allow_negative_wts",False);
01451 
01452   int i,pe,j,row,count; double resp;
01453  
01454   if ((ts==1) && (!continuousdynamics)){ /* Don't add lateral interaction for the first iteration */
01455     for(i=0; i<perows; i++){  /* Add up input and lateral activations. */
01456       row = MAPROW(i);
01457       for(j=0; j<N; j++)
01458         {
01459           resp = gammaaff * resp_to_inp[row][j];
01460           if (noise > 0.0)   /* If neuron is noisy, add some zero mean noise */
01461             resp += noise * (shuffled_rand()-0.5);
01462           map_activity[row][j] = init_activity[row][j] = activatefn ? sigmoid(resp,delta,beta) : resp;
01463           if (map_activity[row][j] > 0.0)         
01464             activity_list[i][++ACTLIST_COUNT(activity_list[i])] = j;
01465         }
01466       assert(ACTLIST_COUNT(activity_list[i]) >= 0); assert(ACTLIST_COUNT(activity_list[i]) <= N);
01467     }
01468     ipc_barrier(); /* a barrier before ipc_put is essential to avoid races */
01469     for(i=0; i<perows; i++){  /* Store all the activity lists */
01470       row = MAPROW(i);
01471       for (pe=0; pe < NPEs; pe++)    /* Store activity list to all processors */
01472         ipc_put_to(&(prev_activity_list[row][0]), &(activity_list[i][0]),
01473                    IPC_INT,ACTLIST_COUNT(activity_list[i])+1,pe);
01474       ACTLIST_COUNT(activity_list[i]) = 0;
01475     }
01476     return;
01477   }
01478 
01479   (void)allow_negative_wts;
01480   assert(!allow_negative_wts);
01481   /*============================= OTHERWISE ============================= */
01482   /* Copy the activity from the previous settling iteration.
01483    * This is used to compute the responses in the next iteration.
01484    */
01485   for(pe=0; pe< NPEs; pe++)          /* From each processor     */
01486     for (i=0; i<perows; i++){          /* Fetch each row to my PE */
01487       row = ARBITRARY_MAPROW(i,pe);
01488       ipc_get_to(&(prev_map_activity[row][0]),&(map_activity[row][0]),IPC_DOUBLE,N,pe);
01489     }
01490 
01491   /* Set markers after two (heuristic) iterations are complete */
01492   if (ts==3){
01493     if (exc_rad > 1) set_markers(exc_rad);
01494     else set_markers(2);
01495   }
01496   /* Gather the activity from each row into the gather_activity array.  This
01497    * increases cache locality and improves performance in the unrolled
01498    * loop.  gather_activity is indexed from [1..count] like the activity list.
01499    */
01500   for (i=0; i<N; i++)
01501     if ((count=ACTLIST_COUNT(prev_activity_list[i])) > 0)
01502       for (j=ACTLIST_START(prev_activity_list[i]); j<=count; j++)
01503         gather_activity[i][j]=prev_map_activity[i][prev_activity_list[i][j]];
01504 
01505   for(i=0; i<perows; i++){  /* Add up input and lateral activations. */
01506     row = MAPROW(i);
01507     for(j=0; j<N; j++){
01508       
01509       if (activity_marker[row][j]){ /* Activity exists in neighborhood*/
01510         resp = gammaaff * resp_to_inp[row][j];
01511         
01512         if (noise > 0.0)   /* If neuron is noisy, add some zero mean noise */
01513           resp += noise * (shuffled_rand()-0.5);
01514 
01515         const Neuron& neuron = cortex_map[row][j];
01516         if (exc_rad <= 1)/*   Use optimized version of the lat_resp function*/
01517           resp += gammaexc * lat_resp_rad1(row, j, exc_rad, exc_array_width, exc_array_width_2,
01518                                            neuron.lat_exc_wts, lat_exc_dimension);
01519 
01520         else
01521           resp += gammaexc * lat_resp(row, j, exc_rad, exc_array_width, 
01522                                       neuron.lat_exc_wts, lat_exc_dimension);
01523     
01524         /* Add inhibition only if fixed-sign and activity is already
01525            greater than zero, or if variable sign is being used (and
01526            thus actual sign is not predictable) */
01527         const double act = activatefn ? sigmoid(resp,delta,beta) : resp;
01528         if ( act>0.0 || ls_type>Uninitialized) {
01529           const double inh_resp = lat_resp(row,j,inh_rad,inh_array_width,neuron.lat_inh_wts, lat_inh_dimension);
01530           const double scaled_inh_resp =
01531             /* Standard: fixed-sign inhibitory connections */
01532             (ls_type==Uninitialized?
01533              gammainh*inh_resp :
01534              
01535              /* Untested: scales sign of long-range connections by
01536                 strength of incoming inh activations */
01537              // Consider removing the first inh_resp; right now
01538              // responses are squared for high inh or low ls_offset
01539              (ls_type==False?
01540               gammainh*inh_resp*(ls_scale*inh_resp-ls_offset) : 
01541               
01542               /* Untested: scales sign of long-range connections by resp_to_inp */
01543               gammainh*inh_resp*(ls_scale*resp_to_inp[row][j]-ls_offset) ));
01544           
01545           const double inp      = resp-scaled_inh_resp;
01546           map_activity[row][j] = activatefn ? sigmoid(inp, delta, beta) : inp;
01547         }
01548         
01549         if (map_activity[row][j] > 0.0)   
01550           activity_list[i][++ACTLIST_COUNT(activity_list[i])] = j;
01551         
01552       }
01553       else map_activity[row][j]=0.0;
01554     }
01555   }
01556 
01557   ipc_barrier(); /* a barrier before ipc_put is essential to avoid races */
01558   for(i=0; i<perows; i++){  /* Store all the activity lists */
01559     row = MAPROW(i);
01560     for (pe=0; pe < NPEs; pe++)   /* Store activity list to all processors */
01561       ipc_put_to(&(prev_activity_list[row][0]), &(activity_list[i][0]),
01562                  IPC_INT,ACTLIST_COUNT(activity_list[i])+1,pe);
01563     ACTLIST_COUNT(activity_list[i]) = 0;
01564   }
01565 }
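
/* --- Illustrative sketch (hypothetical helper, not part of the build) ------
   The settling update above interleaves noise, activity-list bookkeeping,
   IPC and the variable-sign options.  As a reading aid only, this is the
   core per-neuron arithmetic for one settling step under the standard
   fixed-sign scheme; the function name and flat parameter list are
   hypothetical, and the noise term is omitted. */
static double example_unit_update(double afferent_resp,           /* like resp_to_inp[row][j] */
                                  double exc_sum, double inh_sum, /* weighted lateral sums    */
                                  double gammaaff, double gammaexc, double gammainh,
                                  double delta, double beta)      /* sigmoid thresholds       */
{
  /* Afferent contribution plus scaled lateral excitation. */
  const double resp = gammaaff * afferent_resp + gammaexc * exc_sum;

  /* Piecewise-linear sigmoid, as in LissomMap::sigmoid(). */
  double act = (resp < delta) ? 0.0 : (resp > beta) ? 1.0 : (resp-delta)/(beta-delta);

  /* With fixed-sign weights, inhibition is subtracted only when the unit
     would already be active; otherwise the result is zero either way. */
  if (act > 0.0) {
    const double inp = resp - gammainh * inh_sum;
    act = (inp < delta) ? 0.0 : (inp > beta) ? 1.0 : (inp-delta)/(beta-delta);
  }
  return act;
}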
01566 
01567 
01568 
01574 int LissomMap::binary_search(const actlist& list, int lower, int upper, int value)
01575 {
01576   /* Don't bother to search if entirely outside the range */
01577   if (value <= list[lower]) return lower;    /* value at or below the lowest entry */
01578   if (value >  list[upper]) return upper+1;  /* value above the highest entry */
01579 
01580   /* Binary search within range */
01581   while(lower < upper) {
01582     const int middle = (lower+upper) >> 1;  /* same as divide by 2 */
01583     const int compare_to = list[middle];
01584     if      (value > compare_to) lower = middle + 1;
01585     else if (value < compare_to) upper = middle;
01586     else return(middle);   /* value has been found at index middle */
01587   }
01588   return(lower); /* value not found, but is in range */
01589 }
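
/* --- Illustrative sketch (hypothetical helper, not part of the build) ------
   The routine above is a lower-bound search: when the value is absent it
   returns the index of the first entry >= value.  The free function below
   restates it over a 1-indexed activity list whose element 0 holds the
   count, with a few example checks. */
#include <cassert>
#include <vector>

static int example_lower_bound(const std::vector<int>& list, int lower, int upper, int value)
{
  if (value <= list[lower]) return lower;      /* at or below the lowest entry */
  if (value >  list[upper]) return upper+1;    /* above the highest entry      */
  while (lower < upper) {
    const int middle = (lower+upper) >> 1;
    if      (value > list[middle]) lower = middle + 1;
    else if (value < list[middle]) upper = middle;
    else return middle;
  }
  return lower;
}

static void example_lower_bound_checks()
{
  /* Activity list holding columns {2,5,9,17}; element 0 is the count. */
  int raw[] = {4, 2, 5, 9, 17};
  const std::vector<int> list(raw, raw+5);
  assert(example_lower_bound(list, 1, list[0], 5)  == 2);  /* exact hit               */
  assert(example_lower_bound(list, 1, list[0], 6)  == 3);  /* first entry >= 6 is 9   */
  assert(example_lower_bound(list, 1, list[0], 99) == 5);  /* past the end (upper+1)  */
}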
01590 
01591 
01592 
01599 void LissomMap::crop_actlist(const actlist& list, int lowval, int highval, int *lowidx, int *highidx)
01600 {
01601   (*lowidx )=ACTLIST_START(list);
01602   (*highidx)=ACTLIST_COUNT(list);
01603   
01604   if (lowval == 0)
01605     (*lowidx) = 1;
01606   else
01607     (*lowidx) = binary_search(list,(*lowidx),(*highidx),lowval);
01608 
01609   if (highval==(N-1))
01610     (*highidx) = ACTLIST_COUNT(list); 
01611   else {
01612     (*highidx) = binary_search(list,(*lowidx),(*highidx),highval);
01613 
01614     if ((*highidx) > ACTLIST_COUNT(list) || (list[(*highidx)]> highval))
01615       (*highidx)--;
01616   }
01617 }
01618 
01619 
01620 
01625 /*
01626 void LissomMap::crop_actlist_orig(const actlist& list, int lowval, int highval, int *lowidx, int *highidx)
01627 {
01628   (*lowidx )=ACTLIST_START(list);
01629   (*highidx)=ACTLIST_COUNT(list);
01630 
01631   while ((*highidx) >= (*lowidx)  &&  list[(*lowidx )] < lowval ) (*lowidx )++;
01632   while ((*highidx) >= (*lowidx)  &&  list[(*highidx)] > highval) (*highidx)--;
01633 } 
01634 */
01635 
01636 
01637 
01639 /*
01640 void LissomMap::verify_actlist(const actlist& list)
01641 {
01642   int i;
01643   int count=ACTLIST_COUNT(list);
01644 
01645   if (count <0 || count > N) {
01646     ipc_notify(IPC_ALL,IPC_ERROR,"List has invalid count (%d)",count);
01647     return;
01648   }
01649   for (i=ACTLIST_START(list); i<count; i++)
01650     if (list[i]>= list[i+1]) {
01651       ipc_notify(IPC_ALL,IPC_ERROR,"List is not in proper order -- for %d elements, list[%d]=%d > list[%d]=%d",
01652                 count,i,list[i],i+1,list[i+1]);
01653       return;
01654     }
01655   return;
01656 }
01657 */
01658 
01660 /*
01661 void LissomMap::verify_actlist_array(const actlist lists[],int num)
01662 {
01663   int i;
01664   for (i=0; i<num; i++)
01665     verify_actlist(lists[i]);
01666 }
01667 */
01668 
01669 
01671 void LissomMap::initialize_actlists( void )
01672 {
01673   int i;
01674   for(i=0; i<perows; i++) {
01675     ACTLIST_COUNT(prev_activity_list[MAPROW(i)]) = 0;
01676     ACTLIST_COUNT(activity_list[i]) = 0;
01677   }
01678 }
01679 
01680 
01681 
01684 void LissomMap::initialize_markers( void )
01685 {
01686   int i,j;
01687 
01688   for(i=0; i<perows; i++){
01689     const int row = MAPROW(i);
01690     for(j=0; j<N; j++)  
01691       activity_marker[row][j] = True;
01692   }
01693 }
01694 
01695 
01696 
01702 void LissomMap::set_markers(int radius)
01703 {
01704   int i,j,k,l;
01705   const double act_threshold = params->get_with_default("activity_threshold",0.0);
01706   const bool   has_threshold = act_threshold>=0;
01707   
01708   for(i=0; i<perows; i++){
01709     const int row   = MAPROW(i);                      
01710     const int lowk  = MAX(row-radius,0);
01711     const int highk = MIN(row+radius,N-1);
01712 
01713     for(j=0; j<N; j++){
01714       const int lowl  = MAX(j-radius,0);
01715       const int highl = MIN(j+radius,N-1);
01716       double marker_resp = 0.0;
01717 
01718       for(k=lowk; k<= highk; k++)
01719         for(l=lowl; l<=highl; l++)
01720           marker_resp += prev_map_activity[k][l];
01721 
01722       activity_marker[row][j] = (!has_threshold || (marker_resp > act_threshold));
01723     }
01724   }
01725 }
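
/* --- Illustrative sketch (hypothetical helper, not part of the build) ------
   The marker computation above reduces to a box sum of the previous
   activity around each unit, compared against the threshold; restated
   here over plain std::vector arrays. */
#include <algorithm>
#include <vector>

static std::vector<std::vector<bool> >
example_markers(const std::vector<std::vector<double> >& act, int radius, double threshold)
{
  const int n = (int)act.size();
  std::vector<std::vector<bool> > marker(n, std::vector<bool>(n, false));
  for (int i = 0; i < n; i++)
    for (int j = 0; j < n; j++) {
      double sum = 0.0;   /* total activity within the (2*radius+1)^2 box */
      for (int k = std::max(i-radius, 0); k <= std::min(i+radius, n-1); k++)
        for (int l = std::max(j-radius, 0); l <= std::min(j+radius, n-1); l++)
          sum += act[k][l];
      marker[i][j] = (sum > threshold);  /* worth updating only if neighbors are active */
    }
  return marker;
}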
01726 
01727 
01728 
01734 double LissomMap::lat_resp(int ui, int uj, int radius, int array_width, l_weight *weights, int weight_idx_bound)
01735 {
01736   double resp=0;
01737   
01738   const int lowi  = MAX(ui-radius,0); 
01739   const int highi = MIN(ui+radius,N-1);
01740   
01741   (void)weight_idx_bound; /* Parameter used only with assertions */
01742   
01743   for (int i=lowi; i<=highi; i++)
01744     if (ACTLIST_COUNT(prev_activity_list[i]) > 0){
01745         const int lowj  = MAX(uj-radius,0); 
01746         const int highj = MIN(uj+radius,N-1); 
01747         const int partial_idx = PARTIAL_LAT_INDEX(ui,uj,i,radius,array_width); 
01748         int high_idx, low_idx;
01749         
01750         crop_actlist(prev_activity_list[i],lowj,highj,&low_idx,&high_idx);
01751         assert (high_idx <= ACTLIST_COUNT(prev_activity_list[i]));
01752  
01753         /* This is a time-critical loop; make sure that the compiler unrolls it */
01754         for (int j=low_idx; j <= high_idx; j++) {
01755           const int idx = FULL_LAT_INDEX(partial_idx,prev_activity_list[i][j]);
01756           assert (prev_activity_list[i][j] >= lowj);
01757           assert (prev_activity_list[i][j] <= highj);
01758           assert (idx >= 0); assert (idx <= weight_idx_bound);
01759           
01760           resp += gather_activity[i][j] * weights[idx];
01761         }
01762     }
01763   return(resp);
01764 }
01765 
01766 
01767 
01769 double LissomMap::lat_resp_rad1(int ui, int uj, int radius, int array_width, int array_width_2, l_weight *weights, int weight_idx_bound)
01770 {
01771   double resp, resp1, resp2;
01772 
01773   /* This parameter is for error checking and consistency with
01774      modify_latwt_loop; it is currently unused */
01775   (void)weight_idx_bound;
01776   
01777   if (radius==0){
01778     resp = prev_map_activity[ui][uj]; /* Weights are 1.0 */
01779     return(resp);
01780   }
01781   resp = resp1 = resp2=0.0;
01782   if ((ui > 0) && (ui < N-1))    /* Case where 'ui' is away from border      */
01783     {
01784       if ((uj >0) && (uj < N-1)) /* Case where 'uj' is also away from border */
01785         {
01786           /* FULL_LAT_INDEX need not be called when either argument is 0 */
01787           resp   = prev_map_activity[ui-1][uj-1] * weights[                             0 ];
01788           resp1  = prev_map_activity[ui-1][uj  ] * weights[                             1 ];
01789           resp2  = prev_map_activity[ui-1][uj+1] * weights[                             2 ];
01790 
01791           resp  += prev_map_activity[ui  ][uj-1] * weights[               array_width     ];
01792           resp1 += prev_map_activity[ui  ][uj  ] * weights[FULL_LAT_INDEX(array_width,  1)];
01793           resp2 += prev_map_activity[ui  ][uj+1] * weights[FULL_LAT_INDEX(array_width,  2)];
01794 
01795           resp  += prev_map_activity[ui+1][uj-1] * weights[               array_width_2   ];
01796           resp1 += prev_map_activity[ui+1][uj  ] * weights[FULL_LAT_INDEX(array_width_2,1)];
01797           resp2 += prev_map_activity[ui+1][uj+1] * weights[FULL_LAT_INDEX(array_width_2,2)];
01798         }
01799       else{                     /*  'uj' is at some border */
01800         const int lowj  = MAX(uj-1,0);
01801         const int highj = MIN(uj+1,N-1);
01802         const int partial_index0 =               - uj + 1;
01803         const int partial_index1 = array_width   - uj + 1; 
01804         const int partial_index2 = array_width_2 - uj + 1;
01805         int j;
01806         
01807         for(j=lowj; j<=highj; j++){
01808           resp  += prev_map_activity[ui-1][j] * weights[FULL_LAT_INDEX(partial_index0,j)];
01809           resp1 += prev_map_activity[ui  ][j] * weights[FULL_LAT_INDEX(partial_index1,j)];
01810           resp2 += prev_map_activity[ui+1][j] * weights[FULL_LAT_INDEX(partial_index2,j)];
01811         }
01812       }
01813       resp += resp1 + resp2;
01814     }
01815   else if (ui > 0){            /* Case where 'ui' is at top border (N-1)     */
01816     const int lowj  = MAX(uj-1,0);
01817     const int highj = MIN(uj+1,N-1);
01818     const int partial_index0 = -uj + 1;
01819     const int partial_index1 = array_width - uj + 1;
01820     int j;
01821       
01822     for(j=lowj; j<=highj; j++){
01823       resp += prev_map_activity[ui-1][j] * weights[FULL_LAT_INDEX(partial_index0,j)];
01824       resp1+= prev_map_activity[ui  ][j] * weights[FULL_LAT_INDEX(partial_index1,j)];
01825     }
01826     resp += resp1;
01827   }
01828   else{                        /* Case where 'ui' is at bottom border (0)    */
01829     const int lowj  = MAX(uj-1,0);
01830     const int highj = MIN(uj+1,N-1);
01831     const int partial_index0 = array_width   - uj + 1;
01832     const int partial_index1 = array_width_2 - uj + 1;
01833     int j;
01834       
01835     for(j=lowj; j<=highj; j++){
01836       resp += prev_map_activity[ui  ][j] * weights[FULL_LAT_INDEX(partial_index0,j)];
01837       resp1+= prev_map_activity[ui+1][j] * weights[FULL_LAT_INDEX(partial_index1,j)];
01838     }
01839     resp += resp1;
01840   }
01841   return(resp);
01842 }
01843 
01844 
01849 double LissomMap::lat_resp_named(int ui, int uj, const string& name)
01850 {
01851  
01852   double sum = 0.0;
01853 
01854   // Assumptions: prev_map_activity[][] is used to calculate
01855   // the lateral contributions.
01856 
01857   Neuron&              neuron = cortex_map[uj][ui];
01858 
01859   if (name == "LateralExcitatory") {
01860     /* Array indices are swapped to translate between the correct and legacy formats. */
01861     const int lowi  = MAX(uj-exc_rad,0);
01862     const int highi = MIN(uj+exc_rad,N-1);
01863     const int lowj  = MAX(ui-exc_rad,0);
01864     const int highj = MIN(ui+exc_rad,N-1);
01865         
01866     for (int i=lowi; i<=highi; i++)
01867       for (int j=lowj; j<=highj; j++)
01868          sum += prev_map_activity[i][j] 
01869              * neuron.lat_exc_wts[LAT_INDEX(uj,ui,i,j,exc_rad,exc_array_width)];
01870 
01871   } else if (name == "LateralInhibitory") {
01872     /* Array indices are swapped to translate between the correct and legacy formats. */
01873     const int lowi  = MAX(uj-inh_rad,0);
01874     const int highi = MIN(uj+inh_rad,N-1);
01875     const int lowj  = MAX(ui-inh_rad,0);
01876     const int highj = MIN(ui+inh_rad,N-1);
01877         
01878     for (int i=lowi; i<=highi; i++)
01879       for (int j=lowj; j<=highj; j++)
01880          sum += prev_map_activity[i][j] 
01881              * neuron.lat_inh_wts[LAT_INDEX(uj,ui,i,j,inh_rad,inh_array_width)];
01882   }
01883 
01884   return sum;
01885 }
01886 
01887 
01888 
01890 void LissomMap::modify_all_weights(void)
01891 {
01892   const double alpha_aff     = params->get_with_default("alpha_input",0.0);
01893   const bool   normalize_aff = params->get_with_default("normalize_aff",False);
01894   if (alpha_aff!=0) modify_weights(alpha_aff,normalize_aff);
01895 
01896   const double alpha_exc = params->get_with_default("alpha_exc",0.0);
01897   const double alpha_inh = params->get_with_default("alpha_inh",0.0);
01898   if (alpha_exc!=0 || alpha_inh!=0) modify_lat_wts(alpha_exc,alpha_inh);
01899   
01900   ipc_barrier();      /* To ensure all wts have been modified */
01901 }
01902 
01903 
01904 
01909 void LissomMap::modify_weights(double alpha, bool normalize)
01910 {                         
01912   const bool   allow_negative_wts = params->get_with_default("allow_negative_wts",False);
01913 
01914   (void)allow_negative_wts;
01915   assert(!allow_negative_wts);
01916 
01917   if (int(aff_inputs.size()) != num_aff) {
01918     ipc_notify(IPC_ALL,IPC_ERROR,"Not modifying weights; the number of afferent inputs must be %d but is %d",
01919                num_aff, int(aff_inputs.size()));
01920     return;
01921   }
01922       
01923   int i,j,k,l,aff;
01924   
01925   if (normalize)    /* If afferents from each receptor are normalized */
01926     for(i=0; i<RN; i++)
01927       for(j=0; j<RN; j++)
01928         for (aff=0; aff<num_aff; aff++)
01929           afferent_sum[INP_INDEX(aff,i,j)] = 0.0;
01930   
01931   for(i=0; i<perows; i++){
01932     const int row = MAPROW(i);
01933     
01934     for(j=0; j<N; j++)    /* Only wts of active neurons need to be modified */
01935       if (normalize || (prev_map_activity[row][j] > 0.0)){
01936         Neuron&              neuron = cortex_map[row][j];
01937         AfferentWeightsList& awts = *(neuron.weights);
01938         
01939         /* The weights must be modified only within the receptive field. */
01940         const int lowk   = MAX(neuron.centerx-rf_radius,0   );
01941         const int highk  = MIN(neuron.centerx+rf_radius,RN-1);
01942         const int lowl   = MAX(neuron.centery-rf_radius,0   );
01943         const int highl  = MIN(neuron.centery+rf_radius,RN-1);
01944         
01945         if (prev_map_activity[row][j] > 0.0){
01946           
01947           double deltaw = alpha * prev_map_activity[row][j]; 
01948           double length = 0.0;
01949 
01950           for (aff=0; aff<num_aff; aff++)         
01951             for (k=lowk; k<=highk; k++){
01952               for (l=lowl; l<=highl; l++) {
01953                 a_weight* const wtptr = &(awts[aff][k-lowk][l-lowl]);
01954 
01955                 if (*wtptr > -DEATH_FLAG) {
01956                   const double load = (*aff_inputs[aff])[l][k];
01957                 
01958                   if ( load > 0.0){
01959                     const double delt = load * deltaw;
01960                     *wtptr += delt;
01961                     length += delt;
01962                   }
01963                 }
01964               }
01965             }
01966           
01967           if (!normalize) /* Do only if receptor_normalize is not called */
01968             normalize_afferent_wts(lowk,highk,lowl,highl,row,j,1.0/(1.0+length));
01969         }
01970         
01971         if (normalize) /* Do this whether map activity is >0 or not */
01972           for (k=lowk; k<=highk; k++)
01973             for (l=lowl; l<=highl; l++)
01974               for (aff=0; aff<num_aff; aff++) {
01975                 assert (INP_INDEX(aff,k,l) >= INP_INDEX(aff,  0,0));
01976                 assert (INP_INDEX(aff,k,l) <  INP_INDEX(aff+1,0,0));
01977                 
01978                 afferent_sum[INP_INDEX(aff,k,l)] += awts[aff][k-lowk][l-lowl];
01979               }
01980       }
01981   }
01982   
01983   if (normalize) receptor_normalize();
01984 }
01985 
01986 
01987 
01989 void LissomMap::receptor_normalize(void)
01990 {
01991   double result;
01992   int i,j,k,l,pe,aff;
01993 
01994   for(k=0; k<RN; k++)  /* Store local data in temp array */
01995     for(l=0; l<RN; l++)
01996       for (aff=0; aff<num_aff; aff++)
01997         temp_sum[INP_INDEX(aff,k,l)] = afferent_sum[INP_INDEX(aff,k,l)];
01998 
01999   ipc_barrier();
02000 
02001   for(pe=0; pe<NPEs; pe++)
02002     if (!PEISME(pe)){
02003       ipc_get_to(&aff_norm_sum[0], &afferent_sum[0], IPC_DOUBLE,num_aff*RN*RN,pe);
02004       
02005       for(k=0; k<RN; k++)
02006         for(l=0; l<RN; l++)
02007           for (aff=0; aff<num_aff; aff++)
02008             temp_sum[INP_INDEX(aff,k,l)] += aff_norm_sum[INP_INDEX(aff,k,l)];
02009     }
02010   
02011   ipc_barrier(); /* To ensure that afferent_sum is not changed before fetched */
02012 
02013   for(k=0; k<RN; k++)
02014     for (l=0; l<RN; l++)
02015       for (aff=0; aff<num_aff; aff++)
02016         afferent_sum[INP_INDEX(aff,k,l)] = 1.0/temp_sum[INP_INDEX(aff,k,l)];
02017   
02018   for(i=0; i<perows; i++){
02019     const int row = MAPROW(i);
02020     for(j=0; j<N; j++){
02021       Neuron&              neuron = cortex_map[row][j];
02022       AfferentWeightsList& awts = *(neuron.weights);
02023 
02024       const int lowk   = MAX(neuron.centerx-rf_radius,0   );
02025       const int highk  = MIN(neuron.centerx+rf_radius,RN-1);
02026       const int lowl   = MAX(neuron.centery-rf_radius,0   );
02027       const int highl  = MIN(neuron.centery+rf_radius,RN-1);
02028 
02029       double length = 0.0;
02030 
02031       /* This is a time-critical loop; make sure that the compiler unrolls it */
02032       for (k=lowk; k<=highk; k++)
02033         for (l=lowl; l<=highl; l++)
02034           for (aff=0; aff<num_aff; aff++){
02035             a_weight* const wtptr = &(awts[aff][k-lowk][l-lowl]);
02036             assert (INP_INDEX(aff,k,l) >= INP_INDEX(aff,  0,0));
02037             assert (INP_INDEX(aff,k,l) <  INP_INDEX(aff+1,0,0));
02038             
02039             result  = (*wtptr) * afferent_sum[INP_INDEX(aff,k,l)];
02040             length += result;
02041             
02042             *wtptr = result;
02043           }
02044       
02045       normalize_afferent_wts(lowk,highk,lowl,highl,row,j,1.0/length);
02046     }
02047   }
02048 }
02049 
02050 
02051 
02052 void LissomMap::normalize_afferent_wts(int lowk, int highk, int lowl, int highl, int row, int j, double length)
02053 {
02054   int k,l,aff;
02055   Neuron&              neuron = cortex_map[row][j];
02056   AfferentWeightsList& awts   = *(neuron.weights);
02057   
02058   /* This is a time-critical loop; make sure that the compiler unrolls it */
02059   for (k=lowk; k<=highk; k++)
02060     for (l=lowl; l<=highl; l++)
02061       for (aff=0; aff<num_aff; aff++) {
02062         a_weight* const wtptr = &(awts[aff][k-lowk][l-lowl]);
02063         if (*wtptr > -DEATH_FLAG)
02064           *wtptr *= length;
02065       }
02066 }
02067 
02068 
02069 
02075 void LissomMap::modify_lat_wts(double alpha_exc, double alpha_inh)
02076 {
02077   int i,j;
02078 
02079   for(i=0; i<perows; i++){
02080     const int row = MAPROW(i);
02081 
02082     for(j=0; j<N; j++)     /* Modify only neurons with nonzero activity */
02083       if (prev_map_activity[row][j] > 0.0){
02084         Neuron& neuron = cortex_map[row][j];
02085         
02086         if (exc_rad==1 && !exc_connections_killed)
02087           modify_latwt_loop_rad1(row,j,exc_rad, exc_array_width, exc_array_width_2,
02088                                  neuron.lat_exc_wts, lat_exc_dimension, alpha_exc);
02089         else 
02090           modify_latwt_loop(     row,j, exc_rad, exc_array_width,
02091                                  neuron.lat_exc_wts, lat_exc_dimension, alpha_exc);
02092         
02093         modify_latwt_loop(       row,j, inh_rad, inh_array_width,
02094                                  neuron.lat_inh_wts, lat_inh_dimension, alpha_inh);
02095       }
02096   }
02097 }
02098 
02099 
02100 
02102 void LissomMap::modify_latwt_loop(int i, int j, int radius, int array_width, l_weight *weights, int weight_idx_bound, double alpha)
02103 {
02104   int k,m;
02105   const int lowk  = MAX(i-radius,0  );
02106   const int highk = MIN(i+radius,N-1);
02107   const int lowl  = MAX(j-radius,0  );
02108   const int highl = MIN(j+radius,N-1);
02109   const double loop_const=alpha * prev_map_activity[i][j];
02110   double new_length=0;
02111 
02112   (void)weight_idx_bound; /* Parameter used only with assertions */
02113 
02114   for (k=lowk; k<=highk; k++)
02115     if ( ACTLIST_COUNT(prev_activity_list[k]) > 0) {
02116       const int partial_idx = PARTIAL_LAT_INDEX(i,j,k,radius,array_width);
02117       int low_idx, high_idx;
02118       crop_actlist(prev_activity_list[k],lowl,highl,&low_idx,&high_idx);
02119 
02120       /* This is a time-critical loop; make sure that the compiler unrolls it */
02121       for (m=low_idx; m <= high_idx; m++) {
02122         const int l   = prev_activity_list[k][m];
02123         const int idx = FULL_LAT_INDEX(partial_idx,l);
02124         assert (l >= lowl); assert (l <= highl);
02125         assert (idx >= 0);  assert (idx <= weight_idx_bound);
02126         l_weight* const wt = &(weights[idx]);
02127         if (*wt>=0) {
02128           const double delt = gather_activity[k][m] * loop_const;
02129           new_length   += delt;
02130           *wt += delt;
02131         }
02132       }
02133     }
02134   new_length = 1.0/(1.0+new_length);
02135 
02136   for (k=lowk; k<=highk; k++) {
02137     int idx = LAT_INDEX(i,j,k,lowl,radius,array_width);
02138     int l;
02139     assert (idx >= 0);
02140 
02141     /* This is a time-critical loop; make sure that the compiler unrolls it */
02142     for (l=lowl; l<=highl; l++,idx++) {
02143       assert (idx <= weight_idx_bound);
02144       l_weight* const wt = &(weights[idx]);
02145       if (*wt>=0)
02146         *wt *= new_length;
02147     }
02148   }
02149 }
02150 
02151 
02152 
02156 void LissomMap::modify_latwt_loop_rad1(int ui, int uj, int radius, int array_width, int array_width_2, l_weight *weights, int weight_idx_bound, double alpha)
02157 {
02158   int j;
02159   double length=0;
02160   double delta0=0;
02161   double delta1=0;
02162   double delta2=0;
02163   double delta3=0;
02164   const double loop_constant = alpha * prev_map_activity[ui][uj];
02165   
02166   /* These parameters are for error checking and consistency with
02167      modify_latwt_loop; they are currently unused */
02168   (void)radius;
02169   (void)weight_idx_bound;
02170   
02171   if ((ui > 0) && (ui < N-1))    /* Case where 'ui' is away from border      */
02172     {
02173       if ((uj >0) && (uj < N-1)) /* Case where 'uj' is also away from border */
02174         {
02175           /* FULL_LAT_INDEX need not be called when either argument is 0 */
02176           delta0 = prev_map_activity[ui-1][uj-1] * loop_constant; weights[                           0   ] += delta0; 
02177           delta1 = prev_map_activity[ui-1][uj  ] * loop_constant; weights[                           1   ] += delta1;
02178           delta2 = prev_map_activity[ui-1][uj+1] * loop_constant; weights[                           2   ] += delta2;
02179           length = delta0 + delta1 + delta2;
02180 
02181           delta3 = prev_map_activity[ui  ][uj-1] * loop_constant; weights[               array_width     ]+= delta3; 
02182           delta0 = prev_map_activity[ui  ][uj  ] * loop_constant; weights[FULL_LAT_INDEX(array_width,1)  ]+= delta0; 
02183           delta1 = prev_map_activity[ui  ][uj+1] * loop_constant; weights[FULL_LAT_INDEX(array_width,2)  ]+= delta1;
02184           length += delta3 + delta0 + delta1;
02185 
02186           delta2 = prev_map_activity[ui+1][uj-1] * loop_constant; weights[               array_width_2   ]+= delta2;
02187           delta3 = prev_map_activity[ui+1][uj  ] * loop_constant; weights[FULL_LAT_INDEX(array_width_2,1)]+= delta3;
02188           delta0 = prev_map_activity[ui+1][uj+1] * loop_constant; weights[FULL_LAT_INDEX(array_width_2,2)]+= delta0;
02189           length = 1.0/(1.0+length + delta2 + delta3 + delta0);
02190 
02191           for(j=0; j<3; j++){
02192             weights[                             j ] *= length;
02193             weights[FULL_LAT_INDEX(array_width  ,j)] *= length;
02194             weights[FULL_LAT_INDEX(array_width_2,j)] *= length;
02195           }
02196         }
02197       else{                     /*  'uj' is at some border */
02198         const int lowj  = MAX(uj-1,0); 
02199         const int highj = MIN(uj+1,N-1);
02200         const int index0 =               -uj+1;
02201         const int index1 = array_width   -uj+1; 
02202         const int index2 = array_width_2 -uj+1;
02203 
02204         for(j=lowj; j<=highj; j++){
02205           delta0 = prev_map_activity[ui-1][j] * loop_constant; weights[FULL_LAT_INDEX(index0,j)] += delta0; 
02206           delta1 = prev_map_activity[ui  ][j] * loop_constant; weights[FULL_LAT_INDEX(index1,j)] += delta1; 
02207           delta2 = prev_map_activity[ui+1][j] * loop_constant; weights[FULL_LAT_INDEX(index2,j)] += delta2; 
02208           length += delta0 + delta1 + delta2; 
02209         }
02210         length = 1.0/(1.0+length);
02211         for(j=lowj; j<=highj; j++){
02212           weights[FULL_LAT_INDEX(index0,j)] *= length;
02213           weights[FULL_LAT_INDEX(index1,j)] *= length;
02214           weights[FULL_LAT_INDEX(index2,j)] *= length;
02215         }
02216       }
02217     }
02218   else if (ui > 0){            /* Case where 'ui' is at top border (N-1)     */
02219     const int lowj  = MAX(uj-1,0); 
02220     const int highj = MIN(uj+1,N-1);
02221     const int index0 = -uj + 1;
02222     const int index1 = array_width - uj + 1;
02223 
02224     for(j=lowj; j<=highj; j++){
02225       delta0 = prev_map_activity[ui-1][j] * loop_constant; weights[FULL_LAT_INDEX(index0,j)] += delta0; length += delta0; 
02226       delta1 = prev_map_activity[ui  ][j] * loop_constant; weights[FULL_LAT_INDEX(index1,j)] += delta1; length += delta1;
02227     }
02228     length = 1.0/(1.0+length);
02229     for(j=lowj; j<=highj; j++){
02230       weights[FULL_LAT_INDEX(index0,j)] *= length;
02231       weights[FULL_LAT_INDEX(index1,j)] *= length;
02232     }
02233   }
02234   else{                        /* Case where 'ui' is at bottom border (0)    */
02235     const int lowj  = MAX(uj-1,0); 
02236     const int highj = MIN(uj+1,N-1);
02237     const int index1 = array_width   -uj+1;
02238     const int index2 = array_width_2 -uj+1;
02239 
02240     for(j=lowj; j<=highj; j++){
02241       delta0 = prev_map_activity[ui  ][j] * loop_constant; weights[FULL_LAT_INDEX(index1,j)] += delta0; length += delta0;
02242       delta1 = prev_map_activity[ui+1][j] * loop_constant; weights[FULL_LAT_INDEX(index2,j)] += delta1; length += delta1;
02243     }
02244     length = 1.0/(1.0+length);
02245     for(j=lowj; j<=highj; j++){
02246       weights[FULL_LAT_INDEX(index1,j)] *= length;
02247       weights[FULL_LAT_INDEX(index2,j)] *= length;
02248     }
02249   }  
02250 }
02251 
02252 
02253 
02258 void LissomMap::normalize_lat_wts(int i, int j, int radius, int array_width, l_weight *weights)
02259 {
02260   int k,l;
02261   const int lowk   = MAX(i-radius,0  );
02262   const int highk  = MIN(i+radius,N-1);
02263   const int lowl   = MAX(j-radius,0  );
02264   const int highl  = MIN(j+radius,N-1);
02265   double length = 0;
02266 
02267   for(k=lowk; k<= highk; k++){
02268     int idx = LAT_INDEX(i,j,k,lowl,radius,array_width);    
02269     assert (idx >= 0);
02270     
02271     /* This is a time-critical loop; make sure that the compiler unrolls it */
02272     for (l=lowl; l <=highl; l++,idx++) {
02273       /* assert (idx <= weight_idx_bound); */
02274       length += weights[idx];
02275     }
02276   }
02277   length = 1.0/length;
02278   
02279   for(k=lowk; k<= highk; k++){
02280     int idx = LAT_INDEX(i,j,k,lowl,radius,array_width);    
02281     assert (idx >= 0);
02282     
02283     /* This is a time-critical loop; make sure that the compiler unrolls it */
02284     for (l=lowl; l <=highl; l++,idx++){
02285       /* assert (idx <= weight_idx_bound); */
02286       weights[idx] *= length;
02287     }
02288   }
02289 }
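
/* --- Illustrative sketch (hypothetical helper, not part of the build) ------
   The normalization above is purely divisive: sum the weights in the
   window and scale them so they total 1.  Restated for a flat array;
   for simplicity this sketch skips negative (pruned) entries. */
#include <cstddef>

static void example_divisive_normalize(l_weight* w, std::size_t n)
{
  double length = 0.0;
  for (std::size_t i = 0; i < n; i++)
    if (w[i] >= 0) length += w[i];        /* sum the live weights         */
  if (length <= 0.0) return;              /* nothing to normalize         */
  const double scale = 1.0/length;
  for (std::size_t i = 0; i < n; i++)
    if (w[i] >= 0) w[i] *= scale;         /* live weights now sum to 1.0  */
}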
02290 
02291 
02292 
02297 double LissomMap::sigmoid(double activity, double sigdelta, double sigbeta) /* Piecewise-linear approximation of the sigmoid */
02298                                       /* sigdelta and sigbeta are the lower & upper thresholds */
02299 {
02300 #ifdef DEBUG_SIGMOID
02301   ipc_notify(IPC_ALL,IPC_VERBOSE,"activity=%.6f; sigdelta=%.6f, sigbeta=%.6f",
02302              (double)activity,(double)sigdelta,(double)sigbeta);
02303 #endif
02304   if      (activity < sigdelta) return(0.0);
02305   else if (activity > sigbeta)  return(1.0);
02306   else return( (activity-sigdelta)/(sigbeta-sigdelta) );
02307 }
02308 
02313 double LissomMap::truesigmoid(double activity, double beta)
02314 {
02315         double ex = exp(-2*beta*activity);
02316         return ((1-ex)/(1+ex));
02317 }
02318 
02323 double LissomMap::truesigmoidprime(double sigmoidactivity, double beta)
02324 {
02325         return beta*(1-sigmoidactivity*sigmoidactivity);        
02326 }
02327 
02332 double LissomMap::positivesigmoid(double activity, double beta)
02333 {
02334         return 1.0/(1.0+exp(-(activity-0.5)*beta));
02335 }
02336 
02341 double LissomMap::logsigmoid(double activity, double beta)
02342 {
02343         return (activity>0.0)?(log(1+activity*beta)):0.0;
02344 }
02345 
02350 double LissomMap::logsigmoidprime(double activity, double beta)
02351 {
02352         return (activity>0.0)?(beta/(1+activity*beta)):0.0;
02353 }
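
/* --- Illustrative sketch (hypothetical helper, not part of the build) ------
   Numerical sanity checks for the activation functions above, using
   hypothetical parameter values: the linear approximation maps the
   midpoint of [sigdelta,sigbeta] to 0.5, truesigmoid(x,beta) equals
   tanh(beta*x), and truesigmoidprime is its derivative. */
#include <cassert>
#include <cmath>

static void example_activation_checks()
{
  const double sigdelta = 0.1, sigbeta = 0.65;            /* lower/upper thresholds */

  /* Midpoint of the linear ramp maps to 0.5. */
  const double mid = 0.5*(sigdelta+sigbeta);
  assert(std::fabs((mid-sigdelta)/(sigbeta-sigdelta) - 0.5) < 1e-12);

  /* truesigmoid(x,beta) == tanh(beta*x) ... */
  const double beta = 2.0, x = 0.3;
  const double ex = exp(-2.0*beta*x);
  const double y  = (1.0-ex)/(1.0+ex);
  assert(std::fabs(y - std::tanh(beta*x)) < 1e-12);

  /* ... and truesigmoidprime(y,beta) = beta*(1-y*y) matches a central
     finite-difference estimate of its slope. */
  const double h  = 1e-6;
  const double e1 = exp(-2.0*beta*(x+h)), e2 = exp(-2.0*beta*(x-h));
  const double dy = ((1.0-e1)/(1.0+e1) - (1.0-e2)/(1.0+e2)) / (2.0*h);
  assert(std::fabs(dy - beta*(1.0-y*y)) < 1e-6);
}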
02354 
02355 /******************************************************************************/
02356 /* Other routines                                                             */
02357 /******************************************************************************/
02358 
02360 double LissomMap::size_connection_bytes() const
02361 {
02362   double memsize = (double(N)*N * ( (2*rf_radius +1)*(2*rf_radius +1)*sizeof(a_weight)*num_aff +
02363                                     (2*exc_rad   +1)*(2*exc_rad   +1)*sizeof(l_weight) +
02364                                     (2*inh_rad   +1)*(2*inh_rad   +1)*sizeof(l_weight) ))/NPEs;
02365   
02366   return memsize;
02367 }
02368 
02369 
02371 double LissomMap::size_unique_connections() const
02372 {
02373   return        N*N * ( (2*rf_radius +1)*(2*rf_radius +1)*num_aff +
02374                         (2*exc_rad   +1)*(2*exc_rad   +1) +
02375                         (2*inh_rad   +1)*(2*inh_rad   +1) );
02376 }
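
/* Worked example (hypothetical parameter values): with N=48, rf_radius=6,
   exc_rad=2, inh_rad=9 and num_aff=2, each neuron has 13*13*2 = 338 afferent,
   5*5 = 25 excitatory and 19*19 = 361 inhibitory connections, so the map
   holds 48*48 * (338 + 25 + 361) = 1,668,096 unique connections in total. */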
02377 
02378 
02380 void LissomMap::clear_weights(void)
02381 {
02382   int local_row, j, k, aff, x, y;
02383 
02384   for(local_row=0; local_row<perows; local_row++){
02385     const int map_row = MAPROW(local_row);
02386 
02387     for(j=0; j<N; j++) {
02388       Neuron&              neuron = cortex_map[map_row][j];
02389       AfferentWeightsList& awts = *(neuron.weights);
02390 
02391       neuron.centerx = 0;
02392       neuron.centery = 0;
02393       
02394       for(k=0; k<lat_exc_dimension; k++)
02395         neuron.lat_exc_wts[k]   = -DEATH_FLAG;
02396       for(k=0; k<lat_inh_dimension; k++)
02397         neuron.lat_inh_wts[k]   = -DEATH_FLAG; 
02398       for(aff=0; aff<num_aff; aff++)
02399         for(x=0; x<aff_array_width; x++)
02400           for(y=0; y<aff_array_width; y++)
02401             awts[aff][x][y] = -DEATH_FLAG;  
02402     }
02403   }
02404 }
02405 
02406 
02407 
02416 void LissomMap::init_weights(bool init_weights)
02417 {
02418   const double   afferent_randomness = params->get_with_default("randomness",0.0);
02419   const double   lat_exc_randomness  = params->get_with_default("lat_exc_randomness",0.0);
02420   const double   lat_inh_randomness  = params->get_with_default("lat_inh_randomness",0.0);
02421   const Tristate preset_lat_wts      = params->get_with_default("preset_lat_wts",False);
02422   const Tristate preset_aff_wts      = params->get_with_default("preset_aff_wts",False);
02423   const double   preset_sigma_aff    = params->get_with_default("preset_sigma_aff",0.0);
02424   const double   preset_sigma_lat    = params->get_with_default("preset_sigma_lat",0.0);
02425   const double   preset_sigma_exc    = params->get_with_default("preset_sigma_exc",0.0);
02426   const double   preset_sigma_inh    = params->get_with_default("preset_sigma_inh",0.0);
02427   const bool     circular_aff_wts    = params->get_with_default("circular_aff_wts",True);
02428   const bool     circular_lat_wts    = params->get_with_default("circular_lat_wts",True);
02429   const bool     smooth              = params->get_with_default("smooth_circular_outlines",False);
02430   const double   smooth_trim         = params->get_with_default("smooth_circular_radius_trim",0.0);
02431   const double   rf_radius_float     = params->get_with_default("rf_radius",0.0);
02432   const double   exc_rad_float       = params->get_with_default("exc_rad",0.0);
02433   const double   inh_rad_float       = params->get_with_default("inh_rad",0.0);
02434 
02435   const double preset_sigma_e =
02436    (preset_sigma_exc  != Uninitialized ? preset_sigma_exc : 
02437     (preset_sigma_lat != Uninitialized ? preset_sigma_lat :
02438      0.9*exc_rad));
02439 
02440   const double preset_sigma_i =
02441    (preset_sigma_inh  != Uninitialized ? preset_sigma_inh : 
02442     (preset_sigma_lat != Uninitialized ? preset_sigma_lat*MIN(1,sqrt(inh_rad)) :
02443      0.9*inh_rad));
02444 
02445   /* Ensure that all weights are known. */
02446   if (init_weights) {
02447     clear_weights();
02448     
02449     for(int i=0; i<perows; i++){
02450       const int row = MAPROW(i);
02451       /* To make weights generated independent of NPEs */
02452       shuffled_rand_reset(ShuffledRandom<double>::default_seed+123456+row);
02453       for(int j=0; j<N; j++) {
02454         /* Set up each set of weights */
02455         Neuron& neuron = cortex_map[row][j];
02456         
02457         setup_afferent_weights(neuron, *(neuron.weights),
02458                                row,j, rf_radius, rf_radius_float*rf_radius_float,
02459                                preset_aff_wts, preset_sigma_aff, afferent_randomness,
02460                                circular_aff_wts, smooth, rf_radius_float-rf_radius, smooth_trim);
02461 
02462         setup_lateral_weights(row,j, exc_rad, exc_array_width,neuron.lat_exc_wts,
02463                               preset_lat_wts, preset_sigma_e, lat_exc_randomness,
02464                               circular_lat_wts, smooth, exc_rad_float-exc_rad, smooth_trim, &exc_connections_killed);
02465         setup_lateral_weights(row,j, inh_rad, inh_array_width,neuron.lat_inh_wts,
02466                               preset_lat_wts, preset_sigma_i, lat_inh_randomness,
02467                               circular_lat_wts, smooth, inh_rad_float-inh_rad, smooth_trim, &inh_connections_killed);
02468       }
02469     }
02470   }
02471   
02472   /* Store one neuron with pre-set weights for comparison later. */
02473   {
02474     Neuron neuron; /* Dummy; unused */
02475     const int pos=int(N/2)-1; /* Makes sure the RF is centered on a neuron */
02476 
02477     setup_afferent_weights(neuron, reference_wts.weights,
02478                            pos,pos, rf_radius, rf_radius_float*rf_radius_float,
02479                            true, preset_sigma_aff, afferent_randomness,
02480                            circular_aff_wts, smooth, rf_radius_float-rf_radius, smooth_trim, RN/2+0.5, RN/2+0.5);
02481 
02482     setup_lateral_weights(pos,pos, exc_rad, exc_array_width,&(reference_wts.lat_exc_wts[0]),
02483                           true, preset_sigma_e, lat_exc_randomness,
02484                           circular_lat_wts, smooth, exc_rad_float-exc_rad, smooth_trim, &exc_connections_killed);
02485     setup_lateral_weights(pos,pos, inh_rad, inh_array_width,&(reference_wts.lat_inh_wts[0]),
02486                           true, preset_sigma_i, lat_inh_randomness,
02487                           circular_lat_wts, smooth, inh_rad_float-inh_rad, smooth_trim, &inh_connections_killed);
02488   }
02489 }
02490 
02491 
02492 void LissomMap::setup_afferent_weights(Neuron& neuron, AfferentWeightsList& awts,
02493                                        int ui, int uj, int radius, double radius_sq,
02494                                        int preset, double sigma, double amt_of_randomness,
02495                                        bool circular, bool smooth,
02496                                        double radius_trim, double smooth_trim,
02497                                        double force_cx, double force_cy)
02498 {
02499   const int    afferent_edge_buffer = params->get_with_default("retina_edge_buffer",0);
02500   const bool   match_weights        = params->get_with_default("force_matching_initial_weights",True);
02501   const double cortical_image_width = RN-2*afferent_edge_buffer;
02502   
02503   /* Choose receptive field center with a random degree of scatter. */
02504   const double cx = (force_cx>=0? force_cx : afferent_edge_buffer + cortical_image_width *
02505     ((ui+0.5)/double(N) + amt_of_randomness*(2.0*shuffled_rand()-1.0)));
02506   
02507   const double cy = (force_cy>=0? force_cy : afferent_edge_buffer + cortical_image_width *
02508     ((uj+0.5)/double(N) + amt_of_randomness*(2.0*shuffled_rand()-1.0)));
02509 
02510   /* The RF center is constrained to be an integer and to be located 
02511      within the bounds of the input surface, because of how the lowk,
02512      highk, etc. are calculated and used in the rest of the program. */
02513 
02514   neuron.centerx = MAX(0,MIN(RN-1,int(cx)));
02515   neuron.centery = MAX(0,MIN(RN-1,int(cy)));
02516 
02517   const int lowk  = (force_cx>=0? neuron.centerx-radius : MAX(neuron.centerx-radius,0   ));
02518   const int highk = (force_cx>=0? neuron.centerx+radius : MIN(neuron.centerx+radius,RN-1));
02519   const int lowl  = (force_cy>=0? neuron.centery-radius : MAX(neuron.centery-radius,0   ));
02520   const int highl = (force_cy>=0? neuron.centery+radius : MIN(neuron.centery+radius,RN-1));
02521   
02522   /* Distribute weights in the receptive field randomly, or (if
02523      preset_weights is on), preset the weights to gaussians of given
02524      sigma.  Circular receptive fields are accomplished by killing
02525      all the connections that fall outside a circular radius. */
02526   double length=0.0;
02527   if (preset){                                /* Initialize to Gaussian */
02528     for (int k=lowk; k <=highk; k++) 
02529       for (int l=lowl; l<=highl; l++){
02530         double value;
02531         double xsq = std::abs(cx-k-0.5); /* Uses full floating-point precision */
02532         double ysq = std::abs(cy-l-0.5); /*  of the center point */
02533         xsq *= xsq;
02534         ysq *= ysq;
02535         value = exp(-((xsq+ysq)/(sigma*sigma)));
02536         for (int aff=0; aff<num_aff; aff++)
02537           awts[aff][k-lowk][l-lowl] = value;
02538         length += num_aff*value;
02539       }
02540   }
02541   
02542   else{                                       /* Initialize randomly    */
02543     for (int k=lowk; k <=highk; k++) 
02544       for (int l=lowl; l<=highl; l++){
02545         if (match_weights) {
02546           double value = shuffled_rand();
02547           for (int aff=0; aff<num_aff; aff++)
02548             awts[aff][k-lowk][l-lowl] = value;
02549           length += num_aff*value;
02550         }
02551         else {
02552           for (int aff=0; aff<num_aff; aff++) {
02553             double value = shuffled_rand();
02554             awts[aff][k-lowk][l-lowl] = value;
02555             length += value;
02556           }
02557         }
02558       }
02559   }
02560   
02561   /* Initialize all connections outside the circle to negative, if appropriate */
02562   if (circular)
02563     length = prune_circular_aff_weights(neuron.centerx,neuron.centery,
02564                                         radius, awts,
02565                                         smooth, radius_trim, smooth_trim, length);
02566   
02567   /* Normalize */
02568   length = 1.0/length;
02569   for (int k=lowk; k <=highk; k++) 
02570     for (int l=lowl; l<=highl; l++)
02571       if (!circular || WITHIN_RADIUS(k-neuron.centerx,l-neuron.centery,radius_sq))
02572         for (int aff=0; aff<num_aff; aff++) 
02573           awts[aff][k-lowk][l-lowl] *= length;
02574 }
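
/* --- Illustrative sketch (hypothetical helper, not part of the build) ------
   The preset (Gaussian) branch above, followed by the final unit-sum
   normalization, for a single afferent over a rectangular window; the
   circular pruning and random initialization are omitted. */
#include <cmath>
#include <vector>

static std::vector<std::vector<double> >
example_gaussian_rf(double cx, double cy, int lowk, int highk, int lowl, int highl, double sigma)
{
  std::vector<std::vector<double> > rf(highk-lowk+1, std::vector<double>(highl-lowl+1));
  double length = 0.0;
  for (int k = lowk; k <= highk; k++)
    for (int l = lowl; l <= highl; l++) {
      const double dx = cx - k - 0.5;   /* full floating-point center, as above */
      const double dy = cy - l - 0.5;
      rf[k-lowk][l-lowl] = exp(-(dx*dx + dy*dy)/(sigma*sigma));
      length += rf[k-lowk][l-lowl];
    }
  for (int k = lowk; k <= highk; k++)   /* divisive normalization to unit sum */
    for (int l = lowl; l <= highl; l++)
      rf[k-lowk][l-lowl] /= length;
  return rf;
}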
02575 
02576 
02577 
02586 void LissomMap::setup_lateral_weights(int ui, int uj, int radius,
02587                                       int array_width, l_weight *weights,
02588                                       int preset, double sigma, double amt_of_randomness,
02589                                       bool circular, bool smooth, 
02590                                       double radius_trim, double smooth_trim,
02591                                       int* con_killed_flag)
02592 {
02593   int k,l;
02594   
02595   const int lowk  = MAX(ui-radius,0  );
02596   const int highk = MIN(ui+radius,N-1);
02597   const int lowl  = MAX(uj-radius,0  );
02598   const int highl = MIN(uj+radius,N-1);
02599   
02600   const int max_num_connections = (highk-lowk+1)*(highl-lowl+1);
02601   const double mean_wt = 1.0/max_num_connections;
02602   
02603   if (preset)
02604     for(k=lowk; k<=highk; k++)        
02605       for(l=lowl; l<=highl; l++){
02606         double xsq = (double)(k-ui); 
02607         double ysq = (double)(l-uj);
02608         xsq *= xsq;
02609         ysq *= ysq;
02610         weights[LAT_INDEX(ui,uj,k,l,radius,array_width)] = 
02611           mean_wt * exp( -((xsq+ysq)/(sigma*sigma)));
02612       }
02613   else /* Init weights randomly */
02614     for(k=lowk; k<=highk; k++)        
02615       for(l=lowl; l<=highl; l++)
02616         weights[LAT_INDEX(ui,uj,k,l,radius,array_width)] = 
02617           dist_lat_wt(mean_wt,amt_of_randomness);
02618   
02619   if (circular) {
02620     prune_circular_lat_weights(ui,uj,radius, array_width, weights,
02621                                smooth, radius_trim, smooth_trim);
02622     *con_killed_flag=True;
02623   }
02624   
02625   normalize_lat_wts(ui,uj,radius,array_width,weights);
02626 }
02627 
02628 
02629 
02631 void LissomMap::prune_circular_lat_weights(int ui, int uj, int radius, 
02632                                            int array_width, l_weight *weights,
02633                                            bool smooth, double radius_trim, double smooth_trim )
02634 {
02635   const double radius_float    = radius+radius_trim;
02636   const double radius_sq       = radius_float*radius_float;
02637   const double smooth_start    = radius+smooth_trim;
02638   const double smooth_start_sq = smooth_start*smooth_start;
02639   const double smooth_rad      = (radius_float-smooth_start)/2;
02640   const double smooth_rad_sq   = smooth_rad*smooth_rad;
02641     
02642   const int lowk  = MAX(ui-radius,0  );
02643   const int highk = MIN(ui+radius,N-1);
02644   const int lowl  = MAX(uj-radius,0  );
02645   const int highl = MIN(uj+radius,N-1);
02646 
02647   int k,l;
02648   
02649   for (k=lowk; k <=highk; k++)
02650     for (l=lowl; l<=highl; l++) {
02651       const double xdist    = k-ui;
02652       const double ydist    = l-uj;
02653       const double xdist_sq = xdist*xdist;
02654       const double ydist_sq = ydist*ydist;
02655       const double rdist_sq = xdist_sq+ydist_sq;
02656       
02657       if (radius>1 && !WITHIN_RADIUS_SQ(xdist_sq,ydist_sq,radius_sq))
02658         weights[LAT_INDEX(ui,uj,k,l,radius,array_width)] = -DEATH_FLAG;
02659       
02660       else if (smooth && (rdist_sq >= smooth_start_sq)) {
02661         double dist_sq = sqrt(rdist_sq)-smooth_start;
02662         dist_sq*=dist_sq;
02663         
02664         weights[LAT_INDEX(ui,uj,k,l,radius,array_width)] = 
02665           weights[LAT_INDEX(ui,uj,k,l,radius,array_width)] *
02666           exp( -(dist_sq/smooth_rad_sq) );
02667       }
02668     }
02669 }
02670 
02671 
02672 
02674 double LissomMap::prune_circular_aff_weights(int ui, int uj, int radius, 
02675                                              AfferentWeightsList& weights,
02676                                              bool smooth, double radius_trim, double smooth_trim,
02677                                              double length)
02678 {
02679   const double radius_float    = radius+radius_trim;
02680   const double radius_sq       = radius_float*radius_float;
02681   const double smooth_start    = radius+smooth_trim;
02682   const double smooth_start_sq = smooth_start*smooth_start;
02683   const double smooth_rad      = (radius_float-smooth_start)/2;
02684   const double smooth_rad_sq   = smooth_rad*smooth_rad;
02685     
02686   const int lowk  = MAX(ui-radius,0   );
02687   const int highk = MIN(ui+radius,RN-1);
02688   const int lowl  = MAX(uj-radius,0   );
02689   const int highl = MIN(uj+radius,RN-1);
02690 
02691   int k,l;
02692   
02693   for (k=lowk; k <=highk; k++)
02694     for (l=lowl; l<=highl; l++) {
02695       const double xdist    = k-ui;
02696       const double ydist    = l-uj;
02697       const double xdist_sq = xdist*xdist;
02698       const double ydist_sq = ydist*ydist;
02699       const double rdist_sq = xdist_sq+ydist_sq;
02700       
02701       int aff;
02702       
02703       if (radius>1 && !WITHIN_RADIUS_SQ(xdist_sq,ydist_sq,radius_sq))
02704         for (aff=0; aff<num_aff; aff++) {
02705           /* Kill connection */
02706           length -= weights[aff][k-lowk][l-lowl];
02707           weights[aff][k-lowk][l-lowl] = -DEATH_FLAG;
02708         }
02709       
02710       else if (smooth && (rdist_sq >= smooth_start_sq)) {
02711         for (aff=0; aff<num_aff; aff++) {
02712           double dist_sq = sqrt(rdist_sq)-smooth_start;
02713           a_weight init_weight = weights[aff][k-lowk][l-lowl];
02714           dist_sq*=dist_sq;
02715           
02716           weights[aff][k-lowk][l-lowl] = 
02717             init_weight * exp( -(dist_sq/smooth_rad_sq) );
02718           length += weights[aff][k-lowk][l-lowl]-init_weight;
02719         }
02720       }
02721     }
02722 
02723   return length;
02724 }
02725 
02726 
02727 
02732 double LissomMap::dist_lat_wt(double value, double amt_of_randomness)
02733 /* Randomness is in range 0-1 */
02734 {
02735   double low, add;
02736 
02737   low = value * (1.0 - amt_of_randomness)  ;
02738   add = 2*value*amt_of_randomness*(ShuffledRandom<double>::plain_rand());
02739 
02740   return(low+add);
02741 }
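
/* Worked example (hypothetical numbers, assuming plain_rand() returns a
   uniform deviate in [0,1)): for value = 0.04 and amt_of_randomness = 0.25,
   low = 0.04*0.75 = 0.03 and add is uniform in [0, 0.02), so the returned
   weight is uniform in [0.03, 0.05) with mean 0.04; the mean lateral weight
   is preserved. */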
02742 
02743 
02744 
02750 void LissomMap::prune()
02751 {
02752   int i,j,k;
02753   int living_inh_connections = 0;
02754   int dead_inh_connections   = 0;
02755   int killed_inh_connections = 0;
02756   //int theoretical_total_inh_connections  = perows*N*lat_inh_dimension;
02757 
02758   const double i_death = params->get_with_default("inh_death",1.0);
02759   const double e_death = params->get_with_default("exc_death",1.0);
02760   const bool   e_kill  = (e_death > 0);
02761   const bool   i_kill  = (i_death > 0);
02762   
02763   if (e_kill)  exc_connections_killed = True;
02764   if (i_kill)  inh_connections_killed = True;
02765 
02766   const int max_num_inh = (2*inh_rad+1)*(2*inh_rad+1);
02767   const int max_num_exc = (2*exc_rad+1)*(2*exc_rad+1);
02768     
02769   for (i=0; i<perows; i++){                       /* For each row in the PE   */
02770     const int row = MAPROW(i);
02771     for(j=0; j<N; j++) {                          /* For each column          */
02772       
02773       Neuron& neuron = cortex_map[row][j];
02774       
02775       if (e_kill) {
02776         /* Adjust killing to account for actual number of connections */
02777         const int radius= exc_rad;
02778         const int lowk  = MAX(i-radius,0  );
02779         const int highk = MIN(i+radius,N-1);
02780         const int lowl  = MAX(j-radius,0  );
02781         const int highl = MIN(j+radius,N-1);
02782         const int num   = (highk-lowk+1)*(highl-lowl+1);
02783         const double death = e_death*double(max_num_exc)/double(num);
02784           
02785         for(k=0; k<lat_exc_dimension; k++) {
02786           l_weight& wt = neuron.lat_exc_wts[k];
02787           if (wt < death) wt = -DEATH_FLAG;
02788         }
02789         normalize_lat_wts(row,j,exc_rad,exc_array_width,neuron.lat_exc_wts);
02790       }
02791         
02792       if (i_kill) {
02793         /* Adjust killing to account for actual number of connections */
02794         const int radius= inh_rad;
02795         const int lowk  = MAX(i-radius,0  );
02796         const int highk = MIN(i+radius,N-1);
02797         const int lowl  = MAX(j-radius,0  );
02798         const int highl = MIN(j+radius,N-1);
02799         const int num   = (highk-lowk+1)*(highl-lowl+1);
02800         const double death = i_death*double(max_num_inh)/double(num);
02801 
02802         for(k=0; k<lat_inh_dimension; k++) {
02803           l_weight& wt = neuron.lat_inh_wts[k];
02804           if (wt < death) {
02805             dead_inh_connections++;
02806             /* count this connection as killed if it was live before */
02807             if (wt > 0) killed_inh_connections++;
02808             wt = -DEATH_FLAG;
02809           }
02810           else living_inh_connections++;
02811         }
02812         normalize_lat_wts(row,j,inh_rad,inh_array_width,neuron.lat_inh_wts);
02813       }
02814     }
02815   }
02816   
02817   ipc_pe_msg_delay(1.0);
02818   int total_inh_connections = living_inh_connections + dead_inh_connections;
02819   if (i_kill)
02820     ipc_notify(IPC_ALL,IPC_STD,"%s: Killed %g%% (%d/%d) of the inhibitory connections, leaving %d",
02821                name().c_str(),(double)(total_inh_connections>0 ?
02822                                        (100*((float)killed_inh_connections)/total_inh_connections) : 0.0),
02823                killed_inh_connections, total_inh_connections, living_inh_connections);
02824 }
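
/* Worked example (hypothetical numbers): with inh_rad = 9, a full
   neighborhood holds max_num_inh = 19*19 = 361 connections and its mean
   normalized weight is 1/361.  A corner neuron with only num = 10*10 = 100
   connections has mean weight 1/100, so its threshold becomes
   i_death*361/100; either way a connection dies when it falls below
   i_death*361 times that neuron's own mean weight, keeping the pruning
   criterion comparable across the map. */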
02825 
02826 
02827 
02829 double LissomMap::change_lateral_exc_radius(double old_radius, double new_radius)
02830 {
02831   int i,j;
02832   const bool   smooth       = params->get_with_default("smooth_circular_outlines",False);
02833   const bool   circular     = params->get_with_default("circular_lat_wts",True);
02834   const double smooth_trim  = params->get_with_default("smooth_circular_radius_trim",0.0);
02835   
02836   /* Rearrange and renormalize lateral weights, if needed */
02837   if (new_radius > old_radius) {
02838     ipc_notify(IPC_ONE,IPC_ERROR,"Increasing excitatory radius is not supported");
02839     new_radius=old_radius;
02840   }
02841   
02842   else if (new_radius < old_radius)
02843     for (i=0; i<perows; i++) {
02844       const int row = MAPROW(i);
02845       for (j=0; j<N; j++)
02846         reduce_lateral_radius(row,j,old_radius,new_radius,exc_array_width,
02847                               cortex_map[row][j].lat_exc_wts,circular,
02848                               smooth, smooth_trim);
02849     }
02850   
02851   return new_radius;
02852 }
02853 
02854 
02864 void LissomMap::reduce_lateral_radius(int ui, int uj,
02865                                       double old_radius_float, double new_radius_float,
02866                                       int array_width, l_weight *weights, bool circular,
02867                                       bool smooth, double smooth_trim)
02868 {
02869   int k,l;
02870   const int old_radius=array_radius_int(old_radius_float);
02871   const int new_radius=array_radius_int(new_radius_float);
02872   const int lowk  = MAX(ui-new_radius,0  );
02873   const int highk = MIN(ui+new_radius,N-1);
02874   const int lowl  = MAX(uj-new_radius,0  );
02875   const int highl = MIN(uj+new_radius,N-1);
02876         
02877   for(k=lowk; k<= highk; k++){
02878     const int partial_index_new = PARTIAL_LAT_INDEX(ui,uj,k,new_radius,array_width);
02879     const int partial_index_old = PARTIAL_LAT_INDEX(ui,uj,k,old_radius,array_width);
02880     
02881     for(l=lowl; l<=highl; l++)
02882       weights[FULL_LAT_INDEX(partial_index_new,l)] = 
02883         weights[FULL_LAT_INDEX(partial_index_old,l)];
02884   }
02885 
02886   if (circular)
02887     prune_circular_lat_weights(ui,uj,new_radius, array_width, weights,
02888                                smooth, new_radius_float-new_radius, smooth_trim);
02889       
02890   /* Renormalize the copied weights */
02891   normalize_lat_wts(ui,uj,new_radius,array_width,weights);
02892 }
02893 
02894 
02895 
02904 void LissomMap::collect_activation_data(int dest_pe)
02905 {
02906   (void)dest_pe;
02907 #if (NPES>1)
02908   int i,pe;
02909   ipc_barrier(); /* Ensure that data is ready to copy */
02910 
02911   /* Currently collects initial activity from each processor */
02912 
02913   if (dest_pe==Uninitialized)
02914     for(pe=0; pe<NPEs; pe++) {
02915       if (!PEISME(pe))
02916         for(i=0; i<perows; i++)  {
02917           ipc_put(init_activity[MAPROW(i)], IPC_DOUBLE, N, pe);
02918           /* ipc_pe_msg_delay(1.0);
02919           ipc_notify(IPC_ALL,IPC_STD,"Copying data from row %d to PE%d",MAPROW(i),pe); */
02920         }
02921     }
02922   
02923   else if (!PEISME(dest_pe))
02924     for(i=0; i<perows; i++)  
02925       ipc_put(init_activity[MAPROW(i)], IPC_DOUBLE, N, dest_pe);
02926   
02927   ipc_barrier(); /* Ensure that data has arrived */
02928 #endif
02929 } 
02930 
02931 
02932 
02938 void LissomMap::save_state(const string& filename)
02939 {
02940 #ifdef NO_IO
02941 #else
02942   FILE *fp=NULL;
02943 
02944 #ifdef VERIFY_WEIGHT_SAVES 
02945   {
02946     int row, i, j, k;
02947     for(i=0; i<perows; i++){
02948       row = MAPROW(i);
02949       for(j=0; j<N; j++) {
02950         Neuron&              neuron   = cortex_map[row][j];
02951         AfferentWeightsList& awts     = *(neuron.weights);
02952         Neuron&              neuron_o = Orig_map[row][j];
02953         AfferentWeightsList& awts_o   = *(neuron_o.weights);
02954 
02955         /* Copy weights from original arrays */
02956         neuron_o.centerx = neuron.centerx;
02957         neuron_o.centery = neuron.centery;
02958         for(k=0; k<lat_exc_dimension; k++)
02959           neuron_o.lat_exc_wts[k] = neuron.lat_exc_wts[k];
02960         for(k=0; k<lat_inh_dimension; k++)
02961           neuron_o.lat_inh_wts[k] = neuron.lat_inh_wts[k];
02962 
02963         for(int aff=0; aff<num_aff; aff++)
02964           for(int x=0; x<aff_array_width; x++)
02965             for(int y=0; y<aff_array_width; y++)
02966               awts_o[aff][x][y] = awts[aff][x][y];
02967       }
02968     }
02969   }
02970   verify_saved_weights(0.0);
02971 #endif
02972 
02973   /* The last PE is in charge of writing to the file */
02974   if (AMYOUNGESTPE) {
02975     if ((fp=fopen(filename.c_str(), "wb+"))==NULL){
02976       ipc_abort(IPC_EXIT_FILE_PROBLEM,"Weights file %s couldn't be opened for saving", filename.c_str());
02977     }
02978 
02979     ipc_notify(IPC_ALL,IPC_SUMMARY,"Saving weights to file %s",filename.c_str());
02980   }
02981 
02982   
02983   
02984   /* All PEs process the data */
02985   LISSOMBinaryStateSaver lss(*this); // When this file becomes a class, lss should be a member variable.
02986   StateSaver& ss=lss;
02987   ss.write(fp);
02988   
02989 
02990   if (fp!=NULL)
02991     fclose(fp);
02992   
02993   /* Allows saving in middle of training by reloading the weights destroyed by saving */
02994 #ifdef RELOAD_AFTER_WEIGHT_SAVE
02995   ipc_barrier();
02996   clear_weights(); /* Just to be sure it's working */
02997   const bool interactive = params->get_with_default("interactive",False);
02998   if ( !restore_state(t,filename) && !interactive)
02999     ipc_exit(IPC_EXIT_FILE_PROBLEM,"Problem with weights file");
03000 #endif
03001 
03002 #endif
03003 }  
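/* Illustration (not part of kernel.c): the VERIFY_WEIGHT_SAVES machinery above
   snapshots the weights into Orig_map, writes the state, and later compares
   the reloaded values against the snapshot.  A self-contained sketch of that
   save / reload / compare round trip for a plain weight vector; the helper
   name, file handling, and tolerance check are illustrative only, not the
   LISSOMBinaryStateSaver interface. */
#include <cmath>
#include <cstdio>
#include <vector>

static bool roundtrip_ok(const std::vector<double>& w, const char* path,
                         double tolerance)
{
  if (w.empty()) return true;              /* nothing to check */

  /* Save */
  FILE* fp = std::fopen(path, "wb");
  if (fp == NULL) return false;
  std::fwrite(&w[0], sizeof(double), w.size(), fp);
  std::fclose(fp);

  /* Reload */
  std::vector<double> r(w.size());
  fp = std::fopen(path, "rb");
  if (fp == NULL) return false;
  const std::size_t nread = std::fread(&r[0], sizeof(double), r.size(), fp);
  std::fclose(fp);
  if (nread != r.size()) return false;

  /* Compare within tolerance, in either direction */
  for (unsigned i = 0; i < w.size(); i++)
    if (std::fabs(w[i] - r[i]) > tolerance) return false;
  return true;
}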
03004 
03005 
03006 
03012 bool LissomMap::restore_state(const int, const string& filename)
03013 {
03014   if (filename=="")
03015     ipc_abort(IPC_EXIT_FILE_PROBLEM,"No weight file specified");
03016   
03017   bool status=true;
03018 #ifdef NO_IO
03019 #else
03020   FILE *fp=NULL;
03021 
03022   if (AMPARENTPE){ /* Parent does the file reading and input generation */
03023 
03024     if ((fp=fopen(filename.c_str(), "rb" ))==NULL) {
03025       /* Could add synchronization mechanism for NPES>1 instead of aborting */
03026       const bool interactive = params->get_with_default("interactive",False);
03027       if (!interactive || NPEs>1) 
03028         ipc_abort(IPC_EXIT_FILE_PROBLEM,"Wt file %s couldn't be opened", filename.c_str());
03029       else {
03030         ipc_notify(IPC_ONE,IPC_ERROR,"Wt file %s couldn't be opened", filename.c_str());
03031         return false;
03032       }
03033     }
03034   }
03035   
03036   /* Ensure that all weights are re-initialized before loading because
03037      some, e.g. killed inhibitory weights, are not initialized during
03038      the reload */
03039   // Temporarily skips clearing if loading only the afferent weights
03040   const bool aff_only = params->get_with_default("load_afferent_weights_only",False);
03041   if (!aff_only)
03042     clear_weights();
03043   
03044   /* Read & distribute the network to all PEs     */
03045   ipc_notify(IPC_ALL,IPC_SUMMARY,"Reading weights from file %s",filename.c_str());
03046   LISSOMBinaryStateSaver lss(*this); // When this file becomes a class, lss should be a member variable.
03047   StateSaver& ss=lss;
03048   if(ss.read(fp)) status=false;
03049 
03050   if (AMPARENTPE)  fclose(fp);/* Only the parent has to close the file        */
03051 
03052   /*
03053     Set these flags for safety, assuming that some connections may have
03054     been killed in the saved file.  (An explicit check could be added, but
03055     connections have almost always been killed, and the flags only affect
03056     performance, and even that only slightly.)
03057   */
03058   exc_connections_killed=True;
03059   inh_connections_killed=True;
03060   
03061 
03062   /* Test every weight */
03063 #ifdef VERIFY_WEIGHT_SAVES
03064   verify_saved_weights(0.0000001);
03065 #endif
03066 
03067 #endif
03068 
03069   return status;
03070 }
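/* Illustration (not part of kernel.c): the clear-before-load pattern used
   above.  Entries that the saved file does not mention (e.g. killed
   connections) keep their cleared value instead of whatever happened to be in
   memory before the load.  The sparse (index, value) representation and the
   helper name are hypothetical. */
#include <utility>
#include <vector>

static void clear_then_load(std::vector<double>& w,
                            const std::vector< std::pair<unsigned,double> >& entries)
{
  for (unsigned k = 0; k < w.size(); k++)
    w[k] = 0.0;                              /* analogue of clear_weights() */
  for (unsigned e = 0; e < entries.size(); e++)
    w[entries[e].first] = entries[e].second; /* only entries present in the file */
}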
03071 
03072 
03073 
03074 #ifdef VERIFY_WEIGHT_SAVES 
03075 void LissomMap::verify_saved_weights( double tolerance )
03076 {
03077   int row, i, j, aff, x, y;
03078   int aff_errors_found=0;
03079 
03080   for(i=0; i<perows; i++){
03081     row = MAPROW(i);
03082     for(j=0; j<N; j++) {
03083       Neuron&              neuron   = cortex_map[row][j];
03084       AfferentWeightsList& awts     = *(neuron.weights);
03085       Neuron&              neuron_o = Orig_map[row][j];
03086       AfferentWeightsList& awts_o   = *(neuron_o.weights);
03087       
03088       if (fabs(neuron_o.centerx - neuron.centerx) > tolerance)
03089         ipc_notify(IPC_ALL,IPC_ERROR,"row:%d j:%d       -- Centerx changed: %d -> %d",
03090                   row,j,(int)neuron_o.centerx,(int)neuron.centerx);
03091       if (fabs(neuron_o.centery - neuron.centery) > tolerance)
03092         ipc_notify(IPC_ALL,IPC_ERROR,"row:%d j:%d       -- Centery changed: %d -> %d",
03093                   row,j,(int)neuron_o.centery,(int)neuron.centery);
03094       
03095       for(aff=0; aff<num_aff; aff++)
03096         for(x=0; x<aff_array_width; x++)
03097           for(y=0; y<aff_array_width; y++)
03098             if (fabs(awts_o[aff][x][y] - awts[aff][x][y]) > tolerance) {
03099               if (++aff_errors_found < VERIFY_WEIGHT_SAVES_ERROR_LIMIT)
03100                 ipc_notify(IPC_ALL,IPC_ERROR,"row:%d j:%d aff:%d x:%d y:%d -- Afferent weights differ: %e -> %e",
03101                           row,j,aff,x,y,
03102                           (double)(awts_o[aff][x][y]),
03103                           (double)(awts[aff][x][y]));
03104               else if (aff_errors_found == VERIFY_WEIGHT_SAVES_ERROR_LIMIT)
03105                 ipc_notify(IPC_ALL,IPC_ERROR,"Reached limit for VERIFY_WEIGHT_SAVES error reporting; rest will be discarded");
03106             }
03107 
03108       verify_saved_lateral_weights(row,j, exc_rad, exc_array_width,
03109                                    neuron_o.lat_exc_wts,
03110                                    neuron.lat_exc_wts,
03111                                    tolerance,"lat_exc_wts");
03112 
03113       verify_saved_lateral_weights(row,j, inh_rad, inh_array_width,
03114                                    neuron_o.lat_inh_wts,
03115                                    neuron.lat_inh_wts,
03116                                    tolerance,"lat_inh_wts");
03117       
03118     }
03119   }
03120 }
03121 #endif
03122 
03123 
03124 
03125 #ifdef VERIFY_WEIGHT_SAVES 
03126 void LissomMap::verify_saved_lateral_weights( int i, int j, int radius, int array_width,
03127                                    l_weight *old_weights, l_weight *new_weights,
03128                                    double tolerance, const char *description)
03129 {
03130   const int lowk  = MAX(i-radius,0  );
03131   const int highk = MIN(i+radius,N-1);
03132   const int lowl  = MAX(j-radius,0  );
03133   const int highl = MIN(j+radius,N-1);
03134 
03135   static int errors_found=0;
03136   int k,l;
03137   
03138   for(k=lowk; k<= highk; k++) {
03139     const int partial_idx = PARTIAL_LAT_INDEX(i,j,k,radius,array_width);
03140     for (l=lowl; l<=highl; l++) {
03141       const int idx = FULL_LAT_INDEX(partial_idx,l);
03142       
03143       if (fabs(old_weights[idx] - new_weights[idx]) > tolerance) {
03144         if (++errors_found < VERIFY_WEIGHT_SAVES_ERROR_LIMIT)
03145           ipc_notify(IPC_ALL,IPC_ERROR,"i:%d j:%d k:%d l:%d -- %s differ: %e -> %e",
03146                     i,j,k,l,description,
03147                     (double)old_weights[idx],
03148                     (double)new_weights[idx]);
03149         else if (errors_found == VERIFY_WEIGHT_SAVES_ERROR_LIMIT)
03150           ipc_notify(IPC_ALL,IPC_ERROR,"Reached limit for VERIFY_WEIGHT_SAVES error reporting; rest will be discarded");
03151       }
03152       
03153     }
03154   }
03155 }
03156 #endif
03157 
03160 void LissomMap::backproject(const string& name, const double gammaaff) {
03161 
03162 #ifdef OBJ_LISSOM
03163   const bool   l2_norm = params->get_with_default("l2_norm",False);
03164 #endif /* OBJ_LISSOM */
03165 
03166   // This function should be called by the derived classes' no-argument
03167   // backproject(), which knows which map to call.
03168   // --- cout << " - " << name << endl;
03169 
03170   // 1. get named input (upstream's output). Cleaning up the 
03171   //    backprojection region should be done prior to calling this
03172   //    function, so it is omitted here.
03173   NeuralRegion* inp = dynamic_cast<NeuralRegion*>(get_input(name));
03174   if (!inp) {
03175     ipc_notify(IPC_ALL,IPC_ERROR,"Error: input region nonexistent");        
03176     return;
03177   }
03178   ActivityMatrix& inp_bp = inp->bp;
03179   int aff   = std::find(ISEQ(aff_names),name) - aff_names.begin();
03180 
03181   // 2. Loop through all neurons in the current NeuralRegion bp 
03182 
03183   int nr = output.nrows(), nc = output.ncols();
03184   
03185 #ifdef OBJ_LISSOM
03186   double l2_sum=0.0;
03187 
03188   // pre-calculate l2_norm if necessary
03189   if (l2_norm) {
03190     for (int i=0; i<nr; i++)
03191       for (int j=0; j<nc; j++) 
03192         l2_sum += output[i][j]*output[i][j];
03193     if (l2_sum==0.0) {
03194       ipc_notify(IPC_ALL,IPC_ERROR,"Panic: l2_norm value is 0.0");
03195     } else
03196       l2_sum = 1.0/sqrt(l2_sum);
03197   }
03198 #endif /* OBJ_LISSOM */
03199 
03200          
03201   for (int i=0; i<nr; i++)
03202     for (int j=0; j<nc; j++) {
03203   
03204       double cur_output_x_gammaaff = bp[i][j]/gammaaff;
03205 #ifdef OBJ_LISSOM
03206       if (l2_norm) 
03207         cur_output_x_gammaaff *= l2_sum;
03208 #endif /* OBJ_LISSOM */
03209   
03210       /* Array indices are swapped to translate between the correct and legacy formats. */
03211       Neuron&              neuron = cortex_map[j][i];
03212       AfferentWeightsList& awts   = *(neuron.weights);
03213       
03214       int lowk  = MAX(neuron.centery-rf_radius,0   );
03215       int highk = MIN(neuron.centery+rf_radius,RN-1);
03216       int lowl  = MAX(neuron.centerx-rf_radius,0   );
03217       int highl = MIN(neuron.centerx+rf_radius,RN-1);
03218 
03219       // backproject
03220       for (int k=lowk; k <=highk; k++)
03221         for (int l=lowl; l<=highl; l++)
03222           inp_bp[k][l] += cur_output_x_gammaaff*awts[aff][l-lowl][k-lowk];
03223     }
03224   // 2. end
03225 
03226 }
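/* Illustration (not part of kernel.c): the inner accumulation above, reduced
   to a single cortical unit and a single afferent kernel.  The names (Matrix,
   input_bp, w, lowk, lowl) are illustrative, and the legacy x/y index swap
   noted in the comment above is omitted for clarity. */
#include <vector>
typedef std::vector< std::vector<double> > Matrix;

/* Spread one unit's back-projected activity (already scaled by 1/gammaaff, as
   in the code above) over its receptive field in the input plane, weighted by
   the afferent kernel.  The caller must ensure lowk and lowl are non-negative
   and that the kernel fits inside input_bp. */
static void backproject_unit(Matrix& input_bp, double unit_bp_over_gamma,
                             const Matrix& w, int lowk, int lowl)
{
  for (unsigned k = 0; k < w.size(); k++)
    for (unsigned l = 0; l < w[k].size(); l++)
      input_bp[lowk + k][lowl + l] += unit_bp_over_gamma * w[k][l];
}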
03227 
03228 
03229 #ifdef OBJ_LISSOM
03230 
03232 void LissomMap::resp_to_residual(const string& name, const double gammaaff) {
03233 
03234         // This function should be called by the derived classes'
03235         // no-argument resp_to_residual(), which knows which map to call.
03236         //
03237         // Precondition: the upstream "residual" must already have been calculated.
03238         // Assumption: the result is ADDED to the current "output", so "output"
03239         //      should have been cleared beforehand if this is the first contribution.
03240 
03241         // 1. propagation starts at the current "residual"
03242 
03243         // 2. Loop through all neurons in the current NeuralRegion residual
03244         NeuralRegion* inp = dynamic_cast<NeuralRegion*>(get_input(name));
03245 
03246         ActivityMatrix& inp_residual = inp->residual;
03247         int aff   = std::find(ISEQ(aff_names),name) - aff_names.begin();
03248 
03249         int nr = output.nrows(), nc = output.ncols();
03250         for (int i=0; i<nr; i++)
03251           for (int j=0; j<nc; j++) {
03252 
03253             // 2.1 get named weights and bounds (upstream->downstream)
03254             Neuron&              neuron = cortex_map[j][i];
03255             AfferentWeightsList& awts   = *(neuron.weights);
03256 
03257             int lowk  = MAX(neuron.centery-rf_radius,0   );
03258             int highk = MIN(neuron.centery+rf_radius,RN-1);
03259             int lowl  = MAX(neuron.centerx-rf_radius,0   );
03260             int highl = MIN(neuron.centerx+rf_radius,RN-1);
03261 
03262             // 2.2 ffwd residual
03263             double sum = 0.0;
03264             for (int k=lowk; k <=highk; k++)
03265               for (int l=lowl; l<=highl; l++)
03266                 sum += inp_residual[k][l] * awts[aff][l-lowl][k-lowk];
03267 
03268             output[i][j] += sum / gammaaff;
03269 
03270           }
03271         // 2. end
03272 
03273 }
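/* Illustration (not part of kernel.c): resp_to_residual is the gathering
   counterpart of backproject -- instead of spreading one unit's activity over
   the input plane, it sums the input residual through the same receptive-field
   weights into that unit.  Same simplifying assumptions and illustrative names
   as the backprojection sketch above. */
#include <vector>
typedef std::vector< std::vector<double> > Matrix;

/* Gather the residual under one unit's receptive field through its afferent
   kernel and return the contribution, scaled by 1/gammaaff as in the code
   above. */
static double residual_response(const Matrix& residual, const Matrix& w,
                                int lowk, int lowl, double gammaaff)
{
  double sum = 0.0;
  for (unsigned k = 0; k < w.size(); k++)
    for (unsigned l = 0; l < w[k].size(); l++)
      sum += residual[lowk + k][lowl + l] * w[k][l];
  return sum / gammaaff;
}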
03274 
03276 void LissomMap::randomize_lat_wts(void)
03277 {
03278   int map_row, j, k;
03279   const bool   allow_negative_wts = params->get_with_default("allow_negative_wts",False);
03280 
03281   for(map_row=0; map_row<N; map_row++){
03282 
03283     for(j=0; j<N; j++) {
03284       Neuron&              neuron = cortex_map[map_row][j];
03285 
03286       double sum=0.0;
03287       for(k=0; k<lat_exc_dimension; k++) {
03288         if (allow_negative_wts || neuron.lat_exc_wts[k] > -DEATH_FLAG || 1) { /* NB: the trailing "|| 1" makes this guard a no-op */
03289                 neuron.lat_exc_wts[k]   = shuffled_rand();
03290                 sum +=  neuron.lat_exc_wts[k];
03291         } 
03292       }
03293       // normalize
03294       for(k=0; k<lat_exc_dimension; k++) {
03295         if (allow_negative_wts || neuron.lat_exc_wts[k] > -DEATH_FLAG || 1) {
03296                 neuron.lat_exc_wts[k] /= sum;
03297         } 
03298       }
03299 
03300       sum=0.0;
03301       for(k=0; k<lat_inh_dimension; k++) {
03302         if (allow_negative_wts || neuron.lat_inh_wts[k] > -DEATH_FLAG || 1) {
03303                 neuron.lat_inh_wts[k]   = shuffled_rand();
03304                 sum +=  neuron.lat_inh_wts[k];
03305         } 
03306       }
03307       // normalize
03308       for(k=0; k<lat_inh_dimension; k++) {
03309         if (allow_negative_wts || neuron.lat_inh_wts[k] > -DEATH_FLAG || 1) {
03310                 neuron.lat_inh_wts[k] /= sum;
03311         } 
03312       }
03313 
03314 
03315     }
03316   }
03317 }
03318 
03319 #endif /* OBJ_LISSOM */
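/* Illustration (not part of kernel.c): randomize_lat_wts above draws each
   lateral weight from shuffled_rand() and then divides by the running sum so
   the vector again sums to 1.0.  A standalone sketch of that draw-then-
   normalize pattern, with std::rand standing in for the project's
   shuffled_rand(). */
#include <cstdlib>
#include <vector>

static void randomize_and_normalize(std::vector<double>& w)
{
  double sum = 0.0;
  for (unsigned k = 0; k < w.size(); k++) {
    w[k] = std::rand() / (double)RAND_MAX;   /* uniform value in [0,1] */
    sum += w[k];
  }
  if (sum > 0.0)
    for (unsigned k = 0; k < w.size(); k++)
      w[k] /= sum;                           /* renormalize to sum to 1 */
}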

Generated on Mon Jan 20 02:35:44 2003 for RF-LISSOM by doxygen1.3-rc2