From 02f148f0b52df18eae7262c2f6fd69fd0d0b6bcd Mon Sep 17 00:00:00 2001 From: "Randall C. O'Reilly" Date: Tue, 21 Jan 2025 18:05:10 -0800 Subject: [PATCH] first pass implementation of new standardized 10 cycle CaBin with envelope weights integration --- axon/context.go | 5 +- axon/enumgen.go | 2 +- axon/learn-path.go | 20 +- axon/learn-path.goal | 20 +- axon/learn.go | 10 + axon/learn.goal | 10 + axon/network.go | 3 +- axon/network.goal | 3 +- axon/neuron.go | 4 +- axon/shaders/ApplyExtsNeuron.wgsl | 18 ++ axon/shaders/Beta1Neuron.wgsl | 18 ++ axon/shaders/Beta2Neuron.wgsl | 18 ++ axon/shaders/BetweenGi.wgsl | 18 ++ axon/shaders/CycleInc.wgsl | 18 ++ axon/shaders/CycleNeuron.wgsl | 18 ++ axon/shaders/CyclePost.wgsl | 18 ++ axon/shaders/DWtFromDiSyn.wgsl | 18 ++ axon/shaders/DWtSubMeanNeuron.wgsl | 18 ++ axon/shaders/DWtSyn.wgsl | 44 ++++- axon/shaders/GPUTestWrite.wgsl | 18 ++ axon/shaders/GatherSpikes.wgsl | 18 ++ axon/shaders/InitGBuffsPath.wgsl | 18 ++ axon/shaders/LayerGi.wgsl | 18 ++ axon/shaders/MinusPhaseNeuron.wgsl | 18 ++ axon/shaders/MinusPhasePool.wgsl | 18 ++ axon/shaders/MinusPhasePost.wgsl | 18 ++ axon/shaders/NewStateLayer.wgsl | 18 ++ axon/shaders/NewStateNeuron.wgsl | 18 ++ axon/shaders/PlusPhaseNeuron.wgsl | 18 ++ axon/shaders/PlusPhasePool.wgsl | 18 ++ axon/shaders/PlusPhasePost.wgsl | 18 ++ axon/shaders/PlusPhaseStartContext.wgsl | 18 ++ axon/shaders/PlusPhaseStartNeuron.wgsl | 18 ++ axon/shaders/PoolGi.wgsl | 18 ++ axon/shaders/SendSpike.wgsl | 18 ++ axon/shaders/SlowAdaptLayer.wgsl | 18 ++ axon/shaders/SlowAdaptNeuron.wgsl | 18 ++ axon/shaders/WtFromDWtLayer.wgsl | 18 ++ axon/shaders/WtFromDWtSyn.wgsl | 18 ++ axon/simstats.go | 14 +- axon/typegen.go | 5 +- chans/chanplots/ak-plot.go | 8 +- chans/chanplots/gabab-plot.go | 12 +- chans/chanplots/kir-plot.go | 8 +- chans/chanplots/mahp-plot.go | 8 +- chans/chanplots/nmda-plot.go | 8 +- chans/chanplots/sahp-plot.go | 8 +- chans/chanplots/skca-plot.go | 8 +- chans/chanplots/synca-plot.go | 4 +- chans/chanplots/vgcc-plot.go | 8 +- kinase/enumgen.go | 50 +++++ kinase/linear/linear.go | 60 +++++- kinase/linear/linear_test.go | 14 +- kinase/params.go | 97 +-------- kinase/syncabin.go | 250 ++++++++++++++++++++++++ kinase/typegen.go | 14 +- sims/bgdorsal/bg-dorsal.go | 8 +- sims/bgventral/bg-ventral.go | 4 +- sims/choose/armaze/gui.go | 2 +- sims/choose/choose.go | 2 +- sims/deepfsa/config.go | 3 + sims/deepfsa/deep-fsa.go | 6 +- sims/deepfsa/params.go | 11 +- sims/deepmove/deep-move.go | 2 +- sims/deepmusic/config.go | 3 + sims/deepmusic/configs/30notes.toml | 4 +- sims/deepmusic/configs/fullsong.toml | 4 +- sims/deepmusic/deep-music.go | 6 +- sims/deepmusic/params.go | 1 + sims/deepmusic/typegen.go | 4 +- sims/hip/hip.go | 2 +- sims/inhib/inhib.go | 2 +- sims/kinasesim/config.go | 2 +- sims/kinasesim/kinase.go | 6 +- sims/kinasesim/sim.go | 2 +- sims/lvis/config.go | 2 +- sims/lvis/lvis.go | 2 +- sims/lvis/params.go | 2 + sims/mpi/mpi.go | 2 +- sims/neuron/neuron.go | 4 +- sims/objrec/objrec.go | 2 +- sims/pfcmaint/pfcmaint.go | 2 +- sims/pvlv/pvlv.go | 4 +- sims/ra25/config.go | 3 - sims/ra25/params.go | 2 + sims/ra25/ra25.go | 5 +- sims/ra25x/ra25x.go | 4 +- sims/rl/rl.go | 4 +- sims/vspatch/vspatch.go | 4 +- simscripts/dbformat.csv | 13 -- 90 files changed, 1102 insertions(+), 246 deletions(-) create mode 100644 kinase/enumgen.go create mode 100644 kinase/syncabin.go delete mode 100644 simscripts/dbformat.csv diff --git a/axon/context.go b/axon/context.go index 4253ab7e6..488446404 100644 --- a/axon/context.go +++ 
b/axon/context.go @@ -58,7 +58,8 @@ type Context struct { //types:add -setters // CaBinCycles is the number of cycles for neuron [CaBins] values used in // computing synaptic calcium values. Total number of bins = ThetaCycles / CaBinCycles. - CaBinCycles int32 `default:"25"` + // This is fixed at 10. + CaBinCycles int32 `default:"10"` // CyclesTotal is the accumulated cycle count, which increments continuously // from whenever it was last reset. Typically this is the number of milliseconds @@ -102,7 +103,7 @@ func (ctx *Context) Defaults() { ctx.TimePerCycle = 0.001 ctx.ThetaCycles = 200 ctx.PlusCycles = 50 - ctx.CaBinCycles = 25 + ctx.CaBinCycles = 10 ctx.SlowInterval = 100 } diff --git a/axon/enumgen.go b/axon/enumgen.go index 0666fb1dc..f1bf87568 100644 --- a/axon/enumgen.go +++ b/axon/enumgen.go @@ -485,7 +485,7 @@ const NeuronVarsN NeuronVars = 83 var _NeuronVarsValueMap = map[string]NeuronVars{`Spike`: 0, `Spiked`: 1, `Act`: 2, `ActInt`: 3, `Ge`: 4, `Gi`: 5, `Gk`: 6, `Inet`: 7, `Vm`: 8, `VmDend`: 9, `ISI`: 10, `ISIAvg`: 11, `Ext`: 12, `Target`: 13, `CaM`: 14, `CaP`: 15, `CaD`: 16, `CaDPrev`: 17, `CaSyn`: 18, `LearnCa`: 19, `LearnCaM`: 20, `LearnCaP`: 21, `LearnCaD`: 22, `CaDiff`: 23, `RLRate`: 24, `GnmdaSyn`: 25, `Gnmda`: 26, `GnmdaLrn`: 27, `GnmdaMaint`: 28, `NmdaCa`: 29, `Gvgcc`: 30, `VgccM`: 31, `VgccH`: 32, `VgccCa`: 33, `VgccCaInt`: 34, `Burst`: 35, `BurstPrv`: 36, `CtxtGe`: 37, `CtxtGeRaw`: 38, `CtxtGeOrig`: 39, `GgabaB`: 40, `GABAB`: 41, `GABABx`: 42, `Gak`: 43, `SSGiDend`: 44, `GknaMed`: 45, `GknaSlow`: 46, `Gkir`: 47, `KirM`: 48, `Gsk`: 49, `SKCaIn`: 50, `SKCaR`: 51, `SKCaM`: 52, `Gmahp`: 53, `MahpN`: 54, `Gsahp`: 55, `SahpCa`: 56, `SahpN`: 57, `ActM`: 58, `ActP`: 59, `Beta1`: 60, `Beta2`: 61, `CaPMax`: 62, `CaPMaxCa`: 63, `GeNoise`: 64, `GeNoiseP`: 65, `GiNoise`: 66, `GiNoiseP`: 67, `GeExt`: 68, `GeRaw`: 69, `GeSyn`: 70, `GiRaw`: 71, `GiSyn`: 72, `GeInt`: 73, `GeIntNorm`: 74, `GiInt`: 75, `GModRaw`: 76, `GModSyn`: 77, `SMaintP`: 78, `GMaintRaw`: 79, `GMaintSyn`: 80, `NeurFlags`: 81, `CaBins`: 82} -var _NeuronVarsDescMap = map[NeuronVars]string{0: `Spike is whether neuron has spiked or not on this cycle (0 or 1).`, 1: `Spiked is 1 if neuron has spiked within the last 10 cycles (msecs), corresponding to a nominal max spiking rate of 100 Hz, 0 otherwise. Useful for visualization and computing activity levels in terms of average spiked levels.`, 2: `Act is rate-coded activation value reflecting instantaneous estimated rate of spiking, based on 1 / ISIAvg. It is integrated over time for ActInt which is then used for performance statistics and layer average activations, etc. Should not be used for learning or other computations: just for stats / display.`, 3: `ActInt is integrated running-average activation value computed from Act with time constant Act.Dt.IntTau, to produce a longer-term integrated value reflecting the overall activation state across the ThetaCycle time scale, as the overall response of network to current input state. This is copied to ActM and ActP at the ends of the minus and plus phases, respectively, and used in computing some performance-level statistics (based on ActM). Should not be used for learning or other computations.`, 4: `Ge is total excitatory conductance, including all forms of excitation (e.g., NMDA). Does *not* include the Gbar.E factor.`, 5: `Gi is total inhibitory synaptic conductance, i.e., the net inhibitory input to the neuron. 
Does *not* include the Gbar.I factor.`, 6: `Gk is total potassium conductance, typically reflecting sodium-gated potassium currents involved in adaptation effects. Does *not* include the Gbar.K factor.`, 7: `Inet is net current produced by all channels, which drives update of Vm.`, 8: `Vm is the membrane potential at the cell body, which integrates Inet current over time, and drives spiking at the axon initial segment of the neuron.`, 9: `VmDend is the dendritic membrane potential, which has a slower time constant than Vm and is not subject to the VmR reset after spiking.`, 10: `ISI is the current inter-spike-interval, which counts up since last spike. Starts at -1 when initialized.`, 11: `ISIAvg is the average inter-spike-interval, i.e., the average time interval between spikes, integrated with ISITau rate constant (relatively fast) to capture something close to an instantaneous spiking rate. Starts at -1 when initialized, and goes to -2 after first spike, and is only valid after the second spike post-initialization.`, 12: `Ext is the external input: drives activation of unit from outside influences (e.g., sensory input).`, 13: `Target is the target value: drives learning to produce this activation value.`, 14: `CaM is the spike-driven calcium trace at the neuron level, which then drives longer time-integrated variables: [CaP] and [CaD]. These variables are used for statistics and display to capture spiking activity at different timescales. They fluctuate more than [Act] and [ActInt], but are closer to the biological variables driving learning. CaM is the exponential integration of SpikeG * Spike using the MTau time constant (typically 5), and simulates a calmodulin (CaM) like signal, at an abstract level.`, 15: `CaP is the continuous cascaded integration of [CaM] using the PTau time constant (typically 40), representing a neuron-level, purely spiking version of the plus, LTP direction of weight change in the Kinase learning rule, dependent on CaMKII. This is not used for learning (see [LearnCaP]), but instead for statistics as a representation of recent activity.`, 16: `CaD is the continuous cascaded integration [CaP] using the DTau time constant (typically 40), representing a neuron-level, purely spiking version of the minus, LTD direction of weight change in the Kinase learning rule, dependent on DAPK1. This is not used for learning (see [LearnCaD]), but instead for statistics as a representation of trial-level activity.`, 17: `CaDPrev is the final [CaD] activation state at the end of previous theta cycle. This is used for specialized learning mechanisms that operate on delayed sending activations.`, 18: `CaSyn is the neuron-level integration of spike-driven calcium, used to approximate synaptic calcium influx as a product of sender and receiver neuron CaSyn values, which are integrated separately because it is computationally much more efficient. This value is driven directly by spikes, with an exponential integration time constant of 30 msec (default), which captures the coincidence window for pre*post firing on NMDA receptor opening. The neuron [CaBins] values record the temporal trajectory of CaSyn over the course of the theta cycle window, and then the pre*post product is integrated over these bins at the synaptic level.`, 19: `LearnCa is the receiving neuron calcium signal, which is integrated up to [LearnCaP] and [LearnCaD], the difference of which is the temporal error component of the standard axon cortical learning rule. 
LearnCa combines NMDA via [NmdaCa] and spiking-driven VGCC [VgccCaInt] calcium sources (vs. CaM which only reflects a simple spiking component). The NMDA signal reflects both sending and receiving activity, while the VGCC signal is purely receiver spiking, and a balance of both works best.`, 20: `LearnCaM is the integrated [LearnCa] at the MTau timescale (typically 5), simulating a calmodulin (CaM) like signal, which then drives [LearnCaP], and [LearnCaD] for the delta signal for error-driven learning.`, 21: `LearnCaP is the cascaded integration of [LearnCaM] using the PTau time constant (typically 40), representing the plus, LTP direction of weight change, capturing the function of CaMKII in the Kinase learning rule.`, 22: `LearnCaD is the cascaded integration of [LearnCaP] using the DTau time constant (typically 40), representing the minus, LTD direction of weight change, capturing the function of DAPK1 in the Kinase learning rule.`, 23: `CaDiff is difference between [LearnCaP] - [LearnCaD]. This is the error signal that drives error-driven learning.`, 24: `RLRate is recv-unit based learning rate multiplier, reflecting the sigmoid derivative computed from [CaD] of recv unit, and the normalized difference (CaP - CaD) / MAX(CaP - CaD).`, 25: `GnmdaSyn is the integrated NMDA synaptic current on the receiving neuron. It adds GeRaw and decays with a time constant.`, 26: `Gnmda is the net postsynaptic (receiving) NMDA conductance, after Mg V-gating and Gbar. This is added directly to Ge as it has the same reversal potential.`, 27: `GnmdaLrn is learning version of integrated NMDA recv synaptic current. It adds [GeRaw] and decays with a time constant. This drives [NmdaCa] that then drives [LearnCa] for learning.`, 28: `GnmdaMaint is net postsynaptic maintenance NMDA conductance, computed from [GMaintSyn] and [GMaintRaw], after Mg V-gating and Gbar. This is added directly to Ge as it has the same reversal potential.`, 29: `NmdaCa is NMDA calcium computed from GnmdaLrn, drives learning via CaM.`, 30: `Gvgcc is conductance (via Ca) for VGCC voltage gated calcium channels.`, 31: `VgccM is activation gate of VGCC channels.`, 32: `VgccH inactivation gate of VGCC channels.`, 33: `VgccCa is the instantaneous VGCC calcium flux: can be driven by spiking or directly from Gvgcc.`, 34: `VgccCaInt is the time-integrated VGCC calcium flux. This is actually what drives learning.`, 35: `Burst is the layer 5 IB intrinsic bursting neural activation value, computed by thresholding the [CaP] value in Super superficial layers.`, 36: `BurstPrv is previous Burst bursting activation from prior time step. Used for context-based learning.`, 37: `CtxtGe is context (temporally delayed) excitatory conductance, driven by deep bursting at end of the plus phase, for CT layers.`, 38: `CtxtGeRaw is raw update of context (temporally delayed) excitatory conductance, driven by deep bursting at end of the plus phase, for CT layers.`, 39: `CtxtGeOrig is original CtxtGe value prior to any decay factor. Updates at end of plus phase.`, 40: `GgabaB is net GABA-B conductance, after Vm gating and Gbar + Gbase. Applies to Gk, not Gi, for GIRK, with .1 reversal potential.`, 41: `GABAB is GABA-B / GIRK activation, which is a time-integrated value with rise and decay time constants.`, 42: `GABABx is GABA-B / GIRK internal drive variable. 
This gets the raw activation and decays.`, 43: `Gak is the conductance of A-type K potassium channels.`, 44: `SSGiDend is the amount of SST+ somatostatin positive slow spiking inhibition applied to dendritic Vm (VmDend).`, 45: `GknaMed is the conductance of sodium-gated potassium channel (KNa) medium dynamics (Slick), which produces accommodation / adaptation.`, 46: `GknaSlow is the conductance of sodium-gated potassium channel (KNa) slow dynamics (Slack), which produces accommodation / adaptation.`, 47: `Gkir is the conductance of the potassium (K) inwardly rectifying channel, which is strongest at low membrane potentials. Can be modulated by DA.`, 48: `KirM is the Kir potassium (K) inwardly rectifying gating value.`, 49: `Gsk is Calcium-gated potassium channel conductance as a function of Gbar * SKCaM.`, 50: `SKCaIn is intracellular calcium store level, available to be released with spiking as SKCaR, which can bind to SKCa receptors and drive K current. replenishment is a function of spiking activity being below a threshold.`, 51: `SKCaR is the released amount of intracellular calcium, from SKCaIn, as a function of spiking events. This can bind to SKCa channels and drive K currents.`, 52: `SKCaM is the Calcium-gated potassium channel gating factor, driven by SKCaR via a Hill equation as in chans.SKPCaParams.`, 53: `Gmahp is medium time scale AHP conductance.`, 54: `MahpN is accumulating voltage-gated gating value for the medium time scale AHP.`, 55: `Gsahp is slow time scale AHP conductance.`, 56: `SahpCa is slowly accumulating calcium value that drives the slow AHP.`, 57: `SahpN is the sAHP gating value.`, 58: `ActM is ActInt activation state at end of third quarter, representing the posterior-cortical minus phase activation. This is used for statistics and monitoring network performance. Should not be used for learning or other computations.`, 59: `ActP is ActInt activation state at end of fourth quarter, representing the posterior-cortical plus_phase activation. This is used for statistics and monitoring network performance. Should not be used for learning or other computations.`, 60: `Beta1 is the activation state at the first beta cycle within current state processing window (i.e., at 50 msec), as saved by Beta1() function. Used for example in hippocampus for CA3, CA1 learning.`, 61: `Beta2 is the activation state at the second beta cycle within current state processing window (i.e., at 100 msec), as saved by Beta2() function. Used for example in hippocampus for CA3, CA1 learning.`, 62: `CaPMax is the maximum [CaP] across one theta cycle time window (max of CaPMaxCa). It is used for specialized algorithms that have more phasic behavior within a single trial, e.g., BG Matrix layer gating. Also useful for visualization of peak activity of neurons.`, 63: `CaPMaxCa is the Ca integrated like [CaP] but only starting at the MaxCycStart cycle, to prevent inclusion of carryover spiking from prior theta cycle trial. The PTau time constant otherwise results in significant carryover. This is the input to CaPMax.`, 64: `GeNoise is integrated noise excitatory conductance, added into Ge.`, 65: `GeNoiseP is accumulating poisson probability factor for driving excitatory noise spiking. 
Multiply times uniform random deviate at each time step, until it gets below the target threshold based on poisson lambda as function of noise firing rate.`, 66: `GiNoise is integrated noise inhibotyr conductance, added into Gi.`, 67: `GiNoiseP is accumulating poisson probability factor for driving inhibitory noise spiking. Multiply times uniform random deviate at each time step, until it gets below the target threshold based on poisson lambda as a function of noise firing rate.`, 68: `GeExt is extra excitatory conductance added to Ge, from Ext input, GeCtxt etc.`, 69: `GeRaw is the raw excitatory conductance (net input) received from senders = current raw spiking drive.`, 70: `GeSyn is the time-integrated total excitatory synaptic conductance, with an instantaneous rise time from each spike (in GeRaw) and exponential decay with Dt.GeTau, aggregated over pathways. Does *not* include Gbar.E.`, 71: `GiRaw is the raw inhibitory conductance (net input) received from senders = current raw spiking drive.`, 72: `GiSyn is time-integrated total inhibitory synaptic conductance, with an instantaneous rise time from each spike (in GiRaw) and exponential decay with Dt.GiTau, aggregated over pathways -- does *not* include Gbar.I. This is added with computed FFFB inhibition to get the full inhibition in Gi.`, 73: `GeInt is integrated running-average activation value computed from Ge with time constant Act.Dt.IntTau, to produce a longer-term integrated value reflecting the overall Ge level across the ThetaCycle time scale (Ge itself fluctuates considerably). This is useful for stats to set strength of connections etc to get neurons into right range of overall excitatory drive.`, 74: `GeIntNorm is normalized GeInt value (divided by the layer maximum). This is used for learning in layers that require learning on subthreshold activity.`, 75: `GiInt is integrated running-average activation value computed from GiSyn with time constant Act.Dt.IntTau, to produce a longer-term integrated value reflecting the overall synaptic Gi level across the ThetaCycle time scale (Gi itself fluctuates considerably). Useful for stats to set strength of connections etc to get neurons into right range of overall inhibitory drive.`, 76: `GModRaw is raw modulatory conductance, received from GType = ModulatoryG pathways.`, 77: `GModSyn is syn integrated modulatory conductance, received from GType = ModulatoryG pathways.`, 78: `SMaintP is accumulating poisson probability factor for driving self-maintenance by simulating a population of mutually interconnected neurons. Multiply times uniform random deviate at each time step, until it gets below the target threshold based on poisson lambda based on accumulating self maint factor.`, 79: `GMaintRaw is raw maintenance conductance, received from GType = MaintG pathways.`, 80: `GMaintSyn is syn integrated maintenance conductance, integrated using MaintNMDA params.`, 81: `NeurFlags are bit flags for binary state variables, which are converted to / from uint32. These need to be in Vars because they can be differential per data (for ext inputs) and are writable (indexes are read only).`, 82: `CaBins is a vector of values starting here, with aggregated [CaSyn] values in time bins of [Context.CaBinCycles] across the theta cycle, for computing synaptic calcium efficiently. There can be a variable number of bins depending on bin width and total number of cycles. Synaptic calcium is integrated from sender * receiver CaBins values, with weights for CaP vs CaD that reflect their faster vs. 
slower time constants, respectively. CaD is used for the credit assignment factor, while CaP - CaD is used directly for error-driven learning at Target layers.`} +var _NeuronVarsDescMap = map[NeuronVars]string{0: `Spike is whether neuron has spiked or not on this cycle (0 or 1).`, 1: `Spiked is 1 if neuron has spiked within the last 10 cycles (msecs), corresponding to a nominal max spiking rate of 100 Hz, 0 otherwise. Useful for visualization and computing activity levels in terms of average spiked levels.`, 2: `Act is rate-coded activation value reflecting instantaneous estimated rate of spiking, based on 1 / ISIAvg. It is integrated over time for ActInt which is then used for performance statistics and layer average activations, etc. Should not be used for learning or other computations: just for stats / display.`, 3: `ActInt is integrated running-average activation value computed from Act with time constant Act.Dt.IntTau, to produce a longer-term integrated value reflecting the overall activation state across the ThetaCycle time scale, as the overall response of network to current input state. This is copied to ActM and ActP at the ends of the minus and plus phases, respectively, and used in computing some performance-level statistics (based on ActM). Should not be used for learning or other computations.`, 4: `Ge is total excitatory conductance, including all forms of excitation (e.g., NMDA). Does *not* include the Gbar.E factor.`, 5: `Gi is total inhibitory synaptic conductance, i.e., the net inhibitory input to the neuron. Does *not* include the Gbar.I factor.`, 6: `Gk is total potassium conductance, typically reflecting sodium-gated potassium currents involved in adaptation effects. Does *not* include the Gbar.K factor.`, 7: `Inet is net current produced by all channels, which drives update of Vm.`, 8: `Vm is the membrane potential at the cell body, which integrates Inet current over time, and drives spiking at the axon initial segment of the neuron.`, 9: `VmDend is the dendritic membrane potential, which has a slower time constant than Vm and is not subject to the VmR reset after spiking.`, 10: `ISI is the current inter-spike-interval, which counts up since last spike. Starts at -1 when initialized.`, 11: `ISIAvg is the average inter-spike-interval, i.e., the average time interval between spikes, integrated with ISITau rate constant (relatively fast) to capture something close to an instantaneous spiking rate. Starts at -1 when initialized, and goes to -2 after first spike, and is only valid after the second spike post-initialization.`, 12: `Ext is the external input: drives activation of unit from outside influences (e.g., sensory input).`, 13: `Target is the target value: drives learning to produce this activation value.`, 14: `CaM is the spike-driven calcium trace at the neuron level, which then drives longer time-integrated variables: [CaP] and [CaD]. These variables are used for statistics and display to capture spiking activity at different timescales. They fluctuate more than [Act] and [ActInt], but are closer to the biological variables driving learning. CaM is the exponential integration of SpikeG * Spike using the MTau time constant (typically 5), and simulates a calmodulin (CaM) like signal, at an abstract level.`, 15: `CaP is the continuous cascaded integration of [CaM] using the PTau time constant (typically 40), representing a neuron-level, purely spiking version of the plus, LTP direction of weight change in the Kinase learning rule, dependent on CaMKII. 
This is not used for learning (see [LearnCaP]), but instead for statistics as a representation of recent activity.`, 16: `CaD is the continuous cascaded integration [CaP] using the DTau time constant (typically 40), representing a neuron-level, purely spiking version of the minus, LTD direction of weight change in the Kinase learning rule, dependent on DAPK1. This is not used for learning (see [LearnCaD]), but instead for statistics as a representation of trial-level activity.`, 17: `CaDPrev is the final [CaD] activation state at the end of previous theta cycle. This is used for specialized learning mechanisms that operate on delayed sending activations.`, 18: `CaSyn is the neuron-level integration of spike-driven calcium, used to approximate synaptic calcium influx as a product of sender and receiver neuron CaSyn values, which are integrated separately because it is computationally much more efficient. This value is driven directly by spikes, with an exponential integration time constant of 30 msec (default), which captures the coincidence window for pre*post firing on NMDA receptor opening. The neuron [CaBins] values record the temporal trajectory of CaSyn over the course of the theta cycle window, and then the pre*post product is integrated over these bins at the synaptic level.`, 19: `LearnCa is the receiving neuron calcium signal, which is integrated up to [LearnCaP] and [LearnCaD], the difference of which is the temporal error component of the standard axon cortical learning rule. LearnCa combines NMDA via [NmdaCa] and spiking-driven VGCC [VgccCaInt] calcium sources (vs. CaM which only reflects a simple spiking component). The NMDA signal reflects both sending and receiving activity, while the VGCC signal is purely receiver spiking, and a balance of both works best.`, 20: `LearnCaM is the integrated [LearnCa] at the MTau timescale (typically 5), simulating a calmodulin (CaM) like signal, which then drives [LearnCaP], and [LearnCaD] for the delta signal for error-driven learning.`, 21: `LearnCaP is the cascaded integration of [LearnCaM] using the PTau time constant (typically 40), representing the plus, LTP direction of weight change, capturing the function of CaMKII in the Kinase learning rule.`, 22: `LearnCaD is the cascaded integration of [LearnCaP] using the DTau time constant (typically 40), representing the minus, LTD direction of weight change, capturing the function of DAPK1 in the Kinase learning rule.`, 23: `CaDiff is difference between [LearnCaP] - [LearnCaD]. This is the error signal that drives error-driven learning.`, 24: `RLRate is recv-unit based learning rate multiplier, reflecting the sigmoid derivative computed from [CaD] of recv unit, and the normalized difference (CaP - CaD) / MAX(CaP - CaD).`, 25: `GnmdaSyn is the integrated NMDA synaptic current on the receiving neuron. It adds GeRaw and decays with a time constant.`, 26: `Gnmda is the net postsynaptic (receiving) NMDA conductance, after Mg V-gating and Gbar. This is added directly to Ge as it has the same reversal potential.`, 27: `GnmdaLrn is learning version of integrated NMDA recv synaptic current. It adds [GeRaw] and decays with a time constant. This drives [NmdaCa] that then drives [LearnCa] for learning.`, 28: `GnmdaMaint is net postsynaptic maintenance NMDA conductance, computed from [GMaintSyn] and [GMaintRaw], after Mg V-gating and Gbar. 
This is added directly to Ge as it has the same reversal potential.`, 29: `NmdaCa is NMDA calcium computed from GnmdaLrn, drives learning via CaM.`, 30: `Gvgcc is conductance (via Ca) for VGCC voltage gated calcium channels.`, 31: `VgccM is activation gate of VGCC channels.`, 32: `VgccH inactivation gate of VGCC channels.`, 33: `VgccCa is the instantaneous VGCC calcium flux: can be driven by spiking or directly from Gvgcc.`, 34: `VgccCaInt is the time-integrated VGCC calcium flux. This is actually what drives learning.`, 35: `Burst is the layer 5 IB intrinsic bursting neural activation value, computed by thresholding the [CaP] value in Super superficial layers.`, 36: `BurstPrv is previous Burst bursting activation from prior time step. Used for context-based learning.`, 37: `CtxtGe is context (temporally delayed) excitatory conductance, driven by deep bursting at end of the plus phase, for CT layers.`, 38: `CtxtGeRaw is raw update of context (temporally delayed) excitatory conductance, driven by deep bursting at end of the plus phase, for CT layers.`, 39: `CtxtGeOrig is original CtxtGe value prior to any decay factor. Updates at end of plus phase.`, 40: `GgabaB is net GABA-B conductance, after Vm gating and Gbar + Gbase. Applies to Gk, not Gi, for GIRK, with .1 reversal potential.`, 41: `GABAB is GABA-B / GIRK activation, which is a time-integrated value with rise and decay time constants.`, 42: `GABABx is GABA-B / GIRK internal drive variable. This gets the raw activation and decays.`, 43: `Gak is the conductance of A-type K potassium channels.`, 44: `SSGiDend is the amount of SST+ somatostatin positive slow spiking inhibition applied to dendritic Vm (VmDend).`, 45: `GknaMed is the conductance of sodium-gated potassium channel (KNa) medium dynamics (Slick), which produces accommodation / adaptation.`, 46: `GknaSlow is the conductance of sodium-gated potassium channel (KNa) slow dynamics (Slack), which produces accommodation / adaptation.`, 47: `Gkir is the conductance of the potassium (K) inwardly rectifying channel, which is strongest at low membrane potentials. Can be modulated by DA.`, 48: `KirM is the Kir potassium (K) inwardly rectifying gating value.`, 49: `Gsk is Calcium-gated potassium channel conductance as a function of Gbar * SKCaM.`, 50: `SKCaIn is intracellular calcium store level, available to be released with spiking as SKCaR, which can bind to SKCa receptors and drive K current. replenishment is a function of spiking activity being below a threshold.`, 51: `SKCaR is the released amount of intracellular calcium, from SKCaIn, as a function of spiking events. This can bind to SKCa channels and drive K currents.`, 52: `SKCaM is the Calcium-gated potassium channel gating factor, driven by SKCaR via a Hill equation as in chans.SKPCaParams.`, 53: `Gmahp is medium time scale AHP conductance.`, 54: `MahpN is accumulating voltage-gated gating value for the medium time scale AHP.`, 55: `Gsahp is slow time scale AHP conductance.`, 56: `SahpCa is slowly accumulating calcium value that drives the slow AHP.`, 57: `SahpN is the sAHP gating value.`, 58: `ActM is ActInt activation state at end of third quarter, representing the posterior-cortical minus phase activation. This is used for statistics and monitoring network performance. Should not be used for learning or other computations.`, 59: `ActP is ActInt activation state at end of fourth quarter, representing the posterior-cortical plus_phase activation. This is used for statistics and monitoring network performance. 
Should not be used for learning or other computations.`, 60: `Beta1 is the activation state at the first beta cycle within current state processing window (i.e., at 50 msec), as saved by Beta1() function. Used for example in hippocampus for CA3, CA1 learning.`, 61: `Beta2 is the activation state at the second beta cycle within current state processing window (i.e., at 100 msec), as saved by Beta2() function. Used for example in hippocampus for CA3, CA1 learning.`, 62: `CaPMax is the maximum [CaP] across one theta cycle time window (max of CaPMaxCa). It is used for specialized algorithms that have more phasic behavior within a single trial, e.g., BG Matrix layer gating. Also useful for visualization of peak activity of neurons.`, 63: `CaPMaxCa is the Ca integrated like [CaP] but only starting at the MaxCycStart cycle, to prevent inclusion of carryover spiking from prior theta cycle trial. The PTau time constant otherwise results in significant carryover. This is the input to CaPMax.`, 64: `GeNoise is integrated noise excitatory conductance, added into Ge.`, 65: `GeNoiseP is accumulating poisson probability factor for driving excitatory noise spiking. Multiply times uniform random deviate at each time step, until it gets below the target threshold based on poisson lambda as function of noise firing rate.`, 66: `GiNoise is integrated noise inhibotyr conductance, added into Gi.`, 67: `GiNoiseP is accumulating poisson probability factor for driving inhibitory noise spiking. Multiply times uniform random deviate at each time step, until it gets below the target threshold based on poisson lambda as a function of noise firing rate.`, 68: `GeExt is extra excitatory conductance added to Ge, from Ext input, GeCtxt etc.`, 69: `GeRaw is the raw excitatory conductance (net input) received from senders = current raw spiking drive.`, 70: `GeSyn is the time-integrated total excitatory synaptic conductance, with an instantaneous rise time from each spike (in GeRaw) and exponential decay with Dt.GeTau, aggregated over pathways. Does *not* include Gbar.E.`, 71: `GiRaw is the raw inhibitory conductance (net input) received from senders = current raw spiking drive.`, 72: `GiSyn is time-integrated total inhibitory synaptic conductance, with an instantaneous rise time from each spike (in GiRaw) and exponential decay with Dt.GiTau, aggregated over pathways -- does *not* include Gbar.I. This is added with computed FFFB inhibition to get the full inhibition in Gi.`, 73: `GeInt is integrated running-average activation value computed from Ge with time constant Act.Dt.IntTau, to produce a longer-term integrated value reflecting the overall Ge level across the ThetaCycle time scale (Ge itself fluctuates considerably). This is useful for stats to set strength of connections etc to get neurons into right range of overall excitatory drive.`, 74: `GeIntNorm is normalized GeInt value (divided by the layer maximum). This is used for learning in layers that require learning on subthreshold activity.`, 75: `GiInt is integrated running-average activation value computed from GiSyn with time constant Act.Dt.IntTau, to produce a longer-term integrated value reflecting the overall synaptic Gi level across the ThetaCycle time scale (Gi itself fluctuates considerably). 
Useful for stats to set strength of connections etc to get neurons into right range of overall inhibitory drive.`, 76: `GModRaw is raw modulatory conductance, received from GType = ModulatoryG pathways.`, 77: `GModSyn is syn integrated modulatory conductance, received from GType = ModulatoryG pathways.`, 78: `SMaintP is accumulating poisson probability factor for driving self-maintenance by simulating a population of mutually interconnected neurons. Multiply times uniform random deviate at each time step, until it gets below the target threshold based on poisson lambda based on accumulating self maint factor.`, 79: `GMaintRaw is raw maintenance conductance, received from GType = MaintG pathways.`, 80: `GMaintSyn is syn integrated maintenance conductance, integrated using MaintNMDA params.`, 81: `NeurFlags are bit flags for binary state variables, which are converted to / from uint32. These need to be in Vars because they can be differential per data (for ext inputs) and are writable (indexes are read only).`, 82: `CaBins is a vector of values starting here, with aggregated [CaSyn] values in time bins of [Context.CaBinCycles] across the theta cycle, for computing synaptic calcium efficiently. Each bin = Sum(CaSyn) / CaBinCycles. Total number of bins = [Context.ThetaCycles] / CaBinCycles. Synaptic calcium is integrated from sender * receiver CaBins values, with weights for CaP vs CaD that reflect their faster vs. slower time constants, respectively. CaD is used for the credit assignment factor, while CaP - CaD is used directly for error-driven learning at Target layers.`} var _NeuronVarsMap = map[NeuronVars]string{0: `Spike`, 1: `Spiked`, 2: `Act`, 3: `ActInt`, 4: `Ge`, 5: `Gi`, 6: `Gk`, 7: `Inet`, 8: `Vm`, 9: `VmDend`, 10: `ISI`, 11: `ISIAvg`, 12: `Ext`, 13: `Target`, 14: `CaM`, 15: `CaP`, 16: `CaD`, 17: `CaDPrev`, 18: `CaSyn`, 19: `LearnCa`, 20: `LearnCaM`, 21: `LearnCaP`, 22: `LearnCaD`, 23: `CaDiff`, 24: `RLRate`, 25: `GnmdaSyn`, 26: `Gnmda`, 27: `GnmdaLrn`, 28: `GnmdaMaint`, 29: `NmdaCa`, 30: `Gvgcc`, 31: `VgccM`, 32: `VgccH`, 33: `VgccCa`, 34: `VgccCaInt`, 35: `Burst`, 36: `BurstPrv`, 37: `CtxtGe`, 38: `CtxtGeRaw`, 39: `CtxtGeOrig`, 40: `GgabaB`, 41: `GABAB`, 42: `GABABx`, 43: `Gak`, 44: `SSGiDend`, 45: `GknaMed`, 46: `GknaSlow`, 47: `Gkir`, 48: `KirM`, 49: `Gsk`, 50: `SKCaIn`, 51: `SKCaR`, 52: `SKCaM`, 53: `Gmahp`, 54: `MahpN`, 55: `Gsahp`, 56: `SahpCa`, 57: `SahpN`, 58: `ActM`, 59: `ActP`, 60: `Beta1`, 61: `Beta2`, 62: `CaPMax`, 63: `CaPMaxCa`, 64: `GeNoise`, 65: `GeNoiseP`, 66: `GiNoise`, 67: `GiNoiseP`, 68: `GeExt`, 69: `GeRaw`, 70: `GeSyn`, 71: `GiRaw`, 72: `GiSyn`, 73: `GeInt`, 74: `GeIntNorm`, 75: `GiInt`, 76: `GModRaw`, 77: `GModSyn`, 78: `SMaintP`, 79: `GMaintRaw`, 80: `GMaintSyn`, 81: `NeurFlags`, 82: `CaBins`} diff --git a/axon/learn-path.go b/axon/learn-path.go index 1cdbe8598..fc0034634 100644 --- a/axon/learn-path.go +++ b/axon/learn-path.go @@ -53,9 +53,23 @@ func (pt *PathParams) DWtSyn(ctx *Context, rlay *LayerParams, syni, si, ri, di u func (pt *PathParams) SynCa(ctx *Context, si, ri, di uint32, syCaP, syCaD *float32) { nbins := NetworkIxs[0].NCaBins cadSt := GvCaBinWts + GlobalScalarVars(nbins) - var cp, cd float32 - for i := range nbins { - sp := Neurons.Value(int(ri), int(di), int(CaBins+NeuronVars(i))) * Neurons.Value(int(si), int(di), int(CaBins+NeuronVars(i))) + + // T0 + r0 := Neurons.Value(int(ri), int(di), int(CaBins+NeuronVars(0))) + s0 := Neurons.Value(int(si), int(di), int(CaBins+NeuronVars(0))) + sp := pt.Learn.SynCaBin.SynCaT0(r0, s0) + cp := sp * 
GlobalScalars.Value(int(GvCaBinWts+GlobalScalarVars(0)), int(0)) + cd := sp * GlobalScalars.Value(int(cadSt+GlobalScalarVars(0)), int(0)) + + // T1 + r1 := Neurons.Value(int(ri), int(di), int(CaBins+NeuronVars(1))) + s1 := Neurons.Value(int(si), int(di), int(CaBins+NeuronVars(1))) + sp = pt.Learn.SynCaBin.SynCaT1(r0, r1, s0, s1) + cp += sp * GlobalScalars.Value(int(GvCaBinWts+GlobalScalarVars(1)), int(0)) + cd += sp * GlobalScalars.Value(int(cadSt+GlobalScalarVars(1)), int(0)) + + for i := int32(2); i < nbins; i++ { + sp := pt.Learn.SynCaBin.SynCaT(Neurons.Value(int(ri), int(di), int(CaBins+NeuronVars(i))), Neurons.Value(int(ri), int(di), int(CaBins+NeuronVars(i-1))), Neurons.Value(int(ri), int(di), int(CaBins+NeuronVars(i-2))), Neurons.Value(int(si), int(di), int(CaBins+NeuronVars(i))), Neurons.Value(int(si), int(di), int(CaBins+NeuronVars(i-1))), Neurons.Value(int(si), int(di), int(CaBins+NeuronVars(i-2)))) cp += sp * GlobalScalars.Value(int(GvCaBinWts+GlobalScalarVars(i)), int(0)) cd += sp * GlobalScalars.Value(int(cadSt+GlobalScalarVars(i)), int(0)) } diff --git a/axon/learn-path.goal b/axon/learn-path.goal index 9886bcdca..3a7ad3193 100644 --- a/axon/learn-path.goal +++ b/axon/learn-path.goal @@ -51,9 +51,23 @@ func (pt *PathParams) DWtSyn(ctx *Context, rlay *LayerParams, syni, si, ri, di u func (pt *PathParams) SynCa(ctx *Context, si, ri, di uint32, syCaP, syCaD *float32) { nbins := NetworkIxs[0].NCaBins cadSt := GvCaBinWts+GlobalScalarVars(nbins) - var cp, cd float32 - for i := range nbins { - sp := Neurons[ri, di, CaBins + NeuronVars(i)] * Neurons[si, di, CaBins + NeuronVars(i)] + + // T0 + r0 := Neurons[ri, di, CaBins + NeuronVars(0)] + s0 := Neurons[si, di, CaBins + NeuronVars(0)] + sp := pt.Learn.SynCaBin.SynCaT0(r0, s0) + cp := sp * GlobalScalars[GvCaBinWts + GlobalScalarVars(0), 0] + cd := sp * GlobalScalars[cadSt + GlobalScalarVars(0), 0] + + // T1 + r1 := Neurons[ri, di, CaBins + NeuronVars(1)] + s1 := Neurons[si, di, CaBins + NeuronVars(1)] + sp = pt.Learn.SynCaBin.SynCaT1(r0, r1, s0, s1) + cp += sp * GlobalScalars[GvCaBinWts + GlobalScalarVars(1), 0] + cd += sp * GlobalScalars[cadSt + GlobalScalarVars(1), 0] + + for i := int32(2); i < nbins; i++ { + sp := pt.Learn.SynCaBin.SynCaT(Neurons[ri, di, CaBins + NeuronVars(i)], Neurons[ri, di, CaBins + NeuronVars(i-1)], Neurons[ri, di, CaBins + NeuronVars(i-2)], Neurons[si, di, CaBins + NeuronVars(i)], Neurons[si, di, CaBins + NeuronVars(i-1)], Neurons[si, di, CaBins + NeuronVars(i-2)]) cp += sp * GlobalScalars[GvCaBinWts + GlobalScalarVars(i), 0] cd += sp * GlobalScalars[cadSt + GlobalScalarVars(i), 0] } diff --git a/axon/learn.go b/axon/learn.go index 3af234b80..46e63de48 100644 --- a/axon/learn.go +++ b/axon/learn.go @@ -855,6 +855,14 @@ type LearnSynParams struct { // trace-based cortical learning rule and for other specialized learning rules. DWt DWtParams `display:"inline"` + // SynCaBin computes synaptic calcium values as a product of the + // separately-integrated and binned sender and receiver SynCa values. + // Binning always happens at 10 msec intervals, but the product term + // is more robust if computed on a longer effective timescale, which + // is determined by weighting factors for the t-1 and t-2 bins when + // computing the neural SynCa for time bin t. 
+ SynCaBin kinase.SynCaBin `display:"inline"` + // hebbian learning option, which overrides the default learning rules Hebb HebbParams `display:"inline"` } @@ -862,6 +870,7 @@ type LearnSynParams struct { func (ls *LearnSynParams) Update() { ls.LRate.Update() ls.DWt.Update() + ls.SynCaBin.Update() ls.Hebb.Update() } @@ -869,6 +878,7 @@ func (ls *LearnSynParams) Defaults() { ls.Learn.SetBool(true) ls.LRate.Defaults() ls.DWt.Defaults() + ls.SynCaBin.Defaults() ls.Hebb.Defaults() } diff --git a/axon/learn.goal b/axon/learn.goal index 492a6f257..9ecc459e5 100644 --- a/axon/learn.goal +++ b/axon/learn.goal @@ -852,6 +852,14 @@ type LearnSynParams struct { // trace-based cortical learning rule and for other specialized learning rules. DWt DWtParams `display:"inline"` + // SynCaBin computes synaptic calcium values as a product of the + // separately-integrated and binned sender and receiver SynCa values. + // Binning always happens at 10 msec intervals, but the product term + // is more robust if computed on a longer effective timescale, which + // is determined by weighting factors for the t-1 and t-2 bins when + // computing the neural SynCa for time bin t. + SynCaBin kinase.SynCaBin `display:"inline"` + // hebbian learning option, which overrides the default learning rules Hebb HebbParams `display:"inline"` } @@ -859,6 +867,7 @@ type LearnSynParams struct { func (ls *LearnSynParams) Update() { ls.LRate.Update() ls.DWt.Update() + ls.SynCaBin.Update() ls.Hebb.Update() } @@ -866,6 +875,7 @@ func (ls *LearnSynParams) Defaults() { ls.Learn.SetBool(true) ls.LRate.Defaults() ls.DWt.Defaults() + ls.SynCaBin.Defaults() ls.Hebb.Defaults() } diff --git a/axon/network.go b/axon/network.go index 8a3effcb9..2c076b068 100644 --- a/axon/network.go +++ b/axon/network.go @@ -944,10 +944,9 @@ func (nt *Network) SetCaBinWts() { ctx := nt.Context() nix := nt.NetIxs() nBins := int(nix.NCaBins) - nPlusBins := int(ctx.PlusCycles / ctx.CaBinCycles) cp := make([]float32, nBins) cd := make([]float32, nBins) - kinase.CaBinWts(nPlusBins, int(ctx.CaBinCycles), cp, cd) + kinase.CaBinWts(int(ctx.PlusCycles), cp, cd) for i := range nBins { nt.GlobalScalars.Set(cp[i], int(GvCaBinWts+GlobalScalarVars(i)), int(0)) nt.GlobalScalars.Set(cd[i], int(GvCaBinWts+GlobalScalarVars(nBins+i)), int(0)) diff --git a/axon/network.goal b/axon/network.goal index 980d50c4f..491eeaa99 100644 --- a/axon/network.goal +++ b/axon/network.goal @@ -941,10 +941,9 @@ func (nt *Network) SetCaBinWts() { ctx := nt.Context() nix := nt.NetIxs() nBins := int(nix.NCaBins) - nPlusBins := int(ctx.PlusCycles / ctx.CaBinCycles) cp := make([]float32, nBins) cd := make([]float32, nBins) - kinase.CaBinWts(nPlusBins, int(ctx.CaBinCycles), cp, cd) + kinase.CaBinWts(int(ctx.PlusCycles), cp, cd) for i := range nBins { nt.GlobalScalars[GvCaBinWts+GlobalScalarVars(i), 0] = cp[i] nt.GlobalScalars[GvCaBinWts+GlobalScalarVars(nBins+i), 0] = cd[i] diff --git a/axon/neuron.go b/axon/neuron.go index 246ee6639..bca998319 100644 --- a/axon/neuron.go +++ b/axon/neuron.go @@ -456,8 +456,8 @@ const ( // CaBins is a vector of values starting here, with aggregated [CaSyn] values // in time bins of [Context.CaBinCycles] across the theta cycle, - // for computing synaptic calcium efficiently. There can be a variable number - // of bins depending on bin width and total number of cycles. + // for computing synaptic calcium efficiently. Each bin = Sum(CaSyn) / CaBinCycles. + // Total number of bins = [Context.ThetaCycles] / CaBinCycles. 
// Synaptic calcium is integrated from sender * receiver CaBins values, // with weights for CaP vs CaD that reflect their faster vs. slower time constants, // respectively. CaD is used for the credit assignment factor, while CaP - CaD is diff --git a/axon/shaders/ApplyExtsNeuron.wgsl b/axon/shaders/ApplyExtsNeuron.wgsl index a0307cf35..e7b643636 100644 --- a/axon/shaders/ApplyExtsNeuron.wgsl +++ b/axon/shaders/ApplyExtsNeuron.wgsl @@ -652,6 +652,23 @@ struct CaSpikeParams { Dt: CaDtParams, } +//////// import: "kinase-syncabin.go" +alias SynCaBinEnvelopes = i32; //enums:enum +const Env30: SynCaBinEnvelopes = 0; +const Env25: SynCaBinEnvelopes = 1; +const Env20: SynCaBinEnvelopes = 2; +const Env10: SynCaBinEnvelopes = 3; +struct SynCaBin { //types:add + Envelope: SynCaBinEnvelopes, + Wt1: f32, + Wt2: f32, + Wt11: f32, + Wt10: f32, + WtT0: f32, + WtT1: f32, + WtT2: f32, +} + //////// import: "layerparams.go" struct LayerIndexes { NPools: u32, @@ -842,6 +859,7 @@ struct LearnSynParams { pad2: i32, LRate: LRateParams, DWt: DWtParams, + SynCaBin: SynCaBin, Hebb: HebbParams, } diff --git a/axon/shaders/Beta1Neuron.wgsl b/axon/shaders/Beta1Neuron.wgsl index 80f8f1735..8b8d97834 100644 --- a/axon/shaders/Beta1Neuron.wgsl +++ b/axon/shaders/Beta1Neuron.wgsl @@ -602,6 +602,23 @@ struct CaSpikeParams { Dt: CaDtParams, } +//////// import: "kinase-syncabin.go" +alias SynCaBinEnvelopes = i32; //enums:enum +const Env30: SynCaBinEnvelopes = 0; +const Env25: SynCaBinEnvelopes = 1; +const Env20: SynCaBinEnvelopes = 2; +const Env10: SynCaBinEnvelopes = 3; +struct SynCaBin { //types:add + Envelope: SynCaBinEnvelopes, + Wt1: f32, + Wt2: f32, + Wt11: f32, + Wt10: f32, + WtT0: f32, + WtT1: f32, + WtT2: f32, +} + //////// import: "layerparams.go" struct LayerIndexes { NPools: u32, @@ -787,6 +804,7 @@ struct LearnSynParams { pad2: i32, LRate: LRateParams, DWt: DWtParams, + SynCaBin: SynCaBin, Hebb: HebbParams, } diff --git a/axon/shaders/Beta2Neuron.wgsl b/axon/shaders/Beta2Neuron.wgsl index de389fa0a..53540bb1b 100644 --- a/axon/shaders/Beta2Neuron.wgsl +++ b/axon/shaders/Beta2Neuron.wgsl @@ -602,6 +602,23 @@ struct CaSpikeParams { Dt: CaDtParams, } +//////// import: "kinase-syncabin.go" +alias SynCaBinEnvelopes = i32; //enums:enum +const Env30: SynCaBinEnvelopes = 0; +const Env25: SynCaBinEnvelopes = 1; +const Env20: SynCaBinEnvelopes = 2; +const Env10: SynCaBinEnvelopes = 3; +struct SynCaBin { //types:add + Envelope: SynCaBinEnvelopes, + Wt1: f32, + Wt2: f32, + Wt11: f32, + Wt10: f32, + WtT0: f32, + WtT1: f32, + WtT2: f32, +} + //////// import: "layerparams.go" struct LayerIndexes { NPools: u32, @@ -787,6 +804,7 @@ struct LearnSynParams { pad2: i32, LRate: LRateParams, DWt: DWtParams, + SynCaBin: SynCaBin, Hebb: HebbParams, } diff --git a/axon/shaders/BetweenGi.wgsl b/axon/shaders/BetweenGi.wgsl index 9585d4d35..08b348b03 100644 --- a/axon/shaders/BetweenGi.wgsl +++ b/axon/shaders/BetweenGi.wgsl @@ -619,6 +619,23 @@ struct CaSpikeParams { Dt: CaDtParams, } +//////// import: "kinase-syncabin.go" +alias SynCaBinEnvelopes = i32; //enums:enum +const Env30: SynCaBinEnvelopes = 0; +const Env25: SynCaBinEnvelopes = 1; +const Env20: SynCaBinEnvelopes = 2; +const Env10: SynCaBinEnvelopes = 3; +struct SynCaBin { //types:add + Envelope: SynCaBinEnvelopes, + Wt1: f32, + Wt2: f32, + Wt11: f32, + Wt10: f32, + WtT0: f32, + WtT1: f32, + WtT2: f32, +} + //////// import: "layerparams.go" struct LayerIndexes { NPools: u32, @@ -807,6 +824,7 @@ struct LearnSynParams { pad2: i32, LRate: LRateParams, DWt: DWtParams, + SynCaBin: 
SynCaBin, Hebb: HebbParams, } diff --git a/axon/shaders/CycleInc.wgsl b/axon/shaders/CycleInc.wgsl index e7d578f74..11f2a3a80 100644 --- a/axon/shaders/CycleInc.wgsl +++ b/axon/shaders/CycleInc.wgsl @@ -598,6 +598,23 @@ struct CaSpikeParams { Dt: CaDtParams, } +//////// import: "kinase-syncabin.go" +alias SynCaBinEnvelopes = i32; //enums:enum +const Env30: SynCaBinEnvelopes = 0; +const Env25: SynCaBinEnvelopes = 1; +const Env20: SynCaBinEnvelopes = 2; +const Env10: SynCaBinEnvelopes = 3; +struct SynCaBin { //types:add + Envelope: SynCaBinEnvelopes, + Wt1: f32, + Wt2: f32, + Wt11: f32, + Wt10: f32, + WtT0: f32, + WtT1: f32, + WtT2: f32, +} + //////// import: "layerparams.go" struct LayerIndexes { NPools: u32, @@ -783,6 +800,7 @@ struct LearnSynParams { pad2: i32, LRate: LRateParams, DWt: DWtParams, + SynCaBin: SynCaBin, Hebb: HebbParams, } diff --git a/axon/shaders/CycleNeuron.wgsl b/axon/shaders/CycleNeuron.wgsl index 1ba82f3a8..6ef683306 100644 --- a/axon/shaders/CycleNeuron.wgsl +++ b/axon/shaders/CycleNeuron.wgsl @@ -1466,6 +1466,23 @@ fn CaSpikeParams_CaSynFromSpike(sp: CaSpikeParams, spike: f32, caSyn: f32) -> f3 var ca = sp.SpikeCaSyn * spike;return caSyn + sp.CaSynDt*(ca-caSyn); } +//////// import: "kinase-syncabin.go" +alias SynCaBinEnvelopes = i32; //enums:enum +const Env30: SynCaBinEnvelopes = 0; +const Env25: SynCaBinEnvelopes = 1; +const Env20: SynCaBinEnvelopes = 2; +const Env10: SynCaBinEnvelopes = 3; +struct SynCaBin { //types:add + Envelope: SynCaBinEnvelopes, + Wt1: f32, + Wt2: f32, + Wt11: f32, + Wt10: f32, + WtT0: f32, + WtT1: f32, + WtT2: f32, +} + //////// import: "layerparams.go" struct LayerIndexes { NPools: u32, @@ -1689,6 +1706,7 @@ struct LearnSynParams { pad2: i32, LRate: LRateParams, DWt: DWtParams, + SynCaBin: SynCaBin, Hebb: HebbParams, } diff --git a/axon/shaders/CyclePost.wgsl b/axon/shaders/CyclePost.wgsl index 2d112d2a9..7ef07403b 100644 --- a/axon/shaders/CyclePost.wgsl +++ b/axon/shaders/CyclePost.wgsl @@ -737,6 +737,23 @@ struct CaSpikeParams { Dt: CaDtParams, } +//////// import: "kinase-syncabin.go" +alias SynCaBinEnvelopes = i32; //enums:enum +const Env30: SynCaBinEnvelopes = 0; +const Env25: SynCaBinEnvelopes = 1; +const Env20: SynCaBinEnvelopes = 2; +const Env10: SynCaBinEnvelopes = 3; +struct SynCaBin { //types:add + Envelope: SynCaBinEnvelopes, + Wt1: f32, + Wt2: f32, + Wt11: f32, + Wt10: f32, + WtT0: f32, + WtT1: f32, + WtT2: f32, +} + //////// import: "layerparams.go" struct LayerIndexes { NPools: u32, @@ -925,6 +942,7 @@ struct LearnSynParams { pad2: i32, LRate: LRateParams, DWt: DWtParams, + SynCaBin: SynCaBin, Hebb: HebbParams, } diff --git a/axon/shaders/DWtFromDiSyn.wgsl b/axon/shaders/DWtFromDiSyn.wgsl index 5f67f39f3..788220c4a 100644 --- a/axon/shaders/DWtFromDiSyn.wgsl +++ b/axon/shaders/DWtFromDiSyn.wgsl @@ -583,6 +583,23 @@ struct CaSpikeParams { Dt: CaDtParams, } +//////// import: "kinase-syncabin.go" +alias SynCaBinEnvelopes = i32; //enums:enum +const Env30: SynCaBinEnvelopes = 0; +const Env25: SynCaBinEnvelopes = 1; +const Env20: SynCaBinEnvelopes = 2; +const Env10: SynCaBinEnvelopes = 3; +struct SynCaBin { //types:add + Envelope: SynCaBinEnvelopes, + Wt1: f32, + Wt2: f32, + Wt11: f32, + Wt10: f32, + WtT0: f32, + WtT1: f32, + WtT2: f32, +} + //////// import: "layerparams.go" struct LayerIndexes { NPools: u32, @@ -784,6 +801,7 @@ struct LearnSynParams { pad2: i32, LRate: LRateParams, DWt: DWtParams, + SynCaBin: SynCaBin, Hebb: HebbParams, } diff --git a/axon/shaders/DWtSubMeanNeuron.wgsl b/axon/shaders/DWtSubMeanNeuron.wgsl index 
fb712f245..5bd357044 100644 --- a/axon/shaders/DWtSubMeanNeuron.wgsl +++ b/axon/shaders/DWtSubMeanNeuron.wgsl @@ -583,6 +583,23 @@ struct CaSpikeParams { Dt: CaDtParams, } +//////// import: "kinase-syncabin.go" +alias SynCaBinEnvelopes = i32; //enums:enum +const Env30: SynCaBinEnvelopes = 0; +const Env25: SynCaBinEnvelopes = 1; +const Env20: SynCaBinEnvelopes = 2; +const Env10: SynCaBinEnvelopes = 3; +struct SynCaBin { //types:add + Envelope: SynCaBinEnvelopes, + Wt1: f32, + Wt2: f32, + Wt11: f32, + Wt10: f32, + WtT0: f32, + WtT1: f32, + WtT2: f32, +} + //////// import: "layerparams.go" struct LayerIndexes { NPools: u32, @@ -819,6 +836,7 @@ struct LearnSynParams { pad2: i32, LRate: LRateParams, DWt: DWtParams, + SynCaBin: SynCaBin, Hebb: HebbParams, } diff --git a/axon/shaders/DWtSyn.wgsl b/axon/shaders/DWtSyn.wgsl index 997422e72..6f4e5976f 100644 --- a/axon/shaders/DWtSyn.wgsl +++ b/axon/shaders/DWtSyn.wgsl @@ -592,6 +592,33 @@ struct CaSpikeParams { Dt: CaDtParams, } +//////// import: "kinase-syncabin.go" +alias SynCaBinEnvelopes = i32; //enums:enum +const Env30: SynCaBinEnvelopes = 0; +const Env25: SynCaBinEnvelopes = 1; +const Env20: SynCaBinEnvelopes = 2; +const Env10: SynCaBinEnvelopes = 3; +struct SynCaBin { //types:add + Envelope: SynCaBinEnvelopes, + Wt1: f32, + Wt2: f32, + Wt11: f32, + Wt10: f32, + WtT0: f32, + WtT1: f32, + WtT2: f32, +} +fn SynCaBin_SynCaT0(sb: SynCaBin, r0: f32,s0: f32) -> f32 { + return r0 * s0; +} +fn SynCaBin_SynCaT1(sb: SynCaBin, r0: f32,r1: f32,s0: f32,s1: f32) -> f32 { + return sb.Wt10*r0*s0 + sb.Wt11*r1*s1; +} +fn SynCaBin_SynCaT(sb: SynCaBin, rt: f32,r1: f32,r2: f32,st: f32,s1: f32,s2: f32) -> f32 { + var ri = rt*sb.WtT0 + r1*sb.WtT1 + r2*sb.WtT2; + var si = st*sb.WtT0 + s1*sb.WtT1 + s2*sb.WtT2;return ri * si; +} + //////// import: "layerparams.go" struct LayerIndexes { NPools: u32, @@ -746,10 +773,18 @@ fn PathParams_DWtSyn(pt: PathParams, ctx: Context, rlay: LayerParams, syni: u32, fn PathParams_SynCa(pt: PathParams, ctx: Context, si: u32,ri: u32,di: u32, syCaP: ptr,syCaD: ptr) { var nbins = NetworkIxs[0].NCaBins; var cadSt = GvCaBinWts + GlobalScalarVars(nbins); - var cp: f32; - var cd: f32; - for (var i=0; i= mid; i-- { - cp[i] = cur - cur -= inc - } - // then drop off at .6 per plus phase window - inc = float32(.6) / float32(nplus) - for i := mid - 1; i >= 0; i-- { - cp[i] = cur - cur -= inc - if cur < 0 { - cur = 0 - } - } - - // CaD target: [0.35 0.65 0.95 1.25 1.25 1.25 1.125 1.0] - - // CaD drops off from 1.25 to 1.0 in plus - inc = float32(.25) / float32(nplus) - cur = 1.25 - inc - for i := nminus; i < n; i++ { - cd[i] = cur - cur -= inc - } - // is steady at 1.25 in the previous plus chunk - pplus := nminus - nplus - for i := nminus - 1; i >= pplus; i-- { - cd[i] = 1.25 - } - // then drops off again to .3 - inc = float32(.9) / float32(nplus+1) - cur = 1.25 - for i := pplus - 1; i >= 0; i-- { - cd[i] = cur - cur -= inc - if cur < 0 { - cur = 0 - } - } - - // rescale for bin size - scale := float32(binCycles) / (float32(25)) - var cpsum, cdsum float32 - for i := range n { - cp[i] *= scale - cd[i] *= scale - cpsum += cp[i] - cdsum += cd[i] - } - // fmt.Println(cpsum, cdsum, cdsum/cpsum) - renorm := cdsum / cpsum // yes renorm: factor is 0.9843 for 25 cyc bins, or 0.96.. for 10 cyc bins - for i := range n { - cp[i] *= renorm - } -} - -// Theta200plus50 sets bin weights for a theta cycle learning trial of 200 cycles -// and a plus phase of 50 -// func (kp *SynCaLinear) Theta200plus50() { -// // todo: compute these weights into GlobalScalars. 
Normalize? -// kp.CaP.Init(0.3, 0.4, 0.55, 0.65, 0.75, 0.85, 1.0, 1.0) // linear progression -// kp.CaD.Init(0.5, 0.65, 0.75, 0.9, 0.9, 0.9, 0.65, 0.55) // up and down -// } -// -// // Theta280plus70 sets bin weights for a theta cycle learning trial of 280 cycles -// // and a plus phase of 70, with PTau & DTau at 56 (PDTauForNCycles) -// func (kp *SynCaLinear) Theta280plus70() { -// kp.CaP.Init(0.0, 0.1, 0.23, 0.35, 0.45, 0.55, 0.75, 0.75) -// kp.CaD.Init(0.2, 0.3, 0.4, 0.5, 0.5, 0.5, 0.4, 0.3) -// } diff --git a/kinase/syncabin.go b/kinase/syncabin.go new file mode 100644 index 000000000..3f5db0bd5 --- /dev/null +++ b/kinase/syncabin.go @@ -0,0 +1,250 @@ +// Copyright (c) 2025, The Emergent Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package kinase + +import ( + "fmt" + + "github.com/chewxy/math32" +) + +//gosl:start + +// SynCaBinEnvelopes enumerates different configurations of the [SynCaBin] +// weight values that determine the shape of the temporal integration envelope +// for computing the neural SynCa value from 10 msec binned values, in terms +// of the t-1 and t-2 weights. +// Distinct linear regression coefficients, stored in global scalar values, +// are required for each envelope. +type SynCaBinEnvelopes int32 //enums:enum + +const ( + // Env30 sets the weights to t-1 = 1, t-2 = 1, + // producing a uniform 30 msec integration window. + Env30 SynCaBinEnvelopes = iota + + // Env25 sets the weights to t-1 = 1, t-2 = .5, + // approximating a 25 msec integration window. + Env25 + + // Env20 sets the weights to t-1 = 1, t-2 = 0, + // producing a uniform 20 msec integration window. + Env20 + + // Env10 sets the weights to t-1 = 0, t-2 = 0, + // producing a fast 10 msec integration window. + Env10 +) + +// SynCaBin computes synaptic calcium values as a product of the +// separately-integrated and binned sender and receiver SynCa values. +// Binning always happens at 10 msec intervals, but the product term +// is more robust if computed on a longer effective timescale, which +// is determined by weighting factors for the t-1 and t-2 bins when +// computing the neural SynCa for time bin t. +type SynCaBin struct { //types:add + + // Envelope selects the temporal integration envelope for computing + // the neural SynCa value from 10 msec binned values, in terms + // of the t-1 and t-2 weights. + Envelope SynCaBinEnvelopes + + // Wt1 is the t-1 weight value determined by Envelope setting, + // which multiplies the t-1 bin when integrating for time t. + Wt1 float32 `edit:"-"` + + // Wt2 is the t-2 weight value determined by Envelope setting, + // which multiplies the t-2 bin when integrating for time t. + Wt2 float32 `edit:"-"` + + // Wt11 is the squared normalized weight factor for the t=1 bin, + // when computing for t=1. + Wt11 float32 `display:"-"` + + // Wt10 is the squared normalized weight factor for the t=0 bin, + // when computing for t=1. + Wt10 float32 `display:"-"` + + // WtT0 is the normalized weight factor for the t bin, + // when computing for t. + WtT0 float32 `display:"-"` + + // WtT1 is the normalized weight factor for the t-1 bin, + // when computing for t. + WtT1 float32 `display:"-"` + + // WtT2 is the normalized weight factor for the t-2 bin, + // when computing for t. 
+ WtT2 float32 `display:"-"` +} + +func (sb *SynCaBin) Defaults() { + sb.Envelope = Env25 + sb.Update() +} + +func (sb *SynCaBin) Update() { + sb.UpdateWeights() +} + +// UpdateWeights updates all the weight factors based on Envelope. +func (sb *SynCaBin) UpdateWeights() { + switch sb.Envelope { + case Env30: + sb.Wt1, sb.Wt2 = 1, 1 + case Env25: + sb.Wt1, sb.Wt2 = 1, 0.5 + case Env20: + sb.Wt1, sb.Wt2 = 1, 0 + case Env10: + sb.Wt1, sb.Wt2 = 0, 0 + } + den := 1.0 + sb.Wt1 + sb.Wt11 = 1.0 / den + sb.Wt10 = sb.Wt1 / den + sb.Wt11 *= sb.Wt11 + sb.Wt10 *= sb.Wt10 + + den = 1.0 + sb.Wt1 + sb.Wt2 + sb.WtT0 = 1 / den + sb.WtT1 = sb.Wt1 / den + sb.WtT2 = sb.Wt2 / den +} + +// SynCaT0 returns the SynCa product value for time bin 0, for +// recv and send bin values at 0. This is just the product of the two. +// In principle you'd want to include the last 2 bins from prior trial +// but these early bins have low overall coefficient weights, so it isn't +// worth it. This method exists mainly to provide this documentation. +func (sb *SynCaBin) SynCaT0(r0, s0 float32) float32 { + return r0 * s0 +} + +// SynCaT1 returns the SynCa product value for time bin 1, for +// recv and send bin values at 0, 1, dealing with edge effects. +func (sb *SynCaBin) SynCaT1(r0, r1, s0, s1 float32) float32 { + return sb.Wt10*r0*s0 + sb.Wt11*r1*s1 +} + +// SynCaT returns the SynCa product value for time bin t, for +// recv and send bin values at t, t-1, and t-2. +func (sb *SynCaBin) SynCaT(rt, r1, r2, st, s1, s2 float32) float32 { + ri := rt*sb.WtT0 + r1*sb.WtT1 + r2*sb.WtT2 + si := st*sb.WtT0 + s1*sb.WtT1 + s2*sb.WtT2 + return ri * si +} + +//gosl:end + +// CaBinWts generates the weighting factors for integrating [CaBins] neuron +// level SynCa that have been multiplied send * recv to generate a synapse-level +// synaptic calcium coincidence factor, used for the trace in the kinase learning rule. +// There are separate weights for two time scales of integration: CaP and CaD (cp, cd). +// PlusCycles is the number of cycles in the final plus phase, which determines shape. +// These values are precomputed for given fixed thetaCycles and plusCycles values. +// Fortunately, one set of regression weights works reasonably for the different +// envelope values. +func CaBinWts(plusCycles int, cp, cd []float32) { + nplus := int(math32.Round(float32(plusCycles) / 10)) + caBinWts(nplus, cp, cd) +} + +// caBinWts generates the weighting factors for integrating [CaBins] neuron +// level SynCa that have been multiplied send * recv to generate a synapse-level +// synaptic calcium coincidence factor, used for the trace in the kinase learning rule. +// There are separate weights for two time scales of integration: CaP and CaD. +// nplus is the number of ca bins associated with the plus phase, +// which sets the natural timescale of the integration: total ca bins can +// be proportional to the plus phase (e.g., 4x for standard 200 / 50 total / plus), +// or longer if there is a longer minus phase window (which is downweighted). 
+func caBinWts(nplus int, cp, cd []float32) { + n := len(cp) + nminus := n - nplus + + // CaP target: [0.1, 0.4, 0.5, 0.6, 0.7, 0.8, 1.7, 3.1] + + end := float32(3.4) + start := float32(0.84) + inc := float32(end-start) / float32(nplus) + cur := float32(start) + inc + for i := nminus; i < n; i++ { + cp[i] = cur + cur += inc + } + // prior two nplus windows ("middle") go up from .5 to .8 + inc = float32(.3) / float32(2*nplus-1) + mid := n - 3*nplus + cur = start + for i := nminus - 1; i >= mid; i-- { + cp[i] = cur + cur -= inc + } + // then drop off at .7 per plus phase window + inc = float32(.7) / float32(nplus) + for i := mid - 1; i >= 0; i-- { + cp[i] = cur + cur -= inc + if cur < 0 { + cur = 0 + } + } + + // CaD target: [0.35 0.65 0.95 1.25 1.25 1.25 1.125 1.0] + + // CaD drops off in plus + base := float32(1.46) + inc = float32(.22) / float32(nplus) + cur = base - inc + for i := nminus; i < n; i++ { + cd[i] = cur + cur -= inc + } + // is steady at 1.25 in the previous plus chunk + pplus := nminus - nplus + for i := nminus - 1; i >= pplus; i-- { + cd[i] = base + } + // then drops off again to .3 + inc = float32(1.2) / float32(nplus+1) + cur = base + for i := pplus - 1; i >= 0; i-- { + cd[i] = cur + cur -= inc + if cur < 0 { + cur = 0 + } + } + + // rescale for bin size: original bin targets are set for 25 cycles + scale := float32(10) / float32(25) + var cpsum, cdsum float32 + for i := range n { + cp[i] *= scale + cd[i] *= scale + cpsum += cp[i] + cdsum += cd[i] + } + fmt.Println(cpsum, cdsum, cdsum/cpsum) + // no: + // renorm := cdsum / cpsum // yes renorm: factor is 0.9843 for 25 cyc bins, or 0.96.. for 10 cyc bins + // for i := range n { + // cp[i] *= renorm + // } +} + +// Theta200plus50 sets bin weights for a theta cycle learning trial of 200 cycles +// and a plus phase of 50 +// func (kp *SynCaLinear) Theta200plus50() { +// // todo: compute these weights into GlobalScalars. Normalize? 
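A rough usage sketch of how the pieces above fit together (assuming code inside the kinase package; the bin values and counts are placeholders, and the CaScale / gain factors applied in the actual pathway code are omitted): SynCaBin turns three adjacent 10-cycle CaBins into effective sender and receiver values whose product is the per-bin synaptic Ca, and CaBinWts supplies the per-bin weights that integrate those products into CaP and CaD.

	nbins, plusCycles := 20, 50 // e.g., standard 200-cycle trial with 10-cycle bins
	r := make([]float32, nbins) // receiver CaBins (would come from the recv neuron)
	s := make([]float32, nbins) // sender CaBins (would come from the send neuron)
	sb := SynCaBin{}
	sb.Defaults() // Env25: Wt1 = 1, Wt2 = 0.5
	cp := make([]float32, nbins)
	cd := make([]float32, nbins)
	CaBinWts(plusCycles, cp, cd) // plusCycles = 50 -> nplus = 5 plus-phase bins
	var caP, caD float32
	ca := sb.SynCaT0(r[0], s[0]) // bin 0: plain product, edge case
	caP += cp[0] * ca
	caD += cd[0] * ca
	ca = sb.SynCaT1(r[0], r[1], s[0], s[1]) // bin 1: two-bin edge case
	caP += cp[1] * ca
	caD += cd[1] * ca
	for t := 2; t < nbins; t++ { // general case: t, t-1, t-2 envelope
		ca = sb.SynCaT(r[t], r[t-1], r[t-2], s[t], s[t-1], s[t-2])
		caP += cp[t] * ca
		caD += cd[t] * ca
	}
	fmt.Println(caP, caD, caP-caD) // CaP - CaD is the raw kinase DWt error gradient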
+// kp.CaP.Init(0.3, 0.4, 0.55, 0.65, 0.75, 0.85, 1.0, 1.0) // linear progression +// kp.CaD.Init(0.5, 0.65, 0.75, 0.9, 0.9, 0.9, 0.65, 0.55) // up and down +// } +// +// // Theta280plus70 sets bin weights for a theta cycle learning trial of 280 cycles +// // and a plus phase of 70, with PTau & DTau at 56 (PDTauForNCycles) +// func (kp *SynCaLinear) Theta280plus70() { +// kp.CaP.Init(0.0, 0.1, 0.23, 0.35, 0.45, 0.55, 0.75, 0.75) +// kp.CaD.Init(0.2, 0.3, 0.4, 0.5, 0.5, 0.5, 0.4, 0.3) +// } diff --git a/kinase/typegen.go b/kinase/typegen.go index 158322237..1be6b08b2 100644 --- a/kinase/typegen.go +++ b/kinase/typegen.go @@ -6,16 +6,10 @@ import ( "cogentcore.org/core/types" ) -var _ = types.AddType(&types.Type{Name: "github.com/emer/axon/v2/kinase.Linear", IDName: "linear", Doc: "Linear performs a linear regression to approximate the synaptic Ca\nintegration between send and recv neurons.", Fields: []types.Field{{Name: "Neuron", Doc: "Kinase Neuron params"}, {Name: "Synapse", Doc: "Kinase Synapse params"}, {Name: "BinProd", Doc: "gain on S*R product for SpikeBins"}, {Name: "NCycles", Doc: "total number of cycles (1 MSec) to run"}, {Name: "PlusCycles", Doc: "number of plus cycles"}, {Name: "CyclesPerBin", Doc: "CyclesPerBin specifies the bin size for accumulating spikes"}, {Name: "NumBins", Doc: "NumBins = NCycles / CyclesPerBin"}, {Name: "MaxHz", Doc: "MaxHz is the maximum firing rate to sample in minus, plus phases"}, {Name: "StepHz", Doc: "StepHz is the step size for sampling Hz"}, {Name: "NTrials", Doc: "NTrials is number of trials per Hz case"}, {Name: "TotalTrials", Doc: "Total Trials is number of trials for all data"}, {Name: "Send", Doc: "Sending neuron"}, {Name: "Recv", Doc: "Receiving neuron"}, {Name: "StdSyn", Doc: "Standard synapse values"}, {Name: "LinearSyn", Doc: "Linear synapse values"}, {Name: "ErrDWt", Doc: "ErrDWt is the target error dwt: PlusHz - MinusHz"}, {Name: "SpikeBins", Doc: "binned integration of send, recv spikes"}, {Name: "Data", Doc: "Data to fit the regression"}}}) +var _ = types.AddType(&types.Type{Name: "github.com/emer/axon/v2/kinase.CaDtParams", IDName: "ca-dt-params", Doc: "CaDtParams has rate constants for integrating Ca calcium\nat different time scales, including final CaP = CaMKII and CaD = DAPK1\ntimescales for LTP potentiation vs. 
LTD depression factors.", Directives: []types.Directive{{Tool: "go", Directive: "generate", Args: []string{"core", "generate", "-add-types"}}, {Tool: "gosl", Directive: "start"}, {Tool: "types", Directive: "add"}}, Fields: []types.Field{{Name: "MTau", Doc: "CaM (calmodulin) time constant in cycles (msec),\nwhich is the first level integration.\nFor CaLearn, 2 is best; for CaSpk, 5 is best.\nFor synaptic-level integration this integrates on top of Ca\nsignal from send->CaSyn * recv->CaSyn, each of which are\ntypically integrated with a 30 msec Tau."}, {Name: "PTau", Doc: "LTP spike-driven potentiation Ca factor (CaP) time constant\nin cycles (msec), simulating CaMKII in the Kinase framework,\ncascading on top of MTau.\nComputationally, CaP represents the plus phase learning signal that\nreflects the most recent past information.\nValue tracks linearly with number of cycles per learning trial:\n200 = 40, 300 = 60, 400 = 80"}, {Name: "DTau", Doc: "LTD spike-driven depression Ca factor (CaD) time constant\nin cycles (msec), simulating DAPK1 in Kinase framework,\ncascading on top of PTau.\nComputationally, CaD represents the minus phase learning signal that\nreflects the expectation representation prior to experiencing the\noutcome (in addition to the outcome).\nValue tracks linearly with number of cycles per learning trial:\n200 = 40, 300 = 60, 400 = 80"}, {Name: "MDt", Doc: "rate = 1 / tau"}, {Name: "PDt", Doc: "rate = 1 / tau"}, {Name: "DDt", Doc: "rate = 1 / tau"}, {Name: "pad"}, {Name: "pad1"}}}) -var _ = types.AddType(&types.Type{Name: "github.com/emer/axon/v2/kinase.Neuron", IDName: "neuron", Doc: "Neuron has Neuron state", Fields: []types.Field{{Name: "Spike", Doc: "Neuron spiking (0,1)"}, {Name: "SpikeP", Doc: "Neuron probability of spiking"}, {Name: "CaSyn", Doc: "CaSyn is spike-driven calcium trace for synapse-level Ca-driven learning: exponential integration of SpikeG * Spike at SynTau time constant (typically 30). Synapses integrate send.CaSyn * recv.CaSyn across M, P, D time integrals for the synaptic trace driving credit assignment in learning. Time constant reflects binding time of Glu to NMDA and Ca buffering postsynaptically, and determines time window where pre * post spiking must overlap to drive learning."}, {Name: "CaSpkM", Doc: "neuron-level spike-driven Ca integration"}, {Name: "CaSpkP", Doc: "neuron-level spike-driven Ca integration"}, {Name: "CaSpkD", Doc: "neuron-level spike-driven Ca integration"}, {Name: "TotalSpikes"}, {Name: "SpikeBins", Doc: "binned count of spikes, for regression learning"}}}) +var _ = types.AddType(&types.Type{Name: "github.com/emer/axon/v2/kinase.CaSpikeParams", IDName: "ca-spike-params", Doc: "CaSpikeParams parameterizes the neuron-level spike-driven calcium\nsignals, including CaM, CaP, CaD for basic activity stats and RLRate, and\nCaSyn which is integrated at the neuron level and drives synapse-level,\npre * post Ca integration, providing the Tr credit assignment trace factor\nfor kinase error-driven cortical learning.", Fields: []types.Field{{Name: "SpikeCaM", Doc: "SpikeCaM is the drive factor for updating the neuron-level CaM (calmodulin)\nbased on a spike impulse, which is then cascaded into updating the\nCaP and CaD values. These values are used for stats and RLRate computation,\nbut do not drive learning directly. Larger values (e.g., 12) may be useful\nin some models."}, {Name: "SpikeCaSyn", Doc: "SpikeCaSyn is the drive factor for updating the neuron-level CaSyn\nsynaptic calcium trace value based on a spike impulse. 
CaSyn is integrated\ninto CaBins which are then used to compute synapse-level pre * post\nCa values over the theta cycle, which then drive the Tr credit assignment\ntrace factor for kinase error-driven cortical learning. Changes in this\nvalue will affect the net learning rate."}, {Name: "CaSynTau", Doc: "CaSynTau is the time constant for integrating the spike-driven calcium\ntrace CaSyn at sender and recv neurons. See SpikeCaSyn for more info.\nIf this param is changed, then there will be a change in effective\nlearning rate that can be compensated for by multiplying\nCaScale by sqrt(30 / sqrt(SynTau)"}, {Name: "CaSynDt", Doc: "CaSynDt rate = 1 / tau"}, {Name: "Dt", Doc: "Dt are time constants for integrating Spike-driven Ca across CaM, CaP and CaD\ncascading levels. Typically the same as in LearnCa parameters."}}}) -var _ = types.AddType(&types.Type{Name: "github.com/emer/axon/v2/kinase.Synapse", IDName: "synapse", Doc: "Synapse has Synapse state", Fields: []types.Field{{Name: "CaSyn"}, {Name: "CaM", Doc: "CaM is first stage running average (mean) Ca calcium level (like CaM = calmodulin), feeds into CaP"}, {Name: "CaP", Doc: "CaP is shorter timescale integrated CaM value, representing the plus, LTP direction of weight change and capturing the function of CaMKII in the Kinase learning rule"}, {Name: "CaD", Doc: "CaD is longer timescale integrated CaP value, representing the minus, LTD direction of weight change and capturing the function of DAPK1 in the Kinase learning rule"}, {Name: "DWt", Doc: "DWt is the CaP - CaD"}}}) +var _ = types.AddType(&types.Type{Name: "github.com/emer/axon/v2/kinase.SynCaBinEnvelopes", IDName: "syn-ca-bin-envelopes", Doc: "SynCaBinEnvelopes enumerates different configurations of the [SynCaBin]\nweight values that determine the shape of the temporal integration envelope\nfor computing the neural SynCa value from 10 msec binned values, in terms\nof the t-1 and t-2 weights.\nDistinct linear regression coefficients, stored in global scalar values,\nare required for each envelope."}) -var _ = types.AddType(&types.Type{Name: "github.com/emer/axon/v2/kinase.CaDtParams", IDName: "ca-dt-params", Doc: "CaDtParams has rate constants for integrating Ca calcium\nat different time scales, including final CaP = CaMKII and CaD = DAPK1\ntimescales for LTP potentiation vs. LTD depression factors.", Directives: []types.Directive{{Tool: "gosl", Directive: "start", Args: []string{"kinase"}}, {Tool: "types", Directive: "add"}}, Fields: []types.Field{{Name: "MTau", Doc: "CaM (calmodulin) time constant in cycles (msec) -- for synaptic-level integration this integrates on top of Ca signal from send->CaSyn * recv->CaSyn, each of which are typically integrated with a 30 msec Tau."}, {Name: "PTau", Doc: "LTP spike-driven Ca factor (CaP) time constant in cycles (msec), simulating CaMKII in the Kinase framework, with 40 on top of MTau roughly tracking the biophysical rise time. Computationally, CaP represents the plus phase learning signal that reflects the most recent past information."}, {Name: "DTau", Doc: "LTD spike-driven Ca factor (CaD) time constant in cycles (msec), simulating DAPK1 in Kinase framework. Computationally, CaD represents the minus phase learning signal that reflects the expectation representation prior to experiencing the outcome (in addition to the outcome). 
For integration equations, this cannot be identical to PTau."}, {Name: "ExpAdj", Doc: "if true, adjust dt time constants when using exponential integration equations to compensate for difference between discrete and continuous integration"}, {Name: "MDt", Doc: "rate = 1 / tau"}, {Name: "PDt", Doc: "rate = 1 / tau"}, {Name: "DDt", Doc: "rate = 1 / tau"}, {Name: "M4Dt", Doc: "4 * rate = 1 / tau"}, {Name: "P4Dt", Doc: "4 * rate = 1 / tau"}, {Name: "D4Dt", Doc: "4 * rate = 1 / tau"}, {Name: "pad"}, {Name: "pad1"}}}) - -var _ = types.AddType(&types.Type{Name: "github.com/emer/axon/v2/kinase.NeurCaParams", IDName: "neur-ca-params", Doc: "NeurCaParams parameterizes the neuron-level spike-driven calcium\nsignals, starting with CaSyn that is integrated at the neuron level\nand drives synapse-level, pre * post Ca integration, which provides the Tr\ntrace that multiplies error signals, and drives learning directly for Target layers.\nCaSpk* values are integrated separately at the Neuron level and used for UpdateThr\nand RLRate as a proxy for the activation (spiking) based learning signal.", Fields: []types.Field{{Name: "SpikeG", Doc: "SpikeG is a gain multiplier on spike impulses for computing CaSpk:\nincreasing this directly affects the magnitude of the trace values,\nlearning rate in Target layers, and other factors that depend on CaSpk\nvalues, including RLRate, UpdateThr.\nLarger networks require higher gain factors at the neuron level:\n12, vs 8 for smaller."}, {Name: "SynTau", Doc: "time constant for integrating spike-driven calcium trace at sender and recv\nneurons, CaSyn, which then drives synapse-level integration of the\njoint pre * post synapse-level activity, in cycles (msec).\nNote: if this param is changed, then there will be a change in effective\nlearning rate that can be compensated for by multiplying\nPathParams.Learn.KinaseCa.CaScale by sqrt(30 / sqrt(SynTau)"}, {Name: "SynDt", Doc: "rate = 1 / tau"}, {Name: "pad"}, {Name: "Dt", Doc: "time constants for integrating CaSpk across M, P and D cascading levels -- these are typically the same as in CaLrn and Path level for synaptic integration, except for the M factor."}}}) - -var _ = types.AddType(&types.Type{Name: "github.com/emer/axon/v2/kinase.SynCaParams", IDName: "syn-ca-params", Doc: "SynCaParams has rate constants for integrating spike-driven Ca calcium\nat different time scales, including final CaP = CaMKII and CaD = DAPK1\ntimescales for LTP potentiation vs. 
LTD depression factors.", Directives: []types.Directive{{Tool: "types", Directive: "add"}}, Fields: []types.Field{{Name: "CaScale", Doc: "CaScale is a scaling multiplier on synaptic Ca values,\nwhich due to the multiplication of send * recv are smaller in magnitude.\nThe default 12 value keeps them in roughly the unit scale,\nand affects effective learning rate."}, {Name: "MaxISI", Doc: "maximum ISI for integrating in Opt mode -- above that just set to 0"}, {Name: "pad"}, {Name: "pad1"}, {Name: "Dt", Doc: "time constants for integrating at M, P, and D cascading levels"}}}) - -var _ = types.AddType(&types.Type{Name: "github.com/emer/axon/v2/kinase.Rules", IDName: "rules", Doc: "Rules are different options for Kinase-based learning rules\nThese are now implemented using separate Path types in kinasex"}) +var _ = types.AddType(&types.Type{Name: "github.com/emer/axon/v2/kinase.SynCaBin", IDName: "syn-ca-bin", Doc: "SynCaBin computes synaptic calcium values as a product of the\nseparately-integrated and binned sender and receiver SynCa values.\nBinning always happens at 10 msec intervals, but the product term\nis more robust if computed on a longer effective timescale, which\nis determined by weighting factors for the t-1 and t-2 bins when\ncomputing the neural SynCa for time bin t. Different linear regression\ncoefficients are required for different such weight factors, so\nwe use the [SynCaBinEnvelopes] enum to discretize the possibilities and\nselect the appropriate coefficients.", Directives: []types.Directive{{Tool: "types", Directive: "add"}}, Fields: []types.Field{{Name: "Envelope", Doc: "Envelope selects the temporal integration envelope for computing\nthe neural SynCa value from 10 msec binned values, in terms\nof the t-1 and t-2 weights."}, {Name: "Wt1", Doc: "Wt1 is the t-1 weight value determined by Envelope setting,\nwhich multiplies the t-1 bin when integrating for time t."}, {Name: "Wt2", Doc: "Wt2 is the t-2 weight value determined by Envelope setting,\nwhich multiplies the t-2 bin when integrating for time t."}, {Name: "Wt11", Doc: "Wt11 is the squared normalized weight factor for the t=1 bin,\nwhen computing for t=1."}, {Name: "Wt10", Doc: "Wt10 is the squared normalized weight factor for the t=0 bin,\nwhen computing for t=1."}, {Name: "WtT0", Doc: "WtT0 is the normalized weight factor for the t bin,\nwhen computing for t."}, {Name: "WtT1", Doc: "WtT1 is the normalized weight factor for the t-1 bin,\nwhen computing for t."}, {Name: "WtT2", Doc: "WtT2 is the normalized weight factor for the t-2 bin,\nwhen computing for t."}}}) diff --git a/sims/bgdorsal/bg-dorsal.go b/sims/bgdorsal/bg-dorsal.go index 4ba4523f4..2d45715d1 100644 --- a/sims/bgdorsal/bg-dorsal.go +++ b/sims/bgdorsal/bg-dorsal.go @@ -792,7 +792,7 @@ func (ss *Sim) ConfigStats() { tsr := levelDir.Float64(name) if phase == Start { tsr.SetNumRows(0) - plot.SetFirstStyle(tsr, func(s *plot.Style) { + plot.SetFirstStyler(tsr, func(s *plot.Style) { s.Range.SetMin(0).SetMax(1) if si >= 3 && si <= 4 { s.On = true @@ -838,7 +838,7 @@ func (ss *Sim) ConfigStats() { var stat float64 if phase == Start { tsr.SetNumRows(0) - plot.SetFirstStyle(tsr, func(s *plot.Style) { + plot.SetFirstStyler(tsr, func(s *plot.Style) { s.Range.SetMin(0).SetMax(1) if name != "RT" { s.On = true @@ -886,7 +886,7 @@ func (ss *Sim) ConfigStats() { tsr := levelDir.Float64(name) if phase == Start { tsr.SetNumRows(0) - plot.SetFirstStyle(tsr, func(s *plot.Style) { + plot.SetFirstStyler(tsr, func(s *plot.Style) { s.Range.SetMin(0) s.On = true }) @@ -913,7 
+913,7 @@ func (ss *Sim) ConfigStats() { tsr := levelDir.Float64(name) if phase == Start { tsr.SetNumRows(0) - plot.SetFirstStyle(tsr, func(s *plot.Style) { + plot.SetFirstStyler(tsr, func(s *plot.Style) { s.Range.SetMin(0) s.On = true }) diff --git a/sims/bgventral/bg-ventral.go b/sims/bgventral/bg-ventral.go index a4382e6d2..1b9856d04 100644 --- a/sims/bgventral/bg-ventral.go +++ b/sims/bgventral/bg-ventral.go @@ -619,7 +619,7 @@ func (ss *Sim) ConfigStats() { var stat float64 if phase == Start { tsr.SetNumRows(0) - plot.SetFirstStyle(tsr, func(s *plot.Style) { + plot.SetFirstStyler(tsr, func(s *plot.Style) { s.Range.SetMin(0).SetMax(1) if si >= 2 && si <= 5 { s.On = true @@ -664,7 +664,7 @@ func (ss *Sim) ConfigStats() { stats.GroupStats(curModeDir, stats.StatMean, subDir.Value(name)) // note: results go under Group name: TrialName gp := curModeDir.Dir("Stats/TrialName/" + name).Value("Mean") - plot.SetFirstStyle(gp, func(s *plot.Style) { + plot.SetFirstStyler(gp, func(s *plot.Style) { if si >= 2 && si <= 3 { s.On = true } diff --git a/sims/choose/armaze/gui.go b/sims/choose/armaze/gui.go index 746204115..fe895b1fb 100644 --- a/sims/choose/armaze/gui.go +++ b/sims/choose/armaze/gui.go @@ -522,7 +522,7 @@ func (vw *GUI) ConfigUSPlots() { cols := []string{"Drive", "USin", "OFC"} for _, cl := range cols { - plot.SetFirstStyle(cl, func(s *plot.Style) { + plot.SetFirstStyler(cl, func(s *plot.Style) { s.On = true s.Range.SetMin(0).SetMax(1) diff --git a/sims/choose/choose.go b/sims/choose/choose.go index 0be245d78..5e651f4aa 100644 --- a/sims/choose/choose.go +++ b/sims/choose/choose.go @@ -754,7 +754,7 @@ func (ss *Sim) ConfigStats() { // var stat float64 // if phase == Start { // tsr.SetNumRows(0) - // plot.SetFirstStyle(tsr, func(s *plot.Style) { + // plot.SetFirstStyler(tsr, func(s *plot.Style) { // s.Range.SetMin(0).SetMax(1) // s.On = true // }) diff --git a/sims/deepfsa/config.go b/sims/deepfsa/config.go index 486942598..1d44f6d9c 100644 --- a/sims/deepfsa/config.go +++ b/sims/deepfsa/config.go @@ -105,6 +105,9 @@ type RunConfig struct { // PlusCycles is the total number of plus-phase cycles per trial. For Cycles=300, use 100. PlusCycles int `default:"50"` + // CaBinCycles is the number of cycles per CaBin: how fine-grained the synaptic Ca is. + CaBinCycles int `default:"25"` // no diff for 25 vs. 10 + // NZero is how many perfect, zero-error epochs before stopping a Run. NZero int `default:"2"` diff --git a/sims/deepfsa/deep-fsa.go b/sims/deepfsa/deep-fsa.go index 496cca4c2..816eeece8 100644 --- a/sims/deepfsa/deep-fsa.go +++ b/sims/deepfsa/deep-fsa.go @@ -198,7 +198,9 @@ func (ss *Sim) ConfigEnv() { func (ss *Sim) ConfigNet(net *axon.Network) { net.SetMaxData(ss.Config.Run.NData) - net.Context().ThetaCycles = int32(ss.Config.Run.Cycles) + net.Context().SetThetaCycles(int32(ss.Config.Run.Cycles)). + SetPlusCycles(int32(ss.Config.Run.PlusCycles)). 
+ SetCaBinCycles(int32(ss.Config.Run.CaBinCycles)) net.SetRandSeed(ss.RandSeeds[0]) // init new separate random seed, using run = 0 in, inp := net.AddInputPulv4D("Input", 1, 7, ss.Config.Env.UnitsPer, 1, 2) @@ -535,7 +537,7 @@ func (ss *Sim) ConfigStats() { var stat float64 if phase == Start { tsr.SetNumRows(0) - plot.SetFirstStyle(tsr, func(s *plot.Style) { + plot.SetFirstStyler(tsr, func(s *plot.Style) { s.Range.SetMin(0).SetMax(1) s.On = true switch name { diff --git a/sims/deepfsa/params.go b/sims/deepfsa/params.go index 2f740a625..df5569ff6 100644 --- a/sims/deepfsa/params.go +++ b/sims/deepfsa/params.go @@ -93,11 +93,12 @@ var PathParams = axon.PathSheets{ "Base": { {Sel: "Path", Doc: "std", Set: func(pt *axon.PathParams) { - pt.Learn.DWt.SubMean = 0 // 0 > 1 -- even with CTCtxt = 0 - pt.Learn.LRate.Base = 0.03 // .03 > others -- same as CtCtxt - pt.SWts.Adapt.LRate = 0.01 // 0.01 or 0.0001 music - pt.SWts.Init.SPct = 1.0 // 1 works fine here -- .5 also ok - pt.Learn.DWt.Tau = 1 // 1 >> 2 v0.0.9 + pt.Learn.DWt.SubMean = 0 // 0 > 1 -- even with CTCtxt = 0 + pt.Learn.LRate.Base = 0.03 // .03 > others -- same as CtCtxt + pt.SWts.Adapt.LRate = 0.01 // 0.01 or 0.0001 music + pt.SWts.Init.SPct = 1.0 // 1 works fine here -- .5 also ok + pt.Learn.DWt.Tau = 1 // 1 >> 2 v0.0.9 + pt.Learn.DWt.CaPScale = 0.95 // 0.95 def; 1 maybe slightly more stable }}, {Sel: ".BackPath", Doc: "top-down back-pathways MUST have lower relative weight scale, otherwise network hallucinates", Set: func(pt *axon.PathParams) { diff --git a/sims/deepmove/deep-move.go b/sims/deepmove/deep-move.go index 23172ad26..a409cc0e4 100644 --- a/sims/deepmove/deep-move.go +++ b/sims/deepmove/deep-move.go @@ -570,7 +570,7 @@ func (ss *Sim) ConfigStats() { var stat float64 if phase == Start { tsr.SetNumRows(0) - plot.SetFirstStyle(tsr, func(s *plot.Style) { + plot.SetFirstStyler(tsr, func(s *plot.Style) { s.Range.SetMin(0).SetMax(1) s.On = true }) diff --git a/sims/deepmusic/config.go b/sims/deepmusic/config.go index 48ad0e34a..bc10958a8 100644 --- a/sims/deepmusic/config.go +++ b/sims/deepmusic/config.go @@ -101,6 +101,9 @@ type RunConfig struct { //types:add // PlusCycles is the total number of plus-phase cycles per trial. For Cycles=300, use 100. PlusCycles int `default:"50"` + // CaBinCycles is the number of cycles per CaBin: how fine-grained the synaptic Ca is. + CaBinCycles int `default:"25"` // 25 > 10 + // TestInterval is how often (in epochs) to run through all the test patterns, // in terms of training epochs. Can use 0 or -1 for no testing. 
TestInterval int `default:"0"` diff --git a/sims/deepmusic/configs/30notes.toml b/sims/deepmusic/configs/30notes.toml index 9d8d52bf2..dcdaa525f 100644 --- a/sims/deepmusic/configs/30notes.toml +++ b/sims/deepmusic/configs/30notes.toml @@ -2,6 +2,6 @@ FullSong = false [Run] - NTrials = 128 - NEpochs = 200 + Trials = 128 + Epochs = 200 diff --git a/sims/deepmusic/configs/fullsong.toml b/sims/deepmusic/configs/fullsong.toml index 5ceca8d3a..107ba2e74 100644 --- a/sims/deepmusic/configs/fullsong.toml +++ b/sims/deepmusic/configs/fullsong.toml @@ -2,6 +2,6 @@ FullSong = true [Run] - NTrials = 768 # number of rows in song - NEpochs = 500 + Trials = 768 # number of rows in song + Epochs = 500 diff --git a/sims/deepmusic/deep-music.go b/sims/deepmusic/deep-music.go index 45dacb177..d5ed0d9f5 100644 --- a/sims/deepmusic/deep-music.go +++ b/sims/deepmusic/deep-music.go @@ -220,7 +220,9 @@ func (ss *Sim) ConfigEnv() { func (ss *Sim) ConfigNet(net *axon.Network) { net.SetMaxData(ss.Config.Run.NData) - net.Context().ThetaCycles = int32(ss.Config.Run.Cycles) + net.Context().SetThetaCycles(int32(ss.Config.Run.Cycles)). + SetPlusCycles(int32(ss.Config.Run.PlusCycles)). + SetCaBinCycles(int32(ss.Config.Run.CaBinCycles)) net.SetRandSeed(ss.RandSeeds[0]) // init new separate random seed, using run = 0 ev := ss.Envs.ByMode(etime.Train).(*MusicEnv) @@ -551,7 +553,7 @@ func (ss *Sim) ConfigStats() { var stat float64 if phase == Start { tsr.SetNumRows(0) - plot.SetFirstStyle(tsr, func(s *plot.Style) { + plot.SetFirstStyler(tsr, func(s *plot.Style) { s.Range.SetMin(0).SetMax(1) s.On = true }) diff --git a/sims/deepmusic/params.go b/sims/deepmusic/params.go index 4a8a36197..48a28eb5e 100644 --- a/sims/deepmusic/params.go +++ b/sims/deepmusic/params.go @@ -110,6 +110,7 @@ var PathParams = axon.PathSheets{ pt.SWts.Adapt.LRate = 0.0001 // 0.01 == 0.0001 but 0.001 not as good.. pt.SWts.Init.SPct = 1.0 // 1 works fine here -- .5 also ok pt.Learn.DWt.Tau = 1 // 1 > 2 v0.0.9 + pt.Learn.DWt.CaPScale = 0.95 // 0.95 def >> 1 }}, {Sel: ".BackPath", Doc: "top-down back-pathways MUST have lower relative weight scale, otherwise network hallucinates", Set: func(pt *axon.PathParams) { diff --git a/sims/deepmusic/typegen.go b/sims/deepmusic/typegen.go index 232537d2f..602dfcfb5 100644 --- a/sims/deepmusic/typegen.go +++ b/sims/deepmusic/typegen.go @@ -8,9 +8,9 @@ import ( var _ = types.AddType(&types.Type{Name: "main.EnvConfig", IDName: "env-config", Doc: "EnvConfig has config params for environment\nnote: only adding fields for key Env params that matter for both Network and Env\nother params are set via the Env map data mechanism.", Fields: []types.Field{{Name: "Env", Doc: "Env parameters: can set any field/subfield on Env struct,\nusing standard TOML formatting."}, {Name: "UnitsPer", Doc: "UnitsPer is the number of units per localist output unit. 
4 best."}, {Name: "FullSong", Doc: "train the full song -- else 30 notes"}, {Name: "PlayTarg", Doc: "during testing, play the target note instead of the actual network output"}, {Name: "TestClamp", Doc: "drive inputs from the training sequence during testing -- otherwise use network's own output"}}}) -var _ = types.AddType(&types.Type{Name: "main.ParamConfig", IDName: "param-config", Doc: "ParamConfig has config parameters related to sim params.", Directives: []types.Directive{{Tool: "types", Directive: "add"}}, Fields: []types.Field{{Name: "Hid2", Doc: "Hid2 use a second hidden layer that predicts the first.\nIs not beneficial for this simple markovian task."}, {Name: "Sheet", Doc: "Sheet is the extra params sheet name(s) to use (space separated\nif multiple). Must be valid name as listed in compiled-in params\nor loaded params."}, {Name: "Tag", Doc: "Tag is an extra tag to add to file names and logs saved from this run."}, {Name: "Note", Doc: "Note is additional info to describe the run params etc,\nlike a git commit message for the run."}, {Name: "SaveAll", Doc: "SaveAll will save a snapshot of all current param and config settings\nin a directory named params_ (or _good if Good is true),\nthen quit. Useful for comparing to later changes and seeing multiple\nviews of current params."}, {Name: "Good", Doc: "Good is for SaveAll, save to params_good for a known good params state.\nThis can be done prior to making a new release after all tests are passing.\nAdd results to git to provide a full diff record of all params over level."}}}) +var _ = types.AddType(&types.Type{Name: "main.ParamConfig", IDName: "param-config", Doc: "ParamConfig has config parameters related to sim params.", Directives: []types.Directive{{Tool: "types", Directive: "add"}}, Fields: []types.Field{{Name: "Hid2", Doc: "Hid2 use a second hidden layer that predicts the first.\nIs not beneficial for this simple markovian task."}, {Name: "Script", Doc: "Script is an interpreted script that is run to set parameters in Layer and Path\nsheets, by default using the \"Script\" set name."}, {Name: "Sheet", Doc: "Sheet is the extra params sheet name(s) to use (space separated\nif multiple). Must be valid name as listed in compiled-in params\nor loaded params."}, {Name: "Tag", Doc: "Tag is an extra tag to add to file names and logs saved from this run."}, {Name: "Note", Doc: "Note is additional info to describe the run params etc,\nlike a git commit message for the run."}, {Name: "SaveAll", Doc: "SaveAll will save a snapshot of all current param and config settings\nin a directory named params_ (or _good if Good is true),\nthen quit. Useful for comparing to later changes and seeing multiple\nviews of current params."}, {Name: "Good", Doc: "Good is for SaveAll, save to params_good for a known good params state.\nThis can be done prior to making a new release after all tests are passing.\nAdd results to git to provide a full diff record of all params over level."}}}) -var _ = types.AddType(&types.Type{Name: "main.RunConfig", IDName: "run-config", Doc: "RunConfig has config parameters related to running the sim.", Directives: []types.Directive{{Tool: "types", Directive: "add"}}, Fields: []types.Field{{Name: "GPU", Doc: "GPU uses the GPU for computation, generally faster than CPU even for\nsmall models if NData ~16."}, {Name: "NData", Doc: "NData is the number of data-parallel items to process in parallel per trial.\nIs significantly faster for both CPU and GPU. 
Results in an effective\nmini-batch of learning."}, {Name: "NThreads", Doc: "NThreads is the number of parallel threads for CPU computation;\n0 = use default."}, {Name: "Run", Doc: "Run is the _starting_ run number, which determines the random seed.\nNRuns counts up from there. Can do all runs in parallel by launching\nseparate jobs with each starting Run, NRuns = 1."}, {Name: "Runs", Doc: "Runs is the total number of runs to do when running Train, starting from Run."}, {Name: "Epochs", Doc: "Epochs is the total number of epochs per run."}, {Name: "Trials", Doc: "Trials is the total number of trials per epoch.\nShould be an even multiple of NData."}, {Name: "Cycles", Doc: "Cycles is the total number of cycles per trial: at least 200."}, {Name: "PlusCycles", Doc: "PlusCycles is the total number of plus-phase cycles per trial. For Cycles=300, use 100."}, {Name: "TestInterval", Doc: "TestInterval is how often (in epochs) to run through all the test patterns,\nin terms of training epochs. Can use 0 or -1 for no testing."}, {Name: "PCAInterval", Doc: "PCAInterval is how often (in epochs) to compute PCA on hidden\nrepresentations to measure variance."}, {Name: "StartWeights", Doc: "StartWeights is the name of weights file to load at start of first run."}}}) +var _ = types.AddType(&types.Type{Name: "main.RunConfig", IDName: "run-config", Doc: "RunConfig has config parameters related to running the sim.", Directives: []types.Directive{{Tool: "types", Directive: "add"}}, Fields: []types.Field{{Name: "GPU", Doc: "GPU uses the GPU for computation, generally faster than CPU even for\nsmall models if NData ~16."}, {Name: "GPUDevice", Doc: "GPUDevice selects the gpu device to use."}, {Name: "NData", Doc: "NData is the number of data-parallel items to process in parallel per trial.\nIs significantly faster for both CPU and GPU. Results in an effective\nmini-batch of learning."}, {Name: "NThreads", Doc: "NThreads is the number of parallel threads for CPU computation;\n0 = use default."}, {Name: "Run", Doc: "Run is the _starting_ run number, which determines the random seed.\nRuns counts up from there. Can do all runs in parallel by launching\nseparate jobs with each starting Run, Runs = 1."}, {Name: "Runs", Doc: "Runs is the total number of runs to do when running Train, starting from Run."}, {Name: "Epochs", Doc: "Epochs is the total number of epochs per run."}, {Name: "Trials", Doc: "Trials is the total number of trials per epoch.\nShould be an even multiple of NData."}, {Name: "Cycles", Doc: "Cycles is the total number of cycles per trial: at least 200."}, {Name: "PlusCycles", Doc: "PlusCycles is the total number of plus-phase cycles per trial. For Cycles=300, use 100."}, {Name: "CaBinCycles", Doc: "CaBinCycles is the number of cycles per CaBin: how fine-grained the synaptic Ca is."}, {Name: "TestInterval", Doc: "TestInterval is how often (in epochs) to run through all the test patterns,\nin terms of training epochs. 
Can use 0 or -1 for no testing."}, {Name: "PCAInterval", Doc: "PCAInterval is how often (in epochs) to compute PCA on hidden\nrepresentations to measure variance."}, {Name: "StartWeights", Doc: "StartWeights is the name of weights file to load at start of first run."}}}) var _ = types.AddType(&types.Type{Name: "main.LogConfig", IDName: "log-config", Doc: "LogConfig has config parameters related to logging data.", Fields: []types.Field{{Name: "SaveWeights", Doc: "SaveWeights will save final weights after each run."}, {Name: "Train", Doc: "Train has the list of Train mode levels to save log files for."}, {Name: "Test", Doc: "Test has the list of Test mode levels to save log files for."}}}) diff --git a/sims/hip/hip.go b/sims/hip/hip.go index 13ad4cecb..75e930c85 100644 --- a/sims/hip/hip.go +++ b/sims/hip/hip.go @@ -498,7 +498,7 @@ func (ss *Sim) ConfigStats() { var stat float64 if phase == Start { tsr.SetNumRows(0) - plot.SetFirstStyle(tsr, func(s *plot.Style) { + plot.SetFirstStyler(tsr, func(s *plot.Style) { s.Range.SetMin(0).SetMax(1) s.On = true switch name { diff --git a/sims/inhib/inhib.go b/sims/inhib/inhib.go index a867a4ef3..21344fe3e 100644 --- a/sims/inhib/inhib.go +++ b/sims/inhib/inhib.go @@ -475,7 +475,7 @@ func (ss *Sim) ConfigStats() { ndata := 1 if phase == Start { tsr.SetNumRows(0) - plot.SetFirstStyle(tsr, func(s *plot.Style) { + plot.SetFirstStyler(tsr, func(s *plot.Style) { s.Range.SetMin(0).SetMax(1) s.On = false switch stnm { diff --git a/sims/kinasesim/config.go b/sims/kinasesim/config.go index 21924babb..078ba07b4 100644 --- a/sims/kinasesim/config.go +++ b/sims/kinasesim/config.go @@ -44,7 +44,7 @@ type RunConfig struct { PlusCycles int `default:"50"` // CaBinCycles is the number of cycles per CaBin: how fine-grained the synaptic Ca is. - CaBinCycles int `default:"25"` + CaBinCycles int `default:"10"` // NCaBins is the total number of ca bins in unit variables. // Set to Context.ThetaCycles / CaBinCycles in Build. diff --git a/sims/kinasesim/kinase.go b/sims/kinasesim/kinase.go index 84ab919b5..22057b7ce 100644 --- a/sims/kinasesim/kinase.go +++ b/sims/kinasesim/kinase.go @@ -280,14 +280,16 @@ func (ss *Sim) TrialImpl(minusHz, plusHz float32) { ss.Cycle(&ks.Send, Sint, ks.Cycle) ss.Cycle(&ks.Recv, Rint, ks.Cycle) - ca := 8 * ks.Send.CaSyn * ks.Recv.CaSyn // 12 is standard CaGain Factor + // original synaptic-level integration into "StdSyn" + ca := 8 * ks.Send.CaSyn * ks.Recv.CaSyn // 8 is standard CaGain Factor ss.CaSpike.Dt.FromCa(ca, &ks.StdSyn.CaM, &ks.StdSyn.CaP, &ks.StdSyn.CaD) + // CaBin linear regression integration. 
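// Per-bin linear integration: each CaBin holds the sender * receiver product
// for that bin, which is accumulated into LinearSyn.CaP / CaD using the
// per-bin CaPWts / CaDWts weights; lsint only smooths the accumulation for
// visualization, as noted below.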
bin := ks.Cycle / spikeBinCycles ks.CaBins[bin] = (ks.Recv.CaBins[bin] * ks.Send.CaBins[bin]) ks.CaBin = ks.CaBins[bin] ks.LinearSyn.CaM = ks.CaBin - ks.LinearSyn.CaP += lsint * ss.CaPWts[bin] * ks.CaBin + ks.LinearSyn.CaP += lsint * ss.CaPWts[bin] * ks.CaBin // slow integ just for visualization ks.LinearSyn.CaD += lsint * ss.CaDWts[bin] * ks.CaBin ss.StatsStep(Test, Cycle) diff --git a/sims/kinasesim/sim.go b/sims/kinasesim/sim.go index 8c97776fb..563f6d9c2 100644 --- a/sims/kinasesim/sim.go +++ b/sims/kinasesim/sim.go @@ -257,7 +257,7 @@ func (ss *Sim) ConfigStats() { tsr := tensorfs.ValueType(levelDir, name, kind) if phase == Start { tsr.SetNumRows(0) - plot.SetFirstStyle(tsr, func(s *plot.Style) { + plot.SetFirstStyler(tsr, func(s *plot.Style) { s.Range.SetMin(0) switch level { case Cycle: diff --git a/sims/lvis/config.go b/sims/lvis/config.go index 4db199a84..3fbe43b70 100644 --- a/sims/lvis/config.go +++ b/sims/lvis/config.go @@ -100,7 +100,7 @@ type RunConfig struct { Runs int `default:"1" min:"1"` // Epochs is the total number of epochs per run. - Epochs int `default:"500"` + Epochs int `default:"1000"` // Trials is the total number of trials per epoch. // Should be an even multiple of NData. diff --git a/sims/lvis/lvis.go b/sims/lvis/lvis.go index a0f4c97ac..61df73b3f 100644 --- a/sims/lvis/lvis.go +++ b/sims/lvis/lvis.go @@ -850,7 +850,7 @@ func (ss *Sim) ConfigStats() { var stat float64 if phase == Start { tsr.SetNumRows(0) - plot.SetFirstStyle(tsr, func(s *plot.Style) { + plot.SetFirstStyler(tsr, func(s *plot.Style) { s.Range.SetMin(0).SetMax(1) s.On = true switch name { diff --git a/sims/lvis/params.go b/sims/lvis/params.go index 24a680648..8cb6a3a85 100644 --- a/sims/lvis/params.go +++ b/sims/lvis/params.go @@ -62,6 +62,7 @@ var LayerParams = axon.LayerSheets{ ly.Learn.TrgAvgAct.ErrLRate = 0.02 // 0.02 def ly.Learn.RLRate.On.SetBool(true) // beneficial for trace ly.Learn.RLRate.SigmoidMin = 0.05 + ly.Learn.RLRate.SigmoidLinear.SetBool(false) // false > true for output stability ly.Learn.RLRate.Diff.SetBool(true) ly.Learn.RLRate.DiffThr = 0.02 // 0.02 def - todo ly.Learn.RLRate.SpikeThr = 0.1 // 0.1 def @@ -181,6 +182,7 @@ var PathParams = axon.PathSheets{ pt.SWts.Adapt.SubMean = 1 // 1 > 0 -- definitely needed pt.Learn.LRate.Base = 0.005 // 0.01 > 0.02 later (trace) pt.Learn.DWt.SubMean = 1 // 1 > 0 for trgavg weaker + pt.Learn.DWt.CaPScale = 0.96 // 0.96 best for 25 bin; 0.95 unstable }}, {Sel: ".BackPath", Doc: "top-down back-projections MUST have lower relative weight scale, otherwise network hallucinates -- smaller as network gets bigger", Set: func(pt *axon.PathParams) { diff --git a/sims/mpi/mpi.go b/sims/mpi/mpi.go index ec08cfa27..b3e5b6fe3 100644 --- a/sims/mpi/mpi.go +++ b/sims/mpi/mpi.go @@ -557,7 +557,7 @@ func (ss *Sim) ConfigStats() { var stat float64 if phase == Start { tsr.SetNumRows(0) - plot.SetFirstStyle(tsr, func(s *plot.Style) { + plot.SetFirstStyler(tsr, func(s *plot.Style) { s.Range.SetMin(0).SetMax(1) s.On = true switch name { diff --git a/sims/neuron/neuron.go b/sims/neuron/neuron.go index 4428dccd0..81bb51fcc 100644 --- a/sims/neuron/neuron.go +++ b/sims/neuron/neuron.go @@ -377,7 +377,7 @@ func (ss *Sim) ConfigStats() { tsr := levelDir.Int(name) if phase == Start { tsr.SetNumRows(0) - plot.SetFirstStyle(tsr, func(s *plot.Style) { + plot.SetFirstStyler(tsr, func(s *plot.Style) { s.Range.SetMin(0).SetMax(float64(ss.Config.Run.Cycles)) }) return @@ -396,7 +396,7 @@ func (ss *Sim) ConfigStats() { tsr := levelDir.Float64(name) if phase == Start { 
tsr.SetNumRows(0) - plot.SetFirstStyle(tsr, func(s *plot.Style) { + plot.SetFirstStyler(tsr, func(s *plot.Style) { s.Range.SetMin(0).SetMax(1) s.On = false switch name { diff --git a/sims/objrec/objrec.go b/sims/objrec/objrec.go index 549de8022..0eea0550a 100644 --- a/sims/objrec/objrec.go +++ b/sims/objrec/objrec.go @@ -538,7 +538,7 @@ func (ss *Sim) ConfigStats() { var stat float64 if phase == Start { tsr.SetNumRows(0) - plot.SetFirstStyle(tsr, func(s *plot.Style) { + plot.SetFirstStyler(tsr, func(s *plot.Style) { s.Range.SetMin(0).SetMax(1) s.On = true switch name { diff --git a/sims/pfcmaint/pfcmaint.go b/sims/pfcmaint/pfcmaint.go index dc0781aa1..fda8cef28 100644 --- a/sims/pfcmaint/pfcmaint.go +++ b/sims/pfcmaint/pfcmaint.go @@ -494,7 +494,7 @@ func (ss *Sim) ConfigStats() { var stat float64 if phase == Start { tsr.SetNumRows(0) - plot.SetFirstStyle(tsr, func(s *plot.Style) { + plot.SetFirstStyler(tsr, func(s *plot.Style) { s.Range.SetMin(0).SetMax(1) s.On = true }) diff --git a/sims/pvlv/pvlv.go b/sims/pvlv/pvlv.go index a52b0eaa0..a55ac052a 100644 --- a/sims/pvlv/pvlv.go +++ b/sims/pvlv/pvlv.go @@ -606,7 +606,7 @@ func (ss *Sim) ConfigStats() { // tsr := levelDir.Float64(name) // if phase == Start { // tsr.SetNumRows(0) - // plot.SetFirstStyle(tsr, func(s *plot.Style) { + // plot.SetFirstStyler(tsr, func(s *plot.Style) { // s.Range.SetMin(0).SetMax(1) // if si >= 2 && si <= 5 { // s.On = true @@ -644,7 +644,7 @@ func (ss *Sim) ConfigStats() { // var stat float64 // if phase == Start { // tsr.SetNumRows(0) - // plot.SetFirstStyle(tsr, func(s *plot.Style) { + // plot.SetFirstStyler(tsr, func(s *plot.Style) { // s.Range.SetMin(0).SetMax(1) // s.On = true // }) diff --git a/sims/ra25/config.go b/sims/ra25/config.go index 77de2d40a..884845536 100644 --- a/sims/ra25/config.go +++ b/sims/ra25/config.go @@ -85,9 +85,6 @@ type RunConfig struct { // PlusCycles is the total number of plus-phase cycles per trial. For Cycles=300, use 100. PlusCycles int `default:"50"` - // CaBinCycles is the number of cycles per CaBin: how fine-grained the synaptic Ca is. - CaBinCycles int `default:"25"` // 25 > 10 - // NZero is how many perfect, zero-error epochs before stopping a Run. NZero int `default:"2"` diff --git a/sims/ra25/params.go b/sims/ra25/params.go index 6bbb4c7cc..da86a2938 100644 --- a/sims/ra25/params.go +++ b/sims/ra25/params.go @@ -6,6 +6,7 @@ package main import ( "github.com/emer/axon/v2/axon" + "github.com/emer/axon/v2/kinase" ) // LayerParams sets the minimal non-default params. @@ -50,6 +51,7 @@ var PathParams = axon.PathSheets{ pt.Learn.DWt.Trace.SetBool(true) // no trace is NOT faster. requires lrate = 0.02 pt.Learn.DWt.SubMean = 0 // 1 > 0 for long run stability pt.Learn.DWt.CaPScale = 0.95 // 0.95 > 0.9 > 1 + pt.Learn.SynCaBin.Envelope = kinase.Env10 }}, {Sel: ".BackPath", Doc: "top-down back-pathways MUST have lower relative weight scale, otherwise network hallucinates", Set: func(pt *axon.PathParams) { diff --git a/sims/ra25/ra25.go b/sims/ra25/ra25.go index 99e8d56f0..028d09aeb 100644 --- a/sims/ra25/ra25.go +++ b/sims/ra25/ra25.go @@ -215,8 +215,7 @@ func (ss *Sim) ConfigEnv() { func (ss *Sim) ConfigNet(net *axon.Network) { net.SetMaxData(ss.Config.Run.NData) net.Context().SetThetaCycles(int32(ss.Config.Run.Cycles)). - SetPlusCycles(int32(ss.Config.Run.PlusCycles)). 
- SetCaBinCycles(int32(ss.Config.Run.CaBinCycles)) + SetPlusCycles(int32(ss.Config.Run.PlusCycles)) net.SetRandSeed(ss.RandSeeds[0]) // init new separate random seed, using run = 0 inp := net.AddLayer2D("Input", axon.InputLayer, 5, 5) @@ -566,7 +565,7 @@ func (ss *Sim) ConfigStats() { var stat float64 if phase == Start { tsr.SetNumRows(0) - plot.SetFirstStyle(tsr, func(s *plot.Style) { + plot.SetFirstStyler(tsr, func(s *plot.Style) { s.Range.SetMin(0).SetMax(1) s.On = true switch name { diff --git a/sims/ra25x/ra25x.go b/sims/ra25x/ra25x.go index e62cd500a..18ab20a51 100644 --- a/sims/ra25x/ra25x.go +++ b/sims/ra25x/ra25x.go @@ -548,7 +548,7 @@ func (ss *Sim) ConfigStats() { ndata := int(ss.Net.Context().NData) if phase == Start { tsr.SetNumRows(0) - plot.SetFirstStyle(tsr, func(s *plot.Style) { + plot.SetFirstStyler(tsr, func(s *plot.Style) { s.On = false }) return @@ -577,7 +577,7 @@ func (ss *Sim) ConfigStats() { var stat float64 if phase == Start { tsr.SetNumRows(0) - plot.SetFirstStyle(tsr, func(s *plot.Style) { + plot.SetFirstStyler(tsr, func(s *plot.Style) { s.Range.SetMin(0).SetMax(1) s.On = true switch name { diff --git a/sims/rl/rl.go b/sims/rl/rl.go index 2c7520b70..49613112c 100644 --- a/sims/rl/rl.go +++ b/sims/rl/rl.go @@ -440,7 +440,7 @@ func (ss *Sim) ConfigStats() { ndata := int(ss.Net.Context().NData) if phase == Start { tsr.SetNumRows(0) - plot.SetFirstStyle(tsr, func(s *plot.Style) { + plot.SetFirstStyler(tsr, func(s *plot.Style) { s.On = false }) return @@ -468,7 +468,7 @@ func (ss *Sim) ConfigStats() { var stat float64 if phase == Start { tsr.SetNumRows(0) - plot.SetFirstStyle(tsr, func(s *plot.Style) { + plot.SetFirstStyler(tsr, func(s *plot.Style) { s.Range.SetMin(0).SetMax(1) if si == 0 { s.On = true diff --git a/sims/vspatch/vspatch.go b/sims/vspatch/vspatch.go index cc9511994..3bfa96114 100644 --- a/sims/vspatch/vspatch.go +++ b/sims/vspatch/vspatch.go @@ -543,7 +543,7 @@ func (ss *Sim) ConfigStats() { var stat float64 if phase == Start { tsr.SetNumRows(0) - plot.SetFirstStyle(tsr, func(s *plot.Style) { + plot.SetFirstStyler(tsr, func(s *plot.Style) { s.Range.SetMin(0).SetMax(1) if si >= 2 && si <= 5 { s.On = true @@ -601,7 +601,7 @@ func (ss *Sim) ConfigStats() { } // note: results go under Group name: Cond gp := curModeDir.Dir("Stats/Cond/" + name).Value("Mean") - plot.SetFirstStyle(gp, func(s *plot.Style) { + plot.SetFirstStyler(gp, func(s *plot.Style) { s.Range.SetMin(0) if si >= 2 && si <= 4 { s.On = true diff --git a/simscripts/dbformat.csv b/simscripts/dbformat.csv deleted file mode 100644 index 1120f660a..000000000 --- a/simscripts/dbformat.csv +++ /dev/null @@ -1,13 +0,0 @@ -Name, Type -JobID, string -Version, string -Status, string -Args, string -Message, string -Label, string -Server, string -ServerJob, string -ServerStatus, string -Submit, string -Start, string -End, string
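Taken together, the sim-side changes reduce to a small pattern; here is a minimal configuration sketch assembled from the sims/ra25 and sims/deepfsa changes in this patch (the configNet helper, cycle counts, and selector string are illustrative, not prescriptive):

	package main

	import (
		"github.com/emer/axon/v2/axon"
		"github.com/emer/axon/v2/kinase"
	)

	// configNet shows the standardized setup: with CaBinCycles fixed at 10,
	// sims only set ThetaCycles and PlusCycles on the Context.
	func configNet(net *axon.Network, cycles, plusCycles int) {
		net.Context().SetThetaCycles(int32(cycles)).
			SetPlusCycles(int32(plusCycles))
	}

	// PathParams selects the SynCaBin integration envelope per pathway,
	// as sims/ra25/params.go does with Env10.
	var PathParams = axon.PathSheets{
		"Base": {
			{Sel: "Path", Doc: "syn ca envelope", Set: func(pt *axon.PathParams) {
				pt.Learn.SynCaBin.Envelope = kinase.Env10
				pt.Learn.DWt.CaPScale = 0.95 // 0.95 default noted in this patch
			}},
		},
	}

	func main() {} // sketch only; a real sim wires configNet into its ConfigNet method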