From 9d9bdc32de4671687f3d8a623cda82abb73dc9bb Mon Sep 17 00:00:00 2001 From: "Randall C. O'Reilly" Date: Thu, 9 Jan 2025 11:32:25 -0800 Subject: [PATCH] updated to new gosl generation that should work on nvidia --- axon/act-net.go | 14 +- axon/act-net.goal | 14 +- axon/context.go | 4 +- axon/gosl.go | 63 +- axon/shaders/ApplyExtsNeuron.wgsl | 35 +- axon/shaders/Beta1Neuron.wgsl | 21 +- axon/shaders/Beta2Neuron.wgsl | 21 +- axon/shaders/BetweenGi.wgsl | 39 +- axon/shaders/CycleInc.wgsl | 5 +- axon/shaders/CycleNeuron.wgsl | 697 ++++++----- axon/shaders/CyclePost.wgsl | 131 +-- axon/shaders/DWtFromDiSyn.wgsl | 11 +- axon/shaders/DWtSubMeanNeuron.wgsl | 27 +- axon/shaders/DWtSyn.wgsl | 135 ++- axon/shaders/GPUTestWrite.wgsl | 17 +- axon/shaders/GatherSpikes.wgsl | 81 +- axon/shaders/InitGBuffsPath.wgsl | 15 +- axon/shaders/LayerGi.wgsl | 61 +- axon/shaders/MinusPhaseNeuron.wgsl | 21 +- axon/shaders/MinusPhasePool.wgsl | 31 +- axon/shaders/MinusPhasePost.wgsl | 87 +- axon/shaders/NewStateLayer.wgsl | 1437 ----------------------- axon/shaders/NewStateNeuron.wgsl | 53 +- axon/shaders/PlusPhaseNeuron.wgsl | 113 +- axon/shaders/PlusPhasePool.wgsl | 21 +- axon/shaders/PlusPhasePost.wgsl | 139 ++- axon/shaders/PlusPhaseStartContext.wgsl | 4 +- axon/shaders/PlusPhaseStartNeuron.wgsl | 23 +- axon/shaders/PoolGi.wgsl | 65 +- axon/shaders/SendSpike.wgsl | 159 ++- axon/shaders/SlowAdaptLayer.wgsl | 55 +- axon/shaders/SlowAdaptNeuron.wgsl | 83 +- axon/shaders/WtFromDWtLayer.wgsl | 61 +- axon/shaders/WtFromDWtSyn.wgsl | 29 +- axon/typegen.go | 12 +- axon/vars.go | 4 +- sims/ra25/params.go | 2 +- 37 files changed, 1149 insertions(+), 2641 deletions(-) delete mode 100644 axon/shaders/NewStateLayer.wgsl diff --git a/axon/act-net.go b/axon/act-net.go index e3bf51d9d..b782a0047 100644 --- a/axon/act-net.go +++ b/axon/act-net.go @@ -66,8 +66,12 @@ func (nt *Network) NewState(mode enums.Enum, testing bool) { ctx := nt.Context() nd := int(nix.NNeurons * ctx.NData) ctx.NewState(mode, 
testing) - ToGPUCtxGlobal() - RunNewStateLayer(int(nix.NLayers)) + for li := range nix.NLayers { + NewStateLayer(li) + } + ToGPULayers() + // ToGPUCtxGlobal() + // RunNewStateLayer(int(nix.NLayers)) RunNewStateNeuron(nd) RunInitGBuffsPath(int(nix.NPaths)) // note: not completed until run cycles @@ -278,7 +282,7 @@ func CyclePost(i uint32) { //gosl:kernel } // CycleInc is the kernel over 1 call to increment the cycle counter. -func CycleInc(i uint32) { //gosl:kernel +func CycleInc(i uint32) { //gosl:kernel read-write:Ctx if i != 0 { return } @@ -301,7 +305,7 @@ func ApplyExtsNeuron(i uint32) { //gosl:kernel // NewStateLayer is the kernel over Layers (not Data) // which does new state on pools as well. -func NewStateLayer(li uint32) { //gosl:kernel +func NewStateLayer(li uint32) { // note: not a kernel at this point ctx := GetCtx(0) Layers[li].NewStateLayer(ctx) } @@ -382,7 +386,7 @@ func MinusPhasePost(li uint32) { //gosl:kernel } // PlusPhaseStartContext is the kernel over 1 call to call PlusPhaseStart on context. -func PlusPhaseStartContext(i uint32) { //gosl:kernel +func PlusPhaseStartContext(i uint32) { //gosl:kernel read-write:Ctx if i != 0 { return } diff --git a/axon/act-net.goal b/axon/act-net.goal index 2fcb5436b..9b40edaf7 100644 --- a/axon/act-net.goal +++ b/axon/act-net.goal @@ -60,8 +60,12 @@ func (nt *Network) NewState(mode enums.Enum, testing bool) { ctx := nt.Context() nd := int(nix.NNeurons * ctx.NData) ctx.NewState(mode, testing) - ToGPUCtxGlobal() - RunNewStateLayer(int(nix.NLayers)) + for li := range nix.NLayers { + NewStateLayer(li) + } + ToGPULayers() + // ToGPUCtxGlobal() + // RunNewStateLayer(int(nix.NLayers)) RunNewStateNeuron(nd) RunInitGBuffsPath(int(nix.NPaths)) // note: not completed until run cycles @@ -272,7 +276,7 @@ func CyclePost(i uint32) { //gosl:kernel } // CycleInc is the kernel over 1 call to increment the cycle counter. 
-func CycleInc(i uint32) { //gosl:kernel +func CycleInc(i uint32) { //gosl:kernel read-write:Ctx if i != 0 { return } @@ -295,7 +299,7 @@ func ApplyExtsNeuron(i uint32) { //gosl:kernel // NewStateLayer is the kernel over Layers (not Data) // which does new state on pools as well. -func NewStateLayer(li uint32) { //gosl:kernel +func NewStateLayer(li uint32) { // note: not a kernel at this point ctx := GetCtx(0) Layers[li].NewStateLayer(ctx) } @@ -376,7 +380,7 @@ func MinusPhasePost(li uint32) { //gosl:kernel } // PlusPhaseStartContext is the kernel over 1 call to call PlusPhaseStart on context. -func PlusPhaseStartContext(i uint32) { //gosl:kernel +func PlusPhaseStartContext(i uint32) { //gosl:kernel read-write:Ctx if i != 0 { return } diff --git a/axon/context.go b/axon/context.go index 2c5a8da2a..e872f38bf 100644 --- a/axon/context.go +++ b/axon/context.go @@ -117,12 +117,14 @@ func (ctx *Context) DataIndex(idx uint32) uint32 { } // CycleInc increments at the cycle level +// +//gosl:pointer-receiver func (ctx *Context) CycleInc() { ctx.PhaseCycle++ ctx.Cycle++ ctx.CyclesTotal++ ctx.Time += ctx.TimePerCycle - ctx.RandCounter.Add(uint32(RandFunIndexN)) + // ctx.RandCounter.Add(uint32(RandFunIndexN)) } // SlowInc increments the Slow counter and returns true if time diff --git a/axon/gosl.go b/axon/gosl.go index 8aa7e422f..c8a448278 100644 --- a/axon/gosl.go +++ b/axon/gosl.go @@ -83,7 +83,6 @@ func GPUInit() { gpu.NewComputePipelineShaderFS(shaders, "shaders/MinusPhaseNeuron.wgsl", sy) gpu.NewComputePipelineShaderFS(shaders, "shaders/MinusPhasePool.wgsl", sy) gpu.NewComputePipelineShaderFS(shaders, "shaders/MinusPhasePost.wgsl", sy) - gpu.NewComputePipelineShaderFS(shaders, "shaders/NewStateLayer.wgsl", sy) gpu.NewComputePipelineShaderFS(shaders, "shaders/NewStateNeuron.wgsl", sy) gpu.NewComputePipelineShaderFS(shaders, "shaders/PlusPhaseNeuron.wgsl", sy) gpu.NewComputePipelineShaderFS(shaders, "shaders/PlusPhasePool.wgsl", sy) @@ -888,48 +887,6 @@ func 
RunOneMinusPhasePost(n int, syncVars ...GPUVars) { RunMinusPhasePostCPU(n) } } -// RunNewStateLayer runs the NewStateLayer kernel with given number of elements, -// on either the CPU or GPU depending on the UseGPU variable. -// Can call multiple Run* kernels in a row, which are then all launched -// in the same command submission on the GPU, which is by far the most efficient. -// MUST call RunDone (with optional vars to sync) after all Run calls. -// Alternatively, a single-shot RunOneNewStateLayer call does Run and Done for a -// single run-and-sync case. -func RunNewStateLayer(n int) { - if UseGPU { - RunNewStateLayerGPU(n) - } else { - RunNewStateLayerCPU(n) - } -} - -// RunNewStateLayerGPU runs the NewStateLayer kernel on the GPU. See [RunNewStateLayer] for more info. -func RunNewStateLayerGPU(n int) { - sy := GPUSystem - pl := sy.ComputePipelines["NewStateLayer"] - ce, _ := sy.BeginComputePass() - pl.Dispatch1D(ce, n, 64) -} - -// RunNewStateLayerCPU runs the NewStateLayer kernel on the CPU. -func RunNewStateLayerCPU(n int) { - gpu.VectorizeFunc(0, n, NewStateLayer) -} - -// RunOneNewStateLayer runs the NewStateLayer kernel with given number of elements, -// on either the CPU or GPU depending on the UseGPU variable. -// This version then calls RunDone with the given variables to sync -// after the Run, for a single-shot Run-and-Done call. If multiple kernels -// can be run in sequence, it is much more efficient to do multiple Run* -// calls followed by a RunDone call. -func RunOneNewStateLayer(n int, syncVars ...GPUVars) { - if UseGPU { - RunNewStateLayerGPU(n) - RunDone(syncVars...) - } else { - RunNewStateLayerCPU(n) - } -} // RunNewStateNeuron runs the NewStateNeuron kernel with given number of elements, // on either the CPU or GPU depending on the UseGPU variable. 
// Can call multiple Run* kernels in a row, which are then all launched @@ -1779,33 +1736,29 @@ func SyncFromGPU(vars ...GPUVars) { } // GetLayers returns a pointer to the given global variable: -// [Layers] []LayerParams at given index. -// To ensure that values are updated on the GPU, you must call [SetLayers]. -// after all changes have been made. +// [Layers] []LayerParams at given index. This is directly processed in the GPU code, +// so this function call is an equivalent for the CPU. func GetLayers(idx uint32) *LayerParams { return &Layers[idx] } // GetPaths returns a pointer to the given global variable: -// [Paths] []PathParams at given index. -// To ensure that values are updated on the GPU, you must call [SetPaths]. -// after all changes have been made. +// [Paths] []PathParams at given index. This is directly processed in the GPU code, +// so this function call is an equivalent for the CPU. func GetPaths(idx uint32) *PathParams { return &Paths[idx] } // GetNetworkIxs returns a pointer to the given global variable: -// [NetworkIxs] []NetworkIndexes at given index. -// To ensure that values are updated on the GPU, you must call [SetNetworkIxs]. -// after all changes have been made. +// [NetworkIxs] []NetworkIndexes at given index. This is directly processed in the GPU code, +// so this function call is an equivalent for the CPU. func GetNetworkIxs(idx uint32) *NetworkIndexes { return &NetworkIxs[idx] } // GetCtx returns a pointer to the given global variable: -// [Ctx] []Context at given index. -// To ensure that values are updated on the GPU, you must call [SetCtx]. -// after all changes have been made. +// [Ctx] []Context at given index. This is directly processed in the GPU code, +// so this function call is an equivalent for the CPU. 
func GetCtx(idx uint32) *Context { return &Ctx[idx] } diff --git a/axon/shaders/ApplyExtsNeuron.wgsl b/axon/shaders/ApplyExtsNeuron.wgsl index ded3ea2de..7e9a7e56f 100644 --- a/axon/shaders/ApplyExtsNeuron.wgsl +++ b/axon/shaders/ApplyExtsNeuron.wgsl @@ -25,7 +25,7 @@ var RecvPathIxs: array; var PathRecvCon: array; @group(1) @binding(4) var RecvSynIxs: array; -// // Ctx is the current context state (one only). +// // Ctx is the current context state (one only). This is read-only except in // specific kernels. @group(2) @binding(0) var Ctx: array; @group(2) @binding(1) @@ -78,10 +78,10 @@ fn Index3D(s0: u32, s1: u32, s2: u32, i0: u32, i1: u32, i2: u32) -> u32 { //////// import: "vars.go" //////// import: "act-layer.go" -fn LayerParams_ApplyExtFlags(ly: ptr, clearMask: ptr,setMask: ptr, toTarg: ptr) { +fn LayerParams_ApplyExtFlags(ly: LayerParams, clearMask: ptr,setMask: ptr, toTarg: ptr) { *clearMask = NeuronHasExt | NeuronHasTarg | NeuronHasCmpr; *toTarg = false; - switch ((*ly).Type) { + switch (ly.Type) { case TargetLayer: { *setMask = NeuronHasTarg; *toTarg = true; @@ -95,12 +95,12 @@ fn LayerParams_ApplyExtFlags(ly: ptr, clearMask: ptr, ni: u32,di: u32) { +fn LayerParams_InitExt(ly: LayerParams, ni: u32,di: u32) { Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(Ext))] = 0.0; Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(Target))] = 0.0; NeuronClearFlag(NeuronHasExt|NeuronHasTarg|NeuronHasCmpr, ni, di); } -fn LayerParams_ApplyExtValue(ly: ptr, ni: u32,di: u32, val: f32) { +fn LayerParams_ApplyExtValue(ly: LayerParams, ni: u32,di: u32, val: f32) { if (val < 0) { return; } @@ -116,11 +116,11 @@ fn LayerParams_ApplyExtValue(ly: ptr, ni: u32,di: u32, val NeuronClearFlag(clearMask, ni, di); NeuronSetFlag(setMask, ni, di); } -fn LayerParams_ApplyExtsNeuron(ly: ptr, ni: u32,di: u32) { - var lni = ni - (*ly).Indexes.NeurSt; // layer-based +fn LayerParams_ApplyExtsNeuron(ly: 
LayerParams, ni: u32,di: u32) { + var lni = ni - ly.Indexes.NeurSt; // layer-based LayerParams_InitExt(ly, ni, di); - if (IsExtLayerType((*ly).Type)) { - var ei = (*ly).Indexes.ExtsSt + lni; + if (IsExtLayerType(ly.Type)) { + var ei = ly.Indexes.ExtsSt + lni; LayerParams_ApplyExtValue(ly, ni, di, Exts[Index2D(TensorStrides[120], TensorStrides[121], u32(ei), u32(di))]); } @@ -128,15 +128,14 @@ fn LayerParams_ApplyExtsNeuron(ly: ptr, ni: u32,di: u32) { //////// import: "act-net.go" fn ApplyExtsNeuron(i: u32) { //gosl:kernel - var ctx = Ctx[0]; - var ni = Context_ItemIndex(&ctx, i); + let ctx = Ctx[0]; + var ni = Context_ItemIndex(ctx, i); if (ni >= NetworkIxs[0].NNeurons) { return; } - var di = Context_DataIndex(&ctx, i); + var di = Context_DataIndex(ctx, i); var li = NeuronIxs[Index2D(TensorStrides[10], TensorStrides[11], u32(ni), u32(NrnLayIndex))]; - var layers=Layers[li]; LayerParams_ApplyExtsNeuron(&layers, ni, di); - Ctx[0] = ctx; + let layers=Layers[li]; LayerParams_ApplyExtsNeuron(layers, ni, di); } //////// import: "act-path.go" @@ -438,11 +437,11 @@ struct Context { //types:add -setters SlowCounter: i32, RandCounter: RandCounter, } -fn Context_ItemIndex(ctx: ptr, idx: u32) -> u32 { - return idx / (*ctx).NData; +fn Context_ItemIndex(ctx: Context, idx: u32) -> u32 { + return idx / ctx.NData; } -fn Context_DataIndex(ctx: ptr, idx: u32) -> u32 { - return idx % (*ctx).NData; +fn Context_DataIndex(ctx: Context, idx: u32) -> u32 { + return idx % ctx.NData; } //////// import: "deep-layer.go" diff --git a/axon/shaders/Beta1Neuron.wgsl b/axon/shaders/Beta1Neuron.wgsl index 920ee554c..ee4daf7b0 100644 --- a/axon/shaders/Beta1Neuron.wgsl +++ b/axon/shaders/Beta1Neuron.wgsl @@ -25,7 +25,7 @@ var RecvPathIxs: array; var PathRecvCon: array; @group(1) @binding(4) var RecvSynIxs: array; -// // Ctx is the current context state (one only). +// // Ctx is the current context state (one only). This is read-only except in // specific kernels. 
@group(2) @binding(0) var Ctx: array; @group(2) @binding(1) @@ -78,21 +78,20 @@ fn Index3D(s0: u32, s1: u32, s2: u32, i0: u32, i1: u32, i2: u32) -> u32 { //////// import: "vars.go" //////// import: "act-layer.go" -fn LayerParams_Beta1Neuron(ly: ptr, ctx: ptr, ni: u32,di: u32) { +fn LayerParams_Beta1Neuron(ly: LayerParams, ctx: Context, ni: u32,di: u32) { Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(Beta1))] = Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(CaP))]; } //////// import: "act-net.go" fn Beta1Neuron(i: u32) { //gosl:kernel - var ctx = Ctx[0]; - var ni = Context_ItemIndex(&ctx, i); + let ctx = Ctx[0]; + var ni = Context_ItemIndex(ctx, i); if (ni >= NetworkIxs[0].NNeurons) { return; } - var di = Context_DataIndex(&ctx, i); + var di = Context_DataIndex(ctx, i); var li = NeuronIxs[Index2D(TensorStrides[10], TensorStrides[11], u32(ni), u32(NrnLayIndex))]; - var layers=Layers[li]; LayerParams_Beta1Neuron(&layers, &ctx, ni, di); - Ctx[0] = ctx; + let layers=Layers[li]; LayerParams_Beta1Neuron(layers, ctx, ni, di); } //////// import: "act-path.go" @@ -388,11 +387,11 @@ struct Context { //types:add -setters SlowCounter: i32, RandCounter: RandCounter, } -fn Context_ItemIndex(ctx: ptr, idx: u32) -> u32 { - return idx / (*ctx).NData; +fn Context_ItemIndex(ctx: Context, idx: u32) -> u32 { + return idx / ctx.NData; } -fn Context_DataIndex(ctx: ptr, idx: u32) -> u32 { - return idx % (*ctx).NData; +fn Context_DataIndex(ctx: Context, idx: u32) -> u32 { + return idx % ctx.NData; } //////// import: "deep-layer.go" diff --git a/axon/shaders/Beta2Neuron.wgsl b/axon/shaders/Beta2Neuron.wgsl index 5b428733d..00af9ae13 100644 --- a/axon/shaders/Beta2Neuron.wgsl +++ b/axon/shaders/Beta2Neuron.wgsl @@ -25,7 +25,7 @@ var RecvPathIxs: array; var PathRecvCon: array; @group(1) @binding(4) var RecvSynIxs: array; -// // Ctx is the current context state (one only). 
+// // Ctx is the current context state (one only). This is read-only except in // specific kernels. @group(2) @binding(0) var Ctx: array; @group(2) @binding(1) @@ -78,21 +78,20 @@ fn Index3D(s0: u32, s1: u32, s2: u32, i0: u32, i1: u32, i2: u32) -> u32 { //////// import: "vars.go" //////// import: "act-layer.go" -fn LayerParams_Beta2Neuron(ly: ptr, ctx: ptr, ni: u32,di: u32) { +fn LayerParams_Beta2Neuron(ly: LayerParams, ctx: Context, ni: u32,di: u32) { Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(Beta2))] = Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(CaP))]; } //////// import: "act-net.go" fn Beta2Neuron(i: u32) { //gosl:kernel - var ctx = Ctx[0]; - var ni = Context_ItemIndex(&ctx, i); + let ctx = Ctx[0]; + var ni = Context_ItemIndex(ctx, i); if (ni >= NetworkIxs[0].NNeurons) { return; } - var di = Context_DataIndex(&ctx, i); + var di = Context_DataIndex(ctx, i); var li = NeuronIxs[Index2D(TensorStrides[10], TensorStrides[11], u32(ni), u32(NrnLayIndex))]; - var layers=Layers[li]; LayerParams_Beta2Neuron(&layers, &ctx, ni, di); - Ctx[0] = ctx; + let layers=Layers[li]; LayerParams_Beta2Neuron(layers, ctx, ni, di); } //////// import: "act-path.go" @@ -388,11 +387,11 @@ struct Context { //types:add -setters SlowCounter: i32, RandCounter: RandCounter, } -fn Context_ItemIndex(ctx: ptr, idx: u32) -> u32 { - return idx / (*ctx).NData; +fn Context_ItemIndex(ctx: Context, idx: u32) -> u32 { + return idx / ctx.NData; } -fn Context_DataIndex(ctx: ptr, idx: u32) -> u32 { - return idx % (*ctx).NData; +fn Context_DataIndex(ctx: Context, idx: u32) -> u32 { + return idx % ctx.NData; } //////// import: "deep-layer.go" diff --git a/axon/shaders/BetweenGi.wgsl b/axon/shaders/BetweenGi.wgsl index c7e47f07c..c3a86eff4 100644 --- a/axon/shaders/BetweenGi.wgsl +++ b/axon/shaders/BetweenGi.wgsl @@ -25,7 +25,7 @@ var RecvPathIxs: array; var PathRecvCon: array; @group(1) @binding(4) var 
RecvSynIxs: array; -// // Ctx is the current context state (one only). +// // Ctx is the current context state (one only). This is read-only except in // specific kernels. @group(2) @binding(0) var Ctx: array; @group(2) @binding(1) @@ -78,22 +78,22 @@ fn Index3D(s0: u32, s1: u32, s2: u32, i0: u32, i1: u32, i2: u32) -> u32 { //////// import: "vars.go" //////// import: "act-layer.go" -fn LayerParams_BetweenGi(ly: ptr, ctx: ptr, di: u32) { +fn LayerParams_BetweenGi(ly: LayerParams, ctx: Context, di: u32) { var lpi = LayerParams_PoolIndex(ly, u32(u32(0))); var maxGi = Pools[Index3D(TensorStrides[130], TensorStrides[131], TensorStrides[132], u32(lpi), u32(di), u32(TotalGi))]; - maxGi = LayerParams_BetweenLayerGiMax(ly, di, maxGi, (*ly).LayInhib.Index1); - maxGi = LayerParams_BetweenLayerGiMax(ly, di, maxGi, (*ly).LayInhib.Index2); - maxGi = LayerParams_BetweenLayerGiMax(ly, di, maxGi, (*ly).LayInhib.Index3); - maxGi = LayerParams_BetweenLayerGiMax(ly, di, maxGi, (*ly).LayInhib.Index4); + maxGi = LayerParams_BetweenLayerGiMax(ly, di, maxGi, ly.LayInhib.Index1); + maxGi = LayerParams_BetweenLayerGiMax(ly, di, maxGi, ly.LayInhib.Index2); + maxGi = LayerParams_BetweenLayerGiMax(ly, di, maxGi, ly.LayInhib.Index3); + maxGi = LayerParams_BetweenLayerGiMax(ly, di, maxGi, ly.LayInhib.Index4); Pools[Index3D(TensorStrides[130], TensorStrides[131], // our inhib is max of us and everyone in the layer pool TensorStrides[132], u32(lpi), u32(di), u32(TotalGi))] = maxGi; } -fn LayerParams_BetweenLayerGiMax(ly: ptr, di: u32, maxGi: f32, layIndex: i32) -> f32 { +fn LayerParams_BetweenLayerGiMax(ly: LayerParams, di: u32, maxGi: f32, layIndex: i32) -> f32 { if (layIndex < 0) { return maxGi; } - var oly = Layers[u32(layIndex)]; - var opi = LayerParams_PoolIndex(&oly, u32(u32(0))); + let oly = Layers[u32(layIndex)]; + var opi = LayerParams_PoolIndex(oly, u32(u32(0))); var ogi = Pools[Index3D(TensorStrides[130], TensorStrides[131], TensorStrides[132], u32(opi), u32(di), u32(TotalGi))]; if (ogi 
> maxGi) { return ogi; @@ -102,14 +102,13 @@ fn LayerParams_BetweenLayerGiMax(ly: ptr, di: u32, maxGi: //////// import: "act-net.go" fn BetweenGi(i: u32) { //gosl:kernel - var ctx = Ctx[0]; - var li = Context_ItemIndex(&ctx, i); + let ctx = Ctx[0]; + var li = Context_ItemIndex(ctx, i); if (li >= NetworkIxs[0].NLayers) { return; } - var di = Context_DataIndex(&ctx, i); - var layers=Layers[li]; LayerParams_BetweenGi(&layers, &ctx, di); - Ctx[0] = ctx; + var di = Context_DataIndex(ctx, i); + let layers=Layers[li]; LayerParams_BetweenGi(layers, ctx, di); } //////// import: "act-path.go" @@ -405,11 +404,11 @@ struct Context { //types:add -setters SlowCounter: i32, RandCounter: RandCounter, } -fn Context_ItemIndex(ctx: ptr, idx: u32) -> u32 { - return idx / (*ctx).NData; +fn Context_ItemIndex(ctx: Context, idx: u32) -> u32 { + return idx / ctx.NData; } -fn Context_DataIndex(ctx: ptr, idx: u32) -> u32 { - return idx % (*ctx).NData; +fn Context_DataIndex(ctx: Context, idx: u32) -> u32 { + return idx % ctx.NData; } //////// import: "deep-layer.go" @@ -667,8 +666,8 @@ struct LayerParams { TDDa: TDDaParams, Indexes: LayerIndexes, } -fn LayerParams_PoolIndex(ly: ptr, pi: u32) -> u32 { - return (*ly).PoolSt + pi; +fn LayerParams_PoolIndex(ly: LayerParams, pi: u32) -> u32 { + return ly.PoolSt + pi; } //////// import: "layertypes.go" diff --git a/axon/shaders/CycleInc.wgsl b/axon/shaders/CycleInc.wgsl index 3a545bfe0..12ca11ecb 100644 --- a/axon/shaders/CycleInc.wgsl +++ b/axon/shaders/CycleInc.wgsl @@ -25,7 +25,7 @@ var RecvPathIxs: array; var PathRecvCon: array; @group(1) @binding(4) var RecvSynIxs: array; -// // Ctx is the current context state (one only). +// // Ctx is the current context state (one only). This is read-only except in // specific kernels. 
@group(2) @binding(0) var Ctx: array; @group(2) @binding(1) @@ -80,7 +80,7 @@ fn Index3D(s0: u32, s1: u32, s2: u32, i0: u32, i1: u32, i2: u32) -> u32 { //////// import: "act-layer.go" //////// import: "act-net.go" -fn CycleInc(i: u32) { //gosl:kernel +fn CycleInc(i: u32) { //gosl:kernel read-write:Ctx if (i != 0) { return; } @@ -387,7 +387,6 @@ fn Context_CycleInc(ctx: ptr) { (*ctx).Cycle++; (*ctx).CyclesTotal++; (*ctx).Time += (*ctx).TimePerCycle; - RandCounter_Add(&(*ctx).RandCounter, u32(RandFunIndexN)); } //////// import: "deep-layer.go" diff --git a/axon/shaders/CycleNeuron.wgsl b/axon/shaders/CycleNeuron.wgsl index 812a58124..e368a791e 100644 --- a/axon/shaders/CycleNeuron.wgsl +++ b/axon/shaders/CycleNeuron.wgsl @@ -25,7 +25,7 @@ var RecvPathIxs: array; var PathRecvCon: array; @group(1) @binding(4) var RecvSynIxs: array; -// // Ctx is the current context state (one only). +// // Ctx is the current context state (one only). This is read-only except in // specific kernels. @group(2) @binding(0) var Ctx: array; @group(2) @binding(1) @@ -78,7 +78,7 @@ fn Index3D(s0: u32, s1: u32, s2: u32, i0: u32, i1: u32, i2: u32) -> u32 { //////// import: "vars.go" //////// import: "act-layer.go" -fn SetNeuronExtPosNeg(ctx: ptr, ni: u32,di: u32, val: f32) { +fn SetNeuronExtPosNeg(ctx: Context, ni: u32,di: u32, val: f32) { if (ni == 0) { if (val >= 0) { Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(Ext))] = val; @@ -94,26 +94,26 @@ fn SetNeuronExtPosNeg(ctx: ptr, ni: u32,di: u32, val: f32) { } } } -fn LayerParams_CycleNeuron(ly: ptr, ctx: ptr, ni: u32,di: u32) { +fn LayerParams_CycleNeuron(ly: LayerParams, ctx: Context, ni: u32,di: u32) { var pi = LayerParams_PoolIndex(ly, NeuronIxs[Index2D(TensorStrides[10], TensorStrides[11], u32(ni), u32(NrnSubPool))]); var lpi = LayerParams_PoolIndex(ly, u32(u32(0))); LayerParams_GInteg(ly, ctx, pi, ni, di); LayerParams_SpikeFromG(ly, ctx, lpi, ni, di); } -fn LayerParams_PulvinarDriver(ly: 
ptr, ctx: ptr, lni: u32,di: u32, drvGe: ptr,nonDrivePct: ptr) { - var dli = u32((*ly).Pulv.DriveLayIndex); - var dly = Layers[dli]; - var dpi = LayerParams_PoolIndex(&dly, u32(u32(0))); +fn LayerParams_PulvinarDriver(ly: LayerParams, ctx: Context, lni: u32,di: u32, drvGe: ptr,nonDrivePct: ptr) { + var dli = u32(ly.Pulv.DriveLayIndex); + let dly = Layers[dli]; + var dpi = LayerParams_PoolIndex(dly, u32(u32(0))); var drvMax = PoolAvgMax(AMCaP, AMCycle, Max, dpi, di); - *nonDrivePct = PulvParams_NonDrivePct(&(*ly).Pulv, drvMax); // how much non-driver to keep + *nonDrivePct = PulvParams_NonDrivePct(ly.Pulv, drvMax); // how much non-driver to keep var burst = Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(dly.Indexes.NeurSt + lni), u32(di), u32(Burst))]; - *drvGe = PulvParams_DriveGe(&(*ly).Pulv, burst); + *drvGe = PulvParams_DriveGe(ly.Pulv, burst); } -fn LayerParams_GInteg(ly: ptr, ctx: ptr, pi: u32,ni: u32,di: u32) { +fn LayerParams_GInteg(ly: LayerParams, ctx: Context, pi: u32,ni: u32,di: u32) { var drvGe = f32(0); var nonDrivePct = f32(0); - if ((*ly).Type == PulvinarLayer) { - LayerParams_PulvinarDriver(ly, ctx, ni-(*ly).Indexes.NeurSt, di, &drvGe, &nonDrivePct); + if (ly.Type == PulvinarLayer) { + LayerParams_PulvinarDriver(ly, ctx, ni-ly.Indexes.NeurSt, di, &drvGe, &nonDrivePct); Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], // use for regulating inhibition u32(ni), u32(di), u32(Ext))] = nonDrivePct; } @@ -123,36 +123,36 @@ fn LayerParams_GInteg(ly: ptr, ctx: ptr, LayerParams_GNeuroMod(ly, ctx, ni, di); LayerParams_SpecialPostGs(ly, ctx, ni, di, saveVal); } -fn LayerParams_SpecialPreGs(ly: ptr, ctx: ptr, pi: u32,ni: u32,di: u32, drvGe: f32, nonDrivePct: f32) -> f32 { +fn LayerParams_SpecialPreGs(ly: LayerParams, ctx: Context, pi: u32,ni: u32,di: u32, drvGe: f32, nonDrivePct: f32) -> f32 { var saveVal = f32(0); // sometimes we need to use a value computed here, for the post Gs step - var pil = pi - 
(*ly).PoolSt; + var pil = pi - ly.PoolSt; var pnn = u32(PoolNNeurons(pi)); var pni = NeuronIxs[Index2D(TensorStrides[10], TensorStrides[11], u32(ni), u32(NrnNeurIndex))] - u32(PoolIxs[Index2D(TensorStrides[0], TensorStrides[1], u32(pi), u32(PoolNeurSt))]); var nrnCtxtGe = Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(CtxtGe))]; var nrnGeRaw = Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(GeRaw))]; var hasRew = GlobalScalars[Index2D(TensorStrides[100], TensorStrides[101], u32(GvHasRew), u32(di))] > 0; - switch ((*ly).Type) { + switch (ly.Type) { case PTPredLayer, CTLayer: { - var geCtxt = (*ly).CT.GeGain * nrnCtxtGe; + var geCtxt = ly.CT.GeGain * nrnCtxtGe; Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(GeRaw))] += geCtxt; - if ((*ly).CT.DecayDt > 0) { - Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(CtxtGe))] -= (*ly).CT.DecayDt * nrnCtxtGe; + if (ly.CT.DecayDt > 0) { + Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(CtxtGe))] -= ly.CT.DecayDt * nrnCtxtGe; } - var ctxExt = DtParams_GeSynFromRawSteady(&(*ly).Acts.Dt, geCtxt); + var ctxExt = DtParams_GeSynFromRawSteady(ly.Acts.Dt, geCtxt); Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(GeSyn))] += ctxExt; saveVal = ctxExt; } // used In PostGs to set nrn.GeExt case PTMaintLayer: { - if ((*ly).Acts.SMaint.On == 1) { - saveVal = (*ly).Acts.SMaint.Inhib * Neurons[Index3D(TensorStrides[70], TensorStrides[71], // used In PostGs to set nrn.GeExt + if (ly.Acts.SMaint.On == 1) { + saveVal = ly.Acts.SMaint.Inhib * Neurons[Index3D(TensorStrides[70], TensorStrides[71], // used In PostGs to set nrn.GeExt TensorStrides[72], u32(ni), u32(di), u32(GMaintRaw))]; } } case PulvinarLayer: { - if ((*ctx).PlusPhase == 0) { + if (ctx.PlusPhase == 0) { 
break; } - saveVal = nonDrivePct*Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(GeSyn))] + DtParams_GeSynFromRawSteady(&(*ly).Acts.Dt, drvGe); + saveVal = nonDrivePct*Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(GeSyn))] + DtParams_GeSynFromRawSteady(ly.Acts.Dt, drvGe); Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(GeRaw))] = nonDrivePct*nrnGeRaw + drvGe; Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(GeSyn))] = saveVal; } @@ -165,15 +165,15 @@ fn LayerParams_SpecialPreGs(ly: ptr, ctx: ptr, ctx: ptr 0) { - geRaw = PopCodeParams_EncodeGe(&(*ly).Acts.PopCode, pni, pnn, dr); + geRaw = PopCodeParams_EncodeGe(ly.Acts.PopCode, pni, pnn, dr); } Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(GeRaw))] = geRaw; - Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(GeSyn))] = DtParams_GeSynFromRawSteady(&(*ly).Acts.Dt, geRaw); + Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(GeSyn))] = DtParams_GeSynFromRawSteady(ly.Acts.Dt, geRaw); } case UrgencyLayer: { var ur = GlobalScalars[Index2D(TensorStrides[100], TensorStrides[101], u32(GvUrgency), u32(di))]; var geRaw = ur; if (ur > 0) { - geRaw = PopCodeParams_EncodeGe(&(*ly).Acts.PopCode, pni, pnn, ur); + geRaw = PopCodeParams_EncodeGe(ly.Acts.PopCode, pni, pnn, ur); } Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(GeRaw))] = geRaw; - Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(GeSyn))] = DtParams_GeSynFromRawSteady(&(*ly).Acts.Dt, geRaw); + Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(GeSyn))] = DtParams_GeSynFromRawSteady(ly.Acts.Dt, geRaw); } case 
USLayer: { - var us = RubiconUSStimValue(di, pil-1, (*ly).Learn.NeuroMod.Valence); + var us = RubiconUSStimValue(di, pil-1, ly.Learn.NeuroMod.Valence); var geRaw = us; if (us > 0) { - geRaw = PopCodeParams_EncodeGe(&(*ly).Acts.PopCode, pni, pnn, us); + geRaw = PopCodeParams_EncodeGe(ly.Acts.PopCode, pni, pnn, us); } - if ((*ly).Learn.NeuroMod.DAMod == D1Mod || ((*ly).Learn.NeuroMod.DAMod == D2Mod && hasRew && (*ctx).PlusPhase == 1)) { + if (ly.Learn.NeuroMod.DAMod == D1Mod || (ly.Learn.NeuroMod.DAMod == D2Mod && hasRew && ctx.PlusPhase == 1)) { Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(GeRaw))] = geRaw; - Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(GeSyn))] = DtParams_GeSynFromRawSteady(&(*ly).Acts.Dt, geRaw); + Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(GeSyn))] = DtParams_GeSynFromRawSteady(ly.Acts.Dt, geRaw); } } case PVLayer: { - if (hasRew && (*ctx).PlusPhase == 1) { + if (hasRew && ctx.PlusPhase == 1) { var pv = f32(0); - if ((*ly).Learn.NeuroMod.Valence == Positive) { + if (ly.Learn.NeuroMod.Valence == Positive) { pv = GlobalScalars[Index2D(TensorStrides[100], TensorStrides[101], u32(GvPVpos), u32(di))]; } else { pv = GlobalScalars[Index2D(TensorStrides[100], TensorStrides[101], u32(GvPVneg), u32(di))]; } - var pc = PopCodeParams_EncodeGe(&(*ly).Acts.PopCode, pni, (*ly).Indexes.NNeurons, pv); + var pc = PopCodeParams_EncodeGe(ly.Acts.PopCode, pni, ly.Indexes.NNeurons, pv); Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(GeRaw))] = pc; - Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(GeSyn))] = DtParams_GeSynFromRawSteady(&(*ly).Acts.Dt, pc); + Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(GeSyn))] = DtParams_GeSynFromRawSteady(ly.Acts.Dt, pc); } } case LDTLayer: { 
var geRaw = 0.4 * GlobalScalars[Index2D(TensorStrides[100], TensorStrides[101], u32(GvACh), u32(di))]; Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(GeRaw))] = geRaw; - Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(GeSyn))] = DtParams_GeSynFromRawSteady(&(*ly).Acts.Dt, geRaw); + Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(GeSyn))] = DtParams_GeSynFromRawSteady(ly.Acts.Dt, geRaw); } case VTALayer: { - var geRaw = RWDaParams_GeFromDA(&(*ly).RWDa, GlobalScalars[Index2D(TensorStrides[100], TensorStrides[101], u32(GvVtaDA), u32(di))]); + var geRaw = RWDaParams_GeFromDA(ly.RWDa, GlobalScalars[Index2D(TensorStrides[100], TensorStrides[101], u32(GvVtaDA), u32(di))]); Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(GeRaw))] = geRaw; - Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(GeSyn))] = DtParams_GeSynFromRawSteady(&(*ly).Acts.Dt, geRaw); + Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(GeSyn))] = DtParams_GeSynFromRawSteady(ly.Acts.Dt, geRaw); } case RewLayer: { NeuronSetFlag(NeuronHasExt, ni, di); @@ -246,14 +246,14 @@ fn LayerParams_SpecialPreGs(ly: ptr, ctx: ptr, ctx: ptr, ctx: ptr, ni: u32,di: u32, saveVal: f32) { - switch ((*ly).Type) { +fn LayerParams_SpecialPostGs(ly: LayerParams, ctx: Context, ni: u32,di: u32, saveVal: f32) { + switch (ly.Type) { case PulvinarLayer, PTMaintLayer, CTLayer, BLALayer: { Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(GeExt))] = saveVal; } @@ -280,23 +280,23 @@ fn LayerParams_SpecialPostGs(ly: ptr, ctx: ptr, ctx: ptr, ni: u32,di: u32) { +fn LayerParams_GFromRawSyn(ly: LayerParams, ctx: Context, ni: u32,di: u32) { var extraRaw = f32(0); var extraSyn = f32(0); var nrnGModRaw = 
Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(GModRaw))]; var nrnGModSyn = Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(GModSyn))]; var ach = GlobalScalars[Index2D(TensorStrides[100], TensorStrides[101], u32(GvACh), u32(di))]; - switch ((*ly).Type) { + switch (ly.Type) { case PTMaintLayer: { - var md = (*ly).Acts.Dend.ModGain * nrnGModSyn; - if ((*ly).Acts.Dend.ModACh == 1) { + var md = ly.Acts.Dend.ModGain * nrnGModSyn; + if (ly.Acts.Dend.ModACh == 1) { md *= ach; } - md += (*ly).Acts.Dend.ModBase; + md += ly.Acts.Dend.ModBase; Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(GeRaw))] *= md; Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(GeSyn))] *= md; - extraRaw = (*ly).Acts.Dend.ModGain * nrnGModRaw; - if ((*ly).Acts.Dend.ModACh == 1) { + extraRaw = ly.Acts.Dend.ModGain * nrnGModRaw; + if (ly.Acts.Dend.ModACh == 1) { extraRaw *= ach; } extraSyn = md; @@ -304,12 +304,12 @@ fn LayerParams_GFromRawSyn(ly: ptr, ctx: ptr 1) { md = f32(1); } @@ -320,82 +320,81 @@ fn LayerParams_GFromRawSyn(ly: ptr, ctx: ptr, ctx: ptr, pi: u32,ni: u32,di: u32) { - var giMult = LayerStates[Index3D(TensorStrides[90], TensorStrides[91], TensorStrides[92], u32((*ly).Index), u32(di), u32(LayerGiMult))]; - var gi = giMult*Pools[Index3D(TensorStrides[130], TensorStrides[131], TensorStrides[132], u32(pi), u32(di), u32(TotalGi))] + Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(GiSyn))] + Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(GiNoise))] + NeuroModParams_GiFromACh(&(*ly).Learn.NeuroMod, GlobalScalars[Index2D(TensorStrides[100], TensorStrides[101], u32(GvACh), u32(di))]); + ActParams_GeFromSyn(ly.Acts, ctx, ni, di, geSyn, ege); // sets nrn.GeExt too + ActParams_GkFromVm(ly.Acts, ctx, ni, di); + 
ActParams_GSkCaFromCa(ly.Acts, ctx, ni, di); + Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(GiSyn))] = ActParams_GiFromSyn(ly.Acts, ctx, ni, di, Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(GiSyn))]); +} +fn LayerParams_GiInteg(ly: LayerParams, ctx: Context, pi: u32,ni: u32,di: u32) { + var giMult = LayerStates[Index3D(TensorStrides[90], TensorStrides[91], TensorStrides[92], u32(ly.Index), u32(di), u32(LayerGiMult))]; + var gi = giMult*Pools[Index3D(TensorStrides[130], TensorStrides[131], TensorStrides[132], u32(pi), u32(di), u32(TotalGi))] + Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(GiSyn))] + Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(GiNoise))] + NeuroModParams_GiFromACh(ly.Learn.NeuroMod, GlobalScalars[Index2D(TensorStrides[100], TensorStrides[101], u32(GvACh), u32(di))]); var ssgi = Pools[Index3D(TensorStrides[130], TensorStrides[131], TensorStrides[132], u32(pi), u32(di), u32(SSGi))]; Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(Gi))] = gi; Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(SSGiDend))] = 0.0; - if ((*ctx).PlusPhase == 1 && (*ly).Type == PulvinarLayer) { + if (ctx.PlusPhase == 1 && ly.Type == PulvinarLayer) { var ext = Neurons[Index3D(TensorStrides[70], TensorStrides[71], // nonDrivePct TensorStrides[72], u32(ni), u32(di), u32(Ext))]; - Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(SSGiDend))] = ext * (*ly).Acts.Dend.SSGi * ssgi; + Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(SSGiDend))] = ext * ly.Acts.Dend.SSGi * ssgi; } else { - if (!((*ly).Acts.Clamp.IsInput == 1 || (*ly).Acts.Clamp.IsTarget == 1)) { - Neurons[Index3D(TensorStrides[70], 
TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(SSGiDend))] = (*ly).Acts.Dend.SSGi * ssgi; + if (!(ly.Acts.Clamp.IsInput == 1 || ly.Acts.Clamp.IsTarget == 1)) { + Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(SSGiDend))] = ly.Acts.Dend.SSGi * ssgi; } } var vm = Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(VmDend))]; var nrnGABAB = Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(GABAB))]; var nrnGABABx = Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(GABABx))]; - GABABParams_GABAB(&(*ly).Acts.GabaB, gi, &nrnGABAB, &nrnGABABx); + GABABParams_GABAB(ly.Acts.GabaB, gi, &nrnGABAB, &nrnGABABx); Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(GABAB))] = nrnGABAB; Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(GABABx))] = nrnGABABx; - var nrnGgabaB = GABABParams_GgabaB(&(*ly).Acts.GabaB, nrnGABAB, vm); + var nrnGgabaB = GABABParams_GgabaB(ly.Acts.GabaB, nrnGABAB, vm); Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(GgabaB))] = nrnGgabaB; Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(Gk))] += nrnGgabaB; } -fn LayerParams_GNeuroMod(ly: ptr, ctx: ptr, ni: u32,di: u32) { - var ggain = NeuroModParams_GGain(&(*ly).Learn.NeuroMod, GlobalScalars[Index2D(TensorStrides[100], TensorStrides[101], u32(GvDA), u32(di))] + GlobalScalars[Index2D(TensorStrides[100], TensorStrides[101], u32(GvDAtonic), u32(di))]); +fn LayerParams_GNeuroMod(ly: LayerParams, ctx: Context, ni: u32,di: u32) { + var ggain = NeuroModParams_GGain(ly.Learn.NeuroMod, GlobalScalars[Index2D(TensorStrides[100], TensorStrides[101], u32(GvDA), u32(di))] + GlobalScalars[Index2D(TensorStrides[100], TensorStrides[101], u32(GvDAtonic), 
u32(di))]); Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(Ge))] *= ggain; Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(Gi))] *= ggain; } -fn LayerParams_SpikeFromG(ly: ptr, ctx: ptr, lpi: u32,ni: u32,di: u32) { - ActParams_VmFromG(&(*ly).Acts, ctx, ni, di); - ActParams_SpikeFromVm(&(*ly).Acts, ctx, ni, di); - LearnNeuronParams_CaFromSpike(&(*ly).Learn, ctx, ni, di); +fn LayerParams_SpikeFromG(ly: LayerParams, ctx: Context, lpi: u32,ni: u32,di: u32) { + ActParams_VmFromG(ly.Acts, ctx, ni, di); + ActParams_SpikeFromVm(ly.Acts, ctx, ni, di); + LearnNeuronParams_CaFromSpike(ly.Learn, ctx, ni, di); var lmax = PoolAvgMax(AMGeInt, AMCycle, Max, lpi, di); if (lmax > 0) { Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(GeIntNorm))] = Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(GeInt))] / lmax; } else { Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(GeIntNorm))] = Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(GeInt))]; } - if ((*ctx).Cycle >= (*ly).Acts.Dt.MaxCycStart) { - Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(CaPMaxCa))] += (*ly).Learn.CaSpike.Dt.PDt * (Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(CaM))] - Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(CaPMaxCa))]); + if (ctx.Cycle >= ly.Acts.Dt.MaxCycStart) { + Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(CaPMaxCa))] += ly.Learn.CaSpike.Dt.PDt * (Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(CaM))] - Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), 
u32(di), u32(CaPMaxCa))]); var spkmax = Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(CaPMaxCa))]; if (spkmax > Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(CaPMax))]) { Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(CaPMax))] = spkmax; } } var mx = NetworkIxs[0].NCaBins; - var bin = min((*ctx).Cycle/(*ctx).CaBinCycles, mx); - Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(CaBins + NeuronVars(bin)))] += Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(CaSyn))] / f32((*ctx).CaBinCycles); + var bin = min(ctx.Cycle/ctx.CaBinCycles, mx); + Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(CaBins + NeuronVars(bin)))] += Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(CaSyn))] / f32(ctx.CaBinCycles); } //////// import: "act-net.go" fn CycleNeuron(i: u32) { //gosl:kernel - var ctx = Ctx[0]; - var ni = Context_ItemIndex(&ctx, i); + let ctx = Ctx[0]; + var ni = Context_ItemIndex(ctx, i); if (ni >= NetworkIxs[0].NNeurons) { return; } - var di = Context_DataIndex(&ctx, i); + var di = Context_DataIndex(ctx, i); var li = NeuronIxs[Index2D(TensorStrides[10], TensorStrides[11], u32(ni), u32(NrnLayIndex))]; - var layers=Layers[li]; LayerParams_CycleNeuron(&layers, &ctx, ni, di); - Ctx[0] = ctx; + let layers=Layers[li]; LayerParams_CycleNeuron(layers, ctx, ni, di); } //////// import: "act-path.go" @@ -440,21 +439,21 @@ struct SpikeParams { RDt: f32, pad: i32, } -fn SpikeParams_ActFromISI(sk: ptr, isi: f32,timeInc: f32,integ: f32) -> f32 { +fn SpikeParams_ActFromISI(sk: SpikeParams, isi: f32,timeInc: f32,integ: f32) -> f32 { if (isi <= 0) { return f32(0); } - var maxInt = 1.0 / (timeInc * integ * (*sk).MaxHz); // interval at max hz.. 
+ var maxInt = 1.0 / (timeInc * integ * sk.MaxHz); // interval at max hz.. return maxInt / isi; // normalized } -fn SpikeParams_AvgFromISI(sk: ptr, avg: f32, isi: f32) -> f32 { +fn SpikeParams_AvgFromISI(sk: SpikeParams, avg: f32, isi: f32) -> f32 { var av = avg; if (av <= 0) { av = isi; } else if (isi < 0.8*av) { av = isi; // if significantly less than we take that } else { // integrate on slower - av += (*sk).ISIDt * (isi - av); // running avg updt + av += sk.ISIDt * (isi - av); // running avg updt }return av; } struct DendParams { @@ -505,14 +504,14 @@ struct DtParams { IntDt: f32, LongAvgDt: f32, } -fn DtParams_GeSynFromRaw(dp: ptr, geSyn: f32,geRaw: f32) -> f32 { - return geSyn + geRaw - (*dp).GeDt*geSyn; +fn DtParams_GeSynFromRaw(dp: DtParams, geSyn: f32,geRaw: f32) -> f32 { + return geSyn + geRaw - dp.GeDt*geSyn; } -fn DtParams_GeSynFromRawSteady(dp: ptr, geRaw: f32) -> f32 { - return geRaw * (*dp).GeTau; +fn DtParams_GeSynFromRawSteady(dp: DtParams, geRaw: f32) -> f32 { + return geRaw * dp.GeTau; } -fn DtParams_GiSynFromRaw(dp: ptr, giSyn: f32,giRaw: f32) -> f32 { - return giSyn + giRaw - (*dp).GiDt*giSyn; +fn DtParams_GiSynFromRaw(dp: DtParams, giSyn: f32,giRaw: f32) -> f32 { + return giSyn + giRaw - dp.GiDt*giSyn; } struct SpikeNoiseParams { On: i32, @@ -524,21 +523,21 @@ struct SpikeNoiseParams { GeExpInt: f32, GiExpInt: f32, } -fn SpikeNoiseParams_PGe(an: ptr, ctx: ptr, p: ptr, ni: u32,di: u32) -> f32 { - var nix = NetworkIxs[0]; +fn SpikeNoiseParams_PGe(an: SpikeNoiseParams, ctx: Context, p: ptr, ni: u32,di: u32) -> f32 { + let nix = NetworkIxs[0]; var ndi = di*nix.NNeurons + ni; - *p *= GetRandomNumber(ndi, (*ctx).RandCounter.Counter, RandFunActPGe); - if (*p <= (*an).GeExpInt) { - *p = f32(1);return (*an).Ge; + *p *= GetRandomNumber(ndi, ctx.RandCounter.Counter, RandFunActPGe); + if (*p <= an.GeExpInt) { + *p = f32(1);return an.Ge; }return f32( 0); } -fn SpikeNoiseParams_PGi(an: ptr, ctx: ptr, p: ptr, ni: u32,di: u32) -> f32 { - var nix = 
NetworkIxs[0]; +fn SpikeNoiseParams_PGi(an: SpikeNoiseParams, ctx: Context, p: ptr, ni: u32,di: u32) -> f32 { + let nix = NetworkIxs[0]; var ndi = di*nix.NNeurons + ni; - *p *= GetRandomNumber(ndi, (*ctx).RandCounter.Counter, RandFunActPGi); - if (*p <= (*an).GiExpInt) { - *p = f32(1);return (*an).Gi; + *p *= GetRandomNumber(ndi, ctx.RandCounter.Counter, RandFunActPGi); + if (*p <= an.GiExpInt) { + *p = f32(1);return an.Gi; }return f32( 0); } @@ -559,10 +558,10 @@ struct SMaintParams { Inhib: f32, ISI: F32, } -fn SMaintParams_ExpInt(sm: ptr, isi: f32) -> f32 { +fn SMaintParams_ExpInt(sm: SMaintParams, isi: f32) -> f32 { if (isi <= 0) { return f32(0); - }return FastExp(-max(isi, (*sm).ISI.Min) / (*sm).NNeurons); + }return FastExp(-max(isi, sm.ISI.Min) / sm.NNeurons); } struct PopCodeParams { On: i32, @@ -574,40 +573,40 @@ struct PopCodeParams { MaxSigma: f32, Clip: i32, } -fn PopCodeParams_ClampValue(pc: ptr, val: f32) -> f32 { +fn PopCodeParams_ClampValue(pc: PopCodeParams, val: f32) -> f32 { var clipVal = val; - if (clipVal < (*pc).Min) { - clipVal = (*pc).Min; + if (clipVal < pc.Min) { + clipVal = pc.Min; } - if (clipVal > (*pc).Max) { - clipVal = (*pc).Max; + if (clipVal > pc.Max) { + clipVal = pc.Max; }return clipVal; } -fn PopCodeParams_ProjectParam(pc: ptr, minParam: f32,maxParam: f32,clipVal: f32) -> f32 { - var normVal = (clipVal - (*pc).Min) / ((*pc).Max - (*pc).Min);return minParam + normVal*(maxParam-minParam); +fn PopCodeParams_ProjectParam(pc: PopCodeParams, minParam: f32,maxParam: f32,clipVal: f32) -> f32 { + var normVal = (clipVal - pc.Min) / (pc.Max - pc.Min);return minParam + normVal*(maxParam-minParam); } -fn PopCodeParams_EncodeValue(pc: ptr, i: u32,n: u32, val: f32) -> f32 { +fn PopCodeParams_EncodeValue(pc: PopCodeParams, i: u32,n: u32, val: f32) -> f32 { var eval = val; var clipVal = PopCodeParams_ClampValue(pc, eval); - if ((*pc).Clip == 1) { + if (pc.Clip == 1) { eval = clipVal; } - var rng = (*pc).Max - (*pc).Min; + var rng = pc.Max - 
pc.Min; var act = f32(1); - if ((*pc).MinAct < 1) { - act = PopCodeParams_ProjectParam(pc, (*pc).MinAct, f32(1.0), clipVal); + if (pc.MinAct < 1) { + act = PopCodeParams_ProjectParam(pc, pc.MinAct, f32(1.0), clipVal); } - var sig = (*pc).MinSigma; - if ((*pc).MaxSigma > (*pc).MinSigma) { - sig = PopCodeParams_ProjectParam(pc, (*pc).MinSigma, (*pc).MaxSigma, clipVal); + var sig = pc.MinSigma; + if (pc.MaxSigma > pc.MinSigma) { + sig = PopCodeParams_ProjectParam(pc, pc.MinSigma, pc.MaxSigma, clipVal); } var gnrm = 1.0 / (rng * sig); var incr = rng / f32(n-1); - var trg = (*pc).Min + incr*f32(i); + var trg = pc.Min + incr*f32(i); var dist = gnrm * (trg - eval);return act * FastExp(-(dist * dist)); } -fn PopCodeParams_EncodeGe(pc: ptr, i: u32,n: u32, val: f32) -> f32 { - return (*pc).Ge * PopCodeParams_EncodeValue(pc, i, n, val); +fn PopCodeParams_EncodeGe(pc: PopCodeParams, i: u32,n: u32, val: f32) -> f32 { + return pc.Ge * PopCodeParams_EncodeValue(pc, i, n, val); } struct ActParams { Spikes: SpikeParams, @@ -633,74 +632,74 @@ struct ActParams { SMaint: SMaintParams, PopCode: PopCodeParams, } -fn ActParams_NMDAFromRaw(ac: ptr, ctx: ptr, ni: u32,di: u32, geTot: f32) { - if ((*ac).NMDA.Gbar == 0) { +fn ActParams_NMDAFromRaw(ac: ActParams, ctx: Context, ni: u32,di: u32, geTot: f32) { + if (ac.NMDA.Gbar == 0) { return; } var geT = max(geTot, 0.0); - Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(GnmdaSyn))] = NMDAParams_NMDASyn(&(*ac).NMDA, Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(GnmdaSyn))], geT); - Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(Gnmda))] = NMDAParams_Gnmda(&(*ac).NMDA, Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(GnmdaSyn))], Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(VmDend))]); + 
Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(GnmdaSyn))] = NMDAParams_NMDASyn(ac.NMDA, Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(GnmdaSyn))], geT); + Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(Gnmda))] = NMDAParams_Gnmda(ac.NMDA, Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(GnmdaSyn))], Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(VmDend))]); } -fn ActParams_MaintNMDAFromRaw(ac: ptr, ctx: ptr, ni: u32,di: u32) { - if ((*ac).MaintNMDA.Gbar == 0) { +fn ActParams_MaintNMDAFromRaw(ac: ActParams, ctx: Context, ni: u32,di: u32) { + if (ac.MaintNMDA.Gbar == 0) { return; } - if ((*ac).SMaint.On == 1) { + if (ac.SMaint.On == 1) { ActParams_SMaintFromISI(ac, ctx, ni, di); } - Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(GMaintSyn))] = NMDAParams_NMDASyn(&(*ac).MaintNMDA, Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(GMaintSyn))], Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(GMaintRaw))]); - Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(GnmdaMaint))] = NMDAParams_Gnmda(&(*ac).MaintNMDA, Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(GMaintSyn))], Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(VmDend))]); + Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(GMaintSyn))] = NMDAParams_NMDASyn(ac.MaintNMDA, Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(GMaintSyn))], Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), 
u32(di), u32(GMaintRaw))]); + Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(GnmdaMaint))] = NMDAParams_Gnmda(ac.MaintNMDA, Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(GMaintSyn))], Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(VmDend))]); } -fn ActParams_SMaintFromISI(ac: ptr, ctx: ptr, ni: u32,di: u32) { - var nix = NetworkIxs[0]; +fn ActParams_SMaintFromISI(ac: ActParams, ctx: Context, ni: u32,di: u32) { + let nix = NetworkIxs[0]; var isi = Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(ISIAvg))]; - if (isi < (*ac).SMaint.ISI.Min || isi > (*ac).SMaint.ISI.Max) { + if (isi < ac.SMaint.ISI.Min || isi > ac.SMaint.ISI.Max) { return; } var ndi = di*nix.NNeurons + ni; var smp = Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(SMaintP))]; - smp *= GetRandomNumber(ndi, (*ctx).RandCounter.Counter, RandFunActSMaintP); - var trg = SMaintParams_ExpInt(&(*ac).SMaint, isi); + smp *= GetRandomNumber(ndi, ctx.RandCounter.Counter, RandFunActSMaintP); + var trg = SMaintParams_ExpInt(ac.SMaint, isi); if (smp <= trg) { smp = f32(1); - Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(GMaintRaw))] += (*ac).SMaint.Gbar; + Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(GMaintRaw))] += ac.SMaint.Gbar; } Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(SMaintP))] = smp; } -fn ActParams_GvgccFromVm(ac: ptr, ctx: ptr, ni: u32,di: u32) { - if ((*ac).VGCC.Gbar == 0) { +fn ActParams_GvgccFromVm(ac: ActParams, ctx: Context, ni: u32,di: u32) { + if (ac.VGCC.Gbar == 0) { return; } - Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(Gvgcc))] = VGCCParams_Gvgcc(&(*ac).VGCC, 
Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(VmDend))], Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(VgccM))], Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(VgccH))]); + Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(Gvgcc))] = VGCCParams_Gvgcc(ac.VGCC, Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(VmDend))], Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(VgccM))], Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(VgccH))]); var dm: f32; var dh: f32; - VGCCParams_DMHFromV(&(*ac).VGCC, Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(VmDend))], Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(VgccM))], Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(VgccH))], &dm, &dh); + VGCCParams_DMHFromV(ac.VGCC, Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(VmDend))], Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(VgccM))], Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(VgccH))], &dm, &dh); Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(VgccM))] += dm; Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(VgccH))] += dh; - Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(VgccCa))] = VGCCParams_CaFromG(&(*ac).VGCC, Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(VmDend))], 
Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(Gvgcc))], Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(VgccCa))]); + Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(VgccCa))] = VGCCParams_CaFromG(ac.VGCC, Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(VmDend))], Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(Gvgcc))], Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(VgccCa))]); } -fn ActParams_GkFromVm(ac: ptr, ctx: ptr, ni: u32,di: u32) { +fn ActParams_GkFromVm(ac: ActParams, ctx: Context, ni: u32,di: u32) { var vm = Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(Vm))]; var vmd = Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(VmDend))]; var mahpN = Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(MahpN))]; - var gmahp = MahpParams_GmAHP(&(*ac).Mahp, vm, &mahpN); + var gmahp = MahpParams_GmAHP(ac.Mahp, vm, &mahpN); Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(Gmahp))] = gmahp; Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(MahpN))] = mahpN; var gsahp = Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(Gsahp))]; - var gak = AKsParams_Gak(&(*ac).AK, vmd); + var gak = AKsParams_Gak(ac.AK, vmd); Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(Gak))] = gak; var nkirM = Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(KirM))]; - var gkir = KirParams_Gkir(&(*ac).Kir, vm, nkirM); + var gkir = KirParams_Gkir(ac.Kir, vm, 
nkirM); Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(Gkir))] = gkir; - nkirM += KirParams_DM(&(*ac).Kir, VToBio(vm), nkirM); + nkirM += KirParams_DM(ac.Kir, VToBio(vm), nkirM); Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(KirM))] = nkirM; var gktot = gmahp + gsahp + gak + gkir; - if ((*ac).KNa.On == 1) { + if (ac.KNa.On == 1) { var gknaMed = Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(GknaMed))]; var gknaSlow = Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(GknaSlow))]; - KNaMedSlow_GcFromSpike(&(*ac).KNa, &gknaMed, &gknaSlow, Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(Spike))] > .5); + KNaMedSlow_GcFromSpike(ac.KNa, &gknaMed, &gknaSlow, Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(Spike))] > .5); Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(GknaMed))] = gknaMed; Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(GknaSlow))] = gknaSlow; gktot += gknaMed + gknaSlow; @@ -708,29 +707,29 @@ fn ActParams_GkFromVm(ac: ptr, ctx: ptr, n Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(Gk))] = gktot; } -fn ActParams_GSkCaFromCa(ac: ptr, ctx: ptr, ni: u32,di: u32) { - if ((*ac).SKCa.Gbar == 0) { +fn ActParams_GSkCaFromCa(ac: ActParams, ctx: Context, ni: u32,di: u32) { + if (ac.SKCa.Gbar == 0) { return; } var skcar = Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(SKCaR))]; var skcain = Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(SKCaIn))]; - Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(SKCaM))] = 
SKCaParams_MFromCa(&(*ac).SKCa, skcar, Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(SKCaM))]); - SKCaParams_CaInRFromSpike(&(*ac).SKCa, Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(Spike))], Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(CaD))], &skcain, &skcar); + Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(SKCaM))] = SKCaParams_MFromCa(ac.SKCa, skcar, Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(SKCaM))]); + SKCaParams_CaInRFromSpike(ac.SKCa, Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(Spike))], Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(CaD))], &skcain, &skcar); Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(SKCaR))] = skcar; Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(SKCaIn))] = skcain; - Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(Gsk))] = (*ac).SKCa.Gbar * Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(SKCaM))]; + Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(Gsk))] = ac.SKCa.Gbar * Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(SKCaM))]; Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(Gk))] += Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(Gsk))]; } -fn ActParams_GeFromSyn(ac: ptr, ctx: ptr, ni: u32,di: u32, geSyn: f32,geExt: f32) { +fn ActParams_GeFromSyn(ac: ActParams, ctx: Context, ni: u32,di: u32, geSyn: f32,geExt: f32) { 
Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(GeExt))] = 0.0; var geS = geSyn; var geE = geExt; - if ((*ac).Clamp.Add == 1 && NeuronHasFlag(NeuronHasExt, ni, di)) { - Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(GeExt))] = Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(Ext))] * (*ac).Clamp.Ge; + if (ac.Clamp.Add == 1 && NeuronHasFlag(NeuronHasExt, ni, di)) { + Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(GeExt))] = Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(Ext))] * ac.Clamp.Ge; geS += Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(GeExt))]; } - if ((*ac).Clamp.Add == 0 && NeuronHasFlag(NeuronHasExt, ni, di)) { - geS = Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(Ext))] * (*ac).Clamp.Ge; + if (ac.Clamp.Add == 0 && NeuronHasFlag(NeuronHasExt, ni, di)) { + geS = Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(Ext))] * ac.Clamp.Ge; Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(GeExt))] = geS; geE = f32(0); @@ -741,115 +740,115 @@ fn ActParams_GeFromSyn(ac: ptr, ctx: ptr, } ActParams_AddGeNoise(ac, ctx, ni, di); } -fn ActParams_AddGeNoise(ac: ptr, ctx: ptr, ni: u32,di: u32) { - if ((*ac).Noise.On == 0 || (*ac).Noise.Ge == 0) { +fn ActParams_AddGeNoise(ac: ActParams, ctx: Context, ni: u32,di: u32) { + if (ac.Noise.On == 0 || ac.Noise.Ge == 0) { return; } var p = Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(GeNoiseP))]; - var ge = SpikeNoiseParams_PGe(&(*ac).Noise, ctx, &p, ni, di); + var ge = SpikeNoiseParams_PGe(ac.Noise, ctx, &p, ni, di); Neurons[Index3D(TensorStrides[70], 
TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(GeNoiseP))] = p; - Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(GeNoise))] = DtParams_GeSynFromRaw(&(*ac).Dt, Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(GeNoise))], ge); + Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(GeNoise))] = DtParams_GeSynFromRaw(ac.Dt, Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(GeNoise))], ge); Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(Ge))] += Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(GeNoise))]; } -fn ActParams_AddGiNoise(ac: ptr, ctx: ptr, ni: u32,di: u32) { - if ((*ac).Noise.On == 0 || (*ac).Noise.Gi == 0) { +fn ActParams_AddGiNoise(ac: ActParams, ctx: Context, ni: u32,di: u32) { + if (ac.Noise.On == 0 || ac.Noise.Gi == 0) { return; } var p = Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(GiNoiseP))]; - var gi = SpikeNoiseParams_PGi(&(*ac).Noise, ctx, &p, ni, di); + var gi = SpikeNoiseParams_PGi(ac.Noise, ctx, &p, ni, di); Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(GiNoiseP))] = p; - Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(GiNoise))] = DtParams_GiSynFromRaw(&(*ac).Dt, Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(GiNoise))], gi); + Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(GiNoise))] = DtParams_GiSynFromRaw(ac.Dt, Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(GiNoise))], gi); } -fn ActParams_GiFromSyn(ac: ptr, ctx: ptr, ni: u32,di: u32, giSyn: f32) -> f32 { +fn 
ActParams_GiFromSyn(ac: ActParams, ctx: Context, ni: u32,di: u32, giSyn: f32) -> f32 { ActParams_AddGiNoise(ac, ctx, ni, di); if (giSyn < 0) { // negative inhib G doesn't make any sense return f32(0); }return giSyn; } -fn ActParams_InetFromG(ac: ptr, vm: f32,ge: f32,gl: f32,gi: f32,gk: f32) -> f32 { - var inet = ge*((*ac).Erev.E-vm) + gl*(*ac).Gbar.L*((*ac).Erev.L-vm) + gi*((*ac).Erev.I-vm) + gk*((*ac).Erev.K-vm); - if (inet > (*ac).Dt.VmTau) { - inet = (*ac).Dt.VmTau; - } else if (inet < -(*ac).Dt.VmTau) { - inet = -(*ac).Dt.VmTau; +fn ActParams_InetFromG(ac: ActParams, vm: f32,ge: f32,gl: f32,gi: f32,gk: f32) -> f32 { + var inet = ge*(ac.Erev.E-vm) + gl*ac.Gbar.L*(ac.Erev.L-vm) + gi*(ac.Erev.I-vm) + gk*(ac.Erev.K-vm); + if (inet > ac.Dt.VmTau) { + inet = ac.Dt.VmTau; + } else if (inet < -ac.Dt.VmTau) { + inet = -ac.Dt.VmTau; }return inet; } -fn ActParams_VmFromInet(ac: ptr, vm: f32,dt: f32,inet: f32) -> f32 { - return F32_ClampValue(&(*ac).VmRange, vm + dt*inet); +fn ActParams_VmFromInet(ac: ActParams, vm: f32,dt: f32,inet: f32) -> f32 { + return F32_ClampValue(ac.VmRange, vm + dt*inet); } -fn ActParams_VmInteg(ac: ptr, vm: f32,dt: f32,ge: f32,gl: f32,gi: f32,gk: f32, nvm: ptr,inet: ptr) { - var dtEff = dt * (*ac).Dt.DtStep; +fn ActParams_VmInteg(ac: ActParams, vm: f32,dt: f32,ge: f32,gl: f32,gi: f32,gk: f32, nvm: ptr,inet: ptr) { + var dtEff = dt * ac.Dt.DtStep; *nvm = vm; - for (var i = i32(0); i < (*ac).Dt.VmSteps; i++) { + for (var i = i32(0); i < ac.Dt.VmSteps; i++) { *inet = ActParams_InetFromG(ac, *nvm, ge, gl, gi, gk); *nvm = ActParams_VmFromInet(ac, *nvm, dtEff, *inet); } } -fn ActParams_VmFromG(ac: ptr, ctx: ptr, ni: u32,di: u32) { +fn ActParams_VmFromG(ac: ActParams, ctx: Context, ni: u32,di: u32) { var updtVm = true; var isi = Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(ISI))]; - if ((*ac).Spikes.Tr > 0 && isi >= 0 && isi < f32((*ac).Spikes.Tr)) { + if (ac.Spikes.Tr > 0 && isi >= 0 && isi < 
f32(ac.Spikes.Tr)) { updtVm = false; // don't update the spiking vm during refract } - var ge = Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(Ge))] * (*ac).Gbar.E; - var gi = Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(Gi))] * (*ac).Gbar.I; - var gk = Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(Gk))] * (*ac).Gbar.K; + var ge = Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(Ge))] * ac.Gbar.E; + var gi = Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(Gi))] * ac.Gbar.I; + var gk = Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(Gk))] * ac.Gbar.K; var nvm: f32; var inet: f32; var expi: f32; if (updtVm) { - ActParams_VmInteg(ac, Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(Vm))], (*ac).Dt.VmDt, ge, f32(f32(1)), gi, gk, &nvm, &inet); - if (updtVm && (*ac).Spikes.Exp == 1) { // add spike current if relevant + ActParams_VmInteg(ac, Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(Vm))], ac.Dt.VmDt, ge, f32(f32(1)), gi, gk, &nvm, &inet); + if (updtVm && ac.Spikes.Exp == 1) { // add spike current if relevant var exVm: f32; exVm = 0.5 * (nvm + Neurons[Index3D(TensorStrides[70], TensorStrides[71], // midpoint for this TensorStrides[72], u32(ni), u32(di), u32(Vm))]); - expi = (*ac).Gbar.L * (*ac).Spikes.ExpSlope * - FastExp((exVm-(*ac).Spikes.Thr)/(*ac).Spikes.ExpSlope); - if (expi > (*ac).Dt.VmTau) { - expi = (*ac).Dt.VmTau; + expi = ac.Gbar.L * ac.Spikes.ExpSlope * + FastExp((exVm-ac.Spikes.Thr)/ac.Spikes.ExpSlope); + if (expi > ac.Dt.VmTau) { + expi = ac.Dt.VmTau; } inet += expi; - nvm = ActParams_VmFromInet(ac, nvm, (*ac).Dt.VmDt, expi); + nvm = ActParams_VmFromInet(ac, nvm, ac.Dt.VmDt, 
expi); } Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(Vm))] = nvm; Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(Inet))] = inet; } else { var dvm: f32; - if (i32(isi) == (*ac).Spikes.Tr-1) { - dvm = (*ac).Spikes.VmR - Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(Vm))]; + if (i32(isi) == ac.Spikes.Tr-1) { + dvm = ac.Spikes.VmR - Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(Vm))]; } else { - dvm = (*ac).Spikes.RDt * ((*ac).Spikes.VmR - Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(Vm))]); + dvm = ac.Spikes.RDt * (ac.Spikes.VmR - Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(Vm))]); } Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(Vm))] += dvm; - Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(Inet))] = dvm * (*ac).Dt.VmTau; + Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(Inet))] = dvm * ac.Dt.VmTau; } var glEff = f32(1); if (!updtVm) { - glEff += (*ac).Dend.GbarR; + glEff += ac.Dend.GbarR; } var giEff: f32; - giEff = gi + (*ac).Gbar.I*Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(SSGiDend))]; - ActParams_VmInteg(ac, Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(VmDend))], (*ac).Dt.VmDendDt, ge, glEff, giEff, gk, &nvm, &inet); + giEff = gi + ac.Gbar.I*Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(SSGiDend))]; + ActParams_VmInteg(ac, Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(VmDend))], ac.Dt.VmDendDt, ge, glEff, giEff, gk, &nvm, &inet); if 
(updtVm) { - nvm = ActParams_VmFromInet(ac, nvm, (*ac).Dt.VmDendDt, (*ac).Dend.GbarExp*expi); + nvm = ActParams_VmFromInet(ac, nvm, ac.Dt.VmDendDt, ac.Dend.GbarExp*expi); } Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(VmDend))] = nvm; } -fn ActParams_SpikeFromVmVars(ac: ptr, nrnISI: ptr,nrnISIAvg: ptr,nrnSpike: ptr,nrnSpiked: ptr,nrnAct: ptr, nrnVm: f32) { +fn ActParams_SpikeFromVmVars(ac: ActParams, nrnISI: ptr,nrnISIAvg: ptr,nrnSpike: ptr,nrnSpiked: ptr,nrnAct: ptr, nrnVm: f32) { var thr: f32; - if ((*ac).Spikes.Exp == 1) { - thr = (*ac).Spikes.ExpThr; + if (ac.Spikes.Exp == 1) { + thr = ac.Spikes.ExpThr; } else { - thr = (*ac).Spikes.Thr; + thr = ac.Spikes.Thr; } if (nrnVm >= thr) { *nrnSpike = f32(1); if (*nrnISIAvg == -1) { *nrnISIAvg = f32(-2); } else if (*nrnISI > 0) { // must have spiked to update - *nrnISIAvg = SpikeParams_AvgFromISI(&(*ac).Spikes, *nrnISIAvg, *nrnISI+1); + *nrnISIAvg = SpikeParams_AvgFromISI(ac.Spikes, *nrnISIAvg, *nrnISI+1); } *nrnISI = f32(0); } else { @@ -868,17 +867,17 @@ fn ActParams_SpikeFromVmVars(ac: ptr, nrnISI: ptr= 0 && *nrnISI > 0 && *nrnISI > 1.2**nrnISIAvg) { - *nrnISIAvg = SpikeParams_AvgFromISI(&(*ac).Spikes, *nrnISIAvg, *nrnISI); + *nrnISIAvg = SpikeParams_AvgFromISI(ac.Spikes, *nrnISIAvg, *nrnISI); } } - var nwAct = SpikeParams_ActFromISI(&(*ac).Spikes, *nrnISIAvg, f32(.001), (*ac).Dt.Integ); + var nwAct = SpikeParams_ActFromISI(ac.Spikes, *nrnISIAvg, f32(.001), ac.Dt.Integ); if (nwAct > 1) { nwAct = f32(1); } - nwAct = *nrnAct + (*ac).Dt.VmDt*(nwAct-*nrnAct); + nwAct = *nrnAct + ac.Dt.VmDt*(nwAct-*nrnAct); *nrnAct = nwAct; } -fn ActParams_SpikeFromVm(ac: ptr, ctx: ptr, ni: u32,di: u32) { +fn ActParams_SpikeFromVm(ac: ActParams, ctx: Context, ni: u32,di: u32) { var nrnISI = Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(ISI))]; var nrnISIAvg = Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), 
u32(di), u32(ISIAvg))]; var nrnSpike = Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(Spike))]; @@ -904,17 +903,17 @@ struct AKsParams { pad1: i32, pad2: i32, } -fn AKsParams_MFromV(ap: ptr, vbio: f32) -> f32 { +fn AKsParams_MFromV(ap: AKsParams, vbio: f32) -> f32 { var av = vbio; - if (vbio > (*ap).Vmax) { - av = (*ap).Vmax; - }return (*ap).Hf / (1.0 + FastExp(-(*ap).Mf*(av+(*ap).Voff))); + if (vbio > ap.Vmax) { + av = ap.Vmax; + }return ap.Hf / (1.0 + FastExp(-ap.Mf*(av+ap.Voff))); } -fn AKsParams_MFromVnorm(ap: ptr, v: f32) -> f32 { +fn AKsParams_MFromVnorm(ap: AKsParams, v: f32) -> f32 { return AKsParams_MFromV(ap, VToBio(v)); } -fn AKsParams_Gak(ap: ptr, v: f32) -> f32 { - return (*ap).Gbar * AKsParams_MFromVnorm(ap, v); +fn AKsParams_Gak(ap: AKsParams, v: f32) -> f32 { + return ap.Gbar * AKsParams_MFromVnorm(ap, v); } //////// import: "chans-chans.go" @@ -943,31 +942,31 @@ struct GABABParams { pad1: f32, pad2: f32, } -fn GABABParams_GFromV(gp: ptr, v: f32) -> f32 { +fn GABABParams_GFromV(gp: GABABParams, v: f32) -> f32 { var vbio = VToBio(v); if (vbio < -90) { vbio = f32(-90); }return (vbio + 90.0) / (1.0 + FastExp(0.1*((vbio+90.0)+10.0))); } -fn GABABParams_GFromS(gp: ptr, s: f32) -> f32 { - var ss = s * (*gp).GiSpike; +fn GABABParams_GFromS(gp: GABABParams, s: f32) -> f32 { + var ss = s * gp.GiSpike; if (ss > 20) { return f32(1); }return 1.0 / (1.0 + FastExp(-(ss-7.1)/1.4)); } -fn GABABParams_BiExp(gp: ptr, g: f32,x: f32, dG: ptr,dX: ptr) { - *dG = ((*gp).TauFact*x - g) * (*gp).RiseDt; - *dX = -x * (*gp).DecayDt;return; +fn GABABParams_BiExp(gp: GABABParams, g: f32,x: f32, dG: ptr,dX: ptr) { + *dG = (gp.TauFact*x - g) * gp.RiseDt; + *dX = -x * gp.DecayDt;return; } -fn GABABParams_GABAB(gp: ptr, gi: f32, gabaB: ptr,gabaBx: ptr) { +fn GABABParams_GABAB(gp: GABABParams, gi: f32, gabaB: ptr,gabaBx: ptr) { var dG: f32; var dX: f32; GABABParams_BiExp(gp, *gabaB, *gabaBx, &dG, &dX); *gabaBx += GABABParams_GFromS(gp, 
gi) + dX; // gets new input *gabaB += dG;return; } -fn GABABParams_GgabaB(gp: ptr, gabaB: f32,vm: f32) -> f32 { - return (*gp).Gbar * GABABParams_GFromV(gp, vm) * (gabaB + (*gp).Gbase); +fn GABABParams_GgabaB(gp: GABABParams, gabaB: f32,vm: f32) -> f32 { + return gp.Gbar * GABABParams_GFromV(gp, vm) * (gabaB + gp.Gbase); } //////// import: "chans-kir.go" @@ -981,22 +980,22 @@ struct KirParams { DecayTau: f32, Mrest: f32, } -fn KirParams_Minf(kp: ptr, vbio: f32) -> f32 { - return 1.0 / (1.0 + FastExp((vbio-((*kp).MinfOff))/(*kp).MinfTau)); +fn KirParams_Minf(kp: KirParams, vbio: f32) -> f32 { + return 1.0 / (1.0 + FastExp((vbio-(kp.MinfOff))/kp.MinfTau)); } -fn KirParams_MTau(kp: ptr, vbio: f32) -> f32 { - var alpha = 0.1 * FastExp((vbio-((*kp).RiseOff))/(-(*kp).RiseTau)); - var beta = 0.27 / (1.0 + FastExp((vbio-((*kp).DecayOff))/(-(*kp).DecayTau))); +fn KirParams_MTau(kp: KirParams, vbio: f32) -> f32 { + var alpha = 0.1 * FastExp((vbio-(kp.RiseOff))/(-kp.RiseTau)); + var beta = 0.27 / (1.0 + FastExp((vbio-(kp.DecayOff))/(-kp.DecayTau))); var sum = alpha + beta;return 1.0 / sum; } -fn KirParams_DM(kp: ptr, vbio: f32,m: f32) -> f32 { +fn KirParams_DM(kp: KirParams, vbio: f32,m: f32) -> f32 { var minf = KirParams_Minf(kp, vbio); var mtau = KirParams_MTau(kp, vbio); var dm = (minf - m) / (mtau * 3); // 3 = Q10 return dm; } -fn KirParams_Gkir(kp: ptr, v: f32, m: f32) -> f32 { - return (*kp).Gbar * m; +fn KirParams_Gkir(kp: KirParams, v: f32, m: f32) -> f32 { + return kp.Gbar * m; } //////// import: "chans-kna.go" @@ -1010,12 +1009,12 @@ struct KNaParams { pad1: i32, pad2: i32, } -fn KNaParams_GcFromSpike(ka: ptr, gKNa: ptr, spike: bool) { - if ((*ka).On == 1) { +fn KNaParams_GcFromSpike(ka: KNaParams, gKNa: ptr, spike: bool) { + if (ka.On == 1) { if (spike) { - *gKNa += (*ka).Rise * ((*ka).Max - *gKNa); + *gKNa += ka.Rise * (ka.Max - *gKNa); } else { - *gKNa -= (*ka).Dt * *gKNa; + *gKNa -= ka.Dt * *gKNa; } } else { *gKNa = f32(0); @@ -1029,10 +1028,10 @@ struct 
KNaMedSlow { Med: KNaParams, Slow: KNaParams, } -fn KNaMedSlow_GcFromSpike(ka: ptr, gKNaM: ptr,gKNaS: ptr, spike: bool) { - KNaParams_GcFromSpike(&(*ka).Med, gKNaM, spike); - if ((*ka).TrialSlow == 0) { - KNaParams_GcFromSpike(&(*ka).Slow, gKNaS, spike); +fn KNaMedSlow_GcFromSpike(ka: KNaMedSlow, gKNaM: ptr,gKNaS: ptr, spike: bool) { + KNaParams_GcFromSpike(ka.Med, gKNaM, spike); + if (ka.TrialSlow == 0) { + KNaParams_GcFromSpike(ka.Slow, gKNaS, spike); } } @@ -1047,33 +1046,33 @@ struct MahpParams { pad: i32, pad2: i32, } -fn MahpParams_EFun(mp: ptr, z: f32) -> f32 { +fn MahpParams_EFun(mp: MahpParams, z: f32) -> f32 { if (abs(z) < 1.0e-4) { return 1.0 - 0.5*z; }return z / (FastExp(z) - 1.0); } -fn MahpParams_NinfTauFromV(mp: ptr, vbio: f32, ninf: ptr,tau: ptr) { - var vo = vbio - (*mp).Voff; - var a = (*mp).DtMax * (*mp).Vslope * MahpParams_EFun(mp, -vo/(*mp).Vslope); - var b = (*mp).DtMax * (*mp).Vslope * MahpParams_EFun(mp, vo/(*mp).Vslope); +fn MahpParams_NinfTauFromV(mp: MahpParams, vbio: f32, ninf: ptr,tau: ptr) { + var vo = vbio - mp.Voff; + var a = mp.DtMax * mp.Vslope * MahpParams_EFun(mp, -vo/mp.Vslope); + var b = mp.DtMax * mp.Vslope * MahpParams_EFun(mp, vo/mp.Vslope); *tau = 1.0 / (a + b); - *ninf = a * *tau; // a / (a+b) - *tau /= (*mp).Tadj; // correct right away.. + *ninf = a * *tau; // a / (a+b) + *tau /= mp.Tadj; // correct right away.. 
return; } -fn MahpParams_NinfTauFromVnorm(mp: ptr, v: f32, ninf: ptr,tau: ptr) { +fn MahpParams_NinfTauFromVnorm(mp: MahpParams, v: f32, ninf: ptr,tau: ptr) { MahpParams_NinfTauFromV(mp, VToBio(v), ninf, tau); } -fn MahpParams_DNFromV(mp: ptr, v: f32,n: f32) -> f32 { +fn MahpParams_DNFromV(mp: MahpParams, v: f32,n: f32) -> f32 { var ninf: f32; var tau: f32; MahpParams_NinfTauFromVnorm(mp, v, &ninf, &tau); var dn = (ninf - n) / tau;return dn; } -fn MahpParams_GmAHP(mp: ptr, v: f32, n: ptr) -> f32 { +fn MahpParams_GmAHP(mp: MahpParams, v: f32, n: ptr) -> f32 { var dn = MahpParams_DNFromV(mp, v, *n); *n += dn; - var g = (*mp).Tadj * (*mp).Gbar * *n;return g; + var g = mp.Tadj * mp.Gbar * *n;return g; } //////// import: "chans-nmda.go" @@ -1087,29 +1086,29 @@ struct NMDAParams { IDt: f32, MgFact: f32, } -fn NMDAParams_MgGFromVbio(np: ptr, vbio: f32) -> f32 { - var av = vbio + (*np).Voff; +fn NMDAParams_MgGFromVbio(np: NMDAParams, vbio: f32) -> f32 { + var av = vbio + np.Voff; if (av >= 0) { return f32(0); - }return -av / (1.0 + (*np).MgFact*FastExp(-0.062*av)); + }return -av / (1.0 + np.MgFact*FastExp(-0.062*av)); } -fn NMDAParams_MgGFromV(np: ptr, v: f32) -> f32 { +fn NMDAParams_MgGFromV(np: NMDAParams, v: f32) -> f32 { return NMDAParams_MgGFromVbio(np, VToBio(v)); } -fn NMDAParams_CaFromVbio(np: ptr, vbio: f32) -> f32 { - var av = vbio + (*np).Voff; +fn NMDAParams_CaFromVbio(np: NMDAParams, vbio: f32) -> f32 { + var av = vbio + np.Voff; if (av > -0.5 && av < 0.5) { // this eliminates div 0 at 0, and numerical "fuzz" around 0 return 1.0 / (0.0756 * (1 + 0.0378*av)); }return -av / (1.0 - FastExp(0.0756*av)); } -fn NMDAParams_CaFromV(np: ptr, v: f32) -> f32 { +fn NMDAParams_CaFromV(np: NMDAParams, v: f32) -> f32 { return NMDAParams_CaFromVbio(np, VToBio(v)); } -fn NMDAParams_NMDASyn(np: ptr, nmda: f32,raw: f32) -> f32 { - return nmda + raw - (*np).Dt*nmda; +fn NMDAParams_NMDASyn(np: NMDAParams, nmda: f32,raw: f32) -> f32 { + return nmda + raw - np.Dt*nmda; } -fn 
NMDAParams_Gnmda(np: ptr, nmda: f32,vm: f32) -> f32 { - return (*np).Gbar * NMDAParams_MgGFromV(np, vm) * nmda; +fn NMDAParams_Gnmda(np: NMDAParams, nmda: f32,vm: f32) -> f32 { + return np.Gbar * NMDAParams_MgGFromV(np, vm) * nmda; } //////// import: "chans-sahp.go" @@ -1139,26 +1138,26 @@ struct SKCaParams { CaRDecayDt: f32, CaInDt: f32, } -fn SKCaParams_MAsympHill(sp: ptr, cai: f32) -> f32 { - var caia = cai / (*sp).C50; +fn SKCaParams_MAsympHill(sp: SKCaParams, cai: f32) -> f32 { + var caia = cai / sp.C50; var capow = caia * caia * caia * caia;return capow / (1 + capow); } -fn SKCaParams_CaInRFromSpike(sp: ptr, spike: f32,caD: f32, caIn: ptr,caR: ptr) { - *caR -= *caR * (*sp).CaRDecayDt; +fn SKCaParams_CaInRFromSpike(sp: SKCaParams, spike: f32,caD: f32, caIn: ptr,caR: ptr) { + *caR -= *caR * sp.CaRDecayDt; if (spike > 0) { - var x = *caIn * (*sp).KCaR; + var x = *caIn * sp.KCaR; *caR += x; *caIn -= x; } - if (caD < (*sp).CaInThr) { - *caIn += (*sp).CaInDt * (1.0 - *caIn); + if (caD < sp.CaInThr) { + *caIn += sp.CaInDt * (1.0 - *caIn); } } -fn SKCaParams_MFromCa(sp: ptr, caR: f32,mcur: f32) -> f32 { +fn SKCaParams_MFromCa(sp: SKCaParams, caR: f32,mcur: f32) -> f32 { var mas = SKCaParams_MAsympHill(sp, caR); if (mas > mcur) { - return mcur + (*sp).ActDt*(mas-mcur); - }return mcur + (*sp).DeDt*(mas-mcur); + return mcur + sp.ActDt*(mas-mcur); + }return mcur + sp.DeDt*(mas-mcur); } //////// import: "chans-vgcc.go" @@ -1168,13 +1167,13 @@ struct VGCCParams { pad: i32, pad1: i32, } -fn VGCCParams_GFromV(np: ptr, v: f32) -> f32 { +fn VGCCParams_GFromV(np: VGCCParams, v: f32) -> f32 { var vbio = VToBio(v); if (vbio > -0.5 && vbio < 0.5) { // this avoids divide by 0, and numerical instability around 0 return 1.0 / (0.0756 * (1 + 0.0378*vbio)); }return -vbio / (1.0 - FastExp(0.0756*vbio)); } -fn VGCCParams_MFromV(np: ptr, vbio: f32) -> f32 { +fn VGCCParams_MFromV(np: VGCCParams, vbio: f32) -> f32 { if (vbio < -60) { return f32(0); } @@ -1182,7 +1181,7 @@ fn 
VGCCParams_MFromV(np: ptr, vbio: f32) -> f32 { return f32(1); }return 1.0 / (1.0 + FastExp(-(vbio + 37))); } -fn VGCCParams_HFromV(np: ptr, vbio: f32) -> f32 { +fn VGCCParams_HFromV(np: VGCCParams, vbio: f32) -> f32 { if (vbio < -50) { return f32(1); } @@ -1190,7 +1189,7 @@ fn VGCCParams_HFromV(np: ptr, vbio: f32) -> f32 { return f32(0); }return 1.0 / (1.0 + FastExp((vbio+41)*2)); } -fn VGCCParams_DMHFromV(np: ptr, v: f32,m: f32,h: f32, dm: ptr,dh: ptr) { +fn VGCCParams_DMHFromV(np: VGCCParams, v: f32,m: f32,h: f32, dm: ptr,dh: ptr) { var vbio = VToBio(v); if (vbio > 0) { vbio = f32(0); @@ -1198,11 +1197,11 @@ fn VGCCParams_DMHFromV(np: ptr, v: f32,m: f32,h: f32, dm: p *dm = (VGCCParams_MFromV(np, vbio) - m) / 3.6; *dh = (VGCCParams_HFromV(np, vbio) - h) / 29.0; } -fn VGCCParams_Gvgcc(np: ptr, vm: f32,m: f32,h: f32) -> f32 { - return (*np).Gbar * VGCCParams_GFromV(np, vm) * m * m * m * h; +fn VGCCParams_Gvgcc(np: VGCCParams, vm: f32,m: f32,h: f32) -> f32 { + return np.Gbar * VGCCParams_GFromV(np, vm) * m * m * m * h; } -fn VGCCParams_CaFromG(np: ptr, v: f32,g: f32,ca: f32) -> f32 { - var vbio = VToBio(v);return -vbio * (*np).Ca * g; +fn VGCCParams_CaFromG(np: VGCCParams, v: f32,g: f32,ca: f32) -> f32 { + var vbio = VToBio(v);return -vbio * np.Ca * g; } //////// import: "context.go" @@ -1225,11 +1224,11 @@ struct Context { //types:add -setters SlowCounter: i32, RandCounter: RandCounter, } -fn Context_ItemIndex(ctx: ptr, idx: u32) -> u32 { - return idx / (*ctx).NData; +fn Context_ItemIndex(ctx: Context, idx: u32) -> u32 { + return idx / ctx.NData; } -fn Context_DataIndex(ctx: ptr, idx: u32) -> u32 { - return idx % (*ctx).NData; +fn Context_DataIndex(ctx: Context, idx: u32) -> u32 { + return idx % ctx.NData; } //////// import: "deep-layer.go" @@ -1251,11 +1250,11 @@ struct PulvParams { DriveLayIndex: i32, pad: f32, } -fn PulvParams_DriveGe(tp: ptr, act: f32) -> f32 { - return (*tp).DriveScale * act; +fn PulvParams_DriveGe(tp: PulvParams, act: f32) -> f32 { + return 
tp.DriveScale * act; } -fn PulvParams_NonDrivePct(tp: ptr, drvMax: f32) -> f32 { - return 1.0 - min(1.0, drvMax/(*tp).FullDriveAct); +fn PulvParams_NonDrivePct(tp: PulvParams, drvMax: f32) -> f32 { + return 1.0 - min(1.0, drvMax/tp.FullDriveAct); } //////// import: "deep-path.go" @@ -1442,10 +1441,10 @@ struct CaDtParams { //types:add pad: i32, pad1: i32, } -fn CaDtParams_FromCa(kp: ptr, ca: f32, caM: ptr,caP: ptr,caD: ptr) { - *caM += (*kp).MDt * (ca - *caM); - *caP += (*kp).PDt * (*caM - *caP); - *caD += (*kp).DDt * (*caP - *caD); +fn CaDtParams_FromCa(kp: CaDtParams, ca: f32, caM: ptr,caP: ptr,caD: ptr) { + *caM += kp.MDt * (ca - *caM); + *caP += kp.PDt * (*caM - *caP); + *caD += kp.DDt * (*caP - *caD); } struct CaSpikeParams { SpikeCaM: f32, @@ -1454,12 +1453,12 @@ struct CaSpikeParams { CaSynDt: f32, Dt: CaDtParams, } -fn CaSpikeParams_CaMFromSpike(sp: ptr, spike: f32, caM: ptr,caP: ptr,caD: ptr) { - var ca = (*sp).SpikeCaM * spike; - CaDtParams_FromCa(&(*sp).Dt, ca, caM, caP, caD); +fn CaSpikeParams_CaMFromSpike(sp: CaSpikeParams, spike: f32, caM: ptr,caP: ptr,caD: ptr) { + var ca = sp.SpikeCaM * spike; + CaDtParams_FromCa(sp.Dt, ca, caM, caP, caD); } -fn CaSpikeParams_CaSynFromSpike(sp: ptr, spike: f32, caSyn: f32) -> f32 { - var ca = (*sp).SpikeCaSyn * spike;return caSyn + (*sp).CaSynDt*(ca-caSyn); +fn CaSpikeParams_CaSynFromSpike(sp: CaSpikeParams, spike: f32, caSyn: f32) -> f32 { + var ca = sp.SpikeCaSyn * spike;return caSyn + sp.CaSynDt*(ca-caSyn); } //////// import: "layerparams.go" @@ -1505,8 +1504,8 @@ struct LayerParams { TDDa: TDDaParams, Indexes: LayerIndexes, } -fn LayerParams_PoolIndex(ly: ptr, pi: u32) -> u32 { - return (*ly).PoolSt + pi; +fn LayerParams_PoolIndex(ly: LayerParams, pi: u32) -> u32 { + return ly.PoolSt + pi; } //////// import: "layertypes.go" @@ -1575,18 +1574,18 @@ struct LearnCaParams { pad: i32, pad2: i32, } -fn LearnCaParams_VgccCaFromSpike(lc: ptr, ctx: ptr, ni: u32,di: u32) { - if ((*lc).SpikeVGCC == 1) { - 
Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(VgccCa))] = (*lc).SpikeVgccCa * Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(Spike))]; +fn LearnCaParams_VgccCaFromSpike(lc: LearnCaParams, ctx: Context, ni: u32,di: u32) { + if (lc.SpikeVGCC == 1) { + Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(VgccCa))] = lc.SpikeVgccCa * Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(Spike))]; } - Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(VgccCaInt))] += Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(VgccCa))] - (*lc).VgccDt*Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(VgccCaInt))]; + Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(VgccCaInt))] += Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(VgccCa))] - lc.VgccDt*Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(VgccCaInt))]; } -fn LearnCaParams_LearnCas(lc: ptr, ctx: ptr, ni: u32,di: u32) { +fn LearnCaParams_LearnCas(lc: LearnCaParams, ctx: Context, ni: u32,di: u32) { LearnCaParams_VgccCaFromSpike(lc, ctx, ni, di); - Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(LearnCa))] = (*lc).NormInv * (Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(NmdaCa))] + Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(VgccCaInt))]); - Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(LearnCaM))] += (*lc).Dt.MDt * (Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], 
u32(ni), u32(di), u32(LearnCa))] - Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(LearnCaM))]); - Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(LearnCaP))] += (*lc).Dt.PDt * (Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(LearnCaM))] - Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(LearnCaP))]); - Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(LearnCaD))] += (*lc).Dt.DDt * (Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(LearnCaP))] - Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(LearnCaD))]); + Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(LearnCa))] = lc.NormInv * (Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(NmdaCa))] + Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(VgccCaInt))]); + Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(LearnCaM))] += lc.Dt.MDt * (Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(LearnCa))] - Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(LearnCaM))]); + Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(LearnCaP))] += lc.Dt.PDt * (Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(LearnCaM))] - Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(LearnCaP))]); + Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(LearnCaD))] += lc.Dt.DDt * 
(Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(LearnCaP))] - Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(LearnCaD))]); Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(CaDiff))] = Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(LearnCaP))] - Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(LearnCaD))]; } struct TrgAvgActParams { @@ -1618,26 +1617,26 @@ struct LearnNeuronParams { RLRate: RLRateParams, NeuroMod: NeuroModParams, } -fn LearnNeuronParams_LearnNMDAFromRaw(ln: ptr, ctx: ptr, ni: u32,di: u32, geTot: f32) { +fn LearnNeuronParams_LearnNMDAFromRaw(ln: LearnNeuronParams, ctx: Context, ni: u32,di: u32, geTot: f32) { var geEff = max(geTot, 0.0); var vmd = Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(VmDend))]; - Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(GnmdaLrn))] = NMDAParams_NMDASyn(&(*ln).LearnNMDA, Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(GnmdaLrn))], geEff); - var gnmda = NMDAParams_Gnmda(&(*ln).LearnNMDA, Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(GnmdaLrn))], vmd); - Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(NmdaCa))] = f32(gnmda * NMDAParams_CaFromV(&(*ln).LearnNMDA, vmd)); + Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(GnmdaLrn))] = NMDAParams_NMDASyn(ln.LearnNMDA, Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(GnmdaLrn))], geEff); + var gnmda = NMDAParams_Gnmda(ln.LearnNMDA, Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), 
u32(GnmdaLrn))], vmd); + Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(NmdaCa))] = f32(gnmda * NMDAParams_CaFromV(ln.LearnNMDA, vmd)); } -fn LearnNeuronParams_CaFromSpike(ln: ptr, ctx: ptr, ni: u32,di: u32) { +fn LearnNeuronParams_CaFromSpike(ln: LearnNeuronParams, ctx: Context, ni: u32,di: u32) { var caM = Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(CaM))]; var caP = Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(CaP))]; var caD = Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(CaD))]; var spike = Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(Spike))]; - CaSpikeParams_CaMFromSpike(&(*ln).CaSpike, spike, &caM, &caP, &caD); + CaSpikeParams_CaMFromSpike(ln.CaSpike, spike, &caM, &caP, &caD); Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(CaM))] = caM; Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(CaP))] = caP; Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(CaD))] = caD; var caSyn = Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(CaSyn))]; - caSyn = CaSpikeParams_CaSynFromSpike(&(*ln).CaSpike, spike, caSyn); + caSyn = CaSpikeParams_CaSynFromSpike(ln.CaSpike, spike, caSyn); Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(CaSyn))] = caSyn; - LearnCaParams_LearnCas(&(*ln).CaLearn, ctx, ni, di); + LearnCaParams_LearnCas(ln.CaLearn, ctx, ni, di); } struct SWtInitParams { SPct: f32, @@ -1729,12 +1728,12 @@ struct F32 { pad: i32, pad1: i32, // for gpu use } -fn F32_ClampValue(mr: ptr, val: f32) -> f32 { - if (val < (*mr).Min) { - return (*mr).Min; +fn F32_ClampValue(mr: F32, val: f32) -> f32 { + if 
(val < mr.Min) { + return mr.Min; } - if (val > (*mr).Max) { - return (*mr).Max; + if (val > mr.Max) { + return mr.Max; }return val; } @@ -1782,29 +1781,29 @@ struct NeuroModParams { pad1: f32, pad2: f32, } -fn NeuroModParams_IsBLAExt(nm: ptr) -> bool { - return ((*nm).Valence == Positive && (*nm).DAMod == D2Mod) || - ((*nm).Valence == Negative && (*nm).DAMod == D1Mod); +fn NeuroModParams_IsBLAExt(nm: NeuroModParams) -> bool { + return (nm.Valence == Positive && nm.DAMod == D2Mod) || + (nm.Valence == Negative && nm.DAMod == D1Mod); } -fn NeuroModParams_GGain(nm: ptr, da: f32) -> f32 { +fn NeuroModParams_GGain(nm: NeuroModParams, da: f32) -> f32 { var ada = da; if (da > 0) { - ada *= (*nm).BurstGain; + ada *= nm.BurstGain; } else { - ada *= (*nm).DipGain; + ada *= nm.DipGain; } var gain = f32(1); - switch ((*nm).DAMod) { + switch (nm.DAMod) { case NoDAMod: { } case D1Mod: { - gain += (*nm).DAModGain * ada; + gain += nm.DAModGain * ada; } case D2Mod: { - gain -= (*nm).DAModGain * ada; + gain -= nm.DAModGain * ada; } case D1AbsMod: { - gain += (*nm).DAModGain * abs(ada); + gain += nm.DAModGain * abs(ada); } default: { } @@ -1813,11 +1812,11 @@ fn NeuroModParams_GGain(nm: ptr, da: f32) -> f32 { gain = f32(0); }return gain; } -fn NeuroModParams_GiFromACh(nm: ptr, ach: f32) -> f32 { +fn NeuroModParams_GiFromACh(nm: NeuroModParams, ach: f32) -> f32 { var ai = 1 - ach; if (ai < 0) { ai = f32(0); - }return (*nm).AChDisInhib * ai; + }return nm.AChDisInhib * ai; } //////// import: "neuron.go" @@ -2084,8 +2083,8 @@ struct RWDaParams { pad: u32, pad1: u32, } -fn RWDaParams_GeFromDA(rp: ptr, da: f32) -> f32 { - var ge = (*rp).TonicGe * (1.0 + da); +fn RWDaParams_GeFromDA(rp: RWDaParams, da: f32) -> f32 { + var ge = rp.TonicGe * (1.0 + da); if (ge < 0) { ge = f32(0); }return ge; @@ -2102,8 +2101,8 @@ struct TDDaParams { pad: u32, pad1: u32, } -fn TDDaParams_GeFromDA(tp: ptr, da: f32) -> f32 { - return (*tp).TonicGe * (1.0 + da); +fn TDDaParams_GeFromDA(tp: TDDaParams, da: f32) -> 
f32 { + return tp.TonicGe * (1.0 + da); } //////// import: "rl-path.go" @@ -2142,7 +2141,7 @@ struct BLAPathParams { //////// import: "rubicon.go" fn RubiconUSStimValue(di: u32, usIndex: u32, valence: ValenceTypes) -> f32 { - var nix = NetworkIxs[0]; + let nix = NetworkIxs[0]; var us = f32(0); switch (valence) { case Positive: { diff --git a/axon/shaders/CyclePost.wgsl b/axon/shaders/CyclePost.wgsl index 367a4171d..0e3856181 100644 --- a/axon/shaders/CyclePost.wgsl +++ b/axon/shaders/CyclePost.wgsl @@ -25,7 +25,7 @@ var RecvPathIxs: array; var PathRecvCon: array; @group(1) @binding(4) var RecvSynIxs: array; -// // Ctx is the current context state (one only). +// // Ctx is the current context state (one only). This is read-only except in // specific kernels. @group(2) @binding(0) var Ctx: array; @group(2) @binding(1) @@ -78,10 +78,10 @@ fn Index3D(s0: u32, s1: u32, s2: u32, i0: u32, i1: u32, i2: u32) -> u32 { //////// import: "vars.go" //////// import: "act-layer.go" -fn LayerParams_CyclePost(ly: ptr, ctx: ptr, di: u32) { +fn LayerParams_CyclePost(ly: LayerParams, ctx: Context, di: u32) { var lpi = LayerParams_PoolIndex(ly, u32(u32(0))); LayerParams_CyclePostLayer(ly, ctx, lpi, di); - switch ((*ly).Type) { + switch (ly.Type) { case MatrixLayer, BGThalLayer: { LayerParams_GatedFromCaPMax(ly, ctx, di); } @@ -89,16 +89,16 @@ fn LayerParams_CyclePost(ly: ptr, ctx: ptr, ctx: ptr, ctx: ptr, lpi: u32,di: u32) { +fn LayerParams_CyclePostLayer(ly: LayerParams, ctx: Context, lpi: u32,di: u32) { var casp = PoolAvgMax(AMCaP, AMCycle, Max, lpi, di); - if ((*ctx).Cycle >= (*ly).Acts.Dt.MaxCycStart) { - if (casp > (*ly).Inhib.ActAvg.RTThr && LayerStates[Index3D(TensorStrides[90], TensorStrides[91], TensorStrides[92], u32((*ly).Index), u32(di), u32(LayerRT))] <= 0) { - LayerStates[Index3D(TensorStrides[90], TensorStrides[91], TensorStrides[92], u32((*ly).Index), u32(di), u32(LayerRT))] = f32((*ctx).Cycle); + if (ctx.Cycle >= ly.Acts.Dt.MaxCycStart) { + if (casp > 
ly.Inhib.ActAvg.RTThr && LayerStates[Index3D(TensorStrides[90], TensorStrides[91], TensorStrides[92], u32(ly.Index), u32(di), u32(LayerRT))] <= 0) { + LayerStates[Index3D(TensorStrides[90], TensorStrides[91], TensorStrides[92], u32(ly.Index), u32(di), u32(LayerRT))] = f32(ctx.Cycle); } - if (PoolsInt[Index3D(TensorStrides[140], TensorStrides[141], TensorStrides[142], u32(lpi), u32(di), u32(PoolGated))] > 0 && LayerStates[Index3D(TensorStrides[90], TensorStrides[91], TensorStrides[92], u32((*ly).Index), u32(di), u32(GatedRT))] <= 0) { - LayerStates[Index3D(TensorStrides[90], TensorStrides[91], TensorStrides[92], u32((*ly).Index), u32(di), u32(GatedRT))] = f32((*ctx).Cycle); + if (PoolsInt[Index3D(TensorStrides[140], TensorStrides[141], TensorStrides[142], u32(lpi), u32(di), u32(PoolGated))] > 0 && LayerStates[Index3D(TensorStrides[90], TensorStrides[91], TensorStrides[92], u32(ly.Index), u32(di), u32(GatedRT))] <= 0) { + LayerStates[Index3D(TensorStrides[90], TensorStrides[91], TensorStrides[92], u32(ly.Index), u32(di), u32(GatedRT))] = f32(ctx.Cycle); } } } -fn LayerParams_LDTSrcLayAct(ly: ptr, layIndex: i32, di: u32) -> f32 { +fn LayerParams_LDTSrcLayAct(ly: LayerParams, layIndex: i32, di: u32) -> f32 { if (layIndex < 0) { return f32(0); } - var oly = Layers[u32(layIndex)]; - var opi = LayerParams_PoolIndex(&oly, u32(u32(0)));return PoolAvgMax(AMCaP, AMCycle, Avg, opi, di); + let oly = Layers[u32(layIndex)]; + var opi = LayerParams_PoolIndex(oly, u32(u32(0)));return PoolAvgMax(AMCaP, AMCycle, Avg, opi, di); } -fn LayerParams_CyclePostLDTLayer(ly: ptr, ctx: ptr, di: u32, srcLay1Act: f32,srcLay2Act: f32,srcLay3Act: f32,srcLay4Act: f32) { - var ach = LDTParams_ACh(&(*ly).LDT, ctx, di, srcLay1Act, srcLay2Act, srcLay3Act, srcLay4Act); +fn LayerParams_CyclePostLDTLayer(ly: LayerParams, ctx: Context, di: u32, srcLay1Act: f32,srcLay2Act: f32,srcLay3Act: f32,srcLay4Act: f32) { + var ach = LDTParams_ACh(ly.LDT, ctx, di, srcLay1Act, srcLay2Act, srcLay3Act, srcLay4Act); 
GlobalScalars[Index2D(TensorStrides[100], TensorStrides[101], u32(GvAChRaw), u32(di))] = ach; if (ach > GlobalScalars[Index2D(TensorStrides[100], TensorStrides[101], // instant up u32(GvACh), u32(di))]) { GlobalScalars[Index2D(TensorStrides[100], TensorStrides[101], u32(GvACh), u32(di))] = ach; } else { - GlobalScalars[Index2D(TensorStrides[100], TensorStrides[101], u32(GvACh), u32(di))] += (*ly).Acts.Dt.IntDt * (ach - GlobalScalars[Index2D(TensorStrides[100], TensorStrides[101], u32(GvACh), u32(di))]); + GlobalScalars[Index2D(TensorStrides[100], TensorStrides[101], u32(GvACh), u32(di))] += ly.Acts.Dt.IntDt * (ach - GlobalScalars[Index2D(TensorStrides[100], TensorStrides[101], u32(GvACh), u32(di))]); } } -fn LayerParams_CyclePostRWDaLayer(ly: ptr, ctx: ptr, di: u32) { - var pli = u32((*ly).RWDa.RWPredLayIndex); +fn LayerParams_CyclePostRWDaLayer(ly: LayerParams, ctx: Context, di: u32) { + var pli = u32(ly.RWDa.RWPredLayIndex); var pred = LayerStates[Index3D(TensorStrides[90], TensorStrides[91], TensorStrides[92], u32(pli), u32(di), u32(LayerRewPredPos))] - LayerStates[Index3D(TensorStrides[90], TensorStrides[91], TensorStrides[92], u32(pli), u32(di), u32(LayerRewPredNeg))]; GlobalScalars[Index2D(TensorStrides[100], TensorStrides[101], // record u32(GvRewPred), u32(di))] = pred; @@ -160,57 +160,57 @@ fn LayerParams_CyclePostRWDaLayer(ly: ptr, ctx: ptr, ctx: ptr, di: u32) { - if ((*ctx).PlusPhase == 0) { +fn LayerParams_CyclePostTDPredLayer(ly: LayerParams, ctx: Context, di: u32) { + if (ctx.PlusPhase == 0) { return; } - var pred = LayerStates[Index3D(TensorStrides[90], TensorStrides[91], TensorStrides[92], u32((*ly).Index), u32(di), u32(LayerRewPredPos))] - LayerStates[Index3D(TensorStrides[90], TensorStrides[91], TensorStrides[92], u32((*ly).Index), u32(di), u32(LayerRewPredNeg))]; + var pred = LayerStates[Index3D(TensorStrides[90], TensorStrides[91], TensorStrides[92], u32(ly.Index), u32(di), u32(LayerRewPredPos))] - LayerStates[Index3D(TensorStrides[90], 
TensorStrides[91], TensorStrides[92], u32(ly.Index), u32(di), u32(LayerRewPredNeg))]; GlobalScalars[Index2D(TensorStrides[100], TensorStrides[101], u32(GvPrevPred), u32(di))] = pred; } -fn LayerParams_CyclePostTDIntegLayer(ly: ptr, ctx: ptr, di: u32) { +fn LayerParams_CyclePostTDIntegLayer(ly: LayerParams, ctx: Context, di: u32) { var rew = f32(0); if (GlobalScalars[Index2D(TensorStrides[100], TensorStrides[101], u32(GvHasRew), u32(di))] > 0) { rew = GlobalScalars[Index2D(TensorStrides[100], TensorStrides[101], u32(GvRew), u32(di))]; } var rpval = f32(0); - if ((*ctx).PlusPhase == 1) { - var pli = u32((*ly).TDInteg.TDPredLayIndex); + if (ctx.PlusPhase == 1) { + var pli = u32(ly.TDInteg.TDPredLayIndex); var pred = LayerStates[Index3D(TensorStrides[90], TensorStrides[91], TensorStrides[92], u32(pli), u32(di), u32(LayerRewPredPos))] - LayerStates[Index3D(TensorStrides[90], TensorStrides[91], TensorStrides[92], u32(pli), u32(di), u32(LayerRewPredNeg))]; - rpval = rew + (*ly).TDInteg.Discount*(*ly).TDInteg.PredGain*pred; + rpval = rew + ly.TDInteg.Discount*ly.TDInteg.PredGain*pred; LayerStates[Index3D(TensorStrides[90], TensorStrides[91], TensorStrides[92], // our plus phase = new integrated value - u32((*ly).Index), u32(di), u32(LayerRewPredPos))] = rpval; + u32(ly.Index), u32(di), u32(LayerRewPredPos))] = rpval; } else { - rpval = (*ly).TDInteg.PredGain * GlobalScalars[Index2D(TensorStrides[100], TensorStrides[101], u32(GvPrevPred), u32(di))]; + rpval = ly.TDInteg.PredGain * GlobalScalars[Index2D(TensorStrides[100], TensorStrides[101], u32(GvPrevPred), u32(di))]; LayerStates[Index3D(TensorStrides[90], TensorStrides[91], TensorStrides[92], // our minus phase = prior integrated value - u32((*ly).Index), u32(di), u32(LayerRewPredNeg))] = rpval; + u32(ly.Index), u32(di), u32(LayerRewPredNeg))] = rpval; } GlobalScalars[Index2D(TensorStrides[100], TensorStrides[101], // global value will be copied to layers next cycle u32(GvRewPred), u32(di))] = rpval; } -fn 
LayerParams_CyclePostTDDaLayer(ly: ptr, ctx: ptr, di: u32) { - var ili = u32((*ly).TDDa.TDIntegLayIndex); +fn LayerParams_CyclePostTDDaLayer(ly: LayerParams, ctx: Context, di: u32) { + var ili = u32(ly.TDDa.TDIntegLayIndex); var da = LayerStates[Index3D(TensorStrides[90], TensorStrides[91], TensorStrides[92], u32(ili), u32(di), u32(LayerRewPredPos))] - LayerStates[Index3D(TensorStrides[90], TensorStrides[91], TensorStrides[92], u32(ili), u32(di), u32(LayerRewPredNeg))]; - if ((*ctx).PlusPhase == 0) { + if (ctx.PlusPhase == 0) { da = f32(0); } GlobalScalars[Index2D(TensorStrides[100], TensorStrides[101], // updates global value that will be copied to layers next cycle. u32(GvDA), u32(di))] = da; } -fn LayerParams_CyclePostCeMLayer(ly: ptr, ctx: ptr, lpi: u32,di: u32) { +fn LayerParams_CyclePostCeMLayer(ly: LayerParams, ctx: Context, lpi: u32,di: u32) { var casd = PoolAvgMax(AMCaD, AMCycle, Max, lpi, di); - if ((*ly).Learn.NeuroMod.Valence == Positive) { + if (ly.Learn.NeuroMod.Valence == Positive) { GlobalScalars[Index2D(TensorStrides[100], TensorStrides[101], u32(GvCeMpos), u32(di))] = casd; } else { GlobalScalars[Index2D(TensorStrides[100], TensorStrides[101], u32(GvCeMneg), u32(di))] = casd; } } -fn LayerParams_CyclePostVTALayer(ly: ptr, ctx: ptr, di: u32) { - VTAParams_VTADA(&(*ly).VTA, ctx, di, GlobalScalars[Index2D(TensorStrides[100], TensorStrides[101], u32(GvACh), u32(di))], (GlobalScalars[Index2D(TensorStrides[100], TensorStrides[101], +fn LayerParams_CyclePostVTALayer(ly: LayerParams, ctx: Context, di: u32) { + VTAParams_VTADA(ly.VTA, ctx, di, GlobalScalars[Index2D(TensorStrides[100], TensorStrides[101], u32(GvACh), u32(di))], (GlobalScalars[Index2D(TensorStrides[100], TensorStrides[101], u32(GvHasRew), u32(di))] > 0)); } -fn LayerParams_CyclePostVSPatchLayer(ly: ptr, ctx: ptr, pi: u32,di: u32, spi: i32) { +fn LayerParams_CyclePostVSPatchLayer(ly: LayerParams, ctx: Context, pi: u32,di: u32, spi: i32) { var casd = PoolAvgMax(AMCaD, AMCycle, Avg, pi, di); - 
if ((*ly).Learn.NeuroMod.DAMod == D1Mod) { + if (ly.Learn.NeuroMod.DAMod == D1Mod) { GlobalVectors[Index3D(TensorStrides[110], TensorStrides[111], TensorStrides[112], u32(GvVSPatchD1), u32(u32(spi - 1)), u32(di))] = casd; } else { GlobalVectors[Index3D(TensorStrides[110], TensorStrides[111], TensorStrides[112], @@ -220,14 +220,13 @@ fn LayerParams_CyclePostVSPatchLayer(ly: ptr, ctx: ptr= NetworkIxs[0].NLayers) { return; } - var di = Context_DataIndex(&ctx, i); - var layers=Layers[li]; LayerParams_CyclePost(&layers, &ctx, di); - Ctx[0] = ctx; + var di = Context_DataIndex(ctx, i); + let layers=Layers[li]; LayerParams_CyclePost(layers, ctx, di); } //////// import: "act-path.go" @@ -523,11 +522,11 @@ struct Context { //types:add -setters SlowCounter: i32, RandCounter: RandCounter, } -fn Context_ItemIndex(ctx: ptr, idx: u32) -> u32 { - return idx / (*ctx).NData; +fn Context_ItemIndex(ctx: Context, idx: u32) -> u32 { + return idx / ctx.NData; } -fn Context_DataIndex(ctx: ptr, idx: u32) -> u32 { - return idx % (*ctx).NData; +fn Context_DataIndex(ctx: Context, idx: u32) -> u32 { + return idx % ctx.NData; } //////// import: "deep-layer.go" @@ -785,8 +784,8 @@ struct LayerParams { TDDa: TDDaParams, Indexes: LayerIndexes, } -fn LayerParams_PoolIndex(ly: ptr, pi: u32) -> u32 { - return (*ly).PoolSt + pi; +fn LayerParams_PoolIndex(ly: LayerParams, pi: u32) -> u32 { + return ly.PoolSt + pi; } //////// import: "layertypes.go" @@ -1202,12 +1201,12 @@ struct GPParams { pad1: u32, pad2: u32, } -fn LayerParams_GatedFromCaPMax(ly: ptr, ctx: ptr, di: u32) { +fn LayerParams_GatedFromCaPMax(ly: LayerParams, ctx: Context, di: u32) { var anyGated = false; var lpi = LayerParams_PoolIndex(ly, u32(u32(0))); - var thr = (*ly).Matrix.GateThr; - if ((*ly).Indexes.NPools > 1) { - for (var spi = u32(1); spi < (*ly).Indexes.NPools; spi++) { + var thr = ly.Matrix.GateThr; + if (ly.Indexes.NPools > 1) { + for (var spi = u32(1); spi < ly.Indexes.NPools; spi++) { var pi = LayerParams_PoolIndex(ly, 
spi); var spkavg = PoolAvgMax(AMCaPMax, AMCycle, Avg, pi, di); var gthr = spkavg > thr; @@ -1330,26 +1329,26 @@ struct LDTParams { SrcLay4Index: i32, pad: f32, } -fn LDTParams_Thr(lp: ptr, val: f32) -> f32 { +fn LDTParams_Thr(lp: LDTParams, val: f32) -> f32 { var vl = abs(val); // only abs makes sense -- typically positive anyway - if ((*lp).SrcThr <= 0) { + if (lp.SrcThr <= 0) { return vl; } - if (vl < (*lp).SrcThr) { + if (vl < lp.SrcThr) { return f32(0); }return f32( 1); } -fn LDTParams_MaxSrcAct(lp: ptr, maxSrcAct: f32,srcLayAct: f32) -> f32 { +fn LDTParams_MaxSrcAct(lp: LDTParams, maxSrcAct: f32,srcLayAct: f32) -> f32 { var act = LDTParams_Thr(lp, srcLayAct);return max(act, maxSrcAct); } -fn LDTParams_ACh(lp: ptr, ctx: ptr, di: u32, srcLay1Act: f32,srcLay2Act: f32,srcLay3Act: f32,srcLay4Act: f32) -> f32 { +fn LDTParams_ACh(lp: LDTParams, ctx: Context, di: u32, srcLay1Act: f32,srcLay2Act: f32,srcLay3Act: f32,srcLay4Act: f32) -> f32 { var maxSrcAct = f32(0); maxSrcAct = LDTParams_MaxSrcAct(lp, maxSrcAct, srcLay1Act); maxSrcAct = LDTParams_MaxSrcAct(lp, maxSrcAct, srcLay2Act); maxSrcAct = LDTParams_MaxSrcAct(lp, maxSrcAct, srcLay3Act); maxSrcAct = LDTParams_MaxSrcAct(lp, maxSrcAct, srcLay4Act); - var maintInh = (*lp).MaintInhib * GlobalScalars[Index2D(TensorStrides[100], TensorStrides[101], u32(GvGoalMaint), u32(di))]; + var maintInh = lp.MaintInhib * GlobalScalars[Index2D(TensorStrides[100], TensorStrides[101], u32(GvGoalMaint), u32(di))]; maintInh = min(1.0, maintInh); maxSrcAct *= (1.0 - maintInh); var ach = maxSrcAct; @@ -1365,11 +1364,11 @@ struct VTAParams { AChThr: f32, pad: f32, } -fn VTAParams_VTADA(vt: ptr, ctx: ptr, di: u32, ach: f32, hasRew: bool) { - var pvDA = (*vt).LHbGain * GlobalScalars[Index2D(TensorStrides[100], TensorStrides[101], u32(GvLHbPVDA), u32(di))]; +fn VTAParams_VTADA(vt: VTAParams, ctx: Context, di: u32, ach: f32, hasRew: bool) { + var pvDA = vt.LHbGain * GlobalScalars[Index2D(TensorStrides[100], TensorStrides[101], u32(GvLHbPVDA), 
u32(di))]; var csNet = GlobalScalars[Index2D(TensorStrides[100], TensorStrides[101], u32(GvCeMpos), u32(di))] - GlobalScalars[Index2D(TensorStrides[100], TensorStrides[101], u32(GvCeMneg), u32(di))]; var achMod = f32(0); - if (ach >= (*vt).AChThr) { + if (ach >= vt.AChThr) { achMod = ach; } var vsPatch = GlobalScalars[Index2D(TensorStrides[100], TensorStrides[101], // note: critical to use thresholded version @@ -1377,7 +1376,7 @@ fn VTAParams_VTADA(vt: ptr, ctx: ptr, di: if (csNet > 0) { csNet = max(0.0, csNet-vsPatch); // vspatch can shunt positive CS DA, but no dipping! that is lhb } - var csDA = achMod * (*vt).CeMGain * csNet; + var csDA = achMod * vt.CeMGain * csNet; var netDA = f32(0); if (hasRew) { netDA = pvDA; diff --git a/axon/shaders/DWtFromDiSyn.wgsl b/axon/shaders/DWtFromDiSyn.wgsl index 3c6a72f38..712fa1e81 100644 --- a/axon/shaders/DWtFromDiSyn.wgsl +++ b/axon/shaders/DWtFromDiSyn.wgsl @@ -25,7 +25,7 @@ var RecvPathIxs: array; var PathRecvCon: array; @group(1) @binding(4) var RecvSynIxs: array; -// // Ctx is the current context state (one only). +// // Ctx is the current context state (one only). This is read-only except in // specific kernels. 
@group(2) @binding(0) var Ctx: array; @group(2) @binding(1) @@ -683,19 +683,18 @@ const LayerRewPredNeg: LayerVars = 11; //////// import: "learn-net.go" fn DWtFromDiSyn(syni: u32) { //gosl:kernel - var ctx = Ctx[0]; + let ctx = Ctx[0]; if (syni >= NetworkIxs[0].NSyns) { return; } var pti = SynapseIxs[Index2D(TensorStrides[20], TensorStrides[21], u32(syni), u32(SynPathIndex))]; - var paths=Paths[pti]; PathParams_DWtFromDi(&paths, &ctx, syni); - Ctx[0] = ctx; + let paths=Paths[pti]; PathParams_DWtFromDi(paths, ctx, syni); } //////// import: "learn-path.go" -fn PathParams_DWtFromDi(pt: ptr, ctx: ptr, syni: u32) { +fn PathParams_DWtFromDi(pt: PathParams, ctx: Context, syni: u32) { var dwt = f32(0); - for (var di = u32(0); di < (*ctx).NData; di++) { + for (var di = u32(0); di < ctx.NData; di++) { dwt += SynapseTraces[Index3D(TensorStrides[180], TensorStrides[181], TensorStrides[182], u32(syni), u32(di), u32(DiDWt))]; } Synapses[Index2D(TensorStrides[170], TensorStrides[171], diff --git a/axon/shaders/DWtSubMeanNeuron.wgsl b/axon/shaders/DWtSubMeanNeuron.wgsl index 06b4694be..6a1112bf5 100644 --- a/axon/shaders/DWtSubMeanNeuron.wgsl +++ b/axon/shaders/DWtSubMeanNeuron.wgsl @@ -25,7 +25,7 @@ var RecvPathIxs: array; var PathRecvCon: array; @group(1) @binding(4) var RecvSynIxs: array; -// // Ctx is the current context state (one only). +// // Ctx is the current context state (one only). This is read-only except in // specific kernels. 
@group(2) @binding(0) var Ctx: array; @group(2) @binding(1) @@ -680,41 +680,40 @@ const LayerRewPredPos: LayerVars = 10; const LayerRewPredNeg: LayerVars = 11; //////// import: "learn-layer.go" -fn LayerParams_DWtSubMean(ly: ptr, ctx: ptr, ri: u32) { - var lni = ri - (*ly).Indexes.NeurSt; - var rn = (*ly).Indexes.RecvN; +fn LayerParams_DWtSubMean(ly: LayerParams, ctx: Context, ri: u32) { + var lni = ri - ly.Indexes.NeurSt; + var rn = ly.Indexes.RecvN; for (var pi = u32(0); pi < rn; pi++) { - var pti = RecvPathIxs[Index1D(TensorStrides[40], u32((*ly).Indexes.RecvSt + pi))]; - var paths=Paths[pti]; PathParams_DWtSubMean(&paths, ctx, pti, ri, lni); + var pti = RecvPathIxs[Index1D(TensorStrides[40], u32(ly.Indexes.RecvSt + pi))]; + let paths=Paths[pti]; PathParams_DWtSubMean(paths, ctx, pti, ri, lni); } } //////// import: "learn-net.go" fn DWtSubMeanNeuron(ni: u32) { //gosl:kernel - var ctx = Ctx[0]; + let ctx = Ctx[0]; if (ni >= NetworkIxs[0].NNeurons) { return; } var li = NeuronIxs[Index2D(TensorStrides[10], TensorStrides[11], u32(ni), u32(NrnLayIndex))]; - var layers=Layers[li]; LayerParams_DWtSubMean(&layers, &ctx, ni); - Ctx[0] = ctx; + let layers=Layers[li]; LayerParams_DWtSubMean(layers, ctx, ni); } //////// import: "learn-path.go" -fn PathParams_DWtSubMean(pt: ptr, ctx: ptr, pti: u32,ri: u32,lni: u32) { - if ((*pt).Learn.Learn == 0) { +fn PathParams_DWtSubMean(pt: PathParams, ctx: Context, pti: u32,ri: u32,lni: u32) { + if (pt.Learn.Learn == 0) { return; } - var sm = (*pt).Learn.DWt.SubMean; + var sm = pt.Learn.DWt.SubMean; if (sm == 0) { // note default is now 0, so don't exclude Target layers, which should be 0 return; } - var cni = (*pt).Indexes.RecvConSt + lni; + var cni = pt.Indexes.RecvConSt + lni; var synn = PathRecvCon[Index2D(TensorStrides[50], TensorStrides[51], u32(cni), u32(Nitems))]; if (synn < 1) { return; } - var synst = (*pt).Indexes.RecvSynSt + PathRecvCon[Index2D(TensorStrides[50], TensorStrides[51], u32(cni), u32(StartOff))]; + var synst = 
pt.Indexes.RecvSynSt + PathRecvCon[Index2D(TensorStrides[50], TensorStrides[51], u32(cni), u32(StartOff))]; var sumDWt = f32(0); var nnz = 0; // non-zero for (var ci = u32(0); ci < synn; ci++) { diff --git a/axon/shaders/DWtSyn.wgsl b/axon/shaders/DWtSyn.wgsl index 1349d5fd6..56c8a7812 100644 --- a/axon/shaders/DWtSyn.wgsl +++ b/axon/shaders/DWtSyn.wgsl @@ -25,7 +25,7 @@ var RecvPathIxs: array; var PathRecvCon: array; @group(1) @binding(4) var RecvSynIxs: array; -// // Ctx is the current context state (one only). +// // Ctx is the current context state (one only). This is read-only except in // specific kernels. @group(2) @binding(0) var Ctx: array; @group(2) @binding(1) @@ -374,11 +374,11 @@ struct Context { //types:add -setters SlowCounter: i32, RandCounter: RandCounter, } -fn Context_ItemIndex(ctx: ptr, idx: u32) -> u32 { - return idx / (*ctx).NData; +fn Context_ItemIndex(ctx: Context, idx: u32) -> u32 { + return idx / ctx.NData; } -fn Context_DataIndex(ctx: ptr, idx: u32) -> u32 { - return idx % (*ctx).NData; +fn Context_DataIndex(ctx: Context, idx: u32) -> u32 { + return idx % ctx.NData; } //////// import: "deep-layer.go" @@ -636,8 +636,8 @@ struct LayerParams { TDDa: TDDaParams, Indexes: LayerIndexes, } -fn LayerParams_PoolIndex(ly: ptr, pi: u32) -> u32 { - return (*ly).PoolSt + pi; +fn LayerParams_PoolIndex(ly: LayerParams, pi: u32) -> u32 { + return ly.PoolSt + pi; } //////// import: "layertypes.go" @@ -692,29 +692,28 @@ const LayerRewPredNeg: LayerVars = 11; //////// import: "learn-net.go" fn DWtSyn(i: u32) { //gosl:kernel - var ctx = Ctx[0]; - var syni = Context_ItemIndex(&ctx, i); + let ctx = Ctx[0]; + var syni = Context_ItemIndex(ctx, i); if (syni >= NetworkIxs[0].NSyns) { return; } - var di = Context_DataIndex(&ctx, i); + var di = Context_DataIndex(ctx, i); var pti = SynapseIxs[Index2D(TensorStrides[20], TensorStrides[21], u32(syni), u32(SynPathIndex))]; var si = SynapseIxs[Index2D(TensorStrides[20], TensorStrides[21], u32(syni), u32(SynSendIndex))]; 
var ri = SynapseIxs[Index2D(TensorStrides[20], TensorStrides[21], u32(syni), u32(SynRecvIndex))]; - var paths=Paths[pti]; var layers=Layers[Paths[pti].Indexes.RecvLayer]; PathParams_DWtSyn(&paths, &ctx, &layers, syni, si, ri, di); - Ctx[0] = ctx; + let paths=Paths[pti]; let layers=Layers[Paths[pti].Indexes.RecvLayer]; PathParams_DWtSyn(paths, ctx, layers, syni, si, ri, di); } //////// import: "learn-path.go" -fn PathParams_DWtSyn(pt: ptr, ctx: ptr, rlay: ptr, syni: u32,si: u32,ri: u32,di: u32) { - if ((*pt).Learn.Learn == 0) { +fn PathParams_DWtSyn(pt: PathParams, ctx: Context, rlay: LayerParams, syni: u32,si: u32,ri: u32,di: u32) { + if (pt.Learn.Learn == 0) { return; } - var isTarget = (*rlay).Acts.Clamp.IsTarget > 0; + var isTarget = rlay.Acts.Clamp.IsTarget > 0; var spi = NeuronIxs[Index2D(TensorStrides[10], TensorStrides[11], u32(ri), u32(NrnSubPool))]; var pi = LayerParams_PoolIndex(rlay, spi); var lpi = LayerParams_PoolIndex(rlay, u32(u32(0))); - switch ((*pt).Type) { + switch (pt.Type) { case RWPath: { PathParams_DWtSynRWPred(pt, ctx, syni, si, ri, lpi, pi, di); } @@ -737,7 +736,7 @@ fn PathParams_DWtSyn(pt: ptr, ctx: ptr, r PathParams_DWtSynHip(pt, ctx, syni, si, ri, lpi, pi, di, isTarget); } // by default this is the same as DWtSynCortex (w/ unused Hebb component in the algorithm) except that it uses WtFromDWtSynNoLimits default: { - if ((*pt).Learn.Hebb.On == 1) { + if (pt.Learn.Hebb.On == 1) { PathParams_DWtSynHebb(pt, ctx, syni, si, ri, lpi, pi, di); } else { PathParams_DWtSynCortex(pt, ctx, syni, si, ri, lpi, pi, di, isTarget); @@ -745,7 +744,7 @@ fn PathParams_DWtSyn(pt: ptr, ctx: ptr, r } } } -fn PathParams_SynCa(pt: ptr, ctx: ptr, si: u32,ri: u32,di: u32, syCaP: ptr,syCaD: ptr) { +fn PathParams_SynCa(pt: PathParams, ctx: Context, si: u32,ri: u32,di: u32, syCaP: ptr,syCaD: ptr) { var nbins = NetworkIxs[0].NCaBins; var cadSt = GvCaBinWts + GlobalScalarVars(nbins); var cp: f32; @@ -755,21 +754,21 @@ fn PathParams_SynCa(pt: ptr, ctx: ptr, si cp += sp * 
GlobalScalars[Index2D(TensorStrides[100], TensorStrides[101], u32(GvCaBinWts + GlobalScalarVars(i)), u32(0))]; cd += sp * GlobalScalars[Index2D(TensorStrides[100], TensorStrides[101], u32(cadSt + GlobalScalarVars(i)), u32(0))]; } - *syCaP = (*pt).Learn.DWt.CaPScale * (*pt).Learn.DWt.CaScale * cp; - *syCaD = (*pt).Learn.DWt.CaScale * cd; + *syCaP = pt.Learn.DWt.CaPScale * pt.Learn.DWt.CaScale * cp; + *syCaD = pt.Learn.DWt.CaScale * cd; } -fn PathParams_DWtSynCortex(pt: ptr, ctx: ptr, syni: u32,si: u32,ri: u32,lpi: u32,pi: u32,di: u32, isTarget: bool) { +fn PathParams_DWtSynCortex(pt: PathParams, ctx: Context, syni: u32,si: u32,ri: u32,lpi: u32,pi: u32,di: u32, isTarget: bool) { var syCaP: f32; var syCaD: f32; PathParams_SynCa(pt, ctx, si, ri, di, &syCaP, &syCaD); - var dtr = syCaD; // delta trace, caD reflects entire window - if ((*pt).Type == CTCtxtPath) { // layer 6 CT pathway + var dtr = syCaD; // delta trace, caD reflects entire window + if (pt.Type == CTCtxtPath) { // layer 6 CT pathway dtr = Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(si), u32(di), u32(BurstPrv))]; } SynapseTraces[Index3D(TensorStrides[180], TensorStrides[181], TensorStrides[182], u32(syni), u32(di), u32(DTr))] = dtr; - var tr = DWtParams_TrFromCa(&(*pt).Learn.DWt, SynapseTraces[Index3D(TensorStrides[180], TensorStrides[181], + var tr = DWtParams_TrFromCa(pt.Learn.DWt, SynapseTraces[Index3D(TensorStrides[180], TensorStrides[181], TensorStrides[182], u32(syni), u32(di), u32(Tr))], dtr); SynapseTraces[Index3D(TensorStrides[180], TensorStrides[181], TensorStrides[182], u32(syni), u32(di), u32(Tr))] = tr; @@ -777,7 +776,7 @@ fn PathParams_DWtSynCortex(pt: ptr, ctx: ptr, ctx: ptr, ctx: ptr, syni: u32,si: u32,ri: u32,lpi: u32,pi: u32,di: u32) { +fn PathParams_DWtSynHebb(pt: PathParams, ctx: Context, syni: u32,si: u32,ri: u32,lpi: u32,pi: u32,di: u32) { var rLearnCaP = Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ri), u32(di), 
u32(LearnCaP))]; var sNrnCap = Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(si), u32(di), u32(LearnCaP))]; var lwt = Synapses[Index2D(TensorStrides[170], TensorStrides[171], // linear weight u32(syni), u32(LWt))]; - var hebb = rLearnCaP * ((*pt).Learn.Hebb.Up*sNrnCap*(1-lwt) - (*pt).Learn.Hebb.Down*(1-sNrnCap)*lwt); + var hebb = rLearnCaP * (pt.Learn.Hebb.Up*sNrnCap*(1-lwt) - pt.Learn.Hebb.Down*(1-sNrnCap)*lwt); SynapseTraces[Index3D(TensorStrides[180], TensorStrides[181], TensorStrides[182], - u32(syni), u32(di), u32(DiDWt))] = (*pt).Learn.LRate.Eff * hebb; + u32(syni), u32(di), u32(DiDWt))] = pt.Learn.LRate.Eff * hebb; } -fn PathParams_DWtSynHip(pt: ptr, ctx: ptr, syni: u32,si: u32,ri: u32,lpi: u32,pi: u32,di: u32, isTarget: bool) { +fn PathParams_DWtSynHip(pt: PathParams, ctx: Context, syni: u32,si: u32,ri: u32,lpi: u32,pi: u32,di: u32, isTarget: bool) { var syCaP: f32; var syCaD: f32; PathParams_SynCa(pt, ctx, si, ri, di, &syCaP, &syCaD); var dtr = syCaD; // delta trace, caD reflects entire window SynapseTraces[Index3D(TensorStrides[180], TensorStrides[181], TensorStrides[182], u32(syni), u32(di), u32(DTr))] = dtr; - var tr = DWtParams_TrFromCa(&(*pt).Learn.DWt, SynapseTraces[Index3D(TensorStrides[180], TensorStrides[181], + var tr = DWtParams_TrFromCa(pt.Learn.DWt, SynapseTraces[Index3D(TensorStrides[180], TensorStrides[181], TensorStrides[182], u32(syni), u32(di), u32(Tr))], dtr); SynapseTraces[Index3D(TensorStrides[180], TensorStrides[181], TensorStrides[182], u32(syni), u32(di), u32(Tr))] = tr; @@ -835,36 +834,36 @@ fn PathParams_DWtSynHip(pt: ptr, ctx: ptr err *= lwt; } var sNrnCap = Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(si), u32(di), u32(LearnCaP))]; - var savg = 0.5 + (*pt).Hip.SAvgCor*((*pt).Hip.SNominal-0.5); - savg = 0.5 / max((*pt).Hip.SAvgThr, savg); // keep this Sending Average Correction term within bounds (SAvgThr) + var savg = 0.5 + pt.Hip.SAvgCor*(pt.Hip.SNominal-0.5); + 
savg = 0.5 / max(pt.Hip.SAvgThr, savg); // keep this Sending Average Correction term within bounds (SAvgThr) var hebb = rLearnCaP * (sNrnCap*(savg-lwt) - (1-sNrnCap)*lwt); - var dwt = Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ri), u32(di), u32(RLRate))] * (*pt).Learn.LRate.Eff * ((*pt).Hip.Hebb*hebb + (*pt).Hip.Err*err); + var dwt = Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ri), u32(di), u32(RLRate))] * pt.Learn.LRate.Eff * (pt.Hip.Hebb*hebb + pt.Hip.Err*err); SynapseTraces[Index3D(TensorStrides[180], TensorStrides[181], TensorStrides[182], u32(syni), u32(di), u32(DiDWt))] = dwt; } -fn PathParams_DWtSynBLA(pt: ptr, ctx: ptr, syni: u32,si: u32,ri: u32,lpi: u32,pi: u32,di: u32) { +fn PathParams_DWtSynBLA(pt: PathParams, ctx: Context, syni: u32,si: u32,ri: u32,lpi: u32,pi: u32,di: u32) { var dwt = f32(0); var ach = GlobalScalars[Index2D(TensorStrides[100], TensorStrides[101], u32(GvACh), u32(di))]; if (GlobalScalars[Index2D(TensorStrides[100], TensorStrides[101], // learn and reset u32(GvHasRew), u32(di))] > 0) { var ract = Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ri), u32(di), u32(CaD))]; - if (ract < (*pt).Learn.DWt.LearnThr) { + if (ract < pt.Learn.DWt.LearnThr) { ract = f32(0); } var tr = SynapseTraces[Index3D(TensorStrides[180], TensorStrides[181], TensorStrides[182], u32(syni), u32(di), u32(Tr))]; - var ustr = (*pt).BLA.USTrace; + var ustr = pt.BLA.USTrace; tr = ustr*Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(si), u32(di), u32(Burst))] + (1.0-ustr)*tr; var delta = Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ri), u32(di), u32(CaP))] - Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ri), u32(di), u32(CaDPrev))]; if (delta < 0) { - delta *= (*pt).BLA.NegDeltaLRate; + delta *= pt.BLA.NegDeltaLRate; } dwt = tr * delta * ract; SynapseTraces[Index3D(TensorStrides[180], 
TensorStrides[181], TensorStrides[182], u32(syni), u32(di), u32(Tr))] = 0.0; - } else if (ach > (*pt).BLA.AChThr) { + } else if (ach > pt.BLA.AChThr) { var dtr = ach * Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(si), u32(di), u32(Burst))]; SynapseTraces[Index3D(TensorStrides[180], TensorStrides[181], TensorStrides[182], u32(syni), u32(di), u32(DTr))] = dtr; - var tr = DWtParams_TrFromCa(&(*pt).Learn.DWt, SynapseTraces[Index3D(TensorStrides[180], TensorStrides[181], TensorStrides[182], u32(syni), u32(di), u32(Tr))], dtr); + var tr = DWtParams_TrFromCa(pt.Learn.DWt, SynapseTraces[Index3D(TensorStrides[180], TensorStrides[181], TensorStrides[182], u32(syni), u32(di), u32(Tr))], dtr); SynapseTraces[Index3D(TensorStrides[180], TensorStrides[181], TensorStrides[182], u32(syni), u32(di), u32(Tr))] = tr; } else { SynapseTraces[Index3D(TensorStrides[180], TensorStrides[181], TensorStrides[182], u32(syni), u32(di), u32(DTr))] = 0.0; @@ -875,12 +874,12 @@ fn PathParams_DWtSynBLA(pt: ptr, ctx: ptr } else { dwt *= lwt; } - SynapseTraces[Index3D(TensorStrides[180], TensorStrides[181], TensorStrides[182], u32(syni), u32(di), u32(DiDWt))] = Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ri), u32(di), u32(RLRate))] * (*pt).Learn.LRate.Eff * dwt; + SynapseTraces[Index3D(TensorStrides[180], TensorStrides[181], TensorStrides[182], u32(syni), u32(di), u32(DiDWt))] = Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ri), u32(di), u32(RLRate))] * pt.Learn.LRate.Eff * dwt; } -fn PathParams_DWtSynRWPred(pt: ptr, ctx: ptr, syni: u32,si: u32,ri: u32,lpi: u32,pi: u32,di: u32) { +fn PathParams_DWtSynRWPred(pt: PathParams, ctx: Context, syni: u32,si: u32,ri: u32,lpi: u32,pi: u32,di: u32) { var lda = GlobalScalars[Index2D(TensorStrides[100], TensorStrides[101], u32(GvDA), u32(di))]; var da = lda; - var lr = (*pt).Learn.LRate.Eff; + var lr = pt.Learn.LRate.Eff; var eff_lr = lr; if 
(NeuronIxs[Index2D(TensorStrides[10], TensorStrides[11], u32(ri), u32(NrnNeurIndex))] == 0) { if (Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ri), u32(di), u32(Ge))] > Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], // clipped at top, saturate up @@ -892,7 +891,7 @@ fn PathParams_DWtSynRWPred(pt: ptr, ctx: ptr, ctx: ptr= 0) { - eff_lr *= (*pt).RLPred.OppSignLRate; + eff_lr *= pt.RLPred.OppSignLRate; } } var dwt = da * Neurons[Index3D(TensorStrides[70], TensorStrides[71], // no recv unit activation @@ -913,20 +912,20 @@ fn PathParams_DWtSynRWPred(pt: ptr, ctx: ptr, ctx: ptr, syni: u32,si: u32,ri: u32,lpi: u32,pi: u32,di: u32) { +fn PathParams_DWtSynTDPred(pt: PathParams, ctx: Context, syni: u32,si: u32,ri: u32,lpi: u32,pi: u32,di: u32) { var lda = GlobalScalars[Index2D(TensorStrides[100], TensorStrides[101], u32(GvDA), u32(di))]; var da = lda; - var lr = (*pt).Learn.LRate.Eff; + var lr = pt.Learn.LRate.Eff; var eff_lr = lr; var ni = NeuronIxs[Index2D(TensorStrides[10], TensorStrides[11], u32(ri), u32(NrnNeurIndex))]; if (ni == 0) { if (da < 0) { - eff_lr *= (*pt).RLPred.OppSignLRate; + eff_lr *= pt.RLPred.OppSignLRate; } } else { eff_lr = -eff_lr; if (da >= 0) { - eff_lr *= (*pt).RLPred.OppSignLRate; + eff_lr *= pt.RLPred.OppSignLRate; } } var dwt = da * Neurons[Index3D(TensorStrides[70], TensorStrides[71], // no recv unit activation, prior trial act @@ -934,7 +933,7 @@ fn PathParams_DWtSynTDPred(pt: ptr, ctx: ptr, ctx: ptr, syni: u32,si: u32,ri: u32,lpi: u32,pi: u32,di: u32) { +fn PathParams_DWtSynVSMatrix(pt: PathParams, ctx: Context, syni: u32,si: u32,ri: u32,lpi: u32,pi: u32,di: u32) { var hasRew = GlobalScalars[Index2D(TensorStrides[100], TensorStrides[101], u32(GvHasRew), u32(di))] > 0; var ach = GlobalScalars[Index2D(TensorStrides[100], TensorStrides[101], u32(GvACh), u32(di))]; if (!hasRew && ach < 0.1) { @@ -944,16 +943,16 @@ fn PathParams_DWtSynVSMatrix(pt: ptr, ctx: ptr (*pt).Learn.DWt.LearnThr) { 
// key: prevents learning if < threshold - dtr += ach * ((*pt).Matrix.Credit * sact * rminus); + var dtr = ach * (pt.Matrix.Delta * sact * (rplus - rminus)); + if (rminus > pt.Learn.DWt.LearnThr) { // key: prevents learning if < threshold + dtr += ach * (pt.Matrix.Credit * sact * rminus); } if (hasRew) { var tr = SynapseTraces[Index3D(TensorStrides[180], TensorStrides[181], TensorStrides[182], u32(syni), u32(di), u32(Tr))]; - if ((*pt).Matrix.VSRewLearn == 1) { + if (pt.Matrix.VSRewLearn == 1) { tr += (1 - GlobalScalars[Index2D(TensorStrides[100], TensorStrides[101], u32(GvGoalMaint), u32(di))]) * dtr; } - var dwt = rlr * (*pt).Learn.LRate.Eff * tr; + var dwt = rlr * pt.Learn.LRate.Eff * tr; SynapseTraces[Index3D(TensorStrides[180], TensorStrides[181], TensorStrides[182], u32(syni), u32(di), u32(DiDWt))] = dwt; SynapseTraces[Index3D(TensorStrides[180], TensorStrides[181], TensorStrides[182], u32(syni), u32(di), u32(Tr))] = 0.0; SynapseTraces[Index3D(TensorStrides[180], TensorStrides[181], TensorStrides[182], u32(syni), u32(di), u32(DTr))] = 0.0; @@ -964,38 +963,38 @@ fn PathParams_DWtSynVSMatrix(pt: ptr, ctx: ptr, ctx: ptr, syni: u32,si: u32,ri: u32,lpi: u32,pi: u32,di: u32) { +fn PathParams_DWtSynDSMatrix(pt: PathParams, ctx: Context, syni: u32,si: u32,ri: u32,lpi: u32,pi: u32,di: u32) { var rlr = Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ri), u32(di), u32(RLRate))]; if (GlobalScalars[Index2D(TensorStrides[100], TensorStrides[101], // US time -- use DA and current recv activity u32(GvHasRew), u32(di))] > 0) { var tr = SynapseTraces[Index3D(TensorStrides[180], TensorStrides[181], TensorStrides[182], u32(syni), u32(di), u32(Tr))]; - var dwt = rlr * (*pt).Learn.LRate.Eff * tr; + var dwt = rlr * pt.Learn.LRate.Eff * tr; SynapseTraces[Index3D(TensorStrides[180], TensorStrides[181], TensorStrides[182], u32(syni), u32(di), u32(DiDWt))] = dwt; SynapseTraces[Index3D(TensorStrides[180], TensorStrides[181], TensorStrides[182], u32(syni), 
u32(di), u32(Tr))] = 0.0; SynapseTraces[Index3D(TensorStrides[180], TensorStrides[181], TensorStrides[182], u32(syni), u32(di), u32(DTr))] = 0.0; } else { - var pfmod = (*pt).Matrix.BasePF + Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ri), u32(di), u32(GModSyn))]; + var pfmod = pt.Matrix.BasePF + Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ri), u32(di), u32(GModSyn))]; var rplus = Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ri), u32(di), u32(CaP))]; var rminus = Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ri), u32(di), u32(CaD))]; var sact = Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(si), u32(di), u32(CaD))]; - var dtr = rlr * ((*pt).Matrix.Delta * sact * (rplus - rminus)); - if (rminus > (*pt).Learn.DWt.LearnThr) { // key: prevents learning if < threshold - dtr += rlr * ((*pt).Matrix.Credit * pfmod * sact * rminus); + var dtr = rlr * (pt.Matrix.Delta * sact * (rplus - rminus)); + if (rminus > pt.Learn.DWt.LearnThr) { // key: prevents learning if < threshold + dtr += rlr * (pt.Matrix.Credit * pfmod * sact * rminus); } SynapseTraces[Index3D(TensorStrides[180], TensorStrides[181], TensorStrides[182], u32(syni), u32(di), u32(DTr))] = dtr; SynapseTraces[Index3D(TensorStrides[180], TensorStrides[181], TensorStrides[182], u32(syni), u32(di), u32(Tr))] += dtr; } } -fn PathParams_DWtSynVSPatch(pt: ptr, ctx: ptr, syni: u32,si: u32,ri: u32,lpi: u32,pi: u32,di: u32) { +fn PathParams_DWtSynVSPatch(pt: PathParams, ctx: Context, syni: u32,si: u32,ri: u32,lpi: u32,pi: u32,di: u32) { var ract = Neurons[Index3D(TensorStrides[70], TensorStrides[71], // t-1 TensorStrides[72], u32(ri), u32(di), u32(CaDPrev))]; - if (ract < (*pt).Learn.DWt.LearnThr) { + if (ract < pt.Learn.DWt.LearnThr) { ract = f32(0); } var sact = Neurons[Index3D(TensorStrides[70], TensorStrides[71], // t-1 TensorStrides[72], u32(si), u32(di), 
u32(CaDPrev))]; - var dwt = Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ri), u32(di), u32(RLRate))] * (*pt).Learn.LRate.Eff * sact * ract; + var dwt = Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ri), u32(di), u32(RLRate))] * pt.Learn.LRate.Eff * sact * ract; SynapseTraces[Index3D(TensorStrides[180], TensorStrides[181], TensorStrides[182], u32(syni), u32(di), u32(DiDWt))] = dwt; @@ -1075,8 +1074,8 @@ struct DWtParams { Dt: f32, pad: f32, } -fn DWtParams_TrFromCa(tp: ptr, tr: f32, ca: f32) -> f32 { - return tr + (*tp).Dt*(ca-tr); +fn DWtParams_TrFromCa(tp: DWtParams, tr: f32, ca: f32) -> f32 { + return tr + tp.Dt*(ca-tr); } struct HebbParams { On: i32, diff --git a/axon/shaders/GPUTestWrite.wgsl b/axon/shaders/GPUTestWrite.wgsl index c15b151aa..65c8d3091 100644 --- a/axon/shaders/GPUTestWrite.wgsl +++ b/axon/shaders/GPUTestWrite.wgsl @@ -25,7 +25,7 @@ var RecvPathIxs: array; var PathRecvCon: array; @group(1) @binding(4) var RecvSynIxs: array; -// // Ctx is the current context state (one only). +// // Ctx is the current context state (one only). This is read-only except in // specific kernels. 
@group(2) @binding(0) var Ctx: array; @group(2) @binding(1) @@ -81,16 +81,15 @@ fn Index3D(s0: u32, s1: u32, s2: u32, i0: u32, i1: u32, i2: u32) -> u32 { //////// import: "act-net.go" fn GPUTestWrite(i: u32) { //gosl:kernel - var ctx = Ctx[0]; - var ni = Context_ItemIndex(&ctx, i); + let ctx = Ctx[0]; + var ni = Context_ItemIndex(ctx, i); if (ni >= NetworkIxs[0].NNeurons) { return; } - var di = Context_DataIndex(&ctx, i); + var di = Context_DataIndex(ctx, i); for (var vi = Spike; vi < NeuronVarsN; vi++) { Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(vi))] = f32(ni*1000 + u32(vi)); } - Ctx[0] = ctx; } //////// import: "act-path.go" @@ -386,11 +385,11 @@ struct Context { //types:add -setters SlowCounter: i32, RandCounter: RandCounter, } -fn Context_ItemIndex(ctx: ptr, idx: u32) -> u32 { - return idx / (*ctx).NData; +fn Context_ItemIndex(ctx: Context, idx: u32) -> u32 { + return idx / ctx.NData; } -fn Context_DataIndex(ctx: ptr, idx: u32) -> u32 { - return idx % (*ctx).NData; +fn Context_DataIndex(ctx: Context, idx: u32) -> u32 { + return idx % ctx.NData; } //////// import: "deep-layer.go" diff --git a/axon/shaders/GatherSpikes.wgsl b/axon/shaders/GatherSpikes.wgsl index f795d2a61..7aa6bb5fd 100644 --- a/axon/shaders/GatherSpikes.wgsl +++ b/axon/shaders/GatherSpikes.wgsl @@ -25,7 +25,7 @@ var RecvPathIxs: array; var PathRecvCon: array; @group(1) @binding(4) var RecvSynIxs: array; -// // Ctx is the current context state (one only). +// // Ctx is the current context state (one only). This is read-only except in // specific kernels. 
@group(2) @binding(0) var Ctx: array; @group(2) @binding(1) @@ -78,17 +78,17 @@ fn Index3D(s0: u32, s1: u32, s2: u32, i0: u32, i1: u32, i2: u32) -> u32 { //////// import: "vars.go" //////// import: "act-layer.go" -fn LayerParams_GatherSpikes(ly: ptr, ctx: ptr, ni: u32,di: u32) { - var lni = ni - (*ly).Indexes.NeurSt; +fn LayerParams_GatherSpikes(ly: LayerParams, ctx: Context, ni: u32,di: u32) { + var lni = ni - ly.Indexes.NeurSt; LayerParams_GatherSpikesInit(ly, ctx, ni, di); - for (var pti = u32(0); pti < (*ly).Indexes.RecvN; pti++) { - var npti = RecvPathIxs[Index1D(TensorStrides[40], u32((*ly).Indexes.RecvSt + pti))]; - var pt = Paths[npti]; - PathParams_GatherSpikes(&pt, ctx, ly, ni, di, lni); + for (var pti = u32(0); pti < ly.Indexes.RecvN; pti++) { + var npti = RecvPathIxs[Index1D(TensorStrides[40], u32(ly.Indexes.RecvSt + pti))]; + let pt = Paths[npti]; + PathParams_GatherSpikes(pt, ctx, ly, ni, di, lni); } LayerParams_GiFromSpikes(ly, ctx, ni, di); } -fn LayerParams_GatherSpikesInit(ly: ptr, ctx: ptr, ni: u32,di: u32) { +fn LayerParams_GatherSpikesInit(ly: LayerParams, ctx: Context, ni: u32,di: u32) { Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(GeRaw))] = 0.0; Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(GiRaw))] = 0.0; Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(GModRaw))] = 0.0; @@ -98,7 +98,7 @@ fn LayerParams_GatherSpikesInit(ly: ptr, ctx: ptr, ctx: ptr, ni: u32,di: u32) { +fn LayerParams_GiFromSpikes(ly: LayerParams, ctx: Context, ni: u32,di: u32) { var pi = LayerParams_PoolIndex(ly, NeuronIxs[Index2D(TensorStrides[10], TensorStrides[11], u32(ni), u32(NrnSubPool))]); var spk = Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(Spike))]; var geRaw = Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(GeRaw))]; @@ 
-115,15 +115,14 @@ fn LayerParams_GiFromSpikes(ly: ptr, ctx: ptr= NetworkIxs[0].NNeurons) { return; } - var di = Context_DataIndex(&ctx, i); + var di = Context_DataIndex(ctx, i); var li = NeuronIxs[Index2D(TensorStrides[10], TensorStrides[11], u32(ni), u32(NrnLayIndex))]; - var layers=Layers[li]; LayerParams_GatherSpikes(&layers, &ctx, ni, di); - Ctx[0] = ctx; + let layers=Layers[li]; LayerParams_GatherSpikes(layers, ctx, ni, di); } //////// import: "act-path.go" @@ -139,19 +138,19 @@ struct SynComParams { MaxDelay: u32, DelLen: u32, } -fn SynComParams_RingIndex(sc: ptr, i: u32) -> u32 { +fn SynComParams_RingIndex(sc: SynComParams, i: u32) -> u32 { var ri = i; - if (ri >= (*sc).DelLen) { - ri -= (*sc).DelLen; + if (ri >= sc.DelLen) { + ri -= sc.DelLen; }return ri; } -fn SynComParams_ReadOff(sc: ptr, cycTot: i32) -> u32 { - return SynComParams_RingIndex(sc, u32(cycTot) % (*sc).DelLen); +fn SynComParams_ReadOff(sc: SynComParams, cycTot: i32) -> u32 { + return SynComParams_RingIndex(sc, u32(cycTot) % sc.DelLen); } -fn SynComParams_FloatToIntFactor(sc: ptr) -> f32 { +fn SynComParams_FloatToIntFactor(sc: SynComParams) -> f32 { return f32(u32(1) << 24); // leaves 7 bits = 128 to cover any extreme cases } -fn SynComParams_FloatFromGBuf(sc: ptr, ival: i32) -> f32 { +fn SynComParams_FloatFromGBuf(sc: SynComParams, ival: i32) -> f32 { return f32(ival) / SynComParams_FloatToIntFactor(sc); } struct PathScaleParams { @@ -160,35 +159,35 @@ struct PathScaleParams { pad: f32, pad1: f32, } -fn PathParams_GatherSpikes(pt: ptr, ctx: ptr, ly: ptr, ni: u32,di: u32,lni: u32) { - var deli = SynComParams_ReadOff(&(*pt).Com, (*ctx).CyclesTotal); - var npti = (*pt).Indexes.NPathNeurSt + lni; - var gRaw = SynComParams_FloatFromGBuf(&(*pt).Com, PathGBuf[Index3D(TensorStrides[150], TensorStrides[151], TensorStrides[152], u32(npti), u32(di), u32(deli))]); +fn PathParams_GatherSpikes(pt: PathParams, ctx: Context, ly: LayerParams, ni: u32,di: u32,lni: u32) { + var deli = 
SynComParams_ReadOff(pt.Com, ctx.CyclesTotal); + var npti = pt.Indexes.NPathNeurSt + lni; + var gRaw = SynComParams_FloatFromGBuf(pt.Com, PathGBuf[Index3D(TensorStrides[150], TensorStrides[151], TensorStrides[152], u32(npti), u32(di), u32(deli))]); PathGBuf[Index3D(TensorStrides[150], TensorStrides[151], TensorStrides[152], u32(npti), u32(di), u32(deli))] = 0; var gsyn = PathGSyns[Index2D(TensorStrides[160], TensorStrides[161], u32(npti), u32(di))]; PathParams_GatherSpikesGSyn(pt, ctx, ly, ni, di, gRaw, &gsyn); PathGSyns[Index2D(TensorStrides[160], TensorStrides[161], u32(npti), u32(di))] = gsyn; } -fn PathParams_GatherSpikesGSyn(pt: ptr, ctx: ptr, ly: ptr, ni: u32,di: u32, gRaw: f32, gSyn: ptr) { - switch ((*pt).Com.GType) { +fn PathParams_GatherSpikesGSyn(pt: PathParams, ctx: Context, ly: LayerParams, ni: u32,di: u32, gRaw: f32, gSyn: ptr) { + switch (pt.Com.GType) { case ExcitatoryG: { - *gSyn = DtParams_GeSynFromRaw(&(*ly).Acts.Dt, *gSyn, gRaw); + *gSyn = DtParams_GeSynFromRaw(ly.Acts.Dt, *gSyn, gRaw); Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(GeRaw))] += gRaw; Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(GeSyn))] += *gSyn; } case InhibitoryG: { - *gSyn = DtParams_GiSynFromRaw(&(*ly).Acts.Dt, *gSyn, gRaw); + *gSyn = DtParams_GiSynFromRaw(ly.Acts.Dt, *gSyn, gRaw); Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(GiRaw))] += gRaw; Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(GiSyn))] += *gSyn; } case ModulatoryG: { - *gSyn = DtParams_GeSynFromRaw(&(*ly).Acts.Dt, *gSyn, gRaw); + *gSyn = DtParams_GeSynFromRaw(ly.Acts.Dt, *gSyn, gRaw); Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(GModRaw))] += gRaw; Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(GModSyn))] += *gSyn; } case 
MaintG: { - *gSyn = DtParams_GeSynFromRaw(&(*ly).Acts.Dt, *gSyn, gRaw); + *gSyn = DtParams_GeSynFromRaw(ly.Acts.Dt, *gSyn, gRaw); Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(GMaintRaw))] += gRaw; } @@ -263,11 +262,11 @@ struct DtParams { IntDt: f32, LongAvgDt: f32, } -fn DtParams_GeSynFromRaw(dp: ptr, geSyn: f32,geRaw: f32) -> f32 { - return geSyn + geRaw - (*dp).GeDt*geSyn; +fn DtParams_GeSynFromRaw(dp: DtParams, geSyn: f32,geRaw: f32) -> f32 { + return geSyn + geRaw - dp.GeDt*geSyn; } -fn DtParams_GiSynFromRaw(dp: ptr, giSyn: f32,giRaw: f32) -> f32 { - return giSyn + giRaw - (*dp).GiDt*giSyn; +fn DtParams_GiSynFromRaw(dp: DtParams, giSyn: f32,giRaw: f32) -> f32 { + return giSyn + giRaw - dp.GiDt*giSyn; } struct SpikeNoiseParams { On: i32, @@ -479,11 +478,11 @@ struct Context { //types:add -setters SlowCounter: i32, RandCounter: RandCounter, } -fn Context_ItemIndex(ctx: ptr, idx: u32) -> u32 { - return idx / (*ctx).NData; +fn Context_ItemIndex(ctx: Context, idx: u32) -> u32 { + return idx / ctx.NData; } -fn Context_DataIndex(ctx: ptr, idx: u32) -> u32 { - return idx % (*ctx).NData; +fn Context_DataIndex(ctx: Context, idx: u32) -> u32 { + return idx % ctx.NData; } //////// import: "deep-layer.go" @@ -748,8 +747,8 @@ struct LayerParams { TDDa: TDDaParams, Indexes: LayerIndexes, } -fn LayerParams_PoolIndex(ly: ptr, pi: u32) -> u32 { - return (*ly).PoolSt + pi; +fn LayerParams_PoolIndex(ly: LayerParams, pi: u32) -> u32 { + return ly.PoolSt + pi; } //////// import: "layertypes.go" diff --git a/axon/shaders/InitGBuffsPath.wgsl b/axon/shaders/InitGBuffsPath.wgsl index 476a76458..6e1a79203 100644 --- a/axon/shaders/InitGBuffsPath.wgsl +++ b/axon/shaders/InitGBuffsPath.wgsl @@ -25,7 +25,7 @@ var RecvPathIxs: array; var PathRecvCon: array; @group(1) @binding(4) var RecvSynIxs: array; -// // Ctx is the current context state (one only). +// // Ctx is the current context state (one only). 
This is read-only except in // specific kernels. @group(2) @binding(0) var Ctx: array; @group(2) @binding(1) @@ -81,9 +81,8 @@ fn Index3D(s0: u32, s1: u32, s2: u32, i0: u32, i1: u32, i2: u32) -> u32 { //////// import: "act-net.go" fn InitGBuffsPath(pti: u32) { //gosl:kernel - var ctx = Ctx[0]; - var paths=Paths[pti]; PathParams_InitGBuffs(&paths, &ctx); - Ctx[0] = ctx; + let ctx = Ctx[0]; + let paths=Paths[pti]; PathParams_InitGBuffs(paths, ctx); } //////// import: "act-path.go" @@ -105,12 +104,12 @@ struct PathScaleParams { pad: f32, pad1: f32, } -fn PathParams_InitGBuffs(pt: ptr, ctx: ptr) { - var nix = NetworkIxs[0]; +fn PathParams_InitGBuffs(pt: PathParams, ctx: Context) { + let nix = NetworkIxs[0]; var maxd = nix.MaxData; var mdel = nix.MaxDelay + 1; - var rnn = (*pt).Indexes.RecvNeurN; - var npst = (*pt).Indexes.NPathNeurSt; + var rnn = pt.Indexes.RecvNeurN; + var npst = pt.Indexes.NPathNeurSt; for (var ri = u32(0); ri < rnn; ri++) { for (var di = u32(0); di < maxd; di++) { for (var dl = u32(0); dl < mdel; dl++) { diff --git a/axon/shaders/LayerGi.wgsl b/axon/shaders/LayerGi.wgsl index 22e42ad34..3e5b9bad1 100644 --- a/axon/shaders/LayerGi.wgsl +++ b/axon/shaders/LayerGi.wgsl @@ -25,7 +25,7 @@ var RecvPathIxs: array; var PathRecvCon: array; @group(1) @binding(4) var RecvSynIxs: array; -// // Ctx is the current context state (one only). +// // Ctx is the current context state (one only). This is read-only except in // specific kernels. 
@group(2) @binding(0) var Ctx: array; @group(2) @binding(1) @@ -78,28 +78,27 @@ fn Index3D(s0: u32, s1: u32, s2: u32, i0: u32, i1: u32, i2: u32) -> u32 { //////// import: "vars.go" //////// import: "act-layer.go" -fn LayerParams_LayerGi(ly: ptr, ctx: ptr, li: u32,di: u32) { +fn LayerParams_LayerGi(ly: LayerParams, ctx: Context, li: u32,di: u32) { var lpi = LayerParams_PoolIndex(ly, u32(u32(0))); PoolAvgMaxCalc(lpi, di); PoolInhibIntToRaw(lpi, di); LayerParams_LayPoolGiFromSpikes(ly, ctx, lpi, di); } -fn LayerParams_LayPoolGiFromSpikes(ly: ptr, ctx: ptr, lpi: u32,di: u32) { +fn LayerParams_LayPoolGiFromSpikes(ly: LayerParams, ctx: Context, lpi: u32,di: u32) { PoolInhibSpikesFromRaw(lpi, di); - PoolInhib(&(*ly).Inhib.Layer, lpi, di, LayerStates[Index3D(TensorStrides[90], TensorStrides[91], TensorStrides[92], - u32((*ly).Index), u32(di), u32(LayerGiMult))]); + PoolInhib(ly.Inhib.Layer, lpi, di, LayerStates[Index3D(TensorStrides[90], TensorStrides[91], TensorStrides[92], + u32(ly.Index), u32(di), u32(LayerGiMult))]); } //////// import: "act-net.go" fn LayerGi(i: u32) { //gosl:kernel - var ctx = Ctx[0]; - var li = Context_ItemIndex(&ctx, i); + let ctx = Ctx[0]; + var li = Context_ItemIndex(ctx, i); if (li >= NetworkIxs[0].NLayers) { return; } - var di = Context_DataIndex(&ctx, i); - var layers=Layers[li]; LayerParams_LayerGi(&layers, &ctx, li, di); - Ctx[0] = ctx; + var di = Context_DataIndex(ctx, i); + let layers=Layers[li]; LayerParams_LayerGi(layers, ctx, li, di); } //////// import: "act-path.go" @@ -395,11 +394,11 @@ struct Context { //types:add -setters SlowCounter: i32, RandCounter: RandCounter, } -fn Context_ItemIndex(ctx: ptr, idx: u32) -> u32 { - return idx / (*ctx).NData; +fn Context_ItemIndex(ctx: Context, idx: u32) -> u32 { + return idx / ctx.NData; } -fn Context_DataIndex(ctx: ptr, idx: u32) -> u32 { - return idx % (*ctx).NData; +fn Context_DataIndex(ctx: Context, idx: u32) -> u32 { + return idx % ctx.NData; } //////// import: "deep-layer.go" @@ -471,20 
+470,20 @@ struct GiParams { FFAvgDt: f32, pad: f32, } -fn GiParams_FSiFromFFs(fb: ptr, fsi: f32,ffs: f32,fbs: f32) -> f32 { - return fsi + (ffs + (*fb).FB*fbs) - (*fb).FSDt*fsi; // immediate up, slow down +fn GiParams_FSiFromFFs(fb: GiParams, fsi: f32,ffs: f32,fbs: f32) -> f32 { + return fsi + (ffs + fb.FB*fbs) - fb.FSDt*fsi; // immediate up, slow down } -fn GiParams_FS0Thr(fb: ptr, val: f32) -> f32 { - return max(val-(*fb).FS0, 0.0); +fn GiParams_FS0Thr(fb: GiParams, val: f32) -> f32 { + return max(val-fb.FS0, 0.0); } -fn GiParams_FS(fb: ptr, fsi: f32,gext: f32, clamped: bool) -> f32 { - if (clamped && gext > (*fb).ClampExtMin) { +fn GiParams_FS(fb: GiParams, fsi: f32,gext: f32, clamped: bool) -> f32 { + if (clamped && gext > fb.ClampExtMin) { return gext; }return GiParams_FS0Thr(fb, fsi) + gext; } -fn GiParams_SSFromFBs(fb: ptr, ssf: ptr,ssi: ptr, fbs: f32) { - *ssi += (*fb).SSiDt * (*ssf*fbs - *ssi); - *ssf += fbs*(1-*ssf) - (*fb).SSfDt**ssf; +fn GiParams_SSFromFBs(fb: GiParams, ssf: ptr,ssi: ptr, fbs: f32) { + *ssi += fb.SSiDt * (*ssf*fbs - *ssi); + *ssf += fbs*(1-*ssf) - fb.SSfDt**ssf; } //////// import: "fsfffb-inhib.go" @@ -607,23 +606,23 @@ struct InhibParams { Layer: GiParams, Pool: GiParams, } -fn PoolInhib(fb: ptr, pi: u32,di: u32, gimult: f32) { - if ((*fb).On == 0) { +fn PoolInhib(fb: GiParams, pi: u32,di: u32, gimult: f32) { + if (fb.On == 0) { PoolInhibZero(pi, di);return; } - Pools[Index3D(TensorStrides[130], TensorStrides[131], TensorStrides[132], u32(pi), u32(di), u32(FFAvg))] += (*fb).FFAvgDt * (Pools[Index3D(TensorStrides[130], TensorStrides[131], TensorStrides[132], u32(pi), u32(di), u32(FFs))] - Pools[Index3D(TensorStrides[130], TensorStrides[131], TensorStrides[132], u32(pi), u32(di), u32(FFAvg))]); + Pools[Index3D(TensorStrides[130], TensorStrides[131], TensorStrides[132], u32(pi), u32(di), u32(FFAvg))] += fb.FFAvgDt * (Pools[Index3D(TensorStrides[130], TensorStrides[131], TensorStrides[132], u32(pi), u32(di), u32(FFs))] - 
Pools[Index3D(TensorStrides[130], TensorStrides[131], TensorStrides[132], u32(pi), u32(di), u32(FFAvg))]); var fsi = Pools[Index3D(TensorStrides[130], TensorStrides[131], TensorStrides[132], u32(pi), u32(di), u32(FSi))]; fsi = GiParams_FSiFromFFs(fb, fsi, Pools[Index3D(TensorStrides[130], TensorStrides[131], TensorStrides[132], u32(pi), u32(di), u32(FFs))], Pools[Index3D(TensorStrides[130], TensorStrides[131], TensorStrides[132], u32(pi), u32(di), u32(FBs))]); Pools[Index3D(TensorStrides[130], TensorStrides[131], TensorStrides[132], u32(pi), u32(di), u32(FSi))] = fsi; var clamped = PoolsInt[Index3D(TensorStrides[140], TensorStrides[141], TensorStrides[142], u32(pi), u32(di), u32(Clamped))] > 0; - Pools[Index3D(TensorStrides[130], TensorStrides[131], TensorStrides[132], u32(pi), u32(di), u32(FSGi))] = (*fb).Gi * GiParams_FS(fb, fsi, Pools[Index3D(TensorStrides[130], TensorStrides[131], TensorStrides[132], u32(pi), u32(di), u32(GeExts))], clamped); + Pools[Index3D(TensorStrides[130], TensorStrides[131], TensorStrides[132], u32(pi), u32(di), u32(FSGi))] = fb.Gi * GiParams_FS(fb, fsi, Pools[Index3D(TensorStrides[130], TensorStrides[131], TensorStrides[132], u32(pi), u32(di), u32(GeExts))], clamped); var ssf = Pools[Index3D(TensorStrides[130], TensorStrides[131], TensorStrides[132], u32(pi), u32(di), u32(SSf))]; var ssi = Pools[Index3D(TensorStrides[130], TensorStrides[131], TensorStrides[132], u32(pi), u32(di), u32(SSi))]; GiParams_SSFromFBs(fb, &ssf, &ssi, Pools[Index3D(TensorStrides[130], TensorStrides[131], TensorStrides[132], u32(pi), u32(di), u32(FBs))]); - Pools[Index3D(TensorStrides[130], TensorStrides[131], TensorStrides[132], u32(pi), u32(di), u32(SSGi))] = (*fb).Gi * (*fb).SS * ssi; + Pools[Index3D(TensorStrides[130], TensorStrides[131], TensorStrides[132], u32(pi), u32(di), u32(SSGi))] = fb.Gi * fb.SS * ssi; Pools[Index3D(TensorStrides[130], TensorStrides[131], TensorStrides[132], u32(pi), u32(di), u32(SSf))] = ssf; Pools[Index3D(TensorStrides[130], 
TensorStrides[131], TensorStrides[132], u32(pi), u32(di), u32(SSi))] = ssi; - Pools[Index3D(TensorStrides[130], TensorStrides[131], TensorStrides[132], u32(pi), u32(di), u32(TotalGi))] = PoolInhibGiFromFSSS(pi, di) + (*fb).FFPrv*Pools[Index3D(TensorStrides[130], TensorStrides[131], TensorStrides[132], u32(pi), u32(di), u32(FFAvgPrv))]; + Pools[Index3D(TensorStrides[130], TensorStrides[131], TensorStrides[132], u32(pi), u32(di), u32(TotalGi))] = PoolInhibGiFromFSSS(pi, di) + fb.FFPrv*Pools[Index3D(TensorStrides[130], TensorStrides[131], TensorStrides[132], u32(pi), u32(di), u32(FFAvgPrv))]; PoolInhibSaveOrig(pi, di); } fn PoolInhibInitRaw(pi: u32,di: u32) { @@ -740,8 +739,8 @@ struct LayerParams { TDDa: TDDaParams, Indexes: LayerIndexes, } -fn LayerParams_PoolIndex(ly: ptr, pi: u32) -> u32 { - return (*ly).PoolSt + pi; +fn LayerParams_PoolIndex(ly: LayerParams, pi: u32) -> u32 { + return ly.PoolSt + pi; } //////// import: "layertypes.go" diff --git a/axon/shaders/MinusPhaseNeuron.wgsl b/axon/shaders/MinusPhaseNeuron.wgsl index 7a362f2bb..ac8cac986 100644 --- a/axon/shaders/MinusPhaseNeuron.wgsl +++ b/axon/shaders/MinusPhaseNeuron.wgsl @@ -25,7 +25,7 @@ var RecvPathIxs: array; var PathRecvCon: array; @group(1) @binding(4) var RecvSynIxs: array; -// // Ctx is the current context state (one only). +// // Ctx is the current context state (one only). This is read-only except in // specific kernels. 
@group(2) @binding(0) var Ctx: array; @group(2) @binding(1) @@ -78,21 +78,20 @@ fn Index3D(s0: u32, s1: u32, s2: u32, i0: u32, i1: u32, i2: u32) -> u32 { //////// import: "vars.go" //////// import: "act-layer.go" -fn LayerParams_MinusPhaseNeuron(ly: ptr, ctx: ptr, ni: u32,di: u32) { +fn LayerParams_MinusPhaseNeuron(ly: LayerParams, ctx: Context, ni: u32,di: u32) { Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(ActM))] = Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(ActInt))]; } //////// import: "act-net.go" fn MinusPhaseNeuron(i: u32) { //gosl:kernel - var ctx = Ctx[0]; - var ni = Context_ItemIndex(&ctx, i); + let ctx = Ctx[0]; + var ni = Context_ItemIndex(ctx, i); if (ni >= NetworkIxs[0].NNeurons) { return; } - var di = Context_DataIndex(&ctx, i); + var di = Context_DataIndex(ctx, i); var li = NeuronIxs[Index2D(TensorStrides[10], TensorStrides[11], u32(ni), u32(NrnLayIndex))]; - var layers=Layers[li]; LayerParams_MinusPhaseNeuron(&layers, &ctx, ni, di); - Ctx[0] = ctx; + let layers=Layers[li]; LayerParams_MinusPhaseNeuron(layers, ctx, ni, di); } //////// import: "act-path.go" @@ -388,11 +387,11 @@ struct Context { //types:add -setters SlowCounter: i32, RandCounter: RandCounter, } -fn Context_ItemIndex(ctx: ptr, idx: u32) -> u32 { - return idx / (*ctx).NData; +fn Context_ItemIndex(ctx: Context, idx: u32) -> u32 { + return idx / ctx.NData; } -fn Context_DataIndex(ctx: ptr, idx: u32) -> u32 { - return idx % (*ctx).NData; +fn Context_DataIndex(ctx: Context, idx: u32) -> u32 { + return idx % ctx.NData; } //////// import: "deep-layer.go" diff --git a/axon/shaders/MinusPhasePool.wgsl b/axon/shaders/MinusPhasePool.wgsl index 0397c5b02..c864eaac6 100644 --- a/axon/shaders/MinusPhasePool.wgsl +++ b/axon/shaders/MinusPhasePool.wgsl @@ -25,7 +25,7 @@ var RecvPathIxs: array; var PathRecvCon: array; @group(1) @binding(4) var RecvSynIxs: array; -// // Ctx is the current context state 
(one only). +// // Ctx is the current context state (one only). This is read-only except in // specific kernels. @group(2) @binding(0) var Ctx: array; @group(2) @binding(1) @@ -78,10 +78,10 @@ fn Index3D(s0: u32, s1: u32, s2: u32, i0: u32, i1: u32, i2: u32) -> u32 { //////// import: "vars.go" //////// import: "act-layer.go" -fn LayerParams_MinusPhasePool(ly: ptr, ctx: ptr, pi: u32) { - for (var di = u32(0); di < (*ctx).NData; di++) { +fn LayerParams_MinusPhasePool(ly: LayerParams, ctx: Context, pi: u32) { + for (var di = u32(0); di < ctx.NData; di++) { PoolCycleToMinus(pi, di); - if ((*ly).Acts.Clamp.Add == 0 && (*ly).Acts.Clamp.IsTarget == 1) { + if (ly.Acts.Clamp.Add == 0 && ly.Acts.Clamp.IsTarget == 1) { PoolsInt[Index3D(TensorStrides[140], TensorStrides[141], TensorStrides[142], u32(pi), u32(di), u32(Clamped))] = 1; } } @@ -90,30 +90,29 @@ fn LayerParams_MinusPhasePool(ly: ptr, ctx: ptr, ctx: ptr, di: u32, geIntMinusMax: f32,giIntMinusMax: f32) { - var gem = LayerStates[Index3D(TensorStrides[90], TensorStrides[91], TensorStrides[92], u32((*ly).Index), u32(di), u32(LayerAvgMaxGeM))]; - var gim = LayerStates[Index3D(TensorStrides[90], TensorStrides[91], TensorStrides[92], u32((*ly).Index), u32(di), u32(LayerAvgMaxGiM))]; - gem += (*ly).Acts.Dt.LongAvgDt * (geIntMinusMax - gem); - gim += (*ly).Acts.Dt.LongAvgDt * (giIntMinusMax - gim); - LayerStates[Index3D(TensorStrides[90], TensorStrides[91], TensorStrides[92], u32((*ly).Index), u32(di), u32(LayerAvgMaxGeM))] = gem; +fn LayerParams_AvgGeM(ly: LayerParams, ctx: Context, di: u32, geIntMinusMax: f32,giIntMinusMax: f32) { + var gem = LayerStates[Index3D(TensorStrides[90], TensorStrides[91], TensorStrides[92], u32(ly.Index), u32(di), u32(LayerAvgMaxGeM))]; + var gim = LayerStates[Index3D(TensorStrides[90], TensorStrides[91], TensorStrides[92], u32(ly.Index), u32(di), u32(LayerAvgMaxGiM))]; + gem += ly.Acts.Dt.LongAvgDt * (geIntMinusMax - gem); + gim += ly.Acts.Dt.LongAvgDt * (giIntMinusMax - gim); + 
LayerStates[Index3D(TensorStrides[90], TensorStrides[91], TensorStrides[92], u32(ly.Index), u32(di), u32(LayerAvgMaxGeM))] = gem; LayerStates[Index3D(TensorStrides[90], TensorStrides[91], TensorStrides[92], - u32((*ly).Index), u32(di), u32(LayerAvgMaxGiM))] = gim; + u32(ly.Index), u32(di), u32(LayerAvgMaxGiM))] = gim; } //////// import: "act-net.go" fn MinusPhasePool(pi: u32) { //gosl:kernel - var ctx = Ctx[0]; + let ctx = Ctx[0]; var li = PoolIxs[Index2D(TensorStrides[0], TensorStrides[1], u32(pi), u32(PoolLayerIdx))]; - var layers=Layers[li]; LayerParams_MinusPhasePool(&layers, &ctx, pi); - Ctx[0] = ctx; + let layers=Layers[li]; LayerParams_MinusPhasePool(layers, ctx, pi); } //////// import: "act-path.go" diff --git a/axon/shaders/MinusPhasePost.wgsl b/axon/shaders/MinusPhasePost.wgsl index eac494bbe..b83f9c6bd 100644 --- a/axon/shaders/MinusPhasePost.wgsl +++ b/axon/shaders/MinusPhasePost.wgsl @@ -25,7 +25,7 @@ var RecvPathIxs: array; var PathRecvCon: array; @group(1) @binding(4) var RecvSynIxs: array; -// // Ctx is the current context state (one only). +// // Ctx is the current context state (one only). This is read-only except in // specific kernels. 
@group(2) @binding(0) var Ctx: array; @group(2) @binding(1) @@ -78,20 +78,20 @@ fn Index3D(s0: u32, s1: u32, s2: u32, i0: u32, i1: u32, i2: u32) -> u32 { //////// import: "vars.go" //////// import: "act-layer.go" -fn LayerParams_DecayStateNeuronsAll(ly: ptr, ctx: ptr, decay: f32,glong: f32,ahp: f32) { - var nn = (*ly).Indexes.NNeurons; +fn LayerParams_DecayStateNeuronsAll(ly: LayerParams, ctx: Context, decay: f32,glong: f32,ahp: f32) { + var nn = ly.Indexes.NNeurons; for (var lni = u32(0); lni < nn; lni++) { - var ni = (*ly).Indexes.NeurSt + lni; + var ni = ly.Indexes.NeurSt + lni; if (NeuronIsOff(ni)) { continue; } - for (var di = u32(0); di < (*ctx).NData; di++) { - ActParams_DecayState(&(*ly).Acts, ctx, ni, di, decay, glong, ahp); + for (var di = u32(0); di < ctx.NData; di++) { + ActParams_DecayState(ly.Acts, ctx, ni, di, decay, glong, ahp); } } } -fn LayerParams_MinusPhasePost(ly: ptr, ctx: ptr) { - switch ((*ly).Type) { +fn LayerParams_MinusPhasePost(ly: LayerParams, ctx: Context) { + switch (ly.Type) { case MatrixLayer: { LayerParams_MatrixGated(ly, ctx); } // need gated state for decisions about action processing, so do in minus too @@ -105,9 +105,8 @@ fn LayerParams_MinusPhasePost(ly: ptr, ctx: ptr, ctx: ptr, ni: u32,di: u32, decay: f32) { +fn ActParams_DecayLearnCa(ac: ActParams, ctx: Context, ni: u32,di: u32, decay: f32) { Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(GnmdaLrn))] -= decay * Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(GnmdaLrn))]; Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(NmdaCa))] -= decay * Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(NmdaCa))]; Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(VgccCa))] -= decay * Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], 
u32(ni), u32(di), u32(VgccCa))]; @@ -277,7 +276,7 @@ fn ActParams_DecayLearnCa(ac: ptr, ctx: ptr, ctx: ptr, ni: u32,di: u32, decay: f32) { +fn ActParams_DecayAHP(ac: ActParams, ctx: Context, ni: u32,di: u32, decay: f32) { Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(MahpN))] -= decay * Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(MahpN))]; Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(Gmahp))] -= decay * Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(Gmahp))]; Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(SahpCa))] -= decay * Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(SahpCa))]; @@ -285,24 +284,24 @@ fn ActParams_DecayAHP(ac: ptr, ctx: ptr, n Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(Gsahp))] -= decay * Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(Gsahp))]; Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(GknaMed))] -= decay * Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(GknaMed))]; Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(GknaSlow))] -= decay * Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(GknaSlow))]; - var kirMrest = (*ac).Kir.Mrest; + var kirMrest = ac.Kir.Mrest; Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(KirM))] += decay * (kirMrest - Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(KirM))]); Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), 
u32(di), u32(Gkir))] -= decay * Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(Gkir))]; } -fn ActParams_DecayState(ac: ptr, ctx: ptr, ni: u32,di: u32, decay: f32,glong: f32,ahp: f32) { +fn ActParams_DecayState(ac: ActParams, ctx: Context, ni: u32,di: u32, decay: f32,glong: f32,ahp: f32) { Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(ISIAvg))] = -1.0; - Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(ActInt))] = (*ac).Init.Act; + Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(ActInt))] = ac.Init.Act; Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(Spiked))] = 0.0; if (decay > 0) { Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(Spike))] = 0.0; - Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(Act))] -= decay * (Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(Act))] - (*ac).Init.Act); - Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(ActInt))] -= decay * (Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(ActInt))] - (*ac).Init.Act); + Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(Act))] -= decay * (Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(Act))] - ac.Init.Act); + Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(ActInt))] -= decay * (Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(ActInt))] - ac.Init.Act); Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(GeSyn))] -= 
decay * (Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(GeSyn))] - NeuronAvgs[Index2D(TensorStrides[80], TensorStrides[81], u32(ni), u32(GeBase))]); Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(Ge))] -= decay * (Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(Ge))] - NeuronAvgs[Index2D(TensorStrides[80], TensorStrides[81], u32(ni), u32(GeBase))]); Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(Gi))] -= decay * (Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(Gi))] - NeuronAvgs[Index2D(TensorStrides[80], TensorStrides[81], u32(ni), u32(GiBase))]); Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(Gk))] -= decay * Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(Gk))]; - Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(Vm))] -= decay * (Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(Vm))] - (*ac).Init.Vm); + Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(Vm))] -= decay * (Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(Vm))] - ac.Init.Vm); Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(GeNoise))] -= decay * Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(GeNoise))]; Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(GiNoise))] -= decay * Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(GiNoise))]; Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], 
u32(ni), u32(di), u32(GiSyn))] -= decay * Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(GiSyn))]; @@ -310,7 +309,7 @@ fn ActParams_DecayState(ac: ptr, ctx: ptr, Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(GiInt))] -= decay * Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(GiInt))]; Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(GeIntNorm))] -= decay * Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(GeIntNorm))]; } - Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(VmDend))] -= glong * (Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(VmDend))] - (*ac).Init.Vm); + Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(VmDend))] -= glong * (Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(VmDend))] - ac.Init.Vm); if (ahp > 0) { ActParams_DecayAHP(ac, ctx, ni, di, ahp); } @@ -326,8 +325,8 @@ fn ActParams_DecayState(ac: ptr, ctx: ptr, Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(VgccH))] -= glong * Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(VgccH))]; Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(Gak))] -= glong * Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(Gak))]; Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(Gsk))] -= glong * Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(Gsk))]; - if ((*ac).Decay.LearnCa > 0) { // learning-based Ca values -- not usual - 
ActParams_DecayLearnCa(ac, ctx, ni, di, (*ac).Decay.LearnCa); + if (ac.Decay.LearnCa > 0) { // learning-based Ca values -- not usual + ActParams_DecayLearnCa(ac, ctx, ni, di, ac.Decay.LearnCa); } Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(Inet))] = 0.0; Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(GeRaw))] = 0.0; @@ -744,8 +743,8 @@ struct LayerParams { TDDa: TDDaParams, Indexes: LayerIndexes, } -fn LayerParams_PoolIndex(ly: ptr, pi: u32) -> u32 { - return (*ly).PoolSt + pi; +fn LayerParams_PoolIndex(ly: LayerParams, pi: u32) -> u32 { + return ly.PoolSt + pi; } //////// import: "layertypes.go" @@ -1161,66 +1160,66 @@ struct GPParams { pad1: u32, pad2: u32, } -fn LayerParams_MatrixGated(ly: ptr, ctx: ptr) { +fn LayerParams_MatrixGated(ly: LayerParams, ctx: Context) { var lpi = LayerParams_PoolIndex(ly, u32(u32(0))); - if ((*ly).Learn.NeuroMod.DAMod != D1Mod) { - var oly = Layers[(*ly).Matrix.OtherMatrixIndex]; + if (ly.Learn.NeuroMod.DAMod != D1Mod) { + var oly = Layers[ly.Matrix.OtherMatrixIndex]; var olpi = oly.PoolSt; - for (var di = u32(0); di < (*ctx).NData; di++) { + for (var di = u32(0); di < ctx.NData; di++) { PoolsInt[Index3D(TensorStrides[140], TensorStrides[141], TensorStrides[142], u32(lpi), u32(di), u32(PoolGated))] = PoolsInt[Index3D(TensorStrides[140], TensorStrides[141], TensorStrides[142], u32(olpi), u32(di), u32(PoolGated))]; }return; } - for (var di = u32(0); di < (*ctx).NData; di++) { + for (var di = u32(0); di < ctx.NData; di++) { var mtxGated = PoolsInt[Index3D(TensorStrides[140], TensorStrides[141], TensorStrides[142], u32(lpi), u32(di), u32(PoolGated))] > 0; var thalGated = false; - if ((*ly).Matrix.ThalLay1Index >= 0) { - var tly = Layers[(*ly).Matrix.ThalLay1Index]; + if (ly.Matrix.ThalLay1Index >= 0) { + var tly = Layers[ly.Matrix.ThalLay1Index]; var tlpi = tly.PoolSt; var gt = PoolsInt[Index3D(TensorStrides[140], TensorStrides[141], 
TensorStrides[142], u32(tlpi), u32(di), u32(PoolGated))]; thalGated = thalGated || gt > 0; } - if ((*ly).Matrix.ThalLay2Index >= 0) { - var tly = Layers[(*ly).Matrix.ThalLay2Index]; + if (ly.Matrix.ThalLay2Index >= 0) { + var tly = Layers[ly.Matrix.ThalLay2Index]; var tlpi = tly.PoolSt; var gt = PoolsInt[Index3D(TensorStrides[140], TensorStrides[141], TensorStrides[142], u32(tlpi), u32(di), u32(PoolGated))]; thalGated = thalGated || gt > 0; } - if ((*ly).Matrix.ThalLay3Index >= 0) { - var tly = Layers[(*ly).Matrix.ThalLay3Index]; + if (ly.Matrix.ThalLay3Index >= 0) { + var tly = Layers[ly.Matrix.ThalLay3Index]; var tlpi = tly.PoolSt; var gt = PoolsInt[Index3D(TensorStrides[140], TensorStrides[141], TensorStrides[142], u32(tlpi), u32(di), u32(PoolGated))]; thalGated = thalGated || gt > 0; } - if ((*ly).Matrix.ThalLay4Index >= 0) { - var tly = Layers[(*ly).Matrix.ThalLay4Index]; + if (ly.Matrix.ThalLay4Index >= 0) { + var tly = Layers[ly.Matrix.ThalLay4Index]; var tlpi = tly.PoolSt; var gt = PoolsInt[Index3D(TensorStrides[140], TensorStrides[141], TensorStrides[142], u32(tlpi), u32(di), u32(PoolGated))]; thalGated = thalGated || gt > 0; } - if ((*ly).Matrix.ThalLay5Index >= 0) { - var tly = Layers[(*ly).Matrix.ThalLay5Index]; + if (ly.Matrix.ThalLay5Index >= 0) { + var tly = Layers[ly.Matrix.ThalLay5Index]; var tlpi = tly.PoolSt; var gt = PoolsInt[Index3D(TensorStrides[140], TensorStrides[141], TensorStrides[142], u32(tlpi), u32(di), u32(PoolGated))]; thalGated = thalGated || gt > 0; } - if ((*ly).Matrix.ThalLay6Index >= 0) { - var tly = Layers[(*ly).Matrix.ThalLay6Index]; + if (ly.Matrix.ThalLay6Index >= 0) { + var tly = Layers[ly.Matrix.ThalLay6Index]; var tlpi = tly.PoolSt; var gt = PoolsInt[Index3D(TensorStrides[140], TensorStrides[141], TensorStrides[142], u32(tlpi), u32(di), u32(PoolGated))]; thalGated = thalGated || gt > 0; } mtxGated = mtxGated && thalGated; if (!mtxGated) { // nobody did if thal didn't - for (var spi = u32(0); spi < (*ly).Indexes.NPools; 
spi++) { + for (var spi = u32(0); spi < ly.Indexes.NPools; spi++) { var pi = LayerParams_PoolIndex(ly, spi); PoolsInt[Index3D(TensorStrides[140], TensorStrides[141], TensorStrides[142], u32(pi), u32(di), u32(PoolGated))] = 0; } } - if ((*ctx).PlusPhase == 1 && (*ly).Matrix.IsVS == 1) { + if (ctx.PlusPhase == 1 && ly.Matrix.IsVS == 1) { GlobalScalars[Index2D(TensorStrides[100], TensorStrides[101], u32(GvVSMatrixJustGated), u32(di))] = f32(mtxGated); if (mtxGated) { var poolIndex = i32(-1); - for (var spi = u32(1); spi < (*ly).Indexes.NPools; spi++) { + for (var spi = u32(1); spi < ly.Indexes.NPools; spi++) { var pi = LayerParams_PoolIndex(ly, spi); if (poolIndex < 0 && PoolsInt[Index3D(TensorStrides[140], TensorStrides[141], TensorStrides[142], u32(pi), u32(di), u32(PoolGated))] > 0) { poolIndex = i32(spi); diff --git a/axon/shaders/NewStateLayer.wgsl b/axon/shaders/NewStateLayer.wgsl deleted file mode 100644 index a96215a61..000000000 --- a/axon/shaders/NewStateLayer.wgsl +++ /dev/null @@ -1,1437 +0,0 @@ -// Code generated by "gosl"; DO NOT EDIT -// kernel: NewStateLayer - -// // Layers are all the layer parameters. -@group(0) @binding(0) -var TensorStrides: array; -@group(0) @binding(1) -var Layers: array; -@group(0) @binding(2) -var Paths: array; -@group(0) @binding(3) -var NetworkIxs: array; -@group(0) @binding(4) -var PoolIxs: array; -@group(0) @binding(5) -var NeuronIxs: array; -// // SynapseIxs have index values for each synapse: // providing index into recv, send neurons, path. // [Indexes][NSyns]; NSyns = [Layer][SendPaths][SendNeurons][Syns] -@group(1) @binding(0) -var SynapseIxs: array; -@group(1) @binding(1) -var PathSendCon: array; -@group(1) @binding(2) -var RecvPathIxs: array; -@group(1) @binding(3) -var PathRecvCon: array; -@group(1) @binding(4) -var RecvSynIxs: array; -// // Ctx is the current context state (one only). 
-@group(2) @binding(0) -var Ctx: array; -@group(2) @binding(1) -var Neurons: array; -@group(2) @binding(2) -var NeuronAvgs: array; -@group(2) @binding(3) -var LayerStates: array; -@group(2) @binding(4) -var GlobalScalars: array; -@group(2) @binding(5) -var GlobalVectors: array; -@group(2) @binding(6) -var Exts: array; -// // Pools are the [PoolVars] float32 state values for layer and sub-pool inhibition, // Including the float32 AvgMax values by Phase and variable: use [AvgMaxVarIndex]. // [Layer * Pools][Data][PoolVars+AvgMax] -@group(3) @binding(0) -var Pools: array; -@group(3) @binding(1) -var PoolsInt: array; -@group(3) @binding(2) -var PathGBuf: array; -@group(3) @binding(3) -var PathGSyns: array; -@group(3) @binding(4) -var Synapses: array; -@group(3) @binding(5) -var SynapseTraces: array; - -alias GPUVars = i32; - -@compute @workgroup_size(64, 1, 1) -fn main(@builtin(workgroup_id) wgid: vec3, @builtin(num_workgroups) nwg: vec3, @builtin(local_invocation_index) loci: u32) { - let idx = loci + (wgid.x + wgid.y * nwg.x + wgid.z * nwg.x * nwg.y) * 64; - NewStateLayer(idx); -} - -fn Index2D(s0: u32, s1: u32, i0: u32, i1: u32) -> u32 { - return s0 * i0 + s1 * i1; -} - -fn Index1D(s0: u32, i0: u32) -> u32 { - return s0 * i0; -} - -fn Index3D(s0: u32, s1: u32, s2: u32, i0: u32, i1: u32, i2: u32) -> u32 { - return s0 * i0 + s1 * i1 + s2 * i2; -} - - -//////// import: "vars.go" - -//////// import: "act-layer.go" -fn LayerParams_IsTarget(ly: ptr) -> bool { - switch ((*ly).Type) { - case TargetLayer: { - return true; - } - case PulvinarLayer: { - return true; - } - default: { - return false; - } - } -} -fn LayerParams_IsInput(ly: ptr) -> bool { - switch ((*ly).Type) { - case InputLayer: { - return true; - } - default: { - return false; - } - } -} -fn LayerParams_NewStateLayer(ly: ptr, ctx: ptr) { - var actMinusAvg = f32(0); - var actPlusAvg = f32(0); - var np = u32((*ly).Indexes.NPools); - for (var di = u32(0); di < (*ctx).NData; di++) { - var lpi = 
LayerParams_PoolIndex(ly, u32(u32(0))); - actMinusAvg += PoolAvgMax(AMAct, AMMinus, Avg, lpi, di); - actPlusAvg += PoolAvgMax(AMAct, AMPlus, Avg, lpi, di); - (*ly).Acts.Clamp.IsInput = i32(LayerParams_IsInput(ly)); - (*ly).Acts.Clamp.IsTarget = i32(LayerParams_IsTarget(ly)); - LayerStates[Index3D(TensorStrides[90], TensorStrides[91], TensorStrides[92], u32((*ly).Index), u32(di), u32(LayerRT))] = -1.0; - LayerStates[Index3D(TensorStrides[90], TensorStrides[91], TensorStrides[92], u32((*ly).Index), u32(di), u32(GatedRT))] = -1.0; - for (var spi = u32(0); spi < np; spi++) { - var pi = LayerParams_PoolIndex(ly, spi); - LayerParams_NewStatePool(ly, ctx, pi, di); // also calls DecayState on pool - } - } - var davg = 1 / f32((*ctx).NData); - actMinusAvg *= davg; - actPlusAvg *= davg; - for (var di = u32(0); di < (*ctx).NData; di++) { - LayerParams_NewStateLayerActAvg(ly, ctx, di, actMinusAvg, actPlusAvg); - } -} -fn LayerParams_NewStateLayerActAvg(ly: ptr, ctx: ptr, di: u32, actMinusAvg: f32,actPlusAvg: f32) { - var mavg = LayerStates[Index3D(TensorStrides[90], TensorStrides[91], TensorStrides[92], u32((*ly).Index), u32(di), u32(LayerActMAvg))]; - var pavg = LayerStates[Index3D(TensorStrides[90], TensorStrides[91], TensorStrides[92], u32((*ly).Index), u32(di), u32(LayerActPAvg))]; - ActAvgParams_AvgFromAct(&(*ly).Inhib.ActAvg, &mavg, actMinusAvg, (*ly).Acts.Dt.LongAvgDt); - ActAvgParams_AvgFromAct(&(*ly).Inhib.ActAvg, &pavg, actPlusAvg, (*ly).Acts.Dt.LongAvgDt); - LayerStates[Index3D(TensorStrides[90], TensorStrides[91], TensorStrides[92], u32((*ly).Index), u32(di), u32(LayerActMAvg))] = mavg; - LayerStates[Index3D(TensorStrides[90], TensorStrides[91], TensorStrides[92], u32((*ly).Index), u32(di), u32(LayerActPAvg))] = pavg; -} -fn LayerParams_NewStatePool(ly: ptr, ctx: ptr, pi: u32,di: u32) { - PoolsInt[Index3D(TensorStrides[140], TensorStrides[141], TensorStrides[142], u32(pi), u32(di), u32(Clamped))] = 0; - if ((*ly).Acts.Clamp.Add == 0 && (*ly).Acts.Clamp.IsInput == 
1) { - PoolsInt[Index3D(TensorStrides[140], TensorStrides[141], TensorStrides[142], u32(pi), u32(di), u32(Clamped))] = 1; - } - PoolInhibDecay(pi, di, (*ly).Acts.Decay.Act); - PoolsInt[Index3D(TensorStrides[140], TensorStrides[141], - TensorStrides[142], u32(pi), u32(di), u32(PoolGated))] = 0; -} - -//////// import: "act-net.go" -fn NewStateLayer(li: u32) { //gosl:kernel - var ctx = Ctx[0]; - var layers=Layers[li]; LayerParams_NewStateLayer(&layers, &ctx); - Ctx[0] = ctx; -} - -//////// import: "act-path.go" -alias PathGTypes = i32; //enums:enum -const ExcitatoryG: PathGTypes = 0; -const InhibitoryG: PathGTypes = 1; -const ModulatoryG: PathGTypes = 2; -const MaintG: PathGTypes = 3; -const ContextG: PathGTypes = 4; -struct SynComParams { - GType: PathGTypes, - Delay: u32, - MaxDelay: u32, - DelLen: u32, -} -struct PathScaleParams { - Rel: f32, - Abs: f32, - pad: f32, - pad1: f32, -} - -//////// import: "act.go" -struct SpikeParams { - Thr: f32, - VmR: f32, - Tr: i32, - RTau: f32, - Exp: i32, - ExpSlope: f32, - ExpThr: f32, - MaxHz: f32, - ISITau: f32, - ISIDt: f32, - RDt: f32, - pad: i32, -} -struct DendParams { - GbarExp: f32, - GbarR: f32, - SSGi: f32, - HasMod: i32, - ModGain: f32, - ModACh: i32, - ModBase: f32, - pad: i32, -} -struct ActInitParams { - Vm: f32, - Act: f32, - GeBase: f32, - GiBase: f32, - GeVar: f32, - GiVar: f32, - pad: i32, - pad1: i32, -} -struct DecayParams { - Act: f32, - Glong: f32, - AHP: f32, - LearnCa: f32, - OnRew: i32, - pad: f32, - pad1: f32, - pad2: f32, -} -struct DtParams { - Integ: f32, - VmTau: f32, - VmDendTau: f32, - VmSteps: i32, - GeTau: f32, - GiTau: f32, - IntTau: f32, - LongAvgTau: f32, - MaxCycStart: i32, - VmDt: f32, - VmDendDt: f32, - DtStep: f32, - GeDt: f32, - GiDt: f32, - IntDt: f32, - LongAvgDt: f32, -} -struct SpikeNoiseParams { - On: i32, - GeHz: f32, - Ge: f32, - GiHz: f32, - Gi: f32, - MaintGe: i32, - GeExpInt: f32, - GiExpInt: f32, -} -struct ClampParams { - IsInput: i32, - IsTarget: i32, - Ge: f32, - Add: i32, 
- ErrThr: f32, - pad: f32, - pad1: f32, - pad2: f32, -} -struct SMaintParams { - On: i32, - NNeurons: f32, - Gbar: f32, - Inhib: f32, - ISI: F32, -} -struct PopCodeParams { - On: i32, - Ge: f32, - Min: f32, - Max: f32, - MinAct: f32, - MinSigma: f32, - MaxSigma: f32, - Clip: i32, -} -struct ActParams { - Spikes: SpikeParams, - Dend: DendParams, - Init: ActInitParams, - Decay: DecayParams, - Dt: DtParams, - Gbar: Chans, - Erev: Chans, - Clamp: ClampParams, - Noise: SpikeNoiseParams, - VmRange: F32, - Mahp: MahpParams, - Sahp: SahpParams, - KNa: KNaMedSlow, - Kir: KirParams, - NMDA: NMDAParams, - MaintNMDA: NMDAParams, - GabaB: GABABParams, - VGCC: VGCCParams, - AK: AKsParams, - SKCa: SKCaParams, - SMaint: SMaintParams, - PopCode: PopCodeParams, -} - -//////// import: "chans-ak.go" -struct AKsParams { - Gbar: f32, - Hf: f32, - Mf: f32, - Voff: f32, - Vmax: f32, - pad: i32, - pad1: i32, - pad2: i32, -} - -//////// import: "chans-chans.go" -struct Chans { - E: f32, - L: f32, - I: f32, - K: f32, -} - -//////// import: "chans-gabab.go" -struct GABABParams { - Gbar: f32, - RiseTau: f32, - DecayTau: f32, - Gbase: f32, - GiSpike: f32, - MaxTime: f32, - TauFact: f32, - RiseDt: f32, - DecayDt: f32, - pad: f32, - pad1: f32, - pad2: f32, -} - -//////// import: "chans-kir.go" -struct KirParams { - Gbar: f32, - MinfOff: f32, - MinfTau: f32, - RiseOff: f32, - RiseTau: f32, - DecayOff: f32, - DecayTau: f32, - Mrest: f32, -} - -//////// import: "chans-kna.go" -struct KNaParams { - On: i32, - Rise: f32, - Max: f32, - Tau: f32, - Dt: f32, - pad: i32, - pad1: i32, - pad2: i32, -} -struct KNaMedSlow { - On: i32, - TrialSlow: i32, - pad: i32, - pad1: i32, - Med: KNaParams, - Slow: KNaParams, -} - -//////// import: "chans-mahp.go" -struct MahpParams { - Gbar: f32, - Voff: f32, - Vslope: f32, - TauMax: f32, - Tadj: f32, - DtMax: f32, - pad: i32, - pad2: i32, -} - -//////// import: "chans-nmda.go" -struct NMDAParams { - Gbar: f32, - Tau: f32, - ITau: f32, - MgC: f32, - Voff: f32, - Dt: f32, 
- IDt: f32, - MgFact: f32, -} - -//////// import: "chans-sahp.go" -struct SahpParams { - Gbar: f32, - CaTau: f32, - Off: f32, - Slope: f32, - TauMax: f32, - CaDt: f32, - DtMax: f32, - pad: i32, -} - -//////// import: "chans-skca.go" -struct SKCaParams { - Gbar: f32, - C50: f32, - ActTau: f32, - DeTau: f32, - KCaR: f32, - CaRDecayTau: f32, - CaInThr: f32, - CaInTau: f32, - ActDt: f32, - DeDt: f32, - CaRDecayDt: f32, - CaInDt: f32, -} - -//////// import: "chans-vgcc.go" -struct VGCCParams { - Gbar: f32, - Ca: f32, - pad: i32, - pad1: i32, -} - -//////// import: "context.go" -struct Context { //types:add -setters - NData: u32, - Mode: i32, - Testing: i32, - Phase: i32, - PlusPhase: i32, - PhaseCycle: i32, - Cycle: i32, - ThetaCycles: i32, - PlusCycles: i32, - CaBinCycles: i32, - CyclesTotal: i32, - Time: f32, - TrialsTotal: i32, - TimePerCycle: f32, - SlowInterval: i32, - SlowCounter: i32, - RandCounter: RandCounter, -} - -//////// import: "deep-layer.go" -struct BurstParams { - ThrRel: f32, - ThrAbs: f32, - pad: f32, - pad1: f32, -} -struct CTParams { - GeGain: f32, - DecayTau: f32, - OFCposPT: i32, - DecayDt: f32, -} -struct PulvParams { - DriveScale: f32, - FullDriveAct: f32, - DriveLayIndex: i32, - pad: f32, -} - -//////// import: "deep-path.go" - -//////// import: "enumgen.go" -const PathGTypesN: PathGTypes = 5; -const GlobalScalarVarsN: GlobalScalarVars = 58; -const GlobalVectorVarsN: GlobalVectorVars = 10; -const GPUVarsN: GPUVars = 23; -const LayerTypesN: LayerTypes = 30; -const LayerVarsN: LayerVars = 12; -const ViewTimesN: ViewTimes = 7; -const DAModTypesN: DAModTypes = 4; -const ValenceTypesN: ValenceTypes = 3; -const NeuronFlagsN: NeuronFlags = 9; -const NeuronVarsN: NeuronVars = 83; -const NeuronAvgVarsN: NeuronAvgVars = 7; -const NeuronIndexVarsN: NeuronIndexVars = 3; -const PathTypesN: PathTypes = 12; -const GPLayerTypesN: GPLayerTypes = 3; -const PoolIndexVarsN: PoolIndexVars = 4; -const PoolIntVarsN: PoolIntVars = 6; -const AvgMaxN: AvgMax = 2; -const 
AvgMaxPhasesN: AvgMaxPhases = 4; -const AvgMaxVarsN: AvgMaxVars = 7; -const SynapseVarsN: SynapseVars = 5; -const SynapseTraceVarsN: SynapseTraceVars = 3; -const SynapseIndexVarsN: SynapseIndexVars = 3; - -//////// import: "fsfffb-enumgen.go" -const InhibVarsN: InhibVars = 16; - -//////// import: "fsfffb-fsfffb.go" -struct GiParams { - On: i32, - Gi: f32, - FB: f32, - FSTau: f32, - SS: f32, - SSfTau: f32, - SSiTau: f32, - FS0: f32, - FFAvgTau: f32, - FFPrv: f32, - ClampExtMin: f32, - FSDt: f32, - SSfDt: f32, - SSiDt: f32, - FFAvgDt: f32, - pad: f32, -} - -//////// import: "fsfffb-inhib.go" -alias InhibVars = i32; //enums:enum -const FFsRaw: InhibVars = 0; -const FBsRaw: InhibVars = 1; -const GeExtRaw: InhibVars = 2; -const FFs: InhibVars = 3; -const FBs: InhibVars = 4; -const GeExts: InhibVars = 5; -const FSi: InhibVars = 6; -const SSi: InhibVars = 7; -const SSf: InhibVars = 8; -const FSGi: InhibVars = 9; -const SSGi: InhibVars = 10; -const TotalGi: InhibVars = 11; -const GiOrig: InhibVars = 12; -const LayGi: InhibVars = 13; -const FFAvg: InhibVars = 14; -const FFAvgPrv: InhibVars = 15; - -//////// import: "globals.go" -alias GlobalScalarVars = i32; //enums:enum -const GvRew: GlobalScalarVars = 0; -const GvHasRew: GlobalScalarVars = 1; -const GvRewPred: GlobalScalarVars = 2; -const GvPrevPred: GlobalScalarVars = 3; -const GvHadRew: GlobalScalarVars = 4; -const GvDA: GlobalScalarVars = 5; -const GvDAtonic: GlobalScalarVars = 6; -const GvACh: GlobalScalarVars = 7; -const GvNE: GlobalScalarVars = 8; -const GvSer: GlobalScalarVars = 9; -const GvAChRaw: GlobalScalarVars = 10; -const GvGoalMaint: GlobalScalarVars = 11; -const GvVSMatrixJustGated: GlobalScalarVars = 12; -const GvVSMatrixHasGated: GlobalScalarVars = 13; -const GvCuriosityPoolGated: GlobalScalarVars = 14; -const GvTime: GlobalScalarVars = 15; -const GvEffort: GlobalScalarVars = 16; -const GvUrgencyRaw: GlobalScalarVars = 17; -const GvUrgency: GlobalScalarVars = 18; -const GvHasPosUS: GlobalScalarVars = 19; 
-const GvHadPosUS: GlobalScalarVars = 20; -const GvNegUSOutcome: GlobalScalarVars = 21; -const GvHadNegUSOutcome: GlobalScalarVars = 22; -const GvPVposSum: GlobalScalarVars = 23; -const GvPVpos: GlobalScalarVars = 24; -const GvPVnegSum: GlobalScalarVars = 25; -const GvPVneg: GlobalScalarVars = 26; -const GvPVposEst: GlobalScalarVars = 27; -const GvPVposVar: GlobalScalarVars = 28; -const GvPVnegEst: GlobalScalarVars = 29; -const GvPVnegVar: GlobalScalarVars = 30; -const GvGoalDistEst: GlobalScalarVars = 31; -const GvGoalDistPrev: GlobalScalarVars = 32; -const GvProgressRate: GlobalScalarVars = 33; -const GvGiveUpUtility: GlobalScalarVars = 34; -const GvContUtility: GlobalScalarVars = 35; -const GvGiveUpTiming: GlobalScalarVars = 36; -const GvContTiming: GlobalScalarVars = 37; -const GvGiveUpProgress: GlobalScalarVars = 38; -const GvContProgress: GlobalScalarVars = 39; -const GvGiveUpSum: GlobalScalarVars = 40; -const GvContSum: GlobalScalarVars = 41; -const GvGiveUpProb: GlobalScalarVars = 42; -const GvGiveUp: GlobalScalarVars = 43; -const GvGaveUp: GlobalScalarVars = 44; -const GvVSPatchPos: GlobalScalarVars = 45; -const GvVSPatchPosThr: GlobalScalarVars = 46; -const GvVSPatchPosRPE: GlobalScalarVars = 47; -const GvVSPatchPosSum: GlobalScalarVars = 48; -const GvVSPatchPosPrev: GlobalScalarVars = 49; -const GvVSPatchPosVar: GlobalScalarVars = 50; -const GvLHbDip: GlobalScalarVars = 51; -const GvLHbBurst: GlobalScalarVars = 52; -const GvLHbPVDA: GlobalScalarVars = 53; -const GvCeMpos: GlobalScalarVars = 54; -const GvCeMneg: GlobalScalarVars = 55; -const GvVtaDA: GlobalScalarVars = 56; -const GvCaBinWts: GlobalScalarVars = 57; -const MaxGlobalVecN = 16; -alias GlobalVectorVars = i32; //enums:enum -const GvCost: GlobalVectorVars = 0; -const GvCostRaw: GlobalVectorVars = 1; -const GvUSneg: GlobalVectorVars = 2; -const GvUSnegRaw: GlobalVectorVars = 3; -const GvDrives: GlobalVectorVars = 4; -const GvUSpos: GlobalVectorVars = 5; -const GvVSPatchD1: GlobalVectorVars = 6; 
-const GvVSPatchD2: GlobalVectorVars = 7; -const GvOFCposPTMaint: GlobalVectorVars = 8; -const GvVSMatrixPoolGated: GlobalVectorVars = 9; - -//////// import: "hip_paths.go" -struct HipPathParams { - Hebb: f32, - Err: f32, - SAvgCor: f32, - SAvgThr: f32, - SNominal: f32, - pad: f32, - pad1: f32, - pad2: f32, -} - -//////// import: "inhib.go" -struct ActAvgParams { - Nominal: f32, - RTThr: f32, - AdaptGi: i32, - Offset: f32, - HiTol: f32, - LoTol: f32, - AdaptRate: f32, - pad: f32, -} -fn ActAvgParams_AvgFromAct(aa: ptr, avg: ptr, act: f32, dt: f32) { - if (act < 0.0001) { - return; - } - *avg += dt * (act - *avg); -} -struct InhibParams { - ActAvg: ActAvgParams, - Layer: GiParams, - Pool: GiParams, -} -fn PoolInhibDecay(pi: u32,di: u32, decay: f32) { - Pools[Index3D(TensorStrides[130], TensorStrides[131], TensorStrides[132], u32(pi), u32(di), u32(FFAvgPrv))] = Pools[Index3D(TensorStrides[130], TensorStrides[131], TensorStrides[132], u32(pi), u32(di), u32(FFAvg))]; // capture prior to decay - Pools[Index3D(TensorStrides[130], TensorStrides[131], TensorStrides[132], u32(pi), u32(di), u32(FFs))] -= decay * Pools[Index3D(TensorStrides[130], TensorStrides[131], TensorStrides[132], u32(pi), u32(di), u32(FFs))]; - Pools[Index3D(TensorStrides[130], TensorStrides[131], TensorStrides[132], u32(pi), u32(di), u32(FBs))] -= decay * Pools[Index3D(TensorStrides[130], TensorStrides[131], TensorStrides[132], u32(pi), u32(di), u32(FBs))]; - Pools[Index3D(TensorStrides[130], TensorStrides[131], TensorStrides[132], u32(pi), u32(di), u32(GeExts))] -= decay * Pools[Index3D(TensorStrides[130], TensorStrides[131], TensorStrides[132], u32(pi), u32(di), u32(GeExts))]; - Pools[Index3D(TensorStrides[130], TensorStrides[131], TensorStrides[132], u32(pi), u32(di), u32(FSi))] -= decay * Pools[Index3D(TensorStrides[130], TensorStrides[131], TensorStrides[132], u32(pi), u32(di), u32(FSi))]; - Pools[Index3D(TensorStrides[130], TensorStrides[131], TensorStrides[132], u32(pi), u32(di), u32(SSi))] -= 
decay * Pools[Index3D(TensorStrides[130], TensorStrides[131], TensorStrides[132], u32(pi), u32(di), u32(SSi))]; - Pools[Index3D(TensorStrides[130], TensorStrides[131], TensorStrides[132], u32(pi), u32(di), u32(SSf))] -= decay * Pools[Index3D(TensorStrides[130], TensorStrides[131], TensorStrides[132], u32(pi), u32(di), u32(SSf))]; - Pools[Index3D(TensorStrides[130], TensorStrides[131], TensorStrides[132], u32(pi), u32(di), u32(FSGi))] -= decay * Pools[Index3D(TensorStrides[130], TensorStrides[131], TensorStrides[132], u32(pi), u32(di), u32(FSGi))]; - Pools[Index3D(TensorStrides[130], TensorStrides[131], TensorStrides[132], u32(pi), u32(di), u32(SSGi))] -= decay * Pools[Index3D(TensorStrides[130], TensorStrides[131], TensorStrides[132], u32(pi), u32(di), u32(SSGi))]; - Pools[Index3D(TensorStrides[130], TensorStrides[131], TensorStrides[132], u32(pi), u32(di), u32(TotalGi))] -= decay * Pools[Index3D(TensorStrides[130], TensorStrides[131], TensorStrides[132], u32(pi), u32(di), u32(TotalGi))]; - Pools[Index3D(TensorStrides[130], TensorStrides[131], TensorStrides[132], u32(pi), u32(di), u32(FFAvg))] -= decay * Pools[Index3D(TensorStrides[130], TensorStrides[131], TensorStrides[132], u32(pi), u32(di), u32(FFAvg))]; -} - -//////// import: "init-layer.go" - -//////// import: "kinase-params.go" -struct CaDtParams { //types:add - MTau: f32, - PTau: f32, - DTau: f32, - MDt: f32, - PDt: f32, - DDt: f32, - pad: i32, - pad1: i32, -} -struct CaSpikeParams { - SpikeCaM: f32, - SpikeCaSyn: f32, - CaSynTau: f32, - CaSynDt: f32, - Dt: CaDtParams, -} - -//////// import: "layerparams.go" -struct LayerIndexes { - NPools: u32, - NeurSt: u32, - NNeurons: u32, - RecvSt: u32, - RecvN: u32, - SendSt: u32, - SendN: u32, - ExtsSt: u32, - ShpPlY: i32, - ShpPlX: i32, - ShpUnY: i32, - ShpUnX: i32, -} -struct LayerInhibIndexes { - Index1: i32, - Index2: i32, - Index3: i32, - Index4: i32, -} -struct LayerParams { - Type: LayerTypes, - Index: u32, - MaxData: u32, - PoolSt: u32, - Acts: ActParams, - 
Inhib: InhibParams, - LayInhib: LayerInhibIndexes, - Learn: LearnNeuronParams, - Bursts: BurstParams, - CT: CTParams, - Pulv: PulvParams, - Matrix: MatrixParams, - GP: GPParams, - LDT: LDTParams, - VTA: VTAParams, - RWPred: RWPredParams, - RWDa: RWDaParams, - TDInteg: TDIntegParams, - TDDa: TDDaParams, - Indexes: LayerIndexes, -} -fn LayerParams_PoolIndex(ly: ptr, pi: u32) -> u32 { - return (*ly).PoolSt + pi; -} - -//////// import: "layertypes.go" -alias LayerTypes = i32; //enums:enum -const SuperLayer: LayerTypes = 0; -const InputLayer: LayerTypes = 1; -const TargetLayer: LayerTypes = 2; -const CompareLayer: LayerTypes = 3; -const CTLayer: LayerTypes = 4; -const PulvinarLayer: LayerTypes = 5; -const TRNLayer: LayerTypes = 6; -const PTMaintLayer: LayerTypes = 7; -const PTPredLayer: LayerTypes = 8; -const MatrixLayer: LayerTypes = 9; -const STNLayer: LayerTypes = 10; -const GPLayer: LayerTypes = 11; -const BGThalLayer: LayerTypes = 12; -const VSGatedLayer: LayerTypes = 13; -const BLALayer: LayerTypes = 14; -const CeMLayer: LayerTypes = 15; -const VSPatchLayer: LayerTypes = 16; -const LHbLayer: LayerTypes = 17; -const DrivesLayer: LayerTypes = 18; -const UrgencyLayer: LayerTypes = 19; -const USLayer: LayerTypes = 20; -const PVLayer: LayerTypes = 21; -const LDTLayer: LayerTypes = 22; -const VTALayer: LayerTypes = 23; -const RewLayer: LayerTypes = 24; -const RWPredLayer: LayerTypes = 25; -const RWDaLayer: LayerTypes = 26; -const TDPredLayer: LayerTypes = 27; -const TDIntegLayer: LayerTypes = 28; -const TDDaLayer: LayerTypes = 29; - -//////// import: "layervars.go" -alias LayerVars = i32; //enums:enum -const LayerActMAvg: LayerVars = 0; -const LayerActPAvg: LayerVars = 1; -const LayerAvgMaxGeM: LayerVars = 2; -const LayerAvgMaxGiM: LayerVars = 3; -const LayerGiMult: LayerVars = 4; -const LayerPhaseDiff: LayerVars = 5; -const LayerPhaseDiffAvg: LayerVars = 6; -const LayerPhaseDiffVar: LayerVars = 7; -const LayerRT: LayerVars = 8; -const GatedRT: LayerVars = 9; -const 
LayerRewPredPos: LayerVars = 10; -const LayerRewPredNeg: LayerVars = 11; - -//////// import: "learn-layer.go" - -//////// import: "learn-net.go" - -//////// import: "learn-path.go" - -//////// import: "learn.go" -struct LearnCaParams { - Norm: f32, - SpikeVGCC: i32, - SpikeVgccCa: f32, - VgccTau: f32, - Dt: CaDtParams, - VgccDt: f32, - NormInv: f32, - pad: i32, - pad2: i32, -} -struct TrgAvgActParams { - GiBaseInit: f32, - RescaleOn: i32, - ErrLRate: f32, - SynScaleRate: f32, - SubMean: f32, - Permute: i32, - Pool: i32, - pad: i32, - TrgRange: F32, -} -struct RLRateParams { - On: i32, - SigmoidLinear: i32, - SigmoidMin: f32, - Diff: i32, - SpikeThr: f32, - DiffThr: f32, - Min: f32, - pad: i32, -} -struct LearnNeuronParams { - CaLearn: LearnCaParams, - CaSpike: CaSpikeParams, - LearnNMDA: NMDAParams, - TrgAvgAct: TrgAvgActParams, - RLRate: RLRateParams, - NeuroMod: NeuroModParams, -} -struct SWtInitParams { - SPct: f32, - Mean: f32, - Var: f32, - Sym: i32, -} -struct SWtAdaptParams { - On: i32, - LRate: f32, - SubMean: f32, - SigGain: f32, -} -struct SWtParams { - Init: SWtInitParams, - Adapt: SWtAdaptParams, - Limit: F32, -} -struct LRateParams { - Base: f32, - Sched: f32, - Mod: f32, - Eff: f32, -} -struct DWtParams { - Trace: i32, - Tau: f32, - CaScale: f32, - CaPScale: f32, - SubMean: f32, - LearnThr: f32, - Dt: f32, - pad: f32, -} -struct HebbParams { - On: i32, - Up: f32, - Down: f32, - pad: f32, -} -struct LearnSynParams { - Learn: i32, - pad: i32, - pad1: i32, - pad2: i32, - LRate: LRateParams, - DWt: DWtParams, - Hebb: HebbParams, -} - -//////// import: "looper.go" -alias ViewTimes = i32; //enums:enum -const Cycle: ViewTimes = 0; -const FastSpike: ViewTimes = 1; -const Gamma: ViewTimes = 2; -const Beta: ViewTimes = 3; -const Alpha: ViewTimes = 4; -const Phase: ViewTimes = 5; -const Theta: ViewTimes = 6; - -//////// import: "math32-fastexp.go" - -//////// import: "minmax-avgmax.go" -const MaxFloat32: f32 = 3.402823466e+38; -const MinFloat32: f32 = 
1.175494351e-38; -struct AvgMax32 { - Avg: f32, - Max: f32, - Sum: f32, - MaxIndex: i32, - N: i32, - pad: i32, - pad1: i32, - pad2: i32, -} - -//////// import: "minmax-minmax32.go" -struct F32 { - Min: f32, - Max: f32, - pad: i32, - pad1: i32, // for gpu use -} - -//////// import: "network.go" -struct NetworkIndexes { - MaxData: u32, - MaxDelay: u32, - NCaBins: i32, - NLayers: u32, - NNeurons: u32, - NPools: u32, - NPaths: u32, - NSyns: u32, - RubiconNPosUSs: u32, - RubiconNCosts: u32, - RubiconNNegUSs: u32, - GPUMaxBuffFloats: u32, - GPUSynCaBanks: u32, - pad: u32, - pad1: u32, - pad2: u32, -} - -//////// import: "neuromod.go" -alias DAModTypes = i32; //enums:enum -const NoDAMod: DAModTypes = 0; -const D1Mod: DAModTypes = 1; -const D2Mod: DAModTypes = 2; -const D1AbsMod: DAModTypes = 3; -alias ValenceTypes = i32; //enums:enum -const Positive: ValenceTypes = 0; -const Negative: ValenceTypes = 1; -const Cost: ValenceTypes = 2; -struct NeuroModParams { - DAMod: DAModTypes, - Valence: ValenceTypes, - DAModGain: f32, - DALRateSign: i32, - DALRateMod: f32, - AChLRateMod: f32, - AChDisInhib: f32, - BurstGain: f32, - DipGain: f32, - pad: f32, - pad1: f32, - pad2: f32, -} - -//////// import: "neuron.go" -alias NeuronFlags = i32; //enums:enum -const NeuronOff: NeuronFlags = 1; -const NeuronHasExt: NeuronFlags = 2; -const NeuronHasTarg: NeuronFlags = 4; -const NeuronHasCmpr: NeuronFlags = 8; -alias NeuronVars = i32; //enums:enum -const Spike: NeuronVars = 0; -const Spiked: NeuronVars = 1; -const Act: NeuronVars = 2; -const ActInt: NeuronVars = 3; -const Ge: NeuronVars = 4; -const Gi: NeuronVars = 5; -const Gk: NeuronVars = 6; -const Inet: NeuronVars = 7; -const Vm: NeuronVars = 8; -const VmDend: NeuronVars = 9; -const ISI: NeuronVars = 10; -const ISIAvg: NeuronVars = 11; -const Ext: NeuronVars = 12; -const Target: NeuronVars = 13; -const CaM: NeuronVars = 14; -const CaP: NeuronVars = 15; -const CaD: NeuronVars = 16; -const CaDPrev: NeuronVars = 17; -const CaSyn: NeuronVars = 
18; -const LearnCa: NeuronVars = 19; -const LearnCaM: NeuronVars = 20; -const LearnCaP: NeuronVars = 21; -const LearnCaD: NeuronVars = 22; -const CaDiff: NeuronVars = 23; -const RLRate: NeuronVars = 24; -const GnmdaSyn: NeuronVars = 25; -const Gnmda: NeuronVars = 26; -const GnmdaLrn: NeuronVars = 27; -const GnmdaMaint: NeuronVars = 28; -const NmdaCa: NeuronVars = 29; -const Gvgcc: NeuronVars = 30; -const VgccM: NeuronVars = 31; -const VgccH: NeuronVars = 32; -const VgccCa: NeuronVars = 33; -const VgccCaInt: NeuronVars = 34; -const Burst: NeuronVars = 35; -const BurstPrv: NeuronVars = 36; -const CtxtGe: NeuronVars = 37; -const CtxtGeRaw: NeuronVars = 38; -const CtxtGeOrig: NeuronVars = 39; -const GgabaB: NeuronVars = 40; -const GABAB: NeuronVars = 41; -const GABABx: NeuronVars = 42; -const Gak: NeuronVars = 43; -const SSGiDend: NeuronVars = 44; -const GknaMed: NeuronVars = 45; -const GknaSlow: NeuronVars = 46; -const Gkir: NeuronVars = 47; -const KirM: NeuronVars = 48; -const Gsk: NeuronVars = 49; -const SKCaIn: NeuronVars = 50; -const SKCaR: NeuronVars = 51; -const SKCaM: NeuronVars = 52; -const Gmahp: NeuronVars = 53; -const MahpN: NeuronVars = 54; -const Gsahp: NeuronVars = 55; -const SahpCa: NeuronVars = 56; -const SahpN: NeuronVars = 57; -const ActM: NeuronVars = 58; -const ActP: NeuronVars = 59; -const Beta1: NeuronVars = 60; -const Beta2: NeuronVars = 61; -const CaPMax: NeuronVars = 62; -const CaPMaxCa: NeuronVars = 63; -const GeNoise: NeuronVars = 64; -const GeNoiseP: NeuronVars = 65; -const GiNoise: NeuronVars = 66; -const GiNoiseP: NeuronVars = 67; -const GeExt: NeuronVars = 68; -const GeRaw: NeuronVars = 69; -const GeSyn: NeuronVars = 70; -const GiRaw: NeuronVars = 71; -const GiSyn: NeuronVars = 72; -const GeInt: NeuronVars = 73; -const GeIntNorm: NeuronVars = 74; -const GiInt: NeuronVars = 75; -const GModRaw: NeuronVars = 76; -const GModSyn: NeuronVars = 77; -const SMaintP: NeuronVars = 78; -const GMaintRaw: NeuronVars = 79; -const GMaintSyn: NeuronVars 
= 80; -const NeurFlags: NeuronVars = 81; -const CaBins: NeuronVars = 82; -alias NeuronAvgVars = i32; //enums:enum -const ActAvg: NeuronAvgVars = 0; -const AvgPct: NeuronAvgVars = 1; -const TrgAvg: NeuronAvgVars = 2; -const DTrgAvg: NeuronAvgVars = 3; -const AvgDif: NeuronAvgVars = 4; -const GeBase: NeuronAvgVars = 5; -const GiBase: NeuronAvgVars = 6; -alias NeuronIndexVars = i32; //enums:enum -const NrnNeurIndex: NeuronIndexVars = 0; -const NrnLayIndex: NeuronIndexVars = 1; -const NrnSubPool: NeuronIndexVars = 2; - -//////// import: "pathparams.go" -const StartOff: i32 = 0; -const Nitems: i32 = 1; -const StartNN: i32 = 2; -struct StartN { - Start: u32, - N: u32, - pad: u32, - pad1: u32, // todo: see if we can do without these? -} -struct PathIndexes { - RecvLayer: u32, - RecvNeurSt: u32, - RecvNeurN: u32, - SendLayer: u32, - SendNeurSt: u32, - SendNeurN: u32, - SynapseSt: u32, - SendConSt: u32, - RecvConSt: u32, - RecvSynSt: u32, - NPathNeurSt: u32, - pad: u32, -} -struct GScaleValues { - Scale: f32, - Rel: f32, - pad: f32, - pad1: f32, -} -struct PathParams { - Type: PathTypes, - Index: u32, - pad: i32, - pad1: i32, - Indexes: PathIndexes, - Com: SynComParams, - PathScale: PathScaleParams, - SWts: SWtParams, - Learn: LearnSynParams, - GScale: GScaleValues, - RLPred: RLPredPathParams, - Matrix: MatrixPathParams, - BLA: BLAPathParams, - Hip: HipPathParams, -} - -//////// import: "pathtypes.go" -alias PathTypes = i32; //enums:enum -const ForwardPath: PathTypes = 0; -const BackPath: PathTypes = 1; -const LateralPath: PathTypes = 2; -const InhibPath: PathTypes = 3; -const CTCtxtPath: PathTypes = 4; -const RWPath: PathTypes = 5; -const TDPredPath: PathTypes = 6; -const BLAPath: PathTypes = 7; -const HipPath: PathTypes = 8; -const VSPatchPath: PathTypes = 9; -const VSMatrixPath: PathTypes = 10; -const DSMatrixPath: PathTypes = 11; - -//////// import: "pcore-layer.go" -struct MatrixParams { - GateThr: f32, - IsVS: i32, - OtherMatrixIndex: i32, - ThalLay1Index: i32, - 
ThalLay2Index: i32, - ThalLay3Index: i32, - ThalLay4Index: i32, - ThalLay5Index: i32, - ThalLay6Index: i32, - pad: i32, - pad1: i32, - pad2: i32, -} -alias GPLayerTypes = i32; //enums:enum -const GPePr: GPLayerTypes = 0; -const GPeAk: GPLayerTypes = 1; -const GPi: GPLayerTypes = 2; -struct GPParams { - GPType: GPLayerTypes, - pad: u32, - pad1: u32, - pad2: u32, -} - -//////// import: "pcore-path.go" -struct MatrixPathParams { - Credit: f32, - BasePF: f32, - Delta: f32, - VSRewLearn: i32, -} - -//////// import: "pool.go" -alias PoolIndexVars = i32; //enums:enum -const PoolNeurSt: PoolIndexVars = 0; -const PoolNeurEd: PoolIndexVars = 1; -const PoolLayerIdx: PoolIndexVars = 2; -const PoolIsLayer: PoolIndexVars = 3; -alias PoolIntVars = i32; //enums:enum -const Clamped: PoolIntVars = 0; -const PoolGated: PoolIntVars = 1; -const FFsRawInt: PoolIntVars = 2; -const FBsRawInt: PoolIntVars = 3; -const GeExtRawInt: PoolIntVars = 4; -const PoolIntAvgMaxStart: PoolIntVars = 5; -alias AvgMax = i32; //enums:enum -const Avg: AvgMax = 0; -const Max: AvgMax = 1; -alias AvgMaxPhases = i32; //enums:enum -trim-prefix AM -const AMCycle: AvgMaxPhases = 0; -const AMMinus: AvgMaxPhases = 1; -const AMPlus: AvgMaxPhases = 2; -const AMPrev: AvgMaxPhases = 3; -alias AvgMaxVars = i32; //enums:enum -trim-prefix AM -const AMCaP: AvgMaxVars = 0; -const AMCaD: AvgMaxVars = 1; -const AMCaPMax: AvgMaxVars = 2; -const AMAct: AvgMaxVars = 3; -const AMGeInt: AvgMaxVars = 4; -const AMGiInt: AvgMaxVars = 5; -const AMAvgDif: AvgMaxVars = 6; -const poolFloatAvgMaxStart = InhibVarsN; -const PoolVarsN = poolFloatAvgMaxStart + InhibVars(i32(AvgMaxVarsN)*i32(AvgMaxN)*i32(AvgMaxPhasesN)); -const PoolIntVarsTot = PoolIntAvgMaxStart + PoolIntVars(i32(AvgMaxVarsN)*i32(AvgMaxN)); -const avgMaxToNeuron = array(CaP, CaD, CaPMax, Act, GeInt, GiInt); -fn AvgMaxVarIndex(vr: AvgMaxVars, phase: AvgMaxPhases, am: AvgMax) -> u32 { - return u32(poolFloatAvgMaxStart) + u32(vr)*u32(AvgMaxN)*u32(AvgMaxPhasesN) + 
u32(phase)*u32(AvgMaxN) + u32(am); -} -fn PoolAvgMax(vr: AvgMaxVars, phase: AvgMaxPhases, am: AvgMax, pi: u32,di: u32) -> f32 { - return Pools[Index3D(TensorStrides[130], TensorStrides[131], TensorStrides[132], - u32(pi), u32(di), u32(AvgMaxVarIndex(vr, phase, am)))]; -} - -//////// import: "rand.go" -alias RandFunIndex = u32; -const RandFunActPGe: RandFunIndex = 0; -const RandFunActPGi: RandFunIndex = 1; -const RandFunActSMaintP: RandFunIndex = 2; -const RandFunIndexN: RandFunIndex = 3; - -//////// import: "rl-layer.go" -struct RWPredParams { - PredRange: F32, -} -struct RWDaParams { - TonicGe: f32, - RWPredLayIndex: i32, - pad: u32, - pad1: u32, -} -struct TDIntegParams { - Discount: f32, - PredGain: f32, - TDPredLayIndex: i32, - pad: u32, -} -struct TDDaParams { - TonicGe: f32, - TDIntegLayIndex: i32, - pad: u32, - pad1: u32, -} - -//////// import: "rl-path.go" -struct RLPredPathParams { - OppSignLRate: f32, - DaTol: f32, - pad: f32, - pad1: f32, -} - -//////// import: "rubicon-layer.go" -struct LDTParams { - SrcThr: f32, - Rew: i32, - MaintInhib: f32, - SrcLay1Index: i32, - SrcLay2Index: i32, - SrcLay3Index: i32, - SrcLay4Index: i32, - pad: f32, -} -struct VTAParams { - CeMGain: f32, - LHbGain: f32, - AChThr: f32, - pad: f32, -} - -//////// import: "rubicon-path.go" -struct BLAPathParams { - NegDeltaLRate: f32, - AChThr: f32, - USTrace: f32, - pad: f32, -} - -//////// import: "rubicon.go" - -//////// import: "stats.go" - -//////// import: "synapse.go" -alias SynapseVars = i32; //enums:enum -const Wt: SynapseVars = 0; -const LWt: SynapseVars = 1; -const SWt: SynapseVars = 2; -const DWt: SynapseVars = 3; -const DSWt: SynapseVars = 4; -alias SynapseTraceVars = i32; //enums:enum -const Tr: SynapseTraceVars = 0; -const DTr: SynapseTraceVars = 1; -const DiDWt: SynapseTraceVars = 2; -alias SynapseIndexVars = i32; //enums:enum -const SynRecvIndex: SynapseIndexVars = 0; -const SynSendIndex: SynapseIndexVars = 1; -const SynPathIndex: SynapseIndexVars = 2; - -//////// 
import: "slrand.wgsl" -fn Philox2x32round(counter: su64, key: u32) -> su64 { - let mul = Uint32Mul64(u32(0xD256D193), counter.x); - var ctr: su64; - ctr.x = mul.y ^ key ^ counter.y; - ctr.y = mul.x; - return ctr; -} -fn Philox2x32bumpkey(key: u32) -> u32 { - return key + u32(0x9E3779B9); -} -fn Philox2x32(counter: su64, key: u32) -> vec2 { - var ctr = Philox2x32round(counter, key); // 1 - var ky = Philox2x32bumpkey(key); - ctr = Philox2x32round(ctr, ky); // 2 - ky = Philox2x32bumpkey(ky); - ctr = Philox2x32round(ctr, ky); // 3 - ky = Philox2x32bumpkey(ky); - ctr = Philox2x32round(ctr, ky); // 4 - ky = Philox2x32bumpkey(ky); - ctr = Philox2x32round(ctr, ky); // 5 - ky = Philox2x32bumpkey(ky); - ctr = Philox2x32round(ctr, ky); // 6 - ky = Philox2x32bumpkey(ky); - ctr = Philox2x32round(ctr, ky); // 7 - ky = Philox2x32bumpkey(ky); - ctr = Philox2x32round(ctr, ky); // 8 - ky = Philox2x32bumpkey(ky); - ctr = Philox2x32round(ctr, ky); // 9 - ky = Philox2x32bumpkey(ky); - return Philox2x32round(ctr, ky); // 10 -} -fn RandUint32Vec2(counter: su64, funcIndex: u32, key: u32) -> vec2 { - return Philox2x32(Uint64Add32(counter, funcIndex), key); -} -fn RandUint32(counter: su64, funcIndex: u32, key: u32) -> u32 { - return Philox2x32(Uint64Add32(counter, funcIndex), key).x; -} -fn RandFloat32Vec2(counter: su64, funcIndex: u32, key: u32) -> vec2 { - return Uint32ToFloat32Vec2(RandUint32Vec2(counter, funcIndex, key)); -} -fn RandFloat32(counter: su64, funcIndex: u32, key: u32) -> f32 { - return Uint32ToFloat32(RandUint32(counter, funcIndex, key)); -} -fn RandFloat32Range11Vec2(counter: su64, funcIndex: u32, key: u32) -> vec2 { - return Uint32ToFloat32Vec2(RandUint32Vec2(counter, funcIndex, key)); -} -fn RandFloat32Range11(counter: su64, funcIndex: u32, key: u32) -> f32 { - return Uint32ToFloat32Range11(RandUint32(counter, funcIndex, key)); -} -fn RandBoolP(counter: su64, funcIndex: u32, key: u32, p: f32) -> bool { - return (RandFloat32(counter, funcIndex, key) < p); -} -fn 
sincospi(x: f32) -> vec2 { - let PIf = 3.1415926535897932; - var r: vec2; - r.x = cos(PIf*x); - r.y = sin(PIf*x); - return r; -} -fn RandFloat32NormVec2(counter: su64, funcIndex: u32, key: u32) -> vec2 { - let ur = RandUint32Vec2(counter, funcIndex, key); - var f = sincospi(Uint32ToFloat32Range11(ur.x)); - let r = sqrt(-2.0 * log(Uint32ToFloat32(ur.y))); // guaranteed to avoid 0. - return f * r; -} -fn RandFloat32Norm(counter: su64, funcIndex: u32, key: u32) -> f32 { - return RandFloat32Vec2(counter, funcIndex, key).x; -} -fn RandUint32N(counter: su64, funcIndex: u32, key: u32, n: u32) -> u32 { - let v = RandFloat32(counter, funcIndex, key); - return u32(v * f32(n)); -} -struct RandCounter { - Counter: su64, - HiSeed: u32, - pad: u32, -} -fn RandCounter_Reset(ct: ptr) { - (*ct).Counter.x = u32(0); - (*ct).Counter.y = (*ct).HiSeed; -} -fn RandCounter_Seed(ct: ptr, seed: u32) { - (*ct).HiSeed = seed; - RandCounter_Reset(ct); -} -fn RandCounter_Add(ct: ptr, inc: u32) { - (*ct).Counter = Uint64Add32((*ct).Counter, inc); -} - -//////// import: "sltype.wgsl" -alias su64 = vec2; -fn Uint32Mul64(a: u32, b: u32) -> su64 { - let LOMASK = (((u32(1))<<16)-1); - var r: su64; - r.x = a * b; /* full low multiply */ - let ahi = a >> 16; - let alo = a & LOMASK; - let bhi = b >> 16; - let blo = b & LOMASK; - let ahbl = ahi * blo; - let albh = alo * bhi; - let ahbl_albh = ((ahbl&LOMASK) + (albh&LOMASK)); - var hit = ahi*bhi + (ahbl>>16) + (albh>>16); - hit += ahbl_albh >> 16; /* carry from the sum of lo(ahbl) + lo(albh) ) */ - /* carry from the sum with alo*blo */ - if ((r.x >> u32(16)) < (ahbl_albh&LOMASK)) { - hit += u32(1); - } - r.y = hit; - return r; -} -/* -fn Uint32Mul64(a: u32, b: u32) -> su64 { - return su64(a) * su64(b); -} -*/ -fn Uint64Add32(a: su64, b: u32) -> su64 { - if (b == 0) { - return a; - } - var s = a; - if (s.x > u32(0xffffffff) - b) { - s.y++; - s.x = (b - 1) - (u32(0xffffffff) - s.x); - } else { - s.x += b; - } - return s; -} -fn Uint64Incr(a: su64) -> su64 { 
- var s = a; - if(s.x == 0xffffffff) { - s.y++; - s.x = u32(0); - } else { - s.x++; - } - return s; -} -fn Uint32ToFloat32(val: u32) -> f32 { - let factor = f32(1.0) / (f32(u32(0xffffffff)) + f32(1.0)); - let halffactor = f32(0.5) * factor; - var f = f32(val) * factor + halffactor; - if (f == 1.0) { // exclude 1 - return bitcast(0x3F7FFFFF); - } - return f; -} -fn Uint32ToFloat32Vec2(val: vec2) -> vec2 { - var r: vec2; - r.x = Uint32ToFloat32(val.x); - r.y = Uint32ToFloat32(val.y); - return r; -} -fn Uint32ToFloat32Range11(val: u32) -> f32 { - let factor = f32(1.0) / (f32(i32(0x7fffffff)) + f32(1.0)); - let halffactor = f32(0.5) * factor; - return (f32(val) * factor + halffactor); -} -fn Uint32ToFloat32Range11Vec2(val: vec2) -> vec2 { - var r: vec2; - r.x = Uint32ToFloat32Range11(val.x); - r.y = Uint32ToFloat32Range11(val.y); - return r; -} \ No newline at end of file diff --git a/axon/shaders/NewStateNeuron.wgsl b/axon/shaders/NewStateNeuron.wgsl index 6b70a9fa2..15d2c1973 100644 --- a/axon/shaders/NewStateNeuron.wgsl +++ b/axon/shaders/NewStateNeuron.wgsl @@ -25,7 +25,7 @@ var RecvPathIxs: array; var PathRecvCon: array; @group(1) @binding(4) var RecvSynIxs: array; -// // Ctx is the current context state (one only). +// // Ctx is the current context state (one only). This is read-only except in // specific kernels. 
@group(2) @binding(0) var Ctx: array; @group(2) @binding(1) @@ -78,13 +78,13 @@ fn Index3D(s0: u32, s1: u32, s2: u32, i0: u32, i1: u32, i2: u32) -> u32 { //////// import: "vars.go" //////// import: "act-layer.go" -fn LayerParams_NewStateNeuron(ly: ptr, ctx: ptr, ni: u32,di: u32) { +fn LayerParams_NewStateNeuron(ly: LayerParams, ctx: Context, ni: u32,di: u32) { Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(BurstPrv))] = Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(Burst))]; Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(CaDPrev))] = Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(CaD))]; Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(CaPMax))] = 0.0; Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(CaPMaxCa))] = 0.0; - ActParams_DecayState(&(*ly).Acts, ctx, ni, di, (*ly).Acts.Decay.Act, (*ly).Acts.Decay.Glong, (*ly).Acts.Decay.AHP); - ActParams_KNaNewState(&(*ly).Acts, ctx, ni, di); + ActParams_DecayState(ly.Acts, ctx, ni, di, ly.Acts.Decay.Act, ly.Acts.Decay.Glong, ly.Acts.Decay.AHP); + ActParams_KNaNewState(ly.Acts, ctx, ni, di); var mx = NetworkIxs[0].NCaBins; for (var i=0; i, ctx: ptr= NetworkIxs[0].NNeurons) { return; } - var di = Context_DataIndex(&ctx, i); + var di = Context_DataIndex(ctx, i); var li = NeuronIxs[Index2D(TensorStrides[10], TensorStrides[11], u32(ni), u32(NrnLayIndex))]; - var layers=Layers[li]; LayerParams_NewStateNeuron(&layers, &ctx, ni, di); - Ctx[0] = ctx; + let layers=Layers[li]; LayerParams_NewStateNeuron(layers, ctx, ni, di); } //////// import: "act-path.go" @@ -249,7 +248,7 @@ struct ActParams { SMaint: SMaintParams, PopCode: PopCodeParams, } -fn ActParams_DecayLearnCa(ac: ptr, ctx: ptr, ni: u32,di: u32, decay: f32) { +fn ActParams_DecayLearnCa(ac: 
ActParams, ctx: Context, ni: u32,di: u32, decay: f32) { Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(GnmdaLrn))] -= decay * Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(GnmdaLrn))]; Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(NmdaCa))] -= decay * Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(NmdaCa))]; Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(VgccCa))] -= decay * Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(VgccCa))]; @@ -265,7 +264,7 @@ fn ActParams_DecayLearnCa(ac: ptr, ctx: ptr, ctx: ptr, ni: u32,di: u32, decay: f32) { +fn ActParams_DecayAHP(ac: ActParams, ctx: Context, ni: u32,di: u32, decay: f32) { Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(MahpN))] -= decay * Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(MahpN))]; Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(Gmahp))] -= decay * Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(Gmahp))]; Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(SahpCa))] -= decay * Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(SahpCa))]; @@ -273,24 +272,24 @@ fn ActParams_DecayAHP(ac: ptr, ctx: ptr, n Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(Gsahp))] -= decay * Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(Gsahp))]; Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(GknaMed))] -= decay * 
Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(GknaMed))]; Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(GknaSlow))] -= decay * Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(GknaSlow))]; - var kirMrest = (*ac).Kir.Mrest; + var kirMrest = ac.Kir.Mrest; Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(KirM))] += decay * (kirMrest - Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(KirM))]); Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(Gkir))] -= decay * Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(Gkir))]; } -fn ActParams_DecayState(ac: ptr, ctx: ptr, ni: u32,di: u32, decay: f32,glong: f32,ahp: f32) { +fn ActParams_DecayState(ac: ActParams, ctx: Context, ni: u32,di: u32, decay: f32,glong: f32,ahp: f32) { Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(ISIAvg))] = -1.0; - Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(ActInt))] = (*ac).Init.Act; + Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(ActInt))] = ac.Init.Act; Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(Spiked))] = 0.0; if (decay > 0) { Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(Spike))] = 0.0; - Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(Act))] -= decay * (Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(Act))] - (*ac).Init.Act); - Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(ActInt))] -= 
decay * (Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(ActInt))] - (*ac).Init.Act); + Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(Act))] -= decay * (Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(Act))] - ac.Init.Act); + Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(ActInt))] -= decay * (Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(ActInt))] - ac.Init.Act); Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(GeSyn))] -= decay * (Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(GeSyn))] - NeuronAvgs[Index2D(TensorStrides[80], TensorStrides[81], u32(ni), u32(GeBase))]); Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(Ge))] -= decay * (Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(Ge))] - NeuronAvgs[Index2D(TensorStrides[80], TensorStrides[81], u32(ni), u32(GeBase))]); Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(Gi))] -= decay * (Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(Gi))] - NeuronAvgs[Index2D(TensorStrides[80], TensorStrides[81], u32(ni), u32(GiBase))]); Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(Gk))] -= decay * Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(Gk))]; - Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(Vm))] -= decay * (Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(Vm))] - (*ac).Init.Vm); + Neurons[Index3D(TensorStrides[70], 
TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(Vm))] -= decay * (Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(Vm))] - ac.Init.Vm); Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(GeNoise))] -= decay * Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(GeNoise))]; Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(GiNoise))] -= decay * Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(GiNoise))]; Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(GiSyn))] -= decay * Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(GiSyn))]; @@ -298,7 +297,7 @@ fn ActParams_DecayState(ac: ptr, ctx: ptr, Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(GiInt))] -= decay * Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(GiInt))]; Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(GeIntNorm))] -= decay * Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(GeIntNorm))]; } - Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(VmDend))] -= glong * (Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(VmDend))] - (*ac).Init.Vm); + Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(VmDend))] -= glong * (Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(VmDend))] - ac.Init.Vm); if (ahp > 0) { ActParams_DecayAHP(ac, ctx, ni, di, ahp); } @@ -314,8 +313,8 @@ fn ActParams_DecayState(ac: ptr, ctx: ptr, 
Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(VgccH))] -= glong * Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(VgccH))]; Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(Gak))] -= glong * Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(Gak))]; Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(Gsk))] -= glong * Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(Gsk))]; - if ((*ac).Decay.LearnCa > 0) { // learning-based Ca values -- not usual - ActParams_DecayLearnCa(ac, ctx, ni, di, (*ac).Decay.LearnCa); + if (ac.Decay.LearnCa > 0) { // learning-based Ca values -- not usual + ActParams_DecayLearnCa(ac, ctx, ni, di, ac.Decay.LearnCa); } Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(Inet))] = 0.0; Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(GeRaw))] = 0.0; @@ -327,9 +326,9 @@ fn ActParams_DecayState(ac: ptr, ctx: ptr, Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(GeExt))] = 0.0; Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(CtxtGeOrig))] -= glong * Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(CtxtGeOrig))]; } -fn ActParams_KNaNewState(ac: ptr, ctx: ptr, ni: u32,di: u32) { - if ((*ac).KNa.On == 1 && (*ac).KNa.TrialSlow == 1) { - Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(GknaSlow))] += (*ac).KNa.Slow.Max * Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(CaDPrev))]; +fn ActParams_KNaNewState(ac: ActParams, ctx: Context, ni: u32,di: u32) { + if (ac.KNa.On 
== 1 && ac.KNa.TrialSlow == 1) { + Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(GknaSlow))] += ac.KNa.Slow.Max * Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(CaDPrev))]; } } @@ -481,11 +480,11 @@ struct Context { //types:add -setters SlowCounter: i32, RandCounter: RandCounter, } -fn Context_ItemIndex(ctx: ptr, idx: u32) -> u32 { - return idx / (*ctx).NData; +fn Context_ItemIndex(ctx: Context, idx: u32) -> u32 { + return idx / ctx.NData; } -fn Context_DataIndex(ctx: ptr, idx: u32) -> u32 { - return idx % (*ctx).NData; +fn Context_DataIndex(ctx: Context, idx: u32) -> u32 { + return idx % ctx.NData; } //////// import: "deep-layer.go" diff --git a/axon/shaders/PlusPhaseNeuron.wgsl b/axon/shaders/PlusPhaseNeuron.wgsl index 42473a99a..c4d7a9d69 100644 --- a/axon/shaders/PlusPhaseNeuron.wgsl +++ b/axon/shaders/PlusPhaseNeuron.wgsl @@ -25,7 +25,7 @@ var RecvPathIxs: array; var PathRecvCon: array; @group(1) @binding(4) var RecvSynIxs: array; -// // Ctx is the current context state (one only). +// // Ctx is the current context state (one only). This is read-only except in // specific kernels. 
@group(2) @binding(0) var Ctx: array; @group(2) @binding(1) @@ -78,7 +78,7 @@ fn Index3D(s0: u32, s1: u32, s2: u32, i0: u32, i1: u32, i2: u32) -> u32 { //////// import: "vars.go" //////// import: "act-layer.go" -fn LayerParams_PlusPhaseNeuron(ly: ptr, ctx: ptr, ni: u32,di: u32) { +fn LayerParams_PlusPhaseNeuron(ly: LayerParams, ctx: Context, ni: u32,di: u32) { var pi = LayerParams_PoolIndex(ly, NeuronIxs[Index2D(TensorStrides[10], TensorStrides[11], u32(ni), u32(NrnSubPool))]); var lpi = LayerParams_PoolIndex(ly, u32(u32(0))); Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(ActP))] = Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(ActInt))]; @@ -86,23 +86,23 @@ fn LayerParams_PlusPhaseNeuron(ly: ptr, ctx: ptr 0; - switch ((*ly).Type) { + switch (ly.Type) { case BLALayer: { - dlr = RLRateParams_RLRateDiff(&(*ly).Learn.RLRate, nrnCaP, Neurons[Index3D(TensorStrides[70], TensorStrides[71], // delta on previous trial + dlr = RLRateParams_RLRateDiff(ly.Learn.RLRate, nrnCaP, Neurons[Index3D(TensorStrides[70], TensorStrides[71], // delta on previous trial TensorStrides[72], u32(ni), u32(di), u32(CaDPrev))]); - if (!NeuroModParams_IsBLAExt(&(*ly).Learn.NeuroMod) && PoolIxs[Index2D(TensorStrides[0], TensorStrides[1], u32(pi), u32(PoolNeurSt))] == 0) { // first pool + if (!NeuroModParams_IsBLAExt(ly.Learn.NeuroMod) && PoolIxs[Index2D(TensorStrides[0], TensorStrides[1], u32(pi), u32(PoolNeurSt))] == 0) { // first pool dlr = f32(0); // first pool is novelty / curiosity -- no learn } } case VSPatchLayer: { da = GlobalScalars[Index2D(TensorStrides[100], TensorStrides[101], // our own personal u32(GvVSPatchPosRPE), u32(di))]; - modlr = NeuroModParams_LRMod(&(*ly).Learn.NeuroMod, da, ach); - mlr = RLRateParams_RLRateSigDeriv(&(*ly).Learn.RLRate, Neurons[Index3D(TensorStrides[70], TensorStrides[71], // note: don't have proper max here + modlr = NeuroModParams_LRMod(ly.Learn.NeuroMod, da, 
ach); + mlr = RLRateParams_RLRateSigDeriv(ly.Learn.RLRate, Neurons[Index3D(TensorStrides[70], TensorStrides[71], // note: don't have proper max here TensorStrides[72], u32(ni), u32(di), u32(CaDPrev))], f32(f32(1))); } case MatrixLayer: { @@ -113,31 +113,30 @@ fn LayerParams_PlusPhaseNeuron(ly: ptr, ctx: ptr= NetworkIxs[0].NNeurons) { return; } - var di = Context_DataIndex(&ctx, i); + var di = Context_DataIndex(ctx, i); var li = NeuronIxs[Index2D(TensorStrides[10], TensorStrides[11], u32(ni), u32(NrnLayIndex))]; - var layers=Layers[li]; LayerParams_PlusPhaseNeuron(&layers, &ctx, ni, di); - Ctx[0] = ctx; + let layers=Layers[li]; LayerParams_PlusPhaseNeuron(layers, ctx, ni, di); } //////// import: "act-path.go" @@ -388,24 +387,24 @@ struct SahpParams { DtMax: f32, pad: i32, } -fn SahpParams_EFun(mp: ptr, z: f32) -> f32 { +fn SahpParams_EFun(mp: SahpParams, z: f32) -> f32 { if (abs(z) < 1.0e-4) { return 1.0 - 0.5*z; }return z / (FastExp(z) - 1.0); } -fn SahpParams_NinfTauFromCa(mp: ptr, ca: f32, ninf: ptr,tau: ptr) { - var co = ca - (*mp).Off; - var a = (*mp).DtMax * (*mp).Slope * SahpParams_EFun(mp, -co/(*mp).Slope); - var b = (*mp).DtMax * (*mp).Slope * SahpParams_EFun(mp, co/(*mp).Slope); +fn SahpParams_NinfTauFromCa(mp: SahpParams, ca: f32, ninf: ptr,tau: ptr) { + var co = ca - mp.Off; + var a = mp.DtMax * mp.Slope * SahpParams_EFun(mp, -co/mp.Slope); + var b = mp.DtMax * mp.Slope * SahpParams_EFun(mp, co/mp.Slope); *tau = 1.0 / (a + b); *ninf = a * *tau; // a / (a+b) return; } -fn SahpParams_CaInt(mp: ptr, caInt: f32,ca: f32) -> f32 { - return caInt + (*mp).CaDt*(ca-caInt); +fn SahpParams_CaInt(mp: SahpParams, caInt: f32,ca: f32) -> f32 { + return caInt + mp.CaDt*(ca-caInt); } -fn SahpParams_GsAHP(mp: ptr, n: f32) -> f32 { - return (*mp).Gbar * n; +fn SahpParams_GsAHP(mp: SahpParams, n: f32) -> f32 { + return mp.Gbar * n; } //////// import: "chans-skca.go" @@ -452,11 +451,11 @@ struct Context { //types:add -setters SlowCounter: i32, RandCounter: RandCounter, } -fn 
Context_ItemIndex(ctx: ptr, idx: u32) -> u32 { - return idx / (*ctx).NData; +fn Context_ItemIndex(ctx: Context, idx: u32) -> u32 { + return idx / ctx.NData; } -fn Context_DataIndex(ctx: ptr, idx: u32) -> u32 { - return idx % (*ctx).NData; +fn Context_DataIndex(ctx: Context, idx: u32) -> u32 { + return idx % ctx.NData; } //////// import: "deep-layer.go" @@ -714,8 +713,8 @@ struct LayerParams { TDDa: TDDaParams, Indexes: LayerIndexes, } -fn LayerParams_PoolIndex(ly: ptr, pi: u32) -> u32 { - return (*ly).PoolSt + pi; +fn LayerParams_PoolIndex(ly: LayerParams, pi: u32) -> u32 { + return ly.PoolSt + pi; } //////// import: "layertypes.go" @@ -805,13 +804,13 @@ struct RLRateParams { Min: f32, pad: i32, } -fn RLRateParams_RLRateSigDeriv(rl: ptr, act: f32, laymax: f32) -> f32 { - if ((*rl).On == 0 || laymax == 0) { +fn RLRateParams_RLRateSigDeriv(rl: RLRateParams, act: f32, laymax: f32) -> f32 { + if (rl.On == 0 || laymax == 0) { return f32(1.0); } var ca = min(act/laymax, 1.0); var lr: f32; - if ((*rl).SigmoidLinear == 1) { + if (rl.SigmoidLinear == 1) { if (ca < 0.5) { lr = 2 * ca; } else { @@ -820,21 +819,21 @@ fn RLRateParams_RLRateSigDeriv(rl: ptr, act: f32, laymax: } else { lr = 4.0 * ca * (1 - ca); // .5 * .5 = .25 = peak } - if (lr < (*rl).SigmoidMin) { - lr = (*rl).SigmoidMin; + if (lr < rl.SigmoidMin) { + lr = rl.SigmoidMin; }return lr; } -fn RLRateParams_RLRateDiff(rl: ptr, scap: f32,scad: f32) -> f32 { - if ((*rl).On == 0 || (*rl).Diff == 0) { +fn RLRateParams_RLRateDiff(rl: RLRateParams, scap: f32,scad: f32) -> f32 { + if (rl.On == 0 || rl.Diff == 0) { return f32(1.0); } var smax = max(scap, scad); - if (smax > (*rl).SpikeThr) { // avoid div by 0 + if (smax > rl.SpikeThr) { // avoid div by 0 var dif = abs(scap - scad); - if (dif < (*rl).DiffThr) { - return (*rl).Min; + if (dif < rl.DiffThr) { + return rl.Min; }return (dif / smax); - }return (*rl).Min; + }return rl.Min; } struct LearnNeuronParams { CaLearn: LearnCaParams, @@ -979,32 +978,32 @@ struct 
NeuroModParams { pad1: f32, pad2: f32, } -fn NeuroModParams_IsBLAExt(nm: ptr) -> bool { - return ((*nm).Valence == Positive && (*nm).DAMod == D2Mod) || - ((*nm).Valence == Negative && (*nm).DAMod == D1Mod); +fn NeuroModParams_IsBLAExt(nm: NeuroModParams) -> bool { + return (nm.Valence == Positive && nm.DAMod == D2Mod) || + (nm.Valence == Negative && nm.DAMod == D1Mod); } -fn NeuroModParams_LRModFact(nm: ptr, pct: f32,val: f32) -> f32 { +fn NeuroModParams_LRModFact(nm: NeuroModParams, pct: f32,val: f32) -> f32 { var aval = clamp(abs(val), 0.0, 1.0);return 1.0 - pct*(1.0-aval); } -fn NeuroModParams_DAGain(nm: ptr, da: f32) -> f32 { +fn NeuroModParams_DAGain(nm: NeuroModParams, da: f32) -> f32 { var ada = da; if (da > 0) { - ada *= (*nm).BurstGain; + ada *= nm.BurstGain; } else { - ada *= (*nm).DipGain; + ada *= nm.DipGain; }return ada; } -fn NeuroModParams_DASign(nm: ptr) -> f32 { - if ((*nm).DAMod == D2Mod) { +fn NeuroModParams_DASign(nm: NeuroModParams) -> f32 { + if (nm.DAMod == D2Mod) { return -1.0; }return f32(1.0); } -fn NeuroModParams_LRMod(nm: ptr, da: f32,ach: f32) -> f32 { - var lmod = NeuroModParams_LRModFact(nm, (*nm).AChLRateMod, ach); - if ((*nm).DALRateSign == 1) { +fn NeuroModParams_LRMod(nm: NeuroModParams, da: f32,ach: f32) -> f32 { + var lmod = NeuroModParams_LRModFact(nm, nm.AChLRateMod, ach); + if (nm.DALRateSign == 1) { lmod *= NeuroModParams_DAGain(nm, da) * NeuroModParams_DASign(nm); } else { - lmod *= NeuroModParams_LRModFact(nm, (*nm).DALRateMod, da); + lmod *= NeuroModParams_LRModFact(nm, nm.DALRateMod, da); }return lmod; } diff --git a/axon/shaders/PlusPhasePool.wgsl b/axon/shaders/PlusPhasePool.wgsl index 6472a8839..af5a03795 100644 --- a/axon/shaders/PlusPhasePool.wgsl +++ b/axon/shaders/PlusPhasePool.wgsl @@ -25,7 +25,7 @@ var RecvPathIxs: array; var PathRecvCon: array; @group(1) @binding(4) var RecvSynIxs: array; -// // Ctx is the current context state (one only). +// // Ctx is the current context state (one only). 
This is read-only except in // specific kernels. @group(2) @binding(0) var Ctx: array; @group(2) @binding(1) @@ -78,21 +78,20 @@ fn Index3D(s0: u32, s1: u32, s2: u32, i0: u32, i1: u32, i2: u32) -> u32 { //////// import: "vars.go" //////// import: "act-layer.go" -fn LayerParams_PlusPhasePool(ly: ptr, ctx: ptr, pi: u32,di: u32) { +fn LayerParams_PlusPhasePool(ly: LayerParams, ctx: Context, pi: u32,di: u32) { PoolCycleToPlus(pi, di); } //////// import: "act-net.go" fn PlusPhasePool(i: u32) { //gosl:kernel - var ctx = Ctx[0]; - var pi = Context_ItemIndex(&ctx, i); + let ctx = Ctx[0]; + var pi = Context_ItemIndex(ctx, i); if (pi >= NetworkIxs[0].NPools) { return; } - var di = Context_DataIndex(&ctx, i); + var di = Context_DataIndex(ctx, i); var li = PoolIxs[Index2D(TensorStrides[0], TensorStrides[1], u32(pi), u32(PoolLayerIdx))]; - var layers=Layers[li]; LayerParams_PlusPhasePool(&layers, &ctx, pi, di); - Ctx[0] = ctx; + let layers=Layers[li]; LayerParams_PlusPhasePool(layers, ctx, pi, di); } //////// import: "act-path.go" @@ -388,11 +387,11 @@ struct Context { //types:add -setters SlowCounter: i32, RandCounter: RandCounter, } -fn Context_ItemIndex(ctx: ptr, idx: u32) -> u32 { - return idx / (*ctx).NData; +fn Context_ItemIndex(ctx: Context, idx: u32) -> u32 { + return idx / ctx.NData; } -fn Context_DataIndex(ctx: ptr, idx: u32) -> u32 { - return idx % (*ctx).NData; +fn Context_DataIndex(ctx: Context, idx: u32) -> u32 { + return idx % ctx.NData; } //////// import: "deep-layer.go" diff --git a/axon/shaders/PlusPhasePost.wgsl b/axon/shaders/PlusPhasePost.wgsl index 85b9f0a47..55b362f16 100644 --- a/axon/shaders/PlusPhasePost.wgsl +++ b/axon/shaders/PlusPhasePost.wgsl @@ -25,7 +25,7 @@ var RecvPathIxs: array; var PathRecvCon: array; @group(1) @binding(4) var RecvSynIxs: array; -// // Ctx is the current context state (one only). +// // Ctx is the current context state (one only). This is read-only except in // specific kernels. 
@group(2) @binding(0) var Ctx: array; @group(2) @binding(1) @@ -78,31 +78,31 @@ fn Index3D(s0: u32, s1: u32, s2: u32, i0: u32, i1: u32, i2: u32) -> u32 { //////// import: "vars.go" //////// import: "act-layer.go" -fn LayerParams_IsLearnTrgAvg(ly: ptr) -> bool { - if ((*ly).Acts.Clamp.IsInput == 1 || (*ly).Acts.Clamp.IsTarget == 1 || (*ly).Learn.TrgAvgAct.RescaleOn == 0) { +fn LayerParams_IsLearnTrgAvg(ly: LayerParams) -> bool { + if (ly.Acts.Clamp.IsInput == 1 || ly.Acts.Clamp.IsTarget == 1 || ly.Learn.TrgAvgAct.RescaleOn == 0) { return false; }return true; } -fn LayerParams_LearnTrgAvgErrLRate(ly: ptr) -> f32 { +fn LayerParams_LearnTrgAvgErrLRate(ly: LayerParams) -> f32 { if (!LayerParams_IsLearnTrgAvg(ly)) { return f32(0); - }return (*ly).Learn.TrgAvgAct.ErrLRate; + }return ly.Learn.TrgAvgAct.ErrLRate; } -fn LayerParams_PlusPhasePost(ly: ptr, ctx: ptr) { +fn LayerParams_PlusPhasePost(ly: LayerParams, ctx: Context) { LayerParams_PlusPhaseActAvg(ly, ctx); LayerParams_PhaseDiffFromActs(ly, ctx); // GPU syncs down the state before this - var np = (*ly).Indexes.NPools; - if ((*ly).Type == PTMaintLayer && (*ly).CT.OFCposPT == 1) { + var np = ly.Indexes.NPools; + if (ly.Type == PTMaintLayer && ly.CT.OFCposPT == 1) { for (var spi = u32(1); spi < np; spi++) { - for (var di = u32(0); di < (*ctx).NData; di++) { + for (var di = u32(0); di < ctx.NData; di++) { var pi = LayerParams_PoolIndex(ly, spi); var val = PoolAvgMax(AMCaD, AMCycle, Avg, pi, di); GlobalVectors[Index3D(TensorStrides[110], TensorStrides[111], TensorStrides[112], u32(GvOFCposPTMaint), u32(u32(spi - 1)), u32(di))] = val; } } } - if ((*ly).Acts.Decay.OnRew == 1) { - for (var di = u32(0); di < (*ctx).NData; di++) { + if (ly.Acts.Decay.OnRew == 1) { + for (var di = u32(0); di < ctx.NData; di++) { var hasRew = (GlobalScalars[Index2D(TensorStrides[100], TensorStrides[101], u32(GvHasRew), u32(di))] > 0); var giveUp = (GlobalScalars[Index2D(TensorStrides[100], TensorStrides[101], u32(GvGiveUp), u32(di))] > 0); if 
(hasRew || giveUp) { @@ -114,22 +114,22 @@ fn LayerParams_PlusPhasePost(ly: ptr, ctx: ptr, ctx: ptr) { - var nn = (*ly).Indexes.NNeurons; +fn LayerParams_PlusPhaseActAvg(ly: LayerParams, ctx: Context) { + var nn = ly.Indexes.NNeurons; for (var lni = u32(0); lni < nn; lni++) { - var ni = (*ly).Indexes.NeurSt + lni; + var ni = ly.Indexes.NeurSt + lni; if (NeuronIsOff(ni)) { continue; } var dTrgSum = f32(0); var avgSum = f32(0); - for (var di = u32(0); di < (*ctx).NData; di++) { + for (var di = u32(0); di < ctx.NData; di++) { dTrgSum += LayerParams_LearnTrgAvgErrLRate(ly) * (Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(CaP))] - Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(CaD))]); - avgSum += (*ly).Acts.Dt.LongAvgDt * (Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(ActM))] - NeuronAvgs[Index2D(TensorStrides[80], TensorStrides[81], u32(ni), u32(ActAvg))]); + avgSum += ly.Acts.Dt.LongAvgDt * (Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(ActM))] - NeuronAvgs[Index2D(TensorStrides[80], TensorStrides[81], u32(ni), u32(ActAvg))]); } NeuronAvgs[Index2D(TensorStrides[80], TensorStrides[81], u32(ni), u32(DTrgAvg))] += dTrgSum; NeuronAvgs[Index2D(TensorStrides[80], TensorStrides[81], u32(ni), u32(ActAvg))] += avgSum; @@ -138,9 +138,8 @@ fn LayerParams_PlusPhaseActAvg(ly: ptr, ctx: ptr, avg: ptr,vr: ptr, val: f32) { +fn DtParams_AvgVarUpdate(dp: DtParams, avg: ptr,vr: ptr, val: f32) { if (*avg == 0) { // first time -- set *avg = val; *vr = f32(0); } else { var del = val - *avg; - var incr = (*dp).LongAvgDt * del; + var incr = dp.LongAvgDt * del; *avg += incr; if (*vr == 0) { - *vr = 2 * (1 - (*dp).LongAvgDt) * del * incr; + *vr = 2 * (1 - dp.LongAvgDt) * del * incr; } else { - *vr = (1 - (*dp).LongAvgDt) * (*vr + del*incr); + *vr = (1 - dp.LongAvgDt) * (*vr + del*incr); } } } @@ 
-309,7 +308,7 @@ struct ActParams { SMaint: SMaintParams, PopCode: PopCodeParams, } -fn ActParams_DecayLearnCa(ac: ptr, ctx: ptr, ni: u32,di: u32, decay: f32) { +fn ActParams_DecayLearnCa(ac: ActParams, ctx: Context, ni: u32,di: u32, decay: f32) { Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(GnmdaLrn))] -= decay * Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(GnmdaLrn))]; Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(NmdaCa))] -= decay * Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(NmdaCa))]; Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(VgccCa))] -= decay * Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(VgccCa))]; @@ -325,7 +324,7 @@ fn ActParams_DecayLearnCa(ac: ptr, ctx: ptr, ctx: ptr, ni: u32,di: u32, decay: f32) { +fn ActParams_DecayAHP(ac: ActParams, ctx: Context, ni: u32,di: u32, decay: f32) { Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(MahpN))] -= decay * Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(MahpN))]; Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(Gmahp))] -= decay * Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(Gmahp))]; Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(SahpCa))] -= decay * Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(SahpCa))]; @@ -333,24 +332,24 @@ fn ActParams_DecayAHP(ac: ptr, ctx: ptr, n Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(Gsahp))] -= decay * Neurons[Index3D(TensorStrides[70], 
TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(Gsahp))]; Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(GknaMed))] -= decay * Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(GknaMed))]; Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(GknaSlow))] -= decay * Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(GknaSlow))]; - var kirMrest = (*ac).Kir.Mrest; + var kirMrest = ac.Kir.Mrest; Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(KirM))] += decay * (kirMrest - Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(KirM))]); Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(Gkir))] -= decay * Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(Gkir))]; } -fn ActParams_DecayState(ac: ptr, ctx: ptr, ni: u32,di: u32, decay: f32,glong: f32,ahp: f32) { +fn ActParams_DecayState(ac: ActParams, ctx: Context, ni: u32,di: u32, decay: f32,glong: f32,ahp: f32) { Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(ISIAvg))] = -1.0; - Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(ActInt))] = (*ac).Init.Act; + Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(ActInt))] = ac.Init.Act; Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(Spiked))] = 0.0; if (decay > 0) { Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(Spike))] = 0.0; - Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(Act))] -= decay * (Neurons[Index3D(TensorStrides[70], 
TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(Act))] - (*ac).Init.Act); - Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(ActInt))] -= decay * (Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(ActInt))] - (*ac).Init.Act); + Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(Act))] -= decay * (Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(Act))] - ac.Init.Act); + Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(ActInt))] -= decay * (Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(ActInt))] - ac.Init.Act); Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(GeSyn))] -= decay * (Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(GeSyn))] - NeuronAvgs[Index2D(TensorStrides[80], TensorStrides[81], u32(ni), u32(GeBase))]); Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(Ge))] -= decay * (Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(Ge))] - NeuronAvgs[Index2D(TensorStrides[80], TensorStrides[81], u32(ni), u32(GeBase))]); Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(Gi))] -= decay * (Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(Gi))] - NeuronAvgs[Index2D(TensorStrides[80], TensorStrides[81], u32(ni), u32(GiBase))]); Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(Gk))] -= decay * Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(Gk))]; - Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], 
u32(ni), u32(di), u32(Vm))] -= decay * (Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(Vm))] - (*ac).Init.Vm); + Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(Vm))] -= decay * (Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(Vm))] - ac.Init.Vm); Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(GeNoise))] -= decay * Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(GeNoise))]; Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(GiNoise))] -= decay * Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(GiNoise))]; Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(GiSyn))] -= decay * Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(GiSyn))]; @@ -358,7 +357,7 @@ fn ActParams_DecayState(ac: ptr, ctx: ptr, Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(GiInt))] -= decay * Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(GiInt))]; Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(GeIntNorm))] -= decay * Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(GeIntNorm))]; } - Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(VmDend))] -= glong * (Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(VmDend))] - (*ac).Init.Vm); + Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(VmDend))] -= glong * (Neurons[Index3D(TensorStrides[70], TensorStrides[71], 
TensorStrides[72], u32(ni), u32(di), u32(VmDend))] - ac.Init.Vm); if (ahp > 0) { ActParams_DecayAHP(ac, ctx, ni, di, ahp); } @@ -374,8 +373,8 @@ fn ActParams_DecayState(ac: ptr, ctx: ptr, Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(VgccH))] -= glong * Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(VgccH))]; Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(Gak))] -= glong * Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(Gak))]; Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(Gsk))] -= glong * Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(Gsk))]; - if ((*ac).Decay.LearnCa > 0) { // learning-based Ca values -- not usual - ActParams_DecayLearnCa(ac, ctx, ni, di, (*ac).Decay.LearnCa); + if (ac.Decay.LearnCa > 0) { // learning-based Ca values -- not usual + ActParams_DecayLearnCa(ac, ctx, ni, di, ac.Decay.LearnCa); } Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(Inet))] = 0.0; Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(GeRaw))] = 0.0; @@ -742,16 +741,16 @@ fn PoolInhibDecay(pi: u32,di: u32, decay: f32) { } //////// import: "init-layer.go" -fn LayerParams_DecayState(ly: ptr, ctx: ptr, di: u32, decay: f32,glong: f32,ahp: f32) { - var nn = (*ly).Indexes.NNeurons; +fn LayerParams_DecayState(ly: LayerParams, ctx: Context, di: u32, decay: f32,glong: f32,ahp: f32) { + var nn = ly.Indexes.NNeurons; for (var lni = u32(0); lni < nn; lni++) { - var ni = (*ly).Indexes.NeurSt + lni; + var ni = ly.Indexes.NeurSt + lni; if (NeuronIsOff(ni)) { continue; } - ActParams_DecayState(&(*ly).Acts, ctx, ni, di, decay, glong, ahp); + ActParams_DecayState(ly.Acts, ctx, ni, di, decay, glong, ahp); if (ahp == 1) { - 
var lt = (*ly).Type; + var lt = ly.Type; if (lt == PTMaintLayer) { Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(CtxtGe))] = 0.0; Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(CtxtGeRaw))] = 0.0; @@ -761,8 +760,8 @@ fn LayerParams_DecayState(ly: ptr, ctx: ptr, ctx: ptr, di: u32, decay: f32,glong: f32,ahp: f32) { - var np = (*ly).Indexes.NPools; +fn LayerParams_DecayStateLayer(ly: LayerParams, ctx: Context, di: u32, decay: f32,glong: f32,ahp: f32) { + var np = ly.Indexes.NPools; for (var spi = u32(0); spi < np; spi++) { var pi = LayerParams_PoolIndex(ly, spi); PoolInhibDecay(pi, di, decay); @@ -831,8 +830,8 @@ struct LayerParams { TDDa: TDDaParams, Indexes: LayerIndexes, } -fn LayerParams_PoolIndex(ly: ptr, pi: u32) -> u32 { - return (*ly).PoolSt + pi; +fn LayerParams_PoolIndex(ly: LayerParams, pi: u32) -> u32 { + return ly.PoolSt + pi; } //////// import: "layertypes.go" @@ -1248,66 +1247,66 @@ struct GPParams { pad1: u32, pad2: u32, } -fn LayerParams_MatrixGated(ly: ptr, ctx: ptr) { +fn LayerParams_MatrixGated(ly: LayerParams, ctx: Context) { var lpi = LayerParams_PoolIndex(ly, u32(u32(0))); - if ((*ly).Learn.NeuroMod.DAMod != D1Mod) { - var oly = Layers[(*ly).Matrix.OtherMatrixIndex]; + if (ly.Learn.NeuroMod.DAMod != D1Mod) { + var oly = Layers[ly.Matrix.OtherMatrixIndex]; var olpi = oly.PoolSt; - for (var di = u32(0); di < (*ctx).NData; di++) { + for (var di = u32(0); di < ctx.NData; di++) { PoolsInt[Index3D(TensorStrides[140], TensorStrides[141], TensorStrides[142], u32(lpi), u32(di), u32(PoolGated))] = PoolsInt[Index3D(TensorStrides[140], TensorStrides[141], TensorStrides[142], u32(olpi), u32(di), u32(PoolGated))]; }return; } - for (var di = u32(0); di < (*ctx).NData; di++) { + for (var di = u32(0); di < ctx.NData; di++) { var mtxGated = PoolsInt[Index3D(TensorStrides[140], TensorStrides[141], TensorStrides[142], u32(lpi), u32(di), u32(PoolGated))] > 0; var 
thalGated = false; - if ((*ly).Matrix.ThalLay1Index >= 0) { - var tly = Layers[(*ly).Matrix.ThalLay1Index]; + if (ly.Matrix.ThalLay1Index >= 0) { + var tly = Layers[ly.Matrix.ThalLay1Index]; var tlpi = tly.PoolSt; var gt = PoolsInt[Index3D(TensorStrides[140], TensorStrides[141], TensorStrides[142], u32(tlpi), u32(di), u32(PoolGated))]; thalGated = thalGated || gt > 0; } - if ((*ly).Matrix.ThalLay2Index >= 0) { - var tly = Layers[(*ly).Matrix.ThalLay2Index]; + if (ly.Matrix.ThalLay2Index >= 0) { + var tly = Layers[ly.Matrix.ThalLay2Index]; var tlpi = tly.PoolSt; var gt = PoolsInt[Index3D(TensorStrides[140], TensorStrides[141], TensorStrides[142], u32(tlpi), u32(di), u32(PoolGated))]; thalGated = thalGated || gt > 0; } - if ((*ly).Matrix.ThalLay3Index >= 0) { - var tly = Layers[(*ly).Matrix.ThalLay3Index]; + if (ly.Matrix.ThalLay3Index >= 0) { + var tly = Layers[ly.Matrix.ThalLay3Index]; var tlpi = tly.PoolSt; var gt = PoolsInt[Index3D(TensorStrides[140], TensorStrides[141], TensorStrides[142], u32(tlpi), u32(di), u32(PoolGated))]; thalGated = thalGated || gt > 0; } - if ((*ly).Matrix.ThalLay4Index >= 0) { - var tly = Layers[(*ly).Matrix.ThalLay4Index]; + if (ly.Matrix.ThalLay4Index >= 0) { + var tly = Layers[ly.Matrix.ThalLay4Index]; var tlpi = tly.PoolSt; var gt = PoolsInt[Index3D(TensorStrides[140], TensorStrides[141], TensorStrides[142], u32(tlpi), u32(di), u32(PoolGated))]; thalGated = thalGated || gt > 0; } - if ((*ly).Matrix.ThalLay5Index >= 0) { - var tly = Layers[(*ly).Matrix.ThalLay5Index]; + if (ly.Matrix.ThalLay5Index >= 0) { + var tly = Layers[ly.Matrix.ThalLay5Index]; var tlpi = tly.PoolSt; var gt = PoolsInt[Index3D(TensorStrides[140], TensorStrides[141], TensorStrides[142], u32(tlpi), u32(di), u32(PoolGated))]; thalGated = thalGated || gt > 0; } - if ((*ly).Matrix.ThalLay6Index >= 0) { - var tly = Layers[(*ly).Matrix.ThalLay6Index]; + if (ly.Matrix.ThalLay6Index >= 0) { + var tly = Layers[ly.Matrix.ThalLay6Index]; var tlpi = tly.PoolSt; var gt = 
PoolsInt[Index3D(TensorStrides[140], TensorStrides[141], TensorStrides[142], u32(tlpi), u32(di), u32(PoolGated))]; thalGated = thalGated || gt > 0; } mtxGated = mtxGated && thalGated; if (!mtxGated) { // nobody did if thal didn't - for (var spi = u32(0); spi < (*ly).Indexes.NPools; spi++) { + for (var spi = u32(0); spi < ly.Indexes.NPools; spi++) { var pi = LayerParams_PoolIndex(ly, spi); PoolsInt[Index3D(TensorStrides[140], TensorStrides[141], TensorStrides[142], u32(pi), u32(di), u32(PoolGated))] = 0; } } - if ((*ctx).PlusPhase == 1 && (*ly).Matrix.IsVS == 1) { + if (ctx.PlusPhase == 1 && ly.Matrix.IsVS == 1) { GlobalScalars[Index2D(TensorStrides[100], TensorStrides[101], u32(GvVSMatrixJustGated), u32(di))] = f32(mtxGated); if (mtxGated) { var poolIndex = i32(-1); - for (var spi = u32(1); spi < (*ly).Indexes.NPools; spi++) { + for (var spi = u32(1); spi < ly.Indexes.NPools; spi++) { var pi = LayerParams_PoolIndex(ly, spi); if (poolIndex < 0 && PoolsInt[Index3D(TensorStrides[140], TensorStrides[141], TensorStrides[142], u32(pi), u32(di), u32(PoolGated))] > 0) { poolIndex = i32(spi); @@ -1456,18 +1455,18 @@ struct BLAPathParams { //////// import: "rubicon.go" //////// import: "stats.go" -fn LayerParams_PhaseDiffFromActs(ly: ptr, ctx: ptr) { - var li = (*ly).Index; - for (var di = u32(0); di < (*ctx).NData; di++) { +fn LayerParams_PhaseDiffFromActs(ly: LayerParams, ctx: Context) { + var li = ly.Index; + for (var di = u32(0); di < ctx.NData; di++) { var lpi = LayerParams_PoolIndex(ly, u32(u32(0))); var avgM = PoolAvgMax(AMAct, AMMinus, Avg, lpi, di); var avgP = PoolAvgMax(AMAct, AMPlus, Avg, lpi, di); var cosv = f32(0); var ssm = f32(0); var ssp = f32(0); - var nn = (*ly).Indexes.NNeurons; + var nn = ly.Indexes.NNeurons; for (var lni = u32(0); lni < nn; lni++) { - var ni = (*ly).Indexes.NeurSt + lni; + var ni = ly.Indexes.NeurSt + lni; if (NeuronIsOff(ni)) { continue; } @@ -1485,7 +1484,7 @@ fn LayerParams_PhaseDiffFromActs(ly: ptr, ctx: ptr RecvPathIxs: array; var 
PathRecvCon: array; @group(1) @binding(4) var RecvSynIxs: array; -// // Ctx is the current context state (one only). +// // Ctx is the current context state (one only). This is read-only except in // specific kernels. @group(2) @binding(0) var Ctx: array; @group(2) @binding(1) @@ -80,7 +80,7 @@ fn Index3D(s0: u32, s1: u32, s2: u32, i0: u32, i1: u32, i2: u32) -> u32 { //////// import: "act-layer.go" //////// import: "act-net.go" -fn PlusPhaseStartContext(i: u32) { //gosl:kernel +fn PlusPhaseStartContext(i: u32) { //gosl:kernel read-write:Ctx if (i != 0) { return; } diff --git a/axon/shaders/PlusPhaseStartNeuron.wgsl b/axon/shaders/PlusPhaseStartNeuron.wgsl index cbc097ab1..959d48d9e 100644 --- a/axon/shaders/PlusPhaseStartNeuron.wgsl +++ b/axon/shaders/PlusPhaseStartNeuron.wgsl @@ -25,7 +25,7 @@ var RecvPathIxs: array; var PathRecvCon: array; @group(1) @binding(4) var RecvSynIxs: array; -// // Ctx is the current context state (one only). +// // Ctx is the current context state (one only). This is read-only except in // specific kernels. 
@group(2) @binding(0) var Ctx: array; @group(2) @binding(1) @@ -78,28 +78,27 @@ fn Index3D(s0: u32, s1: u32, s2: u32, i0: u32, i1: u32, i2: u32) -> u32 { //////// import: "vars.go" //////// import: "act-layer.go" -fn LayerParams_PlusPhaseStartNeuron(ly: ptr, ctx: ptr, ni: u32,di: u32) { +fn LayerParams_PlusPhaseStartNeuron(ly: LayerParams, ctx: Context, ni: u32,di: u32) { if (NeuronHasFlag(NeuronHasTarg, ni, di)) { // will be clamped in plus phase Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(Ext))] = Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(Target))]; NeuronSetFlag(NeuronHasExt, ni, di); Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(ISI))] = -1.0; Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(ISIAvg))] = -1.0; - Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(ActInt))] = (*ly).Acts.Init.Act; + Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(ActInt))] = ly.Acts.Init.Act; } } //////// import: "act-net.go" fn PlusPhaseStartNeuron(i: u32) { //gosl:kernel - var ctx = Ctx[0]; - var ni = Context_ItemIndex(&ctx, i); + let ctx = Ctx[0]; + var ni = Context_ItemIndex(ctx, i); if (ni >= NetworkIxs[0].NNeurons) { return; } - var di = Context_DataIndex(&ctx, i); + var di = Context_DataIndex(ctx, i); var li = NeuronIxs[Index2D(TensorStrides[10], TensorStrides[11], u32(ni), u32(NrnLayIndex))]; - var layers=Layers[li]; LayerParams_PlusPhaseStartNeuron(&layers, &ctx, ni, di); - Ctx[0] = ctx; + let layers=Layers[li]; LayerParams_PlusPhaseStartNeuron(layers, ctx, ni, di); } //////// import: "act-path.go" @@ -402,11 +401,11 @@ struct Context { //types:add -setters SlowCounter: i32, RandCounter: RandCounter, } -fn Context_ItemIndex(ctx: ptr, idx: u32) -> u32 { - return idx / (*ctx).NData; +fn 
Context_ItemIndex(ctx: Context, idx: u32) -> u32 { + return idx / ctx.NData; } -fn Context_DataIndex(ctx: ptr, idx: u32) -> u32 { - return idx % (*ctx).NData; +fn Context_DataIndex(ctx: Context, idx: u32) -> u32 { + return idx % ctx.NData; } //////// import: "deep-layer.go" diff --git a/axon/shaders/PoolGi.wgsl b/axon/shaders/PoolGi.wgsl index e48696678..f64ac88f3 100644 --- a/axon/shaders/PoolGi.wgsl +++ b/axon/shaders/PoolGi.wgsl @@ -25,7 +25,7 @@ var RecvPathIxs: array; var PathRecvCon: array; @group(1) @binding(4) var RecvSynIxs: array; -// // Ctx is the current context state (one only). +// // Ctx is the current context state (one only). This is read-only except in // specific kernels. @group(2) @binding(0) var Ctx: array; @group(2) @binding(1) @@ -78,9 +78,9 @@ fn Index3D(s0: u32, s1: u32, s2: u32, i0: u32, i1: u32, i2: u32) -> u32 { //////// import: "vars.go" //////// import: "act-layer.go" -fn LayerParams_SubPoolGiFromSpikes(ly: ptr, ctx: ptr, lpi: u32,pi: u32,di: u32, lyInhib: bool, giMult: f32) { +fn LayerParams_SubPoolGiFromSpikes(ly: LayerParams, ctx: Context, lpi: u32,pi: u32,di: u32, lyInhib: bool, giMult: f32) { PoolInhibSpikesFromRaw(pi, di); - PoolInhib(&(*ly).Inhib.Pool, pi, di, giMult); + PoolInhib(ly.Inhib.Pool, pi, di, giMult); if (lyInhib) { PoolInhibLayerMax(pi, di, Pools[Index3D(TensorStrides[130], TensorStrides[131], // note: this requires lpl inhib to have been computed before! 
TensorStrides[132], u32(lpi), u32(di), u32(TotalGi))]); @@ -93,14 +93,13 @@ fn LayerParams_SubPoolGiFromSpikes(ly: ptr, ctx: ptr= NetworkIxs[0].NPools) { return; } - var di = Context_DataIndex(&ctx, i); - PoolPoolGi(&ctx, pi, di); - Ctx[0] = ctx; + var di = Context_DataIndex(ctx, i); + PoolPoolGi(ctx, pi, di); } //////// import: "act-path.go" @@ -396,11 +395,11 @@ struct Context { //types:add -setters SlowCounter: i32, RandCounter: RandCounter, } -fn Context_ItemIndex(ctx: ptr, idx: u32) -> u32 { - return idx / (*ctx).NData; +fn Context_ItemIndex(ctx: Context, idx: u32) -> u32 { + return idx / ctx.NData; } -fn Context_DataIndex(ctx: ptr, idx: u32) -> u32 { - return idx % (*ctx).NData; +fn Context_DataIndex(ctx: Context, idx: u32) -> u32 { + return idx % ctx.NData; } //////// import: "deep-layer.go" @@ -472,20 +471,20 @@ struct GiParams { FFAvgDt: f32, pad: f32, } -fn GiParams_FSiFromFFs(fb: ptr, fsi: f32,ffs: f32,fbs: f32) -> f32 { - return fsi + (ffs + (*fb).FB*fbs) - (*fb).FSDt*fsi; // immediate up, slow down +fn GiParams_FSiFromFFs(fb: GiParams, fsi: f32,ffs: f32,fbs: f32) -> f32 { + return fsi + (ffs + fb.FB*fbs) - fb.FSDt*fsi; // immediate up, slow down } -fn GiParams_FS0Thr(fb: ptr, val: f32) -> f32 { - return max(val-(*fb).FS0, 0.0); +fn GiParams_FS0Thr(fb: GiParams, val: f32) -> f32 { + return max(val-fb.FS0, 0.0); } -fn GiParams_FS(fb: ptr, fsi: f32,gext: f32, clamped: bool) -> f32 { - if (clamped && gext > (*fb).ClampExtMin) { +fn GiParams_FS(fb: GiParams, fsi: f32,gext: f32, clamped: bool) -> f32 { + if (clamped && gext > fb.ClampExtMin) { return gext; }return GiParams_FS0Thr(fb, fsi) + gext; } -fn GiParams_SSFromFBs(fb: ptr, ssf: ptr,ssi: ptr, fbs: f32) { - *ssi += (*fb).SSiDt * (*ssf*fbs - *ssi); - *ssf += fbs*(1-*ssf) - (*fb).SSfDt**ssf; +fn GiParams_SSFromFBs(fb: GiParams, ssf: ptr,ssi: ptr, fbs: f32) { + *ssi += fb.SSiDt * (*ssf*fbs - *ssi); + *ssf += fbs*(1-*ssf) - fb.SSfDt**ssf; } //////// import: "fsfffb-inhib.go" @@ -608,23 +607,23 @@ struct 
InhibParams { Layer: GiParams, Pool: GiParams, } -fn PoolInhib(fb: ptr, pi: u32,di: u32, gimult: f32) { - if ((*fb).On == 0) { +fn PoolInhib(fb: GiParams, pi: u32,di: u32, gimult: f32) { + if (fb.On == 0) { PoolInhibZero(pi, di);return; } - Pools[Index3D(TensorStrides[130], TensorStrides[131], TensorStrides[132], u32(pi), u32(di), u32(FFAvg))] += (*fb).FFAvgDt * (Pools[Index3D(TensorStrides[130], TensorStrides[131], TensorStrides[132], u32(pi), u32(di), u32(FFs))] - Pools[Index3D(TensorStrides[130], TensorStrides[131], TensorStrides[132], u32(pi), u32(di), u32(FFAvg))]); + Pools[Index3D(TensorStrides[130], TensorStrides[131], TensorStrides[132], u32(pi), u32(di), u32(FFAvg))] += fb.FFAvgDt * (Pools[Index3D(TensorStrides[130], TensorStrides[131], TensorStrides[132], u32(pi), u32(di), u32(FFs))] - Pools[Index3D(TensorStrides[130], TensorStrides[131], TensorStrides[132], u32(pi), u32(di), u32(FFAvg))]); var fsi = Pools[Index3D(TensorStrides[130], TensorStrides[131], TensorStrides[132], u32(pi), u32(di), u32(FSi))]; fsi = GiParams_FSiFromFFs(fb, fsi, Pools[Index3D(TensorStrides[130], TensorStrides[131], TensorStrides[132], u32(pi), u32(di), u32(FFs))], Pools[Index3D(TensorStrides[130], TensorStrides[131], TensorStrides[132], u32(pi), u32(di), u32(FBs))]); Pools[Index3D(TensorStrides[130], TensorStrides[131], TensorStrides[132], u32(pi), u32(di), u32(FSi))] = fsi; var clamped = PoolsInt[Index3D(TensorStrides[140], TensorStrides[141], TensorStrides[142], u32(pi), u32(di), u32(Clamped))] > 0; - Pools[Index3D(TensorStrides[130], TensorStrides[131], TensorStrides[132], u32(pi), u32(di), u32(FSGi))] = (*fb).Gi * GiParams_FS(fb, fsi, Pools[Index3D(TensorStrides[130], TensorStrides[131], TensorStrides[132], u32(pi), u32(di), u32(GeExts))], clamped); + Pools[Index3D(TensorStrides[130], TensorStrides[131], TensorStrides[132], u32(pi), u32(di), u32(FSGi))] = fb.Gi * GiParams_FS(fb, fsi, Pools[Index3D(TensorStrides[130], TensorStrides[131], TensorStrides[132], u32(pi), u32(di), 
u32(GeExts))], clamped); var ssf = Pools[Index3D(TensorStrides[130], TensorStrides[131], TensorStrides[132], u32(pi), u32(di), u32(SSf))]; var ssi = Pools[Index3D(TensorStrides[130], TensorStrides[131], TensorStrides[132], u32(pi), u32(di), u32(SSi))]; GiParams_SSFromFBs(fb, &ssf, &ssi, Pools[Index3D(TensorStrides[130], TensorStrides[131], TensorStrides[132], u32(pi), u32(di), u32(FBs))]); - Pools[Index3D(TensorStrides[130], TensorStrides[131], TensorStrides[132], u32(pi), u32(di), u32(SSGi))] = (*fb).Gi * (*fb).SS * ssi; + Pools[Index3D(TensorStrides[130], TensorStrides[131], TensorStrides[132], u32(pi), u32(di), u32(SSGi))] = fb.Gi * fb.SS * ssi; Pools[Index3D(TensorStrides[130], TensorStrides[131], TensorStrides[132], u32(pi), u32(di), u32(SSf))] = ssf; Pools[Index3D(TensorStrides[130], TensorStrides[131], TensorStrides[132], u32(pi), u32(di), u32(SSi))] = ssi; - Pools[Index3D(TensorStrides[130], TensorStrides[131], TensorStrides[132], u32(pi), u32(di), u32(TotalGi))] = PoolInhibGiFromFSSS(pi, di) + (*fb).FFPrv*Pools[Index3D(TensorStrides[130], TensorStrides[131], TensorStrides[132], u32(pi), u32(di), u32(FFAvgPrv))]; + Pools[Index3D(TensorStrides[130], TensorStrides[131], TensorStrides[132], u32(pi), u32(di), u32(TotalGi))] = PoolInhibGiFromFSSS(pi, di) + fb.FFPrv*Pools[Index3D(TensorStrides[130], TensorStrides[131], TensorStrides[132], u32(pi), u32(di), u32(FFAvgPrv))]; PoolInhibSaveOrig(pi, di); } fn PoolInhibInitRaw(pi: u32,di: u32) { @@ -748,8 +747,8 @@ struct LayerParams { TDDa: TDDaParams, Indexes: LayerIndexes, } -fn LayerParams_PoolIndex(ly: ptr, pi: u32) -> u32 { - return (*ly).PoolSt + pi; +fn LayerParams_PoolIndex(ly: LayerParams, pi: u32) -> u32 { + return ly.PoolSt + pi; } //////// import: "layertypes.go" @@ -1239,18 +1238,18 @@ fn PoolAvgMaxCalc(pi: u32,di: u32) { PoolAvgMaxCalcVar(vr, pi, di); } } -fn PoolPoolGi(ctx: ptr, pi: u32,di: u32) { +fn PoolPoolGi(ctx: Context, pi: u32,di: u32) { if (PoolIxs[Index2D(TensorStrides[0], TensorStrides[1], 
u32(pi), u32(PoolIsLayer))] > 0) { return; } var li = PoolIxs[Index2D(TensorStrides[0], TensorStrides[1], u32(pi), u32(PoolLayerIdx))]; PoolAvgMaxCalc(pi, di); PoolInhibIntToRaw(pi, di); - var ly = Layers[u32(li)]; + let ly = Layers[u32(li)]; var giMult = LayerStates[Index3D(TensorStrides[90], TensorStrides[91], TensorStrides[92], u32(li), u32(di), u32(LayerGiMult))]; var lyIsOn = (ly.Inhib.Layer.On == 1); - var lpi = LayerParams_PoolIndex(&ly, u32(0)); - LayerParams_SubPoolGiFromSpikes(&ly, ctx, lpi, pi, di, lyIsOn, giMult); + var lpi = LayerParams_PoolIndex(ly, u32(0)); + LayerParams_SubPoolGiFromSpikes(ly, ctx, lpi, pi, di, lyIsOn, giMult); } //////// import: "rand.go" diff --git a/axon/shaders/SendSpike.wgsl b/axon/shaders/SendSpike.wgsl index d8bad68b5..439b25a84 100644 --- a/axon/shaders/SendSpike.wgsl +++ b/axon/shaders/SendSpike.wgsl @@ -25,7 +25,7 @@ var RecvPathIxs: array; var PathRecvCon: array; @group(1) @binding(4) var RecvSynIxs: array; -// // Ctx is the current context state (one only). +// // Ctx is the current context state (one only). This is read-only except in // specific kernels. 
@group(2) @binding(0) var Ctx: array; @group(2) @binding(1) @@ -78,37 +78,37 @@ fn Index3D(s0: u32, s1: u32, s2: u32, i0: u32, i1: u32, i2: u32) -> u32 { //////// import: "vars.go" //////// import: "act-layer.go" -fn LayerParams_SendSpike(ly: ptr, ctx: ptr, ni: u32,di: u32) { +fn LayerParams_SendSpike(ly: LayerParams, ctx: Context, ni: u32,di: u32) { var pi = LayerParams_PoolIndex(ly, NeuronIxs[Index2D(TensorStrides[10], TensorStrides[11], u32(ni), u32(NrnSubPool))]); var lpi = LayerParams_PoolIndex(ly, u32(u32(0))); - var lni = ni - (*ly).Indexes.NeurSt; + var lni = ni - ly.Indexes.NeurSt; LayerParams_PostSpike(ly, ctx, lpi, pi, ni, di); - for (var pti = u32(0); pti < (*ly).Indexes.SendN; pti++) { - var pt = Paths[(*ly).Indexes.SendSt + pti]; - PathParams_SendSpike(&pt, ctx, ni, di, lni); + for (var pti = u32(0); pti < ly.Indexes.SendN; pti++) { + let pt = Paths[ly.Indexes.SendSt + pti]; + PathParams_SendSpike(pt, ctx, ni, di, lni); } } -fn LayerParams_PostSpikeSpecial(ly: ptr, ctx: ptr, lpi: u32,pi: u32,ni: u32,di: u32) { +fn LayerParams_PostSpikeSpecial(ly: LayerParams, ctx: Context, lpi: u32,pi: u32,ni: u32,di: u32) { Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(Burst))] = Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(CaP))]; - var li = (*ly).Index; - var pil = pi - (*ly).PoolSt; // 0-n pool index + var li = ly.Index; + var pil = pi - ly.PoolSt; // 0-n pool index var pnn = u32(PoolNNeurons(pi)); var pni = NeuronIxs[Index2D(TensorStrides[10], TensorStrides[11], u32(ni), u32(NrnNeurIndex))] - u32(PoolIxs[Index2D(TensorStrides[0], TensorStrides[1], u32(pi), u32(PoolNeurSt))]); var hasRew = GlobalScalars[Index2D(TensorStrides[100], TensorStrides[101], u32(GvHasRew), u32(di))] > 0; - switch ((*ly).Type) { + switch (ly.Type) { case SuperLayer: { - if ((*ctx).PlusPhase == 1) { + if (ctx.PlusPhase == 1) { var actMax = PoolAvgMax(AMCaP, AMCycle, Max, lpi, di); var actAvg = 
PoolAvgMax(AMCaP, AMCycle, Avg, lpi, di); - var thr = BurstParams_ThrFromAvgMax(&(*ly).Bursts, actAvg, actMax); + var thr = BurstParams_ThrFromAvgMax(ly.Bursts, actAvg, actMax); if (Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(CaP))] < thr) { Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(Burst))] = 0.0; } } } case PTPredLayer, CTLayer: { - if ((*ctx).Cycle == (*ctx).ThetaCycles-1) { - if ((*ly).CT.DecayTau == 0) { + if (ctx.Cycle == ctx.ThetaCycles-1) { + if (ly.CT.DecayTau == 0) { Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(CtxtGe))] = Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(CtxtGeRaw))]; } else { Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(CtxtGe))] += Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(CtxtGeRaw))]; @@ -126,7 +126,7 @@ fn LayerParams_PostSpikeSpecial(ly: ptr, ctx: ptr, ctx: ptr 0) { - act = PopCodeParams_EncodeValue(&(*ly).Acts.PopCode, pni, pnn, dr); + act = PopCodeParams_EncodeValue(ly.Acts.PopCode, pni, pnn, dr); } Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(Act))] = act; } @@ -156,29 +156,29 @@ fn LayerParams_PostSpikeSpecial(ly: ptr, ctx: ptr 0) { - act = PopCodeParams_EncodeValue(&(*ly).Acts.PopCode, pni, pnn, ur); + act = PopCodeParams_EncodeValue(ly.Acts.PopCode, pni, pnn, ur); } Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(Act))] = act; } case USLayer: { - var us = RubiconUSStimValue(di, pil-1, (*ly).Learn.NeuroMod.Valence); + var us = RubiconUSStimValue(di, pil-1, ly.Learn.NeuroMod.Valence); var act = us; if (us > 0) { - act = PopCodeParams_EncodeValue(&(*ly).Acts.PopCode, pni, pnn, us); + act = PopCodeParams_EncodeValue(ly.Acts.PopCode, pni, 
pnn, us); } - if ((*ly).Learn.NeuroMod.DAMod == D1Mod || ((*ly).Learn.NeuroMod.DAMod == D2Mod && hasRew && (*ctx).PlusPhase == 1)) { + if (ly.Learn.NeuroMod.DAMod == D1Mod || (ly.Learn.NeuroMod.DAMod == D2Mod && hasRew && ctx.PlusPhase == 1)) { Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(Act))] = act; } } case PVLayer: { if (hasRew) { var pv = f32(0); - if ((*ly).Learn.NeuroMod.Valence == Positive) { + if (ly.Learn.NeuroMod.Valence == Positive) { pv = GlobalScalars[Index2D(TensorStrides[100], TensorStrides[101], u32(GvPVpos), u32(di))]; } else { pv = GlobalScalars[Index2D(TensorStrides[100], TensorStrides[101], u32(GvPVneg), u32(di))]; } - var act = PopCodeParams_EncodeValue(&(*ly).Acts.PopCode, pni, (*ly).Indexes.NNeurons, pv); + var act = PopCodeParams_EncodeValue(ly.Acts.PopCode, pni, ly.Indexes.NNeurons, pv); Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(Act))] = act; } @@ -193,7 +193,7 @@ fn LayerParams_PostSpikeSpecial(ly: ptr, ctx: ptr, ctx: ptr, ctx: ptr, lpi: u32,pi: u32,ni: u32,di: u32) { +fn LayerParams_PostSpike(ly: LayerParams, ctx: Context, lpi: u32,pi: u32,ni: u32,di: u32) { LayerParams_PostSpikeSpecial(ly, ctx, lpi, pi, ni, di); - var intdt = (*ly).Acts.Dt.IntDt; + var intdt = ly.Acts.Dt.IntDt; Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(GeInt))] += intdt * (Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(Ge))] - Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(GeInt))]); Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(GiInt))] += intdt * (Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(GiSyn))] - Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(GiInt))]); - if 
((*ctx).PlusPhase == 1) { + if (ctx.PlusPhase == 1) { intdt *= f32(3.0); } Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(ActInt))] += intdt * (Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(Act))] - Neurons[Index3D(TensorStrides[70], TensorStrides[71], TensorStrides[72], u32(ni), u32(di), u32(ActInt))]); @@ -234,15 +234,14 @@ fn LayerParams_PostSpike(ly: ptr, ctx: ptr= NetworkIxs[0].NNeurons) { return; } - var di = Context_DataIndex(&ctx, i); + var di = Context_DataIndex(ctx, i); var li = NeuronIxs[Index2D(TensorStrides[10], TensorStrides[11], u32(ni), u32(NrnLayIndex))]; - var layers=Layers[li]; LayerParams_SendSpike(&layers, &ctx, ni, di); - Ctx[0] = ctx; + let layers=Layers[li]; LayerParams_SendSpike(layers, ctx, ni, di); } //////// import: "act-path.go" @@ -258,16 +257,16 @@ struct SynComParams { MaxDelay: u32, DelLen: u32, } -fn SynComParams_RingIndex(sc: ptr, i: u32) -> u32 { +fn SynComParams_RingIndex(sc: SynComParams, i: u32) -> u32 { var ri = i; - if (ri >= (*sc).DelLen) { - ri -= (*sc).DelLen; + if (ri >= sc.DelLen) { + ri -= sc.DelLen; }return ri; } -fn SynComParams_WriteOff(sc: ptr, cycTot: i32) -> u32 { - return SynComParams_RingIndex(sc, u32(cycTot)%(*sc).DelLen + (*sc).DelLen); +fn SynComParams_WriteOff(sc: SynComParams, cycTot: i32) -> u32 { + return SynComParams_RingIndex(sc, u32(cycTot)%sc.DelLen + sc.DelLen); } -fn SynComParams_FloatToIntFactor(sc: ptr) -> f32 { +fn SynComParams_FloatToIntFactor(sc: SynComParams) -> f32 { return f32(u32(1) << 24); // leaves 7 bits = 128 to cover any extreme cases } struct PathScaleParams { @@ -276,10 +275,10 @@ struct PathScaleParams { pad: f32, pad1: f32, } -fn PathParams_SendSpike(pt: ptr, ctx: ptr, ni: u32,di: u32,lni: u32) { - var sendVal = (*pt).GScale.Scale * SynComParams_FloatToIntFactor(&(*pt).Com); // pre-bake in conversion to uint factor - if ((*pt).Type == CTCtxtPath) { - if (u32((*ctx).Cycle) != 
u32((*ctx).ThetaCycles)-1-(*pt).Com.DelLen) { +fn PathParams_SendSpike(pt: PathParams, ctx: Context, ni: u32,di: u32,lni: u32) { + var sendVal = pt.GScale.Scale * SynComParams_FloatToIntFactor(pt.Com); // pre-bake in conversion to uint factor + if (pt.Type == CTCtxtPath) { + if (u32(ctx.Cycle) != u32(ctx.ThetaCycles)-1-pt.Com.DelLen) { return; } sendVal *= Neurons[Index3D(TensorStrides[70], TensorStrides[71], // Burst is regular CaP for all non-SuperLayer neurons @@ -289,16 +288,16 @@ fn PathParams_SendSpike(pt: ptr, ctx: ptr return; } } - var recvNeurSt = (*pt).Indexes.RecvNeurSt; - var npst = (*pt).Indexes.NPathNeurSt; - var cni = (*pt).Indexes.SendConSt + lni; - var synst = (*pt).Indexes.SynapseSt + PathSendCon[Index2D(TensorStrides[30], TensorStrides[31], u32(cni), u32(StartOff))]; + var recvNeurSt = pt.Indexes.RecvNeurSt; + var npst = pt.Indexes.NPathNeurSt; + var cni = pt.Indexes.SendConSt + lni; + var synst = pt.Indexes.SynapseSt + PathSendCon[Index2D(TensorStrides[30], TensorStrides[31], u32(cni), u32(StartOff))]; var synn = PathSendCon[Index2D(TensorStrides[30], TensorStrides[31], u32(cni), u32(Nitems))]; for (var ci = u32(0); ci < synn; ci++) { var syni = synst + ci; var ri = SynapseIxs[Index2D(TensorStrides[20], TensorStrides[21], u32(syni), u32(SynRecvIndex))]; var npti = npst + (ri - recvNeurSt); - var deli = SynComParams_WriteOff(&(*pt).Com, (*ctx).CyclesTotal); + var deli = SynComParams_WriteOff(pt.Com, ctx.CyclesTotal); var sv = i32(sendVal * Synapses[Index2D(TensorStrides[170], TensorStrides[171], u32(syni), u32(Wt))]); atomicAdd(&PathGBuf[Index3D(TensorStrides[150], TensorStrides[151], TensorStrides[152], u32(npti), u32(di), u32(deli))], sv); @@ -368,8 +367,8 @@ struct DtParams { IntDt: f32, LongAvgDt: f32, } -fn DtParams_GeSynFromRawSteady(dp: ptr, geRaw: f32) -> f32 { - return geRaw * (*dp).GeTau; +fn DtParams_GeSynFromRawSteady(dp: DtParams, geRaw: f32) -> f32 { + return geRaw * dp.GeTau; } struct SpikeNoiseParams { On: i32, @@ -408,36 +407,36 
@@ struct PopCodeParams { MaxSigma: f32, Clip: i32, } -fn PopCodeParams_ClampValue(pc: ptr, val: f32) -> f32 { +fn PopCodeParams_ClampValue(pc: PopCodeParams, val: f32) -> f32 { var clipVal = val; - if (clipVal < (*pc).Min) { - clipVal = (*pc).Min; + if (clipVal < pc.Min) { + clipVal = pc.Min; } - if (clipVal > (*pc).Max) { - clipVal = (*pc).Max; + if (clipVal > pc.Max) { + clipVal = pc.Max; }return clipVal; } -fn PopCodeParams_ProjectParam(pc: ptr, minParam: f32,maxParam: f32,clipVal: f32) -> f32 { - var normVal = (clipVal - (*pc).Min) / ((*pc).Max - (*pc).Min);return minParam + normVal*(maxParam-minParam); +fn PopCodeParams_ProjectParam(pc: PopCodeParams, minParam: f32,maxParam: f32,clipVal: f32) -> f32 { + var normVal = (clipVal - pc.Min) / (pc.Max - pc.Min);return minParam + normVal*(maxParam-minParam); } -fn PopCodeParams_EncodeValue(pc: ptr, i: u32,n: u32, val: f32) -> f32 { +fn PopCodeParams_EncodeValue(pc: PopCodeParams, i: u32,n: u32, val: f32) -> f32 { var eval = val; var clipVal = PopCodeParams_ClampValue(pc, eval); - if ((*pc).Clip == 1) { + if (pc.Clip == 1) { eval = clipVal; } - var rng = (*pc).Max - (*pc).Min; + var rng = pc.Max - pc.Min; var act = f32(1); - if ((*pc).MinAct < 1) { - act = PopCodeParams_ProjectParam(pc, (*pc).MinAct, f32(1.0), clipVal); + if (pc.MinAct < 1) { + act = PopCodeParams_ProjectParam(pc, pc.MinAct, f32(1.0), clipVal); } - var sig = (*pc).MinSigma; - if ((*pc).MaxSigma > (*pc).MinSigma) { - sig = PopCodeParams_ProjectParam(pc, (*pc).MinSigma, (*pc).MaxSigma, clipVal); + var sig = pc.MinSigma; + if (pc.MaxSigma > pc.MinSigma) { + sig = PopCodeParams_ProjectParam(pc, pc.MinSigma, pc.MaxSigma, clipVal); } var gnrm = 1.0 / (rng * sig); var incr = rng / f32(n-1); - var trg = (*pc).Min + incr*f32(i); + var trg = pc.Min + incr*f32(i); var dist = gnrm * (trg - eval);return act * FastExp(-(dist * dist)); } struct ActParams { @@ -613,11 +612,11 @@ struct Context { //types:add -setters SlowCounter: i32, RandCounter: RandCounter, } -fn 
Context_ItemIndex(ctx: ptr, idx: u32) -> u32 { - return idx / (*ctx).NData; +fn Context_ItemIndex(ctx: Context, idx: u32) -> u32 { + return idx / ctx.NData; } -fn Context_DataIndex(ctx: ptr, idx: u32) -> u32 { - return idx % (*ctx).NData; +fn Context_DataIndex(ctx: Context, idx: u32) -> u32 { + return idx % ctx.NData; } //////// import: "deep-layer.go" @@ -627,9 +626,9 @@ struct BurstParams { pad: f32, pad1: f32, } -fn BurstParams_ThrFromAvgMax(bp: ptr, avg: f32,mx: f32) -> f32 { - var thr = avg + (*bp).ThrRel*(mx-avg); - thr = max(thr, (*bp).ThrAbs);return thr; +fn BurstParams_ThrFromAvgMax(bp: BurstParams, avg: f32,mx: f32) -> f32 { + var thr = avg + bp.ThrRel*(mx-avg); + thr = max(thr, bp.ThrAbs);return thr; } struct CTParams { GeGain: f32, @@ -879,8 +878,8 @@ struct LayerParams { TDDa: TDDaParams, Indexes: LayerIndexes, } -fn LayerParams_PoolIndex(ly: ptr, pi: u32) -> u32 { - return (*ly).PoolSt + pi; +fn LayerParams_PoolIndex(ly: LayerParams, pi: u32) -> u32 { + return ly.PoolSt + pi; } //////// import: "layertypes.go" @@ -1068,12 +1067,12 @@ struct F32 { pad: i32, pad1: i32, // for gpu use } -fn F32_ClampValue(mr: ptr, val: f32) -> f32 { - if (val < (*mr).Min) { - return (*mr).Min; +fn F32_ClampValue(mr: F32, val: f32) -> f32 { + if (val < mr.Min) { + return mr.Min; } - if (val > (*mr).Max) { - return (*mr).Max; + if (val > mr.Max) { + return mr.Max; }return val; } @@ -1432,7 +1431,7 @@ struct BLAPathParams { //////// import: "rubicon.go" fn RubiconUSStimValue(di: u32, usIndex: u32, valence: ValenceTypes) -> f32 { - var nix = NetworkIxs[0]; + let nix = NetworkIxs[0]; var us = f32(0); switch (valence) { case Positive: { diff --git a/axon/shaders/SlowAdaptLayer.wgsl b/axon/shaders/SlowAdaptLayer.wgsl index 7bc261242..09e3af804 100644 --- a/axon/shaders/SlowAdaptLayer.wgsl +++ b/axon/shaders/SlowAdaptLayer.wgsl @@ -25,7 +25,7 @@ var RecvPathIxs: array; var PathRecvCon: array; @group(1) @binding(4) var RecvSynIxs: array; -// // Ctx is the current context state 
(one only). +// // Ctx is the current context state (one only). This is read-only except in // specific kernels. @group(2) @binding(0) var Ctx: array; @group(2) @binding(1) @@ -78,8 +78,8 @@ fn Index3D(s0: u32, s1: u32, s2: u32, i0: u32, i1: u32, i2: u32) -> u32 { //////// import: "vars.go" //////// import: "act-layer.go" -fn LayerParams_IsInput(ly: ptr) -> bool { - switch ((*ly).Type) { +fn LayerParams_IsInput(ly: LayerParams) -> bool { + switch (ly.Type) { case InputLayer: { return true; } @@ -577,11 +577,11 @@ struct ActAvgParams { AdaptRate: f32, pad: f32, } -fn ActAvgParams_Adapt(aa: ptr, gimult: ptr, act: f32) -> bool { - var trg = (*aa).Nominal + (*aa).Offset; +fn ActAvgParams_Adapt(aa: ActAvgParams, gimult: ptr, act: f32) -> bool { + var trg = aa.Nominal + aa.Offset; var del = (act - trg) / trg; - if (del < -(*aa).LoTol || del > (*aa).HiTol) { - *gimult += (*aa).AdaptRate * del;return true; + if (del < -aa.LoTol || del > aa.HiTol) { + *gimult += aa.AdaptRate * del;return true; }return false; } struct InhibParams { @@ -654,8 +654,8 @@ struct LayerParams { TDDa: TDDaParams, Indexes: LayerIndexes, } -fn LayerParams_PoolIndex(ly: ptr, pi: u32) -> u32 { - return (*ly).PoolSt + pi; +fn LayerParams_PoolIndex(ly: LayerParams, pi: u32) -> u32 { + return ly.PoolSt + pi; } //////// import: "layertypes.go" @@ -707,28 +707,28 @@ const LayerRewPredPos: LayerVars = 10; const LayerRewPredNeg: LayerVars = 11; //////// import: "learn-layer.go" -fn LayerParams_SlowAdaptLayer(ly: ptr, ctx: ptr) { +fn LayerParams_SlowAdaptLayer(ly: LayerParams, ctx: Context) { LayerParams_AdaptInhib(ly, ctx); LayerParams_AvgDifFromTrgAvg(ly, ctx); } -fn LayerParams_AdaptInhib(ly: ptr, ctx: ptr) { - if ((*ly).Inhib.ActAvg.AdaptGi == 0 || LayerParams_IsInput(ly)) { +fn LayerParams_AdaptInhib(ly: LayerParams, ctx: Context) { + if (ly.Inhib.ActAvg.AdaptGi == 0 || LayerParams_IsInput(ly)) { return; } - for (var di = u32(0); di < (*ctx).NData; di++) { - var giMult = 
LayerStates[Index3D(TensorStrides[90], TensorStrides[91], TensorStrides[92], u32((*ly).Index), u32(di), u32(LayerGiMult))]; - var avg = LayerStates[Index3D(TensorStrides[90], TensorStrides[91], TensorStrides[92], u32((*ly).Index), u32(di), u32(LayerActMAvg))]; - ActAvgParams_Adapt(&(*ly).Inhib.ActAvg, &giMult, avg); + for (var di = u32(0); di < ctx.NData; di++) { + var giMult = LayerStates[Index3D(TensorStrides[90], TensorStrides[91], TensorStrides[92], u32(ly.Index), u32(di), u32(LayerGiMult))]; + var avg = LayerStates[Index3D(TensorStrides[90], TensorStrides[91], TensorStrides[92], u32(ly.Index), u32(di), u32(LayerActMAvg))]; + ActAvgParams_Adapt(ly.Inhib.ActAvg, &giMult, avg); LayerStates[Index3D(TensorStrides[90], TensorStrides[91], TensorStrides[92], - u32((*ly).Index), u32(di), u32(LayerGiMult))] = giMult; + u32(ly.Index), u32(di), u32(LayerGiMult))] = giMult; } } -fn LayerParams_AvgDifFromTrgAvg(ly: ptr, ctx: ptr) { +fn LayerParams_AvgDifFromTrgAvg(ly: LayerParams, ctx: Context) { var sp = u32(0); - if ((*ly).Indexes.NPools > 1) { + if (ly.Indexes.NPools > 1) { sp = u32(1); } - var np = (*ly).Indexes.NPools; + var np = ly.Indexes.NPools; for (var spi = sp; spi < np; spi++) { var pi = LayerParams_PoolIndex(ly, spi); var nsi = PoolIxs[Index2D(TensorStrides[0], TensorStrides[1], u32(pi), u32(PoolNeurSt))]; @@ -736,7 +736,7 @@ fn LayerParams_AvgDifFromTrgAvg(ly: ptr, ctx: ptr, ctx: ptr, ctx: ptr, ctx: ptr, ctx: ptr= NetworkIxs[0].NLayers) { return; } - var layers=Layers[li]; LayerParams_SlowAdaptLayer(&layers, &ctx); - Ctx[0] = ctx; + let layers=Layers[li]; LayerParams_SlowAdaptLayer(layers, ctx); } //////// import: "learn-path.go" diff --git a/axon/shaders/SlowAdaptNeuron.wgsl b/axon/shaders/SlowAdaptNeuron.wgsl index 6d292bf43..54dee28b8 100644 --- a/axon/shaders/SlowAdaptNeuron.wgsl +++ b/axon/shaders/SlowAdaptNeuron.wgsl @@ -25,7 +25,7 @@ var RecvPathIxs: array; var PathRecvCon: array; @group(1) @binding(4) var RecvSynIxs: array; -// // Ctx is the current 
context state (one only). +// // Ctx is the current context state (one only). This is read-only except in // specific kernels. @group(2) @binding(0) var Ctx: array; @group(2) @binding(1) @@ -78,8 +78,8 @@ fn Index3D(s0: u32, s1: u32, s2: u32, i0: u32, i1: u32, i2: u32) -> u32 { //////// import: "vars.go" //////// import: "act-layer.go" -fn LayerParams_IsTarget(ly: ptr) -> bool { - switch ((*ly).Type) { +fn LayerParams_IsTarget(ly: LayerParams) -> bool { + switch (ly.Type) { case TargetLayer: { return true; } @@ -91,8 +91,8 @@ fn LayerParams_IsTarget(ly: ptr) -> bool { } } } -fn LayerParams_IsLearnTrgAvg(ly: ptr) -> bool { - if ((*ly).Acts.Clamp.IsInput == 1 || (*ly).Acts.Clamp.IsTarget == 1 || (*ly).Learn.TrgAvgAct.RescaleOn == 0) { +fn LayerParams_IsLearnTrgAvg(ly: LayerParams) -> bool { + if (ly.Acts.Clamp.IsInput == 1 || ly.Acts.Clamp.IsTarget == 1 || ly.Learn.TrgAvgAct.RescaleOn == 0) { return false; }return true; } @@ -698,44 +698,43 @@ const LayerRewPredPos: LayerVars = 10; const LayerRewPredNeg: LayerVars = 11; //////// import: "learn-layer.go" -fn LayerParams_SlowAdaptNeuron(ly: ptr, ctx: ptr, ri: u32) { - var lni = ri - (*ly).Indexes.NeurSt; - var rn = (*ly).Indexes.RecvN; +fn LayerParams_SlowAdaptNeuron(ly: LayerParams, ctx: Context, ri: u32) { + var lni = ri - ly.Indexes.NeurSt; + var rn = ly.Indexes.RecvN; for (var pi = u32(0); pi < rn; pi++) { - var pti = RecvPathIxs[Index1D(TensorStrides[40], u32((*ly).Indexes.RecvSt + pi))]; - var paths=Paths[pti]; PathParams_SlowAdapt(&paths, ctx, ly, pti, ri, lni); + var pti = RecvPathIxs[Index1D(TensorStrides[40], u32(ly.Indexes.RecvSt + pi))]; + let paths=Paths[pti]; PathParams_SlowAdapt(paths, ctx, ly, pti, ri, lni); } } //////// import: "learn-net.go" fn SlowAdaptNeuron(ni: u32) { //gosl:kernel - var ctx = Ctx[0]; + let ctx = Ctx[0]; if (ni >= NetworkIxs[0].NNeurons) { return; } var li = NeuronIxs[Index2D(TensorStrides[10], TensorStrides[11], u32(ni), u32(NrnLayIndex))]; - var layers=Layers[li]; 
LayerParams_SlowAdaptNeuron(&layers, &ctx, ni); - Ctx[0] = ctx; + let layers=Layers[li]; LayerParams_SlowAdaptNeuron(layers, ctx, ni); } //////// import: "learn-path.go" -fn PathParams_SlowAdapt(pt: ptr, ctx: ptr, rlay: ptr, pti: u32,ri: u32,lni: u32) { +fn PathParams_SlowAdapt(pt: PathParams, ctx: Context, rlay: LayerParams, pti: u32,ri: u32,lni: u32) { PathParams_SWtFromWt(pt, ctx, rlay, pti, ri, lni); PathParams_SynScale(pt, ctx, rlay, pti, ri, lni); } -fn PathParams_SWtFromWt(pt: ptr, ctx: ptr, rlay: ptr, pti: u32,ri: u32,lni: u32) { - if ((*pt).Learn.Learn == 0 || (*pt).SWts.Adapt.On == 0) { +fn PathParams_SWtFromWt(pt: PathParams, ctx: Context, rlay: LayerParams, pti: u32,ri: u32,lni: u32) { + if (pt.Learn.Learn == 0 || pt.SWts.Adapt.On == 0) { return; } if (LayerParams_IsTarget(rlay)) { return; } - var mx = (*pt).SWts.Limit.Max; - var mn = (*pt).SWts.Limit.Min; - var lr = (*pt).SWts.Adapt.LRate; - var cni = (*pt).Indexes.RecvConSt + lni; + var mx = pt.SWts.Limit.Max; + var mn = pt.SWts.Limit.Min; + var lr = pt.SWts.Adapt.LRate; + var cni = pt.Indexes.RecvConSt + lni; var synn = PathRecvCon[Index2D(TensorStrides[50], TensorStrides[51], u32(cni), u32(Nitems))]; - var synst = (*pt).Indexes.RecvSynSt + PathRecvCon[Index2D(TensorStrides[50], TensorStrides[51], u32(cni), u32(StartOff))]; + var synst = pt.Indexes.RecvSynSt + PathRecvCon[Index2D(TensorStrides[50], TensorStrides[51], u32(cni), u32(StartOff))]; var avgDWt = f32(0); for (var ci = u32(0); ci < synn; ci++) { var syni = RecvSynIxs[Index1D(TensorStrides[60], u32(synst + ci))]; @@ -749,27 +748,27 @@ fn PathParams_SWtFromWt(pt: ptr, ctx: ptr avgDWt += Synapses[Index2D(TensorStrides[170], TensorStrides[171], u32(syni), u32(DSWt))]; } avgDWt /= f32(synn); - avgDWt *= (*pt).SWts.Adapt.SubMean; + avgDWt *= pt.SWts.Adapt.SubMean; for (var ci = u32(0); ci < synn; ci++) { var syni = RecvSynIxs[Index1D(TensorStrides[60], u32(synst + ci))]; Synapses[Index2D(TensorStrides[170], TensorStrides[171], u32(syni), 
u32(SWt))] += lr * (Synapses[Index2D(TensorStrides[170], TensorStrides[171], u32(syni), u32(DSWt))] - avgDWt); var swt = Synapses[Index2D(TensorStrides[170], TensorStrides[171], u32(syni), u32(SWt))]; Synapses[Index2D(TensorStrides[170], TensorStrides[171], u32(syni), u32(DSWt))] = 0.0; - Synapses[Index2D(TensorStrides[170], TensorStrides[171], u32(syni), u32(LWt))] = SWtParams_LWtFromWts(&(*pt).SWts, Synapses[Index2D(TensorStrides[170], TensorStrides[171], u32(syni), u32(Wt))], swt); - Synapses[Index2D(TensorStrides[170], TensorStrides[171], u32(syni), u32(Wt))] = SWtParams_WtValue(&(*pt).SWts, swt, Synapses[Index2D(TensorStrides[170], TensorStrides[171], u32(syni), u32(LWt))]); + Synapses[Index2D(TensorStrides[170], TensorStrides[171], u32(syni), u32(LWt))] = SWtParams_LWtFromWts(pt.SWts, Synapses[Index2D(TensorStrides[170], TensorStrides[171], u32(syni), u32(Wt))], swt); + Synapses[Index2D(TensorStrides[170], TensorStrides[171], u32(syni), u32(Wt))] = SWtParams_WtValue(pt.SWts, swt, Synapses[Index2D(TensorStrides[170], TensorStrides[171], u32(syni), u32(LWt))]); } } -fn PathParams_SynScale(pt: ptr, ctx: ptr, rlay: ptr, pti: u32,ri: u32,lni: u32) { - if ((*pt).Learn.Learn == 0 || PathParams_IsInhib(pt)) { +fn PathParams_SynScale(pt: PathParams, ctx: Context, rlay: LayerParams, pti: u32,ri: u32,lni: u32) { + if (pt.Learn.Learn == 0 || PathParams_IsInhib(pt)) { return; } if (!LayerParams_IsLearnTrgAvg(rlay)) { return; } - var lr = (*rlay).Learn.TrgAvgAct.SynScaleRate; - var cni = (*pt).Indexes.RecvConSt + lni; + var lr = rlay.Learn.TrgAvgAct.SynScaleRate; + var cni = pt.Indexes.RecvConSt + lni; var synn = PathRecvCon[Index2D(TensorStrides[50], TensorStrides[51], u32(cni), u32(Nitems))]; - var synst = (*pt).Indexes.RecvSynSt + PathRecvCon[Index2D(TensorStrides[50], TensorStrides[51], u32(cni), u32(StartOff))]; + var synst = pt.Indexes.RecvSynSt + PathRecvCon[Index2D(TensorStrides[50], TensorStrides[51], u32(cni), u32(StartOff))]; var adif = -lr * 
NeuronAvgs[Index2D(TensorStrides[80], TensorStrides[81], u32(ri), u32(AvgDif))]; for (var ci = u32(0); ci < synn; ci++) { var syni = RecvSynIxs[Index1D(TensorStrides[60], u32(synst + ci))]; @@ -781,7 +780,7 @@ fn PathParams_SynScale(pt: ptr, ctx: ptr, } else { Synapses[Index2D(TensorStrides[170], TensorStrides[171], u32(syni), u32(LWt))] += lwt * adif * swt; } - Synapses[Index2D(TensorStrides[170], TensorStrides[171], u32(syni), u32(Wt))] = SWtParams_WtValue(&(*pt).SWts, swt, Synapses[Index2D(TensorStrides[170], TensorStrides[171], u32(syni), u32(LWt))]); + Synapses[Index2D(TensorStrides[170], TensorStrides[171], u32(syni), u32(Wt))] = SWtParams_WtValue(pt.SWts, swt, Synapses[Index2D(TensorStrides[170], TensorStrides[171], u32(syni), u32(LWt))]); } } @@ -877,34 +876,34 @@ struct SWtParams { Adapt: SWtAdaptParams, Limit: F32, } -fn SWtParams_WtValue(sp: ptr, swt: f32,lwt: f32) -> f32 { +fn SWtParams_WtValue(sp: SWtParams, swt: f32,lwt: f32) -> f32 { return swt * SWtParams_SigFromLinWt(sp, lwt); } -fn SWtParams_SigFromLinWt(sp: ptr, lw: f32) -> f32 { +fn SWtParams_SigFromLinWt(sp: SWtParams, lw: f32) -> f32 { var wt: f32; - if ((*sp).Adapt.SigGain == 1) { + if (sp.Adapt.SigGain == 1) { wt = lw; - } else if ((*sp).Adapt.SigGain == 6) { + } else if (sp.Adapt.SigGain == 6) { wt = SigFun61(lw); } else { - wt = SigFun(lw, (*sp).Adapt.SigGain, f32(f32(1))); + wt = SigFun(lw, sp.Adapt.SigGain, f32(f32(1))); }return 2.0 * wt; // center at 1 instead of .5 } -fn SWtParams_LinFromSigWt(sp: ptr, wt: f32) -> f32 { +fn SWtParams_LinFromSigWt(sp: SWtParams, wt: f32) -> f32 { var wte = wt * 0.5; if (wte < 0) { wte = f32(0); } else if (wte > 1) { wte = f32(1); } - if ((*sp).Adapt.SigGain == 1) { + if (sp.Adapt.SigGain == 1) { return wte; } - if ((*sp).Adapt.SigGain == 6) { + if (sp.Adapt.SigGain == 6) { return SigInvFun61(wte); - }return SigInvFun(wte, (*sp).Adapt.SigGain, f32(f32(1))); + }return SigInvFun(wte, sp.Adapt.SigGain, f32(f32(1))); } -fn SWtParams_LWtFromWts(sp: ptr, wt: 
f32,swt: f32) -> f32 { +fn SWtParams_LWtFromWts(sp: SWtParams, wt: f32,swt: f32) -> f32 { var rwt = wt / swt;return SWtParams_LinFromSigWt(sp, rwt); } struct LRateParams { @@ -1167,8 +1166,8 @@ struct PathParams { BLA: BLAPathParams, Hip: HipPathParams, } -fn PathParams_IsInhib(pt: ptr) -> bool { - return (*pt).Com.GType == InhibitoryG; +fn PathParams_IsInhib(pt: PathParams) -> bool { + return pt.Com.GType == InhibitoryG; } //////// import: "pathtypes.go" diff --git a/axon/shaders/WtFromDWtLayer.wgsl b/axon/shaders/WtFromDWtLayer.wgsl index 4f60a79f3..a1c1b960c 100644 --- a/axon/shaders/WtFromDWtLayer.wgsl +++ b/axon/shaders/WtFromDWtLayer.wgsl @@ -25,7 +25,7 @@ var RecvPathIxs: array; var PathRecvCon: array; @group(1) @binding(4) var RecvSynIxs: array; -// // Ctx is the current context state (one only). +// // Ctx is the current context state (one only). This is read-only except in // specific kernels. @group(2) @binding(0) var Ctx: array; @group(2) @binding(1) @@ -78,15 +78,15 @@ fn Index3D(s0: u32, s1: u32, s2: u32, i0: u32, i1: u32, i2: u32) -> u32 { //////// import: "vars.go" //////// import: "act-layer.go" -fn LayerParams_IsLearnTrgAvg(ly: ptr) -> bool { - if ((*ly).Acts.Clamp.IsInput == 1 || (*ly).Acts.Clamp.IsTarget == 1 || (*ly).Learn.TrgAvgAct.RescaleOn == 0) { +fn LayerParams_IsLearnTrgAvg(ly: LayerParams) -> bool { + if (ly.Acts.Clamp.IsInput == 1 || ly.Acts.Clamp.IsTarget == 1 || ly.Learn.TrgAvgAct.RescaleOn == 0) { return false; }return true; } -fn LayerParams_LearnTrgAvgErrLRate(ly: ptr) -> f32 { +fn LayerParams_LearnTrgAvgErrLRate(ly: LayerParams) -> f32 { if (!LayerParams_IsLearnTrgAvg(ly)) { return f32(0); - }return (*ly).Learn.TrgAvgAct.ErrLRate; + }return ly.Learn.TrgAvgAct.ErrLRate; } //////// import: "act-net.go" @@ -647,11 +647,11 @@ struct LayerParams { TDDa: TDDaParams, Indexes: LayerIndexes, } -fn LayerParams_PoolIndex(ly: ptr, pi: u32) -> u32 { - return (*ly).PoolSt + pi; +fn LayerParams_PoolIndex(ly: LayerParams, pi: u32) -> u32 { + 
return ly.PoolSt + pi; } -fn LayerParams_HasPoolInhib(ly: ptr) -> bool { - return (*ly).Inhib.Pool.On == 1; +fn LayerParams_HasPoolInhib(ly: LayerParams) -> bool { + return ly.Inhib.Pool.On == 1; } //////// import: "layertypes.go" @@ -703,13 +703,13 @@ const LayerRewPredPos: LayerVars = 10; const LayerRewPredNeg: LayerVars = 11; //////// import: "learn-layer.go" -fn LayerParams_DTrgSubMean(ly: ptr, ctx: ptr) { - var submean = (*ly).Learn.TrgAvgAct.SubMean; +fn LayerParams_DTrgSubMean(ly: LayerParams, ctx: Context) { + var submean = ly.Learn.TrgAvgAct.SubMean; if (submean == 0) { return; } - if (LayerParams_HasPoolInhib(ly) && (*ly).Learn.TrgAvgAct.Pool == 1) { - var np = (*ly).Indexes.NPools; + if (LayerParams_HasPoolInhib(ly) && ly.Learn.TrgAvgAct.Pool == 1) { + var np = ly.Indexes.NPools; for (var spi = u32(1); spi < np; spi++) { var pi = LayerParams_PoolIndex(ly, spi); var nsi = PoolIxs[Index2D(TensorStrides[0], TensorStrides[1], u32(pi), u32(PoolNeurSt))]; @@ -717,7 +717,7 @@ fn LayerParams_DTrgSubMean(ly: ptr, ctx: ptr, ctx: ptr, ctx: ptr, ctx: ptr, ctx: ptr, ctx: ptr) { +fn LayerParams_TrgAvgFromD(ly: LayerParams, ctx: Context) { var lr = LayerParams_LearnTrgAvgErrLRate(ly); if (lr == 0) { return; } LayerParams_DTrgSubMean(ly, ctx); - var nn = (*ly).Indexes.NNeurons; + var nn = ly.Indexes.NNeurons; for (var lni = u32(0); lni < nn; lni++) { - var ni = (*ly).Indexes.NeurSt + lni; + var ni = ly.Indexes.NeurSt + lni; if (NeuronIsOff(ni)) { continue; } var ntrg = NeuronAvgs[Index2D(TensorStrides[80], TensorStrides[81], u32(ni), u32(TrgAvg))] + NeuronAvgs[Index2D(TensorStrides[80], TensorStrides[81], u32(ni), u32(DTrgAvg))]; - ntrg = F32_ClampValue(&(*ly).Learn.TrgAvgAct.TrgRange, ntrg); + ntrg = F32_ClampValue(ly.Learn.TrgAvgAct.TrgRange, ntrg); NeuronAvgs[Index2D(TensorStrides[80], TensorStrides[81], u32(ni), u32(TrgAvg))] = ntrg; NeuronAvgs[Index2D(TensorStrides[80], TensorStrides[81], u32(ni), u32(DTrgAvg))] = 0.0; } } -fn LayerParams_WtFromDWtLayer(ly: ptr, 
ctx: ptr) { +fn LayerParams_WtFromDWtLayer(ly: LayerParams, ctx: Context) { LayerParams_TrgAvgFromD(ly, ctx); } //////// import: "learn-net.go" fn WtFromDWtLayer(li: u32) { //gosl:kernel - var ctx = Ctx[0]; + let ctx = Ctx[0]; if (li >= NetworkIxs[0].NLayers) { return; } - var layers=Layers[li]; LayerParams_WtFromDWtLayer(&layers, &ctx); - Ctx[0] = ctx; + let layers=Layers[li]; LayerParams_WtFromDWtLayer(layers, ctx); } //////// import: "learn-path.go" @@ -921,12 +920,12 @@ struct F32 { pad: i32, pad1: i32, // for gpu use } -fn F32_ClampValue(mr: ptr, val: f32) -> f32 { - if (val < (*mr).Min) { - return (*mr).Min; +fn F32_ClampValue(mr: F32, val: f32) -> f32 { + if (val < mr.Min) { + return mr.Min; } - if (val > (*mr).Max) { - return (*mr).Max; + if (val > mr.Max) { + return mr.Max; }return val; } diff --git a/axon/shaders/WtFromDWtSyn.wgsl b/axon/shaders/WtFromDWtSyn.wgsl index 70ca814e3..c9f720c53 100644 --- a/axon/shaders/WtFromDWtSyn.wgsl +++ b/axon/shaders/WtFromDWtSyn.wgsl @@ -25,7 +25,7 @@ var RecvPathIxs: array; var PathRecvCon: array; @group(1) @binding(4) var RecvSynIxs: array; -// // Ctx is the current context state (one only). +// // Ctx is the current context state (one only). This is read-only except in // specific kernels. 
@group(2) @binding(0) var Ctx: array; @group(2) @binding(1) @@ -683,18 +683,17 @@ const LayerRewPredNeg: LayerVars = 11; //////// import: "learn-net.go" fn WtFromDWtSyn(syni: u32) { //gosl:kernel - var ctx = Ctx[0]; + let ctx = Ctx[0]; if (syni >= NetworkIxs[0].NSyns) { return; } var pti = SynapseIxs[Index2D(TensorStrides[20], TensorStrides[21], u32(syni), u32(SynPathIndex))]; - var paths=Paths[pti]; PathParams_WtFromDWtSyn(&paths, &ctx, syni); - Ctx[0] = ctx; + let paths=Paths[pti]; PathParams_WtFromDWtSyn(paths, ctx, syni); } //////// import: "learn-path.go" -fn PathParams_WtFromDWtSyn(pt: ptr, ctx: ptr, syni: u32) { - switch ((*pt).Type) { +fn PathParams_WtFromDWtSyn(pt: PathParams, ctx: Context, syni: u32) { + switch (pt.Type) { case RWPath: { PathParams_WtFromDWtSynNoLimits(pt, ctx, syni); } @@ -712,18 +711,18 @@ fn PathParams_WtFromDWtSyn(pt: ptr, ctx: ptr, ctx: ptr, syni: u32) { +fn PathParams_WtFromDWtSynCortex(pt: PathParams, ctx: Context, syni: u32) { var dwt = Synapses[Index2D(TensorStrides[170], TensorStrides[171], u32(syni), u32(DWt))]; Synapses[Index2D(TensorStrides[170], TensorStrides[171], u32(syni), u32(DSWt))] += dwt; var wt = Synapses[Index2D(TensorStrides[170], TensorStrides[171], u32(syni), u32(Wt))]; var lwt = Synapses[Index2D(TensorStrides[170], TensorStrides[171], u32(syni), u32(LWt))]; - SWtParams_WtFromDWt(&(*pt).SWts, &wt, &lwt, dwt, Synapses[Index2D(TensorStrides[170], TensorStrides[171], u32(syni), u32(SWt))]); + SWtParams_WtFromDWt(pt.SWts, &wt, &lwt, dwt, Synapses[Index2D(TensorStrides[170], TensorStrides[171], u32(syni), u32(SWt))]); Synapses[Index2D(TensorStrides[170], TensorStrides[171], u32(syni), u32(DWt))] = 0.0; Synapses[Index2D(TensorStrides[170], TensorStrides[171], u32(syni), u32(Wt))] = wt; Synapses[Index2D(TensorStrides[170], TensorStrides[171], u32(syni), u32(LWt))] = lwt; } -fn PathParams_WtFromDWtSynNoLimits(pt: ptr, ctx: ptr, syni: u32) { +fn PathParams_WtFromDWtSynNoLimits(pt: PathParams, ctx: Context, syni: u32) { 
var dwt = Synapses[Index2D(TensorStrides[170], TensorStrides[171], u32(syni), u32(DWt))]; if (dwt == 0) { return; @@ -812,20 +811,20 @@ struct SWtParams { Adapt: SWtAdaptParams, Limit: F32, } -fn SWtParams_WtValue(sp: ptr, swt: f32,lwt: f32) -> f32 { +fn SWtParams_WtValue(sp: SWtParams, swt: f32,lwt: f32) -> f32 { return swt * SWtParams_SigFromLinWt(sp, lwt); } -fn SWtParams_SigFromLinWt(sp: ptr, lw: f32) -> f32 { +fn SWtParams_SigFromLinWt(sp: SWtParams, lw: f32) -> f32 { var wt: f32; - if ((*sp).Adapt.SigGain == 1) { + if (sp.Adapt.SigGain == 1) { wt = lw; - } else if ((*sp).Adapt.SigGain == 6) { + } else if (sp.Adapt.SigGain == 6) { wt = SigFun61(lw); } else { - wt = SigFun(lw, (*sp).Adapt.SigGain, f32(f32(1))); + wt = SigFun(lw, sp.Adapt.SigGain, f32(f32(1))); }return 2.0 * wt; // center at 1 instead of .5 } -fn SWtParams_WtFromDWt(sp: ptr, wt: ptr,lwt: ptr, dwt: f32,swt: f32) { +fn SWtParams_WtFromDWt(sp: SWtParams, wt: ptr,lwt: ptr, dwt: f32,swt: f32) { if (dwt == 0) { if (*wt == 0) { // restore failed wts *wt = SWtParams_WtValue(sp, swt, *lwt); diff --git a/axon/typegen.go b/axon/typegen.go index 495b7f63c..279c3dbe2 100644 --- a/axon/typegen.go +++ b/axon/typegen.go @@ -206,9 +206,17 @@ var _ = types.AddType(&types.Type{Name: "github.com/emer/axon/v2/axon.NeuronAvgV var _ = types.AddType(&types.Type{Name: "github.com/emer/axon/v2/axon.NeuronIndexVars", IDName: "neuron-index-vars", Doc: "NeuronIndexVars are neuron-level indexes used to access layers and pools\nfrom the individual neuron level."}) -var _ = types.AddType(&types.Type{Name: "github.com/emer/axon/v2/axon.LayerSheets", IDName: "layer-sheets", Doc: "LayerSheets are Layer parameter Sheets."}) +var _ = types.AddType(&types.Type{Name: "github.com/emer/axon/v2/axon.LayerSheets", IDName: "layer-sheets", Doc: "LayerSheets contains Layer parameter Sheets."}) -var _ = types.AddType(&types.Type{Name: "github.com/emer/axon/v2/axon.PathSheets", IDName: "path-sheets", Doc: "PathSheets are Path parameter 
Sheets."}) +var _ = types.AddType(&types.Type{Name: "github.com/emer/axon/v2/axon.LayerSheet", IDName: "layer-sheet", Doc: "LayerSheet is one Layer parameter Sheet."}) + +var _ = types.AddType(&types.Type{Name: "github.com/emer/axon/v2/axon.LayerSel", IDName: "layer-sel", Doc: "LayerSel is one Layer parameter Selector."}) + +var _ = types.AddType(&types.Type{Name: "github.com/emer/axon/v2/axon.PathSheets", IDName: "path-sheets", Doc: "PathSheets contains Path parameter Sheets."}) + +var _ = types.AddType(&types.Type{Name: "github.com/emer/axon/v2/axon.PathSheet", IDName: "path-sheet", Doc: "PathSheet is one Path parameter Sheet."}) + +var _ = types.AddType(&types.Type{Name: "github.com/emer/axon/v2/axon.PathSel", IDName: "path-sel", Doc: "PathSel is one Path parameter Selector."}) var _ = types.AddType(&types.Type{Name: "github.com/emer/axon/v2/axon.Params", IDName: "params", Doc: "Params contains the [LayerParams] and [PathParams] parameter setting functions\nprovided by the [emergent] [params] package.", Fields: []types.Field{{Name: "Layer", Doc: "Layer has the parameters to apply to the [LayerParams] for layers."}, {Name: "Path", Doc: "Path has the parameters to apply to the [PathParams] for paths."}, {Name: "ExtraSheets", Doc: "ExtraSheets has optional additional sheets of parameters to apply\nafter the default Base sheet. 
Use \"Script\" for default Script sheet.\nMultiple names separated by spaces can be used (don't put spaces in Sheet names!)"}, {Name: "Tag", Doc: "Tag is an optional additional tag to add to log file names to identify\na specific run of the model (typically set by a config file or args)."}, {Name: "Script", Doc: "Script is a parameter setting script, which adds to the Layer and Path sheets\ntypically using the \"Script\" set name."}, {Name: "Interp", Doc: "Interp is the yaegi interpreter for running the script."}}}) diff --git a/axon/vars.go b/axon/vars.go index 233876c7e..ee0776aa7 100644 --- a/axon/vars.go +++ b/axon/vars.go @@ -85,8 +85,10 @@ var ( //////// Neuron+ State - // Ctx is the current context state (one only). + // Ctx is the current context state (one only). This is read-only except in + // specific kernels. //gosl:group Neurons + //gosl:read-or-write Ctx []Context // Neurons are all the neuron state variables. diff --git a/sims/ra25/params.go b/sims/ra25/params.go index 24b250fa0..0e78b5774 100644 --- a/sims/ra25/params.go +++ b/sims/ra25/params.go @@ -44,7 +44,7 @@ var PathParams = axon.PathSheets{ "Base": { {Sel: "Path", Doc: "basic path params", Set: func(pt *axon.PathParams) { - pt.Learn.LRate.Base = 0.08 // 0.08 for trace, 0.03 for no trace + pt.Learn.LRate.Base = 0.07 // 0.08 for trace, 0.03 for no trace pt.SWts.Adapt.LRate = 0.1 // .1 >= .2, pt.SWts.Init.SPct = 0.5 // .5 >= 1 here -- 0.5 more reliable, 1.0 faster.. pt.Learn.DWt.Trace.SetBool(true) // no trace is faster! requires lrate = 0.03