diff --git a/src/matlab/lagrange_vec.m b/src/matlab/lagrange_vec.m deleted file mode 100644 index c962843..0000000 --- a/src/matlab/lagrange_vec.m +++ /dev/null @@ -1,7 +0,0 @@ -function PX = lagrange_vec(X, Y) -% LAGRANGE_VEC vectorized version of LAGRANGE3: row by row, fit the parabola through the three points (X(i,j), Y(i,j)), j=1..3, and evaluate it at x = 0. - -PX = X(:,2) .* X(:,3) ./ ((X(:,1) - X(:,2)).*(X(:,1) - X(:,3))).*Y(:,1) + ... - X(:,1) .* X(:,3) ./ ((X(:,2) - X(:,1)).*(X(:,2) - X(:,3))).*Y(:,2) + ... - X(:,1) .* X(:,2) ./ ((X(:,3) - X(:,1)).*(X(:,3) - X(:,2))).*Y(:,3); -end \ No newline at end of file
diff --git a/src/matlab/qe_bias_correction.m b/src/matlab/qe_bias_correction.m deleted file mode 100644 index cc7f405..0000000 --- a/src/matlab/qe_bias_correction.m +++ /dev/null @@ -1,189 +0,0 @@ -function [h0]=qe_bias_correction(S,R) - - eps=1e-17; % regulariser inside log2, to avoid log(0) - - ntr = max(numel(S), numel(R)); % take the max because either argument can be passed as a scalar instead of an array, to indicate that we want a marginal entropy instead of a joint entropy - R_values=unique(R); - S_values=unique(S); - - N_bins=length(R_values); - N_s=length(S_values); - - - idx=randperm(ntr); - ntr2=floor(ntr/2); - ntr4=floor(ntr/4); - r21=idx(1:ntr2); - r22=idx(ntr2+1:2*ntr2); - - if length(R)>1 - R21=R(r21); - R22=R(r22); - else - R21=ones(1,ntr2); - R22=ones(1,ntr2); - end - - - if length(S)>1 - S21=S(r21); - S22=S(r22); - else - S21=ones(1,ntr2); - S22=ones(1,ntr2); - end - - - r41=idx(1:ntr4); - r42=idx(ntr4+1:2*ntr4); - r43=idx(2*ntr4+1:3*ntr4); - r44=idx(3*ntr4+1:4*ntr4); - - - if length(R)>1 - R41=R(r41); - R42=R(r42); - R43=R(r43); - R44=R(r44); - else - R41=ones(1,ntr4); - R42=ones(1,ntr4); - R43=ones(1,ntr4); - R44=ones(1,ntr4); - end - - if length(S)>1 - S41=S(r41); - S42=S(r42); - S43=S(r43); - S44=S(r44); - else - S41=ones(1,ntr4); - S42=ones(1,ntr4); - S43=ones(1,ntr4); - S44=ones(1,ntr4); - end - - % empirical joint (S,R) histograms, for the full set of trials and for each half and quarter - prs=zeros(N_s,N_bins); - for ss=1:N_s - for rr=1:N_bins - prs(ss,rr)=sum( (R==R_values(rr)).* (S==S_values(ss)) ); - end - end - prs=prs/sum(prs(:)); - - p21=zeros(N_s,N_bins); - for ss=1:N_s - for rr=1:N_bins - p21(ss,rr)=sum( (R21==R_values(rr)).* (S21==S_values(ss)) ); - end - end - p21=p21/sum(p21(:)); - - p22=zeros(N_s,N_bins); - for ss=1:N_s - for rr=1:N_bins - p22(ss,rr)=sum( (R22==R_values(rr)).* (S22==S_values(ss)) ); - end - end - p22=p22/sum(p22(:)); - - - p41=zeros(N_s,N_bins); - for ss=1:N_s - for rr=1:N_bins - p41(ss,rr)=sum( (R41==R_values(rr)).* (S41==S_values(ss)) ); - end - end - p41=p41/sum(p41(:)); - - p42=zeros(N_s,N_bins); - for ss=1:N_s - for rr=1:N_bins - p42(ss,rr)=sum( (R42==R_values(rr)).* (S42==S_values(ss)) ); - end - end - p42=p42/sum(p42(:)); - - p43=zeros(N_s,N_bins); - for ss=1:N_s - for rr=1:N_bins - p43(ss,rr)=sum( (R43==R_values(rr)).* (S43==S_values(ss)) ); - end - end - p43=p43/sum(p43(:)); - - p44=zeros(N_s,N_bins); - for ss=1:N_s - for rr=1:N_bins - p44(ss,rr)=sum( (R44==R_values(rr)).* (S44==S_values(ss)) ); - end - end - p44=p44/sum(p44(:)); - - % plug-in entropy estimates for the full set and for each subset - hdt=0; - for ss=1:N_s - for rr=1:N_bins - hdt=hdt-prs(ss,rr).*log2(prs(ss,rr)+eps); - end - end - - % hdt=-sum(prs.*log2(prs+eps)); - - %h21=-sum(p21.*log2(p21+eps)); - - h21=0; - for ss=1:N_s - for rr=1:N_bins - h21=h21-p21(ss,rr).*log2(p21(ss,rr)+eps); - end - end - - h22=0; - for ss=1:N_s - for rr=1:N_bins - h22=h22-p22(ss,rr).*log2(p22(ss,rr)+eps); - end - end - - h41=0; - for ss=1:N_s - for rr=1:N_bins - h41=h41-p41(ss,rr).*log2(p41(ss,rr)+eps); - end - end - - h42=0; - for ss=1:N_s - for rr=1:N_bins - h42=h42-p42(ss,rr).*log2(p42(ss,rr)+eps); - end - end - - h43=0; - for ss=1:N_s - for
rr=1:N_bins - h43=h43-p43(ss,rr).*log2(p43(ss,rr)+eps); - end - end - - h44=0; - for ss=1:N_s - for rr=1:N_bins - h44=h44-p44(ss,rr).*log2(p44(ss,rr)+eps); - end - end - - h4=(h41+h42+h43+h44)/4; - h2=(h21+h22)/2; - - h0=lagrange_vec([1/ntr4 1/ntr2 1/ntr],[h4 h2 hdt]); - - -end diff --git a/src/matlab/qe_bias_correction_II.m b/src/matlab/qe_bias_correction_II.m deleted file mode 100644 index f8c8a3e..0000000 --- a/src/matlab/qe_bias_correction_II.m +++ /dev/null @@ -1,190 +0,0 @@ -function [I_II_corr]=qe_bias_correction_II(S,R,C,I_II_non_corrected) - - ntr = numel(R); - - R_values=unique(R); - S_values=unique(S); - C_values=unique(C); - - N_r=length(R_values); - N_s=length(S_values); - N_c=length(C_values); - - idx=randperm(ntr); - ntr2=floor(ntr/2); - ntr4=floor(ntr/4); - r21=idx(1:ntr2); - r22=idx(ntr2+1:2*ntr2); - - - R_21=R(r21); - R_22=R(r22); - S_21=S(r21); - S_22=S(r22); - C_21=C(r21); - C_22=C(r22); - - - r41=idx(1:ntr4); - r42=idx(ntr4+1:2*ntr4); - r43=idx(2*ntr4+1:3*ntr4); - r44=idx(3*ntr4+1:4*ntr4); - - - R_41=R(r41); - R_42=R(r42); - R_43=R(r43); - R_44=R(r44); - - S_41=S(r41); - S_42=S(r42); - S_43=S(r43); - S_44=S(r44); - - C_41=C(r41); - C_42=C(r42); - C_43=C(r43); - C_44=C(r44); - - - p_src_21=zeros(N_s,N_r,N_c); - - for cc=1:N_c - for ss=1:N_s - for rr=1:N_r - p_src_21(ss,rr,cc)=sum( (R_21==R_values(rr)).* (S_21==S_values(ss)).* (C_21==C_values(cc))); - end - end - end - % - p_src_21=p_src_21/sum(p_src_21(:)); - - - p_src_22=zeros(N_s,N_r,N_c); - - for cc=1:N_c - for ss=1:N_s - for rr=1:N_r - p_src_22(ss,rr,cc)=sum( (R_22==R_values(rr)).* (S_22==S_values(ss)).* (C_22==C_values(cc))); - end - end - end - % - p_src_22=p_src_22/sum(p_src_22(:)); - - - [I,II,III,IIII]=pid(p_src_21); - - p_s_2=permute(p_src_21,[3 2 1]); - % - [I_s,II_s,III_s,IIII_s]=pid(p_s_2); - - I_II_21=min(I,I_s); - - - - [I,II,III,IIII]=pid(p_src_22); - - p_s_2=permute(p_src_22,[3 2 1]); - % - [I_s,II_s,III_s,IIII_s]=pid(p_s_2); - - I_II_22=min(I,I_s); - - - - p_src_41=zeros(N_s,N_r,N_c); - - for cc=1:N_c - for ss=1:N_s - for rr=1:N_r - p_src_41(ss,rr,cc)=sum( (R_41==R_values(rr)).* (S_41==S_values(ss)).* (C_41==C_values(cc))); - end - end - end - % - p_src_41=p_src_41/sum(p_src_41(:)); - - p_src_42=zeros(N_s,N_r,N_c); - - for cc=1:N_c - for ss=1:N_s - for rr=1:N_r - p_src_42(ss,rr,cc)=sum( (R_42==R_values(rr)).* (S_42==S_values(ss)).* (C_42==C_values(cc))); - end - end - end - % - p_src_42=p_src_42/sum(p_src_42(:)); - - p_src_43=zeros(N_s,N_r,N_c); - - for cc=1:N_c - for ss=1:N_s - for rr=1:N_r - p_src_43(ss,rr,cc)=sum( (R_43==R_values(rr)).* (S_43==S_values(ss)).* (C_43==C_values(cc))); - end - end - end - % - p_src_43=p_src_43/sum(p_src_43(:)); - - p_src_44=zeros(N_s,N_r,N_c); - - for cc=1:N_c - for ss=1:N_s - for rr=1:N_r - p_src_44(ss,rr,cc)=sum( (R_44==R_values(rr)).* (S_44==S_values(ss)).* (C_44==C_values(cc))); - end - end - end - % - p_src_44=p_src_44/sum(p_src_44(:)); - - - - [I,II,III,IIII]=pid(p_src_41); - - p_s_2=permute(p_src_41,[3 2 1]); - % - [I_s,II_s,III_s,IIII_s]=pid(p_s_2); - - I_II_41=min(I,I_s); - - - [I,II,III,IIII]=pid(p_src_42); - - p_s_2=permute(p_src_42,[3 2 1]); - % - [I_s,II_s,III_s,IIII_s]=pid(p_s_2); - - I_II_42=min(I,I_s); - - - - [I,II,III,IIII]=pid(p_src_43); - - p_s_2=permute(p_src_43,[3 2 1]); - % - [I_s,II_s,III_s,IIII_s]=pid(p_s_2); - - I_II_43=min(I,I_s); - - - [I,II,III,IIII]=pid(p_src_44); - - p_s_2=permute(p_src_44,[3 2 1]); - % - [I_s,II_s,III_s,IIII_s]=pid(p_s_2); - - I_II_44=min(I,I_s); - - - - - I_II_4=(I_II_41+I_II_42+I_II_43+I_II_44)/4; - 
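% quadratic extrapolation (QE): evaluate at 1/ntr -> 0 the parabola through (1/ntr4, I_II_4), (1/ntr2, I_II_2) and (1/ntr, I_II_non_corrected) -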
I_II_2=(I_II_21+I_II_22)/2; - - I_II_corr=lagrange_vec([1/ntr4 1/ntr2 1/ntr],[I_II_4 I_II_2 I_II_non_corrected]); - -end \ No newline at end of file
diff --git a/src/matlab/qe_bias_correction_information.m b/src/matlab/qe_bias_correction_information.m deleted file mode 100644 index 3b60692..0000000 --- a/src/matlab/qe_bias_correction_information.m +++ /dev/null @@ -1,3 +0,0 @@ -function I = qe_bias_correction_information(X,Y) - % I(X;Y) = H(X) + H(Y) - H(X,Y), with each entropy QE bias-corrected - I = qe_bias_correction(X,1) + qe_bias_correction(1,Y) - qe_bias_correction(X,Y); -end \ No newline at end of file
diff --git a/src/python/__init__.py b/src/python/__init__.py deleted file mode 100644 index e69de29..0000000
diff --git a/src/python/examples/Irrelevant information demo.ipynb b/src/python/examples/Irrelevant information demo.ipynb deleted file mode 100644 index b304491..0000000 --- a/src/python/examples/Irrelevant information demo.ipynb +++ /dev/null @@ -1,295 +0,0 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": 1, - "metadata": { - "collapsed": true - }, - "outputs": [], - "source": [ - "import numpy as np\n", - "%matplotlib inline\n", - "from matplotlib import pyplot as plt\n", - "import seaborn as sns\n", - "cmap = sns.cubehelix_palette(8, as_cmap=True)" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "collapsed": true - }, - "source": [ - "# Unique-Complementary-Synergistic decomposition" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Define input probability distribution. This is $P(x,y,z)$, and must be given as a flat array containing the values of the joint probabilities: `P[0]` will be $P(x=0,y=0,z=0)$, `P[1]` will be $P(x=0,y=0,z=1)$, and so on." - ] - }, - { - "cell_type": "code", - "execution_count": 12, - "metadata": { - "collapsed": true - }, - "outputs": [], - "source": [ - "P = np.array([1, 1, 1, 0, 0, 0, 0, 1], dtype=np.float)/4 # this is an AND gate for testing" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Define optimisation domain" - ] - }, - { - "cell_type": "code", - "execution_count": 13, - "metadata": { - "collapsed": true - }, - "outputs": [], - "source": [ - "gamma_0 = np.array([1, -1, -1, 1, 0, 0, 0, 0], dtype=np.float)\n", - "gamma_1 = np.array([0, 0, 0, 0, 1, -1, -1, 1], dtype=np.float)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "The optimisation domain is parametrized by\n", - "\\begin{equation}\n", - " Q(a,b) = P + a\\gamma_0 + b\\gamma_1 \n", - "\\end{equation}\n", - "where $a$ and $b$ are such that\n", - "\\begin{gather}\n", - " a \\le \\min\\left( \\min_{i\\in \\{2,3\\}}P_i, 1-\\max_{i\\in \\{1,4\\}} P_i\\right)\\\\\n", - " a \\ge -\\min\\left( 1 -\\max_{i\\in \\{2,3\\}}P_i, \\min_{i\\in \\{1,4\\}} P_i\\right)\\\\\n", - " b \\le \\min\\left( \\min_{i\\in \\{6,7\\}}P_i, 1-\\max_{i\\in \\{5,8\\}} P_i\\right)\\\\\n", - " b \\ge -\\min\\left( 1 -\\max_{i\\in \\{6,7\\}}P_i, \\min_{i\\in \\{5,8\\}} P_i\\right)\n", - "\\end{gather}\n", - "to ensure that $0 \\le Q_i(a,b) \\le 1$ for all $i,a,b$."
- ] - }, - { - "cell_type": "code", - "execution_count": 19, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "(-0.0, 0.25) (-0.0, 0.0)\n" - ] - } - ], - "source": [ - "def a_bounds(P):\n", - " upper = min(min(P[1], P[2]), 1-max(P[0], P[3]))\n", - " lower = -min(1-max(P[1],P[2]), min(P[0], P[3]))\n", - " return lower, upper\n", - "\n", - "def b_bounds(P):\n", - " upper = min(min(P[5], P[6]), 1-max(P[4], P[7]))\n", - " lower = -min(1-max(P[5],P[6]), min(P[4], P[7]))\n", - " return lower, upper\n", - "print a_bounds(P), b_bounds(P)\n", - "np.array?" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": true - }, - "outputs": [], - "source": [] - }, - { - "cell_type": "code", - "execution_count": 15, - "metadata": {}, - "outputs": [], - "source": [ - "a_range = np.linspace(a_bounds(P)[0], a_bounds(P)[1], 100)\n", - "b_range = np.linspace(b_bounds(P)[0], b_bounds(P)[1], 100)\n", - "AA, BB = np.meshgrid(a_range, b_range)" - ] - }, - { - "cell_type": "code", - "execution_count": 16, - "metadata": {}, - "outputs": [], - "source": [ - "def I_2D(Q):\n", - " QX = np.atleast_2d(Q.sum(axis=1))\n", - " QY = np.atleast_2d(Q.sum(axis=0))\n", - " if min(QX.min(), QY.min()) < 0 or Q.min()<0:\n", - " raise Exception\n", - " table = Q * np.log2(Q / (QX.transpose() * QY))\n", - " return table[Q>0].sum()\n", - "\n", - "def I_Q_XY(Q):\n", - " QXY = Q.reshape((2,2,2)).sum(axis=2)\n", - " return I_2D(QXY)\n", - "\n", - "def I_Q_XY_Z(Q):\n", - " Q_XYZ = Q.reshape((2,2,2))\n", - " return Q_XYZ[:,:,0].sum() * I_2D(Q_XYZ[:,:,0]/Q_XYZ[:,:,0].sum()) + Q_XYZ[:,:,1].sum() * I_2D(Q_XYZ[:,:,1]/Q_XYZ[:,:,1].sum())\n", - "\n", - "def CoI_Q(a, b):\n", - " # co-information CoI_Q(C;fUr;fUo)\n", - " Q = P + a * gamma_0 + b * gamma_1\n", - " return I_Q_XY(Q) - I_Q_XY_Z(Q)\n" - ] - }, - { - "cell_type": "code", - "execution_count": 22, - "metadata": { - "scrolled": false - }, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "-0.0534944364305 -6.48323861744e-07\n" - ] - }, - { - "data": { - "text/plain": [ - "" - ] - }, - "execution_count": 22, - "metadata": {}, - "output_type": "execute_result" - }, - { - "data": { - "image/png": 
"iVBORw0KGgoAAAANSUhEUgAAAfEAAAFRCAYAAABkLpSTAAAABHNCSVQICAgIfAhkiAAAAAlwSFlz\nAAALEgAACxIB0t1+/AAAFv1JREFUeJzt3WGsZGddgPFnl3YbtduVkm2aULUf2LyuIRUoGtKG3RLb\nGIqENSYIaDQIbTBgUqsRQqzwAWPUsCoG2qQtohBNjAhKTIsmJpQuSZEEbQjhZVtTkaSVlaa7JYHa\nbq8fZi47e3dm7syZ95zzf895fslm58655/bM2dt57v+dc+/ds7W1hSRJqs/evg9AkiQ1Y8QlSaqU\nEZckqVJGXJKkShlxSZIqZcQlSarURX0fwLpuuf5dfk+cNHKHDh7s+xDUg6uvPND3IfTmjXfdtmfe\n/dVFXJJOnjq1dLuRH6bHnjh9wX1jDjsYcUkDZOTHY2fYxxZ1Iy5pdBZF3rjXbzvqY4m5EZekqXlx\nN+x1mp3Qhxx0Iy5JSxj2+g056EZckta0M+xGvR5DW2434pK0IaNen6HE3IhLUmFGvR61x9yIS1LL\njHp8tcbciEtSx2ajbtBjeeyJ01WF3IhLUo8Mejw1TeVGXJKCMOix1BBzIy5JARn0OCIvsRtxSQrO\noPcvasj9feKSVJGTp07t+gte1I7Hnjg99zep9cmIS1KFjHl/IoXc5XRJqphL7f2IsrxuxCtz8slv\n9n0InTl0+VV9H4JUle2gG/NuRAi5EQ9gTGFex7rnxehLE8a8O32H3Ih3wEh3Y9XzbOw1Fsa8G32G\n3IgXZKzrsNu/k5HX0Bjz9vUVciPegLEetmX/vgZeNTPm7eoj5EZ8BUZb2wy8huDkqVOGvCVdh9yI\nz2G01cS8zxvDrqicytvTZciNOEZb7THsis6Y1220ETfc6othV0QusZfV1TQ+uogbb0Vk2BWBU3lZ\nXYR8FBE33KrRzs9bo66uOJWX03bIBx1x460hMerqkiGvwyAjbrw1BrOf5wZdbXB5vYw2p/FBRdx4\na6yc0tUmp/LNtRXyQUTceEvnc0pXaYY8pr19H8CmDLi03Mknv/n9P9ImZn93udb32BOni3/Mqidx\nn5Sk9Tiha1NO5LFUO4kbcGkzTudqyom8udLTeJWTuE88UjlO52rCiTyG6iZxAy61x9fPtQ4n8mZK\nTuPVRVxSN4y5VmHI+2XEJS1lzLUbQ76+UtO4EZe0EmOuZQx5P6q8sE1Sf7wQTot4sVv3nMQlNeZ0\nrp2cyFdXYkndiEvamCHXLEPeHSMuqQincql7RlxSUcZc4DS+qk2X1I24pFYYcxny9hlxSa0y5FJ7\njLik1jmVj5fT+O42WVI34pI6Y8jHyZC3x4hL6pRTuVSOEZfUC0M+Lk7j7TDiknrjVC5NNH1d3IhL\n6p0hHwen8fKMuKQQDLm0PiMuKQxDPnxO42UZcUmhGHJpdUZcUjhe8DZsTuPzNbm4zYhLCsuQS8sZ\ncUmhGXJpMSMuKTxDPjwuqZdhxCVVwZBLFzLikqReOI1vzohLqobTuHS+i5rslFLaC3wEuAZ4Bnh7\nzvnRme2vB+4AngM+mnO+Z9E+KaWXA58BTk53vzPn/LdNH5CkYTv55Dc5dPlVfR+GFELTSfwYsC/n\nfB3wHuCD2xtSShcDx4GbgKPArSmlK6b7XDJnn2uB4znn10z/GHBJSzmRa6jW/V7xphG/HrgfIOf8\nEPDKmW2HgUdyzqdzzs8CDwJHpvvcN2efa4HXpZQ+l1K6J6V0acNjkjQihnwYfF18M00jfhlwZubt\ns9Pl8u1ts19KPA0cWLDPC4CHgN/OOR8F/hN4X8NjkiRpVJpG/Aywf/bj5Jyfn94+vWPbfuCpBfuc\nBT6dc/7y9L5PAy9veEySRsZpXGPXNOIngJsBUkqvAh6e2fY14FBK6YUppX1MltK/sGSf+1JKPzW9\n/TPAlxoek6QRMuQas0ZXpwOfAm5KKZ2Yvv3WlNKbgUtzznenlG4HPsvki4R7c86Pp5Qu2Gf69zuA\nD6eUngUeB25teEySpAqdPHWKQwcP9n0YVdqztbXV9zGs5YbDx+o6YEmd8NvO6mbEz7n6ygMX3PfG\nu27bM+99/WEvkgbBZXWNkRGXJKlSRlzSYDiNa2yMuCRJlTLikgbFabxO/uS2Zoy4JEmVMuKSBsdp\nXGNhxCVJqpQRlySpUkZc0iC5pK4xMOKSJFXKiEuSVCkjLmmwXFLX0BlxSZIqZcQlSaqUEZckqVJG\nXNKg+bq4hsyIS5JUKSMuSVKljLgkSZUy4pIkBXH1lQfWen8jLklSpYy4JEmVMuKSBs9vM9NQGXFJ\nUu8OHTzY9yFUyYhLklQpIy5JUqWMuCRJlTLikiRVyohLkhTAuj/oBYy4JEnVMuKSpF757WXNGXFJ\nkiplxCVJqpQRlzR4hy6/qu9DkFphxCVJ6lmTK9PBiEuSeuRFbZsx4pIkVcqIS5JUKSMuSVKljLgk\nqRe+Hr45Iy5p0Pz2MkXX9Mp0MOKSJFXLiEuSOudSehlGXJKkShlxSZJ6ssnr4WDEJQ2YF7Vp6Iy4\nJKlTvh5ejhGXJKlSRlySpB5s+no4GHFJA+Xr4TG5lF6WEZckqVJGXJLUCafw8oy4pMFxKV3RlXg9\nHIy4JEnVMuKSpNa5lN4OIy5pUFxKV3SlltLBiEuSWuYU3h4jLklSpYy4pMFwKV3RlVxKByMuSWqR\nS+ntMuKSBsEpXGNkxCVJrXAKP1/ppXQw4pIGwClcY2XEJUnFOYV3w4hLqppTuGrQxlI6GHFJUmFO\n4d0x4pIktaitKRyMuKSKuZQej1N4t4y4JEmVMuKSquQUHo9T+IXaXEoHIy6pQgZcmjDikqSNOYVf\nqO0pHIy4pMo4hUvnGHFJ0kacwvtjxCVVwylctehiKR2MuCRpA07h/bqoyU4ppb3AR4BrgGeAt+ec\nH53Z/nrgDuA54KM553sW7ZNSegnwMeB54CvAO3POW80fkqQhcgqPx4DP19UUDs0n8WPAvpzzdcB7\ngA9ub0gpXQwcB24CjgK3ppSumO5zyZx9jgPvzTkfAfYAb2h4TJIGyoBL8zWN+PXA/QA554eAV85s\nOww8knM+nXN+FngQODLd5745+7wi5/zA9PZ9wI0Nj0mS1BGn8Pm6nMKhecQvA87MvH12uly+ve30\nzLangQML9nkBk+l723em7ytJgFO4tEzTiJ8B9s9+nJzz89Pbp3ds2w88tWCfs0xeC9/5vpKkoJzC\n5+t6CofmET8B3AyQUnoV8PDMtq8Bh1JKL0wp7WOylP6FJft8OaV0dHr7tcADSBJO4REZ8FgaXZ0O\nfAq4KaV0Yvr2W1NKbwYuzTnfnVK6Hfgsky8S7s05P55SumCf6d+/Bdw9Df5Xgb9reEySBsSAqyZ9\nTOEAe7a26vpurhsOH6vrgCU1YsTjcQpfrO2Iv/Gu2/bMu98f9iIpHAMejwFfrK8pHIy4pGAMuLQ6\nIy5JWsopfLE+p3Aw4pICcQqPx4DHZsQlhWDAVZu+p3Aw
4pKkBZzC4zPiknrnFB6PAV8uwhQORlxS\nzwy41JwRl9QbAx6TU/hyUaZwMOKSpBkGfLlIAQcjLqknTuHS5oy4pM4Z8JicwpeLNoWDEZfUMQMe\nkwGvkxGX1BkDHpMB313EKRyMuCRJS0UNOBhxSR1xCo/JKbxuRlxS6wx4TAZ8d5GncDDiklpmwGMy\n4MNgxCW1xoDHZMBXE30KByMuSdIFagg4GHFJLXEKj8kpfFiMuKTiDHhMBnw1tUzhYMQlFWbAYzLg\nq6kp4GDEJRVkwGMy4MNlxCUVYcBVu9qmcDDikgow4HE5ha+mxoCDEZe0IQMelwFfTa0BByMuSYNk\nwMfBiEtqzCk8JgO+upqncDDikhoy4DEZ8NXVHnAw4pIaMOAxGfDVDSHgYMQlrcmAx2TAx8mIS1qZ\nAdcQDGUKByMuaUUGPC6n8NUNKeBgxCWtwIDHZcBXN7SAgxGXtAsDHpcBlxGXtJABj8uAr2eIUzgY\ncUkLGPC4DPh6hhpwMOKS5jDgcRnw9Qw54GDEJe1gwOMy4OsZesDBiEuaYcDjMuCax4hLAgx4ZAZ8\nfWOYwsGIS8KAR2bA1ze0gL/4xfsXbjPi0sgZ8LgM+PqGFvDdGHFpxAx4XAZ8fUMM+LIpHIy4NFoG\nPC4Dvr4hBnwVRlwaIQMelwHXtt2mcICLOjgOSUEY79gMeDNjncLBSVwaDQMemwFvZqgBX2UKByMu\njYIBj82ANzPUgK/DiEsDZ8BjM+DNDDngq07hYMSlQTPgsRnwZoYc8HUZcWmgDHhsBryZoQd8nSkc\njLg0SAY8NgPejAG/kBGXBsaAx2bAmxl6wJsy4tKAGPDYDHgzYwh4kykc/GEv0iAY79iMd3MGfDkn\ncalyBjw2A97cGAK+KSMuVcyAx2bAmxtLwDeZwsGIS9Uy4LEZ8OYM+OqMuFQhAx6bAW9uLAEvxQvb\npIoY7/gMeHNjCniJKRycxKVqGPD4DHhzBrwZIy5VwIDHZ8CbM+DNuZwuBWfAYzPemxlTwNvgJC4F\nZsBjM+CbGVvAS0/h4CQuhWS84zPgmzHgZTiJS8EY8PgM+GYMeDlGXArEgMdnwDdjwMtyOV0KwoDH\nZrw3N7aAd8GISz0z3vEZ8M2NMeBtT+HgcrrUKwMenwHfnAFvjxGXemLA4zPgmzPg7XI5XeqY8Y7P\neG9ujPGGbgMOTuJSpwx4fAZ8cwa8O0Zc6ogBj8+Ab86Ad8vldKllxrsOBnxzBrx7a0c8pfQDwCeA\ng8DTwK/mnP93x/vcAtwKPAd8IOf8T4v2Syn9PPDHwH9Pd39fzvmBpg9IisSAx2e8yzDg/WiynP7r\nwH/knI8AfwX87uzGlNKVwG8A1wE/C/xBSmnfkv2uBX4n5/ya6R8DrkEw4PEZ8DIMeH+aLKdfD/zh\n9Pb9wB07tv80cCLn/CzwbErpEeCaJftdC7wspXQb8EXg3Tnnsw2OSwrBeNfBgG9urPGOZGnEU0pv\nA27bcff/AGemt58Gdv4r7gdOz7y9/T6XLdjvn4FP5ZwfSyndBbwD+PAaj0EKw4DHZ7zLGHvAI0zh\nsEvEc873AvfO3pdS+iSTUDP9+6kdu52Z2T77PmcW7PcXOeft2/8A/MIaxy+FYcDjM+BlGPAYAYdm\nr4mfAG6e3n4tsPM17C8Cr04pXZJSOgAcBr6yZL9/Tym9eHr7RuBLDY5J6s2hy68y4BUw4GUY8DgB\nh2avid8J/GVK6fPAM8BbAFJKvwk8knP+TErpQ8DnmXyR8N6c8zMppbn7AW8DPplS+h6T2N+90SOS\nOmS84zPe5RjwWAEH2LO1tdX3MazlhsPH6jpgDZYBj8+AlzH2eEP/Ab/+jrftmXe/P+xFWpPxroMB\nL8OA9x/wZYy4tAYDHp/xLmfsAY8c721GXFqB8a6DAS9j7PGGOgIORlzalQGPz3iXY8DrCTgYcWkh\n410HA16OAa8r4GDEpbkMeHzGuywDXl/AwYhLFzDg8Rnwcoz3RI0BByMufZ/xroMBL8eAT9QacDDi\nEmDAa2C8yzHe59QccDDiGjnjXQcDXo4BP6f2gIMR14gZ8PiMd1kGfGII8d5mxDU6xrsOBrwc433O\nkAIORlwjY8DjM95lGfBzhhZwMOIaCeNdBwNejvE+3xADDkZcI2DA4zPeZRnw89Ue8IM/tvjf04hr\nsIx3HQx4WQb8nNrjDcsDDkZcA2S862C8yzLe5xtDwMGIa2AMeHzGuzwDfr4hBHxVRlyDYLzrYMDL\nMt7nG1K8V5nCwYhrAAx4fMa7PAN+vjEGHIy4Kma862DAyzLeFxprwMGIq1IGPD7jXZ4BP9+Q4g3r\nBxyMuCpjvOMz3uUZ7wsZ8AkjrioY7zoY8LKM93wG/BwjrvAMeHzGuzwDfqGhxRs2CzgYcQVmvOMz\n3uUZ7/kM+HxGXOEY7zoY8PIM+IWGGG8oE3Aw4grGgMdnvMsz3vMZ8N0ZcYVgvOMz3uUZ7/mGGm8o\nG3Aw4uqZ8Y7PeLfDgM9nwNdjxNUbAx6fAS/PeM835HhDOwEHI64eGO/4jHd5xnsxA96cEVdnjHd8\nxrs8472Y8d6cEVcnDHhsxrsdBnwxA16GEVerjHdsxrsdxnuxoccbugs4GHG1xHjHZ8DLM97LGfDy\njLiKMt7xGe/yjPdyxrs9RlzFGPDYjHd5xnu5McQb+gs4GHEVYLxjM97tMOCLGe/uGHE1ZrxjM97t\nMN7LGfBuGXGtzXjHZrzbYbyXG0u8IU7AwYhrDcY7NuPdDuO9nPHulxHXrox3bMa7HcZ7uTHFG2IG\nHIy4dmHA4zLe7TDeuxtTwKPGe5sR11zGOy7j3Q7jvbsxxRviBxyMuHYw3nEZ73YY790Z77iMuADj\nHZnxbofx3p3xjs+Ij5zxjslwt8Nwr2Zs8YY6Aw5GfLSMd0zGux3GezXGuz5GfGSMd0zGux3GezXG\nu15GfCSMd0zGux3GezXGu35GfOCMdzyGux2GezVjDDfUG+/Lrn7R0u1GfKCMdzzGux3GezXGuz67\nBRyM+OAY73iMdzuM92qMd51WCTgY8cEw3rEY7nYY7tWMNdxQf7xh9YCDEa+a4Y7HeJdnuFdnvOu2\nTry3GfEKGe9YDHc7jPfqjHf9mgQcjHhVjHcchrsdhnt1Yw43GO9tRrwCxjsGw90Ow726sYcbjPdO\nRjwowx2D4W6H4V7P2OM9lHBvKxVwMOKhGO4YDHc7DPd6DPfwPl9KxnubEQ/AePfPcLfDcK9n7OEG\n470uI94Tw90/w12e0V6P0Z4w3M0Z8Y4Y7X4Z7HYY7fUY7YkhRhu6C/csI94So90vo90Oo70eo33O\nUMMN/cR7256tra3e/uOSJKm5vX0fgCRJasaIS5JUKSMuSVKljLgkSZUy4pIkVcqIS5JUKb9PvIGU\n0l7gI8A1wDPA23POj85sfz1wB/Ac8NGc8z2L9kkpvQT4GPA88BXgnTnn0X/fX+Fz/DLgQ8DZ6f2/\nknP+VqcPKKC
S53hmn7cA78o5X9fdI4mt8OfyFcDdwA8De5h8Lj/W5eOJqPA5/nHgHmAL+Pr0/rDP\nyU7izRwD9k2fqN4DfHB7Q0rpYuA4cBNwFLh1+j/eMeCSOfscB96bcz7C5H/KN3T2KGIreY7/lElY\nXgP8PfDuzh5FbCXPMSmllwO/1t3hV6Pkef4j4OM556PA7wEv7exRxFbyHL8f+EDO+dXAJcDrunoQ\nTRjxZq4H7gfIOT8EvHJm22HgkZzz6Zzzs8CDwJHpPvfN2ecVOecHprfvA25s//CrUPIcvynn/PD0\n9sXAd9s//CoUO8cppRcBvw/cxuSLUZ1T8nP5OuBHUkr/AvwS8K+dPIL4Sp7j7wIvSintAfYD/9fJ\nI2jIiDdzGXBm5u2z06WZ7W2nZ7Y9DRxYsM8LOP8J7zvT91W5c7w35/wEQErpOuCdwJ+0dtR1KXWO\n9wH3Arcz+RzW+Uo+X1wNPJlzvgn4Bq4qbSv2fAH8OfBnwFeBK4DPtXXQJRjxZs4w+Qpt296c8/PT\n26d3bNsPPLVgn7NMXgvf+b4qd46fB0gp/SJwJ3BzzvnbrR11XYqcY+AngZcwOb9/A/xESul4Wwdd\noZLPF98G/nF632c4f+Ics5LPF58AXp1zPgx8nJml+YiMeDMngJsBUkqvAh6e2fY14FBK6YXTCeUI\n8IUl+3w5pXR0evu1wAMICp7jlNIvM5nAb/AioPMUOcc553/LOb90es3Bm4Cv5pxv7/BxRFfy+eJB\nzr1Ge5TJxbAqe45/kMm0DvA4k4sIw/IXoDQwfa1k+6pGgLcC1wKX5pzvTin9HJOLTvYC9+ac75y3\nT8756ymlQ0yuNt3HZPnmlshXQnal1DkGHgW+BfwX55bUPpdzfn8nDySwkp/HMx/zauCvvTr9nMLP\nFz/K5MrpH2IyTb4l5zy7VDxKhc/xjcAHgO8xuWr9lpzzNzp8OGsx4pIkVcrldEmSKmXEJUmqlBGX\nJKlSRlySpEoZcUmSKmXEJUmqlBGXJKlSRlySpEr9PyuisUD5V1/6AAAAAElFTkSuQmCC\n", - "text/plain": [ - "" - ] - }, - "metadata": {}, - "output_type": "display_data" - } - ], - "source": [ - "CoIQQ = np.zeros_like(AA)\n", - "\n", - "for i in range(AA.shape[0]):\n", - " for j in range(AA.shape[1]):\n", - " CoIQQ[i,j] = CoI_Q(AA[i,j], BB[i,j])\n", - "print CoIQQ.min(), CoIQQ.max()\n", - "\n", - "SI_cfUrfUo = CoIQQ.max()\n", - "UI_cfUr = I_cfUr - SI_cfUrfUo\n", - "UI_cfUo = I_cfUo - SI_cfUrfUo\n", - "CI_cfUrfUo = I_cfUrfUo - SI_cfUrfUo - UI_cfUr - UI_cfUo\n", - "\n", - "fig, ax = plt.subplots()\n", - "ax.contourf(AA,BB,CoIQQ, cmap=cmap)" - ] - }, - { - "cell_type": "code", - "execution_count": 23, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "H(C) = 0.680077 bits\n", - "MI(C:F_R) = -2.02136e-16 bits\n", - "MI(C:F_O) = 0.211081 bits\n", - "MI(C:(F_R, F_O)) = 0.211081 bits\n", - "SI(C: F_R; F_O) = -6.48324e-07 bits\n", - "CI(C: F_R; F_O) = -6.48324e-07 bits\n", - "UI(C: F_R \\ F_O) = 6.48324e-07 bits\n", - "UI(C: F_O \\ F_R) = 0.211082 bits\n" - ] - } - ], - "source": [ - "print(\"H(C) = {:g} bits\".format(H_c))\n", - "print(\"MI(C:F_R) = {:g} bits\".format(I_cfUr))\n", - "print(\"MI(C:F_O) = {:g} bits\".format(I_cfUo))\n", - "print(\"MI(C:(F_R, F_O)) = {:g} bits\".format(I_cfUrfUo))\n", - "print(\"SI(C: F_R; F_O) = {:g} bits\".format(SI_cfUrfUo))\n", - "print(\"CI(C: F_R; F_O) = {:g} bits\".format(CI_cfUrfUo))\n", - "print(\"UI(C: F_R \\ F_O) = {:g} bits\".format(UI_cfUr))\n", - "print(\"UI(C: F_O \\ F_R) = {:g} bits\".format(UI_cfUo))" - ] - }, - { - "cell_type": "code", - "execution_count": 1, - "metadata": { - "collapsed": true - }, - "outputs": [], - "source": [ - "from information_decomposition import decomposition" - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "metadata": { - "collapsed": true - }, - "outputs": [], - "source": [ - "decomposition?" 
- ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 2", - "language": "python", - "name": "python2" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 2 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython2", - "version": "2.7.13" - } - }, - "nbformat": 4, - "nbformat_minor": 1 -}
diff --git a/src/python/examples/Numerical intersection tests.ipynb b/src/python/examples/Numerical intersection tests.ipynb deleted file mode 100644 index 9032764..0000000 --- a/src/python/examples/Numerical intersection tests.ipynb +++ /dev/null @@ -1,449 +0,0 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": 1, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "import numpy as np\n", - "import sys\n", - "sys.path.append(\"../\")\n", - "\n", - "import information_decomposition as pid\n", - "from intersection_information import intersection_information" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## A couple of simple binary examples" - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "# Parameter definitions for the examples\n", - "sigma = 0.5 # prior probability of s=1\n", - "rho = 0.1 # bit-flip probability for S->R channel\n", - "gamma = 0.1 # bit-flip probability for S->C channel where this exists" - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "metadata": { - "collapsed": true - }, - "outputs": [], - "source": [ - "# Joint PDF table definitions for the examples\n", - "\n", - "# example where SI is only due to signal correlations, as R<-S->C\n", - "P1 = np.zeros((2,2,2))\n", - "P1[0,0,0] = sigma*(1-rho)*(1-gamma)\n", - "P1[0,0,1] = sigma*(1-rho)*(gamma)\n", - "P1[0,1,0] = sigma*rho*(1-gamma)\n", - "P1[0,1,1] = sigma*rho*gamma\n", - "P1[1,0,0] = (1-sigma)*rho*gamma\n", - "P1[1,0,1] = (1-sigma)*rho*(1-gamma)\n", - "P1[1,1,0] = (1-sigma)*(1-rho)*(gamma)\n", - "P1[1,1,1] = (1-sigma)*(1-rho)*(1-gamma)\n", - "\n", - "# example that should have nonzero intersection, as S->R==C\n", - "P2 = np.zeros((2,2,2))\n", - "P2[0,0,0] = sigma*(1-rho)\n", - "P2[0,0,1] = 0\n", - "P2[0,1,0] = 0\n", - "P2[0,1,1] = sigma*rho\n", - "P2[1,0,0] = (1-sigma)*rho\n", - "P2[1,0,1] = 0\n", - "P2[1,1,0] = 0\n", - "P2[1,1,1] = (1-sigma)*(1-rho)" - ] - }, - { - "cell_type": "code", - "execution_count": 4, - "metadata": { - "collapsed": false - }, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "(0.0, 0.31992237501539894, 0.31992237501539894)\n", - "(0.21108203139531984, 0.53100440641071878, 0.31992237501539894)\n" - ] - } - ], - "source": [ - "# Compute and print intersection information, together with \"non-normalised\" and \"signal correlations-only\" SI values\n", - "print(intersection_information(P1))\n", - "print(intersection_information(P2))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Introducing IDTxl" - ] - }, - { - "cell_type": "code", - "execution_count": 5, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "import idtxl\n", - "from idtxl.estimators_pid import synergy_tartu" - ] - }, - { - "cell_type": "code", - "execution_count": 6, - "metadata": { - "collapsed": false - }, - "outputs": [
- { - "name": "stdout", - "output_type": "stream", - "text": [ - "0.531004406411 0.0 0.0 0.0\n" - ] - } - ], - "source": [ - "# my decomposition for the P2 problem above, as a sanity check\n", - "[SI, CI, UI_Y, UI_Z] = pid.decomposition(P2)\n", - "print(SI, CI, UI_Y, UI_Z)" - ] - }, - { - "cell_type": "code", - "execution_count": 7, - "metadata": { - "collapsed": false - }, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "{(0, 1, 1): 0.050000000000000003, (0, 0, 0): 0.45000000000000001, (1, 1, 0): 0.0, (1, 0, 0): 0.050000000000000003, (0, 1, 0): 0.0, (0, 0, 1): 0.0, (1, 1, 1): 0.45000000000000001, (1, 0, 1): 0.0}\n" - ] - } - ], - "source": [ - "# convert definition of P2 for tartu\n", - "P = dict()\n", - "for x in [0,1]:\n", - " for y in [0,1]:\n", - " for z in [0,1]:\n", - " P[(x,y,z)] = P2[x,y,z]\n", - "print(P)" - ] - }, - { - "cell_type": "code", - "execution_count": 8, - "metadata": { - "collapsed": false - }, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "0.5310044058278642 5.82854542408e-10 0.0 0.0\n" - ] - } - ], - "source": [ - "[the_p, feas, kkt, wCI, wSI, wUI_Y, wUI_Z] = synergy_tartu.solve_PDF(pdf=P)\n", - "print(wSI, wCI, wUI_Y, wUI_Z)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Example: dice" - ] - }, - { - "cell_type": "code", - "execution_count": 12, - "metadata": { - "collapsed": false, - "scrolled": false - }, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "\n", - "Lambda: 0.00\n", - "CPU times: user 6.28 s, sys: 12.8 s, total: 19.1 s\n", - "Wall time: 5.69 s\n", - "\n", - "Lambda: 0.05\n", - "CPU times: user 7.27 s, sys: 15.4 s, total: 22.7 s\n", - "Wall time: 6.66 s\n", - "\n", - "Lambda: 0.10\n", - "CPU times: user 6.94 s, sys: 14.6 s, total: 21.6 s\n", - "Wall time: 6.13 s\n", - "\n", - "Lambda: 0.15\n", - "CPU times: user 6.44 s, sys: 12.6 s, total: 19.1 s\n", - "Wall time: 5.42 s\n", - "\n", - "Lambda: 0.20\n", - "CPU times: user 6.58 s, sys: 13.4 s, total: 20 s\n", - "Wall time: 5.64 s\n", - "\n", - "Lambda: 0.25\n", - "CPU times: user 6.55 s, sys: 12.5 s, total: 19.1 s\n", - "Wall time: 5.38 s\n", - "\n", - "Lambda: 0.30\n", - "CPU times: user 6.54 s, sys: 12.7 s, total: 19.2 s\n", - "Wall time: 5.43 s\n", - "\n", - "Lambda: 0.35\n", - "CPU times: user 6.39 s, sys: 12.3 s, total: 18.7 s\n", - "Wall time: 5.31 s\n", - "\n", - "Lambda: 0.40\n", - "CPU times: user 7.56 s, sys: 15.7 s, total: 23.3 s\n", - "Wall time: 6.74 s\n", - "\n", - "Lambda: 0.45\n", - "CPU times: user 7.05 s, sys: 14.3 s, total: 21.3 s\n", - "Wall time: 6.02 s\n", - "\n", - "Lambda: 0.50\n", - "CPU times: user 6.92 s, sys: 13.8 s, total: 20.7 s\n", - "Wall time: 5.84 s\n", - "\n", - "Lambda: 0.55\n", - "CPU times: user 8.44 s, sys: 16.7 s, total: 25.2 s\n", - "Wall time: 7.03 s\n", - "\n", - "Lambda: 0.60\n", - "CPU times: user 8.91 s, sys: 17.7 s, total: 26.6 s\n", - "Wall time: 7.4 s\n", - "\n", - "Lambda: 0.65\n", - "CPU times: user 8.97 s, sys: 17.6 s, total: 26.5 s\n", - "Wall time: 7.4 s\n", - "\n", - "Lambda: 0.70\n", - "CPU times: user 9.22 s, sys: 19.4 s, total: 28.6 s\n", - "Wall time: 8.27 s\n", - "\n", - "Lambda: 0.75\n", - "CPU times: user 9.33 s, sys: 19.3 s, total: 28.6 s\n", - "Wall time: 8.37 s\n", - "\n", - "Lambda: 0.80\n", - "CPU times: user 9.04 s, sys: 18.2 s, total: 27.3 s\n", - "Wall time: 7.61 s\n", - "\n", - "Lambda: 0.85\n", - "CPU times: user 9.55 s, sys: 18.7 s, total: 28.2 s\n", - "Wall time: 7.85 s\n", - "\n", - "Lambda: 0.90\n", - 
"CPU times: user 9.85 s, sys: 19.2 s, total: 29.1 s\n", - "Wall time: 8.07 s\n", - "\n", - "Lambda: 0.95\n", - "CPU times: user 9.4 s, sys: 18.6 s, total: 28 s\n", - "Wall time: 7.78 s\n", - "\n", - "Lambda: 1.00\n", - "CPU times: user 9.5 s, sys: 19.3 s, total: 28.8 s\n", - "Wall time: 8.2 s\n" - ] - } - ], - "source": [ - "aa=np.int(0);\n", - "\n", - "lstep = 0.05\n", - "l=0\n", - "l_n = np.int(np.ceil(1/lstep)+1)\n", - "\n", - "I_shar=np.zeros(l_n);\n", - "I_uny=np.zeros(l_n);\n", - "I_unz=np.zeros(l_n);\n", - "I_syn=np.zeros(l_n);\n", - "\n", - "I_shar_z=np.zeros(l_n);\n", - "I_uny_z=np.zeros(l_n);\n", - "I_unz_z=np.zeros(l_n);\n", - "I_syn_z=np.zeros(l_n);\n", - "\n", - "#Y and Z are the dice, X is the weighted sum of the two\n", - "\n", - "\n", - "# summed-dice dimensions\n", - "dimy=np.int(6);\n", - "dimz=np.int(6);\n", - " \n", - "alpha=np.int(1); # relative ratio between the sum coefficients for summing the dice\n", - " \n", - "dimx=np.int(np.ceil(dimy+dimz*alpha-1-alpha)+1);\n", - "\n", - "\n", - "\n", - "#make X the new target\n", - "dimx_x=dimz;\n", - "dimy_x=dimy;\n", - "dimz_x=dimx;\n", - "\n", - "\n", - "while aa<=np.ceil(1/lstep): # loop over different correlations between the dice\n", - "\n", - " \n", - " p_yz = np.zeros((dimy,dimz))\n", - " \n", - " # setting correlations between the dice\n", - " for yy in range(0,dimy):\n", - " for zz in range(0,dimz):\n", - " p_yz[yy,zz]=l/36+(1-l)/6*np.float(zz==yy)\n", - " \n", - " p_yz=p_yz/p_yz.sum() # p(y,z), joint probability of the two dice alone\n", - "\n", - " p_x_yz=np.zeros((dimx,dimy,dimz)) # p(x|y,z)\n", - "\n", - " #build p(x|y,z) analytically\n", - " for xx in range(0,dimx):\n", - " for yy in range(0,dimy):\n", - " for zz in range(0,dimz):\n", - " if xx==np.ceil(zz+alpha*yy)-alpha:\n", - " p_x_yz[xx,yy,zz]=1\n", - " \n", - " p=np.zeros((dimx,dimy,dimz))\n", - "\n", - " #build p analytically\n", - " for xx in range(0,dimx):\n", - " for yy in range(0,dimy):\n", - " for zz in range(0,dimz):\n", - " p[xx,yy,zz]=p_x_yz[xx,yy,zz]*p_yz[yy,zz]\n", - "\n", - " p=p/p.sum() #final normalization\n", - "\n", - " # adapt definitions to those expected by tartu\n", - " P = dict()\n", - " P_z = dict()\n", - " for x in range(0,dimx):\n", - " for y in range(0,dimy):\n", - " for z in range(0,dimz):\n", - " P[(x,y,z)] = p[x,y,z]\n", - " P_z[(z,y,x)] = p[x,y,z]\n", - "\n", - " print('\\nLambda: {:.2f}'.format(l))\n", - " %time [_, _, _, I_syn[aa],I_shar[aa],I_uny[aa],I_unz[aa]] = synergy_tartu.solve_PDF(pdf=P)\n", - " #%time [_, _, _, I_syn_z[aa],I_shar_z[aa],I_uny_z[aa],I_unz_z[aa]] = synergy_tartu.solve_PDF(pdf=P_z)\n", - " \n", - "\n", - " aa+=1\n", - " l+=lstep\n", - " " - ] - }, - { - "cell_type": "code", - "execution_count": 13, - "metadata": { - "collapsed": false - }, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "[ -1.23092603e-09 3.62954080e-01 6.35419338e-01 8.70101129e-01\n", - " 1.07845940e+00 1.26601621e+00 1.43605386e+00 1.59074580e+00\n", - " 1.73162538e+00 1.85981548e+00 1.97615336e+00 2.08126285e+00\n", - " 2.17559714e+00 2.25946294e+00 2.33303142e+00 2.39633784e+00\n", - " 2.44926910e+00 2.49153578e+00 2.52262036e+00 2.54168336e+00\n", - " 2.54738616e+00]\n" - ] - } - ], - "source": [ - "print(I_syn)" - ] - }, - { - "cell_type": "code", - "execution_count": 14, - "metadata": { - "collapsed": false - }, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "[ 2.32192809 2.14829493 2.00221035 1.86891262 1.74519403 1.62951543\n", - " 1.5209715 1.41897497 1.32312654 
1.23315213 1.14886976 1.07017184\n", - " 0.99701649 0.9294252 0.86748589 0.81136164 0.76130686 0.71769419\n", - " 0.6810595 0.65217958 0.63221598]\n" - ] - } - ], - "source": [ - "print(I_shar)" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.5.2" - } - }, - "nbformat": 4, - "nbformat_minor": 0 -}
diff --git a/src/python/information_decomposition.py b/src/python/information_decomposition.py deleted file mode 100644 index 38b735b..0000000 --- a/src/python/information_decomposition.py +++ /dev/null @@ -1,191 +0,0 @@ -"""Utilities for computing a positive information decomposition for -distributions over three binary variables using the definitions from -Bertschinger et al. (2014).""" -import numpy as np - -# GAMMA_0 and GAMMA_1 are the (constant) basis vectors for the -# optimization domain $\Delta_P$. In the case of three binary -# variables, $\Delta_P$ has dimension 2. We express the vectors in the -# same "flattened joint probability distribution" coordinate system we -# will use below for P. So, for instance, GAMMA_0 corresponds to both -# gamma_(00101) and gamma_(01010) (which are identical) in the -# notation of the Bertschinger paper. All other gamma_(0....) vectors -# are zero. Since, by direct computation from the definition, -# -# gamma_(00101)=d_000+d_011-d_010-d_001 -# -# where d_xyz is the Kronecker delta, we get that in our chosen -# coordinate system for the space of joint PDFs over (x,y,z) the -# representation of gamma_(00101) is the one given below. An analogous -# argument leads to the expression for GAMMA_1. -GAMMA_0 = np.array([1, -1, -1, 1, 0, 0, 0, 0], dtype=np.float) # gamma_(00101) == gamma_(01010) -GAMMA_1 = np.array([0, 0, 0, 0, 1, -1, -1, 1], dtype=np.float) - -def I_XYZ(P): - """Compute mutual information I(X:(Y,Z)) from P(X,Y,Z), where P can be - specified as a flattened joint probability table. - - """ - PXYZ = P.reshape((2,2,2)) - PX = np.einsum('ijk->i', PXYZ).reshape((2,1,1)) - PYZ = np.einsum('ijk->jk', PXYZ).reshape((1,2,2)) - with np.errstate(divide='ignore', invalid='ignore'): - # division by zero and such warnings can be safely ignored as - # at the end we're only summing over those values where - # P(X,Y,Z) is strictly positive - result = (PXYZ * np.log2(PXYZ / (PX * PYZ)))[PXYZ>0].sum() - return result - -def I_2D(Q): - """Compute mutual information I(X:Y) from Q(X,Y), where Q must be - specified as a XxY joint probability table. - - """ - QX = np.atleast_2d(Q.sum(axis=1)) - QY = np.atleast_2d(Q.sum(axis=0)) - if min(QX.min(), QY.min()) < 0 or Q.min()<0: - raise ValueError("negative probabilities in Q") - with np.errstate(divide='ignore', invalid='ignore'): - table = Q * np.log2(Q / (QX.transpose() * QY)) - return table[Q>0].sum() - -def I_Q_XY(Q, axis=2): - """Compute a pairwise mutual information from Q(X,Y,Z), expressed as a - flattened joint probability table, by marginalising out the variable - indexed by `axis`: axis=2 gives I(X:Y), axis=1 gives I(X:Z). - - """ - QXY = Q.reshape((2,2,2)).sum(axis=axis) - return I_2D(QXY) - -def I_Q_XY_Z(Q): - """Compute mutual information I(X:Y|Z) from Q(X,Y,Z), expressed as a - flattened joint probability table.
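- - Here I(X:Y|Z) = sum_z Q(z) * I(X:Y|Z=z), i.e. the Q(z)-weighted average - of the mutual information of the two conditional 2x2 tables Q[:,:,z]/Q(z).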
- - """ - Q_XYZ = Q.reshape((2,2,2)) - return Q_XYZ[:,:,0].sum() * I_2D(Q_XYZ[:,:,0]/Q_XYZ[:,:,0].sum()) + Q_XYZ[:,:,1].sum() * I_2D(Q_XYZ[:,:,1]/Q_XYZ[:,:,1].sum()) - -def CoI_Q(P, a, b): - """Compute the co-information CoI_Q(X;Y;Z) of the probability - distribution Q, belonging to $\Delta_P$ and generated from P and a - specific choice of the value of the coordinates (a,b). - - """ - Q = P + a * GAMMA_0 + b * GAMMA_1 - return I_Q_XY(Q) - I_Q_XY_Z(Q) - -def domain_boundaries(P): - """Compute boundaries for the polytope $\Delta_P$, which is the - optimisation domain we need to consider in the computation of the - positive information decomposition. Since Q=P+a*GAMMA_0+b*GAMMA_1 - for some value of a and b for each Q in $\Delta_P$, the boundaries - of $\Delta_P$ are given by the requirement that Q must always be a - valid probability distribution. To understand how the expressions - below are derived, it is sufficient to write down (for instance) - P+a*GAMMA_0 explicitly in terms of coordinates, yielding - - [p_000+a, p_001-a, p_010-a, p_011+a, p_100, p_101, p_110, p_111] - - and to notice that, if this is to be a valid pdf, a must be larger - than -p_000, smaller than 1-p_000, and so on. - - """ - a_upper = min(min(P[1], P[2]), 1-max(P[0], P[3])) - a_lower = -min(1-max(P[1],P[2]), min(P[0], P[3])) - b_upper = min(min(P[5], P[6]), 1-max(P[4], P[7])) - b_lower = -min(1-max(P[5],P[6]), min(P[4], P[7])) - return a_lower, a_upper, b_lower, b_upper - -def parameter_meshgrid(P, npoints): - """Prepare a meshgrid of combinations of the coordinates a and b, - parametrising the domain $\Delta_P$ over which we want to find the - probability distribution with the maximum coinformation. - - """ - a_lower, a_upper, b_lower, b_upper = domain_boundaries(P) - a_range = np.linspace(a_lower, a_upper, npoints) - b_range = np.linspace(b_lower, b_upper, npoints) - AA, BB = np.meshgrid(a_range, b_range) - return AA, BB - -def coinformation_table(P, npoints): - """Compute the coinformation for a number of probability distributions - situated on a grid in $\Delta_P$. - - Parameters - ---------- - P: the probability distribution from which to generate the domain $\Delta_P$. - - npoints: linear resolution of the grid. The total number of points - will be npoints^2. - - """ - AA, BB = parameter_meshgrid(P, npoints) - CoIQQ = np.zeros_like(AA) - for i in range(AA.shape[0]): - for j in range(AA.shape[1]): - CoIQQ[i,j] = CoI_Q(P, AA[i,j], BB[i,j]) - return CoIQQ - -def decomposition(P, npoints=100): - """Positive information decomposition of distribution over three binary variables. - - This is a toy implementation of the decomposition defined in - Bertschinger et al (2014). - - Parameters - ---------- - P : 2x2x2 numpy array. - The joint probability table of the distribution. Each entry must be a - probability, so 0<=P[i,j,k]<=1 for all i,j,k and P.sum()=1. - - Probabilities are specified indexing into the table by their value: - - p(x=0,y=0,z=0) = P[0,0,0] - p(x=0,y=0,z=1) = P[0,0,1] - - ...and so on. - - npoints: the number of points to consider on each side of the mesh - grid we will use to optimise the coinformation over $\Delta_P$. 
- - Returns - ------- - SI : shared information SI(X:Y;Z) - CI : complementary information CI(X:Y;Z) - UI_Y : unique information UI(X:Y\Z) - UI_Z : unique information UI(X:Z\Y) - - Examples - -------- - >>> # PID for y and z being uniformly distributed and x=AND(y,z): - >>> import numpy as np - >>> from information_decomposition import decomposition - >>> P = np.array([0.25, 0.25, 0.25, 0, 0, 0, 0, 0.25]).reshape(2,2,2) - >>> SI, CI, UI_Y, UI_Z = decomposition(P) - >>> SI - 0.31127812445913278 - >>> CI - 0.49999999999999994 - >>> UI_Y - 0.0 - >>> UI_Z - 0.0 - - """ - # make sure P is correctly specified as a joint probability table - # over binary variables - P = P.astype(np.float) - assert P.shape == (2,2,2) - assert all(np.logical_and(P.flat>=0, P.flat<=1)) and np.isclose(P.sum(), 1) - - P = P.flat[:] - # shared information SI is the maximum of the coinformation over - # $\Delta_P$ - SI = coinformation_table(P, npoints).max() - # the other terms of the decomposition follow from the SI - UI_Y = I_Q_XY(P, axis=2) - SI - UI_Z = I_Q_XY(P, axis=1) - SI - CI = I_XYZ(P) - SI - UI_Y - UI_Z - return SI, CI, UI_Y, UI_Z -
diff --git a/src/python/information_decomposition_linearized.py b/src/python/information_decomposition_linearized.py deleted file mode 100644 index c9d7d22..0000000 --- a/src/python/information_decomposition_linearized.py +++ /dev/null @@ -1,264 +0,0 @@ -import numpy as np -import cvxpy - -def I_2D(q_xy): - """Compute mutual information I(X:Y) from Q(X,Y), where Q must be - specified as a XxY joint probability table. - - """ - q_x = q_xy.sum(axis=1, keepdims=True) - q_y = q_xy.sum(axis=0, keepdims=True) - if min(q_x.min(), q_y.min()) < 0 or q_xy.min()<0: - raise ValueError("negative probabilities in the input table") - with np.errstate(divide='ignore', invalid='ignore'): - table = q_xy * np.log2(q_xy / (q_x * q_y)) - return table[q_xy>0].sum() - -def I_3D_cond_z(q_xyz): - """Compute conditional mutual information I(X:Y|Z) from Q(X,Y,Z), - where Q must be specified as a dimx*dimy*dimz 3D joint probability - table. - """ - q_z = q_xyz.sum(axis=(0,1), keepdims=True) # q(z): marginalise out x and y - q_xz = q_xyz.sum(axis=1, keepdims=True) - q_yz = q_xyz.sum(axis=0, keepdims=True) - with np.errstate(divide='ignore', invalid='ignore'): - q_x_z = q_xz / q_z - q_y_z = q_yz / q_z - q_xy_z = q_xyz / q_z - table = q_xyz * np.log2(q_xy_z/(q_x_z*q_y_z)) - table[np.logical_not(np.isfinite(table))] = 0 - return table.sum() - -def lin_pid(p, accuracy=0.01, solver='ECOS', verbose=False): - - # work out dimensionality of the input probability distribution - (dimx, dimy, dimz) = p.shape - - # select appropriate LP solver - if solver=='CVXOPT': - solver = cvxpy.CVXOPT - elif solver=='ECOS': - solver = cvxpy.ECOS - - # Following Bertschinger2013, define a set of Gamma matrices that - # forms a basis of the optimization domain. All Gammas have the - # same dimensions as the input p. Their total number is - # (dimx-1)*(dimy-1)*dimz (see Bertschinger2013, Lemma 26). For - # ease of manipulation, we keep them all stacked into a larger - # array.
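- # Each basis element is a "four-corner" pattern within one z-slice: +1 at - # (ix,iy,iz) and (ix+1,iy+1,iz), -1 at (ix+1,iy,iz) and (ix,iy+1,iz), so - # adding any multiple of it to q leaves all (x,z) and (y,z) marginals - # unchanged. -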
GAMMA = np.zeros((dimx, dimy, dimz, dimx-1, dimy-1, dimz)) - for iz in range(dimz): - for ix in range(dimx-1): - for iy in range(dimy-1): - GAMMA[ix,iy,iz,ix,iy,iz] = 1 - GAMMA[ix+1,iy,iz,ix,iy,iz] = -1 - GAMMA[ix,iy+1,iz,ix,iy,iz] = -1 - GAMMA[ix+1,iy+1,iz,ix,iy,iz] = 1 - - # Algorithm termination flag - done = False - - # number of iterations of the algorithm - iteration = 0 - - # Coefficients of the matrix q expressed in the basis of Gamma - # matrices - parameters = np.zeros((dimz*(dimx-1)*(dimy-1), 1)) - - # starting point of the algorithm - q = p.copy() - coeff_prev = parameters.copy() - - # create constraint definitions - b = np.zeros((dimz, 2*dimx*dimy,1)) - A = np.zeros((dimz, 2*dimx*dimy, (dimx-1)*(dimy-1))) - for iz in range(dimz): - A[iz,:,:], b[iz,:,:] = define_constraints(p, iz) - - while not done: - # compute derivative of MI_q along the basis vectors - with np.errstate(divide='ignore', invalid='ignore'): - deriv = np.log2(q[:-1,:-1]*q[1:,1:]) - \ - np.log2(q[:-1,1:]*q[1:,:-1]) + \ - np.log2(q[:-1,1:].sum(axis=2, keepdims=True) * \ - q[1:,:-1].sum(axis=2, keepdims=True)) - \ - np.log2(q[:-1,:-1].sum(axis=2, keepdims=True) * \ - q[1:,1:].sum(axis=2, keepdims=True)) - - # get rid of nonsense values of deriv coming from finite numerical precision - deriv[np.logical_not(np.isfinite(deriv))] = 0 - # check for bad starting point where deriv==0 - if np.all(deriv==0): - deriv = 1e-9*(np.random.random(deriv.shape)-1) - - - # stores the coefficients of the q of the current - # iteration. For each value of z there is a coeff, then - # coeff_tot stacks all coeffs. - coeff_tot = np.zeros(((dimx-1)*(dimy-1), dimz)) - - for iz in range(dimz): - # extract the portion of deriv pertaining to iz and adapt - # deriv_iz to the multiplication deriv_iz*coeff - deriv_iz = deriv[:,:,iz].reshape((-1,1), order='C') - - x = cvxpy.Variable(deriv_iz.size) - obj = cvxpy.Minimize(deriv_iz.T*x) - constraints = [A[iz,:,:]*x<=(b[iz,:].reshape(-1,1))] - prob = cvxpy.Problem(obj, constraints) - prob.solve(solver=solver, verbose=verbose) - coeff_tot[:,iz] = x.value.T - - coeff_tot = coeff_tot.reshape((-1,1), order='F') - - p_k = p.copy() - for ind_gamma in range((dimx-1)*(dimy-1)*dimz): - ix = np.mod(ind_gamma/(dimy-1), dimx-1).astype(np.int) - iy = np.mod(ind_gamma, dimy-1).astype(np.int) - iz = np.floor(ind_gamma/((dimx-1)*(dimy-1))).astype(np.int) - p_k = p_k + coeff_tot[ind_gamma]*GAMMA[:,:,:,ix,iy,iz] - - # this is supposed to build a concatenated array where the - # first block corresponds to deriv_iz for iz=0, the second - # corresponds to deriv_iz for iz=1, etc - deriv = np.transpose(deriv, axes=(1,0,2)).reshape((-1,1), order='F') - - # set the stopping criterion based on the duality gap, see Stratos; - if (iteration>0 and np.dot(deriv.T,coeff_prev-coeff_tot)<=accuracy) or iteration > 500: - print(iteration) # report how many Frank-Wolfe iterations were used - done = True # exit the algorithm - else: - # fixed increment; simplest version of Frank-Wolfe - gamma_k = 2.0/(iteration+2) - # update the q for next iteration - q = q + gamma_k*(p_k-q) - # update coeff_prev for next iteration - coeff_prev = coeff_prev + gamma_k*(coeff_tot-coeff_prev) - - iteration += 1 - - # get rid of tiny negative entries in q which result from - # limited numerical precision - q[q<0] = 0 - - - print(np.all(q==p)) # diagnostic: True if the optimum coincides with the starting point p - # compute co-information for the optimal distribution: - # coI_q(X:Y:Z)=I_q(X:Y)-I_q(X:Y|Z) - co_I = I_2D(q.sum(axis=2)) - I_3D_cond_z(q) - - # compute the decomposition - # - # shared information is the coinformation of the optimal - # distribution - SI = 
co_I - - I_xz = I_2D(p.sum(axis=1)) - I_yz = I_2D(p.sum(axis=0)) - I_xy_z = I_2D(p.reshape((dimx*dimy,dimz))) - - # first output unique - UI_X = I_xz - SI - - # second output unique - UI_Y = I_yz - SI - - # output synergy - CI = I_xy_z - UI_X - UI_Y - SI - - return SI, CI, UI_X, UI_Y - - -def define_constraints(p, iz): - dimx, dimy, dimz = p.shape - - # the constraints on q, for each z, are implemented via the inequality A*coeff<=b - b = np.zeros((2*dimx*dimy,1)) - A = np.zeros((2*dimx*dimy, (dimx-1)*(dimy-1))) - - # the number of constraints to be imposed - count = 0 - - for ix in range(dimx-1): - for iy in range(dimy-1): - if ix==0 and iy==0: - # set constraints for the top-left entries q[0,0,iz] - A[count, ix*(dimy-1)+iy] = -1 - A[count+dimx*dimy, ix*(dimy-1)+iy] = 1 - b[count] = p[ix,iy,iz] - b[count+dimx*dimy] = 1-p[ix,iy,iz] - count += 1 - - if ix==dimx-2 and iy==dimy-2: - # bottom right entries - A[count, ix*(dimy-1)+iy] = -1 - A[count+dimx*dimy, ix*(dimy-1)+iy] = 1 - b[count] = p[ix+1,iy+1,iz] - b[count+dimx*dimy] = 1-p[ix+1,iy+1,iz] - count += 1 - - if ix==0 and iy==dimy-2: - # top right entries - A[count, ix*(dimy-1)+iy] = 1 - A[count+dimx*dimy, ix*(dimy-1)+iy]=-1 - b[count] = p[ix,iy+1,iz] - b[count+dimx*dimy] = 1-p[ix,iy+1,iz] - count += 1 - - if ix==dimx-2 and iy==0: - # bottom left entries - A[count, ix*(dimy-1)+iy] = 1 - A[count+dimx*dimy, ix*(dimy-1)+iy] = -1 - b[count] = p[ix+1,iy,iz] - b[count+dimx*dimy] = 1-p[ix+1,iy,iz] - count += 1 - - if ix==0 and dimy>2 and iy<dimy-2: - # top edge entries q[0,iy+1,iz] - A[count, ix*(dimy-1)+iy+1] = -1 - A[count, ix*(dimy-1)+iy] = 1 - A[count+dimx*dimy, ix*(dimy-1)+iy+1] = 1 - A[count+dimx*dimy, ix*(dimy-1)+iy] = -1 - b[count] = p[ix,iy+1,iz] - b[count+dimx*dimy] = 1-p[ix,iy+1,iz] - count += 1 - - if ix==dimx-2 and dimy>2 and iy<dimy-2: - # bottom edge entries q[dimx-1,iy+1,iz] - A[count, ix*(dimy-1)+iy+1] = 1 - A[count, ix*(dimy-1)+iy] = -1 - A[count+dimx*dimy, ix*(dimy-1)+iy+1] = -1 - A[count+dimx*dimy, ix*(dimy-1)+iy] = 1 - b[count] = p[ix+1,iy+1,iz] - b[count+dimx*dimy] = 1-p[ix+1,iy+1,iz] - count += 1 - - if iy==0 and dimx>2 and ix<dimx-2: - # left edge entries q[ix+1,0,iz] - A[count, (ix+1)*(dimy-1)+iy] = -1 - A[count, ix*(dimy-1)+iy] = 1 - A[count+dimx*dimy, (ix+1)*(dimy-1)+iy] = 1 - A[count+dimx*dimy, ix*(dimy-1)+iy] = -1 - b[count] = p[ix+1,iy,iz] - b[count+dimx*dimy] = 1-p[ix+1,iy,iz] - count += 1 - - if iy==dimy-2 and dimx>2 and ix<dimx-2: - # right edge entries q[ix+1,dimy-1,iz] - A[count, (ix+1)*(dimy-1)+iy] = 1 - A[count, ix*(dimy-1)+iy] = -1 - A[count+dimx*dimy, (ix+1)*(dimy-1)+iy] = -1 - A[count+dimx*dimy, ix*(dimy-1)+iy] = 1 - b[count] = p[ix+1,iy+1,iz] - b[count+dimx*dimy] = 1-p[ix+1,iy+1,iz] - count += 1 - - if dimx>2 and dimy>2 and 0<=ix<dimx-2 and 0<=iy<dimy-2: - # interior entries q[ix+1,iy+1,iz] - A[count, (ix+1)*(dimy-1)+iy+1] = -1 - A[count, ix*(dimy-1)+iy+1] = 1 - A[count, (ix+1)*(dimy-1)+iy] = 1 - A[count, ix*(dimy-1)+iy] = -1 - A[count+dimx*dimy, (ix+1)*(dimy-1)+iy+1] = 1 - A[count+dimx*dimy, ix*(dimy-1)+iy+1] = -1 - A[count+dimx*dimy, (ix+1)*(dimy-1)+iy] = -1 - A[count+dimx*dimy, ix*(dimy-1)+iy] = 1 - b[count] = p[ix+1,iy+1,iz] - b[count+dimx*dimy] = 1-p[ix+1,iy+1,iz] - count += 1 - - return A, b
diff --git a/src/python/intersection_information.py b/src/python/intersection_information.py deleted file mode 100644 --- a/src/python/intersection_information.py +++ /dev/null -import numpy as np - -import information_decomposition as pid - - -def compute_shuffle(P): - """Compute the "null" distribution obtained from P(S,R,C) by keeping - the (S,R) and (S,C) marginals but making R and C conditionally - independent given S, i.e. imposing the graphical structure - R<-S->C. - - """ - PX = np.einsum('ijk->i', P).reshape((2,1,1)) # P(S), shaped to broadcast over the (R,C) axes - PXY = np.einsum('ijk->ij', P) - PXZ = np.einsum('ijk->ik', P) - return np.einsum('ij,ik->ijk', PXY, PXZ)/PX - - -def intersection_information(P, npoints=100): - """Compute intersection information from P(S,R,C). - - Intersection information is computed as SI(R:S;C)-SI_n(R:S;C), - where the first term is the shared information (according to - Bertschinger) between S and C about R, and the second term is the - same quantity computed for a "null" distribution where the (S,R) - and (S,C) marginals are the same but the graphical structure is - R<-S->C (i.e. shuffled data without signal correlations). The idea - behind this definition of null distribution is to subtract out the - maximum amount of shared information that could be due to signal - correlations. - - Parameters - ---------- - P : 2x2x2 numpy array. - Joint (S,R,C) probability table, specified indexing into the table - by the values of S, R and C as for the 'decomposition' function. - - npoints: see the documentation for 'decomposition'. - - - Returns - ------- - II : intersection information for (S,R,C) - SI_P : shared information for the original distribution - SI_Pn : shared information for the null distribution - - """ - # compute shared information for real data. Note that - # pid.decomposition uses the variable linked to the first index of - # P as "output" variable, so if P is indexed by (S,R,C) in this - # order we have to permute the array describing the joint pdf - # before passing it to the function.
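- # np.transpose(P, (1,0,2)) reorders (S,R,C) -> (R,S,C), so that R sits - # on the first ("output") axis. -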
- SI_P = pid.decomposition(np.transpose(P, (1,0,2)), npoints=npoints)[0] - - # compute null distribution - Pn = compute_shuffle(P) - - # compute shared information for null distribution - SI_Pn = pid.decomposition(np.transpose(Pn, (1,0,2)), npoints=npoints)[0] - - return SI_P - SI_Pn, SI_P, SI_Pn - - diff --git a/src/python/test.py b/src/python/test.py deleted file mode 100644 index 01c8ed0..0000000 --- a/src/python/test.py +++ /dev/null @@ -1,92 +0,0 @@ -import numpy as np -from information_decomposition_linearized import lin_pid - -aa=np.int(0); - -lstep = 0.05 -l_values = np.arange(0.1, 1.01, lstep) -l_n = l_values.size - -I_shar=np.zeros(l_n); -I_uny=np.zeros(l_n); -I_unz=np.zeros(l_n); -I_syn=np.zeros(l_n); - -I_shar_z=np.zeros(l_n); -I_uny_z=np.zeros(l_n); -I_unz_z=np.zeros(l_n); -I_syn_z=np.zeros(l_n); - -#Y and Z are the dice, X is the weighted sum of the two - - -# summed-dice dimensions -dimy=np.int(6); -dimz=np.int(6); - -alpha=np.int(1); # relative ratio between the sum coefficients for summing the dice - -dimx=np.int(np.ceil(dimy+dimz*alpha-1-alpha)+1); - - - -#make X the new target -dimx_x=dimz; -dimy_x=dimy; -dimz_x=dimx; - - -for (k,l) in enumerate(l_values): # loop over different correlations between the dice - - - p_yz = np.zeros((dimy,dimz)) - - # setting correlations between the dice - for yy in range(0,dimy): - for zz in range(0,dimz): - p_yz[yy,zz]=l/36+(1-l)/6*np.float(zz==yy) - - p_yz=p_yz/p_yz.sum() # p(y,z), joint probability of the two dice alone - - p_x_yz=np.zeros((dimx,dimy,dimz)) # p(x|y,z) - - #build p(x|y,z) analytically - for xx in range(0,dimx): - for yy in range(0,dimy): - for zz in range(0,dimz): - #if xx==np.ceil(zz+alpha*yy)-alpha: - if xx==np.ceil(zz+alpha*yy): - p_x_yz[xx,yy,zz]=1 - - p=np.zeros((dimx,dimy,dimz)) - - #build p analytically - for xx in range(0,dimx): - for yy in range(0,dimy): - for zz in range(0,dimz): - p[xx,yy,zz]=p_x_yz[xx,yy,zz]*p_yz[yy,zz] - - p=p/p.sum() #final normalization - - - # compute linearised PID - [SI, CI, UI_Y, UI_Z] = lin_pid(p.T, verbose=False) - I_syn[k] = CI - I_shar[k] = SI - I_uny[k] = UI_Y - I_unz[k] = UI_Z - - - # adapt definitions to those expected by tartu - P = dict() - P_z = dict() - for x in range(0,dimx): - for y in range(0,dimy): - for z in range(0,dimz): - P[(x,y,z)] = p[x,y,z] - P_z[(z,y,x)] = p[x,y,z] - - print('\nLambda: {:.2f}'.format(l)) - #%time [_, _, _, I_syn[k],I_shar[k],I_uny[k],I_unz[k]] = synergy_tartu.solve_PDF(pdf=P,verbose=False,feas_eps=1e-15,feas_eps_2=1e-15) - #%time [_, _, _, I_syn_z[aa],I_shar_z[aa],I_uny_z[aa],I_unz_z[aa]] = synergy_tartu.solve_PDF(pdf=P_z) -
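For reference, a minimal usage sketch of the deleted Python modules (a sketch only: it assumes src/python is on the import path, and reuses the AND-gate table from the doctest of information_decomposition.decomposition):

    import numpy as np
    from information_decomposition import decomposition
    from intersection_information import intersection_information

    # x = AND(y, z) with uniform (y, z); the table is indexed as P[x, y, z]
    P = np.array([0.25, 0.25, 0.25, 0, 0, 0, 0, 0.25]).reshape(2, 2, 2)
    SI, CI, UI_Y, UI_Z = decomposition(P)

    # intersection information for a joint (S, R, C) table of the same shape
    II, SI_P, SI_Pn = intersection_information(P)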