Commit 4029a3f9 authored by Larkin Heintzman

added figures and trust_model

parent b554e2b2
import numpy as np
import scipy.stats as ss
import elfi
import matplotlib.pyplot as plt
def MA2(t1, t2, n_obs=100, batch_size=1, random_state=None):
# Make inputs 2d arrays for numpy broadcasting with w
t1 = np.asanyarray(t1).reshape((-1, 1))
t2 = np.asanyarray(t2).reshape((-1, 1))
random_state = random_state or np.random
w = random_state.randn(batch_size, n_obs+2) # i.i.d. sequence ~ N(0,1)
x = w[:, 2:] + t1*w[:, 1:-1] + t2*w[:, :-2]
return x
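# The simulator above draws a moving-average MA(2) sequence: x_i = w_i + t1*w_{i-1} + t2*w_{i-2},
# where w is i.i.d. N(0,1) noise; each of the batch_size rows is one independent realization of length n_obs.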
def autocov(x, lag=1):
C = np.mean(x[:,lag:] * x[:,:-lag], axis=1)
return C
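# autocov returns the (uncentered) lag-k autocovariance of each row, i.e. the mean of x_t * x_{t-lag} over t.
# These are the summary statistics used for ABC below.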
if __name__ == "__main__":
# Set an arbitrary seed and a global random state to keep the randomly generated quantities the same between runs
seed = 20170530 # this will be separately given to ELFI
np.random.seed(seed)
# true parameters
t1_true = 0.6
t2_true = 0.2
y_obs = MA2(t1_true, t2_true)
# Plot the observed sequence
plt.figure(figsize=(11, 6));
plt.plot(y_obs.ravel());
# To illustrate the stochasticity, let's plot a couple of more observations with the same true parameters:
plt.plot(MA2(t1_true, t2_true).ravel());
plt.plot(MA2(t1_true, t2_true).ravel());
# a node is defined by giving a distribution from scipy.stats together with any arguments (here 0 and 2)
t1 = elfi.Prior('uniform', 0, 2)
# ELFI also supports giving the scipy.stats distributions as strings
t2 = elfi.Prior('uniform', 0, 2)
Y = elfi.Simulator(MA2, t1, t2, observed = y_obs)
S1 = elfi.Summary(autocov, Y, 1)
S2 = elfi.Summary(autocov, Y, 2) # the optional keyword lag is given the value 2
# Finish the model with the final node that calculates the Euclidean distance between the observed and simulated summaries
d = elfi.Distance('euclidean', S1, S2)
rej = elfi.Rejection(d, batch_size=10000, seed=seed)
N = 1000
# You can give the sample method a `vis` keyword to see an animation of how the prior transforms toward the
# posterior as the threshold decreases.
result = rej.sample(N, quantile=0.01)
result2 = rej.sample(N, threshold=0.2)
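# quantile=0.01 keeps the best 1% of the simulated samples (the acceptance threshold is chosen
# adaptively), while threshold=0.2 accepts every sample whose distance is below 0.2.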
smc = elfi.SMC(d, batch_size=10000, seed = seed)
schedule = [0.7, 0.2, 0.05]
result_smc = smc.sample(N, schedule)
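# SMC ABC runs one population per threshold in the schedule (0.7 -> 0.2 -> 0.05), using each accepted
# population as the proposal for the next, which typically needs far fewer simulations than plain
# rejection at the tightest threshold.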
print("quantile results:")
print(result.summary())
print("threshold results:")
print(result2.summary())
print("smc results:")
print(result_smc.summary(all=True))
# plt.show()
import numpy as np
from trust_model.base_model import HumanTrust
from trust_model.MC_class import MCTrust
import elfi
import scipy.stats as ss
import pdb
import matplotlib.pyplot as plt
import time
class ABCTrust(HumanTrust):
def __init__(self, parameterNames, parameterRanges, overwrites = None, seed = None):
super().__init__() # python magic
self.capabilityList = []
self.agreementList = []
self.observedData = []
self.predictedParameterSet = {}
self.maxSampleTime = 1
if seed is None:
self.seed = 8080 # default seed
else:
self.seed = seed
self.parameterNames = parameterNames
self.parameterRanges = parameterRanges
# parameter priors
self.priorList = self.createPriors(self.parameterNames, self.parameterRanges)
# define output pool to reuse certain data
self.pool = elfi.OutputPool(self.parameterNames) # tracking only priors
# set up model
numberObservations = len(self.observedData)
self.Simulator = elfi.Simulator(self.simulator, self.parameterNames, numberObservations, *self.priorList, observed = self.observedData)
self.S1 = elfi.Summary(self.summaryMean, self.Simulator, name = 'summary_1')
self.S2 = elfi.Summary(self.summaryVar, self.Simulator, name = 'summary_2')
# trying out new summary statistics
self.S3 = elfi.Summary(self.summarySkew, self.Simulator, name = 'summary_3')
# self.S3 = elfi.Summary(self.summaryCov, self.Simulator, name = 'summary_3')
# Finish the model with the final node: the Euclidean distance between the observed and simulated summaries
self.Dist = elfi.Distance('euclidean', self.S1, self.S2)
# do initial run to get parameters
self.fullReset()
def loadParameters(self, parameterNames, parameterValues):
self.setParameters() # handles own randomization for now
# ARRAY-IFY
# assume batch size > 1 and that parameters aren't empty
overwrites = {parameterNames[i] : parameterValues[i] for i in range(len(parameterNames))}
# overwrite the corresponding attributes on self
for key, val in overwrites.items():
self.__dict__[key] = val
def simulator(self, paramsNames, n_obs, *paramsValues, batch_size = 1, random_state = None):
# generate reliances for n_obs steps using parameters in parameterList
# priors are assumed to be uniform for now
random_state = random_state or np.random
caps = np.array(self.capabilityList[:n_obs])
ags = np.array(self.agreementList[:n_obs])
rels = np.zeros([n_obs, batch_size])
# load batch of parameters by creating overwrite
self.loadParameters(paramsNames, paramsValues)
for i in range(n_obs):
self.updateBatch(capability_a = caps[i], agreement = ags[i]) # need to control random seed here
# save reliance value
rels[i,:] = self.rel
return rels
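# Note on shapes: this simulator returns an (n_obs, batch_size) array and the summary statistics
# below reduce over axis 0, so every batch member ends up with one value per summary statistic.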
def createPriors(self, parameterNames, parameterRanges):
output = []
for i,param in enumerate(parameterNames):
if parameterRanges[i][-1] >= 0:
output.append(elfi.Prior('uniform', parameterRanges[i][0], parameterRanges[i][1], name = parameterNames[i]))
else:
output.append(elfi.Prior('bernoulli', parameterRanges[i][0], name = parameterNames[i]))
return output
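# elfi.Prior('uniform', a, b) follows the scipy.stats (loc, scale) convention, i.e. uniform on [a, a+b],
# so each range is interpreted as (start, width); a negative last element is used here as a flag to
# fall back to a bernoulli prior instead.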
def runABC(self, samples = 10, batch_size = 100, schedule = None, threshold = 0.1):
# execute the ABC process to get approximate parameters
start_time = time.time()
# number of observations
numberObservations = len(self.observedData)
# print(self.observedData)
if (numberObservations != len(self.capabilityList)):
print("lists sizes do not match, quitting")
return None
# redefine the model with the new number of observations
self.Simulator.become(elfi.Simulator(self.simulator, self.parameterNames, numberObservations, *self.priorList, observed = self.observedData))
self.S1.become(elfi.Summary(self.summaryMean, self.Simulator))
self.S2.become(elfi.Summary(self.summaryVar, self.Simulator))
self.S3.become(elfi.Summary(self.summarySkew, self.Simulator))
# self.S3.become(elfi.Summary(self.summaryCov, self.Simulator))
# Finish the model with the final node: the Euclidean distance over the three summaries
self.Dist.become(elfi.Distance('euclidean', self.S1, self.S2, self.S3))
# self.pool = elfi.OutputPool([d,S1,S2,*self.parameterNames]) # tracking only priors
rej = elfi.Rejection(self.Dist, batch_size=batch_size, seed=self.seed, pool = self.pool) # most basic-ist
if schedule is None:
schedule = [0.5, 0.25, 0.05]
# result = rej.sample(samples, schedule)
# request given threshold but w time limit
rej.set_objective(samples, threshold = threshold)
# We only have self.maxSampleTime seconds (1 s by default) and may not finish in that time,
# so simulate as many batches as we can.
time0 = time.time()
time1 = time0 + self.maxSampleTime
iterCounter = 0
maxIters = 100
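# Anytime ABC: set_objective() registers the target sample size / threshold, and each call to
# iterate() runs one more batch of simulations (ELFI's iterative inference API). The loop keeps
# iterating until the time budget expires (and at least until a finite threshold exists), unless
# the sampler reports finished or maxIters batches have been run; extract_result() then returns
# whatever has been accepted so far.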
while (time.time() < time1 or np.isinf(rej.state['threshold'])) and not rej.finished and iterCounter <= maxIters:
# pdb.set_trace()
rej.iterate()
iterCounter += 1
# vals = rej.extract_result().sample_means.values()
# pdb.set_trace()
print("finished?: {} in {}".format(rej.finished, time.time() - time0))
if (iterCounter < maxIters):
# print("finished?: {}".format(rej.finished))
# Extract and print the result as it stands. It will show us how many simulations were generated.
# print(rej.extract_result())
# result = rej.sample(samples, threshold = threshold)
result = rej.extract_result()
# do some saving of parameters
self.predictedParameterSet = result.sample_means # comes as overwrite dict
pNames = [ki[0] for ki in result.sample_means.items()]
pValues = [ki[1] for ki in result.sample_means.items()]
self.loadParameters(pNames, pValues)
# re run with new parameters up to observed data
for i in range(numberObservations):
self.update(capability_a = self.capabilityList[i], agreement = self.agreementList[i])
# do something clever with priors??
print("total time required for {} samples with {} observed data points: {}".format(samples, len(self.observedData), time.time() - start_time))
print(result.summary())
return result
else:
print("TOO MANY ITERS---------------------------------------")
self.fullReset()
return False
def fullReset(self):
self.capabilityList = []
self.agreementList = []
self.observedData = []
# get random values from parameter ranges
parameterValues = []
for prior in self.priorList:
parameterValues.append(prior.generate().item())
self.loadParameters(self.parameterNames, parameterValues)
def summaryMean(self, x, p = None):
return np.mean(x, axis = 0) # take mean along number of obs
def summaryVar(self, x, p = None):
return np.var(x, axis = 0) # take variance along number of obs
def summaryCov(self, x, p = None):
# def autocovariance(Xi, N, k, Xs):
autoCov = 0
k = 1
xs = np.mean(x)
for i in np.arange(0, len(x) - k):
autoCov += ((x[i+k])-xs)*(x[i]-xs)
return (1/(len(x)-1))*autoCov
def summarySkew(self, x, p = None):
return ss.skew(x, axis = 0)
def autocov(x, lag=1):
C = np.mean(x[:,lag:] * x[:,:-lag], axis=1)
return C
def getParameters(self, paramsList):
# returns list of parameters based on names
try:
returnList = [self.__dict__[key] for key in paramsList]
except KeyError as e:
print("key not found: {}".format(e))
returnList = []
return returnList
def updateInputs(self, capability_m = 0.0, capability_a = 0.0, agreement = False, reliance = False):
# record capability and agreement
self.capabilityList.append(capability_a)
self.agreementList.append(agreement)
self.observedData.append(reliance)
def updateBatch(self, capability_m = 0.0, capability_a = 0.0, agreement = False):
self.update_cap(capability_m = capability_m, capability_a = capability_a)
# manual capability belief update
bm_out = self.bm + self.b1 * (self.cm - self.bm)
# messing with the automation belief, agreement bias term
if agreement:
ba_out = self.ba + self.b1 * (self.ca - self.ba) + self.b2*( 1 - self.ba )
else:
ba_out = self.ba + self.b1 * (self.ca - self.ba) - self.b2*( 1 - self.ba )
# update variables
self.bm = bm_out
self.ba = ba_out
# --------------------------------
# trust update
eps_t = np.random.normal(0, np.sqrt(self.var_t)) # noise term
self.t = (1 - self.s_t) * self.t + self.s_t * self.ba + eps_t
# self confidence update
eps_s = np.random.normal(0, np.sqrt(self.var_sc)) # noise term
self.sc = (1 - self.s_sc) * self.sc + self.s_sc * self.bm + eps_s
# preference
self.p = self.t - self.sc
if (np.isscalar(self.rel)):
self.rel = self.rel0*np.ones(self.t.shape) # if we have not updated in batch form, this is necessary
self.rel[np.where(self.t >= self.theta_t + self.theta)] = 1
self.rel[np.where(self.t <= self.theta_t - self.theta)] = 0
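# updateBatch mirrors update() but works on a whole batch of parameter vectors at once: self.t,
# self.ba, etc. become arrays (one entry per batch member) and reliance is switched element-wise
# to 1 where trust exceeds theta_t + theta and to 0 where it falls below theta_t - theta.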
def update(self, capability_m = 0.0, capability_a = 0.0, agreement = False):
self.update_cap(capability_m = capability_m, capability_a = capability_a)
# manual capability belief update
bm_out = self.bm + self.b1 * (self.cm - self.bm)
# messing with the automation belief, agreement bias term
if agreement:
ba_out = self.ba + self.b1 * (self.ca - self.ba) + self.b2*( 1 - self.ba )
else:
ba_out = self.ba + self.b1 * (self.ca - self.ba)
# update variables
self.bm = bm_out
self.ba = ba_out
# --------------------------------
# trust update
eps_t = np.random.normal(0, np.sqrt(self.var_t)) # noise term
self.t = (1 - self.s_t) * self.t + self.s_t * self.ba + eps_t
# self confidence update
eps_s = np.random.normal(0, np.sqrt(self.var_sc)) # noise term
self.sc = (1 - self.s_sc) * self.sc + self.s_sc * self.bm + eps_s
# preference
self.p = self.t - self.sc
if self.t >= self.theta_t + self.theta: # modified threshold
self.rel = 1 # go to auto control
elif self.t <= self.theta_t - self.theta:
self.rel = 0 # go to manual
else:
pass # no swap
# ----------------------------------------
if __name__ == "__main__":
pass
# :)
# np.random.seed(99)
# parameterNames = ['s_t','b1','b0','b2','var_t','t0','theta','theta_t']
# parameterRanges = [(0,1),(1,100),(1,100),(1,100),(0,2),(0,1),(0,1),(0,1)]
#
# learningHuman = ABCTrust(parameterNames, parameterRanges, overwrites = None, seed = 10)
#
# # set up fake interaction data (inputs)
# interactionNumber = 100
# learningHuman.capabilityList = [0.5 + 0.25]*interactionNumber
# learningHuman.agreementList = [True]*interactionNumber
#
# parameterValues = learningHuman.getParameters(parameterNames)
#
# learningHuman.observedData = learningHuman.simulator(parameterNames, len(learningHuman.capabilityList), *parameterValues, batch_size=1, random_state=learningHuman.seed)
# print(learningHuman.observedData)
#
# # run ABC with observed data
# startTime = time.time()
# results = learningHuman.runABC(samples = 1000, batch_size = 100, threshold = 0.1)
# print("total time taken: {}".format(time.time() - startTime))
#
# # results.plot_marginals()
# # plt.show()
# print(results.summary())
# print("real values: {}".format({parameterNames[i] : parameterValues[i] for i in range(len(parameterNames))}))
import numpy as np
from trust_model.base_model import HumanTrust
import pdb
class MCTrust(HumanTrust):
def __init__(self, pNames, mcRanges, firstStep):
super().__init__() # python magic
# mcRanges is dictionary that holds parameter names and ranges for each iteration of monte carlo
self.parameterNames = pNames
self.parameterRanges = {}
self.mcRanges = mcRanges
self.stepSets(firstStep)
def stepSets(self, step = 0):
for p, pName in enumerate(self.parameterNames):
self.parameterRanges[pName] = self.mcRanges[step][p]
self.randParameters()
# pdb.set_trace()
return self.parameterRanges
def randParameters(self):
# first set parameters, then randomize based on ranges
self.setParameters()
for (pName, pRange) in self.parameterRanges.items():
# overwrite w random garbage
self.__dict__[pName] = pRange[0] + pRange[1]*np.random.rand();
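# Each parameter is redrawn uniformly from [start, start + width], matching the (start, width)
# range convention used elsewhere.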
#
# def load_sets(self, sets):
# # load in sets of values to plug in for each variable
# # sets is list of tuples, (name, values)
# for i, pair in enumerate(sets):
# if pair[0] in dir(self):
# self.vars.update({pair[0] : (pair[1], pair[2])})
#
# # load initial values
# for ky, val in self.vars.items():
# self.__dict__[ky] = val[0][0]
#
# # figure out active variables, variables that'll change
#
# def step_sets(self):
# # step variables and re-init
# for ky, val in self.vars.items():
# if val[1] == 0: # columns
# if self.cstep == len(val[0]):
# self.__dict__[ky] = val[0][0] # reset
# row_flag = True
# self.cstep = 1
# else:
# self.__dict__[ky] = val[0][self.cstep]
# row_flag = False
# self.cstep = self.cstep + 1
#
# elif val[1] == 1 and row_flag: # rows
#
# # if self.rstep == len(val[0]):
# # self.rstep = 1
# if self.rstep < len(val[0]): # hack tastic
# self.__dict__[ky] = val[0][self.rstep]
# self.rstep = self.rstep + 1
#
# self.reset()
# return self.cstep, self.rstep
import numpy as np
import matplotlib.pyplot as plt
import scipy.stats as ss
from scipy.stats import binom
from matplotlib import cm
class HumanTrust:
def __init__(self):
self.setParameters()
# set up lookup table
self.lookup = {
'b1' : self.b1, # system transparency (>=1, 1 means perfect interface)
'b0' : self.b0, # recency of belief (>=1, 1 means belief depends only on initial)
'b2' : self.b2, # agreement bonus (>=1)
's_t' : self.s_t, # trust inertia
's_sc' : self.s_sc, # self confidence inertia
'var_t' : self.var_t, # variance of trust
'var_sc' : self.var_sc, # variance of self confidence
't0' : self.t0, # initial trust in automation
'sc0' : self.sc0, # initial self-confidence
'theta' : self.theta, # threshold to switch preference
'theta_t' : self.theta_t,
'ba0' : self.ba0,
'bm0' : self.bm0,
'mode' : self.mode,
'inf_mode' : self.inf_mode,
'cm' : self.cm,
'ca' : self.ca,
'rel' : self.rel
}
def setParameters(self):
# placeholder default values for the parameters of interest
self.b1 = 0.02 # system transparency (>=1, 1 means perfect interface)
self.b0 = 0.01 # recency of belief (>=1, 1 means belief depends only on initial)
self.b2 = 0.005 # agreement bias term (>=1)
self.s_t = 0.5 # trust inertia
self.s_sc = 0.5 # self confidence inertia
self.var_t = 0.001 # variance of trust
self.var_sc = 0.001 # variance of self confidence
self.t0 = 0.5 # initial trust in automation
self.sc0 = 0.5 # initial self-confidence
self.z = self.t0-self.sc0 # initial pref
self.theta = 0.30 # threshold to switch preference
self.theta_t = 0.60 # threshold for trust only mode
self.ba0 = 0.50 # initial belief in automation
self.bm0 = 0.50 # initial belief in manual
self.rel0 = 0 # initial reliance
self.mode = 'trust' # what mode to operate in, 'trust', 'preference'
self.inf_mode = 'always' # 'reliance' indicates capability only given if relying, always given otherwise WIP
# variable defs
self.t = self.t0
self.sc = self.sc0
self.p = self.t - self.sc
self.cm = 0.5 # capabilities (set these to something smart)
self.ca = 0.5
self.bm = self.bm0 # beliefs
self.ba = self.ba0
self.rel = self.rel0
def reset(self):
# variable defs
self.t = self.t0
self.sc = self.sc0
self.p = self.t - self.sc
self.cm = 0.5
self.ca = 0.5
self.bm = self.bm0 # beliefs
self.ba = self.ba0
self.rel = self.rel0
def update(self, capability_m = 0.0, capability_a = 0.0, agreement = False):
self.update_cap(capability_m = capability_m, capability_a = capability_a)
# manual capability belief update
if self.inf_mode == 'always':
bm_out = self.bm + self.b1 * (self.cm - self.bm)
# messing with the automation belief, agreement bias term
if agreement:
ba_out = self.ba + self.b1 * (self.ca - self.ba) + self.b2*( 1 - self.ba )
else:
ba_out = self.ba + self.b1 * (self.ca - self.ba) - self.b2*( 1 - self.ba )
else:
if self.rel: # we ARE relying on automation, so manual capability is not available
ba_out = self.ba + self.b1 * (self.ca - self.ba)
bm_out = self.bm + self.b0 * (self.bm0 - self.bm)
else: # not relying on automation, so automation not available
bm_out = self.bm + self.b1 * (self.cm - self.bm)
ba_out = self.ba + self.b0 * (self.ba0 - self.ba)
# update variables
self.bm = bm_out
self.ba = ba_out
# --------------------------------
# trust update
eps_t = np.random.normal(0, np.sqrt(self.var_t)) # noise term
self.t = (1 - self.s_t) * self.t + self.s_t * self.ba + eps_t
# self confidence update
eps_s = np.random.normal(0, np.sqrt(self.var_sc)) # noise term
self.sc = (1 - self.s_sc) * self.sc + self.s_sc * self.bm + eps_s
# preference
self.p = self.t - self.sc
# update reliance
if self.mode == 'trust':
if self.t >= self.theta_t + self.theta: # modified threshold
self.rel = 1 # go to auto control
elif self.t <= self.theta_t - self.theta:
self.rel = 0 # go to manual
else:
pass # no swap
elif self.mode == 'preference':
if self.p >= self.theta:
self.rel = 1 # go to auto control
elif self.p <= -self.theta:
self.rel = 0 # go to manual
else:
pass # no swap
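# Update equations implemented above (trust mode, inf_mode == 'always'):
#   belief:      ba <- ba + b1*(ca - ba) +/- b2*(1 - ba)   (+ on agreement, - on disagreement)
#   trust:       t  <- (1 - s_t)*t  + s_t*ba  + N(0, var_t)
#   confidence:  sc <- (1 - s_sc)*sc + s_sc*bm + N(0, var_sc)
#   preference:  p = t - sc
# Reliance switches to automation when t >= theta_t + theta and back to manual when t <= theta_t - theta
# (or thresholds +/- theta on p when mode == 'preference'). With inf_mode == 'reliance', only the channel
# actually in use updates from its capability; the other belief decays toward its initial value at rate b0.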
def update_cap(self, capability_m = 0.0, capability_a = 0.0):
self.cm = capability_m
self.ca = capability_a
def print_self(self):
# prints values of all relevant variables
max_len = max([len(key) for key in self.lookup.keys()])
for ky, val in self.lookup.items():
print("{0}{2}-> {1}".format(ky, self.__dict__[ky], ' '*(max_len - len(ky))))
# do something with the agreement here...
#------------------------------------------
def binom_prb(ga = None, gb = None, n = 25):
ha, la, pha = ga[0], ga[1], ga[2]
hb, lb, phb = gb[0], gb[1], gb[2]
if ha != la and hb != lb:
idx = np.arange(n+1) # indices 0..n
bdd_b = np.ceil( (n*(lb - la) + idx*(hb - lb)) / (ha - la) ).astype(int)
bdd_a = np.ceil( (n*(la - lb) + idx*(ha - la)) / (hb - lb) ).astype(int)
pb = np.sum([binom.pmf(i, n, phb) * binom.pmf(j, n, pha)
for i in range(n+1) for j in range(0, bdd_b[i])])
pa = np.sum([binom.pmf(i, n, pha) * binom.pmf(j, n, phb)
for i in range(n+1) for j in range(0, bdd_a[i])])
pe = 1 - pa - pb # axioms
elif ha == la and hb != lb:
bdd = (n * ha - n * lb) / (hb - lb)
if int(bdd) == bdd:
pa = binom.cdf(bdd-1, n, phb)
else:
pa = binom.cdf(bdd, n, phb)
pe = binom.pmf(bdd, n, phb) # probability of being equal
pb = 1 - pa - pe
elif hb == lb and ha != la:
bdd = (n * hb - n * la) / (ha - la)
if int(bdd) == bdd:
pb = binom.cdf(bdd-1, n, pha)
else:
pb = binom.cdf(bdd, n, pha)
pe = binom.pmf(bdd, n, pha)
pa = 1 - pb - pe
else: # both equal
pa = 0
pb = 0
pe = 1 # equality is only option
# pb, pa : chance of b out scoring a, and vice versa
# print("pa: {}, pb: {}".format(pa, pb))
return [pa, pb, pe]
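# binom_prb compares two two-outcome gambles played n times each: gamble A pays ha with probability
# pha (else la) and gamble B pays hb with probability phb (else lb). It returns [pa, pb, pe], the
# probabilities that A's total payoff beats B's, that B's beats A's, and that they tie, by summing
# the product of the two binomial pmfs over the region where one cumulative payoff exceeds the other
# (the degenerate ha == la / hb == lb cases are handled separately).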
if __name__ == "__main__":
hb = 10
lb = 0
phb = 0.10
ha = 10
la = 0
pha = 0.10
n = 25
M = 25
# print(binom_prb(ga=[ha, la, pha], gb=[hb, lb, phb], n=5))
# print(binom_prb(ga=[ha, la, pha], gb=[hb, lb, phb], n=4))
# print(binom_prb(ga=[ha, la, pha], gb=[hb, lb, phb], n=3))
# print(binom_prb(ga=[ha, la, pha], gb=[hb, lb, phb], n=2))
# print(binom_prb(ga=[ha, la, pha], gb=[hb, lb, phb], n=1))
# print("done")
# ra_set = np.random.choice([ha, la], size = [100000,n], p=[pha, 1 - pha]).sum(1)
# rb_set = np.random.choice([hb, lb], size = [100000,n], p=[phb, 1 - phb]).sum(1)
# print("theoretical prob\t R_a > R_b\t: " + str(pa))
# print("empirical prob\t\t R_a > R_b\t: " + str(np.mean(ra_set > rb_set)))
# print("theoretical prob\t R_b > R_a\t: " + str(pb))
# print("empirical prob\t\t R_b > R_a\t: " + str(np.mean(ra_set < rb_set)))
[Pa, Pb] = np.meshgrid( np.linspace(0,1,M), np.linspace(0,1,M) )
Za = np.zeros_like(Pa)
Zb = np.zeros_like(Pa)
for x in range(Pa.shape[0]):
for y in range(Pb.shape[1]):
[pa, pb, pe] = binom_prb(ga=[ha, la, Pa[x,y]], gb=[hb, lb, Pb[x,y]], n=n)
Zb[x,y] = pb
Za[x,y] = pa
fig = plt.figure(figsize=(5,10))
ax = fig.add_subplot(211)
im = ax.imshow(Za, cmap='viridis')
ax.set_xlabel('pha')
ax.set_ylabel('phb')
ax = fig.add_subplot(212)
ax.set_xlabel('pha')
ax.set_ylabel('phb')
im = ax.imshow(Zb, cmap='cividis')
fig.show()
quit()
# placeholder parameters for the demo interaction below
# M: number of interaction steps
M = 500
reliance = True # assumed starting with automation
human = HumanTrust() # operator
cap_a = human.ca # automation capability
# cap_set = {'a' : 0.5*np.cos(np.linspace(0,4*np.pi,M)) + 0.5, 'm' : 0.0*np.sin(np.linspace(0,4*np.pi,M)-0.5*np.pi) + 0.5}
cap_set = 0.25*np.cos(np.linspace(0,4*np.pi,M)) + 0.45
human.rel = reliance
t_mat = np.zeros([M,1])
p_mat = np.zeros([M,1])
ba_mat = np.zeros([M,1])
d_mat = np.zeros([M,1])
# sc_mat = np.zeros([M,1])
# bm_mat = np.zeros([M,1])
for i in range(0,M):
# interact with environment
# if human.rel:
# # using automation
# cap_a = np.random.rand()
# else:
# # using manual
# cap_m = np.random.rand()
# update capabilities
human.update_cap(capability_a = cap_set[i])
# update belief and preference
human.update(capability_a = cap_set[i])
t_mat[i] = human.t
# sc_mat[i] = human.sc
p_mat[i] = human.p
d_mat[i] = human.rel
ba_mat[i] = human.ba
# bm_mat[i] = human.bm
# # update prior values
# t_last = t
# sc_last = sc
fig = plt.figure(figsize=(10,10))
# plot loss during training
ax = fig.add_subplot(111)
ax.set_title('Trust Model')
ax.plot(0.2*d_mat - 0.1, label = 'choice', linewidth = 2, color = 'gray')
ax.plot(t_mat, label = 'trust', linewidth = 2, color = 'g')
# ax.plot(sc_mat, label = 'conf', linewidth = 2, color = 'b')
ax.plot(ba_mat, label = 'B_A', linewidth = 2, color = 'lightgreen')
# ax.plot(bm_mat, label = 'B_M', linewidth = 2, color = 'lightblue')
# ax.plot(p_mat, label = 'pref', linewidth = 2, color = 'r')
# lines to reference
ax.plot((human.theta_t + human.theta)*np.ones([M,1]), label = 'theta', linewidth = 1.5, color = 'k', linestyle = '--')
ax.plot((human.theta_t - human.theta)*np.ones([M,1]), linewidth = 1.5, color = 'k', linestyle = '--')
ax.plot(cap_set, label = 'cap_a', linewidth = 1.5, color = 'g', linestyle = '--')
# ax.plot(cap_set['m'], label = 'cap_m', linewidth = 1.5, color = 'b', linestyle = '--')
ax.legend(loc='upper right')
ax.grid()
fig.show()
print("reliance ratio {}".format(np.mean(d_mat)))
print("done")
import numpy as np
from .base_model import HumanTrust
from .MC_class import MCTrust
from .base_model import binom_prb
from .ABC_class import ABCTrust
import matplotlib
matplotlib.use('Agg') # select the non-interactive backend before pyplot is imported
import matplotlib.pyplot as plt
import time
import pdb
from CPC18.predictor_net import Predictor
def auto_gambler(game_row = None, b = 0, probs = None):
ha = game_row[3]
pha = game_row[4]
la = game_row[5]
hb = game_row[6]
phb = game_row[7]
lb = game_row[8]
pa, pb, pe = probs[0], probs[1], probs[2]
cap = max(pa, pb) + pe
if pa > pb:
auto_payoff = np.random.choice([ha, la], 1, p=[pha, 1 - pha])
b_out = 0
else:
auto_payoff = np.random.choice([hb, lb], 1, p=[phb, 1 - phb])
b_out = 1
if b == 1:
human_payoff = np.random.choice([hb, lb], 1, p=[phb, 1 - phb])
else:
human_payoff = np.random.choice([ha, la], 1, p=[pha, 1 - pha])
return [b_out, auto_payoff, human_payoff, cap]
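# auto_gambler: the automation recommends whichever gamble has the higher chance of out-scoring the
# other (b_out), samples a payoff for that pick and for the human's own choice b, and reports its
# capability as max(pa, pb) + pe, i.e. the probability its recommendation is at least as good.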
if __name__ == "__main__":
# placeholder experiment parameters
# M: number of Monte Carlo iterations
# A: number of averaging runs
globalSeed = 2
np.random.seed(globalSeed)
# predictor network
predictor = Predictor()
# get cpc information
game_mat = np.load('C:\\Users\\Larkin\\PyLearning\\CPC18\\saved_datasets\\game_set.npy')
# game_mat = game_mat[:5000,:,:]
# pdb.set_trace()
# game_mat row order supposed to be
# ['B', 'Gender', 'Age', 'Ha', 'pHa', 'La', 'Hb', 'pHb', 'Lb', 'Payoff', 'LotNumA','LotNumB']
save_data = True
reset_interval = 30
update_interval = 10 # number of games before an ABC update
update_counter = 0
reset_counter = 0
# abc parameters
ABCSamples = 10000
ABCBatchSize = 100000
ABCThreshold = 1.0
# monte carlo parameters
M = 27 # iterations
A = 1 # averaging runs
L = game_mat.shape[0] # number of games
N = game_mat.shape[1] # number of trials per game
# reliance model variables
t_mat = np.zeros([L,N,M,A])
ba_mat = np.zeros([L,N,M,A])
d_mat = np.zeros([L,N,M,A])
id_mat = np.zeros([L,N,M,A])
c_mat = np.zeros([L,N,M,A])
# result variables
hpo_mat = np.zeros([L,N,M,A]) # human payoff
apo_mat = np.zeros([L,N,M,A]) # automation payoff
po_mat = np.zeros([L,N,M,A]) # received payoff
hsel_mat = np.zeros([L,N,M,A]) # human selection
asel_mat = np.zeros([L,N,M,A]) # automation selection
asug_mat = np.zeros([L,N,M,A]) # automation suggestion
pred_mat = -1*np.ones([L,N,M,A]) # prediction of human selection (-1 for first 5 trials always)
param_mat = [] # hold parameter range information
iter_time = 0
# predstats = {'tp' : np.zeros([M,A]), 'fp' : np.zeros([M,A]), 'tn' : np.zeros([M,A]), 'fn' : np.zeros([M,A])} # for saving model accuracy values
predstats = {'tp' : np.zeros([L,N,M,A]), 'fp' : np.zeros([L,N,M,A]), 'tn' : np.zeros([L,N,M,A]), 'fn' : np.zeros([L,N,M,A])} # for saving model accuracy values
update_points = np.zeros([L,N,M,A])
reset_points = np.zeros([L,N,M,A])
# reset_points = []
total_time = time.time()
# parameterNames = ['s_t','b1','b0','var_t','rel']
parameterNames = ['b2','s_t','theta_t']
parameterRanges = [(0.01, 0.04), (0.1, 0.8), (0.50, 0.20)] # learning model ranges (start, width)
numParams = len(parameterNames)
mcSize = (3,3,3) # side lengths of mc grid
parameterWidth = 0.005 # width of a single parameter range, uniform across parameter set for now
# order matters here! mc model ranges
pMeshGrid = np.meshgrid(np.linspace(0.01, 0.05, mcSize[0]), np.linspace(0.1, 0.9, mcSize[1]), np.linspace(0.50, 0.70, mcSize[2]))
# make parameter ranges for each iteration of the monte carlo
mcRanges = np.zeros([M, numParams, 2])
for m in range(M):
# for each monte carlo iteration
for p in range(numParams):
idx = np.unravel_index(m, (mcSize[0], mcSize[1], mcSize[2]))
mcRanges[m, p, 0] = pMeshGrid[p][idx[2], idx[1], idx[0]]
mcRanges[m, p, 1] = parameterWidth
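# The Monte Carlo grid: mcSize = (3,3,3) gives M = 27 combinations of the three parameters; for each
# iteration m, mcRanges[m, p] holds the (start, width) range that MCTrust will sample parameter p from,
# with a common width of parameterWidth.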
# print(mcRanges)
# pdb.set_trace()
# need to limit mc iterations to the ones we're gonna plot
mcIndexes = [0+1, 3+1, 6+1, 9, 10, 11]
firstStep = mcIndexes[0];
human = MCTrust(parameterNames, mcRanges, firstStep)
human_learning = ABCTrust(parameterNames, parameterRanges, overwrites = None, seed = globalSeed)
# learning human model
print("\nStarting learning human model parameters: ")
human_learning.print_self()
print("\nStarting base human parameters: ")
human.print_self()
# save first parameter range
param_mat.append(human.parameterRanges.copy())
# outer Monte Carlo loop
for m in mcIndexes:
# only step if going another round, bc code is hard
# if (m < M-1):
tmpSet = human.stepSets(m) # step and randomize
param_mat.append(tmpSet.copy())
# pdb.set_trace()
human_learning.fullReset() # need to fix this bit
reset_counter = 0
update_counter = 0
start_time = time.time()
for a in range(A):
for i, gid in enumerate(game_mat): # for each game
# game and predictor information
prbs = binom_prb(ga=[gid[0][3], gid[0][5], gid[0][4]], gb=[gid[0][6], gid[0][8], gid[0][7]], n=5)
predictor_information = np.zeros([predictor.params['time_steps_in'], game_mat.shape[2]])
# update ABC parameters here after each game/update interval
if len(human_learning.observedData) > 0 and update_counter >= update_interval:
# update if have data
if (reset_counter < reset_interval):
# print(human_learning.observedData)
results = human_learning.runABC(samples = ABCSamples, batch_size = ABCBatchSize, threshold = ABCThreshold)
# what was this doing here?
# human_learning.fullReset()
if results:
# print error
[print("error for {} => {}\n".format(ky, np.abs(results.sample_means[ky] - human.__dict__[ky]))) for ky in parameterNames]
update_points[i, j, m, a] = 1
update_counter = 1
else:
update_counter += 1
if reset_counter >= reset_interval: # participants played 30 games in total
print("reinitialize human, mc iteration {}/{}, game {}/{}".format(m, M, i, game_mat.shape[0]))
human.randParameters()
human_learning.fullReset()
reset_points[i, j, m, a] = 1
reset_counter = 1
print("New human parameters: ")
print(human.print_self())
print("New indicator parameters: ")
print(human_learning.print_self())
# print("")
else:
reset_counter += 1
for j, row in enumerate(gid): # for each trial in said game
t_mat[i, j, m, a] = human.t
d_mat[i, j, m, a] = human.rel
id_mat[i, j, m, a] = human_learning.rel
ba_mat[i, j, m, a] = human.ba
c_mat[i, j, m, a] = human.ca
# interact with environment
[b, apo, hpo, cap] = auto_gambler(row, b = row[0], probs = prbs)
if (j >= predictor.params['time_steps_in']): # can start using predictor
prediction = predictor.predict(predictor_information)
pred_mat[i,j,m,a] = prediction # save prediction
# pdb.set_trace()
# if human_learning.rel:
temp_rel = np.random.rand() > 0.5
# if (temp_rel): # the BEST indicator
if (human_learning.rel): # the REAL indicator
suggestion = b # if we think we've got trust, make suggestion
if human.rel:
# true positive
# predstats['tp'][m,a] += 1
predstats['tp'][i,j,m,a] = 1
else:
# false positive # do something to improve future predictions!
# predstats['fp'][m,a] += 1
predstats['fp'][i,j,m,a] = 1
else:
suggestion = prediction # try to match
if human.rel:
# false negative
# predstats['fn'][i,j,m,a][m,a] = 1
predstats['fn'][i,j,m,a] = 1
else:
# true negative
# predstats['tn'][i,j,m,a][m,a] = 1
predstats['tn'][i,j,m,a] = 1
else:
suggestion = b
suggestion = b # WHAT IF WE JUST SUGGESTED THE BEST EVERY TIME???
# THIS IS REALLY THE FINAL TEST NO!?
# update belief and preference based on capability and agreement
human.update(capability_a=cap, agreement=(suggestion == row[0]))
human_learning.update(capability_a=cap, agreement=(suggestion == row[0]))
human_learning.updateInputs(capability_a=cap, agreement=(suggestion == row[0]), reliance = human.rel) # cheating
# find out if suggestion was taken correctly/incorrectly
if human.rel:
po_mat[i, j, m, a] = apo
else:
po_mat[i, j, m, a] = hpo
hpo_mat[i, j, m, a] = hpo
apo_mat[i, j, m, a] = apo
hsel_mat[i, j, m, a] = row[0]
asel_mat[i, j, m, a] = b
asug_mat[i, j, m, a] = suggestion
# save gamble information, roll matrix
predictor_information = np.concatenate([predictor_information, np.array(row)[np.newaxis,:]])
predictor_information = predictor_information[1:,:]
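# roll the predictor's input window: append the current trial row and drop the oldest, so the
# buffer always holds the most recent time_steps_in rows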
# print("\ngame progress: {}/{}...\n".format(i,L-1))
# wrap up averaging runs
# plt.close('all')
print("averaging iteration... {}/{}".format(a,A-1))
print("iteration {}/{} time taken {}".format(m,M,time.time() - start_time))
print("-----------------")
# save all necessary data for later plotting
if save_data:
save_dict = {
'humanPayoff' : hpo_mat,
'autoPayoff' : apo_mat,
'receivedPayoff' : po_mat,
'humanSelection' : hsel_mat,
'autoSelection' : asel_mat,
'autoSuggestion' : asug_mat,
'autoPrediction' : pred_mat,
'humanReliance' : d_mat,
'noisyReliance' : id_mat,
'predictionStats' : predstats,
'beliefData' : ba_mat,
'preferenceData' : t_mat,
'parameterData' : np.array(param_mat),
'updateIntervals' : [reset_interval, reset_points, update_interval, update_points, mcSize],
}
# pdb.set_trace()
np.save("C:\\Users\\Larkin\\PyLearning\\figs\\monte_carlo_data\\data_dict.npy", save_dict)
import matplotlib.pyplot as plt
import numpy as np
import pdb
def getMeanValue(mcIndexes = None, filename = ''):
data_dict = np.load(filename, allow_pickle = True).item()
# pdb.set_trace()
hpo_mat = data_dict['humanPayoff']
apo_mat = data_dict['autoPayoff']
po_mat = data_dict['receivedPayoff']
hsel_mat = data_dict['humanSelection']
asel_mat = data_dict['autoSelection']
asug_mat = data_dict['autoSuggestion']
pred_mat = data_dict['autoPrediction']
d_mat = data_dict['humanReliance']
id_mat = data_dict['noisyReliance']
predstats = data_dict['predictionStats']
param_mat = data_dict['parameterData']
# ba_mat = data_dict['beliefData']
# t_mat = data_dict['preferenceData']
updateIntervals = data_dict['updateIntervals']
# get all update/reset parameters
reset_interval = updateIntervals[0]
reset_points = updateIntervals[1]
update_interval = updateIntervals[2]
update_points = updateIntervals[3]
mcSize = updateIntervals[4]
L = hpo_mat.shape[0]
N = hpo_mat.shape[1]
M = hpo_mat.shape[2]
A = hpo_mat.shape[3]
for f in range(len(mcIndexes)):
mi = mcIndexes[f]
# print("plotting iteration ... {}/{}".format(f,M-1))
# break up y data by reset points
resetIndexes = np.where(reset_points[:,:,mi,0])[0] # i's where we got reset
updateIndexes = np.mod(np.where(update_points[:,:,mi,0])[0], reset_interval) # i's where we got an update
lastIdx = 0
# fill between tweaks
# track line sets
hpo_data = []
apo_data = []
po_data = []
d_data = []
ind_data = []
for counter,i in enumerate(resetIndexes):
# base human performance
hpo_data.append(hpo_mat.sum(1).mean(2)[lastIdx:(i), mi].cumsum(0))
# hpo_data.append(hpo_mat.sum(1).mean(2)[lastIdx:(i), mi])
# pure ADA performance
apo_data.append(apo_mat.sum(1).mean(2)[lastIdx:(i), mi].cumsum(0))
# apo_data.append(apo_mat.sum(1).mean(2)[lastIdx:(i), mi])
# joint performance
po_data.append(po_mat.sum(1).mean(2)[lastIdx:(i), mi].cumsum(0))
# po_data.append(po_mat.sum(1).mean(2)[lastIdx:(i), mi])
# reliance values
d_data.append(d_mat.sum(1).mean(2)[lastIdx:(i), mi])
# Indicator Predictor performance
rels = (d_mat).mean(3)[lastIdx:(i),:,mi]
ids = (id_mat).mean(3)[lastIdx:(i),:,mi]
sels = (hsel_mat).mean(3)[lastIdx:(i),:,mi]
sugs = (asug_mat).mean(3)[lastIdx:(i),:,mi]
tempData = np.zeros(rels.shape)
for j in range(rels.shape[0]):
for k in range(rels.shape[1]):
if (ids[j,k]):
if (rels[j,k]):
tempData[j,k] = 1
else:
tempData[j,k] = 0
else:
if (rels[j,k]):
tempData[j,k] = 0
else:
# tempData[j,k] = 1
if (sugs[j,k] == sels[j,k]):
tempData[j,k] = 1
else:
tempData[j,k] = 0
ind_data.append(tempData.sum(1))
lastIdx = i
hpo_data = np.array(hpo_data)
apo_data = np.array(apo_data)
po_data = np.array(po_data)
d_data = np.array(d_data)
ind_data = np.array(ind_data)
pltTitle = ", ".join([str(ky) + ": " + str(np.round(val[0],2)) for (ky, val) in param_mat[f].items()])
print("calc " + pltTitle + " iteration {}/{} complete...".format(f,M-1))
print("mean reliance: " + str(d_data.mean(0).mean(0)/N))
print("std reliance: " + str(d_data.std(0).mean(0)/N))
print("mean performance: " + str(ind_data.mean(0).mean(0)/N))
def addToPlot(axs, colors = None, mcIndexes = None, filename = '', label = True):
data_dict = np.load(filename, allow_pickle = True).item()
# pdb.set_trace()
hpo_mat = data_dict['humanPayoff']
apo_mat = data_dict['autoPayoff']
po_mat = data_dict['receivedPayoff']
hsel_mat = data_dict['humanSelection']
asel_mat = data_dict['autoSelection']
asug_mat = data_dict['autoSuggestion']
pred_mat = data_dict['autoPrediction']
d_mat = data_dict['humanReliance']
id_mat = data_dict['noisyReliance']
predstats = data_dict['predictionStats']
param_mat = data_dict['parameterData']
# ba_mat = data_dict['beliefData']
# t_mat = data_dict['preferenceData']
updateIntervals = data_dict['updateIntervals']
# get all update/reset parameters
reset_interval = updateIntervals[0]
reset_points = updateIntervals[1]
update_interval = updateIntervals[2]
update_points = updateIntervals[3]
mcSize = updateIntervals[4]
L = hpo_mat.shape[0]
N = hpo_mat.shape[1]
M = hpo_mat.shape[2]
A = hpo_mat.shape[3]
for f in range(len(mcIndexes)):
mi = mcIndexes[f]
# print("plotting iteration ... {}/{}".format(f,M-1))
# break up y data by reset points
resetIndexes = np.where(reset_points[:,:,mi,0])[0] # i's where we got reset
updateIndexes = np.mod(np.where(update_points[:,:,mi,0])[0], reset_interval) # i's where we got an update
lastIdx = 0
# fill between tweaks
# track line sets
hpo_data = []
apo_data = []
po_data = []
d_data = []
ind_data = []
for counter,i in enumerate(resetIndexes):
# base human performance
hpo_data.append(hpo_mat.sum(1).mean(2)[lastIdx:(i), mi].cumsum(0))
# hpo_data.append(hpo_mat.sum(1).mean(2)[lastIdx:(i), mi])
# pure ADA performance
apo_data.append(apo_mat.sum(1).mean(2)[lastIdx:(i), mi].cumsum(0))
# apo_data.append(apo_mat.sum(1).mean(2)[lastIdx:(i), mi])
# joint performance
po_data.append(po_mat.sum(1).mean(2)[lastIdx:(i), mi].cumsum(0))
# po_data.append(po_mat.sum(1).mean(2)[lastIdx:(i), mi])
# reliance values
d_data.append(d_mat.sum(1).mean(2)[lastIdx:(i), mi])
# Indicator Predictor performance
rels = (d_mat).mean(3)[lastIdx:(i),:,mi]
ids = (id_mat).mean(3)[lastIdx:(i),:,mi]
sels = (hsel_mat).mean(3)[lastIdx:(i),:,mi]
sugs = (asug_mat).mean(3)[lastIdx:(i),:,mi]
tempData = np.zeros(rels.shape)
for j in range(rels.shape[0]):
for k in range(rels.shape[1]):
if (ids[j,k]):
if (rels[j,k]):
tempData[j,k] = 1
else:
tempData[j,k] = 0
else:
if (rels[j,k]):
tempData[j,k] = 0
else:
# tempData[j,k] = 1
if (sugs[j,k] == sels[j,k]):
tempData[j,k] = 1
else:
tempData[j,k] = 0
ind_data.append(tempData.sum(1))
lastIdx = i
hpo_data = np.array(hpo_data)
apo_data = np.array(apo_data)
po_data = np.array(po_data)
d_data = np.array(d_data)
ind_data = np.array(ind_data)
# global normalizer
normalizer = np.max(np.append(np.append(hpo_data,apo_data), po_data))
# axs[f].plot(np.arange(reset_interval), hpo_data.mean(0)/normalizer, color = reds, marker = '.', label = "Base")
# axs[f].plot(np.arange(reset_interval), apo_data.mean(0)/normalizer, color = greens, marker = '.', label = "ADA")
# axs[f].plot(np.arange(reset_interval), po_data.mean(0)/normalizer, color = blues, marker = '.', label = "Joint")
if label:
axs[f].plot(np.arange(reset_interval), d_data.mean(0)/(N), color = colors[0], marker = '.', label = "Rel.")
axs[f].plot(np.arange(reset_interval), ind_data.mean(0)/(N), color = colors[1], marker = '.', label = "Perf.")
else:
axs[f].plot(np.arange(reset_interval), d_data.mean(0)/(N), color = colors[0], marker = '.')
axs[f].plot(np.arange(reset_interval), ind_data.mean(0)/(N), color = colors[1], marker = '.')
# plot error polygons
# DECISION MATRIX top line
topEdge = (d_data.mean(0) + d_data.std(0))/(N)
# bottom line
botEdge = (d_data.mean(0) - d_data.std(0))/(N)
topEdge[topEdge > 1] = 1
botEdge[botEdge < 0] = 0
axs[f].fill_between(np.arange(reset_interval), botEdge, topEdge, color = colors[0], alpha = 0.3)
# PERFORMANCE MATRIX top line
topEdge = (ind_data.mean(0) + ind_data.std(0))/(N)
# bottom line
botEdge = (ind_data.mean(0) - ind_data.std(0))/(N)
topEdge[topEdge > 1] = 1
botEdge[botEdge < 0] = 0
axs[f].fill_between(np.arange(reset_interval), botEdge, topEdge, color = colors[1], alpha = 0.3)
# # HUMAN PAYOFF top line
# topEdge = (hpo_data.mean(0) + hpo_data.std(0))/(normalizer)
# # topEdge[topEdge > 1] = 1
# # bottom line
# botEdge = (hpo_data.mean(0) - hpo_data.std(0))/(normalizer)
# # botEdge[botEdge < 0] = 0
# axs[f].fill_between(np.arange(reset_interval), botEdge, topEdge, color = reds, alpha = 0.3)
#
# # ADA PAYOFF top line
# topEdge = (apo_data.mean(0) + apo_data.std(0))/(normalizer)
# # topEdge[topEdge > 1] = 1
# # bottom line
# botEdge = (apo_data.mean(0) - apo_data.std(0))/(normalizer)
# # botEdge[botEdge < 0] = 0
# axs[f].fill_between(np.arange(reset_interval), botEdge, topEdge, color = blues, alpha = 0.3)
#
# # JOINT PAYOFF top line
# topEdge = (po_data.mean(0) + po_data.std(0))/(normalizer)
# # topEdge[topEdge > 1] = 1
# # bottom line
# botEdge = (po_data.mean(0) - po_data.std(0))/(normalizer)
# # botEdge[botEdge < 0] = 0
# axs[f].fill_between(np.arange(reset_interval), botEdge, topEdge, color = greens, alpha = 0.3)
# plot steady state reliance values
axs[f].plot(np.arange(reset_interval), np.ones(reset_interval)*d_data.mean(0).mean(0)/N, linestyle = '--', color = colors[0])
axs[f].plot(np.arange(reset_interval), np.ones(reset_interval)*ind_data.mean(0).mean(0)/N, linestyle = '--', color = colors[1])
# update/reset locations
# [axs[f].plot([pt]*2,[0,1], color = 'blue', linewidth = 2, linestyle = '-') for pt in resetIndexes]
# axs[f].plot([updateIndexes[0]]*2,[0,1], color = 'black', linewidth = 2, linestyle = '--', label = "ABC Update")
# [axs[f].plot([pt]*2,[0,1], color = 'black', linewidth = 2, linestyle = '--') for pt in updateIndexes[1:]]
pltTitle = ", ".join([str(ky) + ": " + str(np.round(val[0],2)) for (ky, val) in param_mat[f+1].items()])
# pltTitle = pltTitle + ", End Rel. = " + str(np.round(d_data.mean(0).mean(0)/N,2))
axs[f].set_title(pltTitle, fontsize = 8)
axs[f].set_xlabel(r"Games")
axs[f].set_ylabel(r"$d,\,\rho$")
axs[f].set_ylim([0,1])
axs[f].set_xlim([0,30])
axs[f].grid(True)
print("plot " + pltTitle + " iteration {}/{} complete...".format(f,M-1))
print("mean reliance: " + str(d_data.mean(0).mean(0)/N))
print("mean performance: " + str(ind_data.mean(0).mean(0)/N))
if __name__ == "__main__":
# load mc data
# get parameters...
# data_dict = np.load("C:\\Users\\Larkin\\PyLearning\\figs\\monte_carlo_data\\data_dict.npy", allow_pickle = True).item()
# updateIntervals = data_dict['updateIntervals']
# # get parameter
# mcSize = updateIntervals[4]
mcSize = [2,3]
# mcSize = [1,1]
mcIndexes = [0+1, 3+1, 6+1, 9, 10, 11] # which indexes of the (3,3,3) mc
# mcIndexes = [1] # which indexes of the (3,3,3) mc
# mcIndexes = range(27)
# payoff comparison line plot
plt.clf()
fig, axs = plt.subplots(mcSize[0],mcSize[1],figsize=(2.5*mcSize[1], 2.5*mcSize[0]), facecolor='w', edgecolor='k', sharey = True, sharex = True)
fig.subplots_adjust(hspace = .5, wspace=.1)
axs = axs.ravel() # python magic i stole
reds = (1,0,0)
greens = (0,1,0)
lightgreens = (0.5,1,0.5)
blues = (0,0,1)
lightblues = (0.5,0.5,1)
grays = (0.5,0.5,0.5)
lightgrays = (0.7,0.7,0.7)
yellows = (0.80,0.70,0.01)
#
# print("naive set")
# addToPlot(axs, colors = [blues, greens], mcIndexes = mcIndexes, filename = "C:\\Users\\Larkin\\PyLearning\\figs\\monte_carlo_data\\plottables_full_naive.npy", label = True)
# print("predictive set")
# addToPlot(axs, colors = [blues, greens], mcIndexes = mcIndexes, filename = "C:\\Users\\Larkin\\PyLearning\\figs\\monte_carlo_data\\plottables_full.npy", label = True)
# markerList = ['.','+','*','x','^','v']
# markerListLength = len(markerList)
#
# axs[2].legend(loc = 'lower right', fontsize = 8)
#
print("naive set")
addToPlot(axs, colors = [blues, greens], mcIndexes = mcIndexes, filename = "C:\\Users\\Larkin\\PyLearning\\figs\\monte_carlo_data\\plottables_full_naive.npy", label = True)
plt.savefig("C:\\Users\\Larkin\\PyLearning\\figs\\naive_final_plot.png", pad_inches = 0.0, dpi = 1000)
print("figure saved")
plt.clf()
print("predictive set")
addToPlot(axs, colors = [blues, greens], mcIndexes = mcIndexes, filename = "C:\\Users\\Larkin\\PyLearning\\figs\\monte_carlo_data\\plottables_full.npy", label = True)
plt.savefig("C:\\Users\\Larkin\\PyLearning\\figs\\final_plot.png", pad_inches = 0.0, dpi = 1000)
print("figure saved")
# getMeanValue(mcIndexes, filename = "C:\\Users\\Larkin\\PyLearning\\figs\\monte_carlo_data\\M27_5000_naive.npy")
# print("predictive set")
# addToPlot(axs, colors = [blues, greens], mcIndexes = mcIndexes, filename = "C:\\Users\\Larkin\\PyLearning\\figs\\monte_carlo_data\\plottables_full.npy", label = True)
# getMeanValue(mcIndexes, filename = "C:\\Users\\Larkin\\PyLearning\\figs\\monte_carlo_data\\M27_5000.npy")
# addToPlot(axs, colors = [lightblues, lightgreens], filename = "C:\\Users\\Larkin\\PyLearning\\figs\\monte_carlo_data\\M9_5000_noPred.npy", label = False)