Best Python code snippet using lemoncheesecake
bankProblem.py
Source:bankProblem.py  
...722		page.th("%0.2f" % y)723		for x in xValues:724			o = xy_to_outcome[(x,y)]725			page.td.open()726			#page.font(markup.oneliner.a("*", href="index.html#%d" % i), color=outcome_to_color(o))727			page.a(markup.oneliner.font("*", color=outcome_to_color(o)), href="index.html#%d" % xy_to_i[(x,y)])728			page.td.close()			729		page.tr.close()730	page.table.close()731	# color graph732	fig = plt.figure()733	ax = fig.add_subplot(111)	734	ax.set_xlabel(xlabel if (xlabel != None) else xName)735	ax.set_ylabel(ylabel if (ylabel != None) else yName)736	colors = [outcome_to_color(o) for o in outcomeList]737	ax.scatter(xList, yList, color=colors, edgecolor=colors)738	page.br()739	imgFilename = "2d_summary.png"740	imgPath = os.path.join(dirname, imgFilename)741	try:                                        742		os.remove(imgPath)743	except OSError:        744		pass   745	plt.savefig(imgPath, format='png', dpi=120)					746	page.img(src=imgFilename)               747	# non-color graph748	fig = plt.figure()749	ax = fig.add_subplot(111)	750	ax.set_xlabel(xlabel if (xlabel != None) else xName)751	ax.set_ylabel(ylabel if (ylabel != None) else yName)752	(markers_labels) = [outcome_to_marker(o) for o in outcomeList]753	by_marker = defaultdict(list)754	[by_marker[marker_label].append((x,y)) for (x,y,marker_label) in zip(xList, yList, markers_labels)]755	for ((marker, label), xyList) in by_marker.items():756		(x_list, y_list) = zip(*xyList)757		ax.scatter(x_list, y_list, marker=marker, label=label, facecolor='none')758	(handles, labels) = ax.get_legend_handles_labels()759	hl = sorted(zip(handles, labels), key=operator.itemgetter(1))760	(handles2, labels2) = zip(*hl)	761	fig.legend(handles2, labels2, 'upper center')	762	if (addLines1): 763		ax.axvline((1.0/beta)-1.0, color='gray')764		ax.plot([xList[0], xList[-1]], [yList[0], yList[-1]], color='gray')765	page.br()766	imgFilename = "2d_summary2.png"767	imgPath = os.path.join(dirname, imgFilename)768	try:                                        769		os.remove(imgPath)770	except OSError:        771		pass   772	plt.savefig(imgPath, format='png', dpi=120)					773	page.img(src=imgFilename)               774	775	filename = os.path.join(dirname, "2d_summary.html")776	f = open(filename, 'w')                             777	f.write(str(page))778	f.close()779	780	return (xList, yList, outcomeList)781	782def outcome_to_color(outcome):783	if (outcome == SOLUTION_CATEGORY_NOINVEST): return 'black'784	if (outcome == SOLUTION_CATEGORY_INVEST): return 'green'785	if (outcome == SOLUTION_CATEGORY_NONCONVERGENCE): return 'red'786	assert(false)787def outcome_to_marker(outcome):788	if (outcome == SOLUTION_CATEGORY_NOINVEST): return ('o', "no loans made")789	if (outcome == SOLUTION_CATEGORY_INVEST): return ('+', "loans made")790	if (outcome == SOLUTION_CATEGORY_NONCONVERGENCE): return ('s', "nonconvergence")791	assert(false)792	793def run_all(dirname, testGenObj):794	result1 = run_test_cases(dirname, testGenObj)795	result2 = generate_plots(dirname, testGenObj)796	result3 = write_test_summary_2d(dirname, testGenObj)797	result4 = plot_test_summary_2d(dirname, testGenObj)798	(filename, xName, yName, xList, yList, outcomes) = simulate_summary(dirname, testGenObj)799	result6 = plot_simulation_summary(dirname, testGenObj, filename)800	801def run_test_cases(dirname, testGenObj, skipWrite=False, skipIfExists=True, usePrevVArray=True, nMaxIters=g.MAX_VITERS, maxTime=g.MAX_TIME, maxV=g.MAX_V):	802	currentVArray = None803	prevRunConverged = False804	for (testName, overrideParams) in 
testGenObj.getOverrideParamsList():			805		newParams = dict(testGenObj.getDefaultParamsDict())806		newParams.update(overrideParams)807		prefix = testGenObj.getFilenamePrefix(testName, newParams)808		path = os.path.join(dirname, prefix) + ".out"809		print(path)810		if (skipIfExists and os.path.exists(path)):811			print("%s exists, skipping" % path)812			continue813		g.reset()814		if ((not usePrevVArray) or (prevRunConverged == False)): currentVArray = None;		# if the previous optimization didn't converge, start from scratch, otherwise, start from previous optimized value815		(iterCode, prevNIter, currentVArray, newVArray, optControls) = test_bank2(plotResult=False, initialVArray=currentVArray, nMaxIters=nMaxIters, maxTime=g.MAX_TIME, maxV=g.MAX_V, overrideParamsDict=newParams)816		prevRunConverged = True if (iterCode == bellman.ITER_RESULT_CONVERGENCE) else False817		if (not skipWrite):818			saveRun(path)819def generate_one_plot(arg):820	(dirname, outPath, prefix, caption, skipIfExists) = arg821	if (not os.path.exists(outPath)):822		print ("not found: %s" % outPath)823		return None824	try:825		plt.ioff()826		loadRun(outPath)827		#G = createGraph(-1)828		# save plots						829		suffixes = ["-V", "-optD", "-inF"]830		plotFns = [plotV, plotOptD, plotFracIn]831		imgList = []832		for (suffix, plotFn) in zip(suffixes, plotFns):833			plotFn(-1, len(g.Grid_P)/2)834			imgFilename = prefix + suffix + ".png"835			imgPath = os.path.join(dirname, imgFilename)836			if (not skipIfExists):837				try:                                        838					os.remove(imgPath)839				except OSError:        840					pass   841				print("writing %s" % imgPath) 842				plt.savefig(imgPath, format='png', dpi=120)				  843			imgList.append(imgFilename)	844		plt.close('all')845		plt.ion()846		return (caption, imgList)847	except AssertionError as err:848		print("exception on %s: " % outPath, err)849		return None850		851def generate_plots(dirname, testGenObj, skipIfExists=False, multiprocess=True, nProcesses=6):852	argList = []853	for (testName, overrideParams) in testGenObj.getOverrideParamsList():			854		newParams = dict(testGenObj.getDefaultParamsDict())855		newParams.update(overrideParams)856		prefix = testGenObj.getFilenamePrefix(testName, newParams)857		outPath = os.path.join(dirname, prefix) + ".out"858		caption = "%s: beta=%f, rSlow=%f, rFast=%f, diff=%f, probSpace=[%f, %f], fastOutFraction=[%f, %f]" % (testName, newParams['beta'], newParams['rSlow'], newParams['rFast'], newParams['rSlow']-newParams['rFast'], newParams['probSpace'][0], newParams['probSpace'][1], newParams['fastOut'][0], newParams['fastOut'][1])859		argList.append((dirname, outPath, prefix, caption, skipIfExists))860		861	if (multiprocess):862		p = multiprocessing.Pool(nProcesses)		863		plotList = p.map(generate_one_plot, argList)864		p.close()865	else:		866		plotList = map(generate_one_plot, argList)867			                                               868	# write html index869	page = markup.page()870	page.init(title="bankProblem")871	page.br( )872						873	for (i, plot) in enumerate(plotList):874		(prefix, imgList) = plot875		page.p(prefix, id="%d" % i)876		for imgFile in imgList:877			page.img(src=imgFile)             878	filename = os.path.join(dirname, "index.html")879	f = open(filename, 'w')                             880	f.write(str(page))881	f.close()882	883# run with current parameters884def test_bank0(**kwargs):885	overrideParamsDict = dict(g.ParamSettings)886	del overrideParamsDict['grid_M']887	del overrideParamsDict['grid_S']888	del 
overrideParamsDict['grid_P']889	return test_bank2(overrideParamsDict=overrideParamsDict, **kwargs)890	891def test_bank1(beta=0.9, rSlow=0.15, rFast=0.02, probSpace=scipy.array([0.5, 0.5]), fastOut=scipy.array([0.7, 0.9]), 892  slowOut=scipy.array([0.1, 0.1]), fastIn=scipy.array([0.8, 0.8]), bankruptcyPenalty=scipy.array([0.0, 0.0, 0.0]), popGrowth=1.0, gridSizeDict=None, **kwargs):893	overrideParamsDict = {'beta':beta, 'rSlow':rSlow, 'rFast':rFast, 'probSpace':probSpace, 'fastOut':fastOut, 'slowOut':slowOut, 'fastIn':fastIn, 'bankruptcyPenalty':bankruptcyPenalty, 'popGrowth':popGrowth, 'gridSizeDict':gridSizeDict}894	return test_bank2(overrideParamsDict=overrideParamsDict, **kwargs)895	896def test_bank2(useValueIter=True, plotResult=True, nMaxIters=g.MAX_VITERS, initialVArray=None, nMultiGrid=2, overrideParamsDict=None, **kwargs):897	time1 = time.time()898	localvars = {}899	900	def postVIterCallbackFn(nIter, currentVArray, newVArray, optControls, stoppingResult):				901		(stoppingDecision, diff) = stoppingResult902		print("iter %d, diff %f" % (nIter, diff))903		localvars[0] = nIter904		# append iteration results to g.IterList905		g.IterList.append({'V': currentVArray, 'd': optControls[0], 'fracIn': optControls[1]})906		g.NIters += 1907	def postPIterCallbackFn(nIter, newVArray, currentPolicyArrayList, greedyPolicyList, stoppingResult):				908		(stoppingDecision, diff) = stoppingResult909		print("iter %d, diff %f" % (nIter, diff))910		localvars[0] = nIter		911	g.setDefaultGridSize()912	if ('gridSizeDict' in overrideParamsDict and overrideParamsDict['gridSizeDict'] != None):913		g.setGridSize(**overrideParamsDict['gridSizeDict'])		914		# TODO: fix this915	(grid_M, grid_S, grid_P) = (g.Grid_M, g.Grid_S, g.Grid_P)916	917	defaultParamsDict = {918	  'beta': 0.9,919	  'rSlow': 0.15,920	  'rFast': 0.10,921	  'probSpace': scipy.array([0.5, 0.5]),922	  'fastOut': scipy.array([0.7, 0.9]),923	  'slowOut': scipy.array([0.1, 0.1]),924	  'fastIn':  scipy.array([0.8, 0.8]),925	  'bankruptcyPenalty': scipy.array([0.0, 0.0, 0.0]),926	  'popGrowth': 1.0927	}928	paramsDict = dict(defaultParamsDict)929	paramsDict.update(overrideParamsDict)930	[beta, rSlow, rFast, probSpace, fastOut, slowOut, fastIn, bankruptcyPenalty, popGrowth] = [paramsDict[x] for x in ['beta', 'rSlow', 'rFast', 'probSpace', 'fastOut', 'slowOut', 'fastIn', 'bankruptcyPenalty', 'popGrowth']]931	print("using params: beta=%f rFast=%f rSlow=%f" % (beta, rFast, rSlow))932	print("probSpace: ", probSpace, " slowOut: ", slowOut, " fastOut: ", fastOut, " fastIn: ", fastIn, " bp: ", bankruptcyPenalty, " popGrowth: ", popGrowth)933	print("M grid: ", (grid_M[-1], len(grid_M)), " S grid: ", (grid_S[-1], len(grid_S)), " P grid: ", (grid_P[-1], len(grid_P)), "d grid size: %d inFrac grid: " % g.D_GRID_SIZE, (g.SLOW_IN_FRAC_MAX, g.SLOW_IN_GRID_SIZE))934	g.ParamSettings = {'beta':beta, 'grid_M':grid_M, 'grid_S':grid_S, 'grid_P':grid_P, 'rFast':rFast, 'rSlow':rSlow, 'slowOut':slowOut, 935	  'fastOut':fastOut, 'fastIn':fastIn, 'probSpace':probSpace, 'bankruptcyPenalty':bankruptcyPenalty, 'popGrowth':popGrowth}936	#beta, rFast, rSlow, slowOut, fastOut, fastIn, probSpace937	params = BankParams(**g.ParamSettings)938	g.Params = params939		940	# initial guess for V: V = M941	if (initialVArray == None):942		initialVArray = scipy.zeros((len(grid_M), len(grid_S), len(grid_P)))943		for (iM, M) in enumerate(grid_M):944			for (iS, S) in enumerate(grid_S):945				for (iP, P) in enumerate(grid_P):946					initialVArray[iM, iS, iP] = M947			#initialVArray[iM,:,:] = M		948			949	
g.IterList.append({'V':initialVArray, 'd':None, 'fracIn':None})950	if (useValueIter == True):951		if (nMultiGrid == None):			952			result = bellman.grid_valueIteration([grid_M, grid_S, grid_P], initialVArray, params, postIterCallbackFn=postVIterCallbackFn, parallel=True, nMaxIters=nMaxIters, **kwargs)953		else:954			# start with coarse grids and progressively get finer.			955			gridList = [grid_M, grid_S, grid_P]956			coarseGridList = []957			for n in reversed(range(nMultiGrid)):958				coarseGridList.append( [scipy.linspace(grid[0], grid[-1], len(grid)/(2**n)) for grid in gridList] )959			result = bellman.grid_valueIteration2(coarseGridList, gridList, initialVArray, params, postIterCallbackFn=postVIterCallbackFn, parallel=True, nMaxIters=nMaxIters, **kwargs)960		(iterCode, nIter, currentVArray, newVArray, optControls) = result961		g.IterResult = iterCode962	else:963		initialPolicyArrayList = bellman.getGreedyPolicy([grid_M, grid_S, grid_P], initialVArray, params, parallel=True)964		result = bellman.grid_policyIteration([grid_M, grid_S, grid_P], initialPolicyArrayList, initialVArray, params, postIterCallbackFn=postPIterCallbackFn, parallel=True)965		(iterCode, nIter, currentVArray, currentPolicyArrayList, greedyPolicyList) = result966		newVArray = currentVArray967		optControls = currentPolicyArrayList968	time2 = time.time()969	nIters = localvars[0]970	print("total time: %f, avg time: %f iter code: %s" % (time2-time1, (time2-time1)/(1+nIters), bellman.iterResultString(iterCode)))971	972	if (plotResult):973		plotV(-1, 1)974		plotOptD(-1, 1)975		plotFracIn(-1, 1)	976	return result977def plotV(nIter, iP):978	setupData = setupGetNextState(nIter)979	colorFn = lambda x: getColor(setupData, x[0], x[1], g.Grid_P[iP])980	currentVArray = g.IterList[nIter]['V']981	fnObj_V = linterp.GetLinterpFnObj([g.Grid_M, g.Grid_S, g.Grid_P], currentVArray)982	fnObj_V2 = lambda x: fnObj_V([x[0], x[1], g.Grid_P[iP]])983	plot3d.plotSurface(g.Grid_M, g.Grid_S, fnObj_V2, xlabel="M", ylabel="L", zlabel="V", colorFn=colorFn)	984def plotOptD(nIter, iP):985	setupData = setupGetNextState(nIter)986	colorFn = lambda x: getColor(setupData, x[0], x[1], g.Grid_P[iP])987	optControl_d = g.IterList[nIter]['d']988	fnObj_optD = linterp.GetLinterpFnObj([g.Grid_M, g.Grid_S, g.Grid_P], optControl_d)989	fnObj_optD2 = lambda x: fnObj_optD([x[0], x[1], g.Grid_P[iP]])990	plot3d.plotSurface(g.Grid_M, g.Grid_S, fnObj_optD2, xlabel="M", ylabel="L", zlabel="opt d", colorFn=colorFn)	991def plotFracIn(nIter, iP):992	setupData = setupGetNextState(nIter)993	colorFn = lambda x: getColor(setupData, x[0], x[1], g.Grid_P[iP])994	optControl_inFrac = g.IterList[nIter]['fracIn']		995	fnObj_optInFrac = linterp.GetLinterpFnObj([g.Grid_M, g.Grid_S, g.Grid_P], optControl_inFrac)996	fnObj_optInFrac2 = lambda x: fnObj_optInFrac([x[0], x[1], g.Grid_P[iP]])997	plot3d.plotSurface(g.Grid_M, g.Grid_S, fnObj_optInFrac2, xlabel="M", ylabel="L", zlabel="opt frac in", colorFn=colorFn)	998# outcome can be 0 (low) or 1 (high)999# returns (M, S)1000def setupGetNextState(n, currentVArray=None, optControl_d=None, optControl_inFrac=None):1001	if (n != None):1002		currentVArray = g.IterList[n]['V']1003		optControl_d = g.IterList[n]['d']1004		optControl_inFrac = g.IterList[n]['fracIn']1005	fnObj_optD = linterp.GetLinterpFnObj([g.Grid_M, g.Grid_S, g.Grid_P], optControl_d)1006	fnObj_optInFrac = linterp.GetLinterpFnObj([g.Grid_M, g.Grid_S, g.Grid_P], optControl_inFrac)		1007	params = BankParams(**g.ParamSettings)1008	params.setPrevIteration([g.Grid_M, g.Grid_S, g.Grid_P], 
currentVArray)1009	return (fnObj_optD, fnObj_optInFrac, params)1010def getNextStateVar(setupData, M, S, P, outcome):1011	(fnObj_optD, fnObj_optInFrac, params) = setupData1012	d = fnObj_optD([M, S, P])1013	inFrac = fnObj_optInFrac([M, S, P])1014	params.setStateVars([M, S, P])	1015	(ev, nextMList, nextSList, nextPList) = params.calc_EV(d, inFrac)1016	return ((nextMList[outcome], nextSList[outcome], nextPList[outcome]), (d, inFrac))1017# coloring states:1018def getColor(setupData, M, S, P):1019	(fnObj_optD, fnObj_optInFrac, params) = setupData1020	d = fnObj_optD([M, S, P])1021	inFrac = fnObj_optInFrac([M, S, P])1022	params.setStateVars([M, S, P])	1023	(ev, nextMList, nextSList, nextPList) = params.calc_EV(d, inFrac)1024	state = None1025	# bankrupt next period1026	if (nextMList[0] <= 0.0 and nextMList[1] <= 0.0):1027		state = g.STATE_BANKRUPT1028	elif (nextMList[0] > 0.0 and nextMList[1] <= 0.0):1029		state = g.STATE_RISKY1030	else:1031		assert(nextMList[0] > 0.0 and nextMList[1] > 0.0)1032		state = g.STATE_SAFE1033	return g.StateToColor[state]1034def plotNextM(n, outcome):1035	setupData = setupGetNextState(n)1036	colorFn = lambda x: getColor(setupData, x[0], x[1], x[2])1037	fnObj_nextM = lambda x: getNextStateVar(setupData, x[0], x[1], x[2], outcome)[0][0]1038	#plot3d.plotSurface(g.Grid_M, g.Grid_S, fnObj_nextM, xlabel="M", ylabel="S", zlabel="next M | outcome %d" % outcome, colorFn=colorFn)	1039def plotNextS(n, outcome):1040	setupData = setupGetNextState(n)1041	colorFn = lambda x: getColor(setupData, x[0], x[1], x[2])1042	fnObj_nextS = lambda x: getNextStateVar(setupData, x[0], x[1], x[2], outcome)[0][1]1043	#plot3d.plotSurface(g.Grid_M, g.Grid_S, fnObj_nextS, xlabel="M", ylabel="S", zlabel="next S | outcome %d" % outcome, colorFn=colorFn)	1044	1045def plotObjectiveFn(params, nIter, state1, state2, state3):1046	prevIterVArray = g.IterList[nIter][0]1047	params.setPrevIteration([g.Grid_M, g.Grid_S, g.Grid_P], prevIterVArray)1048	params.setStateVars([state1, state2, state3])1049	controlGrids = params.getControlGridList([state1, state2, state3])1050	objFn = lambda x: params.objectiveFunction([x[0], x[1], x[2]])1051	#plot3d.plotSurface(controlGrids[0], controlGrids[1], objFn, xlabel="d", ylabel="frac in", zlabel="objFn")	1052# def createGraph(n):	1053	# setupData = setupGetNextState(n)1054	# def fnObj_nextM(x, shock_i):1055		# (next_x, controls) = getNextStateVar(setupData, x[0], x[1], shock_i)1056		# return (next_x[0], controls)	1057	# def fnObj_nextS(x, shock_i): 1058		# (next_x, controls) = getNextStateVar(setupData, x[0], x[1], shock_i)1059		# return (next_x[1], controls)	1060	# gridList = [g.Grid_M, g.Grid_S, g.Grid_P]1061	# coffinState = g.STATE_BANKRUPT1062	# shockList = range(len(g.ParamSettings['probSpace']));		1063	# def nextStateFn(x, shock_i):1064		# return getNextStateVar(setupData, x[0], x[1], shock_i)1065	# def isBankrupt(x):1066		# (M, S) = (x[0], x[1])1067		# return (M <= 0.0)1068	# (isAbsorbedFn, nextIListFn) = markovChain.policyFn_to_transitionFns(gridList, nextStateFn, isBankrupt)1069	# G = markovChain.createGraphFromPolicyFn(gridList, coffinState, shockList, isAbsorbedFn, nextIListFn)1070	# return G1071# plot locus of nodes that are destinations, conditional on shock1072# def plotLocus(G, shock):1073	# def isSuccessor(G, node):1074		# if (node == g.STATE_BANKRUPT):1075			# return False		1076		# predecessors = G.predecessors(node)1077		# if (len(predecessors) == 0):1078			# return False1079		# #print(node, predecessors)1080		# for p in predecessors:1081			# s = 
G.edge[p][node]['shock']1082			# if (s == shock):1083				# return True1084		# return False1085	# markovChain.plotGraphNodes2D(G, isSuccessor, title="locus | %d" % shock, xlabel="M", ylabel="S")1086# def plotPrunedNodes(G):1087	# S = markovChain.pruneZeroIndegree(G)1088	# def in_S(G, node):1089		# return (node in S)1090	# markovChain.plotGraphNodes2D(G, in_S, title="after pruning zero indegree", xlabel="M", ylabel="S")1091		1092class MarkovChain:1093	def __init__(self, initialM, initialS, initialP, VArray, optDArray, optInFracArray):1094		(self.initialM, self.initialS, self.initialP) = (initialM, initialS, initialP)1095		(self.currentM, self.currentS, self.currentP) = (initialM, initialS, initialP)1096		self.VArray = VArray1097		self.optDArray = optDArray1098		self.optInFracArray = optInFracArray1099		self.setupData = setupGetNextState(n=None, currentVArray=VArray, optControl_d=optDArray, optControl_inFrac=optInFracArray)1100		self.fnObj_V = linterp.GetLinterpFnObj([g.Grid_M, g.Grid_S, g.Grid_P], VArray)1101		#self.fnObj_nextM = lambda x, shock_i: getNextStateVar(self.setupData, x[0], x[1], shock_i)[0]1102		#self.fnObj_nextS = lambda x, shock_i: getNextStateVar(self.setupData, x[0], x[1], shock_i)[1]1103	1104	def applyShock(self, shock_i):		1105		((nextM, nextS, nextP), (d, inFrac)) =  getNextStateVar(self.setupData, self.currentM, self.currentS, self.currentP, shock_i)1106		(self.currentM, self.currentS, self.currentP) = (nextM, nextS, nextP)1107		return ((nextM, nextS, nextP), (d, inFrac))1108	def reset(self):1109		(self.currentM, self.currentS, self.currentP) = (self.initialM, self.initialS, self.initialP)1110	1111# probSpace is an array that sums to 1.01112# x is a random number drawn from Uniform[0,1]1113# return the index of probSpace corresponding to x1114def drawFromProbSpace(probSpace, x):1115	c = scipy.cumsum(probSpace)1116	return scipy.sum(c < x)	1117M_ERROR_MAX = 0.0001;		# if M is within this much of 0, truncate to 01118def simulate(n, initialM, initialS, initialP, nSteps=100, bPrint=True, markovChainObj=None):1119	def myprint(*args, **kwargs):1120		if (bPrint): print(*args, **kwargs)		1121	(currentM, currentS, currentP) = (initialM, initialS, initialP)	1122	# simulate directly using policies and transition equations1123	if (markovChainObj == None):1124		currentVArray = g.IterList[n]['V']		1125		optControl_d = g.IterList[n]['d']1126		optControl_inFrac = g.IterList[n]['fracIn']		1127		markovChainObj = MarkovChain(initialM, initialS, initialP, currentVArray, optControl_d, optControl_inFrac)1128	# shocks1129	rvs = scipy.stats.uniform.rvs(size=nSteps)1130	[beta, grid_M, grid_S, grid_P, rFast, rSlow, popGrowth, slowOut, fastOut, fastIn, probSpace] = [g.ParamSettings[x] for x in 1131	  ['beta', 'grid_M', 'grid_S', 'grid_P', 'rFast', 'rSlow', 'popGrowth', 'slowOut', 'fastOut', 'fastIn', 'probSpace']]1132	shockList = [drawFromProbSpace(probSpace, rv) for rv in rvs]1133	# initialize stats list1134	stats = defaultdict(list)1135	stats['M_outside']=[False]1136	stats['S_outside']=[False]1137	stats['P_outside']=[False]1138	1139	for (i, shock) in enumerate(shockList):1140		myprint("initial: M=%.4f, S=%.4f, P=%.4f" % (currentM, currentS, currentP), end="")		1141		# apply shock to initial state		1142		myprint(" -> shock: %d, " % shock, end="")1143		((nextM, nextS, nextP), (d, slow_in_frac)) = markovChainObj.applyShock(shock)1144		myprint("controls: d=%.4f, frac=%.4f" % (d, slow_in_frac), end="")1145		# record statistics1146		stats['M'].append(currentM)1147		stats['S'].append(currentS)1148		
stats['P'].append(currentP)1149		stats['shock'].append(shock)1150		stats['d'].append(d)1151		stats['in_frac'].append(slow_in_frac)1152		1153		# calculate next period's state variables1154		(fast_in_frac, fast_out_frac, slow_out_frac, pop_growth) = (fastIn[shock], fastOut[shock], slowOut[shock], popGrowth)1155		(M, S, P) = (currentM, currentS, currentP)1156		fast_growth = (1.0 + fast_in_frac*P - fast_out_frac) * (1.0 + rFast);1157		nextM_2 = (M - d + slow_out_frac*S - slow_in_frac*S - fast_out_frac + fast_in_frac*P)/fast_growth;1158		nextS_2 = (1.0 + slow_in_frac - slow_out_frac) * S * (1.0 + rSlow) / fast_growth;	1159		nextP_2 = (pop_growth * P) / fast_growth;1160		1161		myprint("  fast_growth = (1.0 + %f*%f - %f) * (%0.3f) = %0.4f" % (fast_in_frac, P, fast_out_frac, 1.0+rFast, fast_growth))1162		myprint("  nextM = %f/%f = %f" % ((M - d + slow_out_frac*S - slow_in_frac*S - fast_out_frac + fast_in_frac*P), fast_growth, nextM_2))1163		myprint("  nextS = %0.4f/%0.4f = %0.4f" % ((1.0 + slow_in_frac - slow_out_frac) * S * (1.0 + rSlow), fast_growth, nextS_2))1164		myprint("  nextP = %0.4f/%0.4f = %0.4f" % (pop_growth*P, fast_growth, nextS_2))1165		myprint("  next: M=%0.4f, S=%0.4f, P=%0.4f" % (nextM, nextS, nextP))		1166		# check if absorbed or outside grid1167		if (nextM <= 0.0):1168			myprint("  -> absorbed")1169			break1170		(M_outside, S_outside, P_outside) = (False, False, False)1171		if (nextM > g.Grid_M[-1]):1172			myprint("  -> M outside grid, set to %0.4f" % g.Grid_M[-1])1173			nextM = g.Grid_M[-1]1174			M_outside = True1175		if (nextS > g.Grid_S[-1]):1176			myprint("  -> S outside grid, set to %0.4f" % g.Grid_S[-1])1177			nextS = g.Grid_S[-1]1178			S_outside = True1179		if (nextP > g.Grid_P[-1]):1180			myprint("  -> P outside grid, set to %0.4f" % g.Grid_P[-1])1181			nextP = g.Grid_P[-1]1182			P_outside = True			1183		# record stats that are conditional on bankruptcy1184		stats['M_outside'].append(M_outside)1185		stats['S_outside'].append(S_outside)1186		stats['P_outside'].append(P_outside)1187		# update state1188		(currentM, currentS, currentP) = (nextM, nextS, nextP)1189	# print statistics1190	stats['nSteps'] = i+11191	stats['V'] = markovChainObj.fnObj_V([initialM, initialS, initialP])1192	myprint ("number of steps: %d" % i)1193	myprint ("average M: %0.4f" % scipy.mean(stats['M']))1194	myprint ("average S: %0.4f" % scipy.mean(stats['S']))1195	myprint ("average P: %0.4f" % scipy.mean(stats['P']))1196	myprint ("average shock: %0.4f" % scipy.mean(stats['shock']))1197	myprint ("M outside grid: %d/%d = %f" % (scipy.sum(stats['M_outside']), i, scipy.mean(stats['M_outside'])))1198	myprint ("S outside grid: %d/%d = %f" % (scipy.sum(stats['S_outside']), i, scipy.mean(stats['S_outside'])))1199	myprint ("P outside grid: %d/%d = %f" % (scipy.sum(stats['P_outside']), i, scipy.mean(stats['P_outside'])))1200	myprint ("average d: %0.4f" % scipy.mean(stats['d']))1201	myprint ("average in_frac: %0.4f" % scipy.mean(stats['in_frac']))1202	myprint ("V: %0.4f" % scipy.mean(stats['V']))1203	return (i, stats)1204def plot_one_sim(initialM, initialS, initialP, nSteps=100):1205	(i, stats) = simulate(-1, initialM, initialS, initialP, nSteps=nSteps, bPrint=False)1206	fig = plt.figure()1207	ax = fig.add_subplot(111)1208	M_series = stats['M']1209	S_series = stats['S']1210	in_frac_series=stats['in_frac']1211	lines = ax.plot(range(len(S_series)), S_series, range(len(M_series)), M_series, 'r--', range(len(in_frac_series)), in_frac_series, 'g-.')1212	ax.set_xlabel("time")1213	ax.legend(lines, (r'$\bar{L}$', 
r'$\bar{M}$', r'$\gamma^L_t$'))1214	1215g_outcome_to_label = {'avg_M': r'mean $m$', 'avg_S': r'mean $l$', 'V': 'V', 'nSteps': 'mean lifetime', 'avg_in_frac': r'mean $\gamma^L_t$'}1216def simulate_multiple(initialM, initialS, initialP, nSteps=1000, nRuns=1000):1217	t1 = time.time()1218	nIter = -11219	runStats = defaultdict(list)1220	currentVArray = g.IterList[nIter]['V']		1221	optControl_d = g.IterList[nIter]['d']1222	optControl_inFrac = g.IterList[nIter]['fracIn']		1223	markovChainObj = MarkovChain(initialM, initialS, initialP, currentVArray, optControl_d, optControl_inFrac)	1224	for i in range(nRuns):1225		markovChainObj.reset()1226		(n, stats) = simulate(nIter, initialM, initialS, initialP, nSteps=nSteps, bPrint=False, markovChainObj=markovChainObj)1227		# mean1228		avg_M = scipy.mean(stats['M'])1229		avg_S = scipy.mean(stats['S'])1230		avg_P = scipy.mean(stats['P'])1231		avg_shock = scipy.mean(stats['shock'])1232		avg_M_outside = scipy.mean(stats['M_outside'])1233		avg_S_outside = scipy.mean(stats['S_outside'])1234		avg_P_outside = scipy.mean(stats['P_outside'])1235		avg_d = scipy.mean(stats['d'])1236		avg_in_frac = scipy.mean(stats['in_frac'])		1237		V = scipy.mean(stats['V'])1238		1239		runStats['nSteps'].append(n)1240		runStats['V'].append(V)1241		runStats['avg_M'].append(avg_M)1242		runStats['avg_S'].append(avg_S)1243		runStats['avg_P'].append(avg_P)1244		runStats['avg_shock'].append(avg_shock)1245		runStats['avg_M_outside'].append(avg_M_outside)1246		runStats['avg_S_outside'].append(avg_S_outside)	1247		runStats['avg_P_outside'].append(avg_P_outside)	1248		runStats['avg_d'].append(avg_d)1249		runStats['avg_in_frac'].append(avg_in_frac)		1250		1251	print("runs=%d, steps=%d" % (nRuns, nSteps))1252	for key in ['nSteps', 'avg_M', 'avg_S', 'avg_P', 'avg_shock', 'avg_M_outside', 'avg_S_outside', 'avg_P_outside', 'avg_d', 'avg_in_frac', 'V']:	            1253		print("mean %s: %f (%f)" % (key, scipy.mean(runStats[key]), scipy.std(runStats[key])))1254	t2 = time.time()1255	print("total time for %d runs: %f avg: %f" % (nRuns, t2-t1, (t2-t1)/nRuns))1256	return runStats1257def simulateTestCase(arg):1258	(test, newParams, outPath, initialM, initialS, initialP, nSteps, nRuns, xName, yName, x, y, recreate) = arg1259	(testName, overrideParams) = test1260	if (not os.path.exists(outPath)):1261		print ("not found: %s" % outPath)1262		return (None, None, None)1263	try:1264		loadRun(outPath)1265		print("%s=%f, %s=%f" % (xName, x, yName, y))1266		simFilename = outPath + ".sim"1267		if (recreate or not os.path.exists(simFilename)):1268			runStats = simulate_multiple(initialM, initialS, initialP, nSteps=nSteps, nRuns=nRuns)1269			f = gzip.open(simFilename, 'wb')1270			pickle.dump((x, y, runStats), f)1271			f.close()1272		else:1273			f = gzip.open(simFilename, 'rb')1274			(x, y, runStats) = pickle.load(f)1275			f.close()1276	except AssertionError as err:1277		print("exception on %s: " % outPath, err)1278	return (x, y, runStats)1279	1280def simulate_summary(dirname, testGenObj, recreate=True, initialM=1, initialS=1, initialP=1, nSteps=1000, nRuns=1000, multiprocess=True, nProcesses=6):	1281	(xList, yList) = ([], [])	1282	outcomes = defaultdict(list)1283	args = []1284	for (testName, overrideParams) in testGenObj.getOverrideParamsList():			1285		newParams = dict(testGenObj.getDefaultParamsDict())1286		newParams.update(overrideParams)1287		prefix = testGenObj.getFilenamePrefix(testName, newParams)1288		outPath = os.path.join(dirname, prefix) + ".out"1289		(xName, yName) = testGenObj.getXYName()1290		(x, y) = 
testGenObj.getXY(newParams)1291		args.append(((testName, overrideParams), newParams, outPath, initialM, initialS, initialP, nSteps, nRuns, xName, yName, x, y, recreate))1292	if (multiprocess):1293		p = multiprocessing.Pool(nProcesses)1294		# run simulations in parallel1295		result = p.map(simulateTestCase, args)1296		p.close()1297	else:1298		# run simulations1299		result = map(simulateTestCase, args)1300	for (x, y, runStats) in result:1301		xList.append(x)1302		yList.append(y)1303		# store sample averages from simulations as outcomes1304		for (key, value) in runStats.items():1305			mean_val_across_runs = scipy.mean(value)1306			outcomes[key].append(mean_val_across_runs)1307		# store 1308	sum_dict = {}	1309	g_dict = g.to_dict()1310	sum_dict['g'] = g_dict1311	sum_dict['initialState'] = (initialM, initialS)1312	sum_dict['xName'] = xName1313	sum_dict['yName'] = yName1314	sum_dict['xList'] = xList1315	sum_dict['yList'] = yList1316	sum_dict['outcomes'] = outcomes1317	filename = "sim_M_%0.2f_S_%0.2f_summary.out" % (initialM, initialS)1318	path = os.path.join(dirname, filename)1319	f = gzip.open(path, 'wb')1320	pickle.dump(sum_dict, f)1321	f.close()1322		1323	return (filename, xName, yName, xList, yList, outcomes)1324import fnmatch	1325def convert_to_gzip():1326	files = os.listdir(".")1327	for file in files:1328		if fnmatch.fnmatch(file, '*.sim'):1329			print(file)1330			f = open(file, 'r')1331			(x, y, runStats) = pickle.load(f)			1332			f.close()		1333			f = gzip.open(file, 'wb')1334			pickle.dump((x, y, runStats), f)			1335			f.close()1336def test_sim1(dirname, initialM=1, initialS=1, recreate=False, beta=0.9, multiprocess=True):1337	x = simulate_summary(dirname, "rSlow", "rFast", initialM=initialM, initialS=initialS, recreate=recreate, generateTestListFn=generateTestList_test_r, generateFnArgs={'beta':beta}, multiprocess=multiprocess)1338def test_sim2(dirname, initialM=1, initialS=1, recreate=False, beta=0.9, multiprocess=True, nProcesses=2):1339	x = simulate_summary(dirname, initialM=initialM, initialS=initialS, recreate=recreate, multiprocess=multiprocess, nProcesses=nProcesses, **test_var_args())1340def test_delta_sim(dirname, **kwargs):1341	x = simulate_summary(dirname, g_TestDelta, initialM=1.1, initialS=1.1, **kwargs)1342def test_delta_sim2(dirname, **kwargs):1343	x = simulate_summary(dirname, g_TestDelta2, initialM=1.1, initialS=1.1, **kwargs)1344	1345def plot_simulation_summary(dirname, testGenObj, filename="sim_M_1.00_S_1.00_summary.out"):1346	(xlabel, ylabel) = testGenObj.getXYGraphLabel()1347	simFilename = os.path.join(dirname, filename)1348	f = gzip.open(simFilename, 'rb')1349	sim_dict = pickle.load(f)1350	f.close()1351	[sim_xName, sim_yName, sim_xList, sim_yList, sim_outcomes, sim_initialState] = [sim_dict[x] for x in ['xName', 'yName', 'xList', 'yList', 'outcomes', 'initialState']]1352	# load summary of solutions1353	solutionFilename = os.path.join(dirname, "solutions.out")1354	f = gzip.open(solutionFilename, 'rb')1355	solution_dict = pickle.load(f)1356	f.close()1357	[sol_xName, sol_yName, sol_xList, sol_yList, sol_outcomeList] = [solution_dict[x] for x in ['xName', 'yName', 'xList', 'yList', 'outcomeList']]	1358	# keep track of points where iteration did not converge1359	nonconverge_points = {}1360	for (x, y, outcome) in zip(sol_xList, sol_yList, sol_outcomeList):1361		if (outcome == SOLUTION_CATEGORY_NONCONVERGENCE):	# did not converge1362			nonconverge_points[(x,y)] = 11363	sol_colors = [outcome_to_color(o) for o in sol_outcomeList]1364	sol_markers_labels = 
[outcome_to_marker(o) for o in sol_outcomeList]1365	# return (xList, yList, valList) with nonconvergence points removed1366	def filter_nonconverge(*args):			1367		result = [item for item in zip(*args) if (not (item[0],item[1]) in nonconverge_points)]1368		if (len(result) > 0):1369			return zip(*result)1370		else:1371			return [] * len(args)1372		1373	page = markup.page()1374	page.init(title="simulation summary, initial state=%f,%f" % sim_initialState)1375	page.p("initial state=%f,%f" % sim_initialState)1376	page.br( )		1377	# text summary of simulations...experiment.py
Source:experiment.py  
...765            smoothed_lengths = pd.Series(lengths).rolling(window=window, center=True, min_periods=1).sum()766            highest_points[outcome] = max(smoothed_lengths / self.total_reads * 100)767        return highest_points768    @memoized_property769    def outcome_to_color(self):770        # To minimize the chance that a color will be used more than once in the same panel in 771        # outcome_stratified_lengths plots, sort color order by highest point.772        # Factored out here so that same colors can be used in svgs.773        color_order = sorted(self.categories_by_frequency, key=self.outcome_highest_points.get, reverse=True)774        return {outcome: f'C{i % 10}' for i, outcome in enumerate(color_order)}775    @memoized_property776    def expected_lengths(self):777        ti = self.target_info778        expected_lengths = {779            'expected\nWT': ti.amplicon_length,780        }781        if ti.clean_HDR_length is not None:782            expected_lengths['expected\nHDR'] = ti.clean_HDR_length783        return expected_lengths...bankProblem_orig.py
Source:bankProblem_orig.py  
...518		page.th("%0.2f" % y)519		for x in xValues:520			o = xy_to_outcome[(x,y)]521			page.td.open()522			#page.font(markup.oneliner.a("*", href="index.html#%d" % i), color=outcome_to_color(o))523			page.a(markup.oneliner.font("*", color=outcome_to_color(o)), href="index.html#%d" % xy_to_i[(x,y)])524			page.td.close()			525		page.tr.close()526	page.table.close()527	# color graph528	fig = plt.figure()529	ax = fig.add_subplot(111)	530	ax.set_xlabel(xlabel if (xlabel != None) else xName)531	ax.set_ylabel(ylabel if (ylabel != None) else yName)532	colors = [outcome_to_color(o) for o in outcomeList]533	ax.scatter(xList, yList, color=colors, edgecolor=colors)534	page.br()535	imgFilename = "2d_summary.png"536	imgPath = os.path.join(dirname, imgFilename)537	try:                                        538		os.remove(imgPath)539	except OSError:        540		pass   541	plt.savefig(imgPath, format='png', dpi=100)					542	page.img(src=imgFilename)               543	# non-color graph544	fig = plt.figure()545	ax = fig.add_subplot(111)	546	ax.set_xlabel(xlabel if (xlabel != None) else xName)547	ax.set_ylabel(ylabel if (ylabel != None) else yName)548	(markers_labels) = [outcome_to_marker(o) for o in outcomeList]549	by_marker = defaultdict(list)550	[by_marker[marker_label].append((x,y)) for (x,y,marker_label) in zip(xList, yList, markers_labels)]551	for ((marker, label), xyList) in by_marker.items():552		(x_list, y_list) = zip(*xyList)553		ax.scatter(x_list, y_list, marker=marker, label=label, facecolor='none')554	(handles, labels) = ax.get_legend_handles_labels()555	hl = sorted(zip(handles, labels), key=operator.itemgetter(1))556	(handles2, labels2) = zip(*hl)	557	fig.legend(handles2, labels2, 'upper center')	558	if (addLines1): 559		ax.axvline((1.0/beta)-1.0, color='gray')560		ax.plot([xList[0], xList[-1]], [yList[0], yList[-1]], color='gray')561	page.br()562	imgFilename = "2d_summary2.png"563	imgPath = os.path.join(dirname, imgFilename)564	try:                                        565		os.remove(imgPath)566	except OSError:        567		pass   568	plt.savefig(imgPath, format='png', dpi=100)					569	page.img(src=imgFilename)               570	571	filename = os.path.join(dirname, "2d_summary.html")572	f = open(filename, 'w')                             573	f.write(str(page))574	f.close()575	576	return (xList, yList, outcomeList)577	578def outcome_to_color(outcome):579	if (outcome == SOLUTION_CATEGORY_NOINVEST): return 'black'580	if (outcome == SOLUTION_CATEGORY_INVEST): return 'green'581	if (outcome == SOLUTION_CATEGORY_NONCONVERGENCE): return 'red'582	assert(false)583def outcome_to_marker(outcome):584	if (outcome == SOLUTION_CATEGORY_NOINVEST): return ('o', "no loans made")585	if (outcome == SOLUTION_CATEGORY_INVEST): return ('+', "loans made")586	if (outcome == SOLUTION_CATEGORY_NONCONVERGENCE): return ('s', "nonconvergence")587	assert(false)588	589# vary pHigh and out frac variance590# def generateTestList_test_var_pHigh(beta=0.9, pHighRange=scipy.linspace(0., 1., 20), outFracLowRange=scipy.linspace(0.3, 0.9, 20)):591	# defaultParams = {592	  # 'beta': beta,593	  # 'rSlow': 0.15,594	  # 'rFast': 0.10,595	  # 'probSpace': scipy.array([0.5, 0.5]),596	  # 'fastOut': scipy.array([0.7, 0.9]),597	  # 'slowOut': scipy.array([0.1, 0.1]),598	  # 'fastIn':  scipy.array([0.8, 0.8]),599	  # 'bankruptcyPenalty': scipy.array([0.0, 0.0, 0.0])600	# }601	# tests = []602	# beta = defaultParams['beta']603	# for pHigh in pHighRange:604		# for outFracLow in outFracLowRange:605			# probSpace = scipy.array([1.0-pHigh, 
pHigh])606			# fastOut = scipy.array([outFracLow, 0.9])607			# tests.append( ("test_var_pHigh", {'probSpace':probSpace, 'fastOut':fastOut}) )608	# # filename generating function609	# def filenameFn(testName, newParams):610		# filename = "%s_pHigh%.2f_fastOut%.2f" % (testName, newParams['probSpace'][1], newParams['fastOut'][0])611		# return filename612		613	# return (defaultParams, tests, filenameFn)614# def test_var_args():615	# dict = {}616	# xName = 'outFracLow'617	# yName = 'pHigh'618	# def getXFn(params):619		# return params['fastOut'][0]620	# def getYFn(params):621		# return params['probSpace'][1]622	# return {'xName':xName, 'yName':yName, 'getXFn':getXFn, 'getYFn':getYFn, 'generateTestListFn':generateTestList_test_var_pHigh}623# # param range 2 (narrower): pHigh=linspace(0.3, 0.8, 20), outFrac=linspace(0.5, 0.9, 20)624# def run_test_var_cases(dirname, pHighMin=0., pHighMax=1., pHighLen=20, outFracLowMin=0.3, outFracLowMax=0.9, outFracLowLen=20, **kwargs):625	# return run_test_cases(dirname, generateTestListFn=generateTestList_test_var_pHigh, 626	  # generateFnArgs={'pHighRange': scipy.linspace(pHighMin, pHighMax, pHighLen), 'outFracLowRange': scipy.linspace(outFracLowMin, outFracLowMax, outFracLowLen)})	  627def run_all(dirname, testGenObj):628	result1 = run_test_cases(dirname, testGenObj)629	result2 = generate_plots(dirname, testGenObj)630	result3 = write_test_summary_2d(dirname, testGenObj)631	(xlabel, ylabel) = testGenObj.getXYGraphLabel()632	result4 = plot_test_summary_2d(dirname, xlabel=xlabel, ylabel=ylabel)633	634def run_test_cases(dirname, testGenObj, skipWrite=False, skipIfExists=True, usePrevVArray=True, nMaxIters=g.MAX_VITERS, maxTime=g.MAX_TIME, maxV=g.MAX_V):	635	currentVArray = None636	prevRunConverged = False637	for (testName, overrideParams) in testGenObj.getOverrideParamsList():			638		newParams = dict(testGenObj.getDefaultParamsDict())639		newParams.update(overrideParams)640		prefix = testGenObj.getFilenamePrefix(testName, newParams)641		path = os.path.join(dirname, prefix) + ".out"642		print(path)643		if (skipIfExists and os.path.exists(path)):644			print("%s exists, skipping" % path)645			continue646		g.reset()647		if ((not usePrevVArray) or (prevRunConverged == False)): currentVArray = None;		# if the previous optimization didn't converge, start from scratch, otherwise, start from previous optimized value648		(iterCode, prevNIter, currentVArray, newVArray, optControls) = test_bank2(plotResult=False, initialVArray=currentVArray, nMaxIters=nMaxIters, maxTime=g.MAX_TIME, maxV=g.MAX_V, overrideParamsDict=newParams)649		prevRunConverged = True if (iterCode == bellman.ITER_RESULT_CONVERGENCE) else False650		if (not skipWrite):651			saveRun(path)652def generate_one_plot(arg):653	(dirname, outPath, prefix, caption, skipIfExists) = arg654	if (not os.path.exists(outPath)):655		print ("not found: %s" % outPath)656		return None657	try:658		plt.ioff()659		loadRun(outPath)660		G = createGraph(-1)661		# save plots						662		suffixes = ["-V", "-optD", "-inF", "-nextM_1", "-nextS_1", "-locus_0", "-locus_1", "-pruned"]663		plotFns = [plotV, plotOptD, plotFracIn, lambda x: plotNextM(-1, 1), lambda x: plotNextS(-1, 1), lambda x: plotLocus(G, 0), lambda x: plotLocus(G, 1), lambda x: plotPrunedNodes(G)]664		imgList = []665		for (suffix, plotFn) in zip(suffixes, plotFns):666			plotFn(-1)667			imgFilename = prefix + suffix + ".png"668			imgPath = os.path.join(dirname, imgFilename)669			if (not skipIfExists):670				try:                                        671					os.remove(imgPath)672	
			except OSError:        673					pass   674				print("writing %s" % imgPath) 675				plt.savefig(imgPath, format='png', dpi=60)				  676			imgList.append(imgFilename)	677		plt.close('all')678		plt.ion()679		return (caption, imgList)680	except AssertionError as err:681		print("exception on %s: " % outPath, err)682		return None683		684def generate_plots(dirname, testGenObj, skipIfExists=False, multiprocess=True, nProcesses=6):685	argList = []686	for (testName, overrideParams) in testGenObj.getOverrideParamsList():			687		newParams = dict(testGenObj.getDefaultParamsDict())688		newParams.update(overrideParams)689		prefix = testGenObj.getFilenamePrefix(testName, newParams)690		outPath = os.path.join(dirname, prefix) + ".out"691		caption = "%s: beta=%f, rSlow=%f, rFast=%f, diff=%f, probSpace=[%f, %f], fastOutFraction=[%f, %f]" % (testName, newParams['beta'], newParams['rSlow'], newParams['rFast'], newParams['rSlow']-newParams['rFast'], newParams['probSpace'][0], newParams['probSpace'][1], newParams['fastOut'][0], newParams['fastOut'][1])692		argList.append((dirname, outPath, prefix, caption, skipIfExists))693		694	if (multiprocess):695		p = multiprocessing.Pool(nProcesses)		696		plotList = p.map(generate_one_plot, argList)697		p.close()698	else:		699		plotList = map(generate_one_plot, argList)700			                                               701	# write html index702	page = markup.page()703	page.init(title="bankProblem")704	page.br( )705						706	for (i, plot) in enumerate(plotList):707		(prefix, imgList) = plot708		page.p(prefix, id="%d" % i)709		for imgFile in imgList:710			page.img(src=imgFile)             711	filename = os.path.join(dirname, "index.html")712	f = open(filename, 'w')                             713	f.write(str(page))714	f.close()715	716def test_bank1(beta=0.9, rSlow=0.15, rFast=0.02, probSpace=scipy.array([0.5, 0.5]), fastOut=scipy.array([0.7, 0.9]), 717  slowOut=scipy.array([0.1, 0.1]), fastIn=scipy.array([0.8, 0.8]), bankruptcyPenalty=scipy.array([0.0, 0.0, 0.0]), **kwargs):718	overrideParamsDict = {'beta':beta, 'rSlow':rSlow, 'rFast':rFast, 'probSpace':probSpace, 'fastOut':fastOut, 'slowOut':slowOut, 'fastIn':fastIn, 'bankruptcyPenalty':bankruptcyPenalty}719	return test_bank2(overrideParamsDict=overrideParamsDict, **kwargs)720	721def test_bank2(useValueIter=True, plotResult=True, nMaxIters=g.MAX_VITERS, initialVArray=None, nMultiGrid=3, overrideParamsDict=None, **kwargs):722	time1 = time.time()723	localvars = {}724	725	def postVIterCallbackFn(nIter, currentVArray, newVArray, optControls, stoppingResult):				726		(stoppingDecision, diff) = stoppingResult727		print("iter %d, diff %f" % (nIter, diff))728		localvars[0] = nIter729		# append iteration results to g.IterList730		g.IterList.append({'V': currentVArray, 'd': optControls[0], 'fracIn': optControls[1]})731		g.NIters += 1732	def postPIterCallbackFn(nIter, newVArray, currentPolicyArrayList, greedyPolicyList, stoppingResult):				733		(stoppingDecision, diff) = stoppingResult734		print("iter %d, diff %f" % (nIter, diff))735		localvars[0] = nIter		736	grid_M = scipy.linspace(0, g.M_MAX, g.M_GRID_SIZE)737	grid_S = scipy.linspace(0, g.S_MAX, g.S_GRID_SIZE)	738	(g.Grid_M, g.Grid_S) = (grid_M, grid_S)739	740	defaultParamsDict = {741	  'beta': 0.9,742	  'rSlow': 0.15,743	  'rFast': 0.10,744	  'probSpace': scipy.array([0.5, 0.5]),745	  'fastOut': scipy.array([0.7, 0.9]),746	  'slowOut': scipy.array([0.1, 0.1]),747	  'fastIn':  scipy.array([0.8, 0.8]),748	  'bankruptcyPenalty': scipy.array([0.0, 0.0, 0.0])749	}750	paramsDict = 
dict(defaultParamsDict)751	paramsDict.update(overrideParamsDict)752	[beta, rSlow, rFast, probSpace, fastOut, slowOut, fastIn, bankruptcyPenalty] = [paramsDict[x] for x in ['beta', 'rSlow', 'rFast', 'probSpace', 'fastOut', 'slowOut', 'fastIn', 'bankruptcyPenalty']]753	print("using params: beta=%f rFast=%f rSlow=%f" % (beta, rFast, rSlow))754	print("probSpace: ", probSpace, " slowOut: ", slowOut, " fastOut: ", fastOut, " fastIn: ", fastIn, " bp: ", bankruptcyPenalty)755	g.ParamSettings = {'beta':beta, 'grid_M':grid_M, 'grid_S':grid_S, 'rFast':rFast, 'rSlow':rSlow, 'slowOut':slowOut, 756	  'fastOut':fastOut, 'fastIn':fastIn, 'probSpace':probSpace, 'bankruptcyPenalty':bankruptcyPenalty}757	#beta, rFast, rSlow, slowOut, fastOut, fastIn, probSpace758	params = BankParams(**g.ParamSettings)759	g.Params = params760		761	# initial guess for V: V = M762	if (initialVArray == None): 763		initialVArray = scipy.zeros((g.M_GRID_SIZE, g.S_GRID_SIZE));	764		for (iM, M) in enumerate(grid_M):765			for (iS, S) in enumerate(grid_S):766				initialVArray[iM, iS] = M767	initialPolicyArrayList = bellman.getGreedyPolicy([grid_M, grid_S], initialVArray, params, parallel=True)768	769	g.IterList.append({'V':initialVArray, 'd':None, 'fracIn':None})770	if (useValueIter == True):771		if (nMultiGrid == None):			772			result = bellman.grid_valueIteration([grid_M, grid_S], initialVArray, params, postIterCallbackFn=postVIterCallbackFn, parallel=True, nMaxIters=nMaxIters, **kwargs)773		else:774			# start with coarse grids and progressively get finer.			775			gridList = [grid_M, grid_S]776			coarseGridList = []777			for n in reversed(range(nMultiGrid)):778				coarseGridList.append( [scipy.linspace(grid[0], grid[-1], len(grid)/(2**n)) for grid in gridList] )779			result = bellman.grid_valueIteration2(coarseGridList, gridList, initialVArray, params, postIterCallbackFn=postVIterCallbackFn, parallel=True, nMaxIters=nMaxIters, **kwargs)780		(iterCode, nIter, currentVArray, newVArray, optControls) = result781		g.IterResult = iterCode782	else:783		result = bellman.grid_policyIteration([grid_M, grid_S], initialPolicyArrayList, initialVArray, params, postIterCallbackFn=postPIterCallbackFn, parallel=True)784		(iterCode, nIter, currentVArray, currentPolicyArrayList, greedyPolicyList) = result785		newVArray = currentVArray786		optControls = currentPolicyArrayList787	time2 = time.time()788	nIters = localvars[0]789	print("total time: %f, avg time: %f iter code: %s" % (time2-time1, (time2-time1)/(1+nIters), bellman.iterResultString(iterCode)))790	791	if (plotResult):792		plotV(-1)793		plotOptD(-1)794		plotFracIn(-1)	795	return result796def plotV(n):797	setupData = setupGetNextState(n)798	colorFn = lambda x: getColor(setupData, x[0], x[1])799	currentVArray = g.IterList[n]['V']800	fnObj_V = linterp.GetLinterpFnObj([g.Grid_M, g.Grid_S], currentVArray)	801	plot3d.plotSurface(g.Grid_M, g.Grid_S, fnObj_V, xlabel="M", ylabel="L", zlabel="V", colorFn=colorFn)	802def plotOptD(n):803	setupData = setupGetNextState(n)804	colorFn = lambda x: getColor(setupData, x[0], x[1])805	optControl_d = g.IterList[n]['d']806	fnObj_optD = linterp.GetLinterpFnObj([g.Grid_M, g.Grid_S], optControl_d)807	plot3d.plotSurface(g.Grid_M, g.Grid_S, fnObj_optD, xlabel="M", ylabel="L", zlabel="opt d", colorFn=colorFn)	808def plotFracIn(n):809	setupData = setupGetNextState(n)810	colorFn = lambda x: getColor(setupData, x[0], x[1])811	optControl_inFrac = g.IterList[n]['fracIn']		812	fnObj_optInFrac = linterp.GetLinterpFnObj([g.Grid_M, g.Grid_S], optControl_inFrac)813	
plot3d.plotSurface(g.Grid_M, g.Grid_S, fnObj_optInFrac, xlabel="M", ylabel="L", zlabel="opt frac in", colorFn=colorFn)	814# outcome can be 0 (low) or 1 (high)815# returns (M, S)816def setupGetNextState(n, currentVArray=None, optControl_d=None, optControl_inFrac=None):817	if (n != None):818		currentVArray = g.IterList[n]['V']819		optControl_d = g.IterList[n]['d']820		optControl_inFrac = g.IterList[n]['fracIn']821	fnObj_optD = linterp.GetLinterpFnObj([g.Grid_M, g.Grid_S], optControl_d)822	fnObj_optInFrac = linterp.GetLinterpFnObj([g.Grid_M, g.Grid_S], optControl_inFrac)		823	params = BankParams(**g.ParamSettings)824	params.setPrevIteration([g.Grid_M, g.Grid_S], currentVArray)825	return (fnObj_optD, fnObj_optInFrac, params)826def getNextStateVar(setupData, M, S, outcome):827	(fnObj_optD, fnObj_optInFrac, params) = setupData828	d = fnObj_optD([M, S])829	inFrac = fnObj_optInFrac([M, S])830	params.setStateVars([M, S])	831	(ev, nextMList, nextSList) = params.calc_EV(d, inFrac)832	return ((nextMList[outcome], nextSList[outcome]), (d, inFrac))833# coloring states:834def getColor(setupData, M, S):835	(fnObj_optD, fnObj_optInFrac, params) = setupData836	d = fnObj_optD([M, S])837	inFrac = fnObj_optInFrac([M, S])838	params.setStateVars([M, S])	839	(ev, nextMList, nextSList) = params.calc_EV(d, inFrac)840	state = None841	# bankrupt next period842	if (nextMList[0] <= 0.0 and nextMList[1] <= 0.0):843		state = g.STATE_BANKRUPT844	elif (nextMList[0] > 0.0 and nextMList[1] <= 0.0):845		state = g.STATE_RISKY846	else:847		assert(nextMList[0] > 0.0 and nextMList[1] > 0.0)848		state = g.STATE_SAFE849	return g.StateToColor[state]850def plotNextM(n, outcome):851	setupData = setupGetNextState(n)852	colorFn = lambda x: getColor(setupData, x[0], x[1])853	fnObj_nextM = lambda x: getNextStateVar(setupData, x[0], x[1], outcome)[0][0]854	plot3d.plotSurface(g.Grid_M, g.Grid_S, fnObj_nextM, xlabel="M", ylabel="S", zlabel="next M | outcome %d" % outcome, colorFn=colorFn)	855def plotNextS(n, outcome):856	setupData = setupGetNextState(n)857	colorFn = lambda x: getColor(setupData, x[0], x[1])858	fnObj_nextS = lambda x: getNextStateVar(setupData, x[0], x[1], outcome)[0][1]859	plot3d.plotSurface(g.Grid_M, g.Grid_S, fnObj_nextS, xlabel="M", ylabel="S", zlabel="next S | outcome %d" % outcome, colorFn=colorFn)	860	861def plotObjectiveFn(params, nIter, state1, state2):862	prevIterVArray = g.IterList[nIter][0]863	params.setPrevIteration([g.Grid_M, g.Grid_S], prevIterVArray)864	params.setStateVars([state1, state2])865	controlGrids = params.getControlGridList([state1, state2])866	objFn = lambda x: params.objectiveFunction([x[0], x[1]])867	plot3d.plotSurface(controlGrids[0], controlGrids[1], objFn, xlabel="d", ylabel="frac in", zlabel="objFn")	868def createGraph(n):	869	setupData = setupGetNextState(n)870	def fnObj_nextM(x, shock_i):871		(next_x, controls) = getNextStateVar(setupData, x[0], x[1], shock_i)872		return (next_x[0], controls)	873	def fnObj_nextS(x, shock_i): 874		(next_x, controls) = getNextStateVar(setupData, x[0], x[1], shock_i)875		return (next_x[1], controls)	876	gridList = [g.Grid_M, g.Grid_S]877	coffinState = g.STATE_BANKRUPT878	shockList = range(len(g.ParamSettings['probSpace']));		879	def nextStateFn(x, shock_i):880		return getNextStateVar(setupData, x[0], x[1], shock_i)881	def isBankrupt(x):882		(M, S) = (x[0], x[1])883		return (M <= 0.0)884	(isAbsorbedFn, nextIListFn) = markovChain.policyFn_to_transitionFns(gridList, nextStateFn, isBankrupt)885	G = markovChain.createGraphFromPolicyFn(gridList, coffinState, 
shockList, isAbsorbedFn, nextIListFn)886	return G887# plot locus of nodes that are destinations, conditional on shock888def plotLocus(G, shock):889	def isSuccessor(G, node):890		if (node == g.STATE_BANKRUPT):891			return False		892		predecessors = G.predecessors(node)893		if (len(predecessors) == 0):894			return False895		#print(node, predecessors)896		for p in predecessors:897			s = G.edge[p][node]['shock']898			if (s == shock):899				return True900		return False901	markovChain.plotGraphNodes2D(G, isSuccessor, title="locus | %d" % shock, xlabel="M", ylabel="S")902def plotPrunedNodes(G):903	S = markovChain.pruneZeroIndegree(G)904	def in_S(G, node):905		return (node in S)906	markovChain.plotGraphNodes2D(G, in_S, title="after pruning zero indegree", xlabel="M", ylabel="S")907		908class MarkovChain:909	def __init__(self, initialM, initialS, VArray, optDArray, optInFracArray):910		(self.initialM, self.initialS) = (initialM, initialS)911		(self.currentM, self.currentS) = (initialM, initialS)912		self.VArray = VArray913		self.optDArray = optDArray914		self.optInFracArray = optInFracArray915		self.setupData = setupGetNextState(n=None, currentVArray=VArray, optControl_d=optDArray, optControl_inFrac=optInFracArray)916		self.fnObj_V = linterp.GetLinterpFnObj([g.Grid_M, g.Grid_S], VArray)917		#self.fnObj_nextM = lambda x, shock_i: getNextStateVar(self.setupData, x[0], x[1], shock_i)[0]918		#self.fnObj_nextS = lambda x, shock_i: getNextStateVar(self.setupData, x[0], x[1], shock_i)[1]919	920	def applyShock(self, shock_i):		921		((nextM, nextS), (d, inFrac)) =  getNextStateVar(self.setupData, self.currentM, self.currentS, shock_i)922		(self.currentM, self.currentS) = (nextM, nextS)923		return ((nextM, nextS), (d, inFrac))924	def reset(self):925		self.currentM = self.initialM926		self.currentS = self.initialS927	928	929# probSpace is an array that sums to 1.0930# x is a random number drawn from Uniform[0,1]931# return the index of probSpace corresponding to x932def drawFromProbSpace(probSpace, x):933	c = scipy.cumsum(probSpace)934	return scipy.sum(c < x)935	936def testSim(n, initialNode, nSteps=100):937	G = createGraph(n)938	(iM, iS) = initialNode939	# simulate using networkx graph940	sim = markovChain.Simulation(G, initialNode)941	# simulate directly using policies and transition equations942	currentVArray = g.IterList[n]['V']943	optControl_d = g.IterList[n]['d']944	optControl_inFrac = g.IterList[n]['fracIn']945	currentM = g.Grid_M[iM]946	currentS = g.Grid_S[iS]947	mc = MarkovChain(currentM, currentS, currentVArray, optControl_d, optControl_inFrac)948	# shocks949	rvs = scipy.stats.uniform.rvs(size=nSteps)950	[beta, grid_M, grid_S, rFast, rSlow, slowOut, fastOut, fastIn, probSpace] = [g.ParamSettings[x] for x in 951	  ['beta', 'grid_M', 'grid_S', 'rFast', 'rSlow', 'slowOut', 'fastOut', 'fastIn', 'probSpace',]]952	shockList = [drawFromProbSpace(probSpace, rv) for rv in rvs]953	coffinState = G.graph['coffinState']954	for (i, shock) in enumerate(shockList):955		print ("initial: M=%.4f, S=%.4f" % (currentM, currentS), end="")956		# apply shock to initial state		957		print (" -> shock: %d, " % shock, end="")958		print ("controls: d=%.4f, frac=%.4f" % (optControl_d[iM, iS], optControl_inFrac[iM, iS]))959		sim.applyShock(shock)960		mc.applyShock(shock)961		962		# record next state963		(currentNode, controls) = (sim.currentNode, sim.currentControls)964		965		if (currentNode == coffinState):966			print("  -> absorbed")967			return i968		M = G.graph['gridList'][0][currentNode[0]]969		S = 
G.graph['gridList'][1][currentNode[1]]970		print("  -> M=%0.4f, S=%0.4f" % (mc.currentM, mc.currentS))971		(iM, iS) = currentNode972		currentM = g.Grid_M[iM]973		currentS = g.Grid_S[iS]		974	return i975M_ERROR_MAX = 0.0001;		# if M is within this much of 0, truncate to 0976def simulate(n, initialM, initialS, nSteps=100, bPrint=True, markovChainObj=None):977	def myprint(*args, **kwargs):978		if (bPrint): print(*args, **kwargs)		979	(currentM, currentS) = (initialM, initialS)	980	# simulate directly using policies and transition equations981	if (markovChainObj == None):982		currentVArray = g.IterList[n]['V']		983		optControl_d = g.IterList[n]['d']984		optControl_inFrac = g.IterList[n]['fracIn']		985		markovChainObj = MarkovChain(initialM, initialS, currentVArray, optControl_d, optControl_inFrac)986	# shocks987	rvs = scipy.stats.uniform.rvs(size=nSteps)988	[beta, grid_M, grid_S, rFast, rSlow, slowOut, fastOut, fastIn, probSpace] = [g.ParamSettings[x] for x in 989	  ['beta', 'grid_M', 'grid_S', 'rFast', 'rSlow', 'slowOut', 'fastOut', 'fastIn', 'probSpace']]990	shockList = [drawFromProbSpace(probSpace, rv) for rv in rvs]991	# initialize stats list992	stats = defaultdict(list)993	stats['M_outside']=[False]994	stats['S_outside']=[False]995	996	for (i, shock) in enumerate(shockList):997		myprint("initial: M=%.4f, S=%.4f" % (currentM, currentS), end="")		998		# apply shock to initial state		999		myprint(" -> shock: %d, " % shock, end="")1000		((nextM, nextS), (d, slow_in_frac)) = markovChainObj.applyShock(shock)1001		myprint("controls: d=%.4f, frac=%.4f" % (d, slow_in_frac), end="")1002		# record statistics1003		stats['M'].append(currentM)1004		stats['S'].append(currentS)1005		stats['shock'].append(shock)1006		stats['d'].append(d)1007		stats['in_frac'].append(slow_in_frac)1008		1009		# calculate next period's state variables1010		(fast_in_frac, fast_out_frac, slow_out_frac) = (fastIn[shock], fastOut[shock], slowOut[shock])1011		(M, S) = (currentM, currentS)1012		fast_growth = (1.0 + fast_in_frac - fast_out_frac) * (1.0 + rFast);1013		nextM_2 = (M - d + slow_out_frac*S - slow_in_frac*S - fast_out_frac + fast_in_frac)/fast_growth;1014		nextS_2 = (1.0 + slow_in_frac - slow_out_frac) * S * (1.0 + rSlow) / fast_growth;	1015		1016		myprint("  fast_growth = (1.0 + %f - %f) * (%0.3f) = %0.4f" % (fast_in_frac, fast_out_frac, 1.0+rFast, fast_growth))1017		myprint("  nextM = %f/%f = %f" % ((M - d + slow_out_frac*S - slow_in_frac*S - fast_out_frac + fast_in_frac), fast_growth, nextM_2))1018		myprint("  nextS = %0.4f/%0.4f = %0.4f" % ((1.0 + slow_in_frac - slow_out_frac) * S * (1.0 + rSlow), fast_growth, nextS_2))1019		myprint("  next: M=%0.4f, S=%0.4f" % (nextM, nextS))		1020		# check if absorbed or outside grid1021		if (nextM <= 0.0):1022			myprint("  -> absorbed")1023			break1024		(M_outside, S_outside) = (False, False)1025		if (nextM > g.Grid_M[-1]):1026			myprint("  -> M outside grid, set to %0.4f" % g.Grid_M[1])1027			nextM = g.Grid_M[-1]1028			M_outside = True1029		if (nextS > g.Grid_S[-1]):1030			myprint("  -> S outside grid, set to %0.4f" % g.Grid_S[1])1031			nextS = g.Grid_S[-1]1032			S_outside = True1033		# record stats that are conditional on bankruptcy1034		stats['M_outside'].append(M_outside)1035		stats['S_outside'].append(S_outside)1036		# update state1037		(currentM, currentS) = (nextM, nextS)1038	# print statistics1039	stats['nSteps'] = i+11040	stats['V'] = markovChainObj.fnObj_V((initialM, initialS))1041	myprint ("number of steps: %d" % i)1042	myprint ("average M: %0.4f" % 
def simulate_multiple(initialM, initialS, nSteps=1000, nRuns=1000):
    t1 = time.time()
    nIter = -1
    runStats = defaultdict(list)
    currentVArray = g.IterList[nIter]['V']
    optControl_d = g.IterList[nIter]['d']
    optControl_inFrac = g.IterList[nIter]['fracIn']
    markovChainObj = MarkovChain(initialM, initialS, currentVArray, optControl_d, optControl_inFrac)
    for i in range(nRuns):
        markovChainObj.reset()
        (n, stats) = simulate(nIter, initialM, initialS, nSteps=nSteps, bPrint=False, markovChainObj=markovChainObj)
        # per-run sample means
        avg_M = scipy.mean(stats['M'])
        avg_S = scipy.mean(stats['S'])
        avg_shock = scipy.mean(stats['shock'])
        avg_M_outside = scipy.mean(stats['M_outside'])
        avg_S_outside = scipy.mean(stats['S_outside'])
        avg_d = scipy.mean(stats['d'])
        avg_in_frac = scipy.mean(stats['in_frac'])
        V = scipy.mean(stats['V'])

        runStats['nSteps'].append(n)
        runStats['V'].append(V)
        runStats['avg_M'].append(avg_M)
        runStats['avg_S'].append(avg_S)
        runStats['avg_shock'].append(avg_shock)
        runStats['avg_M_outside'].append(avg_M_outside)
        runStats['avg_S_outside'].append(avg_S_outside)
        runStats['avg_d'].append(avg_d)
        runStats['avg_in_frac'].append(avg_in_frac)

    print("runs=%d, steps=%d" % (nRuns, nSteps))
    for key in ['nSteps', 'avg_M', 'avg_S', 'avg_shock', 'avg_M_outside', 'avg_S_outside', 'avg_d', 'avg_in_frac']:
        print("mean %s: %f (%f)" % (key, scipy.mean(runStats[key]), scipy.std(runStats[key])))
    t2 = time.time()
    print("total time for %d runs: %f avg: %f" % (nRuns, t2-t1, (t2-t1)/nRuns))
    return runStats

def simulateTestCase(arg):
    (test, newParams, outPath, initialM, initialS, nSteps, nRuns, xName, yName, x, y, recreate) = arg
    (testName, overrideParams) = test
    runStats = None  # default in case loading the solution fails
    if (not os.path.exists(outPath)):
        print("not found: %s" % outPath)
        return (None, None, None)
    try:
        loadRun(outPath)
        print("%s=%f, %s=%f" % (xName, x, yName, y))
        simFilename = outPath + ".sim"
        if (recreate or not os.path.exists(simFilename)):
            runStats = simulate_multiple(initialM, initialS, nSteps=nSteps, nRuns=nRuns)
            f = gzip.open(simFilename, 'wb')
            pickle.dump((x, y, runStats), f)
            f.close()
        else:
            f = gzip.open(simFilename, 'rb')
            (x, y, runStats) = pickle.load(f)
            f.close()
    except AssertionError as err:
        print("exception on %s: " % outPath, err)
    return (x, y, runStats)
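# Illustrative sketch (added for clarity; not part of the original module): the compute-or-load
# caching pattern used by simulateTestCase above, reduced to its essentials. The path and the
# compute() callable are placeholders, not names from this module.
def _example_cached_result(path, compute, recreate=False):
    if recreate or not os.path.exists(path):
        result = compute()          # expensive step, e.g. a call to simulate_multiple(...)
        f = gzip.open(path, 'wb')
        pickle.dump(result, f)      # cache the result as a gzipped pickle
        f.close()
    else:
        f = gzip.open(path, 'rb')
        result = pickle.load(f)     # reuse the cached result
        f.close()
    return result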
prefix) + ".out"1120		(xName, yName) = testGenObj.getXYName()1121		(x, y) = testGenObj.getXY(newParams)1122		args.append(((testName, overrideParams), newParams, outPath, initialM, initialS, nSteps, nRuns, xName, yName, x, y, recreate))1123	if (multiprocess):1124		p = multiprocessing.Pool(nProcesses)1125		# run simulations in parallel1126		result = p.map(simulateTestCase, args)1127		p.close()1128	else:1129		# run simulations1130		result = map(simulateTestCase, args)1131	for (x, y, runStats) in result:1132		xList.append(x)1133		yList.append(y)1134		# store sample averages from simulations as outcomes1135		for (key, value) in runStats.items():1136			mean_val_across_runs = scipy.mean(value)1137			outcomes[key].append(mean_val_across_runs)1138		# store 1139	sum_dict = {}	1140	g_dict = g.to_dict()1141	sum_dict['g'] = g_dict1142	sum_dict['initialState'] = (initialM, initialS)1143	sum_dict['xName'] = xName1144	sum_dict['yName'] = yName1145	sum_dict['xList'] = xList1146	sum_dict['yList'] = yList1147	sum_dict['outcomes'] = outcomes1148	summaryFilename = os.path.join(dirname, "sim_M_%0.2f_S_%0.2f_summary.out" % (initialM, initialS))1149	f = gzip.open(summaryFilename, 'wb')1150	pickle.dump(sum_dict, f)1151	f.close()1152		1153	return (xName, yName, xList, yList, outcomes)1154import fnmatch	1155def convert_to_gzip():1156	files = os.listdir(".")1157	for file in files:1158		if fnmatch.fnmatch(file, '*.sim'):1159			print(file)1160			f = open(file, 'r')1161			(x, y, runStats) = pickle.load(f)			1162			f.close()		1163			f = gzip.open(file, 'wb')1164			pickle.dump((x, y, runStats), f)			1165			f.close()1166def test_sim1(dirname, initialM=1, initialS=1, recreate=False, beta=0.9, multiprocess=True):1167	x = simulate_summary(dirname, "rSlow", "rFast", initialM=initialM, initialS=initialS, recreate=recreate, generateTestListFn=generateTestList_test_r, generateFnArgs={'beta':beta}, multiprocess=multiprocess)1168def test_sim2(dirname, initialM=1, initialS=1, recreate=False, beta=0.9, multiprocess=True, nProcesses=2):1169	x = simulate_summary(dirname, initialM=initialM, initialS=initialS, recreate=recreate, multiprocess=multiprocess, nProcesses=nProcesses, **test_var_args())1170def test_delta_sim(dirname, **kwargs):1171	x = simulate_summary(dirname, g_TestDelta, initialM=1.1, initialS=1.1, **kwargs)1172def test_delta_sim2(dirname, **kwargs):1173	x = simulate_summary(dirname, g_TestDelta2, initialM=1.1, initialS=1.1, **kwargs)1174	1175def plot_simulation_summary(dirname, prefix="sim_summary", xlabel=None, ylabel=None):1176	simFilename = os.path.join(dirname, prefix + ".out")1177	f = gzip.open(simFilename, 'rb')1178	sim_dict = pickle.load(f)1179	f.close()1180	[sim_xName, sim_yName, sim_xList, sim_yList, sim_outcomes, sim_initialState] = [sim_dict[x] for x in ['xName', 'yName', 'xList', 'yList', 'outcomes', 'initialState']]1181	# load summary of solutions1182	solutionFilename = os.path.join(dirname, "solutions.out")1183	f = gzip.open(solutionFilename, 'rb')1184	solution_dict = pickle.load(f)1185	f.close()1186	[sol_xName, sol_yName, sol_xList, sol_yList, sol_outcomeList] = [solution_dict[x] for x in ['xName', 'yName', 'xList', 'yList', 'outcomeList']]	1187	# keep track of points where iteration did not converge1188	nonconverge_points = {}1189	for (x, y, outcome) in zip(sol_xList, sol_yList, sol_outcomeList):1190		if (outcome == SOLUTION_CATEGORY_NONCONVERGENCE):	# did not converge1191			nonconverge_points[(x,y)] = 11192	sol_colors = [outcome_to_color(o) for o in sol_outcomeList]1193	sol_markers_labels = 
[outcome_to_marker(o) for o in sol_outcomeList]1194	# return (xList, yList, valList) with nonconvergence points removed1195	def filter_nonconverge(*args):			1196		result = [item for item in zip(*args) if (not (item[0],item[1]) in nonconverge_points)]1197		if (len(result) > 0):1198			return zip(*result)1199		else:1200			return [] * len(args)1201		1202	page = markup.page()1203	page.init(title="simulation summary, initial state=%f,%f" % sim_initialState)1204	page.p("initial state=%f,%f" % sim_initialState)1205	page.br( )		1206	# text summary of simulations...calc_fig5_clustering.py
Source:calc_fig5_clustering.py  
# imports and definitions
import pandas as pd
import numpy as np
from scipy.stats import ttest_ind, mannwhitneyu
import matplotlib as mpl
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn import preprocessing
import os
import cPickle as pickle
from ShaniBA.GeneralFeaturePhenotypeInteractions.Feature_phenotype_functions import add_corrected_pValues,\
    compare_TCR_between_groups, calc_PCA

GENIE_BASE_DIR = '/net/mraid08/export/genie/Lab/Personal/ShaniBAF/'
PRED_RESULTS_DIR = '/net/mraid08/export/jafar/Microbiome/Analyses/ShaniBAF/predictions2/'
DATA_DIR = '/net/mraid08/export/jafar/Microbiome/Analyses/TCR/'
CARDIO_PHEN_DIR = '/net/mraid08/export/genie/Lab/Personal/ShaniBAF/TCR_real_data/CardioSamples/phenotypicData/'
FIGURE_DIR = GENIE_BASE_DIR + 'Presentations and Manuscripts/CardioTCR paper/June2019/fig5_outcomes/calc_fig5/'

with open('/net/mraid08/export/genie/Lab/Personal/ShaniBAF/Sample files/BD lists/Cardio126', 'rb') as fp:
    Cardio126 = pickle.load(fp)

def get_suffix(only_productive, only_annotation, only_calc_features, filter_negatives_only):
    d = {'only_productive': only_productive,
         'only_annotation': only_annotation,
         'only_calc_features': only_calc_features,
         'filter_negatives_only': filter_negatives_only}
    suffix = ''
    for k, v in d.items():
        if v: suffix = suffix + '_' + k
    return suffix

def get_productive_only(df):
    col_list = [col for col in df.columns if col.endswith('_1')]
    return df[col_list]

def get_annotation_only(df):
    col_list = [col for col in df.columns if 'cum_freq' in col]
    return df[col_list]

def get_calc_features_only(df):
    col_list = df.columns.tolist()
    for prefix in ['V', 'D', 'J']:
        col_list = [col for col in col_list if not col.startswith(prefix)]
        col_list = [col for col in col_list if 'rel' not in col]
    return df[col_list]

def filter_by_followup_length(df, outcome_df, filter_negatives_only=True, length_limit=547, follow_up_end="2019-05-13"):
    # length_limit=547 days is roughly 1.5 years
    outcome = outcome_df.columns.tolist()[0]
    follow_up_end_str = follow_up_end.replace('_', '')
    fu = pd.read_excel(CARDIO_PHEN_DIR + 'follow_up_period_length_%s.xlsx' % follow_up_end_str).set_index('BD')
    df = pd.merge(df, fu, how='left', left_index=True, right_index=True)
    df = pd.merge(df, outcome_df, how='left', left_index=True, right_index=True)
    if filter_negatives_only:
        df_negatives = df[(df['follow_up_period_length'] > length_limit) & (df[outcome] == 0)]
        df_positives = df[df[outcome] == 1]
        df = pd.concat([df_negatives, df_positives])
    else:
        df = df[df['follow_up_period_length'] > length_limit]
    return df
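# Illustrative sketch (added for clarity; not part of the original script): the filtering rule
# implemented by filter_by_followup_length, shown on a toy frame. When filter_negatives_only is
# True, negative patients are kept only if their follow-up exceeds length_limit, while positive
# patients are kept regardless of follow-up length. All values below are made up.
def _example_filter_negatives_only():
    toy = pd.DataFrame({'follow_up_period_length': [700, 300, 200],
                        'outcome': [0, 0, 1]},
                       index=['BD1', 'BD2', 'BD3'])
    keep_negatives = toy[(toy['follow_up_period_length'] > 547) & (toy['outcome'] == 0)]
    keep_positives = toy[toy['outcome'] == 1]
    kept = pd.concat([keep_negatives, keep_positives])
    return kept.index.tolist()  # ['BD1', 'BD3']: BD2 is a short-follow-up negative and is dropped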
def get_patients_feature_df(patient_list, only_productive=False, only_annotation=False, only_calc_features=False):
    feature_df = pd.read_excel(DATA_DIR +
        'TCRfeatures/TCRfeaturesNoT_PCA10percShared_withRels_VDJ_noCorr0-999_nanFilled_noConsts_Cardio126.xlsx').set_index('BD')
    feature_df = feature_df.loc[patient_list, :]
    if only_productive: feature_df = get_productive_only(feature_df)
    elif only_annotation: feature_df = get_annotation_only(feature_df)
    elif only_calc_features: feature_df = get_calc_features_only(feature_df)
    return feature_df

def compare_TCR_features(feature_df, outcome_df, filter_negatives_only, filter_by_fu_length=None):
    if filter_by_fu_length is not None:
        feature_df = filter_by_followup_length(feature_df, outcome_df, filter_negatives_only=filter_negatives_only,
                                               length_limit=filter_by_fu_length)
    outcome = outcome_df.columns.tolist()[0]
    groupName1 = outcome + '_0'; groupName2 = outcome + '_1'
    TCRdf_binary = pd.DataFrame()
    negatives = outcome_df[outcome_df[outcome] == 0].index.tolist()
    positives = outcome_df[outcome_df[outcome] == 1].index.tolist()
    sample_list1 = [BD for BD in feature_df.index if BD in negatives]
    sample_list2 = [BD for BD in feature_df.index if BD in positives]
    print ('# patients in %s is %s ' % (groupName1, len(sample_list1)))
    print ('# patients in %s is %s ' % (groupName2, len(sample_list2)))
    feature_df = feature_df.drop(outcome, axis=1)
    feature_comparison_df, TCR_comparison_df = compare_TCR_between_groups(sample_list1, groupName1,
                                sample_list2, groupName2, feature_df, TCRdf_binary,
                                compare_features=True, compare_seqs=False)
    return feature_comparison_df

def normalize_df(df):
    return pd.DataFrame(index=df.index, columns=df.columns, data=preprocessing.scale(df, copy=False))

def get_outcome(outcome):
    outcome_df = pd.read_excel(CARDIO_PHEN_DIR + 'outcomeDF.xlsx').set_index('BD')
    return pd.DataFrame(outcome_df[outcome])

def get_patients_clusters(patient_list, outcome_df, do_binary=True, filter_by_fu_length=None, filter_negatives_only=False):
    print ('loading TCR cluster file...')
    cluster_df = pd.read_pickle(DATA_DIR + 'TCR_seqs_clusters/newX_onlySeqs_040_085_noNans.dat')
    cluster_df = cluster_df.loc[patient_list, :]
    if filter_by_fu_length is not None:
        cluster_df = filter_by_followup_length(cluster_df, outcome_df, filter_negatives_only, length_limit=filter_by_fu_length)
    if do_binary:
        print ('binarizing file...')
        cluster_df = (cluster_df > 0).astype(int)
    print ('Done!')
    return cluster_df

def gen_clustermap(df, only_partial=False, filter_by_fu_length=None,
                   do_cluster=True, filter_negatives_only=False,
                   figsize_frac=None, outcome_to_color=None):
    row_cluster = True; col_cluster = True
    if not do_cluster:
        row_cluster = False
        col_cluster = True
    outcome = outcome_to_color.columns.tolist()[0]
    df = pd.merge(df, outcome_to_color, how='inner', left_index=True, right_index=True)
    color_map = {0: 'b', 1: 'r', np.nan: 'g'}
    row_colors = df.iloc[:, -1].map(color_map)
    if only_partial:
        df = df.iloc[:10, :10]
    print ('df shape before filtering by fu period:', df.shape)
    if filter_by_fu_length is not None:
        df = filter_by_followup_length(df, outcome_to_color, filter_negatives_only, length_limit=filter_by_fu_length)
    print ('df shape after filtering by fu period:', df.shape)
    if figsize_frac is not None:
        height = np.ceil(df.shape[0] * figsize_frac)
        width = np.ceil(df.shape[1] * figsize_frac)
    else:
        height = 10; width = 10
    # if outcome_to_color is not None:
    #     df = pd.merge(df,outcome_to_color,how='inner',left_index=True,right_index=True)
    #     color_map = {0:'b',1:'r',np.nan:'g'}
    #     row_colors = df.iloc[:,-1].map(color_map)
    #     outcome=outcome_to_color.columns.tolist()[0]
    print ('df shape after filtering by fu period:', df.shape)
    df = df.sort_values(by=outcome, ascending=False)
    if filter_by_fu_length is not None: df = df.drop('follow_up_period_length', axis=1)
    g = sns.clustermap(df.drop(outcome, axis=1), xticklabels=True, yticklabels=True,
                       figsize=(width, height), row_colors=row_colors,
                       row_cluster=row_cluster, col_cluster=col_cluster)
    plt.setp(g.ax_heatmap.yaxis.get_majorticklabels(), fontsize='xx-small')
    plt.setp(g.ax_heatmap.xaxis.get_majorticklabels(), fontsize='xx-small')
    plt.subplots_adjust(bottom=0.3)
    # g.ax_heatmap.set_title('Subplot Title', y=1.25)
    return g

def gen_heatmap(df, only_partial=False, figsize_frac=None, outcome_to_color=None):
    outcome = outcome_to_color.columns.tolist()[0]
    df = pd.merge(df, outcome_to_color, how='inner', left_index=True, right_index=True)
    color_map = {0: 'b', 1: 'r', np.nan: 'g'}
    row_colors = df.iloc[:, -1].map(color_map)
    df = df.sort_values(by=outcome, ascending=False)
    if only_partial:
        df = df.iloc[:10, :10]
    if figsize_frac is not None:
        height = np.ceil(df.shape[0] * figsize_frac)
        width = np.ceil(df.shape[1] * figsize_frac)
    else:
        height = 10; width = 10
    g = sns.clustermap(df.drop(outcome, axis=1), xticklabels=True, yticklabels=True,
                       figsize=(width, height), row_colors=row_colors,
                       row_cluster=False, col_cluster=False)
    plt.setp(g.ax_heatmap.yaxis.get_majorticklabels(), fontsize='xx-small')
    plt.setp(g.ax_heatmap.xaxis.get_majorticklabels(), fontsize='xx-small')
    plt.subplots_adjust(bottom=0.3)
    return g

def plot_PCA(feature_df, outcome_df, suffix, to_scale=True):
    outcome = outcome_df.columns[0]
    sample_list1 = (outcome_df[outcome_df[outcome] == 0]).index.tolist()
    sample_list2 = (outcome_df[outcome_df[outcome] == 1]).index.tolist()
    print ('outcome: ', outcome)
    print ('sample list 1:', sample_list1)
    print ('sample list 2:', sample_list2)
    sample_list_list = [(outcome + '_0', sample_list1), (outcome + '_1', sample_list2)]
    color_list = ['blue', 'red']
    PCAdf, fig, ax = calc_PCA(feature_df, isSparse=False, n_comp=10, sample_list_list=sample_list_list,
                              color_list=color_list, pca_n1_toplot=0, pca_n2_toplot=1, toScale=to_scale,
                              toPlot=True, toAnnotate=True, fig=None, ax=None, calculateSeperation=True)
    ax.set_title('PCA_%s%s' % (outcome, suffix))
    plt.subplots_adjust(right=0.8)
    return PCAdf

if __name__ == "__main__":
    outcome = 'CV hospitalization including chest pain'
    filter_by_fu_length = None
    filter_negatives_only = False
    only_productive = False
    only_annotation = False
    only_calc_features = True
    feature_for_pca = 'clusters'  # 'clusters' / 'features'
    suffix = get_suffix(only_productive, only_annotation, only_calc_features, filter_negatives_only)
    print ('loading feature df...')
    feature_df = get_patients_feature_df(patient_list=Cardio126, only_productive=only_productive, only_annotation=only_annotation,
                                         only_calc_features=only_calc_features)
    print ('loading outcome df...')
    outcome_df = get_outcome(outcome)
    print ('loading cluster df...')
    cluster_df = get_patients_clusters(patient_list=Cardio126, outcome_df=outcome_df, do_binary=True,
                                       filter_by_fu_length=filter_by_fu_length,
                                       filter_negatives_only=filter_negatives_only)
    # print ('comparing TCR features...')
    # feature_comparison_df = compare_TCR_features(feature_df,outcome_df,filter_negatives_only,filter_by_fu_length)
    # feature_comparison_df.to_excel(os.path.join(FIGURE_DIR+'feature_comparison/',
    #                     'feature_comparison_df%s_%s_%s.xlsx' %(suffix,outcome,str(filter_by_fu_length).replace('.',''))))
    #
    # print feature_comparison_df.head()
    # print ('normalizing df...')
    # feature_df_scaled = normalize_df(feature_df)
    print ('calculating PCA...')
    if feature_for_pca == 'clusters':
        suffix = suffix + '_clusters'
        PCAdf = plot_PCA(cluster_df, outcome_df, suffix, to_scale=False)
    else:
        PCAdf = plot_PCA(feature_df, outcome_df, suffix)
    # print('generating heatmap...')
    # do_cluster=False
    # g = gen_clustermap(feature_df_scaled,only_partial=False,filter_by_fu_length=filter_by_fu_length,
    #                    do_cluster=do_cluster, filter_negatives_only=filter_negatives_only,
    #                    figsize_frac=0.25,outcome_to_color=outcome_df)
    #
    # print('generating clustermap...')
    # do_cluster=True
    # g = gen_clustermap(feature_df_scaled,only_partial=False,filter_by_fu_length=filter_by_fu_length,
    #                    do_cluster=do_cluster,
    #                    figsize_frac=0.25,outcome_to_color=outcome_df)
    # df = pd.DataFrame(np.random.randint(0,99,(4,10)))
    # g2 = sns.clustermap(df)
    # plt.setp(g2.ax_heatmap.yaxis.get_majorticklabels(), fontsize='xx-large',rotation=0)
    # g2.ax_heatmap.set_title('Subplot Title',y=1.25)
    # iris = sns.load_dataset("iris")
    # species = iris.pop("species")
    # lut = dict(zip(species.unique(), "rbg"))
    # row_colors = species.map(lut)
    # >>> g = sns.clustermap(iris, row_colors=row_colors)
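    # Illustrative sketch (added for clarity; not part of the original script): calc_PCA is a
    # lab-internal helper, so here is a generic sklearn-based sketch of the same idea - project
    # the feature matrix onto two principal components and colour patients by outcome group.
    def _example_pca_scatter(feature_df, outcome_df):
        from sklearn.decomposition import PCA
        outcome = outcome_df.columns[0]
        merged = pd.merge(feature_df, outcome_df, how='inner', left_index=True, right_index=True)
        X = preprocessing.scale(merged.drop(outcome, axis=1))
        pcs = PCA(n_components=2).fit_transform(X)
        colors = merged[outcome].map({0: 'blue', 1: 'red'}).tolist()
        fig, ax = plt.subplots()
        ax.scatter(pcs[:, 0], pcs[:, 1], c=colors)
        ax.set_xlabel('PC1'); ax.set_ylabel('PC2')
        return fig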