How to use the send_instances method in Contexts

Best Python code snippets using Contexts, collected from open-source projects on GitHub.

Worker.py

Source: Worker.py (GitHub)


...
    return self.raw_instance_list[-1].attack_info[0] == '0'

def check_sequnce(self, game_end):
    if game_end == 1:
        for index, item in enumerate(self.raw_instance_list[:-1]):
            self.send_instances(index, game_end)
    else:
        if len(self.raw_instance_list) == 1:
            return
        remove_index_list = []
        cur_time = self.raw_instance_list[-1].time
        for index, item in enumerate(self.raw_instance_list):
            if cur_time - self.raw_instance_list[index].time > 2 * 60 * 24:
                self.send_instances(index, game_end)
                remove_index_list.append(index)
            else:
                break
        self.raw_instance_list = [item for index, item in enumerate(self.raw_instance_list)
                                  if index not in remove_index_list]

def send_instances(self, start, game_end):
    td_discount = self.discount_rate
    terminal_state = copy.deepcopy(self.raw_instance_list[-1].state_feature)
    terminal_action_valid_info = self.raw_instance_list[-1].action_valid_info
    terminal_reward = self.raw_instance_list[-1].terminal_reward
    is_end = self.raw_instance_list[-1].is_end
    # step cost
    actual_reward = 0
    instant_rewards = actual_reward  # + diff_score
    if instant_rewards > self.clipping_reward_threshold:
        instant_rewards = self.clipping_reward_threshold
    if instant_rewards < -self.clipping_reward_threshold:
        instant_rewards = -self.clipping_reward_threshold
    max_Q_value = self.model.get_Q_value(self.raw_instance_list[start], self.oppo_and_time, self.game_index)
    training_instance = Training_Instance(self.raw_instance_list[start].time, self.raw_instance_list[start].state_feature, self.raw_instance_list[start].choose_action, ...
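The pattern worth noting here is a time-ordered buffer: check_sequnce walks the list from the oldest entry, flushes every instance older than a fixed horizon (2 * 60 * 24 time units) through send_instances, and then compacts the list. Below is a minimal, runnable sketch of that flush logic; Buffer and RawInstance are hypothetical stand-ins (check_sequence mirrors the original's check_sequnce), and send_instances is stubbed to a print because the real method builds a Training_Instance for the learner.

# A minimal, self-contained sketch of the flush pattern above. RawInstance and
# Buffer are hypothetical stand-ins, not names from the original project.
from collections import namedtuple

RawInstance = namedtuple('RawInstance', ['time', 'payload'])

class Buffer:
    HORIZON = 2 * 60 * 24  # same staleness cutoff as the snippet above

    def __init__(self):
        self.raw_instance_list = []

    def send_instances(self, start, game_end):
        # stand-in for the real method, which builds a training instance
        print('sending instance at index', start, 'game_end =', game_end)

    def check_sequence(self, game_end):
        if game_end == 1:
            # game over: flush everything except the terminal instance
            for index, _ in enumerate(self.raw_instance_list[:-1]):
                self.send_instances(index, game_end)
        else:
            if len(self.raw_instance_list) == 1:
                return
            remove = []
            cur_time = self.raw_instance_list[-1].time
            for index, item in enumerate(self.raw_instance_list):
                if cur_time - item.time > self.HORIZON:
                    self.send_instances(index, game_end)
                    remove.append(index)
                else:
                    break  # entries are time-ordered: first fresh one ends the scan
            self.raw_instance_list = [item for index, item in enumerate(self.raw_instance_list)
                                      if index not in remove]

buf = Buffer()
buf.raw_instance_list = [RawInstance(t, None) for t in (0, 1, 5000)]
buf.check_sequence(game_end=0)  # flushes the two stale entries, keeps the newest

The early break relies on the buffer being appended in time order; without that invariant every entry would have to be checked.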


pyServer.py

Source: pyServer.py (GitHub)


...
            command = message['command']
            if command == 'put_instances':
                receive_instances(message)
            elif command == 'get_instances':
                send_instances(message)
            elif command == 'execute_script':
                execute_script(message)
            elif command == 'get_variable_list':
                send_variable_list(message)
            elif command == 'get_variable_type':
                send_variable_type(message)
            elif command == 'get_variable_value':
                send_variable_value(message)
            elif command == 'get_image':
                send_image_as_png(message)
            elif command == 'variable_is_set':
                send_variable_is_set(message)
            elif command == 'set_variable_value':
                receive_variable_value(message)
            elif command == 'get_debug_buffer':
                send_debug_buffer()
            elif command == 'shutdown':
                if _global_startup_debug == True:
                    print('Received shutdown command...\n')
                exit()
        else:
            if _global_startup_debug == True:
                print('message did not contain a command field!')
    finally:
        _global_connection.close()

def message_debug(message):
    if 'debug' in message:
        return message['debug']
    else:
        return False

def send_debug_buffer():
    tOut = sys.stdout
    tErr = sys.stderr
    ok_response = {}
    ok_response['response'] = 'ok'
    ok_response['std_out'] = tOut.getvalue()
    ok_response['std_err'] = tErr.getvalue()
    # clear the buffers
    tOut.close()
    tErr.close()
    sys.stdout = StringIO()
    sys.stderr = StringIO()
    send_response(ok_response, True)

def receive_instances(message):
    if 'header' in message:
        # get the frame name
        header = message['header']
        frame_name = header['frame_name']
        # could store the header (but don't currently)
        # _headers[frame_name] = header
        num_instances = message['num_instances']
        if num_instances > 0:
            # receive the CSV
            csv_data = receive_message(False)
            frame = None
            if 'date_atts' in header:
                frame = pd.read_csv(StringIO(csv_data), na_values='?',
                                    quotechar='\'', escapechar='\\',
                                    index_col=None,
                                    parse_dates=header['date_atts'],
                                    infer_datetime_format=True)
            else:
                frame = pd.read_csv(StringIO(csv_data), na_values='?',
                                    quotechar='\'', escapechar='\\',
                                    index_col=None)
            _global_env[frame_name] = frame
            if message_debug(message) == True:
                print(frame.info(), '\n')
        ack_command_ok()
    else:
        error = 'put instances json message does not contain a header entry!'
        ack_command_err(error)

def send_instances(message):
    frame_name = message['frame_name']
    frame = get_variable(frame_name)
    if type(frame) is not pd.DataFrame:
        message = 'Variable ' + frame_name
        if frame is None:
            message += ' is not defined'
        else:
            message += ' is not a DataFrame object'
        ack_command_err(message)
    else:
        ack_command_ok()
        # now convert and send data
        response = {}
        response['response'] = 'instances_header'
        ...
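pyServer.py routes each incoming JSON message to a handler based on its command field. The if/elif chain works, but the same routing is often written as a dict-based dispatch table, which keeps the command-to-handler mapping in one place. The following is a sketch only, with stub handlers standing in for the real ones.

# Dict-based dispatch equivalent to the if/elif chain above. The handlers here
# are print stubs, not the real pyServer.py functions.
def receive_instances(message): print('put_instances', message)
def send_instances(message): print('get_instances', message)
def execute_script(message): print('execute_script', message)

HANDLERS = {
    'put_instances': receive_instances,
    'get_instances': send_instances,
    'execute_script': execute_script,
    # ... one entry per command, as in the snippet above
}

def dispatch(message):
    try:
        handler = HANDLERS[message['command']]
    except KeyError:
        print('message did not contain a recognized command field!')
        return
    handler(message)

dispatch({'command': 'get_instances', 'frame_name': 'df'})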


trainer_slave.py

Source: trainer_slave.py (GitHub)


#!/usr/bin/env python
import sys, os.path, math, gc, time
import collections, random, itertools
from mpi4py import MPI
import decoder
import model
import svector
import oracle
import maxmargin
import monitor, log

### Per-feature learning rates
# possible values are: "arow", "gauss-newton"
update_feature_scales = "gauss-newton"

# The following two parameters set the initial feature learning rates.
# If a feature is in initial_feature_learning_rates, its value there
# is its initial learning rate. Otherwise, it is initial_learning_rate.
initial_learning_rate = 1.
initial_feature_learning_rates = svector.Vector()

# The maximum learning rate for any feature (or None)
max_learning_rate = 0.1

# The following only applies to arow:
# How quickly to slow down per-feature learning rates.
learning_rate_decay = 0.01

# The following only applies to gauss-newton:
# The learning rate for a feature with unit variance.
unit_learning_rate = 0.01
initial_learning_rate_strength = 1

# Weight on BLEU score used to compute hope translations.
hope_weight = 1.

# Which features to show in log file
watch_features = svector.Vector("lm1=1 lm2=1 pef=1 pfe=1 lef=1 lfe=1 word=1 green=1 unknown=1")

class ForestInstance(maxmargin.Instance):
    def __init__(self, sentid, goal):
        maxmargin.Instance.__init__(self, instance_id=sentid)
        self.sentid = sentid
        self.goal = goal

    def get_fear(self):
        """Assumes that oraclemodel.input() has been called"""
        if not self.goal:
            raise NotImplementedError()
        weights = theoracle.make_weights(additive="edge")
        # use in-place operations because theoracle.make_weights might
        # be a subclass of svector.Vector
        weights += self.qp.mweights
        self.goal.reweight(weights)
        fear_vector, fear = decoder.get_nbest(self.goal, 1, 1)[0]
        fear_ovector = theoracle.finish(fear_vector, fear)
        fear_mvector = theoracle.clean(fear_vector)
        if log.level >= 1:
            log.write("fear hyp: %s\n" % " ".join(sym.tostring(e) for e in fear))
            log.write("fear features: %s\n" % fear_mvector)
            log.write("fear oracle: %s\n" % fear_ovector)
        return maxmargin.Hypothesis(fear_mvector, fear_ovector)

    def get_hope(self):
        """Assumes that oraclemodel.input() has been called"""
        if not self.goal:
            _, hope = min((self.qp.mweights.dot(hyp.mvector) + hope_weight * self.qp.oweights.dot(hyp.ovector), hyp) for hyp in self.hyps)
            return hope

        weights = theoracle.make_weights(additive="edge")
        # use in-place operations because theoracle.make_weights might
        # be a subclass of svector.Vector
        weights *= -hope_weight
        weights += self.qp.mweights
        self.goal.reweight(weights)
        hope_vector, hope = decoder.get_nbest(self.goal, 1, 1)[0]
        hope_ovector = theoracle.finish(hope_vector, hope)
        hope_mvector = theoracle.clean(hope_vector)
        if log.level >= 1:
            log.write("hope hyp: %s\n" % " ".join(sym.tostring(e) for e in hope))
            log.write("hope features: %s\n" % hope_mvector)
            log.write("hope oracle: %s\n" % hope_ovector)
        return maxmargin.Hypothesis(hope_mvector, hope_ovector)

class Learner(object):
    def __init__(self):
        self.sum_weights_helper = svector.Vector()
        self.n_weights = 0
        self.sum_updates2 = svector.Vector()
        self.n_updates = 0

    def compute_feature_learning_rate(self, f):
        if f in initial_feature_learning_rates:
            r0 = initial_feature_learning_rates[f]
        else:
            r0 = initial_learning_rate
        if update_feature_scales == "arow":
            # \Sigma^{-1} := \Sigma^{-1} + learning_rate_decay * sum_updates2[f]
            variance = 1./r0 + self.sum_updates2[f] * learning_rate_decay
            r = 1. / variance
        elif update_feature_scales == "gauss-newton":
            variance = (initial_learning_rate_strength/r0 + self.sum_updates2[f] / unit_learning_rate) / (initial_learning_rate_strength + self.n_updates)
            r = 1. / variance
        if max_learning_rate:
            r = min(max_learning_rate, r)
        return r

    def train(self, sent, instances):
        # Set up quadratic program
        qp = maxmargin.QuadraticProgram()
        for instance in instances:
            qp.add_instance(instance)

        # Make oracle weight vector
        oweights = theoracle.make_weights(additive="sentence")
        oweights *= -1

        # Make vector of learning rates
        # We have to be careful to assign a learning rate to every feature in the forest
        # This is not very efficient
        feats = set()
        for instance in qp.instances:
            if hasattr(instance, "goal") and instance.goal:
                for item in instance.goal.bottomup():
                    for ded in item.deds:
                        feats.update(ded.dcost)
            for hyp in instance.hyps:
                feats.update(hyp.mvector)

        learning_rates = svector.Vector()
        for feat in feats:
            learning_rates[feat] = self.compute_feature_learning_rate(feat)
        if log.level >= 3:
            log.writeln("learning rate vector: %s" % learning_rates)

        # Solve the quadratic program
        qp.optimize(thedecoder.weights, oweights, learning_rate=learning_rates)

        thedecoder.weights.compact()
        log.write("feature weights: %s\n" % (thedecoder.weights * watch_features))

        # Update weight sum for averaging
        # sum_weights_helper = \sum_{i=0}^n (i \Delta w_i)
        self.sum_weights_helper += self.n_weights * qp.delta_mweights()
        self.n_weights += 1

        # Update feature scales
        if update_feature_scales:
            for instance in qp.instances:
                """u = svector.Vector(instance.hope.mvector)
                for hyp in instance.hyps:
                    u -= hyp.alpha*hyp.mvector
                self.sum_updates2 += u*u"""
                for hyp in instance.hyps:
                    if hyp is not instance.hope:  # hyp = instance.hope is a non-update
                        u = instance.hope.mvector - hyp.mvector
                        self.sum_updates2 += hyp.alpha*(u*u)
                        self.n_updates += hyp.alpha
            log.write("feature learning rates: %s\n" % (" ".join("%s=%s" % (f, self.compute_feature_learning_rate(f)) for f in watch_features)))

        theoracle.update(sent.score_comps)

        # make a plain Instance (without forest)
        # we used to designate a hope translation,
        #send_instance = maxmargin.Instance(cur_instance.hyps, hope=cur_instance.hope, instance_id=cur_instance.sentid)
        # but now are letting the other node choose.
        send_instances = []
        for instance in instances:
            if hasattr(instance, "goal") and instance.goal:
                send_instances.append(maxmargin.Instance(instance.hyps, instance_id=instance.sentid))
        assert len(send_instances) == 1
        return send_instances[0]

decoder_errors = 0
def process(sent):
    # Add an flen attribute that gives the length of the input sentence.
    # In the lattice-decoding case, we have to make a guess.
    distance = sent.compute_distance()
    sent.flen = distance.get((0,sent.n-1), None)  # could be missing if n == 0
    theoracle.input(sent)
    global decoder_errors
    try:
        goal = thedecoder.translate(sent)
        thedecoder.process_output(sent, goal)
        decoder_errors = 0
        if goal is None: raise Exception("parse failure")
    except Exception:
        import traceback
        log.write("decoder raised exception: %s" % "".join(traceback.format_exception(*sys.exc_info())))
        decoder_errors += 1
        if decoder_errors >= 3:
            log.write("decoder failed too many times, passing exception through!\n")
            raise
        else:
            return

    # Augment forest with oracle features
    # this is overkill if we aren't going to search for hope/fear
    goal.rescore(theoracle.models, thedecoder.weights, add=True)

    best_vector, best = decoder.get_nbest(goal, 1)[0]
    best_mvector = theoracle.clean(best_vector)
    best_ovector = theoracle.finish(best_vector, best)
    best_loss = theoracle.make_weights(additive="sentence").dot(best_ovector)
    log.writeln("best hyp: %s %s cost=%s loss=%s" % (" ".join(sym.tostring(e) for e in best), best_vector, thedecoder.weights.dot(best_mvector), best_loss))
    sent.score_comps = best_ovector
    sent.ewords = [sym.tostring(e) for e in best]

    return goal

if __name__ == "__main__":
    gc.set_threshold(100000,10,10)

    import optparse
    optparser = optparse.OptionParser()
    # Most of these aren't actually used here...ugly
    optparser.add_option("-W", dest="outweightfilename", help="filename to write weights to")
    optparser.add_option("-L", dest="outscorefilename", help="filename to write BLEU scores to")
    optparser.add_option("-B", dest="bleuvariant", default="NIST")
    optparser.add_option("-S", dest="stopfile")
    optparser.add_option("-p", dest="parallel", type=int, help="how many slaves to start", default=1)
    optparser.add_option("--input-lattice", dest="input_lattice", action="store_true")
    optparser.add_option("--holdout", "--heldout-ratio", dest="heldout_ratio", help="fraction of sentences to hold out", type=float, default=None)
    optparser.add_option("--heldout-sents", dest="heldout_sents", help="number of sentences to hold out", type=int, default=None)
    optparser.add_option("--heldout-policy", dest="heldout_policy", default="last", help="which sentences to hold out (first, last, uniform)")
    optparser.add_option("--no-shuffle", dest="shuffle_sentences", action="store_false", default=True)

    try:
        configfilename = sys.argv[1]
    except IndexError:
        sys.stderr.write("usage: trainer_slave.py config-file [options...]\n")
        sys.exit(1)

    if log.level >= 1:
        log.write("Reading configuration from %s\n" % configfilename)
    execfile(configfilename)

    opts, args = optparser.parse_args(args=sys.argv[2:])

    maxmargin.watch_features = watch_features

    theoracle = oracle.Oracle(order=4, variant=opts.bleuvariant, oracledoc_size=10)

    thedecoder = make_decoder()
    thelearner = Learner()

    weight_stack = []

    if log.level >= 1:
        gc.collect()
        log.write("all structures loaded, memory=%s\n" % (monitor.memory()))

    comm = MPI.Comm.Get_parent()
    log.prefix = '[%s] ' % (comm.Get_rank(),)

    instances = []
    while True:
        msg = comm.recv()
        if msg[0] == 'train':
            sent = msg[1]
            goal = process(sent)
            instances.append(ForestInstance(sent.id, goal))

            while comm.Iprobe(tag=1):
                msg = comm.recv(tag=1)
                if msg[0] == 'update':
                    log.writeln("receive update for sentence %s" % msg[1].id)
                    instances.append(msg[1].instance)

            instance = thelearner.train(sent, instances)
            sent.instance = instance  # it would be nicer if sent and instance were the same object
            comm.send(sent, dest=0)
            instances = []
        elif msg[0] == 'translate':
            sent = msg[1]
            process(sent)
            comm.send(sent, dest=0)
        elif msg[0] == 'gather-weights':
            # Average weights (Daume trick)
            sum_weights = float(thelearner.n_weights) * thedecoder.weights - thelearner.sum_weights_helper
            sum_weights.compact()
            log.writeln("send summed weights")
            comm.gather(sum_weights, root=0)
            comm.gather(thelearner.n_weights, root=0)
        elif msg[0] == 'push-weights':
            weight_stack.append(svector.Vector(thedecoder.weights))
            thedecoder.weights = msg[1]
            log.writeln("receive weights: %s" % (thedecoder.weights * watch_features))
        elif msg[0] == 'pop-weights':
            thedecoder.weights = weight_stack.pop()
            log.writeln("restore weights: %s" % (thedecoder.weights * watch_features))
        elif msg[0] == 'end':
            break
        else:
            ...
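The arithmetic to notice in compute_feature_learning_rate is the gauss-newton branch: a feature's effective learning rate is the inverse of an estimated variance, so features that have accumulated large squared updates get small rates, and everything is capped at max_learning_rate. Here is that branch pulled out as a standalone function, a sketch using the defaults from the top of the file with made-up update statistics:

# The gauss-newton learning-rate formula from compute_feature_learning_rate,
# with the file's default hyperparameters. The statistics passed in below are
# invented for illustration.
initial_learning_rate = 1.
initial_learning_rate_strength = 1
unit_learning_rate = 0.01
max_learning_rate = 0.1

def gauss_newton_rate(sum_updates2_f, n_updates, r0=initial_learning_rate):
    variance = ((initial_learning_rate_strength / r0 + sum_updates2_f / unit_learning_rate)
                / (initial_learning_rate_strength + n_updates))
    r = 1. / variance
    if max_learning_rate:
        r = min(max_learning_rate, r)
    return r

print(gauss_newton_rate(0., 0))    # 1/1 = 1.0, capped to max_learning_rate = 0.1
print(gauss_newton_rate(25., 10))  # 1 / ((1 + 2500) / 11), roughly 0.0044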


server.py

Source: server.py (GitHub)


...

# Send all the snakes and food and ID to client
# to be done in multithreading
def send_instances(food):
    for i in range(len(all_snakes)):
        trysend = 0
        while True:
            try:
                to_send = pickle.dumps([all_snakes, food, i])
                all_connections[i].send(to_send)
                break
            except:
                if trysend == 0:
                    trysend = trysend + 1
                    continue
                else:
                    all_connections[i].close()
                    all_addresses.pop(i)
                    all_snakes.pop(i)
                    all_connections.pop(i)
                    break

# Updated snakes to be received & update
# To be done in multithreading
def receive_updates():
    eaten = False
    for i in range(len(all_snakes)):
        tryrecv = 0
        while True:
            try:
                received = all_connections[i].recv(5*1024)
                updated = pickle.loads(received)
                all_snakes[i] = updated[0]
                if updated[1] == True:
                    eaten = True
                break
            except:
                if tryrecv == 0:
                    tryrecv = tryrecv + 1
                    continue
                else:
                    all_connections[i].close()
                    all_addresses.pop(i)
                    all_snakes.pop(i)
                    all_connections.pop(i)
                    break
    return eaten

def main():
    bindSocket()
    accepting_connections()

    create_snakes()
    print(len(all_snakes))
    food = Food('*')

    while isEnd() == False:
        send_instances(food)
        print("Sent")
        eaten = receive_updates()
        print("Received")
        if eaten:
            food.reset()

        list_collided = []
        for i in range(len(all_snakes)):
            if all_snakes[i].collided and i not in list_collided:
                list_collided.append(i)
        ...
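From send_instances and receive_updates the client's side of the protocol can be read off: receive a pickled [all_snakes, food, my_id] triple, update your own snake, and reply with a pickled [my_snake, eaten] pair. A minimal client sketch under those assumptions follows; the address and the Snake.move method are invented, and only the wire format comes from the server code above.

# Minimal sketch of the client loop implied by the server code. Host/port and
# Snake.move are assumptions; the [all_snakes, food, my_id] / [my_snake, eaten]
# wire format is taken from send_instances and receive_updates above.
import pickle, socket

sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect(('localhost', 5555))  # assumed address

while True:
    # receive [all_snakes, food, my_id] from send_instances
    all_snakes, food, my_id = pickle.loads(sock.recv(5 * 1024))
    my_snake = all_snakes[my_id]
    eaten = my_snake.move(food)  # hypothetical: advance and check for food
    # reply with [my_snake, eaten] for receive_updates
    sock.send(pickle.dumps([my_snake, eaten]))

Two caveats worth flagging: pickle.loads on bytes from a network peer will execute arbitrary code if the peer is hostile, and a single recv(5 * 1024) assumes each message arrives in one read, which TCP does not guarantee. Both are acceptable in a toy game but fragile anywhere else.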

