How to use get_skips method in Slash

Best Python code snippet using slash

keras_model.py

Source:keras_model.py Github

copy

Full Screen

def get_skips(self, docs):
    """
    Format the data and generate negative samples for skip-gram training.

    :param docs: list; a list of documents; each document is a list of
        sentences; a sentence is a list of tokens (strings)
    :return: tuple; (center_words, context_words, labels), three parallel
        lists of equal length
    """
    sampling_table = make_sampling_table(self.vocab_size)
    center_words, context_words, labels = [], [], []
    for doc in docs:
        # Flatten the document into a single token sequence.
        tokens = [token for sent in doc for token in sent]
        pairs, labels_ = skipgrams(tokens,
                                   self.vocab_size,
                                   window_size=self.window_size,
                                   sampling_table=sampling_table)
        try:
            center, context = zip(*pairs)
        except ValueError:
            # skipgrams produced no pairs for this document (e.g. every
            # token was down-sampled) -- nothing to unpack, skip it.
            continue
        center_words += center
        context_words += context
        labels += labels_
    return center_words, context_words, labels

def w2v_model(self, learning_rate):
    """
    Generate the neural architecture for the word2vec skip-gram model.

    :param learning_rate: float; learning rate for the RMSprop optimizer
    :return: keras.models.Model; the compiled word2vec model
    """
    # Input and embedding layers; both inputs are single word indices.
    input_center = Input((1,))
    input_context = Input((1,))
    self.embeddings = Embedding(self.vocab_size, self.embedding_size,
                                input_length=1, name="Embeddings")
    # Look up the center and context embeddings and reshape for the dot.
    center = self.embeddings(input_center)
    center = Reshape((self.embedding_size, 1))(center)
    context = self.embeddings(input_context)
    context = Reshape((self.embedding_size, 1))(context)
    # Similarity of the pair: unnormalized dot product along the
    # embedding axis, squeezed back to a scalar per example.
    dot_product = dot([center, context], axes=1, normalize=False)
    dot_product = Reshape((1,))(dot_product)
    # Sigmoid output: probability the pair is a true (center, context) pair.
    output = Dense(1, activation="sigmoid")(dot_product)
    model = Model(input=[input_center, input_context], output=output)
    optimizer = RMSprop(lr=learning_rate, rho=0.9, epsilon=None, decay=0.0)
    model.compile(loss="binary_crossentropy", optimizer=optimizer)
    return model

def train(self, docs, num_batches=2000, learning_rate=0.001, verbose=True):
    """
    Optimize the model on the training data.

    :param docs: list; a sequence of documents; each document is a list of
        sentences; a sentence is a list of tokens (strings)
    :param num_batches: int; the number of (center, context) pairs to use
        in training
    :param learning_rate: float; learning rate for the optimizer
    :param verbose: bool; if True, prints the mean loss every 1000 batches
    """
    # Get the data and build the model.
    center_words, context_words, labels = self.get_skips(docs)
    self.model = self.w2v_model(learning_rate)
    # Randomly sample one pair/label per batch.
    loss = []
    for batch in range(num_batches):
        # BUG FIX: np.random.randint's upper bound is exclusive, so the
        # original `len(center_words) - 1` could never sample the final pair.
        idx = np.random.randint(0, len(center_words))
        center_word = np.array([center_words[idx]])
        context_word = np.array([context_words[idx]])
        label = np.array([labels[idx]])
        loss += [self.model.train_on_batch([center_word, context_word], label)]
        # Report the running mean loss every 1000 batches.
        if len(loss) >= 1000 and verbose:
            print(batch, sum(loss) / 1000)
            loss = []

Full Screen

Full Screen

analogy.py

Source:analogy.py Github

copy

Full Screen

def get_skips(query_a: str, query_b: str, query_c: str,
              includes: List[str]) -> Set[str]:
    """Return the query words that must be skipped in the analogy results.

    :param query_a: first analogy query word
    :param query_b: second analogy query word
    :param query_c: third analogy query word
    :param includes: which query slots ('a', 'b', 'c') may appear in the
        results; a slot not listed here is added to the skip set
    :return: the set of words to exclude from the analogy output; if
        *includes* is empty, all three query words are skipped
    """
    if not includes:
        return {query_a, query_b, query_c}
    skips = set()
    if 'a' not in includes:
        skips.add(query_a)
    if 'b' not in includes:
        skips.add(query_b)
    if 'c' not in includes:
        # BUG FIX: the original added query_b here (copy-paste error),
        # so query_c was never skipped and query_b could be skipped twice.
        skips.add(query_c)
    return skips


if __name__ == '__main__':
    main()

Full Screen

Full Screen

aoc10.py

Source:aoc10.py Github

copy

Full Screen

def get_skips(arr):
    """Find runs of optional adapters and score each run.

    Modifies *arr* in place: appends the outlet joltage (0) and the
    device joltage (max + 3), then sorts. An adapter is "pinned" when it
    sits at a 3-jolt gap on either side; the elements between pinned
    adapters form removable runs. Each run of length 1, 2 or 3
    contributes 1, 3 or 6 extra arrangement choices respectively.

    :param arr: list of adapter joltages (mutated by this call)
    :return: list of per-run choice counts
    """
    arr.append(0)
    arr.append(max(arr) + 3)
    arr.sort()
    run_choices = {1: 1, 2: 3, 3: 6}
    scores = []
    run = 0
    for pos in range(1, len(arr) - 1):
        pinned = (arr[pos] - arr[pos - 1] == 3) or (arr[pos + 1] - arr[pos] == 3)
        if not pinned:
            run += 1
            continue
        if run:
            choices = run_choices.get(run)
            if choices is not None:
                scores.append(choices)
            run = 0
    return scores


def part2(adapters):
    """Count the total number of valid adapter arrangements.

    :param adapters: list of adapter joltages (mutated via get_skips)
    :return: the number of distinct ways to chain the adapters
    """
    run_scores = get_skips(adapters)
    total = 1
    # Sum the products of every non-empty subset of run scores.
    for pick in range(1, len(run_scores) + 1):
        for combo in itertools.combinations(run_scores, pick):
            total += math.prod(combo)
    return total

Full Screen

Full Screen

Automation Testing Tutorials

Learn to execute automation testing from scratch with the LambdaTest Learning Hub. Right from setting up the prerequisites and running your first automation test, to following best practices and diving deeper into advanced test scenarios. The LambdaTest Learning Hub compiles a list of step-by-step guides to help you become proficient with different test automation frameworks, e.g., Selenium, Cypress, and TestNG.

LambdaTest Learning Hubs:

YouTube

You can also refer to the video tutorials on the LambdaTest YouTube channel to get step-by-step demonstrations from industry experts.

Run Slash automation tests on LambdaTest cloud grid

Perform automation testing on 3000+ real desktop and mobile devices online.

Try LambdaTest Now !!

Get 100 minutes of automation test minutes FREE!!

Next-Gen App & Browser Testing Cloud

Was this article helpful?

Helpful

Not Helpful