How to use the track_events method in localstack

Best Python code snippets using localstack_python, collected from open-source projects on GitHub.

exportmidi.py

Source: exportmidi.py (GitHub)

#!/usr/bin/env python
import logging
import numpy as np
from collections import defaultdict, OrderedDict
from mido import MidiFile, MidiTrack, Message, MetaMessage
import partitura.score as score
from partitura.utils import partition

__all__ = ["save_score_midi", "save_performance_midi"]

LOGGER = logging.getLogger(__name__)


def get_partgroup(part):
    parent = part
    while parent.parent:
        parent = parent.parent
    return parent


def map_to_track_channel(note_keys, mode):
    ch_helper = {}
    tr_helper = {}
    track = {}
    channel = {}
    for (pg, p, v) in note_keys:
        if mode == 0:
            trk = tr_helper.setdefault(p, len(tr_helper))
            ch1 = ch_helper.setdefault(p, {})
            ch2 = ch1.setdefault(v, len(ch1) + 1)
            track[(pg, p, v)] = trk
            channel[(pg, p, v)] = ch2
        elif mode == 1:
            trk = tr_helper.setdefault(pg, len(tr_helper))
            ch1 = ch_helper.setdefault(pg, {})
            ch2 = ch1.setdefault(p, len(ch1) + 1)
            track[(pg, p, v)] = trk
            channel[(pg, p, v)] = ch2
        elif mode == 2:
            track[(pg, p, v)] = 0
            ch = ch_helper.setdefault(p, len(ch_helper) + 1)
            channel[(pg, p, v)] = ch
        elif mode == 3:
            trk = tr_helper.setdefault(p, len(tr_helper))
            track[(pg, p, v)] = trk
            channel[(pg, p, v)] = 1
        elif mode == 4:
            track[(pg, p, v)] = 0
            channel[(pg, p, v)] = 1
        elif mode == 5:
            trk = tr_helper.setdefault((p, v), len(tr_helper))
            track[(pg, p, v)] = trk
            channel[(pg, p, v)] = 1
        else:
            raise Exception("unsupported part/voice assign mode {}".format(mode))
    result = dict((k, (track.get(k, 0), channel.get(k, 1))) for k in note_keys)
    # for (pg, p, voice), v in result.items():
    #     pgn = pg.group_name if hasattr(pg, 'group_name') else pg.id
    #     print(pgn, p.id, voice)
    #     print(v)
    #     print()
    return result


def get_ppq(parts):
    ppqs = np.concatenate(
        [part.quarter_durations()[:, 1] for part in score.iter_parts(parts)]
    )
    ppq = np.lcm.reduce(ppqs)
    return ppq


def save_performance_midi(performed_part, out, mpq=500000, ppq=480, default_velocity=64):
    """Save a :class:`~partitura.performance.PerformedPart` instance as a
    MIDI file.

    Parameters
    ----------
    performed_part : :class:`~partitura.performance.PerformedPart`
        The performed part to save
    out : str or file-like object
        Either a filename or a file-like object to write the MIDI data to.
    mpq : int, optional
        Microseconds per quarter note. This is known in MIDI parlance as
        the "tempo" value. Defaults to 500000 (i.e. 120 BPM).
    ppq : int, optional
        Parts per quarter, also known as ticks per beat. Defaults to 480.
    default_velocity : int, optional
        A default velocity value (between 0 and 127) to be used for notes
        without a specified velocity. Defaults to 64.
    """
    track_events = defaultdict(lambda: defaultdict(list))

    for c in performed_part.controls:
        track = c.get("track", 0)
        ch = c.get("channel", 1)
        t = int(np.round(10 ** 6 * ppq * c["time"] / mpq))
        track_events[track][t].append(
            Message("control_change", control=c["number"], value=c["value"], channel=ch)
        )

    for n in performed_part.notes:
        track = n.get("track", 0)
        ch = n.get("channel", 1)
        t_on = int(np.round(10 ** 6 * ppq * n["note_on"] / mpq))
        t_off = int(np.round(10 ** 6 * ppq * n["note_off"] / mpq))
        vel = n.get("velocity", default_velocity)
        track_events[track][t_on].append(
            Message("note_on", note=n["midi_pitch"], velocity=vel, channel=ch)
        )
        track_events[track][t_off].append(
            Message("note_off", note=n["midi_pitch"], velocity=0, channel=ch)
        )

    for p in performed_part.programs:
        track = p.get("track", 0)
        ch = p.get("channel", 1)
        t = int(np.round(10 ** 6 * ppq * p["time"] / mpq))
        track_events[track][t].append(
            Message("program_change", program=int(p["program"]), channel=ch)
        )

    if len(performed_part.programs) == 0:
        # Add default program (to each track/channel)
        channels_and_tracks = np.array(
            list(
                set(
                    [
                        (c.get("channel", 1), c.get("track", 0))
                        for c in performed_part.controls
                    ]
                    + [
                        (n.get("channel", 1), n.get("track", 0))
                        for n in performed_part.notes
                    ]
                )
            ),
            dtype=int,
        )
        timepoints = []
        for tr in track_events.keys():
            timepoints += list(track_events[tr].keys())
        timepoints = list(set(timepoints))
        for tr in np.unique(channels_and_tracks[:, 1]):
            channel_idxs = np.where(channels_and_tracks[:, 1] == tr)[0]
            track_channels = np.unique(channels_and_tracks[channel_idxs, 0])
            for ch in track_channels:
                track_events[tr][min(timepoints)].append(
                    Message("program_change", program=0, channel=ch)
                )

    midi_type = 0 if len(track_events) == 1 else 1
    mf = MidiFile(type=midi_type, ticks_per_beat=ppq)
    for j, i in enumerate(sorted(track_events.keys())):
        track = MidiTrack()
        mf.tracks.append(track)
        if j == 0:
            track.append(MetaMessage("set_tempo", tempo=mpq, time=0))
        t = 0
        for t_msg in sorted(track_events[i].keys()):
            t_delta = t_msg - t
            for msg in track_events[i][t_msg]:
                track.append(msg.copy(time=t_delta))
                t_delta = 0
            t = t_msg
    if out:
        if hasattr(out, "write"):
            mf.save(file=out)
        else:
            mf.save(out)


def save_score_midi(parts, out, part_voice_assign_mode=0, velocity=64, anacrusis_behavior="shift"):
    """Write data from Part objects to a MIDI file

    Parameters
    ----------
    parts : Part, PartGroup or list of these
        The musical score to be saved.
    out : str or file-like object
        Either a filename or a file-like object to write the MIDI data to.
    part_voice_assign_mode : {0, 1, 2, 3, 4, 5}, optional
        This keyword controls how part and voice information is associated
        to track and channel information in the MIDI file. The semantics of
        the modes is as follows:

        0
            Write one track for each Part, with channels assigned by voices
        1
            Write one track for each PartGroup, with channels assigned by
            Parts (voice info is lost) (There can be multiple levels of
            partgroups, I suggest using the highest level of partgroup/part)
            [note: this will e.g. lead to all strings into the same track]
            Each part not in a PartGroup will be assigned its own track
        2
            Write a single track with channels assigned by Part (voice info
            is lost)
        3
            Write one track per Part, and a single channel for all voices
            (voice info is lost)
        4
            Write a single track with a single channel (Part and voice info
            is lost)
        5
            Return one track per <Part, voice> combination, each track
            having a single channel.

        The default mode is 0.
    velocity : int, optional
        Default velocity for all MIDI notes. Defaults to 64.
    anacrusis_behavior : {"shift", "pad_bar"}, optional
        Strategy to deal with anacrusis. If "shift", all time points are
        shifted by the anacrusis (i.e., the first note starts at 0). If
        "pad_bar", the "incomplete" bar of the anacrusis is padded with
        silence. Defaults to 'shift'.
    """
    ppq = get_ppq(parts)
    events = defaultdict(lambda: defaultdict(list))
    meta_events = defaultdict(lambda: defaultdict(list))
    event_keys = OrderedDict()
    tempos = {}
    quarter_maps = [part.quarter_map for part in score.iter_parts(parts)]
    first_time_point = min(qm(0) for qm in quarter_maps)
    ftp = 0
    # Deal with anacrusis
    if first_time_point < 0:
        if anacrusis_behavior == "shift":
            ftp = first_time_point
        elif anacrusis_behavior == "pad_bar":
            time_signatures = []
            for qm, part in zip(quarter_maps, score.iter_parts(parts)):
                ts_beats, ts_beat_type = part.time_signature_map(0)
                time_signatures.append((ts_beats, ts_beat_type, qm(0)))
            # sort ts according to time
            time_signatures.sort(key=lambda x: x[2])
            ftp = -time_signatures[0][0] / (time_signatures[0][1] / 4)
        else:
            raise Exception(
                'Invalid anacrusis_behavior value, must be one of ("shift", "pad_bar")'
            )

    for qm, part in zip(quarter_maps, score.iter_parts(parts)):
        pg = get_partgroup(part)
        notes = part.notes_tied

        def to_ppq(t):
            # convert div times to new ppq
            return int(ppq * (qm(t) - ftp))

        for tp in part.iter_all(score.Tempo):
            tempos[to_ppq(tp.start.t)] = MetaMessage(
                "set_tempo", tempo=tp.microseconds_per_quarter
            )
        for ts in part.iter_all(score.TimeSignature):
            meta_events[part][to_ppq(ts.start.t)].append(
                MetaMessage(
                    "time_signature", numerator=ts.beats, denominator=ts.beat_type
                )
            )
        for ks in part.iter_all(score.KeySignature):
            meta_events[part][to_ppq(ks.start.t)].append(
                MetaMessage("key_signature", key=ks.name)
            )
        for note in notes:
            # key is a tuple (part_group, part, voice) that will be
            # converted into a (track, channel) pair.
            key = (pg, part, note.voice)
            events[key][to_ppq(note.start.t)].append(
                Message("note_on", note=note.midi_pitch)
            )
            events[key][to_ppq(note.start.t + note.duration_tied)].append(
                Message("note_off", note=note.midi_pitch)
            )
            event_keys[key] = True

    tr_ch_map = map_to_track_channel(list(event_keys.keys()), part_voice_assign_mode)

    # replace original event keys (partgroup, part, voice) by (track, ch) keys:
    for key in list(events.keys()):
        evs_by_time = events[key]
        del events[key]
        tr, ch = tr_ch_map[key]
        for t, evs in evs_by_time.items():
            events[tr][t].extend((ev.copy(channel=ch) for ev in evs))

    # figure out in which tracks to replicate the time/key signatures of each part
    part_track_map = partition(lambda x: x[0][1], tr_ch_map.items())
    for part, rest in part_track_map.items():
        part_track_map[part] = set(x[1][0] for x in rest)

    # add the time/key sigs to their corresponding tracks
    for part, m_events in meta_events.items():
        tracks = part_track_map[part]
        for tr in tracks:
            for t, me in m_events.items():
                events[tr][t] = me + events[tr][t]

    n_tracks = max(tr for tr, _ in tr_ch_map.values()) + 1
    tracks = [MidiTrack() for _ in range(n_tracks)]

    # tempo events are handled differently from key/time sigs because they have
    # a global effect. Instead of adding to each relevant track, like the
    # key/time sig events, we add them only to the first track
    for t, tp in tempos.items():
        events[0][t].insert(0, tp)

    for tr, events_by_time in events.items():
        t_prev = 0
        for t in sorted(events_by_time.keys()):
            evs = events_by_time[t]
            delta = t - t_prev
            for ev in evs:
                tracks[tr].append(ev.copy(time=delta))
                delta = 0
            t_prev = t

    midi_type = 0 if n_tracks == 1 else 1
    mf = MidiFile(type=midi_type, ticks_per_beat=ppq)
    for track in tracks:
        mf.tracks.append(track)
    if out:
        if hasattr(out, "write"):
            mf.save(file=out)
        else:
            ...
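For a quick smoke test of save_performance_midi, the sketch below passes a hypothetical stand-in instead of a real partitura PerformedPart; the stand-in only needs the three attributes the function actually reads (notes, controls, programs), with note times given in seconds. FakePerformedPart and the note values are illustrative, not part of partitura.

# A minimal sketch, not partitura's API: a stand-in exposing just the
# attributes save_performance_midi reads.
class FakePerformedPart:
    def __init__(self, notes):
        self.notes = notes      # dicts with midi_pitch / note_on / note_off (seconds)
        self.controls = []      # no control-change events
        self.programs = []      # empty -> the function inserts a default program_change

notes = [
    {"midi_pitch": 60, "note_on": 0.0, "note_off": 0.5, "velocity": 80},
    {"midi_pitch": 64, "note_on": 0.5, "note_off": 1.0},  # falls back to default_velocity
]
save_performance_midi(FakePerformedPart(notes), "out.mid", mpq=500000, ppq=480)

With mpq=500000 (120 BPM) and ppq=480, the 0.5-second note_off lands on tick 480, i.e. exactly one quarter note.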

main.py

Source: main.py (GitHub)

import os
import time
import importlib
import datetime as dt
from tqdm import tqdm
from utils import file_write, log_end_epoch, INF, valid_hyper_params
from data_path_constants import get_log_file_path, get_model_file_path

# NOTE: No global-level torch imports as the GPU-ID is set through code


def train(model, criterion, optimizer, reader, hyper_params, forgetting_events, track_events):
    import torch
    model.train()

    # Initializing metrics since we will calculate MSE on the train set on the fly
    metrics = {}

    # Initializations
    at = 0

    # Train for one epoch, batch-by-batch
    loop = tqdm(reader)
    for data, y in loop:
        # Empty the gradients
        model.zero_grad()
        optimizer.zero_grad()

        # Forward pass
        output = model(data)
        # Compute per-interaction loss
        loss = criterion(output, y, return_mean=False)
        criterion.anneal(1.0 / float(len(reader) * hyper_params['epochs']))
        # loop.set_description("Loss: {}".format(round(float(loss), 4)))

        # Track forgetting events
        if track_events:
            with torch.no_grad():
                if hyper_params['task'] == 'explicit':
                    forgetting_events[at: at + data[0].shape[0]] += loss.data
                else:
                    pos_output, neg_output = output
                    pos_output = pos_output.repeat(1, neg_output.shape[1])
                    num_incorrect = torch.sum((neg_output > pos_output).float(), -1)
                    forgetting_events[at: at + data[0].shape[0]] += num_incorrect.data

        at += data[0].shape[0]

        # Backward pass
        loss = torch.mean(loss)
        loss.backward()
        optimizer.step()
    return metrics, forgetting_events


def train_complete(hyper_params, train_reader, val_reader, model, model_class, track_events):
    import torch
    from loss import CustomLoss
    from eval import evaluate
    from torch_utils import is_cuda_available

    criterion = CustomLoss(hyper_params)
    optimizer = torch.optim.Adam(
        model.parameters(), lr=hyper_params['lr'], betas=(0.9, 0.98),
        weight_decay=hyper_params['weight_decay']
    )
    file_write(hyper_params['log_file'], str(model))
    file_write(hyper_params['log_file'], "\nModel Built!\nStarting Training...\n")
    try:
        best_MSE = float(INF)
        best_AUC = -float(INF)
        best_HR = -float(INF)
        decreasing_streak = 0
        forgetting_events = None
        if track_events:
            forgetting_events = torch.zeros(train_reader.num_interactions).float()
            if is_cuda_available: forgetting_events = forgetting_events.cuda()
        for epoch in range(1, hyper_params['epochs'] + 1):
            epoch_start_time = time.time()

            # Training for one epoch
            metrics, local_forgetted_count = train(
                model, criterion, optimizer, train_reader, hyper_params,
                forgetting_events, track_events
            )
            # Calculating the metrics on the validation set
            if (epoch % hyper_params['validate_every'] == 0) or (epoch == 1):
                metrics = evaluate(model, criterion, val_reader, hyper_params, train_reader.item_propensity)
                metrics['dataset'] = hyper_params['dataset']
                decreasing_streak += 1
                # Save best model on validation set
                if hyper_params['task'] == 'explicit' and metrics['MSE'] < best_MSE:
                    print("Saving model...")
                    torch.save(model.state_dict(), hyper_params['model_path'])
                    decreasing_streak, best_MSE = 0, metrics['MSE']
                elif hyper_params['task'] != 'explicit' and metrics['AUC'] > best_AUC:
                    print("Saving model...")
                    torch.save(model.state_dict(), hyper_params['model_path'])
                    decreasing_streak, best_AUC = 0, metrics['AUC']
                elif hyper_params['task'] != 'explicit' and metrics['HR@10'] > best_HR:
                    print("Saving model...")
                    torch.save(model.state_dict(), hyper_params['model_path'])
                    decreasing_streak, best_HR = 0, metrics['HR@10']

            log_end_epoch(hyper_params, metrics, epoch, time.time() - epoch_start_time, metrics_on='(VAL)')
            # Check if we need to early-stop
            if 'early_stop' in hyper_params and decreasing_streak >= hyper_params['early_stop']:
                file_write(hyper_params['log_file'], "Early stopping..")
                break

    except KeyboardInterrupt:
        print('Exiting from training early')

    # Load best model and return it for evaluation on test-set
    if os.path.exists(hyper_params['model_path']):
        model = model_class(hyper_params)
        if is_cuda_available: model = model.cuda()
        model.load_state_dict(torch.load(hyper_params['model_path']))

    model.eval()
    if track_events:
        forgetting_events = forgetting_events.cpu().numpy() / float(hyper_params['epochs'])
    return model, forgetting_events


def train_neumf(hyper_params, train_reader, val_reader, track_events):
    from pytorch_models.NeuMF import GMF, MLP, NeuMF
    from torch_utils import is_cuda_available, xavier_init

    initial_path = hyper_params['model_path']

    # Pre-Training the GMF Model
    hyper_params['model_path'] = initial_path[:-3] + "_gmf.pt"
    gmf_model = GMF(hyper_params)
    if is_cuda_available: gmf_model = gmf_model.cuda()
    xavier_init(gmf_model)
    gmf_model, _ = train_complete(hyper_params, train_reader, val_reader, gmf_model, GMF, track_events)

    # Pre-Training the MLP Model
    hyper_params['model_path'] = initial_path[:-3] + "_mlp.pt"
    mlp_model = MLP(hyper_params)
    if is_cuda_available: mlp_model = mlp_model.cuda()
    xavier_init(mlp_model)
    mlp_model, _ = train_complete(hyper_params, train_reader, val_reader, mlp_model, MLP, track_events)

    # Training the final NeuMF Model
    hyper_params['model_path'] = initial_path
    model = NeuMF(hyper_params)
    if is_cuda_available: model = model.cuda()
    model.init(gmf_model, mlp_model)
    model, forgetting_events = train_complete(hyper_params, train_reader, val_reader, model, NeuMF, track_events)

    # Remove GMF and MLP models
    mlp_path = initial_path[:-3] + "_mlp.pt"
    gmf_path = initial_path[:-3] + "_gmf.pt"
    os.remove(mlp_path) ; os.remove(gmf_path)

    return model, forgetting_events


def main_pytorch(hyper_params, track_events=False, eval_full=True):
    from load_data import load_data
    from eval import evaluate
    from torch_utils import is_cuda_available, xavier_init, get_model_class
    from loss import CustomLoss

    if not valid_hyper_params(hyper_params):
        print("Invalid task combination specified, exiting.")
        return

    # Load the data readers
    train_reader, test_reader, val_reader, hyper_params = load_data(hyper_params, track_events=track_events)
    file_write(hyper_params['log_file'], "\n\nSimulation run on: " + str(dt.datetime.now()) + "\n\n")
    file_write(hyper_params['log_file'], "Data reading complete!")
    file_write(hyper_params['log_file'], "Number of train batches: {:4d}".format(len(train_reader)))
    file_write(hyper_params['log_file'], "Number of validation batches: {:4d}".format(len(val_reader)))
    file_write(hyper_params['log_file'], "Number of test batches: {:4d}".format(len(test_reader)))

    # Initialize & train the model
    start_time = time.time()
    if hyper_params['model_type'] == 'NeuMF':
        model, forgetting_events = train_neumf(hyper_params, train_reader, val_reader, track_events)
    else:
        model = get_model_class(hyper_params)(hyper_params)
        if is_cuda_available: model = model.cuda()
        xavier_init(model)
        model, forgetting_events = train_complete(
            hyper_params, train_reader, val_reader, model,
            get_model_class(hyper_params), track_events
        )

    metrics = {}
    if eval_full:
        # Calculating MSE on test-set
        criterion = CustomLoss(hyper_params)
        metrics = evaluate(model, criterion, test_reader, hyper_params, train_reader.item_propensity, test=True)
        log_end_epoch(hyper_params, metrics, 'final', time.time() - start_time, metrics_on='(TEST)')

    # We have no space left for storing the models
    os.remove(hyper_params['model_path'])
    del model, train_reader, test_reader, val_reader
    return metrics, forgetting_events


def main_pop_rec(hyper_params):
    from load_data import load_data
    from eval import evaluate
    from loss import CustomLoss
    from pytorch_models.pop_rec import PopRec

    # Load the data readers
    train_reader, test_reader, val_reader, hyper_params = load_data(hyper_params)
    file_write(hyper_params['log_file'], "\n\nSimulation run on: " + str(dt.datetime.now()) + "\n\n")
    file_write(hyper_params['log_file'], "Data reading complete!")
    file_write(hyper_params['log_file'], "Number of test batches: {:4d}\n\n".format(len(test_reader)))

    # Make the model
    start_time = time.time()
    model = PopRec(hyper_params, train_reader.get_item_count_map())

    # Calculating MSE on test-set
    criterion = CustomLoss(hyper_params)
    metrics = evaluate(model, criterion, test_reader, hyper_params, train_reader.item_propensity, test=True)
    log_end_epoch(hyper_params, metrics, 'final', time.time() - start_time, metrics_on='(TEST)')

    del model, train_reader, test_reader, val_reader
    return metrics, None


def main(hyper_params, gpu_id=None):
    if not valid_hyper_params(hyper_params):
        print("Invalid task combination specified, exiting.")
        return

    # Setting GPU ID for running entire code ## Very Very Imp.
    if gpu_id is not None:
        os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu_id)
        # torch.cuda.set_device(int(gpu_id))
        # torch.cuda.empty_cache()

    # Set dataset specific hyper-params
    hyper_params.update(
        importlib.import_module('data_hyperparams.{}'.format(hyper_params['dataset'])).hyper_params
    )

    # Learning rate is "highly" dataset AND model specific
    if 'lr' not in hyper_params:
        if hyper_params['model_type'] == 'SASRec': hyper_params['lr'] = 0.006
        elif hyper_params['model_type'] == 'SVAE': hyper_params['lr'] = 0.02
        elif hyper_params['model_type'] == 'MVAE': hyper_params['lr'] = 0.01
        else: hyper_params['lr'] = 0.008

    hyper_params['log_file'] = get_log_file_path(hyper_params)
    hyper_params['model_path'] = get_model_file_path(hyper_params)

    if hyper_params['model_type'] == 'pop_rec': main_pop_rec(hyper_params)
    else: main_pytorch(hyper_params)
    # torch.cuda.empty_cache()


if __name__ == '__main__':
    from hyper_params import hyper_params
    ...
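The entry point at the bottom expects a project-specific hyper_params dict. A hedged sketch of a direct invocation follows; every key and the data_hyperparams.ml_100k module name are assumptions inferred from the lookups in the code above, not guaranteed names.

# Hypothetical invocation; keys inferred from the lookups in main() and
# train_complete() above.
hyper_params = {
    'dataset': 'ml_100k',       # must match a data_hyperparams.<dataset> module
    'model_type': 'NeuMF',      # routes through train_neumf()
    'task': 'implicit',
    'epochs': 50,
    'validate_every': 5,
    'weight_decay': 1e-6,
    'early_stop': 10,
}
main(hyper_params, gpu_id=0)    # sets CUDA_VISIBLE_DEVICES before any torch import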

ml64.py

Source: ml64.py (GitHub)

from chiptunesak.base import *
from chiptunesak import chirp, mchirp
from chiptunesak import constants

'''
This file contains functions required to export MidiSimple songs to ML64 format.
This is the format created by Stirring Dragon Games for music content contributed
by those who backed the Unknown Realm kickstarter at the bard tier.
A few years ago, Knapp and Youd wrote a midi->ml64 tool in Python 2 for the Unknown
Realm developers. ChiptuneSAK (in Python 3) replaces that tool.
Note: We haven't seen the game or any source code for it. We know they're cranking
away at it, but have no details as to when the game will be released. Please
direct all questions regarding Unknown Realm to Stirring Dragon Games
(https://stirringdragon.games)
'''

ml64_durations = {
    Fraction(6, 1): '1d', Fraction(4, 1): '1', Fraction(3, 1): '2d', Fraction(2, 1): '2',
    Fraction(3, 2): '4d', Fraction(1, 1): '4', Fraction(3, 4): '8d', Fraction(1, 2): '8',
    Fraction(1, 4): '16'
}


def pitch_to_ml64_note_name(note_num, octave_offset=0):
    """
    Gets note name for a given MIDI pitch
    """
    if not 0 <= note_num <= 127:
        raise ChiptuneSAKValueError("Illegal note number %d" % note_num)
    octave_num = ((note_num - constants.C0_MIDI_NUM) // 12) + octave_offset
    pitch = note_num % 12
    return "%s%d" % (constants.PITCHES[pitch], octave_num)


def make_ml64_notes(note_name, duration, ppq):
    durs = decompose_duration(duration, ppq, ml64_durations)
    if note_name == 'r' or note_name == 'c':
        retval = ''.join("%s(%s)" % (note_name, ml64_durations[f]) for f in durs)
    else:
        retval = "%s(%s)" % (note_name, ml64_durations[durs[0]])
        if len(durs) > 1:
            retval += ''.join("c(%s)" % (ml64_durations[f]) for f in durs[1:])
    return retval


def ml64_sort_order(c):
    """
    Sort function for measure contents.
    Items are sorted by time and then, for equal times, in this order:
        * Patch Change
        * Tempo
        * Notes and rests
    """
    if isinstance(c, chirp.Note):
        return (c.start_time, 10)
    elif isinstance(c, Rest):
        return (c.start_time, 10)
    elif isinstance(c, MeasureMarker):
        return (c.start_time, 1)
    elif isinstance(c, TempoEvent):
        return (c.start_time, 3)
    elif isinstance(c, ProgramEvent):
        return (c.start_time, 2)
    else:
        return (c.start_time, 5)


def events_to_ml64(events, song, last_continue=False):
    """
    Takes a list of events (such as a measure or a track) and converts it to
    ML64 commands. If the previous list (such as the previous measure) had
    notes that were not completed, set last_continue.

    :param events:
    :type events:
    :param song:
    :type song:
    :param last_continue:
    :type last_continue:
    :return:
    :rtype: tuple
    """
    content = []
    for e in events:
        if isinstance(e, chirp.Note):
            if last_continue:
                tmp_note = make_ml64_notes('c', e.duration, song.metadata.ppq)
            else:
                tmp_note = make_ml64_notes(pitch_to_ml64_note_name(e.note_num), e.duration, song.metadata.ppq)
            content.append(tmp_note)
            last_continue = e.tied_from
        elif isinstance(e, Rest):
            tmp_note = make_ml64_notes('r', e.duration, song.metadata.ppq)
            content.append(tmp_note)
            last_continue = False
        elif isinstance(e, MeasureMarker):
            content.append('[m%d]' % e.measure_number)
        elif isinstance(e, ProgramEvent):
            content.append('i(%d)' % e.program)
    return (content, last_continue)


class ML64(ChiptuneSAKIO):
    @classmethod
    def cts_type(cls):
        return "ML64"

    def __init__(self):
        ChiptuneSAKIO.__init__(self)
        self.set_options(format='standard')

    @property
    def format(self):
        return self.get_option('format')[0].lower()

    def to_bin(self, song, **kwargs):
        """
        Generates an ML64 string for a song

        :param song: song
        :type song: ChirpSong or mchirp.MChirpSong
        :return: ML64 encoding of song
        :rtype: str

        :keyword options:
            * **format** (string) - 'compact', 'standard', or 'measures';
              'measures' requires MChirp; the others convert from Chirp
        """
        self.set_options(**kwargs)
        if isinstance(song, chirp.ChirpSong):
            if self.format == 'm':
                raise ChiptuneSAKTypeError("Cannot export Chirp song to Measures format")
            else:
                return self.export_chirp_to_ml64(song)
        elif isinstance(song, mchirp.MChirpSong):
            if self.format != 'm':
                tmp_song = chirp.ChirpSong(song)
                tmp_song.quantize(*tmp_song.estimate_quantization())
                return self.export_chirp_to_ml64(tmp_song)
            else:
                return self.export_mchirp_to_ml64(song)
        else:
            raise ChiptuneSAKTypeError(f"Cannot export object of type {str(type(song))} to ML64")

    def to_file(self, song, filename, **kwargs):
        """
        Writes ML64 to a file

        :param song: song
        :type song: ChirpSong or mchirp.MChirpSong
        :return: ML64 encoding of song
        :rtype: str

        :keyword options: see `to_bin()`
        """
        with open(filename, 'w') as f:
            f.write(self.to_bin(song, **kwargs))

    def export_chirp_to_ml64(self, chirp_song):
        """
        Export song to ML64 format, with a minimum number of notes, either
        with or without measure comments. With measure comments, the comments
        appear within the measure but are not guaranteed to be exactly at the
        beginning of the measure, as tied notes will take precedence. In
        compact mode, the ML64 emitted is almost as small as possible.

        :param chirp_song:
        :type chirp_song:
        """
        output = []
        if not chirp_song.is_quantized():
            raise ChiptuneSAKQuantizationError("ChirpSong must be quantized for export to ML64")
        if any(t.qticks_notes < chirp_song.metadata.ppq // 4 for t in chirp_song.tracks):
            raise ChiptuneSAKQuantizationError("ChirpSong must be quantized to 16th notes or larger for ML64")
        if chirp_song.is_polyphonic():
            raise ChiptuneSAKPolyphonyError("All tracks must be non-polyphonic for export to ML64")
        mode = self.format
        output.append('ML64(1.3)')
        output.append('song(1)')
        output.append('tempo(%d)' % chirp_song.metadata.qpm)
        for it, t in enumerate(chirp_song.tracks):
            output.append('track(%d)' % (it + 1))
            track_events = []
            last_note_end = 0
            # Create a list of events for the entire track
            for n in t.notes:
                if n.start_time > last_note_end:
                    track_events.append(Rest(last_note_end, n.start_time - last_note_end))
                track_events.append(n)
                last_note_end = n.start_time + n.duration
            track_events.extend(t.program_changes)
            if mode == 's':  # Add measures for standard format
                last_note_end = max(n.start_time + n.duration for t in chirp_song.tracks for n in t.notes)
                measures = [m.start_time for m in chirp_song.measures_and_beats() if m.beat == 1]
                for im, m in enumerate(measures):
                    if m < last_note_end:
                        track_events.append(MeasureMarker(m, im + 1))
            track_events.sort(key=ml64_sort_order)
            # Now send the entire list of events to the ml64 creator
            track_content, *_ = events_to_ml64(track_events, chirp_song)
            output.append(''.join(track_content).strip())
            output.append('track(-)')
        output.append('song(-)')
        output.append('ML64(-)')
        return '\n'.join(output)

    def export_mchirp_to_ml64(self, mchirp_song):
        """
        Export the song in ML64 format, grouping notes into measures. The
        measure comments are guaranteed to appear at the beginning of each
        measure; tied notes will be split to accommodate the measure markers.

        :param mchirp_song: An mchirp song
        :type mchirp_song: MChirpSong
        """
        output = []
        output.append('ML64(1.3)')
        output.append('song(1)')
        output.append('tempo(%d)' % mchirp_song.metadata.qpm)
        for it, t in enumerate(mchirp_song.tracks):
            output.append('track(%d)' % (it + 1))
            measures = t.measures
            last_continue = False
            for im, measure in enumerate(measures):
                measure_content, last_continue = events_to_ml64(measure.events, mchirp_song, last_continue)
                output.append(''.join(measure_content))
            output.append('track(-)')
        output.append('song(-)')
        output.append('ML64(-)')
        ...
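Assuming chiptunesak is installed and chirp_song is a quantized, non-polyphonic chirp.ChirpSong obtained elsewhere (the preconditions export_chirp_to_ml64 checks for), usage of the ML64 exporter might look like the sketch below; the file name is a placeholder.

# A minimal usage sketch of the ML64 class defined above.
exporter = ML64()
# 'compact', 'standard', or 'measures' ('measures' requires an MChirpSong)
ml64_text = exporter.to_bin(chirp_song, format='standard')
exporter.to_file(chirp_song, 'song.ml64', format='standard')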

test_tap.py

Source: test_tap.py (GitHub)

...
        <div id="b" style="background: pink; width: 50px; height: 50px">b</div>
        """
    )
    page.tap("#a")
    element_handle = track_events(page.query_selector("#b"))
    page.tap("#b")
    assert element_handle.json_value() == [
        "pointerover",
        "pointerenter",
        "pointerdown",
        "touchstart",
        "pointerup",
        "pointerout",
        "pointerleave",
        "touchend",
        "mouseover",
        "mouseenter",
        "mousemove",
        "mousedown",
        "mouseup",
        "click",
    ]


def test_should_not_send_mouse_events_touchstart_is_canceled(page):
    page.set_content("hello world")
    page.evaluate(
        """() => {
        // touchstart is not cancelable unless passive is false
        document.addEventListener('touchstart', t => t.preventDefault(), {passive: false});
    }"""
    )
    events_handle = track_events(page.query_selector("body"))
    page.tap("body")
    assert events_handle.json_value() == [
        "pointerover",
        "pointerenter",
        "pointerdown",
        "touchstart",
        "pointerup",
        "pointerout",
        "pointerleave",
        "touchend",
    ]


def test_should_not_send_mouse_events_touchend_is_canceled(page):
    page.set_content("hello world")
    page.evaluate(
        """() => {
        // touchstart is not cancelable unless passive is false
        document.addEventListener('touchend', t => t.preventDefault());
    }"""
    )
    events_handle = track_events(page.query_selector("body"))
    page.tap("body")
    assert events_handle.json_value() == [
        "pointerover",
        "pointerenter",
        "pointerdown",
        "touchstart",
        "pointerup",
        "pointerout",
        "pointerleave",
        "touchend",
    ]


def track_events(target: ElementHandle) -> JSHandle:
    return target.evaluate_handle(
        """target => {
        const events = [];
        for (const event of [
            'mousedown', 'mouseenter', 'mouseleave', 'mousemove', 'mouseout', 'mouseover', 'mouseup', 'click',
            'pointercancel', 'pointerdown', 'pointerenter', 'pointerleave', 'pointermove', 'pointerout', 'pointerover', 'pointerup',
            'touchstart', 'touchend', 'touchmove', 'touchcancel',])
            target.addEventListener(event, () => events.push(event), false);
        return events;
    }"""
    ...
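The track_events helper at the bottom of this module is easy to reuse in other Playwright sync-API tests. A small sketch follows, assuming the standard page fixture from pytest-playwright; the test name and selector are illustrative.

# Hypothetical additional test reusing the track_events helper defined above.
def test_click_produces_mouse_events(page):
    page.set_content('<button id="go">go</button>')
    # Collect events fired on the button via the helper
    events_handle = track_events(page.query_selector("#go"))
    page.click("#go")
    assert "click" in events_handle.json_value()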
