Best Python code snippet using tempest_python
test_bess_chpp_hwt.py
Source:test_bess_chpp_hwt.py  
...
    assert (model.state == [123,2,4,11,1,900,1800,2700,55,17]).all()
    model.bess.soc_min = 0
    model.bess.soc_max = 1

def test_get_feasible_actions():
    def reset_state(mode, dwell_time, temperature, stored_energy=0):
        model.bess.stored_energy = stored_energy
        model.demand.demand = 11
        model.chpp.mode = mode
        model.chpp.dwell_time = dwell_time
        model.chpp.min_off_time = 1800
        model.chpp.min_on_time = 1800
        model.hwt.temperature = temperature
        model.hwt.ambient_temperature = 17

    bess_actions = model.bess.feasible_actions

    reset_state(1, 900, 70)
    model.eval()
    assert np.isin(model.determine_feasible_actions(), [-5500] + bess_actions).all() # must remain running
    assert len(model.feasible_actions) == len([-5500] + bess_actions)

    # top, lower boundary region
    reset_state(1, 900, 79.5)
    assert np.isin(model.determine_feasible_actions(), [-1000, -5500] + bess_actions.transpose().reshape(-1, 1).repeat(2, axis=1)).all() # may turn off
    reset_state(1, 900, 79.5)
    model.train()
    assert np.isin(model.determine_feasible_actions(), [-5500] + bess_actions).all() # must remain running
    reset_state(0, 1800, 79.5)
    assert np.isin(model.determine_feasible_actions(), [0] + bess_actions).all() # must remain stopped

    reset_state(0, 1800, 79.5)
    model.eval()
    assert np.isin(model.determine_feasible_actions(), [0, -4000] + bess_actions.transpose().reshape(-1, 1).repeat(2, axis=1)).all() # may turn on

    # top, upper boundary region
    reset_state(1, 900, 80.5)
    assert np.isin(model.determine_feasible_actions(), [-1000, -5500] + bess_actions.transpose().reshape(-1, 1).repeat(2, axis=1)).all() # may turn off
    reset_state(1, 900, 80.5)
    model.train()
    assert np.isin(model.determine_feasible_actions(), [-1000] + bess_actions).all() # must turn off

    # top, above boundary region
    reset_state(1, 900, 81.5)
    model.eval()
    assert np.isin(model.determine_feasible_actions(), [-1000] + bess_actions).all() # must turn off
    reset_state(1, 900, 80.5)
    model.train()
    assert np.isin(model.determine_feasible_actions(), [-1000] + bess_actions).all() # must turn off

    # bottom, upper boundary region
    reset_state(0, 900, 60.5)
    model.eval()
    assert np.isin(model.determine_feasible_actions(), [-4000, 0] + bess_actions.transpose().reshape(-1, 1).repeat(2, axis=1)).all() # may turn on
    assert len(model.feasible_actions) == len(np.unique([-4000, 0] + bess_actions.transpose().reshape(-1, 1).repeat(2, axis=1)))
    reset_state(0, 900, 60.5)
    model.train()
    assert np.isin(model.determine_feasible_actions(), [0] + bess_actions).all() # must remain off
    reset_state(1, 1800, 60.5)
    assert np.isin(model.determine_feasible_actions(), [-5500] + bess_actions).all() # must remain running

    reset_state(1, 1800, 60.5)
    model.eval()
    assert np.isin(model.determine_feasible_actions(), [-5500, -1000] + bess_actions.transpose().reshape(-1, 1).repeat(2, axis=1)).all() # may turn off

    # bottom, lower boundary region
    reset_state(0, 900, 59.5)
    assert np.isin(model.determine_feasible_actions(), [-4000, 0] + bess_actions.transpose().reshape(-1, 1).repeat(2, axis=1)).all() # may turn on

    reset_state(0, 900, 59.5)
    model.train()
    assert np.isin(model.determine_feasible_actions(), [-4000] + bess_actions).all() # must turn on

    # bottom, below boundary region
    reset_state(0, 900, 58.5)
    model.eval()
    assert np.isin(model.determine_feasible_actions(), [-4000] + bess_actions).all() # must turn on
    reset_state(0, 900, 58.5)
    model.train()
    assert np.isin(model.determine_feasible_actions(), [-4000] + bess_actions).all() # must turn on
    model.eval()

    # brute force
    np.random.seed(1924)
    for i in range(1000):
        state = model.sample_state()
        model.feasible_actions
    model.bess.soc_min = 0
    model.bess.soc_max = 1

def test_state_transition():
    def reset_state(mode, dwell_time, temperature, stored_energy=0):
        model.bess.stored_energy = stored_energy
        model.demand.demand = 11
        model.chpp.mode = mode
        model.chpp.dwell_time = dwell_time
        model.chpp.min_off_time = 1800
        model.chpp.min_on_time = 1800
        model.hwt.temperature = temperature
        model.hwt.ambient_temperature = 17
        model._feasible_actions = None

    model.eval()
    model.bess.soc_min = 0
    model.bess.soc_max = 1
    reset_state(0, 2700, 75, 0)
    state, interaction = model.transition(-5500) # chpp needs to run, since bess is empty
    assert model.chpp.mode == 1
    assert bess.stored_energy == 0
    assert interaction[0] == 4000
    reset_state(1, 2700, 75, 0)
    state, interaction = model.transition(-5000) # chpp needs to run, since bess is empty
    assert model.chpp.mode == 1
    assert bess.stored_energy == 500 * 900
    assert interaction[0] == 5000

    reset_state(1, 2700, 75, 5000*900)
    state, interaction = model.transition(-5000) # high tank temp., use bess and switch off chpp (=1000W)
    assert model.chpp.mode == 0
    assert bess.stored_energy == 1000*900
    assert interaction[0] == 5000
    reset_state(0, 2700, 65, model.bess.capacity)
    state, interaction = model.transition(-5500) # chpp should run, since temp. is low
    assert model.chpp.mode == 1
    assert bess.stored_energy == model.bess.capacity - (5500 - 4000) * 900
    assert interaction[0] == 5500
    reset_state(1, 2700, 65, 5000*900)
    state, interaction = model.transition(-5000) # chpp should run, since temp. is low
    assert model.chpp.mode == 1
    assert bess.stored_energy == 5500 * 900
    assert interaction[0] == 5000

    # top, lower boundary region
    reset_state(1, 900, 79.5)
    state, interaction = model.transition(-1000)
    assert model.chpp.mode == 0 # may turn off
    assert bess.stored_energy == 0
    assert interaction[0] == 1000
    reset_state(1, 900, 79.5)
    model.train()
    state, interaction = model.transition(-1000)
    assert model.chpp.mode == 1 # must remain running, BESS will cover the difference
    assert bess.stored_energy == (5500-1000) * 900
    assert interaction[0] == 1000
    reset_state(1, 900, 79.5, model.bess.capacity)
    state, interaction = model.transition(-1000)
    assert model.chpp.mode == 1 # must remain running, BESS cannot cover the difference
    assert np.isclose(bess.stored_energy, model.bess.capacity)
    assert interaction[0] == 5500
    reset_state(0, 1800, 79.5)
    state, interaction = model.transition(-4000)
    assert model.chpp.mode == 0 # must remain stopped
    assert bess.stored_energy == 0
    assert interaction[0] == 0

    reset_state(0, 1800, 79.5)
    model.eval()
    state, interaction = model.transition(-4000)
    assert model.chpp.mode == 1 # may turn on, as battery is empty
    assert bess.stored_energy == 0
    assert interaction[0] == 4000
    reset_state(0, 1800, 79.5, model.bess.capacity)
    model.eval()
    state, interaction = model.transition(-4000)
    assert model.chpp.mode == 0 # may turn on, but won't as battery can be discharged
    assert bess.stored_energy == model.bess.capacity - 4000 * 900
    assert interaction[0] == 4000

    # top, upper boundary region
    reset_state(1, 900, 80.5)
    state, interaction = model.transition(-1000)
    assert model.chpp.mode == 0 # may turn off
    assert bess.stored_energy == 0
    assert interaction[0] == 1000
    reset_state(1, 900, 80.5)
    model.train()
    state, interaction = model.transition(-5500)
    assert model.chpp.mode == 0 # must turn off
    assert bess.stored_energy == 0
    assert interaction[0] == 1000 # -1000 is closer to -5500 than 0

    # top, above boundary region
    reset_state(1, 900, 81.5)
    model.eval()
    state, interaction = model.transition(-5500)
    assert model.chpp.mode == 0 # must turn off
    assert bess.stored_energy == 0
    assert interaction[0] == 1000
    reset_state(1, 900, 80.5)
    model.train()
    state, interaction = model.transition(-5500)
    assert model.chpp.mode == 0 # must turn off
    assert bess.stored_energy == 0
    assert interaction[0] == 1000

    ###
    # bottom, upper boundary region
    reset_state(0, 900, 60.5)
    model.eval()
    state, interaction = model.transition(-4000)
    assert model.chpp.mode == 1 # may turn on
    assert bess.stored_energy == 0
    assert interaction[0] == 4000
    reset_state(0, 900, 60.5)
    model.train()
    state, interaction = model.transition(-4000)
    assert model.chpp.mode == 0 # must remain off
    assert bess.stored_energy == 0
    assert interaction[0] == 0
    reset_state(1, 1800, 60.5)
    state, interaction = model.transition(-1000)
    assert model.chpp.mode == 1 # must remain running, bess will cover the difference
    assert bess.stored_energy == (5500-1000)*900
    assert interaction[0] == 1000

    reset_state(1, 1800, 60.5, model.bess.capacity)
    state, interaction = model.transition(-1000)
    assert model.chpp.mode == 1 # must remain running, bess cannot cover the difference
    assert bess.stored_energy == model.bess.capacity
    assert interaction[0] == 5500
    reset_state(1, 1800, 60.5)
    model.eval()
    state, interaction = model.transition(-1000)
    assert model.chpp.mode == 1 # may turn off, but won't as battery can charge
    assert bess.stored_energy == (5500-1000) * 900
    assert interaction[0] == 1000
    reset_state(1, 1800, 60.5, model.bess.capacity)
    state, interaction = model.transition(-1000)
    assert model.chpp.mode == 0 # may turn off, as battery is already full
    assert bess.stored_energy == model.bess.capacity
    assert interaction[0] == 1000

    # bottom, lower boundary region
    reset_state(0, 900, 59.5)
    state, interaction = model.transition(-4000)
    assert model.chpp.mode == 1 # may turn on
    assert bess.stored_energy == 0
    assert interaction[0] == 4000

    reset_state(0, 900, 59.5)
    model.train()
    state, interaction = model.transition(-0)
    assert model.chpp.mode == 1 # must turn on, but bess has free capacity
    assert bess.stored_energy == 4000 * 900
    assert interaction[0] == 0
    reset_state(0, 900, 59.5, model.bess.capacity)
    state, interaction = model.transition(-0)
    assert model.chpp.mode == 1 # must turn on, and bess is full
    assert bess.stored_energy == model.bess.capacity
    assert interaction[0] == 4000

    # bottom, below boundary region
    reset_state(0, 900, 58.5)
    model.eval()
    state, interaction = model.transition(-0)
    assert model.chpp.mode == 1 # must turn on, but bess has free capacity
    assert bess.stored_energy == 4000 * 900
    assert interaction[0] == 0
    reset_state(0, 900, 58.5, model.bess.capacity)
    state, interaction = model.transition(-0)
    assert model.chpp.mode == 1 # must turn on, and bess is full
    assert bess.stored_energy == model.bess.capacity
    assert interaction[0] == 4000
    reset_state(0, 900, 58.5)
    model.train()
    state, interaction = model.transition(-0)
    assert model.chpp.mode == 1 # must turn on, but bess has free capacity
    assert bess.stored_energy == 4000 * 900
    assert interaction[0] == 0
    reset_state(0, 900, 58.5, model.bess.capacity)
    state, interaction = model.transition(-0)
    assert model.chpp.mode == 1 # must turn on, and bess is full
    assert bess.stored_energy == model.bess.capacity
    assert interaction[0] == 4000
...
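These tests lean on two conventions: a module-level `model` object shared by every test, and a local `reset_state` helper that pins all relevant component attributes before each block of assertions, so no assertion depends on state left behind by the previous one. Judging by the assertions, `model.eval()` allows the optional "may turn on/off" actions while `model.train()` forces the deterministic choice, which is why most boundary scenarios appear twice. Below is a minimal, self-contained sketch of that reset-and-assert pattern; the `_Chpp` and `_Model` stubs and their toy feasibility rule are hypothetical stand-ins, not the model used in this file.

# Minimal, self-contained sketch of the reset-and-assert pattern
# (hypothetical stubs; the real `model` fixture is built elsewhere).
import numpy as np

class _Chpp:
    def __init__(self):
        self.mode = 0            # 0 = off, 1 = running
        self.dwell_time = 0      # seconds spent in the current mode
        self.min_on_time = 1800  # must keep running this long once started

class _Model:
    def __init__(self):
        self.chpp = _Chpp()

    def determine_feasible_actions(self):
        # Toy rule: a CHPP that has not met its minimum on-time must keep running.
        if self.chpp.mode == 1 and self.chpp.dwell_time < self.chpp.min_on_time:
            return np.array([-5500])
        return np.array([0, -5500])

def reset_state(model, mode, dwell_time):
    """Pin the CHPP state so each assertion starts from a known point."""
    model.chpp.mode = mode
    model.chpp.dwell_time = dwell_time

model = _Model()

reset_state(model, mode=1, dwell_time=900)
assert np.isin(model.determine_feasible_actions(), [-5500]).all()    # must remain running

reset_state(model, mode=1, dwell_time=1800)
assert np.isin(model.determine_feasible_actions(), [0, -5500]).all() # may turn off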
CS598_Mohan_Sun_HW7_RNN_model.py
Source:CS598_Mohan_Sun_HW7_RNN_model.py
...
        self.lstm = nn.LSTMCell(in_size, out_size)
        self.out_size = out_size
        self.h = None
        self.c = None

    def reset_state(self):
        self.h = None
        self.c = None

    def forward(self, x):
        batch_size = x.size(0)
        if self.h is None:
            state_size = [batch_size, self.out_size]
            if is_cuda:
                self.c = Variable(torch.zeros(state_size)).cuda()
                self.h = Variable(torch.zeros(state_size)).cuda()
            else:
                self.c = Variable(torch.zeros(state_size))
                self.h = Variable(torch.zeros(state_size))
        self.h, self.c = self.lstm(x, (self.h, self.c))
        return self.h

class LockedDropout(nn.Module):
    def __init__(self):
        super(LockedDropout, self).__init__()
        self.m = None

    def reset_state(self):
        self.m = None

    def forward(self, x, dropout=0.5, train=True):
        if not train:
            return x
        if self.m is None:
            self.m = x.data.new(x.size()).bernoulli_(1 - dropout)
        if is_cuda:
            mask = Variable(self.m, requires_grad=False).cuda() / (1 - dropout)
        else:
            mask = Variable(self.m, requires_grad=False) / (1 - dropout)
        return mask * x

class RNN_model(nn.Module):
    def __init__(self, vocab_size, no_of_hidden_units, switches=[False, False]):
        super(RNN_model, self).__init__()
        self.switches = switches
        self.embedding = nn.Embedding(vocab_size, no_of_hidden_units)  # padding_idx=0
        self.lstm1 = StatefulLSTM(no_of_hidden_units, no_of_hidden_units)
        self.bn_lstm1 = nn.BatchNorm1d(no_of_hidden_units)
        self.dropout1 = LockedDropout() if not switches[0] else nn.Dropout(p=0.5)
        if switches[1]:
            self.lstm2 = StatefulLSTM(no_of_hidden_units, no_of_hidden_units)
            self.bn_lstm2 = nn.BatchNorm1d(no_of_hidden_units)
            self.dropout2 = LockedDropout() if not switches[0] else nn.Dropout(p=0.5)
        self.fc_output = nn.Linear(no_of_hidden_units, 1)
        self.loss = nn.BCEWithLogitsLoss()

    def reset_state(self):
        self.lstm1.reset_state()
        if not self.switches[0]:
            self.dropout1.reset_state()
        if self.switches[1]:
            self.lstm2.reset_state()
            if not self.switches[0]:
                self.dropout2.reset_state()

    def forward(self, x, t, train=True):
        embed = self.embedding(x)  # [batch_size, time_steps, features]
        no_of_timesteps = embed.shape[1]
        self.reset_state()
        outputs = []
        for i in range(no_of_timesteps):
            h = self.lstm1(embed[:, i, :])
            h = self.bn_lstm1(h)
            dargs = [h, 0.5, train] if not self.switches[0] else [h]
            h = self.dropout1(*dargs)
            if self.switches[1]:
                h = self.lstm2(h)
                h = self.bn_lstm2(h)
                dargs = [h, 0.3, train] if not self.switches[0] else [h]
                h = self.dropout2(*dargs)
            outputs.append(h)
        outputs = torch.stack(outputs)      # [time_steps, batch_size, features]
        outputs = outputs.permute(1, 2, 0)  # [batch_size, features, time_steps]
        pool = nn.MaxPool1d(no_of_timesteps)
        h = pool(outputs)
        h = h.view(h.size(0), -1)
        # h = self.dropout(h)
        h = self.fc_output(h)
        return self.loss(h[:, 0], t), h[:, 0]

class RNN_model_GloVe(nn.Module):
    def __init__(self, no_of_hidden_units, switches=[False, False]):
        super(RNN_model_GloVe, self).__init__()
        self.switches = switches
        self.lstm1 = StatefulLSTM(300, no_of_hidden_units)
        self.bn_lstm1 = nn.BatchNorm1d(no_of_hidden_units)
        self.dropout1 = LockedDropout() if not switches[0] else nn.Dropout(p=0.5)
        if switches[1]:
            self.lstm2 = StatefulLSTM(no_of_hidden_units, no_of_hidden_units)
            self.bn_lstm2 = nn.BatchNorm1d(no_of_hidden_units)
            self.dropout2 = LockedDropout() if not switches[0] else nn.Dropout(p=0.5)
        self.fc_output = nn.Linear(no_of_hidden_units, 1)
        self.loss = nn.BCEWithLogitsLoss()

    def reset_state(self):
        self.lstm1.reset_state()
        if not self.switches[0]:
            self.dropout1.reset_state()
        if self.switches[1]:
            self.lstm2.reset_state()
            if not self.switches[0]:
                self.dropout2.reset_state()

    def forward(self, x, t, train=True):
        no_of_timesteps = x.shape[1]
        self.reset_state()
        outputs = []
        for i in range(no_of_timesteps):
            h = self.lstm1(x[:, i, :])
            h = self.bn_lstm1(h)
            dargs = [h, 0.5, train] if not self.switches[0] else [h]
            h = self.dropout1(*dargs)
            if self.switches[1]:
                h = self.lstm2(h)
                h = self.bn_lstm2(h)
                dargs = [h, 0.3, train] if not self.switches[0] else [h]
                h = self.dropout2(*dargs)
            outputs.append(h)
        outputs = torch.stack(outputs)      # [time_steps, batch_size, features]
        outputs = outputs.permute(1, 2, 0)  # [batch_size, features, time_steps]
        pool = nn.MaxPool1d(no_of_timesteps)
        h = pool(outputs)
        h = h.view(h.size(0), -1)
        # h = self.dropout(h)
        h = self.fc_output(h)
        return self.loss(h[:, 0], t), h[:, 0]

class RNN_language_model(nn.Module):
    def __init__(self, vocab_size, no_of_hidden_units):
        super(RNN_language_model, self).__init__()
        self.embedding = nn.Embedding(vocab_size, no_of_hidden_units)
        self.lstm1 = StatefulLSTM(no_of_hidden_units, no_of_hidden_units)
        self.bn_lstm1 = nn.BatchNorm1d(no_of_hidden_units)
        self.dropout1 = LockedDropout()
        self.lstm2 = StatefulLSTM(no_of_hidden_units, no_of_hidden_units)
        self.bn_lstm2 = nn.BatchNorm1d(no_of_hidden_units)
        self.dropout2 = LockedDropout()
        self.lstm3 = StatefulLSTM(no_of_hidden_units, no_of_hidden_units)
        self.bn_lstm3 = nn.BatchNorm1d(no_of_hidden_units)
        self.dropout3 = LockedDropout()
        self.decoder = nn.Linear(no_of_hidden_units, vocab_size)
        self.loss = nn.CrossEntropyLoss()
        self.vocab_size = vocab_size

    def reset_state(self):
        self.lstm1.reset_state()
        self.dropout1.reset_state()
        self.lstm2.reset_state()
        self.dropout2.reset_state()
        self.lstm3.reset_state()
        self.dropout3.reset_state()

    def forward(self, x, train=True):
        embed = self.embedding(x)
        no_of_timesteps = embed.shape[1] - 1
        self.reset_state()
        outputs = []
        for i in range(no_of_timesteps):
            h = self.lstm1(embed[:, i, :])
            h = self.bn_lstm1(h)
            h = self.dropout1(h, 0.3, train)

            h = self.lstm2(h)
            h = self.bn_lstm2(h)
            h = self.dropout2(h, 0.3, train)

            h = self.lstm3(h)
            h = self.bn_lstm3(h)
            h = self.dropout3(h, 0.3, train)

            h = self.decoder(h)
            outputs.append(h)
        outputs = torch.stack(outputs)            # time_steps, batch_size, vocab_size
        prediction = outputs.permute(1, 0, 2)     # batch, time, vocab
        outputs = outputs.permute(1, 2, 0)        # batch, vocab, time
        if train:
            prediction = prediction.contiguous().view(-1, self.vocab_size)
            target = x[:, 1:].contiguous().view(-1)
            loss = self.loss(prediction, target)
            return loss, outputs
        else:
            return outputs

class RNN_model_modified(nn.Module):
    def __init__(self, vocab_size, no_of_hidden_units, switches=False):
        super(RNN_model_modified, self).__init__()
        self.switches = switches
        self.embedding = nn.Embedding(vocab_size, no_of_hidden_units)  # padding_idx=0
        self.lstm1 = StatefulLSTM(no_of_hidden_units, no_of_hidden_units)
        self.bn_lstm1 = nn.BatchNorm1d(no_of_hidden_units)
        self.dropout1 = LockedDropout() if not switches else nn.Dropout(p=0.5)
        self.lstm2 = StatefulLSTM(no_of_hidden_units, no_of_hidden_units)
        self.bn_lstm2 = nn.BatchNorm1d(no_of_hidden_units)
        self.dropout2 = LockedDropout() if not switches else nn.Dropout(p=0.5)

        self.lstm3 = StatefulLSTM(no_of_hidden_units, no_of_hidden_units)
        self.bn_lstm3 = nn.BatchNorm1d(no_of_hidden_units)
        self.dropout3 = LockedDropout() if not switches else nn.Dropout(p=0.5)
        self.fc_output = nn.Linear(no_of_hidden_units, 1)
        self.loss = nn.BCEWithLogitsLoss()

    def reset_state(self):
        self.lstm1.reset_state()
        self.lstm2.reset_state()
        self.lstm3.reset_state()
        if not self.switches:
            self.dropout1.reset_state()
            self.dropout2.reset_state()
            self.dropout3.reset_state()

    def forward(self, x, t, train=True):
        embed = self.embedding(x)  # [batch_size, time_steps, features]
        no_of_timesteps = embed.shape[1]
        self.reset_state()
        outputs = []
        for i in range(no_of_timesteps):
            h = self.lstm1(embed[:, i, :])
            h = self.bn_lstm1(h)
            dargs = [h, 0.5, train] if not self.switches else [h]
            h = self.dropout1(*dargs)
            h = self.lstm2(h)
            h = self.bn_lstm2(h)
            dargs = [h, 0.3, train] if not self.switches else [h]
            h = self.dropout2(*dargs)
            h = self.lstm3(h)
            h = self.bn_lstm3(h)
            dargs = [h, 0.3, train] if not self.switches else [h]
            h = self.dropout3(*dargs)
...
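The piece worth noting here is `LockedDropout`: it samples one Bernoulli mask on the first call and reuses it for every subsequent time step until `reset_state()` clears it, so the same feature positions are dropped across the whole sequence, unlike `nn.Dropout`, which resamples on every call. The snippet below is a compact functional sketch of the same idea written against current PyTorch (no `Variable` or `is_cuda` globals); the shapes and the 1/(1-p) scaling mirror the class above, everything else is illustrative.

# Sketch: one shared dropout mask applied to every time step (locked/variational dropout).
import torch

def locked_dropout(x_steps, p=0.5, training=True):
    """x_steps: tensor shaped [time_steps, batch_size, features]."""
    if not training or p == 0.0:
        return x_steps
    # Sample a single (batch, feature) mask, broadcast it over time,
    # and scale by 1/(1 - p) to keep the expected activation unchanged.
    mask = x_steps.new_empty(1, x_steps.size(1), x_steps.size(2)).bernoulli_(1 - p) / (1 - p)
    return x_steps * mask

steps = torch.randn(10, 4, 8)        # [time_steps, batch, features]
out = locked_dropout(steps, p=0.5)
# The zeroed feature positions are identical at every time step:
assert torch.equal(out[0] == 0, out[-1] == 0)

The class version above achieves the same effect statefully, which is why every `forward` starts with `self.reset_state()`: both the LSTM cell states and the cached dropout masks must be cleared before a new batch.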
user_profile_setname_test.py
Source:user_profile_setname_test.py
...
BASE_URL = 'http://127.0.0.1:' + str(PORT)
HEADER = {"Content-Type": "application/json"}

@pytest.fixture(autouse=True)
def reset_state():
    requests.post(BASE_URL + "/workspace/reset", json=())
    reg1 = json.dumps({
        'email': 'first@first.com',
        'password': 'happydays',
        'name_first': 'James',
        'name_last': 'Lu'
    }).encode('utf-8')

    # Register a user
    req = urllib.request.Request(f"{BASE_URL}/auth/register", headers=HEADER, data=reg1)
    payload_json = json.load(urllib.request.urlopen(req))
    # assert type(payload_json) == 'dict'
    u_id = payload_json['u_id']
    token = payload_json['token']
...
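Because the fixture is declared with `autouse=True`, pytest runs `reset_state` before every test in this module without the tests listing it as a parameter, so each test starts from a freshly reset workspace with one registered user. Below is a minimal, self-contained sketch of that mechanic; the in-memory `_workspace` dict is a hypothetical stand-in for the HTTP reset and register calls above.

# Sketch of the autouse-fixture pattern: run with `pytest` and both tests pass,
# because the fixture re-initialises the shared state before each of them.
import pytest

_workspace = {"users": []}

@pytest.fixture(autouse=True)
def reset_state():
    _workspace["users"].clear()
    _workspace["users"].append({"name_first": "James", "name_last": "Lu"})

def test_starts_from_clean_state():
    assert len(_workspace["users"]) == 1

def test_previous_test_did_not_leak():
    _workspace["users"].append({"name_first": "Extra", "name_last": "User"})
    assert len(_workspace["users"]) == 2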
pure_ranking_example.py
Source:pure_ranking_example.py  
...
import tensorflow as tf

os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
os.environ["KMP_WARNINGS"] = "FALSE"
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)

def reset_state(name):
    tf.compat.v1.reset_default_graph()
    print("\n", "=" * 30, name, "=" * 30)

if __name__ == "__main__":
    start_time = time.perf_counter()
    data = pd.read_csv("sample_data/sample_movielens_rating.dat", sep="::",
                       names=["user", "item", "label", "time"])
    train_data, eval_data = split_by_ratio_chrono(data, test_size=0.2)
    train_data, data_info = DatasetPure.build_trainset(train_data)
    eval_data = DatasetPure.build_evalset(eval_data)
    print(data_info)
    # do negative sampling, assume the data only contains positive feedback
    train_data.build_negative_samples(data_info, item_gen_mode="random",
                                      num_neg=1, seed=2020)
    eval_data.build_negative_samples(data_info, item_gen_mode="random",
                                     num_neg=1, seed=2222)

    reset_state("SVD")
    svd = SVD("ranking", data_info, embed_size=16, n_epochs=3, lr=0.001,
              reg=None, batch_size=256, batch_sampling=False, num_neg=1)
    svd.fit(train_data, verbose=2, shuffle=True, eval_data=eval_data,
            metrics=["loss", "balanced_accuracy",
                     "roc_auc", "pr_auc", "precision",
                     "recall", "map", "ndcg"])
    print("prediction: ", svd.predict(user=1, item=2333))
    print("recommendation: ", svd.recommend_user(user=1, n_rec=7))

    reset_state("SVD++")
    svdpp = SVDpp(task="ranking", data_info=data_info, embed_size=16,
                  n_epochs=3, lr=0.001, reg=None, batch_size=256)
    svdpp.fit(train_data, verbose=2, eval_data=eval_data,
              metrics=["loss", "balanced_accuracy",
                       "roc_auc", "pr_auc", "precision",
                       "recall", "map", "ndcg"])
    print("prediction: ", svdpp.predict(user=1, item=2333))
    print("recommendation: ", svdpp.recommend_user(user=1, n_rec=7))

    reset_state("NCF")
    ncf = NCF("ranking", data_info, embed_size=16, n_epochs=1, lr=0.001,
              lr_decay=False, reg=None, batch_size=256, num_neg=1, use_bn=True,
              dropout_rate=None, hidden_units="128,64,32", tf_sess_config=None)
    ncf.fit(train_data, verbose=2, shuffle=True, eval_data=eval_data,
            metrics=["loss", "balanced_accuracy",
                     "roc_auc", "pr_auc", "precision",
                     "recall", "map", "ndcg"])
    print("prediction: ", ncf.predict(user=1, item=2333))
    print("recommendation: ", ncf.recommend_user(user=1, n_rec=7))

    reset_state("ALS")
    als = ALS(task="ranking", data_info=data_info, embed_size=16, n_epochs=2,
              reg=5.0, alpha=10, seed=42)
    als.fit(train_data, verbose=2, use_cg=True, n_threads=1,
            eval_data=eval_data, metrics=["loss", "balanced_accuracy",
                                          "roc_auc", "pr_auc", "precision",
                                          "recall", "map", "ndcg"])
    print("prediction: ", als.predict(user=1, item=2333))
    print("recommendation: ", als.recommend_user(user=1, n_rec=7))

    reset_state("BPR")
    bpr = BPR("ranking", data_info, embed_size=16, n_epochs=3, lr=3e-4,
              reg=None, batch_size=256, num_neg=1, use_tf=True)
    bpr.fit(train_data, verbose=2, num_threads=4, eval_data=eval_data,
            metrics=["loss", "balanced_accuracy", "roc_auc", "pr_auc",
                     "precision", "recall", "map", "ndcg"],
            optimizer="adam")

    reset_state("RNN4Rec")
    rnn = RNN4Rec("ranking", data_info, rnn_type="gru", loss_type="cross_entropy",
                  embed_size=16, n_epochs=2, lr=0.001, lr_decay=None,
                  hidden_units="16,16", reg=None, batch_size=256, num_neg=1,
                  dropout_rate=None, recent_num=10, tf_sess_config=None)
    rnn.fit(train_data, verbose=2, shuffle=True, eval_data=eval_data,
            metrics=["loss", "balanced_accuracy",
                     "roc_auc", "pr_auc", "precision",
                     "recall", "map", "ndcg"])
    print("prediction: ", rnn.predict(user=1, item=2333))
    print("recommendation: ", rnn.recommend_user(user=1, n_rec=7))

    reset_state("Caser")
    caser = Caser("ranking", data_info, embed_size=16, n_epochs=2, lr=1e-4,
                  lr_decay=None, reg=None, batch_size=2048, num_neg=1,
                  dropout_rate=0.0, use_bn=False, nh_filters=16, nv_filters=4,
                  recent_num=10, tf_sess_config=None)
    caser.fit(train_data, verbose=2, shuffle=True, eval_data=eval_data,
              metrics=["loss", "balanced_accuracy", "roc_auc", "pr_auc",
                       "precision", "recall", "map", "ndcg"])
    print("prediction: ", caser.predict(user=1, item=2333))
    print("recommendation: ", caser.recommend_user(user=1, n_rec=7))

    reset_state("WaveNet")
    wave = WaveNet("ranking", data_info, embed_size=16, n_epochs=2, lr=1e-4,
                   lr_decay=None, reg=None, batch_size=2048, num_neg=1,
                   dropout_rate=0.0, use_bn=False, n_filters=16, n_blocks=2,
                   n_layers_per_block=4, recent_num=10, tf_sess_config=None)
    wave.fit(train_data, verbose=2, shuffle=True, eval_data=eval_data,
             metrics=["loss", "balanced_accuracy", "roc_auc", "pr_auc",
                      "precision", "recall", "map", "ndcg"])
    print("prediction: ", wave.predict(user=1, item=2333))
    print("recommendation: ", wave.recommend_user(user=1, n_rec=7))

    reset_state("Item2Vec")
    item2vec = Item2Vec("ranking", data_info, embed_size=16, norm_embed=False,
                        window_size=3, n_epochs=2, n_threads=0)
    item2vec.fit(train_data, verbose=2, shuffle=True, eval_data=eval_data,
                 metrics=["loss", "balanced_accuracy", "roc_auc", "pr_auc",
                          "precision", "recall", "map", "ndcg"])
    print("prediction: ", item2vec.predict(user=1, item=2333))
    print("recommendation: ", item2vec.recommend_user(user=1, n_rec=7))

    reset_state("DeepWalk")
    deepwalk = DeepWalk("ranking", data_info, embed_size=16, norm_embed=False,
                        n_walks=10, walk_length=10, window_size=5, n_epochs=2,
                        n_threads=0)
    deepwalk.fit(train_data, verbose=2, shuffle=True, eval_data=eval_data,
                 metrics=["loss", "balanced_accuracy", "roc_auc", "pr_auc",
                          "precision", "recall", "map", "ndcg"])
    print("prediction: ", deepwalk.predict(user=1, item=2333))
    print("recommendation: ", deepwalk.recommend_user(user=1, n_rec=7))

    reset_state("NGCF")
    ngcf = NGCF("ranking", data_info, embed_size=16, n_epochs=2, lr=3e-4,
                lr_decay=None, reg=0.0, batch_size=2048, num_neg=1,
                node_dropout=0.0, message_dropout=0.0, hidden_units="64,64,64",
                device=torch.device("cpu"))
    ngcf.fit(train_data, verbose=2, shuffle=True, eval_data=eval_data,
             metrics=["loss", "balanced_accuracy", "roc_auc", "pr_auc",
                      "precision", "recall", "map", "ndcg"])
    print("prediction: ", ngcf.predict(user=1, item=2333))
    print("recommendation: ", ngcf.recommend_user(user=1, n_rec=7))

    reset_state("LightGCN")
    lightgcn = LightGCN("ranking", data_info, embed_size=32, n_epochs=2, lr=1e-4,
                        lr_decay=None, reg=0.0, batch_size=2048, num_neg=1,
                        dropout=0.0, n_layers=3, device=torch.device("cpu"))
    lightgcn.fit(train_data, verbose=2, shuffle=True, eval_data=eval_data,
                 metrics=["loss", "balanced_accuracy", "roc_auc", "pr_auc",
                          "precision", "recall", "map", "ndcg"])
    print("prediction: ", lightgcn.predict(user=1, item=2333))
    print("recommendation: ", lightgcn.recommend_user(user=1, n_rec=7))

    reset_state("user_cf")
    user_cf = UserCF(task="ranking", data_info=data_info, k=20, sim_type="cosine")
    user_cf.fit(train_data, verbose=2, mode="invert", num_threads=4, min_common=1,
                eval_data=eval_data, metrics=["loss", "balanced_accuracy",
                                              "roc_auc", "pr_auc", "precision",
                                              "recall", "map", "ndcg"])
    print("prediction: ", user_cf.predict(user=1, item=2333))
    print("recommendation: ", user_cf.recommend_user(user=1, n_rec=7))

    reset_state("item_cf")
    item_cf = ItemCF(task="ranking", data_info=data_info, k=20, sim_type="pearson")
    item_cf.fit(train_data, verbose=2, mode="invert", num_threads=1, min_common=1,
                eval_data=eval_data, metrics=["loss", "balanced_accuracy",
                                              "roc_auc", "pr_auc", "precision",
                                              "recall", "map", "ndcg"])
    print("prediction: ", item_cf.predict(user=1, item=2333))
    print("recommendation: ", item_cf.recommend_user(user=1, n_rec=7))
...
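Every model in this script follows the same cycle: `reset_state(name)` clears the default TensorFlow graph and prints a banner, the model is constructed, `fit(...)` runs with the shared metric list, then `predict` and `recommend_user` are called. If the repetition becomes unwieldy, it can be folded into a loop. The sketch below assumes only the calls already used above; constructors are deferred behind lambdas so each model is still built after its graph reset, and models whose `fit` takes extra keyword arguments (ALS, the CF models, BPR) would keep their own branches.

# Sketch: fold the repeated reset/fit/predict/recommend blocks into one loop.
metrics = ["loss", "balanced_accuracy", "roc_auc", "pr_auc",
           "precision", "recall", "map", "ndcg"]

candidates = [
    ("SVD", lambda: SVD("ranking", data_info, embed_size=16, n_epochs=3, lr=0.001,
                        reg=None, batch_size=256, batch_sampling=False, num_neg=1)),
    ("NCF", lambda: NCF("ranking", data_info, embed_size=16, n_epochs=1, lr=0.001,
                        lr_decay=False, reg=None, batch_size=256, num_neg=1, use_bn=True,
                        dropout_rate=None, hidden_units="128,64,32", tf_sess_config=None)),
]

for name, build in candidates:
    reset_state(name)   # clear the default graph before building the next model
    algo = build()
    algo.fit(train_data, verbose=2, shuffle=True,
             eval_data=eval_data, metrics=metrics)
    print("prediction: ", algo.predict(user=1, item=2333))
    print("recommendation: ", algo.recommend_user(user=1, n_rec=7))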
