Best Python code snippet using tox_python
nudging_loss.py
Source:nudging_loss.py  
...
        reduction = 'sum'
    # ===== Get good action values above the best bad action
    loss_feedback = regression_loss(qs_a_bad, min_no_feedback_per_line, reduction=reduction)
    return loss_feedback

def feedback_frontier_margin_learnt_feedback(qs, action=None, feedback=None, margin=None, regression_loss=None, testing=False, feedback_logits=None, ceil=None):
    """
    Compute the expert loss from learnt feedback predictions.
    Actions the classifier confidently flags as "gives feedback" are treated as
    bad actions and pushed below the confidently good actions by `margin`.
    """
    if feedback_logits is None:
        return 0
    n_actions = qs.size(1)
    almost_sure_feedback = torch.zeros(*feedback_logits.size()).to(TORCH_DEVICE)
    almost_sure_no_feedback = torch.zeros(*feedback_logits.size()).to(TORCH_DEVICE)
    # Keep only the feedback (or no-feedback) predictions the classification
    # network is almost sure about, i.e. further than `ceil` away from 0.5.
    almost_sure_feedback[feedback_logits > 0.5 + ceil] = 1
    almost_sure_no_feedback[feedback_logits < 0.5 - ceil] = 1
    # Count the "sure" predictions per line
    n_sure_feedback_per_line = almost_sure_feedback.sum(dim=1)
    n_sure_no_feedback_per_line = almost_sure_no_feedback.sum(dim=1)
    # Statistics about the fraction of actions that are considered
    certainty_percentage = ((n_sure_no_feedback_per_line + n_sure_feedback_per_line) / n_actions).mean()
    certainty_percentage_feed = (n_sure_feedback_per_line / n_actions).mean()
    certainty_percentage_no_feed = (n_sure_no_feedback_per_line / n_actions).mean()
    # Keep only lines that have at least one sure-feedback AND one sure-no-feedback action
    both_per_line = (n_sure_feedback_per_line > 0) & (n_sure_no_feedback_per_line > 0)
    qs = qs[both_per_line, :]
    if qs.size(0) == 0:
        return 0
    almost_sure_no_feedback = almost_sure_no_feedback[both_per_line, :]
    almost_sure_feedback = almost_sure_feedback[both_per_line, :]
    qs_feedback = qs.clone().detach()     # Q(s,a) for actions flagged "gives feedback", i.e. bad actions
    qs_no_feedback = qs.clone().detach()  # Q(s,a) for actions flagged "no feedback", i.e. good actions
    # Not sure yet whether it gives feedback? Temporarily set it to the minimum
    # Q-value so the optimisation doesn't touch it.
    qs_feedback[almost_sure_feedback == 0] = torch.min(qs).item() - margin
    # Not sure yet whether it gives NO feedback? Temporarily set it to the
    # maximum Q-value so the optimisation doesn't touch it.
    qs_no_feedback[almost_sure_no_feedback == 0] = torch.max(qs).item() + margin
    min_no_feedback_per_line = qs_no_feedback.min(dim=1)[0].repeat([n_actions, 1]).t() - margin
    max_feedback_per_line = qs_feedback.max(dim=1)[0].repeat([n_actions, 1]).t() + margin
    sure_feedback_and_above_min = almost_sure_feedback.bool() & (qs > min_no_feedback_per_line)  # elementwise logical AND
    sure_no_feedback_and_below_max = almost_sure_no_feedback.bool() & (qs < max_feedback_per_line)
    qs_target = qs.clone().detach()
    qs_feedback_target = torch.where(sure_feedback_and_above_min, min_no_feedback_per_line, qs_target)
    qs_no_feedback_target = torch.where(sure_no_feedback_and_below_max, max_feedback_per_line, qs_target)
    if not testing:
        assert not qs_no_feedback_target.requires_grad
        assert not qs_feedback_target.requires_grad
        assert qs.requires_grad
        reduction = 'mean'
    else:
        reduction = 'sum'
    # ===== Pull sure-bad actions below the worst sure-good action, and push
    # sure-good actions above the best sure-bad action
    loss_no_feedback = regression_loss(qs, qs_no_feedback_target, reduction=reduction)
    loss_feedback = regression_loss(qs, qs_feedback_target, reduction=reduction)
    return loss_no_feedback + loss_feedback

if __name__ == "__main__":
    import torch
    TORCH_DEVICE = 'cpu'
    regr_loss = torch.nn.functional.smooth_l1_loss
    margin = 0.1

    # Test 1
    qs = torch.arange(12).view(4, 3).float()
    actions = torch.Tensor([0, 0, 0, 0]).long()
    feedback = torch.Tensor([1, 1, 1, 0])
    assert feedback_bad_to_min_when_max(qs, actions, testing=True, feedback=feedback, margin=margin, regression_loss=regr_loss) == 0

    # Test 2
    qs = torch.arange(12).view(4, 3).float()
    actions = torch.Tensor([0, 0, 0, 0]).long()
    feedback = torch.Tensor([1, 1, 1, 0])
    loss1 = feedback_bad_to_min_when_max(qs, actions, testing=True, feedback=feedback, margin=margin, regression_loss=regr_loss)
    qs = torch.arange(12).view(4, 3).float()
    actions = torch.Tensor([0, 1, 2, 0]).long()
    feedback = torch.Tensor([1, 1, 1, 0])
    loss2 = feedback_bad_to_min_when_max(qs, actions, testing=True, feedback=feedback, margin=margin, regression_loss=regr_loss)
    assert loss1 < loss2

    # # Test 3
    # qs = torch.arange(12).view(4, 3).float()
    #
    # max_margin = 0.50  # If the bad action reaches 50% of the max value: push it down
    # actions = torch.Tensor([0, 1, 2, 0]).long()
    # feedback = torch.Tensor([1, 1, 1, 0])
    # loss1 = feedback_bad_to_percent_max(qs, actions, feedback, regr_loss, max_margin)
    #
    # max_margin = 0.90  # If the bad action reaches 90% of the max value: push it down
    # qs = torch.arange(12).view(4, 3).float()
    # actions = torch.Tensor([0, 1, 2, 0]).long()
    # feedback = torch.Tensor([1, 1, 1, 0])
    # loss2 = feedback_bad_to_percent_max(qs, actions, feedback, regr_loss, max_margin)
    #
    # # assert loss1 > loss2, "loss1 {},  loss2 {}".format(loss1, loss2)
    #
    # max_margin = 0.90  # If the bad action reaches 90% of the max value: push it down
    # qs = - torch.arange(12).view(4, 3).float()
    # actions = torch.Tensor([0, 1, 2, 0]).long()
    # feedback = torch.Tensor([1, 1, 1, 0])
    # loss3 = feedback_bad_to_percent_max(qs, actions, feedback, regr_loss, max_margin)
    #
    # #=================================================
    #
    # qs = torch.arange(12).view(4, 3).float()
    # actions = torch.Tensor([1, 2, 1, 0]).long()
    # feedback = torch.Tensor([1, 1, 1, 0])
    # loss1 = feedback_bad_to_min(qs, actions, feedback, margin, regr_loss)
    #==================================================
    #
    # qs = torch.arange(21).view(7, 3).float()
    # actions = torch.Tensor([1, 2, 1, 0, 2, 1, 0]).long().view(-1, 1)
    # feedback = torch.Tensor([1, 0, 1, 0, 1, 0, 1])
    # loss1 = feedback_frontier_margin(qs, actions, feedback, margin, regr_loss, testing=True)
    #
    # actions = torch.Tensor([[1, 2, 1, 0, 2, 1, 0]]).long().view(-1, 1)
    # feedback = torch.Tensor([1, 0, 1, 0, 1, 0, 1])
    #
    # qs = - torch.arange(21).view(7, 3).float()
    # loss2s = feedback_frontier_margin(qs, actions, feedback, margin, regr_loss, testing=True)
    #=======================================================
    ceil = 0.4
    #=======================================================
    #
    # qs = torch.arange(21).view(7, 3).float()
    # logits = torch.ones(7, 3)
    # logits[:, 2] *= -1
    # logits[:, 1] = 0
    #
    # loss1 = feedback_frontier_margin_learnt_feedback(qs, margin=margin, regression_loss=regr_loss, feedback_logits=logits,
    #                                                  testing=True, ceil=ceil)
    # assert loss1 == 0
    #
    # # =======================================================
    # qs = torch.arange(21).view(7, 3).float()
    # logits = torch.ones(7, 3)
    # logits[:, 0] = 0.5
    # logits[:, 1] = 0.5
    #
    # loss2 = feedback_frontier_margin_learnt_feedback(qs, margin=margin, regression_loss=regr_loss,
    #                                                  feedback_logits=logits,
    #                                                  testing=True, ceil=ceil)
    # assert loss2 == 0
    #
    # #=======================================================
    # qs = -torch.arange(21).view(7, 3).float()
    # logits = torch.ones(7, 3)
    # logits[:, 2] *= 0
    # logits[:, 1] = 0.5
    #
    # loss3 = feedback_frontier_margin_learnt_feedback(qs, margin=margin, regression_loss=regr_loss,
    #                                                  feedback_logits=logits,
    #                                                  testing=True, ceil=ceil)
    #
    # #========================================================
    #
    # qs = torch.arange(21).view(7, 3).float()
    # logits = torch.ones(7, 3)
    # logits[:, 0] *= 0
    # logits[:, 1] = 0.5
    #
    # loss4 = feedback_frontier_margin_learnt_feedback(qs, margin=margin, regression_loss=regr_loss,
    #                                                  feedback_logits=logits,
    #                                                  testing=True, ceil=ceil)
    #
    # assert loss3 == loss4
    #
    # # ========================================================
    # qs = torch.arange(21).view(7, 3).float()
    # logits = torch.ones(7, 3)
    # logits[:, 0] *= -1
    # logits[:, 1] = 0
    # logits[-1, :] = 0
    #
    # loss5 = feedback_frontier_margin_learnt_feedback(qs, margin=margin, regression_loss=regr_loss,
    #                                                  feedback_logits=logits,
    #                                                  testing=True, ceil=ceil)
    #
    # assert loss5 != 0
    # assert loss5 < loss4
    #
    # # =========================================================
    # qs = torch.arange(21).view(7, 3).float()
    # logits = torch.zeros(7, 3)
    # logits[:, 0] = 1
    #
    # loss6 = feedback_frontier_margin_learnt_feedback(qs, margin=margin, regression_loss=regr_loss,
    #                                                  feedback_logits=logits,
    #                                                  testing=True, ceil=ceil)
    #
    # assert loss6 == 0
    #===========================================================
    qs = torch.tensor([[0, 0, 0], [1, 0, 0.5], [0, 1, 0.5]]).float()
    actions = torch.Tensor([1, 1, 1]).long()
    feedback = torch.Tensor([0, 1, 1])
    logits = torch.zeros_like(qs)
    logits[1, 1] = 1
    logits[2, 1] = 1
    loss7 = feedback_ponctual_negative_only(qs,
                                            action=actions,
                                            feedback=feedback,
...
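Below is a minimal usage sketch for feedback_frontier_margin_learnt_feedback, not part of the original file: the Q-network, classifier, batch, and optimizer are hypothetical stand-ins showing how the loss could plug into a training step, and TORCH_DEVICE is assumed to be the module-level constant the snippet above relies on.

import torch

TORCH_DEVICE = 'cpu'                   # module-level constant assumed by the snippet
q_network = torch.nn.Linear(4, 3)      # hypothetical Q-network: state -> Q(s, .)
feedback_net = torch.nn.Linear(4, 3)   # hypothetical per-action feedback classifier
optimizer = torch.optim.Adam(q_network.parameters(), lr=1e-3)

states = torch.randn(8, 4)             # dummy batch of 8 states
qs = q_network(states)                 # [8, n_actions], requires_grad=True
# Per-action probability of receiving negative feedback; detached so this
# loss only shapes the Q-network, not the classifier.
feedback_logits = torch.sigmoid(feedback_net(states)).detach()

loss = feedback_frontier_margin_learnt_feedback(
    qs, margin=0.1, regression_loss=torch.nn.functional.smooth_l1_loss,
    feedback_logits=feedback_logits, ceil=0.4)
if torch.is_tensor(loss):              # the function returns 0 when no prediction is certain
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()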
feedback_trigger.py
Source:feedback_trigger.py
...
	triggers = { dt[0]: dt[1] for dt in triggers }
	return triggers

def get_context(doc):
	return { "doc": doc }

def delete_feedback_request_and_feedback(reference_doctype, reference_name):
	"""Delete all the feedback requests and feedback communications."""
	if not all([reference_doctype, reference_name]):
		return
	feedback_requests = frappe.get_all("Feedback Request", filters={
		"is_feedback_submitted": 0,
		"reference_doctype": reference_doctype,
		"reference_name": reference_name
	})
	communications = frappe.get_all("Communication", {
		"communication_type": "Feedback",
		"reference_doctype": reference_doctype,
		"reference_name": reference_name
	})
	for request in feedback_requests:
...
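A short usage sketch, not from the original file: delete_feedback_request_and_feedback is the kind of cleanup that would be wired to a document's deletion, e.g. through a doc_events "on_trash" hook in hooks.py. The import path below is a guess based on frappe's doctype layout, and on_document_trash is a hypothetical hook target.

# Hypothetical hook target; the import path is assumed, not confirmed.
from frappe.core.doctype.feedback_trigger.feedback_trigger import (
	delete_feedback_request_and_feedback,
)

def on_document_trash(doc, method=None):
	# Remove pending Feedback Requests and Feedback Communications
	# that reference the document being deleted.
	delete_feedback_request_and_feedback(doc.doctype, doc.name)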
test_feedback_trigger.py
Source:test_feedback_trigger.py
# -*- coding: utf-8 -*-
# Copyright (c) 2015, Frappe Technologies and Contributors
# See license.txt
from __future__ import unicode_literals

import unittest

import frappe

# test_records = frappe.get_test_records('Feedback Trigger')

def get_feedback_request(todo, feedback_trigger):
	return frappe.db.get_value("Feedback Request", {
		"is_sent": 1,
		"is_feedback_submitted": 0,
		"reference_doctype": "ToDo",
		"reference_name": todo,
		"feedback_trigger": feedback_trigger
	}, ["name", "key"])

class TestFeedbackTrigger(unittest.TestCase):
	def setUp(self):
		new_user = frappe.get_doc(dict(doctype='User', email='test-feedback@example.com',
			first_name='Tester')).insert(ignore_permissions=True)
		new_user.add_roles("System Manager")

	def tearDown(self):
		frappe.db.sql("delete from tabContact where email_id='test-feedback@example.com'")
		frappe.delete_doc("User", "test-feedback@example.com")
		frappe.delete_doc("Feedback Trigger", "ToDo")
		frappe.db.sql('delete from `tabEmail Queue`')
		frappe.db.sql('delete from `tabFeedback Request`')

	def test_feedback_trigger(self):
		""" Test the full feedback trigger lifecycle """
		from frappe.www.feedback import accept
		frappe.delete_doc("Feedback Trigger", "ToDo")
		frappe.db.sql('delete from `tabEmail Queue`')
		frappe.db.sql('delete from `tabFeedback Request`')
		feedback_trigger = frappe.get_doc({
			"enabled": 1,
			"doctype": "Feedback Trigger",
			"document_type": "ToDo",
			"email_field": "assigned_by",
			"email_fieldname": "assigned_by",
			"subject": "{{ doc.name }} Task Completed",
			"condition": "doc.status == 'Closed'",
			"message": """Task {{ doc.name }} is Completed by {{ doc.owner }}.
			regarding the Task {{ doc.name }}"""
		}).insert(ignore_permissions=True)
		# create a todo
		todo = frappe.get_doc({
			"doctype": "ToDo",
			"owner": "test-feedback@example.com",
			"assigned_by": "test-feedback@example.com",
			"description": "Unable To Submit Sales Order #SO-00001"
		}).insert(ignore_permissions=True)
		# the feedback alert mail should be sent only on 'Closed' status
		email_queue = frappe.db.sql("""select name from `tabEmail Queue` where
			reference_doctype='ToDo' and reference_name=%s""", todo.name)
		self.assertFalse(email_queue)
		# add a communication
		frappe.get_doc({
			"reference_doctype": "ToDo",
			"reference_name": todo.name,
			"communication_type": "Communication",
			"content": "Test Communication",
			"subject": "Test Communication",
			"doctype": "Communication"
		}).insert(ignore_permissions=True)
		# check that the feedback mail alert is triggered on 'Closed'
		todo.reload()
		todo.status = "Closed"
		todo.save(ignore_permissions=True)
		email_queue = frappe.db.sql("""select name from `tabEmail Queue` where
			reference_doctype='ToDo' and reference_name=%s""", todo.name)
		self.assertTrue(email_queue)
		# a feedback request should exist for the todo
		feedback_request, request_key = get_feedback_request(todo.name, feedback_trigger.name)
		self.assertTrue(feedback_request)
		# mail alerts should not be triggered multiple times for the same document
		todo.save(ignore_permissions=True)
		email_queue = frappe.db.sql("""select name from `tabEmail Queue` where
			reference_doctype='ToDo' and reference_name=%s""", todo.name)
		self.assertEqual(len(email_queue), 1)
		frappe.db.sql('delete from `tabEmail Queue`')
		# test that feedback is submitted successfully
		result = accept(request_key, "test-feedback@example.com", "ToDo", todo.name, "Great Work !!", 4, fullname="Test User")
		self.assertTrue(result)
		# test that the feedback is saved in a Communication
		docname = frappe.db.get_value("Communication", {
			"reference_doctype": "ToDo",
			"reference_name": todo.name,
			"communication_type": "Feedback",
			"feedback_request": feedback_request
		})
		communication = frappe.get_doc("Communication", docname)
		self.assertEqual(communication.rating, 4)
		self.assertEqual(communication.content, "Great Work !!")
		# the link should expire after feedback submission
		self.assertRaises(Exception, accept, key=request_key, sender="test-feedback@example.com",
			reference_doctype="ToDo", reference_name=todo.name, feedback="Thank You !!", rating=4, fullname="Test User")
		# the auto feedback request should trigger only once
		todo.reload()
		todo.save(ignore_permissions=True)
		email_queue = frappe.db.sql("""select name from `tabEmail Queue` where
			reference_doctype='ToDo' and reference_name=%s""", todo.name)
		self.assertFalse(email_queue)
		frappe.delete_doc("ToDo", todo.name)
		# feedback requests and feedback communications should be deleted with the ToDo
		communications = frappe.get_all("Communication", {
			"reference_doctype": "ToDo",
			"reference_name": todo.name,
			"communication_type": "Feedback"
		})
		self.assertFalse(communications)
		feedback_requests = frappe.get_all("Feedback Request", {
			"reference_doctype": "ToDo",
			"reference_name": todo.name,
			"is_feedback_submitted": 0
		})
...