How to use the compute method in Avocado

Best Python code snippet using avocado_python

views.py

Source:views.py Github

copy

Full Screen

import hashlib
import json
import subprocess
from django.core.exceptions import PermissionDenied
from django.shortcuts import get_object_or_404
from rest_framework import status
from rest_framework.decorators import detail_route
from rest_framework.generics import RetrieveUpdateAPIView, ListAPIView
from rest_framework.parsers import MultiPartParser
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from rest_framework.views import APIView
from rest_framework.viewsets import ModelViewSet, ViewSet
from rest_framework.authentication import BasicAuthentication
from tasks import poweron_nodes, poweroff_nodes
from tasks import submit_computeset, cancel_computeset, attach_iso
import hostlist
from models import Cluster, Compute, ComputeSet, Frontend
from serializers import ComputeSerializer, ComputeSetSerializer, FullComputeSetSerializer
from serializers import ClusterSerializer, FrontendSerializer, ProjectSerializer
from serializers import UserDetailsSerializer
import re
import os
import random
import string
import datetime
from django.db.models import Q


# #################################################
# CLUSTER
# #################################################
class ClusterViewSet(ModelViewSet):
    """CRUD operations on clusters owned by the requesting user's projects."""

    lookup_field = 'cluster_name'
    serializer_class = ClusterSerializer

    def get_queryset(self):
        """Obtain details about all clusters visible to the current user."""
        # A user's visibility is determined by group (project) membership.
        return Cluster.objects.filter(
            project__in=self.request.user.groups.all())

    def retrieve(self, request, cluster_name, format=None):
        """Obtain details about the named cluster."""
        clust = get_object_or_404(Cluster, name=cluster_name)
        if clust.project not in request.user.groups.all():
            raise PermissionDenied()
        serializer = ClusterSerializer(clust)
        return Response(serializer.data)

    def destroy(self, request, cluster_name, format=None):
        """Destroy the named cluster.  Not implemented yet."""
        # TODO: actual teardown is not implemented.
        return Response("destroy todo")


# #################################################
# COMPUTE
# #################################################
class ComputeViewSet(ViewSet):
    """Lifecycle operations on a single compute node within a cluster."""

    lookup_field = 'compute_name'
    # Was ClusterSerializer (copy-paste slip); retrieve() serializes with
    # ComputeSerializer, so advertise that one.
    serializer_class = ComputeSerializer

    def _get_authorized_compute(self, request, cluster_name, compute_name):
        """Fetch the compute or 404, and enforce project membership."""
        compute = get_object_or_404(
            Compute, name=compute_name, cluster__name=cluster_name)
        if compute.cluster.project not in request.user.groups.all():
            raise PermissionDenied()
        return compute

    def _require_active_computeset(self, compute):
        """Return an error Response unless the compute is in a RUNNING computeset."""
        if not compute.computeset.filter(
                state=ComputeSet.CSET_STATE_RUNNING).exists():
            return Response("Compute is not a member of an active computeset",
                            status=status.HTTP_400_BAD_REQUEST)
        return None

    def retrieve(self, request, compute_name_cluster_name, compute_name, format=None):
        """Obtain the details of a named compute resource in a named cluster."""
        compute = self._get_authorized_compute(
            request, compute_name_cluster_name, compute_name)
        serializer = ComputeSerializer(compute)
        return Response(serializer.data)

    def destroy(self, request, compute_name_cluster_name, compute_name, format=None):
        """Destroy the named compute resource in a named cluster.  Not implemented."""
        return Response("todo")

    @detail_route(methods=['put'])
    def shutdown(self, request, compute_name_cluster_name, compute_name, format=None):
        """Shutdown the named compute resource in a named cluster."""
        compute = self._get_authorized_compute(
            request, compute_name_cluster_name, compute_name)
        error = self._require_active_computeset(compute)
        if error:
            return error
        poweroff_nodes.delay([compute.rocks_name], "shutdown")
        return Response(status=204)

    @detail_route(methods=['put'])
    def reboot(self, request, compute_name_cluster_name, compute_name, format=None):
        """Reboot the named compute resource in a named cluster."""
        compute = self._get_authorized_compute(
            request, compute_name_cluster_name, compute_name)
        error = self._require_active_computeset(compute)
        if error:
            return error
        poweroff_nodes.delay([compute.rocks_name], "reboot")
        return Response(status=204)

    @detail_route(methods=['put'])
    def reset(self, request, compute_name_cluster_name, compute_name, format=None):
        """Reset the named compute resource in a named cluster."""
        compute = self._get_authorized_compute(
            request, compute_name_cluster_name, compute_name)
        error = self._require_active_computeset(compute)
        if error:
            return error
        poweroff_nodes.delay([compute.rocks_name], "reset")
        return Response(status=204)

    @detail_route(methods=['put'])
    def poweroff(self, request, compute_name_cluster_name, compute_name, format=None):
        """Power off the named compute resource in a named cluster."""
        compute = self._get_authorized_compute(
            request, compute_name_cluster_name, compute_name)
        error = self._require_active_computeset(compute)
        if error:
            return error
        poweroff_nodes.delay([compute.rocks_name], "poweroff")
        return Response(status=204)

    @detail_route(methods=['put'])
    def poweron(self, request, compute_name_cluster_name, compute_name, format=None):
        """Power on the named compute resource in a named cluster."""
        compute = self._get_authorized_compute(
            request, compute_name_cluster_name, compute_name)
        error = self._require_active_computeset(compute)
        if error:
            return error
        poweron_nodes.delay([compute.rocks_name])
        return Response(status=204)

    @detail_route(methods=['put'])
    def attach_iso(self, request, compute_name_cluster_name, compute_name, format=None):
        """Attach an ISO to the named compute resource in a named cluster."""
        compute = self._get_authorized_compute(
            request, compute_name_cluster_name, compute_name)
        if "iso_name" not in request.GET:
            return Response("Please provide the iso_name", status=400)
        attach_iso.delay([compute.rocks_name], request.GET["iso_name"])
        return Response(status=204)

    @detail_route(methods=['post'])
    def rename(self, request, compute_name_cluster_name, compute_name, format=None):
        """Rename the named compute resource in a named cluster."""
        compute = self._get_authorized_compute(
            request, compute_name_cluster_name, compute_name)
        new_name = request.data.get("name")
        # Guard against a missing "name" key: re.match(None) raises TypeError.
        if not new_name or not re.match(r'^[a-zA-Z0-9_-]+$', new_name):
            # Typo fix: "opnly" -> "only".
            return Response("New name can only contain alphanumeric symbols, digits and '-_'.",
                            status=status.HTTP_400_BAD_REQUEST)
        compute.name = new_name
        compute.save()
        return Response(status=204)


# #################################################
# CONSOLE
##################################################
def get_console(request, console_compute_name, nucleus_name=None, is_frontend=False):
    """Open a VNC console to the named resource.

    Sets a short-lived VNC password on the libvirt domain, opens an SSH
    tunnel to the hypervisor's VNC port, and redirects (303) to a
    Guacamole URL embedding the tunnel port and password.
    """
    resp = "Success"
    sleep_time = 15
    # Local imports: libvirt is only needed (and may only be installed)
    # where consoles are actually served.
    from xml.dom.minidom import parseString
    import libvirt

    if is_frontend:
        compute = Frontend.objects.get(rocks_name=console_compute_name)
    else:
        compute = Compute.objects.get(rocks_name=console_compute_name)

    physical_host = compute.physical_host
    if not physical_host:
        return Response("The VM is not running",
                        status=status.HTTP_400_BAD_REQUEST)

    hypervisor = libvirt.open("qemu+tls://%s.comet/system?pkipath=/var/secrets/cometvc" %
                              physical_host)
    domU = hypervisor.lookupByName(compute.name)

    # Grab the current XML definition of the domain (SECURE keeps passwords).
    flags = libvirt.VIR_DOMAIN_XML_SECURE
    domU_xml = parseString(domU.XMLDesc(flags))

    # Parse out the <graphics>...</graphics> device node(s)...
    port = None
    password = None
    for gd in domU_xml.getElementsByTagName('graphics'):
        duration = 3600
        # Cryptographically random 16-char one-time VNC password.
        password = ''.join(
            random.SystemRandom().choice(
                string.ascii_uppercase +
                string.ascii_lowercase +
                string.digits) for _ in range(16))
        # Generate a new passwdValidTo string...
        dt1 = datetime.datetime.utcnow()
        dt2 = dt1 + datetime.timedelta(0, int(duration))
        timestr = dt2.strftime("%Y-%m-%dT%H:%M:%S")
        # Modify the passwd and passwdValidTo fields...
        gd.setAttribute('passwd', password)
        gd.setAttribute('passwdValidTo', timestr)
        port = gd.getAttribute("port")
        # Apply the change to the live domain...
        flags = libvirt.VIR_DOMAIN_DEVICE_MODIFY_FORCE | \
            libvirt.VIR_DOMAIN_DEVICE_MODIFY_LIVE
        domU.updateDeviceFlags(gd.toxml(), flags)

    cmd = ['/usr/bin/sudo',
           '-u',
           'nucleus_comet',
           '/opt/nucleus-scripts/bin/open_tunnel.py',
           '-H',
           '{hostname}'.format(hostname=physical_host),
           '-p',
           '{hostport}'.format(hostport=port),
           '-s',
           '{duration}'.format(duration=sleep_time)]
    try:
        proc = subprocess.Popen(cmd, stdout=subprocess.PIPE)
    except OSError as e:
        resp = "Execution failed: %s" % (e)
        return Response(resp)

    # First line of output is the local tunnel port.
    tun_port = proc.stdout.readline().strip()

    url_base = "/nucleus-guacamole-0.9.8/index.html?hostname=localhost"
    url = request.build_absolute_uri("%s&port=%s&token=%s&host=%s" % (url_base, tun_port, password, nucleus_name))
    return Response(
        url,
        status=303,
        headers={'Location': url})


class ConsoleViewSet(ViewSet):
    """Open VNC console to named compute resource."""
    authentication_classes = (BasicAuthentication,)
    permission_classes = (IsAuthenticated,)

    def retrieve(self, request, compute_name_cluster_name, console_compute_name, format=None):
        compute = get_object_or_404(
            Compute, name=console_compute_name, cluster__name=compute_name_cluster_name)
        if compute.cluster.project not in request.user.groups.all():
            raise PermissionDenied()
        return get_console(request, compute.rocks_name, console_compute_name)


class FrontendConsoleViewSet(ViewSet):
    """Open VNC console to named frontend resource."""
    authentication_classes = (BasicAuthentication,)
    permission_classes = (IsAuthenticated,)

    def retrieve(self, request, console_cluster_name, format=None):
        clust = get_object_or_404(Cluster, name=console_cluster_name)
        if clust.project not in request.user.groups.all():
            raise PermissionDenied()
        return get_console(request, clust.frontend.rocks_name, console_cluster_name, True)


##################################################
# COMPUTESET
# #################################################
class ComputeSetViewSet(ModelViewSet):
    """Operations on computesets (batch-scheduled groups of compute nodes)."""

    lookup_field = 'computeset_id'
    serializer_class = ComputeSetSerializer

    def get_queryset(self):
        """Obtain the details of all ComputeSets, optionally filtered by state."""
        cset = ComputeSet.objects.filter(
            cluster__project__in=self.request.user.groups.all())
        states = self.request.GET.getlist('state')
        if states:
            cset = cset.filter(state__in=states)
        return cset

    def retrieve(self, request, computeset_id, format=None):
        """Obtain the details of the identified ComputeSet."""
        cset = get_object_or_404(ComputeSet, pk=computeset_id)
        if cset.cluster.project not in request.user.groups.all():
            raise PermissionDenied()
        serializer = ComputeSetSerializer(cset)
        return Response(serializer.data)

    @detail_route(methods=['put'])
    def poweroff(self, request, computeset_id, format=None):
        """Power off the identified ComputeSet and cancel its batch job."""
        cset = ComputeSet.objects.get(pk=computeset_id)
        if cset.cluster.project not in request.user.groups.all():
            raise PermissionDenied()
        computes = [compute.rocks_name for compute in cset.computes.all()]
        poweroff_nodes.delay(computes, "poweroff")
        cancel_computeset.delay(FullComputeSetSerializer(cset).data)
        return Response(status=204)

    def poweron(self, request, format=None):
        """Power on a set of computes creating a ComputeSet."""
        clust = get_object_or_404(Cluster, name=request.data["cluster"])
        if clust.project not in request.user.groups.all():
            raise PermissionDenied()

        walltime_mins = request.data.get("walltime_mins")
        if not walltime_mins:
            walltime_mins = 2880  # default: 48 hours

        nodes = []
        hosts = []
        if request.data.get("computes"):
            # Bug fix: the original used `request.data["computes"] is list`,
            # an identity comparison against the `list` type itself, which is
            # always False, so the structured form was never handled.
            if isinstance(request.data["computes"], list):
                for obj in request.data["computes"]:
                    nodes.append(obj["name"])
                    hosts.append(obj["host"])
            else:
                nodes = hostlist.expand_hostlist("%s" % request.data["computes"])
                if request.data.get("hosts"):
                    hosts = hostlist.expand_hostlist("%s" % request.data["hosts"])
        elif request.data.get("count"):
            # Auto-select idle nodes: not in a live computeset, not active,
            # image unmapped (or untracked) and not locked.
            computes_selected = Compute.objects.filter(cluster=clust).exclude(
                computeset__state__in=[
                    ComputeSet.CSET_STATE_CREATED,
                    ComputeSet.CSET_STATE_SUBMITTED,
                    ComputeSet.CSET_STATE_RUNNING,
                    ComputeSet.CSET_STATE_ENDING]
            ).exclude(state="active").filter(Q(image_state="unmapped") | Q(image_state__isnull=True)).exclude(image_locked=True)[:int(request.data["count"])]
            nodes.extend([comp.name for comp in computes_selected])
            if(len(nodes) < int(request.data["count"]) or int(request.data["count"]) == 0):
                return Response("There are %i nodes available for starting. Requested number should be greater than zero."%len(nodes),
                                status=status.HTTP_400_BAD_REQUEST)

        if hosts and len(nodes) != len(hosts):
            return Response("The length of hosts should be equal to length of nodes",
                            status=status.HTTP_400_BAD_REQUEST)

        cset = ComputeSet()
        cset.cluster = clust
        cset.user = clust.username
        if(request.data.get("allocation")):
            cset.account = request.data["allocation"]
        elif(clust.allocations.count() == 1):
            # Only one allocation on the cluster: use it implicitly.
            cset.account = clust.allocations.get().allocation
        else:
            return Response("Please specify the allocation",
                            status=status.HTTP_400_BAD_REQUEST)
        if(not clust.allocations.filter(allocation=cset.account).exists()):
            return Response("Allocation %s does not belong to the cluster."%cset.account,
                            status=status.HTTP_400_BAD_REQUEST)

        cset.walltime_mins = walltime_mins
        cset.jobid = None
        cset.name = None
        cset.nodelist = ""
        cset.reservation = request.data.get("reservation")
        cset.state = ComputeSet.CSET_STATE_CREATED
        cset.node_count = len(nodes)
        cset.save()

        for node in nodes:
            compute = Compute.objects.get(name=node, cluster=clust)
            # Reject nodes already claimed by another live computeset.
            other_cs_query = ComputeSet.objects.filter(computes__id__exact=compute.id).exclude(
                state__in=[ComputeSet.CSET_STATE_COMPLETED, ComputeSet.CSET_STATE_FAILED, ComputeSet.CSET_STATE_CANCELLED])
            if other_cs_query.exists():
                cset.delete()
                err_cs = other_cs_query.get()
                return Response("The compute %s belongs to computeset %s which is in %s state" % (node, err_cs.id, err_cs.state), status=status.HTTP_400_BAD_REQUEST)
            if (compute.image_state not in ["unmapped", None]) or compute.image_locked:
                cset.delete()
                return Response("The node %s's image is in %s state and image locked status is %s. Please contact the user support if the VM is not running." %(node, compute.image_state, compute.image_locked), status=status.HTTP_400_BAD_REQUEST)
            if compute.cluster.name != request.data["cluster"]:
                cset.delete()
                return Response("The node %s does not belong to the cluster %s, belongs to %s" % (node, request.data["cluster"], compute.cluster.name), status=status.HTTP_400_BAD_REQUEST)
            cset.computes.add(compute)

        submit_computeset.delay(FullComputeSetSerializer(cset).data)
        # We should only poweron computes after entering jobscript and
        # finishing the PROLOG on all allocated nodes. At that point the
        # nodelist will be returned and we can call poweron_nodeset()
        #poweron_nodeset.delay(nodes, hosts)
        location = "/nucleus/v1/computeset/%s" % (cset.id)
        serializer = ComputeSetSerializer(cset)
        return Response(
            serializer.data,
            status=201,
            headers={'Location': request.build_absolute_uri(location)})

    # NOTE(review): the original file defined shutdown() three times; the
    # first two were corrupt copy-paste hybrids of poweron() that referenced
    # an undefined name `node` and wrongly resubmitted the computeset.  Only
    # the last definition (kept below) was ever bound at class creation.
    @detail_route(methods=['put'])
    def shutdown(self, request, computeset_id, format=None):
        """Shutdown the nodes in the identified ComputeSet."""
        cset = ComputeSet.objects.get(pk=computeset_id)
        if cset.cluster.project not in request.user.groups.all():
            raise PermissionDenied()
        computes = [compute.rocks_name for compute in cset.computes.all()]
        poweroff_nodes.delay(computes, "shutdown")
        cancel_computeset.delay(FullComputeSetSerializer(cset).data)
        return Response(status=204)

    @detail_route(methods=['put'])
    def reboot(self, request, computeset_id, format=None):
        """Reboot the nodes in the identified ComputeSet."""
        cset = ComputeSet.objects.get(pk=computeset_id)
        if cset.cluster.project not in request.user.groups.all():
            raise PermissionDenied()
        computes = [compute.rocks_name for compute in cset.computes.all()]
        poweroff_nodes.delay(computes, "reboot")
        return Response(status=204)

    @detail_route(methods=['put'])
    def reset(self, request, computeset_id, format=None):
        """Reset the nodes in the identified ComputeSet."""
        cset = ComputeSet.objects.get(pk=computeset_id)
        if cset.cluster.project not in request.user.groups.all():
            raise PermissionDenied()
        computes = [compute.rocks_name for compute in cset.computes.all()]
        poweroff_nodes.delay(computes, "reset")
        return Response(status=204)


# #################################################
# FRONTEND
# #################################################
class FrontendViewSet(ViewSet):
    """Lifecycle operations on a cluster's frontend node."""

    def _get_authorized_cluster(self, request, frontend_cluster_name):
        """Fetch the cluster or 404, and enforce project membership."""
        clust = get_object_or_404(Cluster, name=frontend_cluster_name)
        if clust.project not in request.user.groups.all():
            raise PermissionDenied()
        return clust

    def retrieve(self, request, frontend_cluster_name, format=None):
        """Obtain the details of a frontend resource in a named cluster."""
        clust = self._get_authorized_cluster(request, frontend_cluster_name)
        serializer = FrontendSerializer(clust.frontend)
        return Response(serializer.data)

    @detail_route(methods=['put'])
    def shutdown(self, request, frontend_cluster_name, format=None):
        """Shutdown the frontend of a named cluster."""
        clust = self._get_authorized_cluster(request, frontend_cluster_name)
        poweroff_nodes.delay([clust.frontend.rocks_name], "shutdown")
        return Response(status=204)

    @detail_route(methods=['put'])
    def reboot(self, request, frontend_cluster_name, format=None):
        """Reboot the frontend of a named cluster."""
        clust = self._get_authorized_cluster(request, frontend_cluster_name)
        poweroff_nodes.delay([clust.frontend.rocks_name], "reboot")
        return Response(status=204)

    @detail_route(methods=['put'])
    def reset(self, request, frontend_cluster_name, format=None):
        """Reset the frontend of a named cluster."""
        clust = self._get_authorized_cluster(request, frontend_cluster_name)
        poweroff_nodes.delay([clust.frontend.rocks_name], "reset")
        return Response(status=204)

    @detail_route(methods=['put'])
    def poweron(self, request, frontend_cluster_name, format=None):
        """Power on the frontend of a named cluster."""
        clust = self._get_authorized_cluster(request, frontend_cluster_name)
        poweron_nodes.delay([clust.frontend.rocks_name])
        return Response(status=204)

    @detail_route(methods=['put'])
    def poweroff(self, request, frontend_cluster_name, format=None):
        """Power off the frontend of a named cluster."""
        clust = self._get_authorized_cluster(request, frontend_cluster_name)
        poweroff_nodes.delay([clust.frontend.rocks_name], "poweroff")
        return Response(status=204)

    @detail_route(methods=['put'])
    def attach_iso(self, request, frontend_cluster_name, format=None):
        """Attach an ISO to the frontend resource in a named cluster."""
        clust = self._get_authorized_cluster(request, frontend_cluster_name)
        if "iso_name" not in request.GET:
            return Response("Please provide the iso_name", status=400)
        attach_iso.delay([clust.frontend.rocks_name], request.GET["iso_name"])
        return Response(status=204)


# #################################################
# USER
# #################################################
class UserDetailsView(RetrieveUpdateAPIView):
    """
    Returns User's details in JSON format.
    Accepts the following GET parameters: token
    Accepts the following POST parameters:
        Required: token
        Optional: email, first_name, last_name and UserProfile fields
    Returns the updated UserProfile and/or User object.
    """
    serializer_class = UserDetailsSerializer
    permission_classes = (IsAuthenticated,)

    def get_object(self):
        return self.request.user


# #################################################
# PROJECT
# #################################################
class ProjectListView(ListAPIView):
    """Returns project details."""
    serializer_class = ProjectSerializer

    def get_queryset(self):
        return self.request.user.groups.all()


class ImageUploadView(APIView):
    """List and upload public VM images under /mnt/images/public."""

    parser_classes = (MultiPartParser,)

    def get(self, request, format=None):
        """List the public images directory."""
        filepath = '/mnt/images/public'
        return Response(["public/%s" % dir for dir in os.listdir(filepath)])

    def post(self, request, format=None):
        """Store an uploaded image file, verifying the client-supplied MD5."""
        file_obj = request.FILES['file']
        try:
            filepath = '/mnt/images/public/%s' % (file_obj.name)
            if not request.META.get('HTTP_MD5'):
                return Response("md5 was not provided", status=400)
            if request.META['HTTP_MD5'] != md5_for_file(file_obj.chunks()):
                return Response("md5 does not match the file", status=400)
            with open(filepath, 'wb+') as destination:
                for chunk in file_obj.chunks():
                    destination.write(chunk)
            return Response(status=204)
        finally:
            # Always release the upload handle, even on early return.
            if(file_obj):
                file_obj.close()


def md5_for_file(chunks):
    """Return the hex MD5 digest of an iterable of byte chunks.

    The caller compares this value against the HTTP_MD5 request header
    (a hex string), so the hex digest is returned.  NOTE(review): the
    source scrape was truncated after the update loop; the return line
    is restored from the caller's usage — confirm against the original.
    """
    md5 = hashlib.md5()
    for data in chunks:
        md5.update(data)
    return md5.hexdigest()

Full Screen

Full Screen

fake_policy.py

Source:fake_policy.py Github

copy

Full Screen

1# Copyright (c) 2012 OpenStack Foundation2#3# Licensed under the Apache License, Version 2.0 (the "License"); you may4# not use this file except in compliance with the License. You may obtain5# a copy of the License at6#7# http://www.apache.org/licenses/LICENSE-2.08#9# Unless required by applicable law or agreed to in writing, software10# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT11# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the12# License for the specific language governing permissions and limitations13# under the License.14policy_data = """15{16 "admin_api": "is_admin:True",17 "cells_scheduler_filter:TargetCellFilter": "is_admin:True",18 "context_is_admin": "role:admin or role:administrator",19 "compute:create": "",20 "compute:create:attach_network": "",21 "compute:create:attach_volume": "",22 "compute:get": "",23 "compute:get_all": "",24 "compute:get_all_tenants": "",25 "compute:update": "",26 "compute:get_instance_metadata": "",27 "compute:get_all_instance_metadata": "",28 "compute:get_all_instance_system_metadata": "",29 "compute:update_instance_metadata": "",30 "compute:delete_instance_metadata": "",31 "compute:get_instance_faults": "",32 "compute:get_diagnostics": "",33 "compute:get_instance_diagnostics": "",34 "compute:get_lock": "",35 "compute:lock": "",36 "compute:unlock": "",37 "compute:unlock_override": "is_admin:True",38 "compute:get_vnc_console": "",39 "compute:get_spice_console": "",40 "compute:get_rdp_console": "",41 "compute:get_serial_console": "",42 "compute:get_console_output": "",43 "compute:associate_floating_ip": "",44 "compute:reset_network": "",45 "compute:inject_network_info": "",46 "compute:add_fixed_ip": "",47 "compute:remove_fixed_ip": "",48 "compute:attach_volume": "",49 "compute:detach_volume": "",50 "compute:attach_interface": "",51 "compute:detach_interface": "",52 "compute:set_admin_password": "",53 "compute:rescue": "",54 "compute:unrescue": "",55 "compute:suspend": "",56 
"compute:resume": "",57 "compute:pause": "",58 "compute:unpause": "",59 "compute:start": "",60 "compute:stop": "",61 "compute:resize": "",62 "compute:confirm_resize": "",63 "compute:revert_resize": "",64 "compute:rebuild": "",65 "compute:reboot": "",66 "compute:snapshot": "",67 "compute:backup": "",68 "compute:shelve": "",69 "compute:shelve_offload": "",70 "compute:unshelve": "",71 "compute:security_groups:add_to_instance": "",72 "compute:security_groups:remove_from_instance": "",73 "compute:delete": "",74 "compute:soft_delete": "",75 "compute:force_delete": "",76 "compute:restore": "",77 "compute:swap_volume": "",78 "compute:volume_snapshot_create": "",79 "compute:volume_snapshot_delete": "",80 "compute:v3:servers:start": "",81 "compute:v3:servers:stop": "",82 "compute_extension:v3:os-access-ips": "",83 "compute_extension:accounts": "",84 "compute_extension:admin_actions:pause": "",85 "compute_extension:admin_actions:unpause": "",86 "compute_extension:admin_actions:suspend": "",87 "compute_extension:admin_actions:resume": "",88 "compute_extension:admin_actions:lock": "",89 "compute_extension:admin_actions:unlock": "",90 "compute_extension:admin_actions:resetNetwork": "",91 "compute_extension:admin_actions:injectNetworkInfo": "",92 "compute_extension:admin_actions:createBackup": "",93 "compute_extension:admin_actions:migrateLive": "",94 "compute_extension:admin_actions:resetState": "",95 "compute_extension:admin_actions:migrate": "",96 "compute_extension:v3:os-admin-actions:reset_network": "",97 "compute_extension:v3:os-admin-actions:inject_network_info": "",98 "compute_extension:v3:os-admin-actions:reset_state": "",99 "compute_extension:v3:os-admin-password": "",100 "compute_extension:aggregates": "rule:admin_api",101 "compute_extension:v3:os-aggregates:index": "rule:admin_api",102 "compute_extension:v3:os-aggregates:create": "rule:admin_api",103 "compute_extension:v3:os-aggregates:show": "rule:admin_api",104 "compute_extension:v3:os-aggregates:update": 
"rule:admin_api",105 "compute_extension:v3:os-aggregates:delete": "rule:admin_api",106 "compute_extension:v3:os-aggregates:add_host": "rule:admin_api",107 "compute_extension:v3:os-aggregates:remove_host": "rule:admin_api",108 "compute_extension:v3:os-aggregates:set_metadata": "rule:admin_api",109 "compute_extension:agents": "",110 "compute_extension:v3:os-agents": "",111 "compute_extension:attach_interfaces": "",112 "compute_extension:v3:os-attach-interfaces": "",113 "compute_extension:baremetal_nodes": "",114 "compute_extension:cells": "",115 "compute_extension:cells:create": "rule:admin_api",116 "compute_extension:cells:delete": "rule:admin_api",117 "compute_extension:cells:update": "rule:admin_api",118 "compute_extension:cells:sync_instances": "rule:admin_api",119 "compute_extension:v3:os-cells": "",120 "compute_extension:v3:os-cells:create": "rule:admin_api",121 "compute_extension:v3:os-cells:delete": "rule:admin_api",122 "compute_extension:v3:os-cells:update": "rule:admin_api",123 "compute_extension:v3:os-cells:sync_instances": "rule:admin_api",124 "compute_extension:certificates": "",125 "compute_extension:v3:os-certificates:create": "",126 "compute_extension:v3:os-certificates:show": "",127 "compute_extension:cloudpipe": "",128 "compute_extension:cloudpipe_update": "",129 "compute_extension:config_drive": "",130 "compute_extension:v3:os-config-drive": "",131 "compute_extension:console_output": "",132 "compute_extension:v3:os-console-output": "",133 "compute_extension:consoles": "",134 "compute_extension:v3:os-remote-consoles": "",135 "compute_extension:createserverext": "",136 "compute_extension:v3:os-create-backup": "",137 "compute_extension:deferred_delete": "",138 "compute_extension:v3:os-deferred-delete": "",139 "compute_extension:disk_config": "",140 "compute_extension:evacuate": "is_admin:True",141 "compute_extension:v3:os-evacuate": "is_admin:True",142 "compute_extension:extended_server_attributes": "",143 
"compute_extension:v3:os-extended-server-attributes": "",144 "compute_extension:extended_status": "",145 "compute_extension:v3:os-extended-status": "",146 "compute_extension:extended_availability_zone": "",147 "compute_extension:v3:os-extended-availability-zone": "",148 "compute_extension:extended_ips": "",149 "compute_extension:extended_ips_mac": "",150 "compute_extension:extended_vif_net": "",151 "compute_extension:extended_volumes": "",152 "compute_extension:v3:os-extended-volumes": "",153 "compute_extension:v3:os-extended-volumes:swap": "",154 "compute_extension:v3:os-extended-volumes:attach": "",155 "compute_extension:v3:os-extended-volumes:detach": "",156 "compute_extension:v3:extensions:discoverable": "",157 "compute_extension:fixed_ips": "",158 "compute_extension:flavor_access": "",159 "compute_extension:flavor_access:addTenantAccess": "rule:admin_api",160 "compute_extension:flavor_access:removeTenantAccess": "rule:admin_api",161 "compute_extension:v3:os-flavor-access": "",162 "compute_extension:v3:os-flavor-access:remove_tenant_access":163 "rule:admin_api",164 "compute_extension:v3:os-flavor-access:add_tenant_access":165 "rule:admin_api",166 "compute_extension:flavor_disabled": "",167 "compute_extension:v3:os-flavor-disabled": "",168 "compute_extension:flavor_rxtx": "",169 "compute_extension:v3:os-flavor-rxtx": "",170 "compute_extension:flavor_swap": "",171 "compute_extension:flavorextradata": "",172 "compute_extension:flavorextraspecs:index": "",173 "compute_extension:flavorextraspecs:show": "",174 "compute_extension:flavorextraspecs:create": "is_admin:True",175 "compute_extension:flavorextraspecs:update": "is_admin:True",176 "compute_extension:flavorextraspecs:delete": "is_admin:True",177 "compute_extension:v3:flavor-extra-specs:index": "",178 "compute_extension:v3:flavor-extra-specs:show": "",179 "compute_extension:v3:flavor-extra-specs:create": "is_admin:True",180 "compute_extension:v3:flavor-extra-specs:update": "is_admin:True",181 
"compute_extension:v3:flavor-extra-specs:delete": "is_admin:True",182 "compute_extension:flavormanage": "",183 "compute_extension:v3:flavor-manage": "",184 "compute_extension:v3:flavors:discoverable": "",185 "compute_extension:floating_ip_dns": "",186 "compute_extension:floating_ip_pools": "",187 "compute_extension:floating_ips": "",188 "compute_extension:floating_ips_bulk": "",189 "compute_extension:fping": "",190 "compute_extension:fping:all_tenants": "is_admin:True",191 "compute_extension:hide_server_addresses": "",192 "compute_extension:v3:os-hide-server-addresses": "",193 "compute_extension:hosts": "rule:admin_api",194 "compute_extension:v3:os-hosts": "rule:admin_api",195 "compute_extension:hypervisors": "rule:admin_api",196 "compute_extension:v3:os-hypervisors": "rule:admin_api",197 "compute_extension:image_size": "",198 "compute_extension:v3:image-size": "",199 "compute_extension:instance_actions": "",200 "compute_extension:v3:os-instance-actions": "",201 "compute_extension:instance_actions:events": "is_admin:True",202 "compute_extension:v3:os-instance-actions:events": "is_admin:True",203 "compute_extension:instance_usage_audit_log": "",204 "compute_extension:keypairs": "",205 "compute_extension:keypairs:index": "",206 "compute_extension:keypairs:show": "",207 "compute_extension:keypairs:create": "",208 "compute_extension:keypairs:delete": "",209 "compute_extension:v3:os-keypairs": "",210 "compute_extension:v3:os-keypairs:index": "",211 "compute_extension:v3:os-keypairs:show": "",212 "compute_extension:v3:os-keypairs:create": "",213 "compute_extension:v3:os-keypairs:delete": "",214 "compute_extension:v3:os-lock-server:lock": "",215 "compute_extension:v3:os-lock-server:unlock": "",216 "compute_extension:v3:os-migrate-server:migrate": "",217 "compute_extension:v3:os-migrate-server:migrate_live": "",218 "compute_extension:multinic": "",219 "compute_extension:v3:os-multinic": "",220 "compute_extension:networks": "",221 "compute_extension:networks:view": "",222 
"compute_extension:networks_associate": "",223 "compute_extension:os-tenant-networks": "",224 "compute_extension:v3:os-pause-server:pause": "",225 "compute_extension:v3:os-pause-server:unpause": "",226 "compute_extension:v3:os-pci:pci_servers": "",227 "compute_extension:v3:os-pci:index": "",228 "compute_extension:v3:os-pci:detail": "",229 "compute_extension:v3:os-pci:show": "",230 "compute_extension:quotas:show": "",231 "compute_extension:quotas:update": "",232 "compute_extension:quotas:delete": "",233 "compute_extension:v3:os-quota-sets:show": "",234 "compute_extension:v3:os-quota-sets:update": "",235 "compute_extension:v3:os-quota-sets:delete": "",236 "compute_extension:v3:os-quota-sets:detail": "",237 "compute_extension:quota_classes": "",238 "compute_extension:rescue": "",239 "compute_extension:v3:os-rescue": "",240 "compute_extension:security_group_default_rules": "",241 "compute_extension:security_groups": "",242 "compute_extension:v3:os-security-groups": "",243 "compute_extension:server_diagnostics": "",244 "compute_extension:v3:os-server-diagnostics": "",245 "compute_extension:server_groups": "",246 "compute_extension:server_password": "",247 "compute_extension:v3:os-server-password": "",248 "compute_extension:server_usage": "",249 "compute_extension:v3:os-server-usage": "",250 "compute_extension:v3:os-server-groups": "",251 "compute_extension:services": "",252 "compute_extension:v3:os-services": "",253 "compute_extension:shelve": "",254 "compute_extension:shelveOffload": "",255 "compute_extension:v3:os-shelve:shelve": "",256 "compute_extension:v3:os-shelve:shelve_offload": "",257 "compute_extension:simple_tenant_usage:show": "",258 "compute_extension:simple_tenant_usage:list": "",259 "compute_extension:v3:os-simple-tenant-usage:show": "",260 "compute_extension:v3:os-simple-tenant-usage:list": "",261 "compute_extension:unshelve": "",262 "compute_extension:v3:os-shelve:unshelve": "",263 "compute_extension:v3:os-suspend-server:suspend": "",264 
"compute_extension:v3:os-suspend-server:resume": "",265 "compute_extension:users": "",266 "compute_extension:virtual_interfaces": "",267 "compute_extension:virtual_storage_arrays": "",268 "compute_extension:volumes": "",269 "compute_extension:volume_attachments:index": "",270 "compute_extension:volume_attachments:show": "",271 "compute_extension:volume_attachments:create": "",272 "compute_extension:volume_attachments:update": "",273 "compute_extension:volume_attachments:delete": "",274 "compute_extension:v3:os-volumes": "",275 "compute_extension:volumetypes": "",276 "compute_extension:zones": "",277 "compute_extension:availability_zone:list": "",278 "compute_extension:v3:os-availability-zone:list": "",279 "compute_extension:availability_zone:detail": "is_admin:True",280 "compute_extension:v3:os-availability-zone:detail": "is_admin:True",281 "compute_extension:used_limits_for_admin": "is_admin:True",282 "compute_extension:v3:os-used-limits": "is_admin:True",283 "compute_extension:migrations:index": "is_admin:True",284 "compute_extension:v3:os-migrations:index": "is_admin:True",285 "compute_extension:os-assisted-volume-snapshots:create": "",286 "compute_extension:os-assisted-volume-snapshots:delete": "",287 "compute_extension:console_auth_tokens": "is_admin:True",288 "compute_extension:v3:os-console-auth-tokens": "is_admin:True",289 "compute_extension:os-server-external-events:create": "rule:admin_api",290 "compute_extension:v3:os-server-external-events:create": "rule:admin_api",291 "volume:create": "",292 "volume:get": "",293 "volume:get_all": "",294 "volume:get_volume_metadata": "",295 "volume:delete": "",296 "volume:update": "",297 "volume:delete_volume_metadata": "",298 "volume:update_volume_metadata": "",299 "volume:attach": "",300 "volume:detach": "",301 "volume:reserve_volume": "",302 "volume:unreserve_volume": "",303 "volume:begin_detaching": "",304 "volume:roll_detaching": "",305 "volume:check_attach": "",306 "volume:check_detach": "",307 
"volume:initialize_connection": "",308 "volume:terminate_connection": "",309 "volume:create_snapshot": "",310 "volume:delete_snapshot": "",311 "volume:get_snapshot": "",312 "volume:get_all_snapshots": "",313 "volume_extension:volume_admin_actions:reset_status": "rule:admin_api",314 "volume_extension:snapshot_admin_actions:reset_status": "rule:admin_api",315 "volume_extension:volume_admin_actions:force_delete": "rule:admin_api",316 "volume_extension:volume_actions:upload_image": "",317 "volume_extension:types_manage": "",318 "volume_extension:types_extra_specs": "",319 "network:get_all": "",320 "network:get": "",321 "network:create": "",322 "network:delete": "",323 "network:associate": "",324 "network:disassociate": "",325 "network:get_vifs_by_instance": "",326 "network:get_vif_by_mac_address": "",327 "network:allocate_for_instance": "",328 "network:deallocate_for_instance": "",329 "network:validate_networks": "",330 "network:get_instance_uuids_by_ip_filter": "",331 "network:get_instance_id_by_floating_address": "",332 "network:setup_networks_on_host": "",333 "network:get_floating_ip": "",334 "network:get_floating_ip_pools": "",335 "network:get_floating_ip_by_address": "",336 "network:get_floating_ips_by_project": "",337 "network:get_floating_ips_by_fixed_address": "",338 "network:allocate_floating_ip": "",339 "network:deallocate_floating_ip": "",340 "network:associate_floating_ip": "",341 "network:disassociate_floating_ip": "",342 "network:release_floating_ip": "",343 "network:migrate_instance_start": "",344 "network:migrate_instance_finish": "",345 "network:get_fixed_ip": "",346 "network:get_fixed_ip_by_address": "",347 "network:add_fixed_ip_to_instance": "",348 "network:remove_fixed_ip_from_instance": "",349 "network:add_network_to_project": "",350 "network:get_instance_nw_info": "",351 "network:get_dns_domains": "",352 "network:add_dns_entry": "",353 "network:modify_dns_entry": "",354 "network:delete_dns_entry": "",355 "network:get_dns_entries_by_address": 
"",356 "network:get_dns_entries_by_name": "",357 "network:create_private_dns_domain": "",358 "network:create_public_dns_domain": "",359 "network:delete_dns_domain": "",360 "network:attach_external_network": "rule:admin_api"361}...

Full Screen

Full Screen

implementations.py

Source:implementations.py Github

copy

Full Screen

""" Module containing all implementations of ML techniques required for the project """
import numpy as np

from helpers import (batch_iter, compute_gradient_hinge,
                     compute_gradient_mse, compute_gradient_nll,
                     compute_hessian_nll, compute_loss, compute_loss_hinge,
                     compute_loss_nll, compute_subgradient_mae,
                     map_target_classes_to_boolean)


def least_squares_GD(y, x, initial_w, max_iters, gamma, mae=False, threshold=1e-5):
    """
    Gradient Descent optimization for linear regression.
    Can be run with both MSE and MAE loss.

    :param y: vector of target values, numpy array with dimensions (N, 1)
    :param x: data matrix, numpy ndarray with shape (N, D),
              where N is the number of samples and D is the number of features
    :param initial_w: vector of initial weights, numpy array with dimensions (D, 1);
                      left unmodified (a copy is taken before the in-place updates)
    :param max_iters: how many iterations to run the algorithm, integer
    :param gamma: learning rate, positive float value
    :param mae: whether to use MAE loss, boolean, optional (default False)
    :param threshold: convergence threshold, positive float value
    :returns: (final weights, final loss value), tuple
    """
    # Copy so the caller's initial_w is NOT mutated by the in-place `w -= ...`
    # below (the original aliased it, silently clobbering the caller's array).
    w = np.copy(initial_w)
    prev_loss = compute_loss(y, x, w, mae)
    for n_iter in range(max_iters):
        # Total gradient (subgradient when the non-differentiable MAE is used).
        grd = compute_subgradient_mae(y, x, w) if mae else compute_gradient_mse(y, x, w)
        w -= gamma * grd
        # Stop once the loss decrease falls below the convergence threshold.
        loss = compute_loss(y, x, w, mae)
        if abs(loss - prev_loss) < threshold:
            print(f'converged at iter : {n_iter}')
            break
        prev_loss = loss
    # Recompute the loss for the final weights.
    loss = compute_loss(y, x, w, mae)
    return w, loss


def least_squares_SGD(y, x, initial_w, max_iters, gamma, mae=False, threshold=1e-5):
    """
    Stochastic Gradient Descent optimization for linear regression.
    Can be run with both MSE and MAE loss.

    :param y: vector of target values, numpy array with dimensions (N, 1)
    :param x: data matrix, numpy ndarray with shape (N, D),
              where N is the number of samples and D is the number of features
    :param initial_w: vector of initial weights, numpy array with dimensions (D, 1)
    :param max_iters: how many iterations to run the algorithm, integer
    :param gamma: learning rate, positive float value
    :param mae: whether to use MAE loss, boolean, optional (default False)
    :param threshold: convergence threshold, positive float value
    :returns: (final weights, final loss value), tuple
    """
    w = np.copy(initial_w)
    prev_loss = compute_loss(y, x, w, mae)
    converged = False
    for n_iter in range(max_iters):
        # batch_iter yields a single random sample (y_n, x_n) per outer iteration.
        for y_n, x_n in batch_iter(y, x, batch_size=1, num_batches=1):
            # Gradient for one sample only (subgradient when MAE is used).
            grd = compute_subgradient_mae(y_n, x_n, w) if mae else compute_gradient_mse(y_n, x_n, w)
            w = w - gamma * grd
            # Convergence is still tested on the FULL-data loss.
            loss = compute_loss(y, x, w, mae)
            if abs(loss - prev_loss) < threshold:
                print(f'converged at iter : {n_iter}')
                # Flag convergence: a bare `break` here only exits the inner
                # batch loop, so the outer loop would keep iterating.
                converged = True
                break
            prev_loss = loss
        if converged:
            break
    loss = compute_loss(y, x, w, mae)
    return w, loss


def least_squares(y, x):
    """
    Calculate the least squares solution explicitly using the normal equations.

    :param y: vector of target values, numpy array with dimensions (N, 1)
    :param x: data matrix, numpy ndarray with shape (N, D),
              where N is the number of samples and D is the number of features
    :raises LinAlgError: if the Gram matrix has no inverse
    :returns: (weights, loss value), tuple
    """
    gram = x.T @ x
    # Normal equations: (X^T X) w = X^T y
    w = np.linalg.solve(gram, x.T @ y)
    loss = compute_loss(y, x, w)
    return w, loss


def ridge_regression(y, x, lambda_):
    """
    Calculate the least squares solution with L2 regularization explicitly
    using the normal equations.

    :param y: vector of target values, numpy array with dimensions (N, 1)
    :param x: data matrix, numpy ndarray with shape (N, D),
              where N is the number of samples and D is the number of features
    :param lambda_: regularization coefficient, positive float value
    :returns: (weights, loss value), tuple
    """
    # Gram matrix augmented with the scaled regularization term 2*N*lambda*I.
    gram = x.T @ x
    gram += 2 * x.shape[0] * lambda_ * np.identity(gram.shape[0])
    w = np.linalg.solve(gram, x.T @ y)
    loss = compute_loss(y, x, w)
    return w, loss


def logistic_regression(y, x, initial_w, max_iters, gamma, threshold=1e-2):
    """
    Newton optimization algorithm for logistic regression.

    :param y: vector of target values, numpy array with dimensions (N, 1)
    :param x: data matrix, numpy ndarray with shape (N, D),
              where N is the number of samples and D is the number of features
    :param initial_w: vector of initial weights, numpy array with dimensions (D, 1);
                      left unmodified (a copy is taken before the in-place updates)
    :param max_iters: how many iterations to run the algorithm, integer
    :param gamma: learning rate, positive float value
    :param threshold: convergence threshold, positive float value
    :returns: (final weights, final loss value), tuple
    :raises LinAlgError: if the Hessian matrix becomes singular
    """
    # Map the {-1, 1} classes to {0, 1} expected by the NLL helpers.
    y = map_target_classes_to_boolean(y)
    # Copy so the caller's initial_w is not mutated by `w -= ...` below.
    w = np.copy(initial_w)
    prev_loss = compute_loss_nll(y, x, w)
    for n_iter in range(max_iters):
        grd = compute_gradient_nll(y, x, w)
        hess = compute_hessian_nll(y, x, w)
        # Newton step: w <- w - gamma * H^-1 g (solve instead of inverting H).
        w -= gamma * np.linalg.solve(hess, grd)
        loss = compute_loss_nll(y, x, w)
        if abs(loss - prev_loss) < threshold:
            print(f'converged at iter : {n_iter}')
            break
        prev_loss = loss
    loss = compute_loss_nll(y, x, w)
    return w, loss


def reg_logistic_regression(y, x, lambda_, initial_w, max_iters, gamma, threshold=1e-2):
    """
    Newton optimization algorithm for logistic regression with L2 regularization.

    :param y: vector of target values, numpy array with dimensions (N, 1)
    :param x: data matrix, numpy ndarray with shape (N, D),
              where N is the number of samples and D is the number of features
    :param lambda_: regularization coefficient, positive float value
    :param initial_w: vector of initial weights, numpy array with dimensions (D, 1);
                      left unmodified (a copy is taken before the in-place updates)
    :param max_iters: how many iterations to run the algorithm, integer
    :param gamma: learning rate, positive float value
    :param threshold: convergence threshold, positive float value
    :returns: (final weights, final loss value), tuple
    :raises ArithmeticError: if the loss diverges to infinity during training
    """
    y = map_target_classes_to_boolean(y)
    w = np.copy(initial_w)
    prev_loss = compute_loss_nll(y, x, w, lambda_)
    for n_iter in range(max_iters):
        grd = compute_gradient_nll(y, x, w, lambda_)
        hess = compute_hessian_nll(y, x, w, lambda_)
        # Newton step with a 1/sqrt(t+1) decaying step size.
        w -= gamma / np.sqrt(n_iter + 1) * np.linalg.solve(hess, grd)
        loss = compute_loss_nll(y, x, w, lambda_)
        if loss == np.inf:
            raise ArithmeticError('Training diverges')
        if abs(loss - prev_loss) < threshold:
            print(f'converged at iter : {n_iter}')
            break
        prev_loss = loss
    loss = compute_loss_nll(y, x, w, lambda_)
    return w, loss


def svm(y, x, lambda_, initial_w, max_iters, gamma, threshold=1e-5):
    """
    Linear SVM classification with L2 regularization, simulated through
    optimization of the hinge loss with gradient descent.

    :param y: vector of target values, numpy array with length N
    :param x: data matrix, numpy ndarray with shape (N, D),
              where N is the number of samples and D is the number of features
    :param lambda_: regularization coefficient, positive float value
    :param initial_w: vector of initial weights, numpy array with length D;
                      left unmodified (a copy is taken before the in-place updates)
    :param max_iters: how many iterations to run the algorithm, integer
    :param gamma: learning rate, positive float value
    :param threshold: convergence threshold, positive float value
    :returns: (final weights, final loss value), tuple
    """
    w = np.copy(initial_w)
    prev_loss = compute_loss_hinge(y, x, w, lambda_)
    for n_iter in range(max_iters):
        grd = compute_gradient_hinge(y, x, w, lambda_)
        # Gradient step with a 1/(1 + 0.01*t) decaying step size.
        w -= gamma / (1 + 1e-2 * n_iter) * grd
        loss = compute_loss_hinge(y, x, w, lambda_)
        if abs(loss - prev_loss) < threshold:
            print(f'converged at iter : {n_iter}')
            break
        prev_loss = loss
    loss = compute_loss_hinge(y, x, w, lambda_)
    # NOTE(review): the final return was cut off in the scraped source; restored
    # to match every other function's (w, loss) contract — confirm against upstream.
    return w, loss

Full Screen

Full Screen

gcp_compute_operator.py

Source:gcp_compute_operator.py Github

copy

Full Screen

#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module is deprecated. Please use `airflow.providers.google.cloud.operators.compute`."""
import warnings

from airflow.providers.google.cloud.operators.compute import (
    ComputeEngineBaseOperator,
    ComputeEngineCopyInstanceTemplateOperator,
    ComputeEngineInstanceGroupUpdateManagerTemplateOperator,
    ComputeEngineSetMachineTypeOperator,
    ComputeEngineStartInstanceOperator,
    ComputeEngineStopInstanceOperator,
)

# Warn once at import time; stacklevel=2 points the warning at the importer.
warnings.warn(
    "This module is deprecated. Please use `airflow.providers.google.cloud.operators.compute`.",
    DeprecationWarning,
    stacklevel=2,
)


class GceBaseOperator(ComputeEngineBaseOperator):
    """
    This class is deprecated.
    Please use `airflow.providers.google.cloud.operators.compute.ComputeEngineBaseOperator`.
    """

    def __init__(self, *args, **kwargs):
        # stacklevel=3 skips this shim's frames so the warning points at user code.
        warnings.warn(
            """This class is deprecated.
            Please use `airflow.providers.google.cloud.operators.compute.ComputeEngineBaseOperator`.""",
            DeprecationWarning,
            stacklevel=3,
        )
        super().__init__(*args, **kwargs)


class GceInstanceGroupManagerUpdateTemplateOperator(ComputeEngineInstanceGroupUpdateManagerTemplateOperator):
    """
    This class is deprecated.
    Please use `airflow.providers.google.cloud.operators.compute
    .ComputeEngineInstanceGroupUpdateManagerTemplateOperator`.
    """

    def __init__(self, *args, **kwargs):
        warnings.warn(
            """This class is deprecated. Please use
            `airflow.providers.google.cloud.operators.compute
            .ComputeEngineInstanceGroupUpdateManagerTemplateOperator`.""",
            DeprecationWarning,
            stacklevel=3,
        )
        super().__init__(*args, **kwargs)


class GceInstanceStartOperator(ComputeEngineStartInstanceOperator):
    """
    This class is deprecated.
    Please use `airflow.providers.google.cloud.operators
    .compute.ComputeEngineStartInstanceOperator`.
    """

    def __init__(self, *args, **kwargs):
        warnings.warn(
            """This class is deprecated.
            Please use `airflow.providers.google.cloud.operators.compute
            .ComputeEngineStartInstanceOperator`.""",
            DeprecationWarning,
            stacklevel=3,
        )
        super().__init__(*args, **kwargs)


class GceInstanceStopOperator(ComputeEngineStopInstanceOperator):
    """
    This class is deprecated.
    Please use `airflow.providers.google.cloud.operators.compute.ComputeEngineStopInstanceOperator`.
    """

    def __init__(self, *args, **kwargs):
        warnings.warn(
            """This class is deprecated.
            Please use `airflow.providers.google.cloud.operators.compute
            .ComputeEngineStopInstanceOperator`.""",
            DeprecationWarning,
            stacklevel=3,
        )
        super().__init__(*args, **kwargs)


class GceInstanceTemplateCopyOperator(ComputeEngineCopyInstanceTemplateOperator):
    """
    This class is deprecated.
    Please use `airflow.providers.google.cloud.operators.compute.ComputeEngineCopyInstanceTemplateOperator`.
    """

    def __init__(self, *args, **kwargs):
        # Fixed: the original message opened with four quotes (""""This...),
        # which put a stray literal `"` at the start of the emitted warning.
        warnings.warn(
            """This class is deprecated.
            Please use `airflow.providers.google.cloud.operators.compute
            .ComputeEngineCopyInstanceTemplateOperator`.""",
            DeprecationWarning,
            stacklevel=3,
        )
        super().__init__(*args, **kwargs)


class GceSetMachineTypeOperator(ComputeEngineSetMachineTypeOperator):
    """
    This class is deprecated.
    Please use `airflow.providers.google.cloud.operators.compute.ComputeEngineSetMachineTypeOperator`.
    """

    def __init__(self, *args, **kwargs):
        warnings.warn(
            """This class is deprecated.
            Please use `airflow.providers.google.cloud.operators.compute
            .ComputeEngineSetMachineTypeOperator`.""",
            DeprecationWarning,
            stacklevel=3,
        )
        # NOTE(review): this super() call was cut off in the scraped source;
        # restored to match every other shim in this module — confirm upstream.
        super().__init__(*args, **kwargs)

Full Screen

Full Screen

Automation Testing Tutorials

Learn to execute automation testing from scratch with LambdaTest Learning Hub — right from setting up the prerequisites to run your first automation test, to following best practices and diving deeper into advanced test scenarios. LambdaTest Learning Hubs compile a list of step-by-step guides to help you become proficient with different test automation frameworks, e.g. Selenium, Cypress, TestNG, etc.

LambdaTest Learning Hubs:

YouTube

You can also refer to video tutorials on the LambdaTest YouTube channel to get step-by-step demonstrations from industry experts.

Run Avocado automation tests on the LambdaTest cloud grid

Perform automation testing on 3000+ real desktop and mobile devices online.

Try LambdaTest Now !!

Get 100 automation test minutes FREE!!

Next-Gen App & Browser Testing Cloud

Was this article helpful?

Helpful

Not Helpful