How to use the run method in Robot Framework

Best Python code snippets using robotframework

Run Robot Framework automation tests on the LambdaTest cloud grid.

Perform automation testing on 3000+ real desktop and mobile devices online.
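Since the topic here is Robot Framework's run entry point, a minimal sketch of calling it from Python follows. robot.run and robot.run_cli are the documented programmatic entry points of the robotframework package; the suite path tests/example.robot and the option values are illustrative assumptions.

# A minimal sketch of Robot Framework's programmatic entry points.
# The suite path and option values below are illustrative assumptions.
from robot import run, run_cli

# robot.run(*tests, **options) accepts the same options as the `robot`
# command line and returns its return code (0 means all tests passed).
rc = run('tests/example.robot', outputdir='results', loglevel='DEBUG')
print('robot.run return code:', rc)

# run_cli() takes raw command-line arguments instead of keyword options;
# exit=False keeps it from calling sys.exit() so the script can continue.
run_cli(['--outputdir', 'results', 'tests/example.robot'], exit=False)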

Run.js

Source: Run.js (GitHub)

/*
 * This file is part of the Reproducible Open Benchmarks for Data Analysis
 * Platform (ROB).
 *
 * Copyright (C) [2019-2020] NYU.
 *
 * ROB is free software; you can redistribute it and/or modify it under the
 * terms of the MIT License; see LICENSE file for more details.
 */

import { selectDialog } from './Benchmark';
import { fetchApiResource, postRequest } from './Requests';
import { SHOW_RUNS } from '../resources/Dialog';



/*
 * Action types
 */

// Run handle
export const FETCH_RUN_ERROR = 'FETCH_RUN_ERROR';
export const FETCH_RUN_START = 'FETCH_RUN_START';
export const FETCH_RUN_SUCCESS = 'FETCH_RUN_SUCCESS';
// Run listing
export const FETCH_RUNS_ERROR = 'FETCH_RUNS_ERROR';
export const FETCH_RUNS_SUCCESS = 'FETCH_RUNS_SUCCESS';
// Run submission
export const SUBMIT_RUN_ERROR = 'SUBMIT_RUN_ERROR';
export const SUBMIT_RUN_START = 'SUBMIT_RUN_START';
export const SUBMIT_RUN_SUCCESS = 'SUBMIT_RUN_SUCCESS';


/*
 * Actions
 */


// -- Errors ------------------------------------------------------------------

export const dismissFetchRunError = () => (fetchRunError());
export const dismissFetchRunsError = () => (fetchRunsError());
export const dismissSubmitRunError = () => (submitRunError());

const fetchRunError = (msg) => ({type: FETCH_RUN_ERROR, payload: msg});
const fetchRunsError = (msg) => ({type: FETCH_RUNS_ERROR, payload: msg});
const submitRunError = (msg) => ({type: SUBMIT_RUN_ERROR, payload: msg});


// -- Run handle --------------------------------------------------------------

export function fetchRun(api, run) {
    return fetchApiResource(
        api.urls.getRun(run.id),
        fetchRunSuccess,
        fetchRunError,
        fetchRunStart,
    );
}


const fetchRunStart = () => ({type: FETCH_RUN_START});

const fetchRunSuccess = (run) => ({type: FETCH_RUN_SUCCESS, payload: run});


export function cancelRun(api, submission, run) {
    return postRequest(
        api.urls.cancelRun(run.id),
        {reason: 'Canceled at user request'},
        (json) => (cancelRunSuccess(api, submission, json)),
        fetchRunError,
        fetchRunStart,
        'PUT'
    );
}


function cancelRunSuccess(api, submission, run) {
    return dispatch => {
        dispatch(fetchRuns(api, submission));
        return dispatch(fetchRunSuccess(run));
    };
}


// -- Run listing -------------------------------------------------------------

export function fetchRuns(api, submission, selectedRun) {
    return fetchApiResource(
        api.urls.listRuns(submission.id),
        (json) => {return dispatch => {
            // If the selected run is given, check whether the run state has
            // changed. If there is a change in state, fetch the run handle.
            if (selectedRun != null) {
                const run = json.runs.find((r) => (r.id === selectedRun.id));
                if (run != null) {
                    if (run.state !== selectedRun.state) {
                        dispatch(fetchRun(api, selectedRun));
                    }
                }
            }
            return dispatch({type: FETCH_RUNS_SUCCESS, payload: json});
        }},
        fetchRunsError
    );
}


// -- Submit run --------------------------------------------------------------

export function submitRun(api, submission, data) {
    return postRequest(
        api.urls.submitRun(submission.id),
        data,
        (json) => (submitRunSuccess(api, submission)),
        submitRunError,
        () => ({type: SUBMIT_RUN_START})
    );
}


function submitRunSuccess(api, submission) {
    return dispatch => {
        dispatch({type: SUBMIT_RUN_SUCCESS});
        dispatch(selectDialog(api, SHOW_RUNS));
        return dispatch(fetchRuns(api, submission));
    };
}

course_card_model.js

Source: course_card_model.js (GitHub)

/* globals gettext, $ */

import Backbone from 'backbone';

import DateUtils from 'edx-ui-toolkit/js/utils/date-utils';
import StringUtils from 'edx-ui-toolkit/js/utils/string-utils';

/**
 * Model for Course Programs.
 */
class CourseCardModel extends Backbone.Model {
  initialize(data) {
    if (data) {
      this.context = data;
      this.setActiveCourseRun(this.getCourseRun(data), data.user_preferences);
    }
  }

  getCourseRun(course) {
    const enrolledCourseRun = course.course_runs.find(run => run.is_enrolled);
    const openEnrollmentCourseRuns = this.getEnrollableCourseRuns();
    let desiredCourseRun;

    // If the learner has an existing, unexpired enrollment,
    // use it to populate the model.
    if (enrolledCourseRun && !course.expired) {
      desiredCourseRun = enrolledCourseRun;
    } else if (openEnrollmentCourseRuns.length > 0) {
      if (openEnrollmentCourseRuns.length === 1) {
        desiredCourseRun = openEnrollmentCourseRuns[0];
      } else {
        desiredCourseRun = CourseCardModel.getUnselectedCourseRun(openEnrollmentCourseRuns);
      }
    } else {
      desiredCourseRun = CourseCardModel.getUnselectedCourseRun(course.course_runs);
    }

    return desiredCourseRun;
  }

  getCourseRunWithHighestGrade(grades) {
    const allEnrolledCourseRuns = this.context.course_runs.filter(run => run.is_enrolled);
    if (allEnrolledCourseRuns.length <= 1) {
      return null;
    }

    allEnrolledCourseRuns.sort((a, b) => (grades[a.key] || 0) - (grades[b.key] || 0));
    return allEnrolledCourseRuns[allEnrolledCourseRuns.length - 1];
  }

  updateCourseRunWithHighestGrade(grades) {
    const courseRunWithHighestGrade = this.getCourseRunWithHighestGrade(grades);
    if (courseRunWithHighestGrade) {
      this.setActiveCourseRun(courseRunWithHighestGrade, this.context.user_preferences);
    }
  }

  isEnrolledInSession() {
    // Returns true if the user is currently enrolled in a session of the course
    return this.context.course_runs.find(run => run.is_enrolled) !== undefined;
  }

  static getUnselectedCourseRun(courseRuns) {
    const unselectedRun = {};

    if (courseRuns && courseRuns.length > 0) {
      const courseRun = courseRuns[0];

      $.extend(unselectedRun, {
        marketing_url: courseRun.marketing_url,
        is_enrollment_open: courseRun.is_enrollment_open,
        key: courseRun.key || '',
        is_mobile_only: courseRun.is_mobile_only || false,
      });
    }

    return unselectedRun;
  }

  getEnrollableCourseRuns() {
    const rawCourseRuns = this.context.course_runs.filter(run => (
      run.is_enrollment_open &&
      !run.is_enrolled &&
      !run.is_course_ended &&
      run.status === 'published'
    ));

    // Deep copy to avoid mutating this.context.
    const enrollableCourseRuns = $.extend(true, [], rawCourseRuns);

    // These are raw course runs from the server. The start
    // dates are ISO-8601 formatted strings that need to be
    // prepped for display.
    enrollableCourseRuns.forEach((courseRun) => {
      Object.assign(courseRun, {
        start_date: CourseCardModel.formatDate(courseRun.start),
        end_date: CourseCardModel.formatDate(courseRun.end),
        // This is used to render the date when selecting a course run to enroll in
        dateString: this.formatDateString(courseRun),
      });
    });

    return enrollableCourseRuns;
  }

  getUpcomingCourseRuns() {
    return this.context.course_runs.filter(run => (
      !run.is_enrollment_open &&
      !run.is_enrolled &&
      !run.is_course_ended &&
      run.status === 'published'
    ));
  }

  static formatDate(date, userPreferences) {
    let userTimezone = '';
    let userLanguage = '';
    if (userPreferences !== undefined) {
      userTimezone = userPreferences.time_zone;
      userLanguage = userPreferences['pref-lang'];
    }
    const context = {
      datetime: date,
      timezone: userTimezone,
      language: userLanguage,
      format: DateUtils.dateFormatEnum.shortDate,
    };
    return DateUtils.localize(context);
  }

  static getCertificatePriceString(run) {
    if ('seats' in run && run.seats.length) {
      // eslint-disable-next-line consistent-return
      const upgradeableSeats = run.seats.filter((seat) => {
        const upgradeableSeatTypes = ['verified', 'professional', 'no-id-professional', 'credit'];
        return upgradeableSeatTypes.indexOf(seat.type) >= 0;
      });
      if (upgradeableSeats.length > 0) {
        const upgradeableSeat = upgradeableSeats[0];
        if (upgradeableSeat) {
          const currency = upgradeableSeat.currency;
          if (currency === 'USD') {
            return `$${upgradeableSeat.price}`;
          }
          return `${upgradeableSeat.price} ${currency}`;
        }
      }
    }
    return null;
  }

  formatDateString(run) {
    const pacingType = run.pacing_type;
    let dateString;
    const start = CourseCardModel.valueIsDefined(run.start_date) ?
      run.advertised_start || run.start_date :
      this.get('start_date');
    const end = CourseCardModel.valueIsDefined(run.end_date) ? run.end_date : this.get('end_date');
    const now = new Date();
    const startDate = new Date(start);
    const endDate = new Date(end);

    if (pacingType === 'self_paced') {
      if (start) {
        dateString = startDate > now ?
          StringUtils.interpolate(gettext('(Self-paced) Starts {start}'), { start }) :
          StringUtils.interpolate(gettext('(Self-paced) Started {start}'), { start });
      } else if (end && endDate > now) {
        dateString = StringUtils.interpolate(gettext('(Self-paced) Ends {end}'), { end });
      } else if (end && endDate < now) {
        dateString = StringUtils.interpolate(gettext('(Self-paced) Ended {end}'), { end });
      }
    } else if (start && end) {
      dateString = `${start} - ${end}`;
    } else if (start) {
      dateString = startDate > now ?
        StringUtils.interpolate(gettext('Starts {start}'), { start }) :
        StringUtils.interpolate(gettext('Started {start}'), { start });
    } else if (end) {
      dateString = StringUtils.interpolate(gettext('Ends {end}'), { end });
    }
    return dateString;
  }

  static valueIsDefined(val) {
    return !([undefined, 'None', null].indexOf(val) >= 0);
  }

  setActiveCourseRun(courseRun, userPreferences) {
    let startDateString;
    let courseTitleLink = '';
    if (courseRun) {
      // Compute this inside the guard so courseRun.key is never read
      // when no course run is given.
      const isEnrolled = this.isEnrolledInSession() && courseRun.key;
      if (CourseCardModel.valueIsDefined(courseRun.advertised_start)) {
        startDateString = courseRun.advertised_start;
      } else {
        startDateString = CourseCardModel.formatDate(courseRun.start, userPreferences);
      }
      if (isEnrolled && courseRun.course_url) {
        courseTitleLink = courseRun.course_url;
      } else if (!isEnrolled && courseRun.marketing_url) {
        courseTitleLink = CourseCardModel.updateMarketingUrl(courseRun);
      }
      this.set({
        certificate_url: courseRun.certificate_url,
        course_run_key: courseRun.key || '',
        course_url: courseRun.course_url || '',
        title: this.context.title,
        end_date: CourseCardModel.formatDate(courseRun.end, userPreferences),
        enrollable_course_runs: this.getEnrollableCourseRuns(),
        is_course_ended: courseRun.is_course_ended,
        is_enrolled: isEnrolled,
        is_enrollment_open: courseRun.is_enrollment_open,
        course_key: this.context.key,
        user_entitlement: this.context.user_entitlement,
        is_unfulfilled_entitlement: this.context.user_entitlement && !isEnrolled,
        marketing_url: courseRun.marketing_url,
        mode_slug: courseRun.type,
        start_date: startDateString,
        upcoming_course_runs: this.getUpcomingCourseRuns(),
        upgrade_url: courseRun.upgrade_url,
        price: CourseCardModel.getCertificatePriceString(courseRun),
        course_title_link: courseTitleLink,
        is_mobile_only: courseRun.is_mobile_only || false,
      });

      // This is used to render the date for completed and in progress courses
      this.set({ dateString: this.formatDateString(courseRun) });
    }
  }

  setUnselected() {
    // Called to reset the model back to the unselected state.
    const unselectedCourseRun = CourseCardModel.getUnselectedCourseRun(this.get('enrollable_course_runs'));
    this.setActiveCourseRun(unselectedCourseRun);
  }

  updateCourseRun(courseRunKey) {
    const selectedCourseRun = this.get('course_runs').find(run => run.key === courseRunKey);
    if (selectedCourseRun) {
      // Update the current context to set the course run to the enrolled state
      this.context.course_runs.forEach((run) => {
        Object.assign(run, {
          is_enrolled: run.is_enrolled || run.key === selectedCourseRun.key,
        });
      });
      this.setActiveCourseRun(selectedCourseRun);
    }
  }

  // update marketing url for deep linking if is_mobile_only true
  static updateMarketingUrl(courseRun) {
    if (courseRun.is_mobile_only === true) {
      const marketingUrl = courseRun.marketing_url;
      let href = marketingUrl;

      if (marketingUrl.indexOf('course_info?path_id') < 0) {
        const start = marketingUrl.indexOf('course/');
        let path;

        if (start > -1) {
          path = marketingUrl.substr(start);
        }

        href = `edxapp://course_info?path_id=${path}`;
      }

      return href;
    }
    return courseRun.marketing_url;
  }
}

export default CourseCardModel;


test_runtest.py

Source: test_runtest.py (GitHub)

# Copyright (c) 2009-2010 Jonathan M. Lange. See LICENSE for details.

"""Tests for the RunTest single test execution logic."""

from testtools import (
    ExtendedToOriginalDecorator,
    run_test_with,
    RunTest,
    TestCase,
    TestResult,
    )
from testtools.matchers import MatchesException, Is, Raises
from testtools.tests.helpers import ExtendedTestResult


class TestRunTest(TestCase):

    def make_case(self):
        class Case(TestCase):
            def test(self):
                pass
        return Case('test')

    def test___init___short(self):
        run = RunTest("bar")
        self.assertEqual("bar", run.case)
        self.assertEqual([], run.handlers)

    def test__init____handlers(self):
        handlers = [("quux", "baz")]
        run = RunTest("bar", handlers)
        self.assertEqual(handlers, run.handlers)

    def test_run_with_result(self):
        # test.run passes result down to _run_test_method.
        log = []
        class Case(TestCase):
            def _run_test_method(self, result):
                log.append(result)
        case = Case('_run_test_method')
        run = RunTest(case, lambda x: log.append(x))
        result = TestResult()
        run.run(result)
        self.assertEqual(1, len(log))
        self.assertEqual(result, log[0].decorated)

    def test_run_no_result_manages_new_result(self):
        log = []
        run = RunTest(self.make_case(), lambda x: log.append(x) or x)
        result = run.run()
        self.assertIsInstance(result.decorated, TestResult)

    def test__run_core_called(self):
        case = self.make_case()
        log = []
        run = RunTest(case, lambda x: x)
        run._run_core = lambda: log.append('foo')
        run.run()
        self.assertEqual(['foo'], log)

    def test__run_user_does_not_catch_keyboard(self):
        case = self.make_case()
        def raises():
            raise KeyboardInterrupt("yo")
        run = RunTest(case, None)
        run.result = ExtendedTestResult()
        self.assertThat(lambda: run._run_user(raises),
            Raises(MatchesException(KeyboardInterrupt)))
        self.assertEqual([], run.result._events)

    def test__run_user_calls_onException(self):
        case = self.make_case()
        log = []
        def handler(exc_info):
            log.append("got it")
            self.assertEqual(3, len(exc_info))
            self.assertIsInstance(exc_info[1], KeyError)
            self.assertIs(KeyError, exc_info[0])
        case.addOnException(handler)
        e = KeyError('Yo')
        def raises():
            raise e
        run = RunTest(case, [(KeyError, None)])
        run.result = ExtendedTestResult()
        status = run._run_user(raises)
        self.assertEqual(run.exception_caught, status)
        self.assertEqual([], run.result._events)
        self.assertEqual(["got it"], log)

    def test__run_user_can_catch_Exception(self):
        case = self.make_case()
        e = Exception('Yo')
        def raises():
            raise e
        log = []
        run = RunTest(case, [(Exception, None)])
        run.result = ExtendedTestResult()
        status = run._run_user(raises)
        self.assertEqual(run.exception_caught, status)
        self.assertEqual([], run.result._events)
        self.assertEqual([], log)

    def test__run_user_uncaught_Exception_raised(self):
        case = self.make_case()
        e = KeyError('Yo')
        def raises():
            raise e
        log = []
        def log_exc(self, result, err):
            log.append((result, err))
        run = RunTest(case, [(ValueError, log_exc)])
        run.result = ExtendedTestResult()
        self.assertThat(lambda: run._run_user(raises),
            Raises(MatchesException(KeyError)))
        self.assertEqual([], run.result._events)
        self.assertEqual([], log)

    def test__run_user_uncaught_Exception_from_exception_handler_raised(self):
        case = self.make_case()
        def broken_handler(exc_info):
            # ValueError because that's what we know how to catch - and must
            # not.
            raise ValueError('boo')
        case.addOnException(broken_handler)
        e = KeyError('Yo')
        def raises():
            raise e
        log = []
        def log_exc(self, result, err):
            log.append((result, err))
        run = RunTest(case, [(ValueError, log_exc)])
        run.result = ExtendedTestResult()
        self.assertThat(lambda: run._run_user(raises),
            Raises(MatchesException(ValueError)))
        self.assertEqual([], run.result._events)
        self.assertEqual([], log)

    def test__run_user_returns_result(self):
        case = self.make_case()
        def returns():
            return 1
        run = RunTest(case)
        run.result = ExtendedTestResult()
        self.assertEqual(1, run._run_user(returns))
        self.assertEqual([], run.result._events)

    def test__run_one_decorates_result(self):
        log = []
        class Run(RunTest):
            def _run_prepared_result(self, result):
                log.append(result)
                return result
        run = Run(self.make_case(), lambda x: x)
        result = run._run_one('foo')
        self.assertEqual([result], log)
        self.assertIsInstance(log[0], ExtendedToOriginalDecorator)
        self.assertEqual('foo', result.decorated)

    def test__run_prepared_result_calls_start_and_stop_test(self):
        result = ExtendedTestResult()
        case = self.make_case()
        run = RunTest(case, lambda x: x)
        run.run(result)
        self.assertEqual([
            ('startTest', case),
            ('addSuccess', case),
            ('stopTest', case),
            ], result._events)

    def test__run_prepared_result_calls_stop_test_always(self):
        result = ExtendedTestResult()
        case = self.make_case()
        def inner():
            raise Exception("foo")
        run = RunTest(case, lambda x: x)
        run._run_core = inner
        self.assertThat(lambda: run.run(result),
            Raises(MatchesException(Exception("foo"))))
        self.assertEqual([
            ('startTest', case),
            ('stopTest', case),
            ], result._events)


class CustomRunTest(RunTest):

    marker = object()

    def run(self, result=None):
        return self.marker


class TestTestCaseSupportForRunTest(TestCase):

    def test_pass_custom_run_test(self):
        class SomeCase(TestCase):
            def test_foo(self):
                pass
        result = TestResult()
        case = SomeCase('test_foo', runTest=CustomRunTest)
        from_run_test = case.run(result)
        self.assertThat(from_run_test, Is(CustomRunTest.marker))

    def test_default_is_runTest_class_variable(self):
        class SomeCase(TestCase):
            run_tests_with = CustomRunTest
            def test_foo(self):
                pass
        result = TestResult()
        case = SomeCase('test_foo')
        from_run_test = case.run(result)
        self.assertThat(from_run_test, Is(CustomRunTest.marker))

    def test_constructor_argument_overrides_class_variable(self):
        # If a 'runTest' argument is passed to the test's constructor, that
        # overrides the class variable.
        marker = object()
        class DifferentRunTest(RunTest):
            def run(self, result=None):
                return marker
        class SomeCase(TestCase):
            run_tests_with = CustomRunTest
            def test_foo(self):
                pass
        result = TestResult()
        case = SomeCase('test_foo', runTest=DifferentRunTest)
        from_run_test = case.run(result)
        self.assertThat(from_run_test, Is(marker))

    def test_decorator_for_run_test(self):
        # Individual test methods can be marked as needing a special runner.
        class SomeCase(TestCase):
            @run_test_with(CustomRunTest)
            def test_foo(self):
                pass
        result = TestResult()
        case = SomeCase('test_foo')
        from_run_test = case.run(result)
        self.assertThat(from_run_test, Is(CustomRunTest.marker))

    def test_extended_decorator_for_run_test(self):
        # Individual test methods can be marked as needing a special runner.
        # Extra arguments can be passed to the decorator which will then be
        # passed on to the RunTest object.
        marker = object()
        class FooRunTest(RunTest):
            def __init__(self, case, handlers=None, bar=None):
                super(FooRunTest, self).__init__(case, handlers)
                self.bar = bar
            def run(self, result=None):
                return self.bar
        class SomeCase(TestCase):
            @run_test_with(FooRunTest, bar=marker)
            def test_foo(self):
                pass
        result = TestResult()
        case = SomeCase('test_foo')
        from_run_test = case.run(result)
        self.assertThat(from_run_test, Is(marker))

    def test_works_as_inner_decorator(self):
        # Even if run_test_with is the innermost decorator, it will be
        # respected.
        def wrapped(function):
            """Silly, trivial decorator."""
            def decorated(*args, **kwargs):
                return function(*args, **kwargs)
            decorated.__name__ = function.__name__
            decorated.__dict__.update(function.__dict__)
            return decorated
        class SomeCase(TestCase):
            @wrapped
            @run_test_with(CustomRunTest)
            def test_foo(self):
                pass
        result = TestResult()
        case = SomeCase('test_foo')
        from_run_test = case.run(result)
        self.assertThat(from_run_test, Is(CustomRunTest.marker))

    def test_constructor_overrides_decorator(self):
        # If a 'runTest' argument is passed to the test's constructor, that
        # overrides the decorator.
        marker = object()
        class DifferentRunTest(RunTest):
            def run(self, result=None):
                return marker
        class SomeCase(TestCase):
            @run_test_with(CustomRunTest)
            def test_foo(self):
                pass
        result = TestResult()
        case = SomeCase('test_foo', runTest=DifferentRunTest)
        from_run_test = case.run(result)
        self.assertThat(from_run_test, Is(marker))


def test_suite():
    from unittest import TestLoader
    return TestLoader().loadTestsFromName(__name__)

session_partial_run_test.py

Source: session_partial_run_test.py (GitHub)

# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================

"""Tests for tensorflow.python.client.session.Session's partial run APIs."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from six.moves import xrange  # pylint: disable=redefined-builtin

from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import googletest
from tensorflow.python.training import server_lib


class PartialRunTest(test_util.TensorFlowTestCase):

  def RunTestPartialRun(self, sess):
    a = array_ops.placeholder(dtypes.float32, shape=[])
    b = array_ops.placeholder(dtypes.float32, shape=[])
    c = array_ops.placeholder(dtypes.float32, shape=[])
    r1 = math_ops.add(a, b)
    r2 = math_ops.multiply(r1, c)

    h = sess.partial_run_setup([r1, r2], [a, b, c])
    res = sess.partial_run(h, r1, feed_dict={a: 1, b: 2})
    self.assertEqual(3, res)
    temp = res * 17
    res = sess.partial_run(h, r2, feed_dict={c: temp})
    self.assertEqual(153, res)

    # Call again on the same graph.
    h2 = sess.partial_run_setup([r1, r2], [a, b, c])
    res = sess.partial_run(h2, r1, feed_dict={a: 1, b: 2})
    self.assertEqual(3, res)
    temp = res * 18
    res = sess.partial_run(h2, r2, feed_dict={c: temp})
    self.assertEqual(162, res)

  def RunTestPartialRunIncomplete(self, sess):
    a = array_ops.placeholder(dtypes.float32, shape=[])
    b = array_ops.placeholder(dtypes.float32, shape=[])
    c = array_ops.placeholder(dtypes.float32, shape=[])
    r1 = math_ops.add(a, b)
    r2 = math_ops.multiply(r1, c)

    h = sess.partial_run_setup([r1, r2], [a, b, c])
    res = sess.partial_run(h, r1, feed_dict={a: 1, b: 2})
    self.assertEqual(3, res)

  def RunTestConcurrentPartialRun(self, sess):
    a = array_ops.placeholder(dtypes.float32, shape=[])
    b = array_ops.placeholder(dtypes.float32, shape=[])
    c = array_ops.placeholder(dtypes.float32, shape=[])
    r1 = math_ops.add(a, b)
    r2 = math_ops.multiply(r1, c)

    h1 = sess.partial_run_setup([r1], [a, b, c])
    h2 = sess.partial_run_setup([r1, r2], [a, b, c])
    res = sess.partial_run(h1, r1, feed_dict={a: 1, b: 2})
    self.assertEqual(3, res)
    temp = res * 19
    res = sess.partial_run(h2, r1, feed_dict={a: temp, b: 9})
    self.assertEqual(66, res)
    res = sess.partial_run(h2, r2, feed_dict={c: 7})
    self.assertEqual(462, res)

  def RunTestManyPartialRun(self, sess):
    steps = 200
    inputs = []
    outputs = []
    a = constant_op.constant(2.0, dtypes.float32)
    for i in xrange(steps):
      inputs.append(array_ops.placeholder(dtypes.float32, shape=[]))
      a = math_ops.multiply(a, inputs[i])
      outputs.append(a)

    h = sess.partial_run_setup(outputs, inputs)
    for i in xrange(steps):
      res = sess.partial_run(h, outputs[i], feed_dict={inputs[i]: 1.0})
    self.assertEqual(2.0, res)

    feed_dict = {}
    for i in xrange(steps):
      feed_dict[inputs[i]] = 1.0
    res = sess.run(outputs, feed_dict)
    self.assertEqual(steps, len(res))
    self.assertEqual(2.0, res[-1])

  def RunTestRunAndPartialRun(self, sess):
    a = constant_op.constant(2.0, dtypes.float32)
    b = a * 2
    c = b * 3
    r1 = self.evaluate([b, c])
    h = sess.partial_run_setup([b, c], [])
    r2 = sess.partial_run(h, [b, c])
    self.assertEqual(r1, r2)

  def RunTestPartialRunMissingPlaceholderFeedException(self, sess):
    x = array_ops.placeholder(dtypes.float32, shape=())
    fetches = [x * 2, x * 3]
    handle = sess.partial_run_setup(fetches=fetches, feeds=[])
    with self.assertRaisesRegexp(errors.InvalidArgumentError,
                                 'You must feed a value for placeholder'):
      sess.partial_run(handle, fetches[0])

  def RunTestPartialRunUnspecifiedFeed(self, sess):
    a = array_ops.placeholder(dtypes.float32, shape=[])
    b = array_ops.placeholder(dtypes.float32, shape=[])
    c = array_ops.placeholder(dtypes.float32, shape=[])
    r1 = math_ops.add(a, b)

    h = sess.partial_run_setup([r1], [a, b])
    with self.assertRaisesRegexp(errors.InvalidArgumentError,
                                 'was not specified in partial_run_setup.$'):
      sess.partial_run(h, r1, feed_dict={a: 1, b: 2, c: 3})

  def RunTestPartialRunUnspecifiedFetch(self, sess):
    a = array_ops.placeholder(dtypes.float32, shape=[])
    b = array_ops.placeholder(dtypes.float32, shape=[])
    c = array_ops.placeholder(dtypes.float32, shape=[])
    r1 = math_ops.add(a, b)
    r2 = math_ops.multiply(a, c)

    h = sess.partial_run_setup([r1], [a, b, c])
    with self.assertRaisesRegexp(errors.InvalidArgumentError,
                                 'was not specified in partial_run_setup.$'):
      sess.partial_run(h, r2, feed_dict={a: 1, c: 3})

  def RunTestPartialRunAlreadyFed(self, sess):
    a = array_ops.placeholder(dtypes.float32, shape=[])
    b = array_ops.placeholder(dtypes.float32, shape=[])
    c = array_ops.placeholder(dtypes.float32, shape=[])
    r1 = math_ops.add(a, b)
    r2 = math_ops.multiply(a, c)

    h = sess.partial_run_setup([r1, r2], [a, b, c])
    sess.partial_run(h, r1, feed_dict={a: 1, b: 2})
    with self.assertRaisesRegexp(errors.InvalidArgumentError,
                                 'has already been fed.$'):
      sess.partial_run(h, r2, feed_dict={a: 1, c: 3})

  def RunTestPartialRunAlreadyFetched(self, sess):
    a = array_ops.placeholder(dtypes.float32, shape=[])
    b = array_ops.placeholder(dtypes.float32, shape=[])
    c = array_ops.placeholder(dtypes.float32, shape=[])
    r1 = math_ops.add(a, b)
    r2 = math_ops.multiply(a, c)

    h = sess.partial_run_setup([r1, r2], [a, b, c])
    sess.partial_run(h, r1, feed_dict={a: 1, b: 2})
    with self.assertRaisesRegexp(errors.InvalidArgumentError,
                                 'has already been fetched.$'):
      sess.partial_run(h, r1, feed_dict={c: 3})

  def RunTestPartialRunEmptyFetches(self, sess):
    a = array_ops.placeholder(dtypes.float32)
    b = a * 2.0

    h = sess.partial_run_setup(fetches=[b], feeds=[a])
    sess.partial_run(h, [], {a: 3.0})
    r = sess.partial_run(h, [b], {})
    self.assertEqual([6.0], r)

  @test_util.run_deprecated_v1
  def testInvalidPartialRunSetup(self):
    sess = session.Session()
    x = array_ops.placeholder(dtypes.float32, shape=[])
    with self.assertRaisesRegexp(
        errors.InvalidArgumentError,
        'specify at least one target to fetch or execute.'):
      sess.partial_run_setup(fetches=[], feeds=[x])

  @test_util.run_deprecated_v1
  def testPartialRunSetupNoFeedsPassed(self):
    sess = session.Session()
    r1 = constant_op.constant([6.0])

    h = sess.partial_run_setup([r1])
    result1 = sess.partial_run(h, r1)
    self.assertEqual([6.0], result1)

  @test_util.run_deprecated_v1
  def testPartialRunDirect(self):
    self.RunTestPartialRun(session.Session())

  @test_util.run_deprecated_v1
  def testPartialRunIncompleteDirect(self):
    self.RunTestPartialRunIncomplete(session.Session())

  @test_util.run_deprecated_v1
  def testConcurrentPartialRunDirect(self):
    self.RunTestConcurrentPartialRun(session.Session())

  @test_util.run_deprecated_v1
  def testManyPartialRunDirect(self):
    self.RunTestManyPartialRun(session.Session())

  @test_util.run_deprecated_v1
  def testRunAndPartialRunDirect(self):
    self.RunTestRunAndPartialRun(session.Session())

  @test_util.run_deprecated_v1
  def testPartialRunMissingPlaceholderFeedExceptionDirect(self):
    self.RunTestPartialRunMissingPlaceholderFeedException(session.Session())

  @test_util.run_deprecated_v1
  def testPartialRunUnspecifiedFeedDirect(self):
    self.RunTestPartialRunUnspecifiedFeed(session.Session())

  @test_util.run_deprecated_v1
  def testPartialRunUnspecifiedFetchDirect(self):
    self.RunTestPartialRunUnspecifiedFetch(session.Session())

  @test_util.run_deprecated_v1
  def testPartialRunAlreadyFedDirect(self):
    self.RunTestPartialRunAlreadyFed(session.Session())

  @test_util.run_deprecated_v1
  def testPartialRunAlreadyFetchedDirect(self):
    self.RunTestPartialRunAlreadyFetched(session.Session())

  @test_util.run_deprecated_v1
  def testPartialRunEmptyFetchesDirect(self):
    self.RunTestPartialRunEmptyFetches(session.Session())

  @test_util.run_deprecated_v1
  def testPartialRunDist(self):
    server = server_lib.Server.create_local_server()
    self.RunTestPartialRun(session.Session(server.target))

  @test_util.run_deprecated_v1
  def testPartialRunIncompleteDist(self):
    server = server_lib.Server.create_local_server()
    self.RunTestPartialRunIncomplete(session.Session(server.target))

  @test_util.run_deprecated_v1
  def testConcurrentPartialRunDist(self):
    server = server_lib.Server.create_local_server()
    self.RunTestConcurrentPartialRun(session.Session(server.target))

  @test_util.run_deprecated_v1
  def testManyPartialRunDist(self):
    server = server_lib.Server.create_local_server()
    self.RunTestManyPartialRun(session.Session(server.target))

  @test_util.run_deprecated_v1
  def testRunAndPartialRunDist(self):
    server = server_lib.Server.create_local_server()
    self.RunTestRunAndPartialRun(session.Session(server.target))

  @test_util.run_deprecated_v1
  def testPartialRunMissingPlaceholderFeedExceptionDist(self):
    server = server_lib.Server.create_local_server()
    self.RunTestPartialRunMissingPlaceholderFeedException(
        session.Session(server.target))

  @test_util.run_deprecated_v1
  def testPartialRunUnspecifiedFeedDist(self):
    server = server_lib.Server.create_local_server()
    self.RunTestPartialRunUnspecifiedFeed(session.Session(server.target))

  @test_util.run_deprecated_v1
  def testPartialRunUnspecifiedFetchDist(self):
    server = server_lib.Server.create_local_server()
    self.RunTestPartialRunUnspecifiedFetch(session.Session(server.target))

  @test_util.run_deprecated_v1
  def testPartialRunAlreadyFedDist(self):
    server = server_lib.Server.create_local_server()
    self.RunTestPartialRunAlreadyFed(session.Session(server.target))

  @test_util.run_deprecated_v1
  def testPartialRunAlreadyFetchedDist(self):
    server = server_lib.Server.create_local_server()
    self.RunTestPartialRunAlreadyFetched(session.Session(server.target))

  @test_util.run_deprecated_v1
  def testPartialRunEmptyFetchesDist(self):
    server = server_lib.Server.create_local_server()
    self.RunTestPartialRunEmptyFetches(session.Session(server.target))


if __name__ == '__main__':
  googletest.main()

views.py

Source: views.py (GitHub)

1# Django-related imports
2from django.shortcuts import render, redirect, get_object_or_404
3from django.contrib.auth.decorators import login_required
4from django.conf import settings
5from django.contrib import messages
6from django.utils.translation import ugettext_lazy as _
7# Standard libraries
8import logging
9import fnmatch
10import os
11# Portal-specific things
12from olc_webportalv2.cowbat.models import SequencingRun, DataFile
13from olc_webportalv2.cowbat.forms import RunNameForm, RealTimeForm
14from olc_webportalv2.cowbat.tasks import run_cowbat_batch
15from olc_webportalv2.geneseekr.forms import EmailForm
16# Azure!
17from azure.storage.blob import BlockBlobService
18import azure.batch.batch_service_client as batch
19import azure.batch.batch_auth as batch_auth
20import azure.batch.models as batchmodels
21# Task Management
22from kombu import Queue
23
24log = logging.getLogger(__name__)
25
26
27def find_percent_complete(sequencing_run):
28    try:
29        job_id = str(sequencing_run).lower().replace('_', '-')
30        credentials = batch_auth.SharedKeyCredentials(settings.BATCH_ACCOUNT_NAME, settings.BATCH_ACCOUNT_KEY)
31        batch_client = batch.BatchServiceClient(credentials, base_url=settings.BATCH_ACCOUNT_URL)
32        node_files = batch_client.file.list_from_task(job_id=job_id, task_id=job_id, recursive=True)
33        final_num_reports = 26
34        current_subfolders = 0
35        for node_file in node_files:
36            if 'reports' in node_file.name:
37                current_subfolders += 1
38        if final_num_reports == 0:
39            percent_completed = 1
40        else:
41            percent_completed = int(100.0 * (current_subfolders/final_num_reports))
42
43    except batchmodels.BatchErrorException:  # Means task and job have not yet been created
44        percent_completed = 1
45    return percent_completed
46
47
48def check_uploaded_seqids(sequencing_run):
49    container_name = str(sequencing_run).lower().replace('_', '-')
50    blob_client = BlockBlobService(account_key=settings.AZURE_ACCOUNT_KEY,
51                                   account_name=settings.AZURE_ACCOUNT_NAME)
52    blob_filenames = list()
53    blobs = blob_client.list_blobs(container_name=container_name)
54    for blob in blobs:
55        blob_filenames.append(blob.name)
56    # uploaded_seqids = list()
57    for seqid in sequencing_run.seqids:
58        forward_reads = fnmatch.filter(blob_filenames, seqid + '*_R1*')
59        reverse_reads = fnmatch.filter(blob_filenames, seqid + '*_R2*')
60        if len(forward_reads) == 1:
61            if seqid not in sequencing_run.uploaded_forward_reads:
62                sequencing_run.uploaded_forward_reads.append(seqid)
63        else:
64            if seqid not in sequencing_run.forward_reads_to_upload:
65                sequencing_run.forward_reads_to_upload.append(seqid)
66        if len(reverse_reads) == 1:
67            if seqid not in sequencing_run.uploaded_reverse_reads:
68                sequencing_run.uploaded_reverse_reads.append(seqid)
69        else:
70            if seqid not in sequencing_run.reverse_reads_to_upload:
71                sequencing_run.reverse_reads_to_upload.append(seqid)
72        # if len(forward_reads) == 1 and len(reverse_reads) == 1:
73        #     sequencing_run.uploaded_seqids.append(seqid)
74        # else:
75        #     seqids_to_upload.append(seqid)
76    # for seqid in seqids_to_upload:
77    #     if seqid not in sequencing_run.seqids_to_upload:
78    #         sequencing_run.seqids_to_upload.append(seqid)
79    # for seqid in uploaded_seqids:
80    #     if seqid not in sequencing_run.uploaded_seqids:
81    #         sequencing_run.uploaded_seqids.append(seqid)
82        sequencing_run.save()
83
84
85# Create your views here.
86@login_required
87def cowbat_processing(request, sequencing_run_pk):
88    sequencing_run = get_object_or_404(SequencingRun, pk=sequencing_run_pk)
89    if sequencing_run.status == 'Unprocessed':
90        SequencingRun.objects.filter(pk=sequencing_run.pk).update(status='Processing')
91        run_cowbat_batch.apply_async(queue='cowbat', args=(sequencing_run.pk, ))
92
93    # Find percent complete (approximately). Not sure that having calls to azure batch API in views is a good thing.
94    # Will have to see if performance is terrible because of it.
95    if sequencing_run.status == 'Processing':
96        progress = find_percent_complete(sequencing_run)
97    else:
98        progress = 1
99
100    form = EmailForm()
101    if request.method == 'POST':
102        form = EmailForm(request.POST)
103        if form.is_valid():
104            Email = form.cleaned_data.get('email')
105            if Email not in sequencing_run.emails_array:
106                    sequencing_run.emails_array.append(Email)
107                    sequencing_run.save()
108                    form = EmailForm()
109                    messages.success(request, _('Email saved'))
110            
111    return render(request,
112                  'cowbat/cowbat_processing.html',
113                  {
114                      'sequencing_run': sequencing_run, 
115                      'form':form,
116                      'progress': str(progress)
117                  })
118
119
120@login_required
121def assembly_home(request):
122    sequencing_runs = SequencingRun.objects.order_by('-run_name')
123    return render(request,
124                  'cowbat/assembly_home.html',
125                  {
126                      'sequencing_runs': sequencing_runs
127                  })
128
129
130@login_required
131def upload_metadata(request):
132    form = RunNameForm()
133    if request.method == 'POST':
134        form = RunNameForm(request.POST)
135        if form.is_valid():
136            if not SequencingRun.objects.filter(run_name=form.cleaned_data.get('run_name')).exists():
137                sequencing_run, created = SequencingRun.objects\
138                    .update_or_create(run_name=form.cleaned_data.get('run_name'),
139                                      seqids=list())
140            else:
141                sequencing_run = SequencingRun.objects.get(run_name=form.cleaned_data.get('run_name'))
142            files = [request.FILES.get('file[%d]' % i) for i in range(0, len(request.FILES))]
143            container_name = sequencing_run.run_name.lower().replace('_', '-')
144            blob_client = BlockBlobService(account_name=settings.AZURE_ACCOUNT_NAME,
145                                           account_key=settings.AZURE_ACCOUNT_KEY)
146            blob_client.create_container(container_name)
147            for item in files:
148                blob_client.create_blob_from_bytes(container_name=container_name,
149                                                   blob_name=item.name,
150                                                   blob=item.read())
151                if item.name == 'SampleSheet.csv':
152                    instance = DataFile(sequencing_run=sequencing_run,
153                                        data_file=item)
154                    instance.save()
155                    with open('olc_webportalv2/media/{run_name}/SampleSheet.csv'
156                                      .format(run_name=str(sequencing_run))) as f:
157                        lines = f.readlines()
158                    seqid_start = False
159                    seqid_list = list()
160                    realtime_dict = dict()
161                    # Sample plate column in SampleSheet should have Lab/Whatever other ID.
162                    # Store that data in a dictionary with SeqIDs as keys and LabIDs as values
163                    sample_plate_dict = dict()
164                    for i in range(len(lines)):
165                        if seqid_start:
166                            seqid = lines[i].split(',')[0]
167                            labid = lines[i].split(',')[2]
168                            sample_plate_dict[seqid] = labid
169                            try:
170                                realtime = lines[i].rstrip().split(',')[9]
171                            except IndexError:
172                                realtime = ''
173
174                            seqid_list.append(seqid)
175                            if realtime == 'TRUE' or realtime == 'VRAI':
176                                realtime_dict[seqid] = 'True'  # Stored as a string; unclear whether the JSONField can hold a real bool
177                            else:
178                                realtime_dict[seqid] = 'False'
179                        if 'Sample_ID' in lines[i]:
180                            seqid_start = True
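                    # QuerySet.update() writes all three JSON fields in a single
                    # UPDATE without calling save() or firing model signals.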
181                    SequencingRun.objects.filter(pk=sequencing_run.pk).update(seqids=seqid_list,
182                                                                              realtime_strains=realtime_dict,
183                                                                              sample_plate=sample_plate_dict)
184            # TODO: Change this back to verify_realtime once we've gotten the OK from external labs to make them
185            #  validate their data.
186            # return redirect('cowbat:verify_realtime', sequencing_run_pk=sequencing_run.pk)
187            return redirect('cowbat:upload_interop', sequencing_run_pk=sequencing_run.pk)
188    return render(request,
189                  'cowbat/upload_metadata.html',
190                  {
191                      'form': form
192                  })
193
194
195@login_required
196def verify_realtime(request, sequencing_run_pk):
197    sequencing_run = get_object_or_404(SequencingRun, pk=sequencing_run_pk)
198    form = RealTimeForm(instance=sequencing_run)
199    if request.method == 'POST':
200        form = RealTimeForm(request.POST, instance=sequencing_run)
201        if form.is_valid():
202            # Read form data, update realtime strains as necessary.
203            seqids = form.cleaned_data.get('realtime_select')
204            for seqid in sequencing_run.realtime_strains:
205                if seqid in seqids:
206                    sequencing_run.realtime_strains[seqid] = 'True'
207                else:
208                    sequencing_run.realtime_strains[seqid] = 'False'
209                strain_name = form.cleaned_data.get(seqid)
210                sequencing_run.sample_plate[seqid] = strain_name
211            sequencing_run.save()
212            # Also modify samplesheet to reflect the updated Realtime strains and overwrite the previous upload
213            # to blob storage
214            samplesheet_path = 'olc_webportalv2/media/{run_name}/SampleSheet.csv'.format(run_name=str(sequencing_run))
215            with open(samplesheet_path) as f:
216                lines = f.readlines()
217            seqid_start = False
218            with open(samplesheet_path, 'w') as f:
219                for i in range(len(lines)):
220                    if seqid_start:
221                        seqid = lines[i].split(',')[0]
222                        line_split = lines[i].split(',')
223                        line_split[2] = sequencing_run.sample_plate[seqid]
224                        if sequencing_run.realtime_strains[seqid] == 'True':
225                            line_split[-1] = 'TRUE\n'
226                        else:
227                            line_split[-1] = '\n'
228                        to_write = ','.join(line_split)
229                        f.write(to_write)
230                    else:
231                        f.write(lines[i])
232                    if 'Sample_ID' in lines[i]:
233                        seqid_start = True
234            container_name = sequencing_run.run_name.lower().replace('_', '-')
235            blob_client = BlockBlobService(account_name=settings.AZURE_ACCOUNT_NAME,
236                                           account_key=settings.AZURE_ACCOUNT_KEY)
237            blob_client.create_blob_from_path(container_name=container_name,
238                                              blob_name='SampleSheet.csv',
239                                              file_path=samplesheet_path)
240            return redirect('cowbat:upload_interop', sequencing_run_pk=sequencing_run.pk)
241    return render(request,
242                  'cowbat/verify_realtime.html',
243                  {
244                      'form': form,
245                      'sequencing_run': sequencing_run
246                  })
247
248
249@login_required
250def upload_interop(request, sequencing_run_pk):
251    sequencing_run = get_object_or_404(SequencingRun, pk=sequencing_run_pk)
252    if request.method == 'POST':
253        container_name = sequencing_run.run_name.lower().replace('_', '-')
254        blob_client = BlockBlobService(account_name=settings.AZURE_ACCOUNT_NAME,
255                                       account_key=settings.AZURE_ACCOUNT_KEY)
256        blob_client.create_container(container_name)
257        files = [request.FILES.get('file[%d]' % i) for i in range(0, len(request.FILES))]
258        for item in files:
259            blob_client.create_blob_from_bytes(container_name=container_name,
260                                               blob_name=os.path.join('InterOp', item.name),
261                                               blob=item.read())
262
263        return redirect('cowbat:upload_sequence_data', sequencing_run_pk=sequencing_run.pk)
264    return render(request,
265                  'cowbat/upload_interop.html',
266                  {
267                      'sequencing_run': sequencing_run
268                  })
269
270
271@login_required
272def upload_sequence_data(request, sequencing_run_pk):
273    sequencing_run = get_object_or_404(SequencingRun, pk=sequencing_run_pk)
274    check_uploaded_seqids(sequencing_run=sequencing_run)
275    seqid_list = list()
276    if request.method == 'POST':
277        check_uploaded_seqids(sequencing_run=sequencing_run)
278        container_name = sequencing_run.run_name.lower().replace('_', '-')
279        blob_client = BlockBlobService(account_name=settings.AZURE_ACCOUNT_NAME,
280                                       account_key=settings.AZURE_ACCOUNT_KEY)
281        blob_client.create_container(container_name)
282        for i in range(0, len(request.FILES)):
283            item = request.FILES.get('file[%d]' % i)
284            blob_client.create_blob_from_bytes(container_name=container_name,
285                                               blob_name=item.name,
286                                               blob=item.read())
287
288        # return redirect('cowbat:cowbat_processing', sequencing_run_pk=sequencing_run.pk)
289    return render(request,
290                  'cowbat/upload_sequence_data.html',
291                  {
292                      'sequencing_run': sequencing_run,
293                  })
294
295
296@login_required
297def retry_sequence_data_upload(request, sequencing_run_pk):
298    sequencing_run = get_object_or_404(SequencingRun, pk=sequencing_run_pk)
299    sequencing_run.status = 'Unprocessed'
300    sequencing_run.save()
301    return redirect('cowbat:upload_sequence_data', sequencing_run_pk=sequencing_run.pk)
302
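Every SampleSheet-handling view above repeats the same scanning pattern: walk the CSV lines, flip a flag once the 'Sample_ID' header row has been seen, and treat each later row as sample data. The standalone sketch below isolates that pattern; the column positions (0 for the SeqID, 2 for the lab ID, 9 for the realtime flag) mirror the view code, while the header and sample row are invented for illustration.

# Minimal sketch of the SampleSheet scanning pattern used by upload_metadata()
# and verify_realtime(). Column positions mirror the views; the sample data is
# invented for illustration.
def parse_samplesheet(lines):
    seqid_start = False
    seqid_list = []
    realtime_dict = {}
    sample_plate_dict = {}
    for line in lines:
        if seqid_start:
            fields = line.rstrip().split(',')
            seqid = fields[0]
            sample_plate_dict[seqid] = fields[2]
            try:
                realtime = fields[9]
            except IndexError:
                realtime = ''
            # Stored as strings rather than bools, matching the views above.
            realtime_dict[seqid] = 'True' if realtime in ('TRUE', 'VRAI') else 'False'
            seqid_list.append(seqid)
        if 'Sample_ID' in line:
            seqid_start = True  # data rows begin after the header row
    return seqid_list, realtime_dict, sample_plate_dict


header = 'Sample_ID,Sample_Name,Sample_Plate,Well,I7_Index_ID,index,I5_Index_ID,index2,Description,Realtime'
row = '2023-SEQ-0001,,LAB-0001,,,,,,,TRUE'
print(parse_samplesheet([header, row]))

Because the flag only flips after the header row itself is processed, the header is never parsed as data, which is also what lets verify_realtime() rewrite only the data rows when it re-serializes the sheet.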

run_webkit_tests_integrationtest.py

Source: run_webkit_tests_integrationtest.py Github

1#!/usr/bin/python
2# Copyright (C) 2010 Google Inc. All rights reserved.
3# Copyright (C) 2010 Gabor Rapcsanyi ([email protected]), University of Szeged
4# Copyright (C) 2011 Apple Inc. All rights reserved.
5#
6# Redistribution and use in source and binary forms, with or without
7# modification, are permitted provided that the following conditions are
8# met:
9#
10#     * Redistributions of source code must retain the above copyright
11# notice, this list of conditions and the following disclaimer.
12#     * Redistributions in binary form must reproduce the above
13# copyright notice, this list of conditions and the following disclaimer
14# in the documentation and/or other materials provided with the
15# distribution.
16#     * Neither the name of Google Inc. nor the names of its
17# contributors may be used to endorse or promote products derived from
18# this software without specific prior written permission.
19#
20# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
23# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
24# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
25# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
26# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
27# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
28# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
29# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
30# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31
32"""Unit tests for run_webkit_tests."""
33
34from __future__ import with_statement
35
36import codecs
37import itertools
38import logging
39import os
40import Queue
41import sys
42import thread
43import time
44import threading
45import unittest
46
47try:
48    import multiprocessing
49except ImportError:
50    multiprocessing = None
51
52from webkitpy.common import array_stream
53from webkitpy.common.system import outputcapture
54from webkitpy.common.system import filesystem_mock
55from webkitpy.tool import mocktool
56from webkitpy.layout_tests import port
57from webkitpy.layout_tests import run_webkit_tests
58from webkitpy.layout_tests.port.test import TestPort, TestDriver
59from webkitpy.layout_tests.port.test_files import is_reference_html_file
60from webkitpy.python24.versioning import compare_version
61from webkitpy.test.skip import skip_if
62
63from webkitpy.thirdparty.mock import Mock
64
65
66def parse_args(extra_args=None, record_results=False, tests_included=False,
67               print_nothing=True):
68    extra_args = extra_args or []
69    if print_nothing:
70        args = ['--print', 'nothing']
71    else:
72        args = []
73    if '--platform' not in extra_args:
74        args.extend(['--platform', 'test'])
75    if not record_results:
76        args.append('--no-record-results')
77    if '--child-processes' not in extra_args and '--worker-model' not in extra_args:
78        args.extend(['--worker-model', 'inline'])
79    args.extend(extra_args)
80    if not tests_included:
81        # We use the glob to test that globbing works.
82        args.extend(['passes',
83                     'http/tests',
84                     'websocket/tests',
85                     'failures/expected/*'])
86    return run_webkit_tests.parse_args(args)
87
88
89def passing_run(extra_args=None, port_obj=None, record_results=False,
90                tests_included=False, filesystem=None):
91    options, parsed_args = parse_args(extra_args, record_results,
92                                      tests_included)
93    if not port_obj:
94        port_obj = port.get(port_name=options.platform, options=options,
95                            user=mocktool.MockUser(), filesystem=filesystem)
96    res = run_webkit_tests.run(port_obj, options, parsed_args)
97    return res == 0
98
99
100def logging_run(extra_args=None, port_obj=None, record_results=False, tests_included=False, filesystem=None):
101    options, parsed_args = parse_args(extra_args=extra_args,
102                                      record_results=record_results,
103                                      tests_included=tests_included,
104                                      print_nothing=False)
105    user = mocktool.MockUser()
106    if not port_obj:
107        port_obj = port.get(port_name=options.platform, options=options,
108                            user=user, filesystem=filesystem)
109
110    res, buildbot_output, regular_output = run_and_capture(port_obj, options,
111                                                           parsed_args)
112    return (res, buildbot_output, regular_output, user)
113
114
115def run_and_capture(port_obj, options, parsed_args):
116    oc = outputcapture.OutputCapture()
117    try:
118        oc.capture_output()
119        buildbot_output = array_stream.ArrayStream()
120        regular_output = array_stream.ArrayStream()
121        res = run_webkit_tests.run(port_obj, options, parsed_args,
122                                   buildbot_output=buildbot_output,
123                                   regular_output=regular_output)
124    finally:
125        oc.restore_output()
126    return (res, buildbot_output, regular_output)
127
128
129def get_tests_run(extra_args=None, tests_included=False, flatten_batches=False,
130                  filesystem=None, include_reference_html=False):
131    extra_args = extra_args or []
132    if not tests_included:
133        # Not including http tests since they get run out of order (that
134        # behavior has its own test, see test_get_test_file_queue)
135        extra_args = ['passes', 'failures'] + extra_args
136    options, parsed_args = parse_args(extra_args, tests_included=True)
137
138    user = mocktool.MockUser()
139
140    test_batches = []
141
142
143    class RecordingTestDriver(TestDriver):
144        def __init__(self, port, worker_number):
145            TestDriver.__init__(self, port, worker_number)
146            self._current_test_batch = None
147
148        def poll(self):
149            # So that we don't create a new driver for every test
150            return None
151
152        def stop(self):
153            self._current_test_batch = None
154
155        def run_test(self, test_input):
156            if self._current_test_batch is None:
157                self._current_test_batch = []
158                test_batches.append(self._current_test_batch)
159            test_name = self._port.relative_test_filename(test_input.filename)
160            # In case of reftest, one test calls the driver's run_test() twice.
161            # We should not add a reference html used by reftests to tests unless include_reference_html parameter
162            # is explicitly given.
163            if include_reference_html or not is_reference_html_file(test_input.filename):
164                self._current_test_batch.append(test_name)
165            return TestDriver.run_test(self, test_input)
166
167    class RecordingTestPort(TestPort):
168        def create_driver(self, worker_number):
169            return RecordingTestDriver(self, worker_number)
170
171    recording_port = RecordingTestPort(options=options, user=user, filesystem=filesystem)
172    run_and_capture(recording_port, options, parsed_args)
173
174    if flatten_batches:
175        return list(itertools.chain(*test_batches))
176
177    return test_batches
178
179
180class MainTest(unittest.TestCase):
181    def test_accelerated_compositing(self):
182        # This just tests that we recognize the command line args
183        self.assertTrue(passing_run(['--accelerated-compositing']))
184        self.assertTrue(passing_run(['--no-accelerated-compositing']))
185
186    def test_accelerated_2d_canvas(self):
187        # This just tests that we recognize the command line args
188        self.assertTrue(passing_run(['--accelerated-2d-canvas']))
189        self.assertTrue(passing_run(['--no-accelerated-2d-canvas']))
190
191    def test_basic(self):
192        self.assertTrue(passing_run())
193
194    def test_batch_size(self):
195        batch_tests_run = get_tests_run(['--batch-size', '2'])
196        for batch in batch_tests_run:
197            self.assertTrue(len(batch) <= 2, '%s had too many tests' % ', '.join(batch))
198
199    def test_child_process_1(self):
200        _, _, regular_output, _ = logging_run(
201             ['--print', 'config', '--worker-model', 'threads', '--child-processes', '1'])
202        self.assertTrue(any(['Running 1 ' in line for line in regular_output.get()]))
203
204    def test_child_processes_2(self):
205        _, _, regular_output, _ = logging_run(
206             ['--print', 'config', '--worker-model', 'threads', '--child-processes', '2'])
207        self.assertTrue(any(['Running 2 ' in line for line in regular_output.get()]))
208
209    def test_child_processes_min(self):
210        _, _, regular_output, _ = logging_run(
211             ['--print', 'config', '--worker-model', 'threads', '--child-processes', '2', 'passes'],
212             tests_included=True)
213        self.assertTrue(any(['Running 1 ' in line for line in regular_output.get()]))
214
215    def test_dryrun(self):
216        batch_tests_run = get_tests_run(['--dry-run'])
217        self.assertEqual(batch_tests_run, [])
218
219        batch_tests_run = get_tests_run(['-n'])
220        self.assertEqual(batch_tests_run, [])
221
222    def test_exception_raised(self):
223        self.assertRaises(ValueError, logging_run,
224            ['failures/expected/exception.html'], tests_included=True)
225
226    def test_full_results_html(self):
227        # FIXME: verify html?
228        res, out, err, user = logging_run(['--full-results-html'])
229        self.assertEqual(res, 0)
230
231    def test_help_printing(self):
232        res, out, err, user = logging_run(['--help-printing'])
233        self.assertEqual(res, 0)
234        self.assertTrue(out.empty())
235        self.assertFalse(err.empty())
236
237    def test_hung_thread(self):
238        res, out, err, user = logging_run(['--run-singly', '--time-out-ms=50',
239                                          'failures/expected/hang.html'],
240                                          tests_included=True)
241        self.assertEqual(res, 0)
242        self.assertFalse(out.empty())
243        self.assertFalse(err.empty())
244
245    def test_keyboard_interrupt(self):
246        # Note that this also tests running a test marked as SKIP if
247        # you specify it explicitly.
248        self.assertRaises(KeyboardInterrupt, logging_run,
249            ['failures/expected/keyboard.html'], tests_included=True)
250
251    def test_keyboard_interrupt_inline_worker_model(self):
252        self.assertRaises(KeyboardInterrupt, logging_run,
253            ['failures/expected/keyboard.html', '--worker-model', 'inline'],
254            tests_included=True)
255
256    def test_last_results(self):
257        fs = port.unit_test_filesystem()
258        # We do a logging run here instead of a passing run in order to
259        # suppress the output from the json generator.
260        res, buildbot_output, regular_output, user = logging_run(['--clobber-old-results'], record_results=True, filesystem=fs)
261        res, buildbot_output, regular_output, user = logging_run(
262            ['--print-last-failures'], filesystem=fs)
263        self.assertEqual(regular_output.get(), ['\n\n'])
264        self.assertEqual(buildbot_output.get(), [])
265
266    def test_lint_test_files(self):
267        res, out, err, user = logging_run(['--lint-test-files'])
268        self.assertEqual(res, 0)
269        self.assertTrue(out.empty())
270        self.assertTrue(any(['Lint succeeded' in msg for msg in err.get()]))
271
272    def test_lint_test_files__errors(self):
273        options, parsed_args = parse_args(['--lint-test-files'])
274        user = mocktool.MockUser()
275        port_obj = port.get(options.platform, options=options, user=user)
276        port_obj.test_expectations = lambda: "# syntax error"
277        res, out, err = run_and_capture(port_obj, options, parsed_args)
278
279        self.assertEqual(res, -1)
280        self.assertTrue(out.empty())
281        self.assertTrue(any(['Lint failed' in msg for msg in err.get()]))
282
283    def test_no_tests_found(self):
284        res, out, err, user = logging_run(['resources'], tests_included=True)
285        self.assertEqual(res, -1)
286        self.assertTrue(out.empty())
287        self.assertTrue('No tests to run.\n' in err.get())
288
289    def test_no_tests_found_2(self):
290        res, out, err, user = logging_run(['foo'], tests_included=True)
291        self.assertEqual(res, -1)
292        self.assertTrue(out.empty())
293        self.assertTrue('No tests to run.\n' in err.get())
294
295    def test_randomize_order(self):
296        # FIXME: verify order was shuffled
297        self.assertTrue(passing_run(['--randomize-order']))
298
299    def test_gc_between_tests(self):
300        self.assertTrue(passing_run(['--gc-between-tests']))
301
302    def test_complex_text(self):
303        self.assertTrue(passing_run(['--complex-text']))
304
305    def test_threaded(self):
306        self.assertTrue(passing_run(['--threaded']))
307
308    def test_run_chunk(self):
309        # Test that we actually select the right chunk
310        all_tests_run = get_tests_run(flatten_batches=True)
311        chunk_tests_run = get_tests_run(['--run-chunk', '1:4'], flatten_batches=True)
312        self.assertEquals(all_tests_run[4:8], chunk_tests_run)
313
314        # Test that we wrap around if the number of tests is not evenly divisible by the chunk size
315        tests_to_run = ['passes/error.html', 'passes/image.html', 'passes/platform_image.html', 'passes/text.html']
316        chunk_tests_run = get_tests_run(['--run-chunk', '1:3'] + tests_to_run, tests_included=True, flatten_batches=True)
317        self.assertEquals(['passes/text.html', 'passes/error.html', 'passes/image.html'], chunk_tests_run)
318
319    def test_run_force(self):
320        # This raises an exception because we run
321        # failures/expected/exception.html, which is normally SKIPped.
322        self.assertRaises(ValueError, logging_run, ['--force'])
323
324    def test_run_part(self):
325        # Test that we actually select the right part
326        tests_to_run = ['passes/error.html', 'passes/image.html', 'passes/platform_image.html', 'passes/text.html']
327        tests_run = get_tests_run(['--run-part', '1:2'] + tests_to_run, tests_included=True, flatten_batches=True)
328        self.assertEquals(['passes/error.html', 'passes/image.html'], tests_run)
329
330        # Test that we wrap around if the number of tests is not evenly divisible by the chunk size
331        # (here we end up with 3 parts, each with 2 tests, and we only have 4 tests total, so the
332        # last part repeats the first two tests).
333        chunk_tests_run = get_tests_run(['--run-part', '3:3'] + tests_to_run, tests_included=True, flatten_batches=True)
334        self.assertEquals(['passes/error.html', 'passes/image.html'], chunk_tests_run)
335
336    def test_run_singly(self):
337        batch_tests_run = get_tests_run(['--run-singly'])
338        for batch in batch_tests_run:
339            self.assertEquals(len(batch), 1, '%s had too many tests' % ', '.join(batch))
340
341    def test_skip_failing_tests(self):
342        batches = get_tests_run(['--skip-failing-tests'])
343        has_passes_text = False
344        for batch in batches:
345            self.assertFalse('failures/expected/text.html' in batch)
346            has_passes_text = has_passes_text or ('passes/text.html' in batch)
347        self.assertTrue(has_passes_text)
348
349    def test_run_singly_actually_runs_tests(self):
350        res, _, _, _ = logging_run(['--run-singly', 'failures/unexpected'])
351        self.assertEquals(res, 5)
352
353    def test_single_file(self):
354        tests_run = get_tests_run(['passes/text.html'], tests_included=True, flatten_batches=True)
355        self.assertEquals(['passes/text.html'], tests_run)
356
357    def test_single_file_with_prefix(self):
358        tests_run = get_tests_run(['LayoutTests/passes/text.html'], tests_included=True, flatten_batches=True)
359        self.assertEquals(['passes/text.html'], tests_run)
360
361    def test_single_skipped_file(self):
362        tests_run = get_tests_run(['failures/expected/keybaord.html'], tests_included=True, flatten_batches=True)
363        self.assertEquals([], tests_run)
364
365    def test_stderr_is_saved(self):
366        fs = port.unit_test_filesystem()
367        self.assertTrue(passing_run(filesystem=fs))
368        self.assertEquals(fs.read_text_file('/tmp/layout-test-results/passes/error-stderr.txt'),
369                          'stuff going to stderr')
370
371    def test_test_list(self):
372        fs = port.unit_test_filesystem()
373        filename = '/tmp/foo.txt'
374        fs.write_text_file(filename, 'passes/text.html')
375        tests_run = get_tests_run(['--test-list=%s' % filename], tests_included=True, flatten_batches=True, filesystem=fs)
376        self.assertEquals(['passes/text.html'], tests_run)
377        fs.remove(filename)
378        res, out, err, user = logging_run(['--test-list=%s' % filename],
379                                          tests_included=True, filesystem=fs)
380        self.assertEqual(res, -1)
381        self.assertFalse(err.empty())
382
383    def test_test_list_with_prefix(self):
384        fs = port.unit_test_filesystem()
385        filename = '/tmp/foo.txt'
386        fs.write_text_file(filename, 'LayoutTests/passes/text.html')
387        tests_run = get_tests_run(['--test-list=%s' % filename], tests_included=True, flatten_batches=True, filesystem=fs)
388        self.assertEquals(['passes/text.html'], tests_run)
389
390    def test_unexpected_failures(self):
391        # Run tests including the unexpected failures.
392        self._url_opened = None
393        res, out, err, user = logging_run(tests_included=True)
394
395        # Update this magic number if you add an unexpected test to webkitpy.layout_tests.port.test
396        # FIXME: It would be nice to have a routine in port/test.py that returns this number.
397        unexpected_tests_count = 5
398
399        self.assertEqual(res, unexpected_tests_count)
400        self.assertFalse(out.empty())
401        self.assertFalse(err.empty())
402        self.assertEqual(user.opened_urls, ['/tmp/layout-test-results/results.html'])
403
404    def test_exit_after_n_failures_upload(self):
405        fs = port.unit_test_filesystem()
406        res, buildbot_output, regular_output, user = logging_run([
407                'failures/unexpected/text-image-checksum.html',
408                'passes/text.html',
409                '--exit-after-n-failures', '1',
410            ],
411            tests_included=True,
412            record_results=True,
413            filesystem=fs)
414        self.assertTrue('/tmp/layout-test-results/incremental_results.json' in fs.files)
415
416    def test_exit_after_n_failures(self):
417        # Unexpected failures should result in tests stopping.
418        tests_run = get_tests_run([
419                'failures/unexpected/text-image-checksum.html',
420                'passes/text.html',
421                '--exit-after-n-failures', '1',
422            ],
423            tests_included=True,
424            flatten_batches=True)
425        self.assertEquals(['failures/unexpected/text-image-checksum.html'], tests_run)
426
427        # But we'll keep going for expected ones.
428        tests_run = get_tests_run([
429                'failures/expected/text.html',
430                'passes/text.html',
431                '--exit-after-n-failures', '1',
432            ],
433            tests_included=True,
434            flatten_batches=True)
435        self.assertEquals(['failures/expected/text.html', 'passes/text.html'], tests_run)
436
437    def test_exit_after_n_crashes(self):
438        # Unexpected crashes should result in tests stopping.
439        tests_run = get_tests_run([
440                'failures/unexpected/crash.html',
441                'passes/text.html',
442                '--exit-after-n-crashes-or-timeouts', '1',
443            ],
444            tests_included=True,
445            flatten_batches=True)
446        self.assertEquals(['failures/unexpected/crash.html'], tests_run)
447
448        # Same with timeouts.
449        tests_run = get_tests_run([
450                'failures/unexpected/timeout.html',
451                'passes/text.html',
452                '--exit-after-n-crashes-or-timeouts', '1',
453            ],
454            tests_included=True,
455            flatten_batches=True)
456        self.assertEquals(['failures/unexpected/timeout.html'], tests_run)
457
458        # But we'll keep going for expected ones.
459        tests_run = get_tests_run([
460                'failures/expected/crash.html',
461                'passes/text.html',
462                '--exit-after-n-crashes-or-timeouts', '1',
463            ],
464            tests_included=True,
465            flatten_batches=True)
466        self.assertEquals(['failures/expected/crash.html', 'passes/text.html'], tests_run)
467
468    def test_exit_after_n_crashes_inline_worker_model(self):
469        tests_run = get_tests_run([
470                'failures/unexpected/timeout.html',
471                'passes/text.html',
472                '--exit-after-n-crashes-or-timeouts', '1',
473                '--worker-model', 'inline',
474            ],
475            tests_included=True,
476            flatten_batches=True)
477        self.assertEquals(['failures/unexpected/timeout.html'], tests_run)
478
479    def test_results_directory_absolute(self):
480        # We run a configuration that should fail, to generate output, then
481        # look for what the output results url was.
482
483        fs = port.unit_test_filesystem()
484        with fs.mkdtemp() as tmpdir:
485            res, out, err, user = logging_run(['--results-directory=' + str(tmpdir)],
486                                              tests_included=True, filesystem=fs)
487            self.assertEqual(user.opened_urls, [fs.join(tmpdir, 'results.html')])
488
489    def test_results_directory_default(self):
490        # We run a configuration that should fail, to generate output, then
491        # look for what the output results url was.
492
493        # This is the default location.
494        res, out, err, user = logging_run(tests_included=True)
495        self.assertEqual(user.opened_urls, ['/tmp/layout-test-results/results.html'])
496
497    def test_results_directory_relative(self):
498        # We run a configuration that should fail, to generate output, then
499        # look for what the output results url was.
500        fs = port.unit_test_filesystem()
501        fs.maybe_make_directory('/tmp/cwd')
502        fs.chdir('/tmp/cwd')
503        res, out, err, user = logging_run(['--results-directory=foo'],
504                                          tests_included=True, filesystem=fs)
505        self.assertEqual(user.opened_urls, ['/tmp/cwd/foo/results.html'])
506
507    # These next tests test that we run the tests in ascending alphabetical
508    # order per directory. HTTP tests are sharded separately from other tests,
509    # so we have to test both.
510    def assert_run_order(self, worker_model, child_processes='1'):
511        tests_run = get_tests_run(['--worker-model', worker_model,
512            '--child-processes', child_processes, 'passes'],
513            tests_included=True, flatten_batches=True)
514        self.assertEquals(tests_run, sorted(tests_run))
515
516        tests_run = get_tests_run(['--worker-model', worker_model,
517            '--child-processes', child_processes, 'http/tests/passes'],
518            tests_included=True, flatten_batches=True)
519        self.assertEquals(tests_run, sorted(tests_run))
520
521    def test_run_order__inline(self):
522        self.assert_run_order('inline')
523
524    def test_tolerance(self):
525        class ImageDiffTestPort(TestPort):
526            def diff_image(self, expected_contents, actual_contents,
527                   diff_filename=None):
528                self.tolerance_used_for_diff_image = self._options.tolerance
529                return True
530
531        def get_port_for_run(args):
532            options, parsed_args = run_webkit_tests.parse_args(args)
533            test_port = ImageDiffTestPort(options=options, user=mocktool.MockUser())
534            passing_run(args, port_obj=test_port, tests_included=True)
535            return test_port
536
537        base_args = ['--pixel-tests', 'failures/expected/*']
538
539        # If we pass in an explicit tolerance argument, then that will be used.
540        test_port = get_port_for_run(base_args + ['--tolerance', '.1'])
541        self.assertEqual(0.1, test_port.tolerance_used_for_diff_image)
542        test_port = get_port_for_run(base_args + ['--tolerance', '0'])
543        self.assertEqual(0, test_port.tolerance_used_for_diff_image)
544
545        # Otherwise the port's default tolerance behavior (including ignoring it)
546        # should be used.
547        test_port = get_port_for_run(base_args)
548        self.assertEqual(None, test_port.tolerance_used_for_diff_image)
549
550    def test_worker_model__inline(self):
551        self.assertTrue(passing_run(['--worker-model', 'inline']))
552
553    def test_worker_model__inline_with_child_processes(self):
554        res, out, err, user = logging_run(['--worker-model', 'inline',
555                                           '--child-processes', '2'])
556        self.assertEqual(res, 0)
557        self.assertTrue('--worker-model=inline overrides --child-processes\n' in err.get())
558
559    def test_worker_model__processes(self):
560        # FIXME: remove this when we fix test-webkitpy to work properly
561        # with the multiprocessing module (bug 54520).
562        if multiprocessing and sys.platform not in ('cygwin', 'win32'):
563            self.assertTrue(passing_run(['--worker-model', 'processes']))
564
565    def test_worker_model__processes_and_dry_run(self):
566        if multiprocessing and sys.platform not in ('cygwin', 'win32'):
567            self.assertTrue(passing_run(['--worker-model', 'processes', '--dry-run']))
568
569    def test_worker_model__threads(self):
570        self.assertTrue(passing_run(['--worker-model', 'threads']))
571
572    def test_worker_model__unknown(self):
573        self.assertRaises(ValueError, logging_run,
574                          ['--worker-model', 'unknown'])
575
576    def test_reftest_run(self):
577        tests_run = get_tests_run(['passes/reftest.html'], tests_included=True, flatten_batches=True)
578        self.assertEquals(['passes/reftest.html'], tests_run)
579
580    def test_reftest_skip_reftests_if_pixel_tests_are_disabled(self):
581        tests_run = get_tests_run(['--no-pixel-tests', 'passes/reftest.html'], tests_included=True, flatten_batches=True)
582        self.assertEquals([], tests_run)
583
584    def test_reftest_expected_html_should_be_ignored(self):
585        tests_run = get_tests_run(['passes/reftest-expected.html'], tests_included=True, flatten_batches=True)
586        self.assertEquals([], tests_run)
587
588    def test_reftest_driver_should_run_expected_html(self):
589        tests_run = get_tests_run(['passes/reftest.html'], tests_included=True, flatten_batches=True,
590                                  include_reference_html=True)
591        self.assertEquals(['passes/reftest.html', 'passes/reftest-expected.html'], tests_run)
592
593    def test_reftest_driver_should_run_expected_mismatch_html(self):
594        tests_run = get_tests_run(['passes/mismatch.html'], tests_included=True, flatten_batches=True,
595                                  include_reference_html=True)
596        self.assertEquals(['passes/mismatch.html', 'passes/mismatch-expected-mismatch.html'], tests_run)
597
598    def test_additional_platform_directory(self):
599        self.assertTrue(passing_run(['--additional-platform-directory', '/tmp/foo']))
600        self.assertTrue(passing_run(['--additional-platform-directory', '/tmp/../foo']))
601        self.assertTrue(passing_run(['--additional-platform-directory', '/tmp/foo',
602            '--additional-platform-directory', '/tmp/bar']))
603
604        res, buildbot_output, regular_output, user = logging_run(
605             ['--additional-platform-directory', 'foo'])
606        self.assertTrue('--additional-platform-directory=foo is ignored since it is not absolute\n'
607                        in regular_output.get())
608
609
610MainTest = skip_if(MainTest, sys.platform == 'cygwin' and compare_version(sys, '2.6')[0] < 0, 'new-run-webkit-tests tests hang on Cygwin Python 2.5.2')
611
612
613class RebaselineTest(unittest.TestCase):
614    def assertBaselines(self, file_list, file):
615        "assert that the file_list contains the baselines."""
616        for ext in (".txt", ".png"):
617            baseline = file + "-expected" + ext
618            self.assertTrue(any(f.find(baseline) != -1 for f in file_list))
619
620    # FIXME: Add tests to ensure that we're *not* writing baselines when we're not
621    # supposed to be.
622
623    def test_reset_results(self):
624        # Test that we update expectations in place. If the expectation
625        # is missing, update the expected generic location.
626        fs = port.unit_test_filesystem()
627        passing_run(['--pixel-tests',
628                        '--reset-results',
629                        'passes/image.html',
630                        'failures/expected/missing_image.html'],
631                        tests_included=True, filesystem=fs)
632        file_list = fs.written_files.keys()
633        file_list.remove('/tmp/layout-test-results/tests_run0.txt')
634        self.assertEqual(len(file_list), 4)
635        self.assertBaselines(file_list, "/passes/image")
636        self.assertBaselines(file_list, "/failures/expected/missing_image")
637
638    def test_new_baseline(self):
639        # Test that we update the platform expectations. If the expectation
640        # is missing, then create a new expectation in the platform dir.
641        fs = port.unit_test_filesystem()
642        passing_run(['--pixel-tests',
643                        '--new-baseline',
644                        'passes/image.html',
645                        'failures/expected/missing_image.html'],
646                    tests_included=True, filesystem=fs)
647        file_list = fs.written_files.keys()
648        file_list.remove('/tmp/layout-test-results/tests_run0.txt')
649        self.assertEqual(len(file_list), 4)
650        self.assertBaselines(file_list,
651            "/platform/test-mac-leopard/passes/image")
652        self.assertBaselines(file_list,
653            "/platform/test-mac-leopard/failures/expected/missing_image")
654
655
656class DryrunTest(unittest.TestCase):
657    # FIXME: it's hard to know which platforms are safe to test; the
658    # chromium platforms require a chromium checkout, and the mac platform
659    # requires fcntl, so it can't be tested on win32, etc. There is
660    # probably a better way of handling this.
661    def disabled_test_darwin(self):
662        if sys.platform != "darwin":
663            return
664
665        self.assertTrue(passing_run(['--platform', 'dryrun', 'fast/html'],
666                        tests_included=True))
667        self.assertTrue(passing_run(['--platform', 'dryrun-mac', 'fast/html'],
668                        tests_included=True))
669
670    def test_test(self):
671        self.assertTrue(passing_run(['--platform', 'dryrun-test',
672                                           '--pixel-tests']))
673
674
675if __name__ == '__main__':
676    unittest.main()
677
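run_and_capture() above is the pivot for most of these assertions: it wraps run_webkit_tests.run() in webkitpy's OutputCapture so tests can inspect what was printed. Below is a rough standard-library analogue of that capture-around-run pattern; it is a sketch, not the webkitpy API, and some_run() is an illustrative stand-in for run_webkit_tests.run().

# Sketch of the capture-around-run pattern from run_and_capture(), using only
# the standard library. some_run() stands in for run_webkit_tests.run().
import contextlib
import io

def some_run():
    print('Running 1 test')  # pretend harness output
    return 0

def run_and_capture_sketch():
    out, err = io.StringIO(), io.StringIO()
    with contextlib.redirect_stdout(out), contextlib.redirect_stderr(err):
        res = some_run()  # anything printed in here is captured, not shown
    return res, out.getvalue(), err.getvalue()

res, stdout_text, stderr_text = run_and_capture_sketch()
assert res == 0 and 'Running 1 ' in stdout_text

The try/finally in the original serves the same purpose as the with-block here: output is restored even when run() raises, which matters for tests like test_keyboard_interrupt and test_exception_raised.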

eventrouter.js

Source: eventrouter.js Github

1/*
2 * Copyright 2019-present Open Networking Foundation
3
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7
8 * http://www.apache.org/licenses/LICENSE-2.0
9
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17
18(function () {
19    'use strict';
20
21    const _ = require('lodash');
22    const logger = require('../config/logger.js');
23    const Client = require('../types/client.js');
24    const WorkflowRun = require('../types/workflowrun.js');
25    const ws_probe = require('./ws_probe.js');
26    const ws_manager = require('./ws_manager.js');
27    const ws_workflowrun = require('./ws_workflowrun.js');
28
29    let allClients = {}; // has publishers and subscribers
30    let probeClients = {}; // a subset of clients
31    let workflowManagerClients = {}; // a subset of clients
32    let workflowRunClients = {}; // a subset of clients
33
34    //let io;
35
36    // key: workflow id
37    // value: Workflow instance
38    let workflows = {};
39
40    // key: workflow run id
41    // value: WorkflowRun instance
42    let workflowRuns = {};
43
44    let serviceEvents = {
45        GREETING: 'cord.workflow.ctlsvc.greeting'
46    };
47
48    setInterval(function () {
49        let requests = [];
50        _.forOwn(workflowRuns, (workflowRun, workflowRunId) => {
51            let obj = {
52                workflow_id: workflowRun.getWorkflowId(),
53                workflow_run_id: workflowRunId
54            };
55            requests.push(obj);
56        });
57
58        checkWorkflowRunStatusBulk(requests);
59    }, 5000);
60
61    // add ws_probe events
62    _.forOwn(ws_probe.serviceEvents, (wsServiceEvent, key) => {
63        serviceEvents[key] = wsServiceEvent;
64    });
65
66    // add ws_manager events
67    _.forOwn(ws_manager.serviceEvents, (wsServiceEvent, key) => {
68        serviceEvents[key] = wsServiceEvent;
69    });
70
71    // add ws_workflowrun events
72    _.forOwn(ws_workflowrun.serviceEvents, (wsServiceEvent, key) => {
73        serviceEvents[key] = wsServiceEvent;
74    });
75
76    //const setIO = (ioInstance) => {
77    //    io = ioInstance;
78    //};
79
80    const checkObject = (obj) => {
81        return Object.prototype.toString.call(obj) === '[object Object]';
82    };
83
84    const destroy = () => {
85        removeClients();
86        clearWorkflowRuns();
87        clearWorkflows();
88    };
89
90    const listWorkflows = () => {
91        let workflowList = [];
92        _.forOwn(workflows, (_workflow, workflowId) => {
93            workflowList.push(workflowId);
94        });
95        return workflowList;
96    };
97
98    const checkWorkflow = (workflowId) => {
99        if(workflowId in workflows) {
100            return true;
101        }
102        return false;
103    };
104
105    const addWorkflow = (workflow) => {
106        if(workflow.getId() in workflows) {
107            logger.log('error', `there exists a workflow with the same id - ${workflow.getId()}`);
108            return false;
109        }
110
111        let workflowId = workflow.getId();
112        workflows[workflowId] = workflow;
113        return true;
114    };
115
116    const getWorkflow = (workflowId) => {
117        if(!(workflowId in workflows)) {
118            logger.log('warn', `cannot find a workflow with id - ${workflowId}`);
119            return null;
120        }
121
122        return workflows[workflowId];
123    };
124
125    const clearWorkflows = () => {
126        _.forOwn(workflows, (_workflow, workflowId) => {
127            delete workflows[workflowId];
128        });
129    };
130
131    const listWorkflowRuns = () => {
132        let workflowRunList = [];
133        _.forOwn(workflowRuns, (_workflowRun, workflowRunId) => {
134            workflowRunList.push(workflowRunId);
135        });
136        return workflowRunList;
137    };
138
139    const checkWorkflowRun = (workflowRunId) => {
140        if(workflowRunId in workflowRuns) {
141            return true;
142        }
143        return false;
144    };
145
146    const addWorkflowRun = (workflowRun) => {
147        let workflowId = workflowRun.getWorkflowId();
148        let workflowRunId = workflowRun.getId();
149
150        if(workflowRunId in workflowRuns) {
151            logger.log('warn', `there exists a workflow run with the same id - ${workflowRunId}`);
152            return false;
153        }
154
155        if(!(workflowId in workflows)) {
156            logger.log('warn', `cannot find a workflow with id - ${workflowId}`);
157            return false;
158        }
159
160        workflowRuns[workflowRunId] = workflowRun;
161        return true;
162    };
163
164    const getWorkflowRun = (workflowRunId) => {
165        if(!(workflowRunId in workflowRuns)) {
166            logger.log('warn', `cannot find a workflow run with id - ${workflowRunId}`);
167            return null;
168        }
169
170        return workflowRuns[workflowRunId];
171    };
172
173    const clearWorkflowRuns = () => {
174        _.forOwn(workflowRuns, (_workflowRun, workflowRunId) => {
175            delete workflowRuns[workflowRunId];
176        });
177    };
178
179    const setWorkflowRunKickstarted = (workflowRunId) => {
180        if(!(workflowRunId in workflowRuns)) {
181            logger.log('warn', `cannot find a workflow run with the id - ${workflowRunId}`);
182            return false;
183        }
184
185        let workflowRun = workflowRuns[workflowRunId];
186        workflowRun.setKickstarted();
187        return true;
188    };
189
190    const setWorkflowRunStatus = (workflowRunId, status) => {
191        if(!(workflowRunId in workflowRuns)) {
192            logger.log('warn', `cannot find a workflow run with the id - ${workflowRunId}`);
193            return false;
194        }
195
196        if(status === 'success' || status === 'failed' || status === 'end') {
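            // Terminal states drop the run from the registry, which also stops
            // the 5-second bulk poller from checking it.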
197            removeWorkflowRun(workflowRunId);
198        }
199        return true;
200    };
201
202    const kickstart = (workflowId, workflowRunId) => {
203        if(!(workflowId in workflows)) {
204            logger.log('warn', `cannot find a workflow with the id - ${workflowId}`);
205            return false;
206        }
207
208        if(!(workflowRunId in workflowRuns)) {
209            logger.log('warn', `cannot find a workflow run with the id - ${workflowRunId}`);
210            return false;
211        }
212
213        ws_manager.kickstartWorkflow(workflowId, workflowRunId);
214        return true;
215    };
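Stripped of the websocket plumbing, the router's bookkeeping is a pair of keyed registries with existence guards: adds refuse duplicate ids, and lookups warn and return null when an id is absent. Below is a compact Python sketch of that pattern; the names are illustrative and not part of the CORD workflow API.

# Compact sketch of the keyed-registry pattern used by the event router.
# Names are illustrative; this is not the CORD workflow API.
import logging

logger = logging.getLogger('eventrouter_sketch')
workflow_runs = {}

def add_workflow_run(run_id, run):
    if run_id in workflow_runs:
        logger.warning('there exists a workflow run with the same id - %s', run_id)
        return False
    workflow_runs[run_id] = run
    return True

def get_workflow_run(run_id):
    if run_id not in workflow_runs:
        logger.warning('cannot find a workflow run with id - %s', run_id)
        return None
    return workflow_runs[run_id]

add_workflow_run('run-1', object())
assert get_workflow_run('run-1') is not None
assert get_workflow_run('missing') is None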