Best Syzkaller code snippet using main.createJobResp
jobs.go
Source: jobs.go
...
	job, jobKey, err := getNextJob(c, managers)
	if job == nil || err != nil {
		return nil, err
	}
	resp, stale, err := createJobResp(c, job, jobKey)
	if err != nil {
		return nil, err
	}
	if stale {
		goto retry
	}
	return resp, nil
}

func getNextJob(c context.Context, managers map[string]dashapi.ManagerJobs) (*Job, *db.Key, error) {
	job, jobKey, err := loadPendingJob(c, managers)
	if job != nil || err != nil {
		return job, jobKey, err
	}
	// We need both C and syz repros, but the crazy datastore query restrictions
	// do not allow to use ReproLevel>ReproLevelNone in the query. So we do 2 separate queries.
	// C repros tend to be of higher reliability so maybe it's not bad.
	job, jobKey, err = createBisectJob(c, managers, ReproLevelC)
	if job != nil || err != nil {
		return job, jobKey, err
	}
	return createBisectJob(c, managers, ReproLevelSyz)
}

func createBisectJob(c context.Context, managers map[string]dashapi.ManagerJobs,
	reproLevel dashapi.ReproLevel) (*Job, *db.Key, error) {
	causeManagers := make(map[string]bool)
	fixManagers := make(map[string]bool)
	for mgr, jobs := range managers {
		if jobs.BisectCause {
			causeManagers[mgr] = true
		}
		if jobs.BisectFix {
			fixManagers[mgr] = true
		}
	}
	job, jobKey, err := findBugsForBisection(c, causeManagers, reproLevel, JobBisectCause)
	if job != nil || err != nil {
		return job, jobKey, err
	}
	return findBugsForBisection(c, fixManagers, reproLevel, JobBisectFix)
}

func findBugsForBisection(c context.Context, managers map[string]bool,
	reproLevel dashapi.ReproLevel, jobType JobType) (*Job, *db.Key, error) {
	if len(managers) == 0 {
		return nil, nil, nil
	}
	// Note: we could also include len(Commits)==0 but datastore does not work this way.
	// So we would need an additional HasCommits field or something.
	// Note: For JobBisectCause, order the bugs from newest to oldest. For JobBisectFix,
	// order the bugs from oldest to newest.
	// Sort property should be the same as property used in the inequality filter.
	// We only need 1 job, but we skip some because the query is not precise.
	bugs, keys, err := loadAllBugs(c, func(query *db.Query) *db.Query {
		query = query.Filter("Status=", BugStatusOpen)
		if jobType == JobBisectCause {
			query = query.Filter("FirstTime>", time.Time{}).
				Filter("ReproLevel=", reproLevel).
				Filter("BisectCause=", BisectNot).
				Order("-FirstTime")
		} else {
			query = query.Filter("LastTime>", time.Time{}).
				Filter("ReproLevel=", reproLevel).
				Filter("BisectFix=", BisectNot).
				Order("LastTime")
		}
		return query
	})
	if err != nil {
		return nil, nil, fmt.Errorf("failed to query bugs: %v", err)
	}
	for bi, bug := range bugs {
		if !shouldBisectBug(bug, managers) {
			continue
		}
		crash, crashKey, err := bisectCrashForBug(c, bug, keys[bi], managers, jobType)
		if err != nil {
			return nil, nil, err
		}
		if crash == nil {
			continue
		}
		if jobType == JobBisectFix && timeSince(c, bug.LastTime) < 24*30*time.Hour {
			continue
		}
		return createBisectJobForBug(c, bug, crash, keys[bi], crashKey, jobType)
	}
	return nil, nil, nil
}

func shouldBisectBug(bug *Bug, managers map[string]bool) bool {
	if len(bug.Commits) != 0 {
		return false
	}
	for _, mgr := range bug.HappenedOn {
		if managers[mgr] {
			return true
		}
	}
	return false
}

func bisectCrashForBug(c context.Context, bug *Bug, bugKey *db.Key, managers map[string]bool, jobType JobType) (
	*Crash, *db.Key, error) {
	crashes, crashKeys, err := queryCrashesForBug(c, bugKey, maxCrashes)
	if err != nil {
		return nil, nil, err
	}
	for ci, crash := range crashes {
		if crash.ReproSyz == 0 || !managers[crash.Manager] {
			continue
		}
		if jobType == JobBisectFix &&
			config.Namespaces[bug.Namespace].Managers[crash.Manager].FixBisectionDisabled {
			continue
		}
		return crash, crashKeys[ci], nil
	}
	return nil, nil, nil
}

func createBisectJobForBug(c context.Context, bug0 *Bug, crash *Crash, bugKey, crashKey *db.Key, jobType JobType) (
	*Job, *db.Key, error) {
	build, err := loadBuild(c, bug0.Namespace, crash.BuildID)
	if err != nil {
		return nil, nil, err
	}
	now := timeNow(c)
	job := &Job{
		Type:         jobType,
		Created:      now,
		Namespace:    bug0.Namespace,
		Manager:      crash.Manager,
		KernelRepo:   build.KernelRepo,
		KernelBranch: build.KernelBranch,
		BugTitle:     bug0.displayTitle(),
		CrashID:      crashKey.IntID(),
	}
	var jobKey *db.Key
	tx := func(c context.Context) error {
		jobKey = nil
		bug := new(Bug)
		if err := db.Get(c, bugKey, bug); err != nil {
			return fmt.Errorf("failed to get bug %v: %v", bugKey.StringID(), err)
		}
		if jobType == JobBisectFix && bug.BisectFix != BisectNot ||
			jobType == JobBisectCause && bug.BisectCause != BisectNot {
			// Race, we could do a more complex retry, but we just rely on the next poll.
			job = nil
			return nil
		}
		if jobType == JobBisectCause {
			bug.BisectCause = BisectPending
		} else {
			bug.BisectFix = BisectPending
		}
		// Create a new job.
		var err error
		jobKey = db.NewIncompleteKey(c, "Job", bugKey)
		if jobKey, err = db.Put(c, jobKey, job); err != nil {
			return fmt.Errorf("failed to put job: %v", err)
		}
		if _, err := db.Put(c, bugKey, bug); err != nil {
			return fmt.Errorf("failed to put bug: %v", err)
		}
		return markCrashReported(c, job.CrashID, bugKey, now)
	}
	if err := db.RunInTransaction(c, tx, nil); err != nil {
		return nil, nil, fmt.Errorf("create bisect job tx failed: %v", err)
	}
	return job, jobKey, nil
}

func createJobResp(c context.Context, job *Job, jobKey *db.Key) (*dashapi.JobPollResp, bool, error) {
	jobID := extJobID(jobKey)
	patch, _, err := getText(c, textPatch, job.Patch)
	if err != nil {
		return nil, false, err
	}
	bugKey := jobKey.Parent()
	crashKey := db.NewKey(c, "Crash", "", job.CrashID, bugKey)
	crash := new(Crash)
	if err := db.Get(c, crashKey, crash); err != nil {
		return nil, false, fmt.Errorf("job %v: failed to get crash: %v", jobID, err)
	}
	build, err := loadBuild(c, job.Namespace, crash.BuildID)
	if err != nil {
		return nil, false, err
...
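The excerpt above starts mid-function. A minimal sketch of the enclosing poll handler implied by the truncated top of the snippet — the function name pollPendingJobs and its exact signature are assumptions; the body simply wraps the visible lines in a retry loop:

func pollPendingJobs(c context.Context, managers map[string]dashapi.ManagerJobs) (*dashapi.JobPollResp, error) {
retry:
	job, jobKey, err := getNextJob(c, managers)
	if job == nil || err != nil {
		return nil, err
	}
	resp, stale, err := createJobResp(c, job, jobKey)
	if err != nil {
		return nil, err
	}
	if stale {
		// createJobResp found the loaded job was claimed concurrently;
		// loop and pick the next one.
		goto retry
	}
	return resp, nil
}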
requests.go
Source: requests.go
package flinkjob

import (
	"github.com/chnsz/golangsdk"
	"github.com/chnsz/golangsdk/openstack/common/tags"
)

const (
	JobTypeFlinkSql           = "flink_sql_job"
	JobTypeFlinkOpenSourceSql = "flink_opensource_sql_job"
	JobTypeFlinkEdgeSql       = "flink_sql_edge_job"
	JobTypeFlinkJar           = "flink_jar_job"

	RunModeSharedCluster    = "shared_cluster"
	RunModeExclusiveCluster = "exclusive_cluster"
	RunModeEdgeNode         = "edge_node"

	CheckpointModeExactlyOnce = "exactly_once"
	CheckpointModeAtLeastOnce = "at_least_once"
)

type CreateSqlJobOpts struct {
	// Name of the job. Length range: 0 to 57 characters.
	Name string `json:"name" required:"true"`
	// Job description. Length range: 0 to 512 characters.
	Desc string `json:"desc,omitempty"`
	// Template ID.
	// If both template_id and sql_body are specified, sql_body is used. If template_id is specified but
	// sql_body is not, sql_body is filled from the template identified by template_id.
	TemplateId *int `json:"template_id,omitempty"`
	// Name of a queue. Length range: 1 to 128 characters.
	QueueName string `json:"queue_name,omitempty"`
	// Stream SQL statement, which includes at least the following three parts: source, query, and sink.
	// Length range: 1024x1024 characters.
	SqlBody string `json:"sql_body,omitempty"`
	// Job running mode. The options are as follows:
	// shared_cluster: the job runs on a shared cluster.
	// exclusive_cluster: the job runs on an exclusive cluster.
	// edge_node: the job runs on an edge node.
	// The default value is shared_cluster.
	RunMode string `json:"run_mode,omitempty"`
	// Number of CUs selected for a job. The default value is 2.
	CuNumber *int `json:"cu_number,omitempty"`
	// Number of parallel jobs set by a user. The default value is 1.
	ParallelNumber *int `json:"parallel_number,omitempty"`
	// Whether to enable the automatic job snapshot function.
	// true: enable automatic job snapshots.
	// false: disable automatic job snapshots.
	// Default value: false
	CheckpointEnabled *bool `json:"checkpoint_enabled,omitempty"`
	// Snapshot mode. There are two options:
	// 1: exactly_once, data is processed only once.
	// 2: at_least_once, data is processed at least once.
	// The default value is 1.
	CheckpointMode *int `json:"checkpoint_mode,omitempty"`
	// Snapshot interval. The unit is second. The default value is 10.
	CheckpointInterval *int `json:"checkpoint_interval,omitempty"`
	// OBS path where users are authorized to save snapshots and job logs.
	// This parameter is valid only when checkpoint_enabled or log_enabled is set to true.
	ObsBucket string `json:"obs_bucket,omitempty"`
	// Whether to enable uploading job logs to users' OBS buckets. The default value is false.
	LogEnabled *bool `json:"log_enabled,omitempty"`
	// SMN topic. If a job fails, the system will send a message to users subscribed to the SMN topic.
	SmnTopic string `json:"smn_topic,omitempty"`
	// Whether to automatically restart a job upon job exceptions. The default value is false.
	RestartWhenException *bool `json:"restart_when_exception,omitempty"`
	// Retention time of the idle state. The unit is hour. The default value is 1.
	IdleStateRetention *int `json:"idle_state_retention,omitempty"`
	// List of edge computing group IDs.
	EdgeGroupIds []string `json:"edge_group_ids,omitempty"`
	// Job type. This parameter can be set to flink_sql_job or flink_opensource_sql_job.
	// If run_mode is set to shared_cluster or exclusive_cluster, this parameter must be flink_sql_job.
	// The default value is flink_sql_job.
	JobType string `json:"job_type,omitempty"`
	// Dirty data policy of a job.
	// 2:obsDir: Save. obsDir specifies the path for storing dirty data.
	// 1: Trigger a job exception.
	// 0: Ignore.
	// The default value is 0.
	DirtyDataStrategy string `json:"dirty_data_strategy,omitempty"`
	// Name of the resource package that has been uploaded to the DLI resource management system.
	// The UDF Jar file of the SQL job is specified by this parameter.
	UdfJarUrl string `json:"udf_jar_url,omitempty"`
	// Number of CUs in the JobManager selected for a job. The default value is 1.
	ManagerCuNumber *int `json:"manager_cu_number"`
	// Number of CUs for each TaskManager. The default value is 1.
	TmCus *int `json:"tm_cus,omitempty"`
	// Number of slots in each TaskManager. The default value is (parallel_number*tm_cus)/(cu_number-manager_cu_number).
	TmSlotNum *int `json:"tm_slot_num,omitempty"`
	// Whether the abnormal restart is recovered from the checkpoint.
	ResumeCheckpoint *bool `json:"resume_checkpoint,omitempty"`
	// Maximum number of retries upon exceptions. The unit is times/hour. Value range: -1 or greater than 0.
	// The default value is -1, indicating that the number of retries is unlimited.
	ResumeMaxNum *int `json:"resume_max_num,omitempty"`
	// Customizes optimization parameters when a Flink job is running.
	RuntimeConfig string `json:"runtime_config,omitempty"`
	// Label of a Flink SQL job. For details, see Table 3.
	Tags []tags.ResourceTag `json:"tags"`
}

type UpdateSqlJobOpts struct {
	// Name of a job. Length range: 0 to 57 characters.
	Name string `json:"name,omitempty"`
	// Job description. Length range: 0 to 512 characters.
	Desc string `json:"desc,omitempty"`
	// Name of a queue. Length range: 1 to 128 characters.
	QueueName string `json:"queue_name,omitempty"`
	// Stream SQL statement, which includes at least the following three parts: source, query, and sink.
	// Length range: 0 to 1024x1024 characters.
	SqlBody string `json:"sql_body,omitempty"`
	// Job running mode. The options are as follows:
	// shared_cluster: the job runs on a shared cluster.
	// exclusive_cluster: the job runs on an exclusive cluster.
	// edge_node: the job runs on an edge node.
	// The default value is shared_cluster.
	RunMode string `json:"run_mode,omitempty"`
	// Number of CUs selected for a job. The default value is 2.
	CuNumber *int `json:"cu_number,omitempty"`
	// Number of parallel jobs set by a user. The default value is 1.
	ParallelNumber *int `json:"parallel_number,omitempty"`
	// Whether to enable the automatic job snapshot function.
	// true: enable automatic job snapshots.
	// false: disable automatic job snapshots.
	// Default value: false
	CheckpointEnabled *bool `json:"checkpoint_enabled,omitempty"`
	// Snapshot mode. There are two options:
	// 1: exactly_once, data is processed only once.
	// 2: at_least_once, data is processed at least once.
	// The default value is 1.
	CheckpointMode *int `json:"checkpoint_mode,omitempty"`
	// Snapshot interval. The unit is second. The default value is 10.
	CheckpointInterval *int `json:"checkpoint_interval,omitempty"`
	// OBS path where users are authorized to save snapshots and job logs.
	// This parameter is valid only when checkpoint_enabled or log_enabled is set to true.
	ObsBucket string `json:"obs_bucket,omitempty"`
	// Whether to enable uploading job logs to users' OBS buckets. The default value is false.
	LogEnabled *bool `json:"log_enabled,omitempty"`
	// SMN topic. If a job fails, the system will send a message to users subscribed to the SMN topic.
	SmnTopic string `json:"smn_topic,omitempty"`
	// Whether to automatically restart a job upon job exceptions. The default value is false.
	RestartWhenException *bool `json:"restart_when_exception,omitempty"`
	// Expiration time, in seconds. The default value is 3600.
	IdleStateRetention *int `json:"idle_state_retention,omitempty"`
	// List of edge computing group IDs. Use commas (,) to separate multiple IDs.
	EdgeGroupIds []string `json:"edge_group_ids,omitempty"`
	// Dirty data policy of a job.
	// 2:obsDir: Save. obsDir specifies the path for storing dirty data.
	// 1: Trigger a job exception.
	// 0: Ignore.
	// The default value is 0.
	DirtyDataStrategy string `json:"dirty_data_strategy,omitempty"`
	// Name of the resource package that has been uploaded to the DLI resource management system.
	// The UDF Jar file of the SQL job is specified by this parameter.
	UdfJarUrl string `json:"udf_jar_url,omitempty"`
	// Number of CUs in the JobManager selected for a job. The default value is 1.
	ManagerCuNumber *int `json:"manager_cu_number,omitempty"`
	// Number of CUs for each TaskManager. The default value is 1.
	TmCus *int `json:"tm_cus,omitempty"`
	// Number of slots in each TaskManager. The default value is (parallel_number*tm_cus)/(cu_number-manager_cu_number).
	TmSlotNum *int `json:"tm_slot_num,omitempty"`
	// Degree of parallelism (DOP) of an operator.
	OperatorConfig string `json:"operator_config,omitempty"`
	// Whether the abnormal restart is recovered from the checkpoint.
	ResumeCheckpoint *bool `json:"resume_checkpoint,omitempty"`
	// Maximum number of retries upon exceptions. The unit is times/hour. Value range: -1 or greater than 0.
	// The default value is -1, indicating that the number of retries is unlimited.
	ResumeMaxNum *int `json:"resume_max_num,omitempty"`
	// Traffic or hit ratio of each operator, which is a character string in JSON format.
	StaticEstimatorConfig string `json:"static_estimator_config,omitempty"`
	// Customizes optimization parameters when a Flink job is running.
	RuntimeConfig string `json:"runtime_config,omitempty"`
}

type ListOpts struct {
	Name                     string `q:"name"`
	UserName                 string `q:"user_name"`
	QueueName                string `q:"queue_name"`
	Status                   string `q:"status"`
	JobType                  string `q:"job_type"`
	Tags                     string `q:"tags"`
	SysEnterpriseProjectName string `q:"sys_enterprise_project_name"`
	ShowDetail               *bool  `q:"show_detail"`
	Order                    string `q:"order"`
	Offset                   *int   `q:"offset"`
	Limit                    *int   `q:"limit"` // default 10
	// Parent job ID of an edge job, used to query edge subjobs.
	// If empty, edge subjobs are not queried.
	RootJobId *int `q:"root_job_id"`
}

type RunJobOpts struct {
	ResumeSavepoint *bool `json:"resume_savepoint,omitempty"`
	JobIds          []int `json:"job_ids" required:"true"`
}

type ObsBucketsOpts struct {
	Buckets []string `json:"obs_buckets" required:"true"`
}

type CreateJarJobOpts struct {
	// Name of the job. Length range: 0 to 57 characters.
	Name string `json:"name" required:"true"`
	// Job description. Length range: 0 to 512 characters.
	Desc string `json:"desc,omitempty"`
	// Name of a queue. Length range: 1 to 128 characters.
	QueueName string `json:"queue_name,omitempty"`
	// Number of CUs selected for a job.
	CuNumber *int `json:"cu_number,omitempty"`
	// Number of CUs on the management node selected by the user for a job,
	// which corresponds to the number of Flink job managers. The default value is 1.
	ManagerCuNumber *int `json:"manager_cu_number,omitempty"`
	// Number of parallel operations selected for a job.
	ParallelNumber *int `json:"parallel_number,omitempty"`
	// Whether to enable the job log function.
	// true: enable the job log function.
	// false: disable the job log function.
	// Default value: false
	LogEnabled *bool `json:"log_enabled,omitempty"`
	// OBS bucket where users are authorized to save logs when log_enabled is set to true.
	ObsBucket string `json:"obs_bucket,omitempty"`
	// SMN topic. If a job fails, the system will send a message to users subscribed to the SMN topic.
	SmnTopic string `json:"smn_topic,omitempty"`
	// Job entry class.
	MainClass string `json:"main_class,omitempty"`
	// Job entry parameters. Multiple parameters are separated by spaces.
	EntrypointArgs string `json:"entrypoint_args,omitempty"`
	// Whether to enable restart upon exceptions. The default value is false.
	RestartWhenException *bool `json:"restart_when_exception,omitempty"`
	// Name of the package that has been uploaded to the DLI resource management system.
	// This parameter is used to customize the JAR file where the job main class is located.
	Entrypoint string `json:"entrypoint,omitempty"`
	// Name of the package that has been uploaded to the DLI resource management system.
	// This parameter is used to customize other dependency packages.
	// Example: myGroup/test.jar,myGroup/test1.jar.
	DependencyJars []string `json:"dependency_jars,omitempty"`
	// Name of the resource package that has been uploaded to the DLI resource management system.
	// This parameter is used to customize dependency files.
	// Example: myGroup/test.cvs,myGroup/test1.csv.
	// You can add the following content to the application to access the corresponding dependency file,
	// where fileName is the name of the file to be accessed and ClassName is the name of the class that
	// needs to access the file:
	// ClassName.class.getClassLoader().getResource("userData/fileName")
	DependencyFiles []string `json:"dependency_files,omitempty"`
	// Number of CUs for each TaskManager. The default value is 1.
	TmCus *int `json:"tm_cus,omitempty"`
	// Number of slots in each TaskManager. The default value is (parallel_number*tm_cus)/(cu_number-manager_cu_number).
	TmSlotNum *int `json:"tm_slot_num,omitempty"`
	// Job feature. Type of the Flink image used by a job.
	// basic: the basic Flink image provided by DLI is used.
	// custom: a user-defined Flink image is used.
	Feature string `json:"feature,omitempty"`
	// Flink version. This parameter is valid only when feature is set to basic. You can use this parameter with the
	// feature parameter to specify the version of the DLI basic Flink image used for job running.
	FlinkVersion string `json:"flink_version,omitempty"`
	// Custom image. The format is Organization name/Image name:Image version.
	// This parameter is valid only when feature is set to custom. You can use this parameter with the feature
	// parameter to specify a user-defined Flink image for job running.
	Image string `json:"image,omitempty"`
	// Whether the abnormal restart is recovered from the checkpoint.
	ResumeCheckpoint *bool `json:"resume_checkpoint,omitempty"`
	// Maximum number of retries upon exceptions. The unit is times/hour. Value range: -1 or greater than 0.
	// The default value is -1, indicating that the number of retries is unlimited.
	ResumeMaxNum *int `json:"resume_max_num,omitempty"`
	// Storage address of the checkpoint in the JAR file of the user. The path must be unique.
	CheckpointPath string `json:"checkpoint_path,omitempty"`
	// Label of a Flink JAR job. For details, see Table 3.
	Tags []tags.ResourceTag `json:"tags"`
	// Customizes optimization parameters when a Flink job is running.
	RuntimeConfig string `json:"runtime_config,omitempty"`
}

type UpdateJarJobOpts struct {
	// Name of the job. Length range: 0 to 57 characters.
	Name string `json:"name,omitempty"`
	// Job description. Length range: 0 to 512 characters.
	Desc string `json:"desc,omitempty"`
	// Name of a queue. Length range: 1 to 128 characters.
	QueueName string `json:"queue_name,omitempty"`
	// Number of CUs selected for a job. The default value is 2.
	CuNumber *int `json:"cu_number,omitempty"`
	// Number of CUs on the management node selected by the user for a job, which corresponds to the number of Flink
	// job managers. The default value is 1.
	ManagerCuNumber *int `json:"manager_cu_number,omitempty"`
	// Number of parallel operations selected for a job. The default value is 1.
	ParallelNumber *int `json:"parallel_number,omitempty"`
	// Whether to enable the job log function.
	// true: enable the job log function.
	// false: disable the job log function.
	// Default value: false
	LogEnabled *bool `json:"log_enabled,omitempty"`
	// OBS path where users are authorized to save logs when log_enabled is set to true.
	ObsBucket string `json:"obs_bucket,omitempty"`
	// SMN topic. If a job fails, the system will send a message to users subscribed to the SMN topic.
	SmnTopic string `json:"smn_topic,omitempty"`
	// Job entry class.
	MainClass string `json:"main_class,omitempty"`
	// Job entry parameters. Multiple parameters are separated by spaces.
	EntrypointArgs string `json:"entrypoint_args,omitempty"`
	// Whether to enable restart upon exceptions. The default value is false.
	RestartWhenException *bool `json:"restart_when_exception,omitempty"`
	// Name of the package that has been uploaded to the DLI resource management system. This parameter is used to
	// customize the JAR file where the job main class is located.
	Entrypoint string `json:"entrypoint,omitempty"`
	// Name of the package that has been uploaded to the DLI resource management system. This parameter is used to
	// customize other dependency packages.
	// Example: myGroup/test.jar,myGroup/test1.jar.
	DependencyJars []string `json:"dependency_jars,omitempty"`
	// Name of the resource package that has been uploaded to the DLI resource management system. This parameter is
	// used to customize dependency files.
	// Example: myGroup/test.cvs,myGroup/test1.csv.
	DependencyFiles []string `json:"dependency_files,omitempty"`
	// Number of CUs for each TaskManager. The default value is 1.
	TmCus *int `json:"tm_cus,omitempty"`
	// Number of slots in each TaskManager. The default value is (parallel_number*tm_cus)/(cu_number-manager_cu_number).
	TmSlotNum *int `json:"tm_slot_num,omitempty"`
	// Job feature. Type of the Flink image used by a job.
	// basic: the basic Flink image provided by DLI is used.
	// custom: a user-defined Flink image is used.
	Feature string `json:"feature,omitempty"`
	// Flink version. This parameter is valid only when feature is set to basic. You can use this parameter with the
	// feature parameter to specify the version of the DLI basic Flink image used for job running.
	FlinkVersion string `json:"flink_version,omitempty"`
	// Custom image. The format is Organization name/Image name:Image version.
	// This parameter is valid only when feature is set to custom. You can use this parameter with the feature
	// parameter to specify a user-defined Flink image for job running.
	Image string `json:"image,omitempty"`
	// Whether the abnormal restart is recovered from the checkpoint.
	ResumeCheckpoint *bool `json:"resume_checkpoint,omitempty"`
	// Maximum number of retries upon exceptions. The unit is times/hour. Value range: -1 or greater than 0.
	// The default value is -1, indicating that the number of retries is unlimited.
	ResumeMaxNum *int `json:"resume_max_num,omitempty"`
	// Storage address of the checkpoint in the JAR file of the user. The path must be unique.
	CheckpointPath string `json:"checkpoint_path,omitempty"`
	// Customizes optimization parameters when a Flink job is running.
	RuntimeConfig string `json:"runtime_config,omitempty"`
}

type StopFlinkJobInBatch struct {
	TriggerSavepoint *bool `json:"trigger_savepoint,omitempty"`
	JobIds           []int `json:"job_ids" required:"true"`
}

var RequestOpts = golangsdk.RequestOpts{
	MoreHeaders: map[string]string{"Content-Type": "application/json", "X-Language": "en-us"},
}

func CreateSqlJob(c *golangsdk.ServiceClient, opts CreateSqlJobOpts) (*CreateJobResp, error) {
	b, err := golangsdk.BuildRequestBody(opts, "")
	if err != nil {
		return nil, err
	}
	var rst CreateJobResp
	_, err = c.Post(createFlinkSqlUrl(c), b, &rst, &golangsdk.RequestOpts{
		MoreHeaders: RequestOpts.MoreHeaders,
	})
	return &rst, err
}

func UpdateSqlJob(c *golangsdk.ServiceClient, jobId int, opts UpdateSqlJobOpts) (*UpdateJobResp, error) {
	b, err := golangsdk.BuildRequestBody(opts, "")
	if err != nil {
		return nil, err
	}
	var rst UpdateJobResp
	_, err = c.Put(updateFlinkSqlURL(c, jobId), b, &rst, &golangsdk.RequestOpts{
		MoreHeaders: RequestOpts.MoreHeaders,
	})
	return &rst, err
}

func Run(c *golangsdk.ServiceClient, opts RunJobOpts) (*[]CommonResp, error) {
	b, err := golangsdk.BuildRequestBody(opts, "")
	if err != nil {
		return nil, err
	}
	var rst []CommonResp
	_, err = c.Post(runFlinkJobURL(c), b, &rst, &golangsdk.RequestOpts{
		MoreHeaders: RequestOpts.MoreHeaders,
	})
	return &rst, err
}

func Get(c *golangsdk.ServiceClient, jobId int) (*GetJobResp, error) {
	var rst GetJobResp
	_, err := c.Get(getURL(c, jobId), &rst, &golangsdk.RequestOpts{
		MoreHeaders: RequestOpts.MoreHeaders,
	})
	return &rst, err
}

func List(c *golangsdk.ServiceClient, opts ListOpts) (*ListResp, error) {
	url := listURL(c)
	query, err := golangsdk.BuildQueryString(opts)
	if err != nil {
		return nil, err
	}
	url += query.String()
	var rst ListResp
	_, err = c.Get(url, &rst, &golangsdk.RequestOpts{
		MoreHeaders: RequestOpts.MoreHeaders,
	})
	return &rst, err
}

func Delete(c *golangsdk.ServiceClient, jobId int) (*CommonResp, error) {
	var rst CommonResp
	_, err := c.DeleteWithResponse(deleteURL(c, jobId), &rst, &golangsdk.RequestOpts{
		MoreHeaders: RequestOpts.MoreHeaders,
	})
	return &rst, err
}

func AuthorizeBucket(c *golangsdk.ServiceClient, opts ObsBucketsOpts) (*CommonResp, error) {
	b, err := golangsdk.BuildRequestBody(opts, "")
	if err != nil {
		return nil, err
	}
	var rst CommonResp
	_, err = c.Post(authorizeBucketURL(c), b, &rst, &golangsdk.RequestOpts{
		MoreHeaders: RequestOpts.MoreHeaders,
	})
	return &rst, err
}

func CreateJarJob(c *golangsdk.ServiceClient, opts CreateJarJobOpts) (*CreateJobResp, error) {
	b, err := golangsdk.BuildRequestBody(opts, "")
	if err != nil {
		return nil, err
	}
	var rst CreateJobResp
	_, err = c.Post(createJarJobURL(c), b, &rst, &golangsdk.RequestOpts{
		MoreHeaders: RequestOpts.MoreHeaders,
	})
	return &rst, err
}

func UpdateJarJob(c *golangsdk.ServiceClient, jobId int, opts UpdateJarJobOpts) (*UpdateJobResp, error) {
	b, err := golangsdk.BuildRequestBody(opts, "")
	if err != nil {
		return nil, err
	}
	var rst UpdateJobResp
	_, err = c.Put(updateJarJobURL(c, jobId), b, &rst, &golangsdk.RequestOpts{
		MoreHeaders: RequestOpts.MoreHeaders,
	})
	return &rst, err
}

func Stop(c *golangsdk.ServiceClient, opts StopFlinkJobInBatch) (*[]CommonResp, error) {
	b, err := golangsdk.BuildRequestBody(opts, "")
	if err != nil {
		return nil, err
	}
	var rst []CommonResp
	_, err = c.Post(stopJobURL(c), b, &rst, &golangsdk.RequestOpts{
		MoreHeaders: RequestOpts.MoreHeaders,
	})
	return &rst, err
}
...
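For context, a minimal sketch of calling CreateSqlJob from client code. The service-client setup is elided, the import path is inferred from the package's repository layout and may differ, and the job name, queue name, and SQL body are placeholders:

package main

import (
	"fmt"

	"github.com/chnsz/golangsdk"
	"github.com/chnsz/golangsdk/openstack/dli/v1/flinkjob" // assumed path
)

func createDemoJob(client *golangsdk.ServiceClient) error {
	cu := 2
	opts := flinkjob.CreateSqlJobOpts{
		Name:      "demo-flink-sql-job", // hypothetical name
		JobType:   flinkjob.JobTypeFlinkOpenSourceSql,
		RunMode:   flinkjob.RunModeSharedCluster,
		QueueName: "demo_queue", // hypothetical queue
		SqlBody:   "...",        // source/query/sink SQL goes here
		CuNumber:  &cu,
	}
	resp, err := flinkjob.CreateSqlJob(client, opts)
	if err != nil {
		return err
	}
	// CreateJobResp (see results.go below) carries the new job's ID and status.
	fmt.Printf("job %d: %s\n", resp.Job.JobId, resp.Job.StatusName)
	return nil
}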
results.go
Source: results.go
package flinkjob

type CreateJobResp struct {
	IsSuccess bool      `json:"is_success,string"`
	Message   string    `json:"message"`
	Job       JobStatus `json:"job"`
}

type JobStatus struct {
	JobId      int    `json:"job_id"`
	StatusName string `json:"status_name"`
	StatusDesc string `json:"status_desc"`
}

type UpdateJobResp struct {
	IsSuccess bool              `json:"is_success,string"`
	Message   string            `json:"message"`
	Job       UpdateJobResp_job `json:"job"`
}

type UpdateJobResp_job struct {
	UpdateTime int `json:"update_time"`
}

type CommonResp struct {
	IsSuccess bool   `json:"is_success,string"`
	Message   string `json:"message"`
}

type GetJobResp struct {
	IsSuccess bool   `json:"is_success,string"`
	Message   string `json:"message"`
	JobDetail Job    `json:"job_detail"`
}

type Job struct {
	// Job ID.
	JobId int `json:"job_id"`
	// Name of the job. Length range: 0 to 57 characters.
	Name string `json:"name"`
	// Job description. Length range: 0 to 512 characters.
	Desc string `json:"desc"`
	// Job type.
	// flink_sql_job: Flink SQL job
	// flink_opensource_sql_job: Flink OpenSource SQL job
	// flink_jar_job: user-defined Flink job
	JobType string `json:"job_type"`
	// Job status. Available job statuses are as follows:
	// job_init: the job is in the draft status.
	// job_submitting: the job is being submitted.
	// job_submit_fail: the job failed to be submitted.
	// job_running: the job is running (billing starts; a normal result was returned after submission).
	// job_running_exception: the job stopped running due to an exception (billing stops).
	// job_downloading: the job is being downloaded.
	// job_idle: the job is idle.
	// job_canceling: the job is being stopped.
	// job_cancel_success: the job has been stopped.
	// job_cancel_fail: the job failed to be stopped.
	// job_savepointing: the savepoint is being created.
	// job_arrearage_stopped: the job was stopped because the account is in arrears (billing stops).
	// job_arrearage_recovering: the account in arrears was recharged and the job is being restored.
	// job_finish: the job is completed.
	Status string `json:"status"`
	// Description of the job status.
	StatusDesc string `json:"status_desc"`
	// Time when the job was created.
	CreateTime int `json:"create_time"`
	// Time when the job was started.
	StartTime int `json:"start_time"`
	// ID of the user who created the job.
	UserId string `json:"user_id"`
	// Name of a queue. Length range: 1 to 128 characters.
	QueueName string `json:"queue_name"`
	// ID of the project to which the job belongs.
	ProjectId string `json:"project_id"`
	// Stream SQL statement.
	SqlBody string `json:"sql_body"`
	// Job running mode. The options are as follows:
	// shared_cluster: the job runs on a shared cluster.
	// exclusive_cluster: the job runs on an exclusive cluster.
	// edge_node: the job runs on an edge node.
	RunMode string `json:"run_mode"`
	// Job configurations. Refer to Table 4 for details.
	JobConfig JobConf `json:"job_config"`
	// Main class of a JAR package, for example, org.apache.spark.examples.streaming.JavaQueueStream.
	MainClass string `json:"main_class"`
	// Running parameters of a JAR package job. Multiple parameters are separated by spaces.
	EntrypointArgs string `json:"entrypoint_args"`
	// Job execution plan.
	ExecutionGraph string `json:"execution_graph"`
	// Time when the job was updated.
	UpdateTime int `json:"update_time"`
	// User-defined job feature. Type of the Flink image used by a job.
	// basic: the basic Flink image provided by DLI is used.
	// custom: a user-defined Flink image is used.
	Feature string `json:"feature"`
	// Flink version. This parameter is valid only when feature is set to basic. You can use this parameter with the
	// feature parameter to specify the version of the DLI basic Flink image used for job running.
	FlinkVersion string `json:"flink_version"`
	// Custom image. The format is Organization name/Image name:Image version.
	// This parameter is valid only when feature is set to custom. You can use this parameter with the feature
	// parameter to specify a user-defined Flink image for job running.
	Image string `json:"image"`
}

type JobConfBase struct {
	// Whether the automatic job snapshot function is enabled.
	// true: enabled. false: disabled. The default value is false.
	CheckpointEnabled bool `json:"checkpoint_enabled"`
	// Snapshot mode. There are two options:
	// exactly_once: data is processed only once.
	// at_least_once: data is processed at least once.
	// The default value is exactly_once.
	CheckpointMode string `json:"checkpoint_mode"`
	// Snapshot interval. The unit is second. The default value is 10.
	CheckpointInterval int `json:"checkpoint_interval"`
	// Whether the log storage function is enabled. The default value is false.
	LogEnabled bool `json:"log_enabled"`
	// Name of an OBS bucket.
	ObsBucket string `json:"obs_bucket"`
	// SMN topic name. If a job fails, the system will send a message to users subscribed to the SMN topic.
	SmnTopic string `json:"smn_topic"`
	// Parent job ID.
	RootId int `json:"root_id"`
	// List of edge computing group IDs. Use commas (,) to separate multiple IDs.
	EdgeGroupIds []string `json:"edge_group_ids"`
	// Number of CUs of the management unit. The default value is 1.
	ManagerCuNumber int `json:"manager_cu_number"`
	// Number of CUs selected for a job. This parameter is valid only when show_detail is set to true.
	// Minimum value: 2. Maximum value: 400. The default value is 2.
	CuNumber int `json:"cu_number"`
	// Number of concurrent jobs set by a user. This parameter is valid only when show_detail is set to true.
	// Minimum value: 1. Maximum value: 2000. The default value is 1.
	ParallelNumber int `json:"parallel_number"`
	// Whether restart upon exceptions is enabled.
	RestartWhenException bool `json:"restart_when_exception"`
	// Expiration time.
	IdleStateRetention int `json:"idle_state_retention"`
	// Name of the package that has been uploaded to the DLI resource management system. The UDF Jar file of the SQL
	// job is uploaded through this parameter.
	UdfJarUrl string `json:"udf_jar_url"`
	// Dirty data policy of a job.
	// 2:obsDir: Save. obsDir specifies the path for storing dirty data.
	// 1: Trigger a job exception.
	// 0: Ignore.
	DirtyDataStrategy string `json:"dirty_data_strategy"`
	// Name of the package that has been uploaded to the DLI resource management system.
	// This parameter is used to customize the JAR file where the job main class is located.
	Entrypoint string `json:"entrypoint"`
	// Name of the package that has been uploaded to the DLI resource management system.
	// This parameter is used to customize other dependency packages.
	DependencyJars []string `json:"dependency_jars"`
	// Name of the resource package that has been uploaded to the DLI resource management system.
	// This parameter is used to customize dependency files.
	DependencyFiles []string `json:"dependency_files"`
	// Number of compute nodes in a job.
	ExecutorNumber int `json:"executor_number"`
	// Number of CUs in a compute node.
	ExecutorCuNumber int `json:"executor_cu_number"`
	// Whether to restore data from the latest checkpoint when the system automatically restarts upon an exception.
	// The default value is false.
	ResumeCheckpoint bool   `json:"resume_checkpoint"`
	TmCus            int    `json:"tm_cus"`
	TmSlotNum        int    `json:"tm_slot_num"`
	ResumeMaxNum     int    `json:"resume_max_num"`
	CheckpointPath   string `json:"checkpoint_path"`
	Feature          string `json:"feature"`
	FlinkVersion     string `json:"flink_version"`
	Image            string `json:"image"`
}

type ListResp struct {
	IsSuccess bool          `json:"is_success,string"`
	Message   string        `json:"message"`
	JobList   JobListWapper `json:"job_list"`
}

type JobListWapper struct {
	TotalCount int        `json:"total_count"`
	Jobs       []Job4List `json:"jobs"`
}

type Job4List struct {
	JobId int    `json:"job_id"`
	Name  string `json:"name"`
	// Job description. Length range: 0 to 512 characters.
	Desc       string `json:"desc"`
	Username   string `json:"username"`
	JobType    string `json:"job_type"`
	Status     string `json:"status"`
	StatusDesc string `json:"status_desc"`
	CreateTime int    `json:"create_time"`
	StartTime  int    `json:"start_time"`
	// Running duration of a job. Unit: ms. This parameter is valid only when show_detail is set to false.
	Duration int `json:"duration"`
	// Parent job ID. This parameter is valid only when show_detail is set to false.
	RootId int `json:"root_id"`
	// ID of the user who created the job. This parameter is valid only when show_detail is set to true.
	UserId string `json:"user_id"`
	// This parameter is valid only when show_detail is set to true.
	ProjectId string `json:"project_id"`
	// Stream SQL statement. This parameter is valid only when show_detail is set to false.
	SqlBody string `json:"sql_body"`
	// Job running mode. The value can be shared_cluster, exclusive_cluster, or edge_node.
	// This parameter is valid only when show_detail is set to true.
	// shared_cluster: the job runs on a shared cluster.
	// exclusive_cluster: the job runs on an exclusive cluster.
	// edge_node: the job runs on an edge node.
	RunMode string `json:"run_mode"`
	// Job configuration. This parameter is valid only when show_detail is set to false.
	JobConfig JobConfBase `json:"job_config"`
	// Main class of a JAR package. This parameter is valid only when show_detail is set to false.
	MainClass string `json:"main_class"`
	// Job running parameters of the JAR file. Multiple parameters are separated by spaces.
	// This parameter is valid only when show_detail is set to true.
	EntrypointArgs string `json:"entrypoint_args"`
	// Job execution plan. This parameter is valid only when show_detail is set to false.
	ExecutionGraph string `json:"execution_graph"`
	// Time when the job was updated. This parameter is valid only when show_detail is set to false.
	UpdateTime int `json:"update_time"`
}

type JobConf struct {
	JobConfBase
	// Customizes optimization parameters when a Flink job is running.
	RuntimeConfig string `json:"runtime_config"`
}

type DliError struct {
	ErrorCode string `json:"error_code"`
	ErrorMsg  string `json:"error_msg"`
}
...
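One detail worth noting in these response types is the `json:"is_success,string"` tag: DLI returns the success flag as the JSON string "true"/"false", and the ,string option tells encoding/json to decode it into a bool. A self-contained sketch (the payload values are made up; the types are local copies of the ones above):

package main

import (
	"encoding/json"
	"fmt"
)

type JobStatus struct {
	JobId      int    `json:"job_id"`
	StatusName string `json:"status_name"`
	StatusDesc string `json:"status_desc"`
}

type CreateJobResp struct {
	IsSuccess bool      `json:"is_success,string"`
	Message   string    `json:"message"`
	Job       JobStatus `json:"job"`
}

func main() {
	// The ",string" tag converts the quoted "true" into a Go bool.
	payload := []byte(`{"is_success":"true","message":"","job":{"job_id":123,"status_name":"job_init","status_desc":""}}`)
	var resp CreateJobResp
	if err := json.Unmarshal(payload, &resp); err != nil {
		panic(err)
	}
	fmt.Println(resp.IsSuccess, resp.Job.JobId, resp.Job.StatusName)
}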
createJobResp
Using AI Code Generation
1import "fmt"2func main() {3 j.createJobResp()4}5import "fmt"6func main() {7 j.createJobResp()8}9import "fmt"10func main() {11 j.createJobResp()12}13import "fmt"14func main() {15 j.createJobResp()16}17import "fmt"18func main() {19 j.createJobResp()20}21import "fmt"22func main() {23 j.createJobResp()24}25import "fmt"26func main() {27 j.createJobResp()28}29import "fmt"30func main() {31 j.createJobResp()32}33import "fmt"34func main() {35 j.createJobResp()36}37import "fmt"38func main() {39 j.createJobResp()40}41import "fmt"42func main() {43 j.createJobResp()44}45import "fmt"46func main() {47 j.createJobResp()48}
createJobResp
Using AI Code Generation
1import (2func main() {3 fmt.Println("Hello, playground")4 createJobResp()5}6import (7func createJobResp() {8 fmt.Println("createJobResp")9}10func main() {11 fmt.Println("Hello, playground")12}13import (14func CreateJobResp() {15 fmt.Println("createJobResp")16}17func main() {18 fmt.Println("Hello, playground")19}20import (21func createJobResp() {22 fmt.Println("createJobResp")23}24func main() {25 fmt.Println("Hello, playground")26}27import (28func main() {29 fmt.Println("Hello, playground")30 main.CreateJobResp()31}
createJobResp
Using AI Code Generation
package main

import "fmt"

// CreateJobResp is exported here for illustration. Files in the same
// package call it directly, without a qualifier; main.CreateJobResp()
// is not valid Go because package main cannot be imported.
func CreateJobResp() {
	fmt.Println("CreateJobResp")
}

func main() {
	fmt.Println("This is main.go file")
	CreateJobResp()
}
createJobResp
Using AI Code Generation
package main

import "fmt"

// Job mirrors the test.Job struct from the original snippet; it is declared
// in package main here because its unexported createJobResp method would not
// be callable from outside the package that defines it.
type Job struct {
	Title   string
	Company string
	Salary  int
}

func (j Job) createJobResp() string {
	return fmt.Sprintf("created job %q at %s (salary %d)", j.Title, j.Company, j.Salary)
}

func main() {
	j := Job{Title: "Software Engineer", Company: "Google", Salary: 10000}
	fmt.Println(j.createJobResp())
}

In this example, the Job struct defines a createJobResp() method that returns a string, and main constructs a Job value and prints the result of calling that method. Because an unexported method such as createJobResp() can only be called from the package that declares it, Job and its method live in package main here rather than in a separate test package.
createJobResp
Using AI Code Generation
package main

import "fmt"

// The original snippet repeated this fragment nine times with an empty
// import block; a single runnable version follows.
func createJobResp() {
	fmt.Println("createJobResp")
}

func main() {
	fmt.Println("Hello, 世界")
	createJobResp()
}
createJobResp
Using AI Code Generation
package main

// The original snippet omitted its imports and called req.Do() without
// arguments; this version fills in the go-elasticsearch esapi usage.
// The module version (v8) and the index name "jobs" are assumptions.
import (
	"context"
	"log"

	"github.com/elastic/go-elasticsearch/v8"
	"github.com/elastic/go-elasticsearch/v8/esapi"
)

type Main struct{}

func (m *Main) createJobResp(es *elasticsearch.Client) {
	req := esapi.IndicesCreateRequest{Index: "jobs"}
	res, err := req.Do(context.Background(), es)
	if err != nil {
		log.Fatal(err)
	}
	defer res.Body.Close()
}

func main() {
	es, err := elasticsearch.NewDefaultClient()
	if err != nil {
		log.Fatal(err)
	}
	m := Main{}
	m.createJobResp(es)
}
createJobResp
Using AI Code Generation
// 2.go — same package main as main.go, so package-level functions are
// called directly, without a "main." qualifier; a package may also define
// only one main function, so the extra entry points become helpers.
package main

import "fmt"

func run2() {
	createJobResp()
	createJob()
	fmt.Println("main")
}

// 3.go
package main

import "fmt"

func run3() {
	createJobResp()
	fmt.Println("main")
}

Now I want to use the createJobResp() function of the main.go file in both the 2.go file and the 3.go file. How can I do that?
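A minimal sketch of one answer, assuming all three files sit in the same directory and package: Go makes every package-level identifier visible to all files of the same package, so 2.go and 3.go (shown above with the hypothetical helpers run2 and run3) can call createJobResp() with no import and no qualifier. To call the function from a different package, it would have to be exported as CreateJobResp and moved out of package main, because package main cannot be imported.

// main.go — declares the functions once at package level.
package main

import "fmt"

func createJobResp() { fmt.Println("createJobResp") }
func createJob()     { fmt.Println("createJob") }

func main() {
	run2() // defined in 2.go
	run3() // defined in 3.go
}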