/linux-6.6.21/scripts/

  jobserver-exec
      15  jobs = b""   variable
      48  jobs += slot
      54  if len(jobs):
      55  os.write(writer, jobs)
      59  claim = len(jobs) + 1
      74  if len(jobs):
      75  os.write(writer, jobs)

  generate_initcall_order.pl
      18  my $jobs = {}; # child process pid -> file handle
     169  if (!exists($jobs->{$pid})) {
     173  my $fh = $jobs->{$pid};
     181  delete($jobs->{$pid});
     202  $jobs->{$pid} = $fh;
     213  if (scalar(keys(%{$jobs})) >= $njobs) {
     219  while (scalar(keys(%{$jobs})) > 0) {
/linux-6.6.21/drivers/gpu/drm/panfrost/

  panfrost_job.c
     159  struct panfrost_job *job = pfdev->jobs[slot][0];   in panfrost_dequeue_job()
     162  pfdev->jobs[slot][0] = pfdev->jobs[slot][1];   in panfrost_dequeue_job()
     163  pfdev->jobs[slot][1] = NULL;   in panfrost_dequeue_job()
     175  if (!pfdev->jobs[slot][0]) {   in panfrost_enqueue_job()
     176  pfdev->jobs[slot][0] = job;   in panfrost_enqueue_job()
     180  WARN_ON(pfdev->jobs[slot][1]);   in panfrost_enqueue_job()
     181  pfdev->jobs[slot][1] = job;   in panfrost_enqueue_job()
     183  panfrost_get_job_chain_flag(pfdev->jobs[slot][0]));   in panfrost_enqueue_job()
     525  } else if (pfdev->jobs[j][0] && !(js_state & MK_JS_MASK(j))) {   in panfrost_job_handle_irq()
     552  if (!failed[j] || !pfdev->jobs[j][0])   in panfrost_job_handle_irq()
     [all …]

  TODO
      11  - Compute job support. So called 'compute only' jobs need to be plumbed up to

  panfrost_device.h
     106  struct panfrost_job *jobs[NUM_JOB_SLOTS][2];   member
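Read together, the panfrost hits outline a two-deep queue per hardware job slot (the jobs[NUM_JOB_SLOTS][2] member above): a new job takes entry 0 if the slot is idle, otherwise entry 1, and finishing a job shifts entry 1 down into entry 0. A stripped-down sketch of that bookkeeping, with illustrative names and none of the driver's locking or hardware handling:

    #define NUM_JOB_SLOTS 3                 /* illustrative slot count */

    struct job;                             /* opaque placeholder for a queued job */

    /* Two-deep queue per slot: [0] is on the hardware, [1] is queued behind it. */
    static struct job *slot_jobs[NUM_JOB_SLOTS][2];

    static void enqueue_job(int slot, struct job *job)
    {
        if (!slot_jobs[slot][0]) {
            slot_jobs[slot][0] = job;       /* slot idle: job starts right away */
            return;
        }
        slot_jobs[slot][1] = job;           /* at most one job may wait behind it */
    }

    static struct job *dequeue_job(int slot)
    {
        struct job *done = slot_jobs[slot][0];

        slot_jobs[slot][0] = slot_jobs[slot][1];    /* promote the queued job */
        slot_jobs[slot][1] = NULL;
        return done;
    }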
/linux-6.6.21/drivers/md/

  dm-kcopyd.c
     417  static struct kcopyd_job *pop_io_job(struct list_head *jobs,   in pop_io_job()   argument
     426  list_for_each_entry(job, jobs, list) {   in pop_io_job()
     443  static struct kcopyd_job *pop(struct list_head *jobs,   in pop()   argument
     450  if (!list_empty(jobs)) {   in pop()
     451  if (jobs == &kc->io_jobs)   in pop()
     452  job = pop_io_job(jobs, kc);   in pop()
     454  job = list_entry(jobs->next, struct kcopyd_job, list);   in pop()
     463  static void push(struct list_head *jobs, struct kcopyd_job *job)   in push()   argument
     469  list_add_tail(&job->list, jobs);   in push()
     474  static void push_head(struct list_head *jobs, struct kcopyd_job *job)   in push_head()   argument
     [all …]
/linux-6.6.21/Documentation/core-api/

  padata.rst
       9  Padata is a mechanism by which the kernel can farm jobs out to be done in
      16  Padata also supports multithreaded jobs, splitting up the job evenly while load
      25  The first step in using padata to run serialized jobs is to set up a
      26  padata_instance structure for overall control of how jobs are to be run::
      39  jobs to be serialized independently. A padata_instance may have one or more
      40  padata_shells associated with it, each allowing a separate series of jobs.
      45  The CPUs used to run jobs can be changed in two ways, programmatically with
      52  parallel cpumask describes which processors will be used to execute jobs
     116  true parallelism is achieved by submitting multiple jobs. parallel() runs with
     141  pains to ensure that jobs are completed in the order in which they were
     [all …]
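The padata.rst fragments outline the serialized-job flow: set up a padata_instance, attach a padata_shell, submit each job with a parallel() callback, and hand it back with padata_do_serial() so completions are delivered in submission order. A minimal sketch of that flow, assuming the padata API declared in include/linux/padata.h (padata_alloc(), padata_alloc_shell(), padata_do_parallel(), padata_do_serial()); struct my_work and its callbacks are illustrative, not taken from the document:

    #include <linux/padata.h>
    #include <linux/container_of.h>

    /* Illustrative wrapper: embed padata_priv inside the caller's own job state. */
    struct my_work {
        struct padata_priv padata;
        void *data;
    };

    static void process_data(void *data)
    {
        /* the actual CPU-intensive work for one job */
    }

    static void my_parallel(struct padata_priv *padata)
    {
        struct my_work *work = container_of(padata, struct my_work, padata);

        process_data(work->data);       /* runs on one CPU of the parallel cpumask */
        padata_do_serial(padata);       /* hand the job back for in-order completion */
    }

    static void my_serial(struct padata_priv *padata)
    {
        /* runs once per job, in the original submission order */
    }

    static int my_submit(struct padata_shell *ps, struct my_work *work)
    {
        int cb_cpu = 0;                 /* must lie in the instance's serial cpumask */

        work->padata.parallel = my_parallel;
        work->padata.serial = my_serial;
        return padata_do_parallel(ps, &work->padata, &cb_cpu);
    }

Setup would be padata_alloc() followed by padata_alloc_shell(); teardown runs the other way, padata_free_shell() then padata_free().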
/linux-6.6.21/drivers/gpu/drm/amd/amdgpu/

  amdgpu_cs.c
     292  num_ibs[i], &p->jobs[i]);   in amdgpu_cs_pass1()
     296  p->gang_leader = p->jobs[p->gang_leader_idx];   in amdgpu_cs_pass1()
     343  job = p->jobs[r];   in amdgpu_cs_p2_ib()
     578  p->jobs[i]->shadow_va = shadow->shadow_va;   in amdgpu_cs_p2_shadow()
     579  p->jobs[i]->csa_va = shadow->csa_va;   in amdgpu_cs_p2_shadow()
     580  p->jobs[i]->gds_va = shadow->gds_va;   in amdgpu_cs_p2_shadow()
     581  p->jobs[i]->init_shadow =   in amdgpu_cs_p2_shadow()
     980  amdgpu_job_set_resources(p->jobs[i], p->bo_list->gds_obj,   in amdgpu_cs_parser_bos()
    1008  struct amdgpu_job *job = p->jobs[i];   in trace_amdgpu_cs_ibs()
    1078  r = amdgpu_cs_patch_ibs(p, p->jobs[i]);   in amdgpu_cs_patch_jobs()
     [all …]

  amdgpu_cs.h
      62  struct amdgpu_job *jobs[AMDGPU_CS_GANG_SIZE];   member
/linux-6.6.21/tools/testing/kunit/

  kunit.py
      46  jobs: int
      92  success = linux.build_kernel(request.jobs,
     448  jobs=cli_args.jobs,
     484  jobs=cli_args.jobs)

  kunit_kernel.py
      74  def make(self, jobs: int, build_dir: str, make_options: Optional[List[str]]) -> None:
      75  command = ['make', 'ARCH=' + self._linux_arch, 'O=' + build_dir, '--jobs=' + str(jobs)]
     324  def build_kernel(self, jobs: int, build_dir: str, make_options: Optional[List[str]]) -> bool:
     327  self._ops.make(jobs, build_dir, make_options)
/linux-6.6.21/Documentation/admin-guide/device-mapper/

  kcopyd.rst
      10  to set aside for their copy jobs. This is done with a call to
      43  When a user is done with all their copy jobs, they should call
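Both kcopyd.rst lines are cut off mid-sentence before naming the calls. As a hedged sketch of how a kcopyd user is typically wired up, assuming the client API declared in include/linux/dm-kcopyd.h (dm_kcopyd_client_create(), dm_kcopyd_copy(), dm_kcopyd_client_destroy()); the regions, callback, and helper below are illustrative only:

    #include <linux/dm-kcopyd.h>
    #include <linux/dm-io.h>
    #include <linux/err.h>

    /* Completion callback: read_err/write_err flag failed source/destination I/O. */
    static void copy_done(int read_err, unsigned long write_err, void *context)
    {
        /* wake up or complete() whoever queued the copy */
    }

    static int copy_region(struct block_device *src_bdev,
                           struct block_device *dst_bdev,
                           sector_t sector, sector_t count, void *context)
    {
        struct dm_kcopyd_client *kc;
        struct dm_io_region from, to;

        /*
         * Reserve kcopyd resources for this client's copy jobs (normally done
         * once at constructor time, not per copy).  NULL means no throttling.
         */
        kc = dm_kcopyd_client_create(NULL);
        if (IS_ERR(kc))
            return PTR_ERR(kc);

        from.bdev = src_bdev;
        from.sector = sector;
        from.count = count;

        to = from;
        to.bdev = dst_bdev;

        /* One source, one destination; copy_done() runs when the job completes. */
        dm_kcopyd_copy(kc, &from, 1, &to, 0, copy_done, context);

        /* ...and once all copy jobs are finished: dm_kcopyd_client_destroy(kc); */
        return 0;
    }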
/linux-6.6.21/Documentation/gpu/

  automated_testing.rst
      92  4. The various jobs will be run and when the pipeline is finished, all jobs
     131  jobs from a branch in the target tree that is named as
     142  otherwise pass, one can disable all jobs that would be submitted to that farm
/linux-6.6.21/drivers/gpu/drm/ci/

  gitlab-ci.yml
     150  # When to automatically run the CI for build jobs
     154  # Run automatically once all dependency jobs have passed
     188  # Allow triggering jobs manually in other cases
     250  .required-for-hardware-jobs:

  container.yml
      22  # Disable container jobs that we won't use
/linux-6.6.21/tools/testing/selftests/net/

  udpgso_bench.sh
      52  if [[ "${jobs}" != "" ]]; then

  udpgro_bench.sh
      14  [ -n "${jobs}" ] && kill -INT ${jobs} 2>/dev/null

  udpgro_frglist.sh
      14  [ -n "${jobs}" ] && kill -INT ${jobs} 2>/dev/null

  udpgro.sh
      22  [ -n "${jobs}" ] && kill -1 ${jobs} 2>/dev/null
/linux-6.6.21/Documentation/dev-tools/kunit/

  run_wrapper.rst
      33  ./tools/testing/kunit/kunit.py run --timeout=30 --jobs=`nproc --all`
      36  - ``--jobs`` sets the number of threads to build the kernel.
     231  --jobs=12 \
     304  - ``--jobs``: Specifies the number of jobs (commands) to run simultaneously.
/linux-6.6.21/drivers/net/wireless/cisco/

  airo.c
    1205  unsigned long jobs;   member
    1335  clear_bit(JOB_MIC, &ai->jobs);   in micinit()
    1893  clear_bit(JOB_DIE, &ai->jobs);   in airo_open()
    1905  set_bit(JOB_DIE, &ai->jobs);   in airo_open()
    2110  clear_bit(JOB_XMIT, &priv->jobs);   in airo_end_xmit()
    2166  set_bit(JOB_XMIT, &priv->jobs);   in airo_start_xmit()
    2182  clear_bit(JOB_XMIT11, &priv->jobs);   in airo_end_xmit11()
    2245  set_bit(JOB_XMIT11, &priv->jobs);   in airo_start_xmit11()
    2258  clear_bit(JOB_STATS, &ai->jobs);   in airo_read_stats()
    2290  if (!test_bit(JOB_STATS, &local->jobs)) {   in airo_get_stats()
     [all …]
/linux-6.6.21/Documentation/admin-guide/cgroup-v1/

  cpusets.rst
      87  can benefit from explicitly placing jobs on properly sized subsets of
     100  executing jobs. The location of the running jobs pages may also be moved
     252  jobs can share common kernel data, such as file system pages, while
     254  construct a large mem_exclusive cpuset to hold all the jobs, and
     268  This enables batch managers monitoring jobs running in dedicated
     273  submitted jobs, which may choose to terminate or re-prioritize jobs that
     276  computing jobs that will dramatically fail to meet required performance
     381  This policy can provide substantial improvements for jobs that need
     384  the several nodes in the jobs cpuset in order to fit. Without this
     385  policy, especially for jobs that might have one thread reading in the
     [all …]

  memcg_test.rst
     223  run jobs under child_a and child_b
     225  create/delete following groups at random while jobs are running::
     231  running new jobs in new group is also good.
/linux-6.6.21/Documentation/translations/zh_CN/mm/

  hwpoison.rst
     127  echo `jobs -p` > /sys/fs/cgroup/mem/hwpoison/tasks
/linux-6.6.21/Documentation/driver-api/tty/

  index.rst
      25  implementing echoes, signal handling, jobs control, special characters