
Searched refs:jobs (Results 1 – 25 of 52) sorted by relevance


/linux-6.1.9/scripts/
jobserver-exec
15 jobs = b"" variable
37 jobs += slot
43 if len(jobs):
44 os.write(writer, jobs)
48 claim = len(jobs) + 1
63 if len(jobs):
64 os.write(writer, jobs)
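
These hits are the core of the GNU make jobserver handshake that scripts/jobserver-exec speaks: each free job slot is one byte read from the jobserver pipe, and every byte claimed must be written back once the work finishes. Below is a rough userspace sketch of that claim-and-return cycle in C, assuming the --jobserver-auth=R,W pipe form that make advertises through MAKEFLAGS; it is an illustration of the protocol, not the kernel script itself.

/* Hypothetical userspace sketch of the jobserver token protocol; not the
 * kernel's scripts/jobserver-exec itself. */
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	const char *flags = getenv("MAKEFLAGS");
	const char *auth = flags ? strstr(flags, "--jobserver-auth=") : NULL;
	int rfd, wfd;
	char tokens[64];
	ssize_t claimed;

	if (!auth || sscanf(auth, "--jobserver-auth=%d,%d", &rfd, &wfd) != 2)
		return 1;	/* no jobserver: run with the one implicit slot */

	/* Claim whatever slots are free right now; each token is one byte. */
	fcntl(rfd, F_SETFL, fcntl(rfd, F_GETFL) | O_NONBLOCK);
	claimed = read(rfd, tokens, sizeof(tokens));
	if (claimed < 0)
		claimed = 0;

	printf("running with %ld job slot(s)\n", (long)claimed + 1);

	/* ... do the parallel work here ... */

	/* Hand every claimed token back so sibling jobs can use them. */
	if (claimed > 0)
		write(wfd, tokens, claimed);
	return 0;
}
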
generate_initcall_order.pl
18 my $jobs = {}; # child process pid -> file handle
169 if (!exists($jobs->{$pid})) {
173 my $fh = $jobs->{$pid};
181 delete($jobs->{$pid});
202 $jobs->{$pid} = $fh;
213 if (scalar(keys(%{$jobs})) >= $njobs) {
219 while (scalar(keys(%{$jobs})) > 0) {
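
generate_initcall_order.pl tracks its children in a pid-to-filehandle hash and stops forking once $njobs of them are in flight, reaping one before starting the next. A minimal C illustration of that throttling loop follows; do_one_file() is a made-up stand-in for the per-file work.

/* Sketch of the "at most $njobs children in flight" pattern; do_one_file()
 * is a hypothetical stand-in for the real per-file work. */
#include <stdio.h>
#include <sys/wait.h>
#include <unistd.h>

static void do_one_file(int idx)
{
	printf("pid %d handling file %d\n", (int)getpid(), idx);
}

int main(void)
{
	const int njobs = 4;		/* parallelism cap, like $njobs in the script */
	const int nfiles = 16;
	int running = 0;

	for (int i = 0; i < nfiles; i++) {
		if (running >= njobs) {		/* pool full: reap one child first */
			wait(NULL);
			running--;
		}
		pid_t pid = fork();
		if (pid == 0) {
			do_one_file(i);
			_exit(0);
		}
		if (pid > 0)
			running++;
	}
	while (running-- > 0)			/* drain the remaining children */
		wait(NULL);
	return 0;
}
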
/linux-6.1.9/drivers/gpu/drm/panfrost/
panfrost_job.c
159 struct panfrost_job *job = pfdev->jobs[slot][0]; in panfrost_dequeue_job()
162 pfdev->jobs[slot][0] = pfdev->jobs[slot][1]; in panfrost_dequeue_job()
163 pfdev->jobs[slot][1] = NULL; in panfrost_dequeue_job()
175 if (!pfdev->jobs[slot][0]) { in panfrost_enqueue_job()
176 pfdev->jobs[slot][0] = job; in panfrost_enqueue_job()
180 WARN_ON(pfdev->jobs[slot][1]); in panfrost_enqueue_job()
181 pfdev->jobs[slot][1] = job; in panfrost_enqueue_job()
183 panfrost_get_job_chain_flag(pfdev->jobs[slot][0])); in panfrost_enqueue_job()
525 } else if (pfdev->jobs[j][0] && !(js_state & MK_JS_MASK(j))) { in panfrost_job_handle_irq()
552 if (!failed[j] || !pfdev->jobs[j][0]) in panfrost_job_handle_irq()
[all …]
TODO
11 - Compute job support. So called 'compute only' jobs need to be plumbed up to
panfrost_device.h
105 struct panfrost_job *jobs[NUM_JOB_SLOTS][2]; member
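
The jobs[NUM_JOB_SLOTS][2] member is a two-deep queue per hardware job slot: entry 0 is the job currently on the hardware, entry 1 the one queued behind it, and panfrost_dequeue_job() shifts the second into the first. A stripped-down, stand-alone model of that shape, with types and names simplified from the driver:

/* Simplified model of the jobs[NUM_JOB_SLOTS][2] double-buffering shown above. */
#include <stdbool.h>
#include <stddef.h>

#define NUM_JOB_SLOTS 3

struct job;					/* opaque for this sketch */

struct job_queue {
	struct job *jobs[NUM_JOB_SLOTS][2];	/* [0] = running, [1] = queued next */
};

/* Returns true if the job could be accepted on this slot. */
bool enqueue_job(struct job_queue *q, int slot, struct job *job)
{
	if (!q->jobs[slot][0]) {
		q->jobs[slot][0] = job;		/* slot idle: job starts immediately */
		return true;
	}
	if (!q->jobs[slot][1]) {
		q->jobs[slot][1] = job;		/* one job running: park this one behind it */
		return true;
	}
	return false;				/* both entries busy */
}

/* Pops the finished job and promotes the queued one, like panfrost_dequeue_job(). */
struct job *dequeue_job(struct job_queue *q, int slot)
{
	struct job *done = q->jobs[slot][0];

	q->jobs[slot][0] = q->jobs[slot][1];
	q->jobs[slot][1] = NULL;
	return done;
}
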
/linux-6.1.9/drivers/md/
dm-kcopyd.c
411 static struct kcopyd_job *pop_io_job(struct list_head *jobs, in pop_io_job() argument
420 list_for_each_entry(job, jobs, list) { in pop_io_job()
437 static struct kcopyd_job *pop(struct list_head *jobs, in pop() argument
444 if (!list_empty(jobs)) { in pop()
445 if (jobs == &kc->io_jobs) in pop()
446 job = pop_io_job(jobs, kc); in pop()
448 job = list_entry(jobs->next, struct kcopyd_job, list); in pop()
457 static void push(struct list_head *jobs, struct kcopyd_job *job) in push() argument
463 list_add_tail(&job->list, jobs); in push()
468 static void push_head(struct list_head *jobs, struct kcopyd_job *job) in push_head() argument
[all …]
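
dm-kcopyd keeps its pending jobs on plain linked lists and moves them between its queues (such as kc->io_jobs) with push(), push_head() and pop(); pop_io_job() is a specialised pop that scans the io list for a suitable entry. A self-contained userspace approximation of that push/pop pattern, using a hand-rolled list in place of <linux/list.h> and the spinlock:

/* Userspace approximation of kcopyd's push()/push_head()/pop() job lists. */
#include <stddef.h>

struct kjob {
	struct kjob *next;
	/* ... payload such as source/destination regions would live here ... */
};

struct job_list {
	struct kjob *head;
	struct kjob *tail;
};

void push(struct job_list *l, struct kjob *job)		/* append, like list_add_tail() */
{
	job->next = NULL;
	if (l->tail)
		l->tail->next = job;
	else
		l->head = job;
	l->tail = job;
}

void push_head(struct job_list *l, struct kjob *job)	/* requeue at the front */
{
	job->next = l->head;
	l->head = job;
	if (!l->tail)
		l->tail = job;
}

struct kjob *pop(struct job_list *l)			/* take the oldest job, or NULL */
{
	struct kjob *job = l->head;

	if (job) {
		l->head = job->next;
		if (!l->head)
			l->tail = NULL;
	}
	return job;
}
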
/linux-6.1.9/Documentation/core-api/
padata.rst
9 Padata is a mechanism by which the kernel can farm jobs out to be done in
16 Padata also supports multithreaded jobs, splitting up the job evenly while load
25 The first step in using padata to run serialized jobs is to set up a
26 padata_instance structure for overall control of how jobs are to be run::
39 jobs to be serialized independently. A padata_instance may have one or more
40 padata_shells associated with it, each allowing a separate series of jobs.
45 The CPUs used to run jobs can be changed in two ways, programatically with
52 parallel cpumask describes which processors will be used to execute jobs
116 true parallelism is achieved by submitting multiple jobs. parallel() runs with
141 pains to ensure that jobs are completed in the order in which they were
[all …]
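
The padata.rst excerpt outlines the serialized-job API: allocate a padata_instance with padata_alloc(), attach one or more padata_shells with padata_alloc_shell(), then submit padata_priv jobs whose parallel() callbacks run concurrently while serial() completions are delivered in submission order. A hedged kernel-side sketch of one such submission, based on the prototypes in include/linux/padata.h, with setup, teardown and most error handling trimmed:

/* Sketch of submitting one serialized padata job; prototypes taken from
 * include/linux/padata.h, module boilerplate omitted. */
#include <linux/padata.h>
#include <linux/slab.h>

struct my_job {
	struct padata_priv padata;	/* must be embedded in the job */
	int payload;
};

static void my_parallel(struct padata_priv *padata)
{
	struct my_job *job = container_of(padata, struct my_job, padata);

	job->payload *= 2;		/* heavy work, possibly on another CPU */
	padata_do_serial(padata);	/* hand the job back for in-order completion */
}

static void my_serial(struct padata_priv *padata)
{
	struct my_job *job = container_of(padata, struct my_job, padata);

	/* completions arrive here in submission order */
	kfree(job);
}

static int submit_one(struct padata_shell *ps, int value)
{
	struct my_job *job = kzalloc(sizeof(*job), GFP_KERNEL);
	int cb_cpu = 0;			/* hint; padata picks another CPU if needed */

	if (!job)
		return -ENOMEM;
	job->payload = value;
	job->padata.parallel = my_parallel;
	job->padata.serial = my_serial;
	return padata_do_parallel(ps, &job->padata, &cb_cpu);
}
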
/linux-6.1.9/drivers/gpu/drm/amd/amdgpu/
amdgpu_cs.c
297 ret = amdgpu_job_alloc(p->adev, num_ibs[i], &p->jobs[i], vm); in amdgpu_cs_pass1()
301 ret = drm_sched_job_init(&p->jobs[i]->base, p->entities[i], in amdgpu_cs_pass1()
306 p->gang_leader = p->jobs[p->gang_leader_idx]; in amdgpu_cs_pass1()
353 job = p->jobs[r]; in amdgpu_cs_p2_ib()
980 amdgpu_job_set_resources(p->jobs[i], p->bo_list->gds_obj, in amdgpu_cs_parser_bos()
1011 struct amdgpu_job *job = p->jobs[i]; in trace_amdgpu_cs_ibs()
1082 r = amdgpu_cs_patch_ibs(p, p->jobs[i]); in amdgpu_cs_patch_jobs()
1157 job = p->jobs[i]; in amdgpu_cs_vm_handling()
1203 if (p->jobs[i] == leader) in amdgpu_cs_sync_rings()
1206 r = amdgpu_sync_clone(&leader->sync, &p->jobs[i]->sync); in amdgpu_cs_sync_rings()
[all …]
amdgpu_cs.h
59 struct amdgpu_job *jobs[AMDGPU_CS_GANG_SIZE]; member
/linux-6.1.9/tools/testing/kunit/
kunit.py
46 jobs: int
91 success = linux.build_kernel(request.jobs,
446 jobs=cli_args.jobs,
474 jobs=cli_args.jobs)
kunit_kernel.py
74 def make(self, jobs, build_dir: str, make_options) -> None: argument
75 command = ['make', 'ARCH=' + self._linux_arch, 'O=' + build_dir, '--jobs=' + str(jobs)]
323 def build_kernel(self, jobs, build_dir: str, make_options) -> bool: argument
326 self._ops.make(jobs, build_dir, make_options)
/linux-6.1.9/Documentation/admin-guide/device-mapper/
kcopyd.rst
10 to set aside for their copy jobs. This is done with a call to
43 When a user is done with all their copy jobs, they should call
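
kcopyd.rst describes the client lifecycle: create a client, submit copy jobs that each complete with a callback, and destroy the client once all jobs are done. The excerpted document still uses the older kcopyd_* names; the sketch below is hedged against the current prototypes in include/linux/dm-kcopyd.h and trims error handling.

/* Hedged sketch of one synchronous kcopyd copy job. */
#include <linux/dm-kcopyd.h>
#include <linux/dm-io.h>
#include <linux/completion.h>

static void copy_done(int read_err, unsigned long write_err, void *context)
{
	/* called once per submitted job, when all destinations are written */
	complete((struct completion *)context);
}

static int copy_one_region(struct block_device *src, struct block_device *dst,
			   sector_t sector, sector_t count)
{
	struct dm_kcopyd_client *kc;
	struct dm_io_region from, to;
	struct completion done;

	kc = dm_kcopyd_client_create(NULL);	/* NULL: no throttling */
	if (IS_ERR(kc))
		return PTR_ERR(kc);

	from = (struct dm_io_region){ .bdev = src, .sector = sector, .count = count };
	to   = (struct dm_io_region){ .bdev = dst, .sector = sector, .count = count };

	init_completion(&done);
	dm_kcopyd_copy(kc, &from, 1, &to, 0, copy_done, &done);
	wait_for_completion(&done);		/* wait for the copy job to finish */

	dm_kcopyd_client_destroy(kc);
	return 0;
}
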
/linux-6.1.9/tools/testing/selftests/net/
udpgso_bench.sh
51 if [[ "${jobs}" != "" ]]; then
udpgro_bench.sh
14 [ -n "${jobs}" ] && kill -INT ${jobs} 2>/dev/null
udpgro_frglist.sh
14 [ -n "${jobs}" ] && kill -INT ${jobs} 2>/dev/null
udpgro.sh
22 [ -n "${jobs}" ] && kill -1 ${jobs} 2>/dev/null
/linux-6.1.9/Documentation/dev-tools/kunit/
run_wrapper.rst
33 ./tools/testing/kunit/kunit.py run --timeout=30 --jobs=`nproc --all`
36 - ``--jobs`` sets the number of threads to build the kernel.
231 --jobs=12 \
304 - ``--jobs``: Specifies the number of jobs (commands) to run simultaneously.
/linux-6.1.9/drivers/net/wireless/cisco/
airo.c
1205 unsigned long jobs; member
1335 clear_bit(JOB_MIC, &ai->jobs); in micinit()
1893 clear_bit(JOB_DIE, &ai->jobs); in airo_open()
1905 set_bit(JOB_DIE, &ai->jobs); in airo_open()
2110 clear_bit(JOB_XMIT, &priv->jobs); in airo_end_xmit()
2166 set_bit(JOB_XMIT, &priv->jobs); in airo_start_xmit()
2182 clear_bit(JOB_XMIT11, &priv->jobs); in airo_end_xmit11()
2245 set_bit(JOB_XMIT11, &priv->jobs); in airo_start_xmit11()
2258 clear_bit(JOB_STATS, &ai->jobs); in airo_read_stats()
2290 if (!test_bit(JOB_STATS, &local->jobs)) { in airo_get_stats()
[all …]
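
In airo.c, jobs is a plain bit mask: interrupt handlers, timers and ioctls set_bit() the work they want done, a single kernel thread test_and_clear_bit()s each flag and performs it, and JOB_DIE tells that thread to exit. A minimal sketch of this deferred-work flag pattern; the job names are made up and the loop is simplified (the real driver sleeps on a wait queue rather than calling schedule() in a loop).

/* Sketch of the set_bit()/test_and_clear_bit() job-flag pattern used by airo.c. */
#include <linux/bitops.h>
#include <linux/printk.h>
#include <linux/sched.h>

enum { JOB_XMIT, JOB_STATS, JOB_DIE };		/* one bit per deferred task */

struct my_priv {
	unsigned long jobs;			/* pending-work bit mask */
};

static int my_worker(void *data)		/* kthread body */
{
	struct my_priv *priv = data;

	while (!test_bit(JOB_DIE, &priv->jobs)) {
		if (test_and_clear_bit(JOB_XMIT, &priv->jobs))
			pr_info("would transmit the queued frame\n");
		if (test_and_clear_bit(JOB_STATS, &priv->jobs))
			pr_info("would refresh statistics\n");
		schedule();			/* real code waits on an event */
	}
	return 0;
}

/* Producers (IRQ handler, timers, ioctls) only flip bits: */
static void request_stats(struct my_priv *priv)
{
	set_bit(JOB_STATS, &priv->jobs);	/* the worker picks this up later */
}
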
/linux-6.1.9/Documentation/admin-guide/cgroup-v1/
cpusets.rst
87 can benefit from explicitly placing jobs on properly sized subsets of
100 executing jobs. The location of the running jobs pages may also be moved
252 jobs can share common kernel data, such as file system pages, while
254 construct a large mem_exclusive cpuset to hold all the jobs, and
268 This enables batch managers monitoring jobs running in dedicated
273 submitted jobs, which may choose to terminate or re-prioritize jobs that
276 computing jobs that will dramatically fail to meet required performance
381 This policy can provide substantial improvements for jobs that need
384 the several nodes in the jobs cpuset in order to fit. Without this
385 policy, especially for jobs that might have one thread reading in the
[all …]
memcg_test.rst
223 run jobs under child_a and child_b
225 create/delete following groups at random while jobs are running::
231 running new jobs in new group is also good.
/linux-6.1.9/Documentation/translations/zh_CN/mm/
hwpoison.rst
127 echo `jobs -p` > /sys/fs/cgroup/mem/hwpoison/tasks
/linux-6.1.9/Documentation/driver-api/tty/
index.rst
25 implementing echoes, signal handling, jobs control, special characters
/linux-6.1.9/Documentation/admin-guide/mm/
multigen_lru.rst
103 scheduler needs to estimate the working sets of the existing jobs.
162 existing jobs.
/linux-6.1.9/tools/cgroup/
iocost_coef_gen.py
89 def run_fio(testfile, duration, iotype, iodepth, blocksize, jobs): argument
/linux-6.1.9/Documentation/accounting/
psi.rst
27 dynamically using techniques such as load shedding, migrating jobs to
29 priority or restartable batch jobs.
