Lines matching references to: g (perf bench numa, tools/perf/bench/numa.c). Each entry gives the source line number, the matching code, and the enclosing function.

48 #define tprintf(x...) do { if (g && g->p.show_details >= 0) printf(x); } while (0)
54 #define dprintf(x...) do { if (g && g->p.show_details >= 1) printf(x); } while (0)
162 static struct global_info *g = NULL; variable
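
Every reference that follows goes through this one pointer: g is mapped MAP_SHARED in init() (line 1466) so that the forked worker processes share parameters, counters and synchronization objects. Below is a rough reconstruction of its layout using only the fields this listing touches; the grouping and types are assumptions, and the real struct global_info in the benchmark has more members:

#include <pthread.h>
#include <stdbool.h>
#include <stdint.h>
#include <sys/types.h>

struct thread_data;	/* per-task state, see init_thread_data() below */

/* Sketch only: reconstructed from the fields used in this listing. */
struct params {
	int	nr_cpus, nr_nodes, nr_proc, nr_threads, nr_tasks;
	int	nr_loops, nr_secs, sleep_usecs, perturb_secs;
	int	show_details, show_convergence, measure_convergence;
	int	serialize_startup;
	ssize_t	bytes_global, bytes_process, bytes_process_locked, bytes_thread;
	/* plus mb_* sizes, -C/-M bind strings, data-access flags, ... */
};

struct global_info {
	struct params		p;
	uint8_t			*data;		/* shared work area  */
	struct thread_data	*threads;	/* one slot per task */
	pthread_mutex_t		startup_mutex, start_work_mutex, stop_work_mutex;
	pthread_cond_t		startup_cond, start_work_cond;
	int			nr_tasks_started, nr_tasks_working;
	bool			start_work, stop_work, all_converged;
	uint64_t		bytes_done;
	int			print_once;
};
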
231 for (i = 0; i < g->p.nr_nodes; i++) { in nr_numa_nodes()
293 for (cpu = 0; cpu < g->p.nr_cpus; cpu++) in bind_to_cpu()
296 if (target_cpu < 0 || target_cpu >= g->p.nr_cpus) in bind_to_cpu()
339 for (cpu = 0; cpu < g->p.nr_cpus; cpu++) in bind_to_node()
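
bind_to_cpu() and bind_to_node() both validate against g->p.nr_cpus and loop over all CPUs to build an affinity mask. A minimal sketch of that pattern with glibc's dynamic CPU-set API, which the CPU_ALLOC/CPU_ALLOC_SIZE references at lines 621, 641 and 1442 point at; pin_to_cpu is a hypothetical name and error handling is trimmed:

#define _GNU_SOURCE
#include <sched.h>
#include <stdlib.h>

/* Pin the calling thread to one CPU out of nr_cpus (sketch of the
 * pattern behind bind_to_cpu()). Returns 0 on success, -1 on error. */
static int pin_to_cpu(int target_cpu, int nr_cpus)
{
	size_t size = CPU_ALLOC_SIZE(nr_cpus);
	cpu_set_t *mask = CPU_ALLOC(nr_cpus);
	int ret;

	if (!mask)
		return -1;

	CPU_ZERO_S(size, mask);
	if (target_cpu < 0) {
		/* "Bind to all": set every CPU, as the loops above do. */
		for (int cpu = 0; cpu < nr_cpus; cpu++)
			CPU_SET_S(cpu, size, mask);
	} else {
		CPU_SET_S(target_cpu, size, mask);
	}

	ret = sched_setaffinity(0, size, mask);
	CPU_FREE(mask);
	return ret;
}

bind_to_node() would set every CPU belonging to the target node rather than a single one.
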
387 ret = set_mempolicy(MPOL_DEFAULT, NULL, g->p.nr_nodes-1); in mempol_restore()
450 if (ret && !g->print_once) { in alloc_data()
451 g->print_once = 1; in alloc_data()
457 if (ret && !g->print_once) { in alloc_data()
458 g->print_once = 1; in alloc_data()
507 return alloc_data(bytes, MAP_SHARED, 1, g->p.init_cpu0, g->p.thp, g->p.init_random); in zalloc_shared_data()
515 return alloc_data(bytes, MAP_SHARED, 0, g->p.init_cpu0, g->p.thp, g->p.init_random); in setup_shared_data()
524 return alloc_data(bytes, MAP_PRIVATE, 0, g->p.init_cpu0, g->p.thp, g->p.init_random); in setup_private_data()
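
zalloc_shared_data(), setup_shared_data() and setup_private_data() differ only in the MAP_SHARED/MAP_PRIVATE flag and the zero-init argument they forward. A hedged sketch of the alloc_data() core they share, assuming plain anonymous mmap; the real function also takes the init_cpu0, thp and init_random parameters visible above and reports mmap failure only once via g->print_once (lines 450-458):

#include <sys/mman.h>
#include <sys/types.h>
#include <string.h>
#include <stdlib.h>

/* Sketch of the shared allocation core: map anonymous memory that is
 * either private to the caller or shared with forked workers. */
static void *alloc_data_sketch(ssize_t bytes, int map_flags, int init_zero)
{
	void *buf;

	if (!bytes)
		return NULL;

	buf = mmap(NULL, bytes, PROT_READ | PROT_WRITE,
		   MAP_ANONYMOUS | map_flags, -1, 0);
	if (buf == MAP_FAILED)
		exit(EXIT_FAILURE);

	if (init_zero)
		memset(buf, 0, bytes);	/* pre-fault and zero the area */

	return buf;
}
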
542 if (!g->p.cpu_list_str) in parse_setup_cpu_list()
545 dprintf("g->p.nr_tasks: %d\n", g->p.nr_tasks); in parse_setup_cpu_list()
547 str0 = str = strdup(g->p.cpu_list_str); in parse_setup_cpu_list()
582 BUG_ON(step <= 0 || step >= g->p.nr_cpus); in parse_setup_cpu_list()
594 BUG_ON(bind_len <= 0 || bind_len > g->p.nr_cpus); in parse_setup_cpu_list()
607 if (bind_cpu_0 >= g->p.nr_cpus || bind_cpu_1 >= g->p.nr_cpus) { in parse_setup_cpu_list()
608 printf("\nTest not applicable, system has only %d CPUs.\n", g->p.nr_cpus); in parse_setup_cpu_list()
621 size_t size = CPU_ALLOC_SIZE(g->p.nr_cpus); in parse_setup_cpu_list()
627 if (t >= g->p.nr_tasks) { in parse_setup_cpu_list()
631 td = g->threads + t; in parse_setup_cpu_list()
641 td->bind_cpumask = CPU_ALLOC(g->p.nr_cpus); in parse_setup_cpu_list()
645 if (cpu < 0 || cpu >= g->p.nr_cpus) { in parse_setup_cpu_list()
659 if (t < g->p.nr_tasks) in parse_setup_cpu_list()
660 printf("# NOTE: %d tasks bound, %d tasks unbound\n", t, g->p.nr_tasks - t); in parse_setup_cpu_list()
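
The checks above (step against nr_cpus, bind_len against nr_cpus, the bind_cpu_0/bind_cpu_1 bounds) suggest a range syntax with a start CPU, an end CPU and modifiers such as a step. The exact grammar is not recoverable from this listing, so the sketch below parses only an assumed "start-end#step" form with a hypothetical parse_cpu_range() helper:

#include <stdlib.h>
#include <string.h>

/* Illustrative only: parse an assumed "start-end#step" CPU range.
 * The real parse_setup_cpu_list() supports more modifiers and
 * validates every value against g->p.nr_cpus, as the BUG_ON and
 * bounds checks above show. */
static int parse_cpu_range(const char *spec, int nr_cpus,
			   int *cpu0, int *cpu1, int *step)
{
	char *s = strdup(spec), *dash, *hash;

	if (!s)
		return -1;

	*step = 1;
	hash = strchr(s, '#');
	if (hash) {
		*hash = '\0';
		*step = atoi(hash + 1);
	}

	*cpu0 = *cpu1 = atoi(s);
	dash = strchr(s, '-');
	if (dash)
		*cpu1 = atoi(dash + 1);

	free(s);

	if (*cpu0 < 0 || *cpu1 >= nr_cpus || *step <= 0 || *step >= nr_cpus)
		return -1;	/* "Test not applicable" on this system */
	return 0;
}

For example, parse_cpu_range("0-7#2", 8, &c0, &c1, &st) yields 0, 7 and 2. parse_setup_node_list() below applies the same pattern to NUMA nodes instead of CPUs.
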
690 if (!g->p.node_list_str) in parse_setup_node_list()
693 dprintf("g->p.nr_tasks: %d\n", g->p.nr_tasks); in parse_setup_node_list()
695 str0 = str = strdup(g->p.node_list_str); in parse_setup_node_list()
729 BUG_ON(step <= 0 || step >= g->p.nr_nodes); in parse_setup_node_list()
742 if (bind_node_0 >= g->p.nr_nodes || bind_node_1 >= g->p.nr_nodes) { in parse_setup_node_list()
743 printf("\nTest not applicable, system has only %d nodes.\n", g->p.nr_nodes); in parse_setup_node_list()
754 if (t >= g->p.nr_tasks || !node_has_cpus(bind_node)) { in parse_setup_node_list()
758 td = g->threads + t; in parse_setup_node_list()
774 if (t < g->p.nr_tasks) in parse_setup_node_list()
775 printf("# NOTE: %d tasks mem-bound, %d tasks unbound\n", t, g->p.nr_tasks - t); in parse_setup_node_list()
804 if (g->p.data_reads) in access_data()
806 if (g->p.data_writes) in access_data()
835 if (g->p.data_zero_memset && !g->p.data_rand_walk) { in do_work()
842 chunk_1 = words/g->p.nr_loops; in do_work()
848 if (g->p.data_rand_walk) { in do_work()
860 if (g->p.data_zero_memset) { in do_work()
867 } else if (!g->p.data_backwards || (nr + loop) & 1) { in do_work()
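
do_work() selects an access pattern from the flags above: a random walk, an optional zeroing memset, and forward or backward sweeps whose direction alternates with the parity of nr + loop. A sketch of just the direction selection, with a hypothetical sweep() standing in for the real chunked read/write loop:

#include <stdint.h>

/* Sketch of do_work()'s direction choice over a word array; the
 * condition mirrors "!data_backwards || (nr + loop) & 1" above. */
static uint64_t sweep(uint64_t *buf, long words, int nr, int loop,
		      int backwards, uint64_t val)
{
	if (!backwards || ((nr + loop) & 1)) {
		for (long i = 0; i < words; i++)	/* forward sweep  */
			val = buf[i] = val + i;
	} else {
		for (long i = words - 1; i >= 0; i--)	/* backward sweep */
			val = buf[i] = val + i;
	}
	return val;
}
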
912 g->threads[task_nr].curr_cpu = cpu; in update_curr_cpu()
930 node_present = (char *)malloc(g->p.nr_nodes * sizeof(char)); in count_process_nodes()
932 for (nodes = 0; nodes < g->p.nr_nodes; nodes++) in count_process_nodes()
935 for (t = 0; t < g->p.nr_threads; t++) { in count_process_nodes()
940 task_nr = process_nr*g->p.nr_threads + t; in count_process_nodes()
941 td = g->threads + task_nr; in count_process_nodes()
954 for (n = 0; n < g->p.nr_nodes; n++) in count_process_nodes()
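
count_process_nodes() marks, in the temporary node_present[] array, each node that one process's threads currently occupy, then sums the marks; a result of 1 means the whole process has converged onto a single node. A compact sketch with a hypothetical count_nodes_used(), assuming the caller has already mapped each thread's curr_cpu to a node:

#include <stdlib.h>

/* Sketch: count the distinct NUMA nodes a set of threads occupies.
 * thread_node[t] is assumed to hold the node of thread t (the
 * benchmark derives it from td->curr_cpu). */
static int count_nodes_used(const int *thread_node, int nr_threads, int nr_nodes)
{
	char *node_present = calloc(nr_nodes, sizeof(char));
	int nodes = 0;

	if (!node_present)
		return -1;

	for (int t = 0; t < nr_threads; t++) {
		int node = thread_node[t];

		if (node < 0)		/* thread not scheduled yet */
			continue;
		node_present[node] = 1;
	}
	for (int n = 0; n < nr_nodes; n++)
		nodes += node_present[n];

	free(node_present);
	return nodes;		/* 1 means the process has converged */
}
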
973 for (p = 0; p < g->p.nr_proc; p++) { in count_node_processes()
974 for (t = 0; t < g->p.nr_threads; t++) { in count_node_processes()
979 task_nr = p*g->p.nr_threads + t; in count_node_processes()
980 td = g->threads + task_nr; in count_node_processes()
1001 for (p = 0; p < g->p.nr_proc; p++) { in calc_convergence_compression()
1037 if (!g->p.show_convergence && !g->p.measure_convergence) in calc_convergence()
1040 nodes = (int *)malloc(g->p.nr_nodes * sizeof(int)); in calc_convergence()
1042 for (node = 0; node < g->p.nr_nodes; node++) in calc_convergence()
1048 for (t = 0; t < g->p.nr_tasks; t++) { in calc_convergence()
1049 struct thread_data *td = g->threads + t; in calc_convergence()
1068 nr_min = g->p.nr_tasks; in calc_convergence()
1071 for (node = 0; node < g->p.nr_nodes; node++) { in calc_convergence()
1081 BUG_ON(sum > g->p.nr_tasks); in calc_convergence()
1083 if (0 && (sum < g->p.nr_tasks)) { in calc_convergence()
1095 for (node = 0; node < g->p.nr_nodes; node++) { in calc_convergence()
1122 if (strong && process_groups == g->p.nr_proc) { in calc_convergence()
1126 if (g->p.measure_convergence) { in calc_convergence()
1127 g->all_converged = true; in calc_convergence()
1128 g->stop_work = true; in calc_convergence()
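
calc_convergence() buckets all tasks into per-node counts (the nodes[] array allocated at line 1040), tracks the fullest and emptiest node, and sanity-checks the total against nr_tasks; when the per-process groups line up with nr_proc the run is strongly converged and, under measure_convergence, all_converged/stop_work end it. A sketch of the tally step with a hypothetical tally_tasks_per_node():

#include <assert.h>

/* Sketch of calc_convergence()'s tally: bucket every task into the
 * node it currently runs on. Unscheduled tasks (node < 0) are
 * skipped, so the total can fall below nr_tasks, cf.
 * BUG_ON(sum > g->p.nr_tasks) at line 1081. */
static void tally_tasks_per_node(const int *task_node, int nr_tasks,
				 int *nodes, int nr_nodes,
				 int *nr_min, int *nr_max)
{
	int sum = 0;

	for (int node = 0; node < nr_nodes; node++)
		nodes[node] = 0;
	for (int t = 0; t < nr_tasks; t++)
		if (task_node[t] >= 0)
			nodes[task_node[t]]++;

	*nr_min = nr_tasks;
	*nr_max = 0;
	for (int node = 0; node < nr_nodes; node++) {
		*nr_min = nodes[node] < *nr_min ? nodes[node] : *nr_min;
		*nr_max = nodes[node] > *nr_max ? nodes[node] : *nr_max;
		sum += nodes[node];
	}
	assert(sum <= nr_tasks);
}
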
1145 (double)(l+1)/g->p.nr_loops*100.0, runtime_ns_max / NSEC_PER_SEC / 60.0); in show_summary()
1149 if (g->p.show_details >= 0) in show_summary()
1161 int details = g->p.show_details; in worker_thread()
1179 global_data = g->data; in worker_thread()
1181 thread_data = setup_private_data(g->p.bytes_thread); in worker_thread()
1186 if (process_nr == g->p.nr_proc-1 && thread_nr == g->p.nr_threads-1) in worker_thread()
1198 if (g->p.serialize_startup) { in worker_thread()
1199 mutex_lock(&g->startup_mutex); in worker_thread()
1200 g->nr_tasks_started++; in worker_thread()
1202 if (g->nr_tasks_started == g->p.nr_tasks) in worker_thread()
1203 cond_signal(&g->startup_cond); in worker_thread()
1205 mutex_unlock(&g->startup_mutex); in worker_thread()
1208 mutex_lock(&g->start_work_mutex); in worker_thread()
1209 g->start_work = false; in worker_thread()
1210 g->nr_tasks_working++; in worker_thread()
1211 while (!g->start_work) in worker_thread()
1212 cond_wait(&g->start_work_cond, &g->start_work_mutex); in worker_thread()
1214 mutex_unlock(&g->start_work_mutex); in worker_thread()
1222 for (l = 0; l < g->p.nr_loops; l++) { in worker_thread()
1225 if (g->stop_work) in worker_thread()
1228 val += do_work(global_data, g->p.bytes_global, process_nr, g->p.nr_proc, l, val); in worker_thread()
1229 val += do_work(process_data, g->p.bytes_process, thread_nr, g->p.nr_threads, l, val); in worker_thread()
1230 val += do_work(thread_data, g->p.bytes_thread, 0, 1, l, val); in worker_thread()
1232 if (g->p.sleep_usecs) { in worker_thread()
1234 usleep(g->p.sleep_usecs); in worker_thread()
1240 if (g->p.bytes_process_locked) { in worker_thread()
1242 val += do_work(process_data, g->p.bytes_process_locked, thread_nr, g->p.nr_threads, l, val); in worker_thread()
1246 work_done = g->p.bytes_global + g->p.bytes_process + in worker_thread()
1247 g->p.bytes_process_locked + g->p.bytes_thread; in worker_thread()
1252 if (details < 0 && !g->p.perturb_secs && !g->p.measure_convergence && !g->p.nr_secs) in worker_thread()
1260 if (g->p.nr_secs) { in worker_thread()
1262 if ((u32)diff.tv_sec >= g->p.nr_secs) { in worker_thread()
1263 g->stop_work = true; in worker_thread()
1276 if (first_task && g->p.perturb_secs && (int)(stop.tv_sec - last_perturbance) >= g->p.perturb_secs) { in worker_thread()
1288 this_cpu = g->threads[task_nr].curr_cpu; in worker_thread()
1289 if (this_cpu < g->p.nr_cpus/2) in worker_thread()
1290 target_cpu = g->p.nr_cpus-1; in worker_thread()
1338 free_data(thread_data, g->p.bytes_thread); in worker_thread()
1340 mutex_lock(&g->stop_work_mutex); in worker_thread()
1341 g->bytes_done += bytes_done; in worker_thread()
1342 mutex_unlock(&g->stop_work_mutex); in worker_thread()
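
Lines 1198 to 1214 are the worker side of a two-phase startup handshake: each task checks in under startup_mutex (the last one signals the parent via startup_cond), then parks on start_work_cond until the parent raises start_work. A sketch of that protocol; struct gate and worker_wait_for_start() are illustrative names, and the fields must live in MAP_SHARED memory, as init() arranges below:

#include <pthread.h>
#include <stdbool.h>

/* Worker-side startup gate, modeled on lines 1198-1214. Parent and
 * workers see one copy because the struct sits in shared memory. */
struct gate {
	pthread_mutex_t	startup_mutex, start_work_mutex;
	pthread_cond_t	startup_cond, start_work_cond;
	int		nr_tasks_started, nr_tasks_working, nr_tasks;
	bool		start_work;
};

static void worker_wait_for_start(struct gate *s)
{
	pthread_mutex_lock(&s->startup_mutex);
	s->nr_tasks_started++;
	if (s->nr_tasks_started == s->nr_tasks)
		pthread_cond_signal(&s->startup_cond);	/* wake the parent */
	pthread_mutex_unlock(&s->startup_mutex);

	pthread_mutex_lock(&s->start_work_mutex);
	s->nr_tasks_working++;
	while (!s->start_work)		/* loop guards against spurious wakeups */
		pthread_cond_wait(&s->start_work_cond, &s->start_work_mutex);
	pthread_mutex_unlock(&s->start_work_mutex);
}
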
1367 task_nr = process_nr*g->p.nr_threads; in worker_process()
1368 td = g->threads + task_nr; in worker_process()
1373 pthreads = zalloc(g->p.nr_threads * sizeof(pthread_t)); in worker_process()
1374 process_data = setup_private_data(g->p.bytes_process); in worker_process()
1376 if (g->p.show_details >= 3) { in worker_process()
1378 process_nr, g->data, process_data); in worker_process()
1381 for (t = 0; t < g->p.nr_threads; t++) { in worker_process()
1382 task_nr = process_nr*g->p.nr_threads + t; in worker_process()
1383 td = g->threads + task_nr; in worker_process()
1397 for (t = 0; t < g->p.nr_threads; t++) { in worker_process()
1402 free_data(process_data, g->p.bytes_process); in worker_process()
1408 if (g->p.show_details < 0) in print_summary()
1413 g->p.nr_tasks, g->p.nr_tasks == 1 ? "task" : "tasks", nr_numa_nodes(), g->p.nr_cpus); in print_summary()
1415 g->p.nr_loops, g->p.bytes_global/1024/1024); in print_summary()
1417 g->p.nr_loops, g->p.bytes_process/1024/1024); in print_summary()
1419 g->p.nr_loops, g->p.bytes_thread/1024/1024); in print_summary()
1428 ssize_t size = sizeof(*g->threads)*g->p.nr_tasks; in init_thread_data()
1431 g->threads = zalloc_shared_data(size); in init_thread_data()
1433 for (t = 0; t < g->p.nr_tasks; t++) { in init_thread_data()
1434 struct thread_data *td = g->threads + t; in init_thread_data()
1435 size_t cpuset_size = CPU_ALLOC_SIZE(g->p.nr_cpus); in init_thread_data()
1442 td->bind_cpumask = CPU_ALLOC(g->p.nr_cpus); in init_thread_data()
1445 for (cpu = 0; cpu < g->p.nr_cpus; cpu++) in init_thread_data()
1452 ssize_t size = sizeof(*g->threads)*g->p.nr_tasks; in deinit_thread_data()
1456 for (t = 0; t < g->p.nr_tasks; t++) { in deinit_thread_data()
1457 struct thread_data *td = g->threads + t; in deinit_thread_data()
1461 free_data(g->threads, size); in deinit_thread_data()
1466 g = (void *)alloc_data(sizeof(*g), MAP_SHARED, 1, 0, 0 /* THP */, 0); in init()
1469 g->p = p0; in init()
1471 g->p.nr_cpus = numa_num_configured_cpus(); in init()
1473 g->p.nr_nodes = numa_max_node() + 1; in init()
1476 BUG_ON(g->p.nr_nodes < 0); in init()
1478 if (quiet && !g->p.show_details) in init()
1479 g->p.show_details = -1; in init()
1482 if (!g->p.mb_global_str && !g->p.mb_proc_str && !g->p.mb_thread_str) in init()
1485 if (g->p.mb_global_str) { in init()
1486 g->p.mb_global = atof(g->p.mb_global_str); in init()
1487 BUG_ON(g->p.mb_global < 0); in init()
1490 if (g->p.mb_proc_str) { in init()
1491 g->p.mb_proc = atof(g->p.mb_proc_str); in init()
1492 BUG_ON(g->p.mb_proc < 0); in init()
1495 if (g->p.mb_proc_locked_str) { in init()
1496 g->p.mb_proc_locked = atof(g->p.mb_proc_locked_str); in init()
1497 BUG_ON(g->p.mb_proc_locked < 0); in init()
1498 BUG_ON(g->p.mb_proc_locked > g->p.mb_proc); in init()
1501 if (g->p.mb_thread_str) { in init()
1502 g->p.mb_thread = atof(g->p.mb_thread_str); in init()
1503 BUG_ON(g->p.mb_thread < 0); in init()
1506 BUG_ON(g->p.nr_threads <= 0); in init()
1507 BUG_ON(g->p.nr_proc <= 0); in init()
1509 g->p.nr_tasks = g->p.nr_proc*g->p.nr_threads; in init()
1511 g->p.bytes_global = g->p.mb_global *1024L*1024L; in init()
1512 g->p.bytes_process = g->p.mb_proc *1024L*1024L; in init()
1513 g->p.bytes_process_locked = g->p.mb_proc_locked *1024L*1024L; in init()
1514 g->p.bytes_thread = g->p.mb_thread *1024L*1024L; in init()
1516 g->data = setup_shared_data(g->p.bytes_global); in init()
1519 mutex_init_pshared(&g->start_work_mutex); in init()
1520 cond_init_pshared(&g->start_work_cond); in init()
1521 mutex_init_pshared(&g->startup_mutex); in init()
1522 cond_init_pshared(&g->startup_cond); in init()
1523 mutex_init_pshared(&g->stop_work_mutex); in init()
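
For the handshake to work across fork(), the mutexes and condition variables must be marked process-shared, which is presumably what perf's mutex_init_pshared()/cond_init_pshared() helpers do. A sketch of that standard pthread pattern (the *_sketch names are illustrative):

#include <pthread.h>

/* Sketch: initialize a mutex/cond usable across fork()ed processes,
 * provided the objects themselves live in MAP_SHARED memory. */
static void mutex_init_pshared_sketch(pthread_mutex_t *m)
{
	pthread_mutexattr_t attr;

	pthread_mutexattr_init(&attr);
	pthread_mutexattr_setpshared(&attr, PTHREAD_PROCESS_SHARED);
	pthread_mutex_init(m, &attr);
	pthread_mutexattr_destroy(&attr);
}

static void cond_init_pshared_sketch(pthread_cond_t *c)
{
	pthread_condattr_t attr;

	pthread_condattr_init(&attr);
	pthread_condattr_setpshared(&attr, PTHREAD_PROCESS_SHARED);
	pthread_cond_init(c, &attr);
	pthread_condattr_destroy(&attr);
}
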
1539 free_data(g->data, g->p.bytes_global); in deinit()
1540 g->data = NULL; in deinit()
1544 free_data(g, sizeof(*g)); in deinit()
1545 g = NULL; in deinit()
1579 pids = zalloc(g->p.nr_proc * sizeof(*pids)); in __bench_numa()
1582 if (g->p.serialize_startup) { in __bench_numa()
1589 for (i = 0; i < g->p.nr_proc; i++) { in __bench_numa()
1604 if (g->p.serialize_startup) { in __bench_numa()
1612 mutex_lock(&g->startup_mutex); in __bench_numa()
1613 while (g->nr_tasks_started != g->p.nr_tasks) in __bench_numa()
1614 cond_wait(&g->startup_cond, &g->startup_mutex); in __bench_numa()
1616 mutex_unlock(&g->startup_mutex); in __bench_numa()
1620 mutex_lock(&g->start_work_mutex); in __bench_numa()
1621 threads_ready = (g->nr_tasks_working == g->p.nr_tasks); in __bench_numa()
1622 mutex_unlock(&g->start_work_mutex); in __bench_numa()
1640 mutex_lock(&g->start_work_mutex); in __bench_numa()
1641 g->start_work = true; in __bench_numa()
1642 mutex_unlock(&g->start_work_mutex); in __bench_numa()
1643 cond_broadcast(&g->start_work_cond); in __bench_numa()
1651 for (i = 0; i < g->p.nr_proc; i++) { in __bench_numa()
1661 for (t = 0; t < g->p.nr_tasks; t++) { in __bench_numa()
1662 u64 thread_runtime_ns = g->threads[t].runtime_ns; in __bench_numa()
1682 bytes = g->bytes_done; in __bench_numa()
1683 runtime_avg = (double)runtime_ns_sum / g->p.nr_tasks / NSEC_PER_SEC; in __bench_numa()
1685 if (g->p.measure_convergence) { in __bench_numa()
1703 print_res(name, bytes / g->p.nr_tasks / 1e9, in __bench_numa()
1709 print_res(name, runtime_sec_max * NSEC_PER_SEC / (bytes / g->p.nr_tasks), in __bench_numa()
1712 print_res(name, bytes / g->p.nr_tasks / 1e9 / runtime_sec_max, in __bench_numa()
1718 if (g->p.show_details >= 2) { in __bench_numa()
1721 for (p = 0; p < g->p.nr_proc; p++) { in __bench_numa()
1722 for (t = 0; t < g->p.nr_threads; t++) { in __bench_numa()
1724 td = g->threads + p*g->p.nr_threads + t; in __bench_numa()
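
Lines 1604 to 1643 are the parent side of the startup handshake sketched after worker_thread() above: wait under startup_mutex until every task has checked in, then flip start_work and broadcast so all workers start at once. Reusing the illustrative struct gate from that sketch:

/* Parent-side counterpart to worker_wait_for_start(); struct gate is
 * defined in the worker-side sketch after worker_thread() above. */
static void parent_start_all(struct gate *s)
{
	pthread_mutex_lock(&s->startup_mutex);
	while (s->nr_tasks_started != s->nr_tasks)
		pthread_cond_wait(&s->startup_cond, &s->startup_mutex);
	pthread_mutex_unlock(&s->startup_mutex);

	/* Every task is parked on start_work_cond; release them at once. */
	pthread_mutex_lock(&s->start_work_mutex);
	s->start_work = true;
	pthread_mutex_unlock(&s->start_work_mutex);
	pthread_cond_broadcast(&s->start_work_cond);
}
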