Lines Matching defs:ids

217 if (ahwthread->ids[level] == bhwthread->ids[level])
221 if (ahwthread->ids[level] == UNKNOWN_ID)
223 else if (bhwthread->ids[level] == UNKNOWN_ID)
225 else if (ahwthread->ids[level] < bhwthread->ids[level])
227 else if (ahwthread->ids[level] > bhwthread->ids[level])
268 printf("%4d (%d) ", ids[i], sub_ids[i]);
284 // Add a layer to the topology based on the ids. Assume the topology
286 void kmp_topology_t::insert_layer(kmp_hw_t type, const int *ids) {
287 // Figure out where the layer should go by comparing the ids of the current
288 // layers with the new ids
299 int id = hw_threads[i].ids[target_layer];
300 int new_id = ids[i];
319 // layer. And put the new ids and type into the topology.
325 hw_threads[k].ids[j] = hw_threads[k].ids[i];
326 hw_threads[k].ids[target_layer] = ids[k];
339 int *ids = (int *)__kmp_allocate(sizeof(int) * num_hw_threads);
344 ids[i] = __kmp_get_proc_group(mask);
347 insert_layer(KMP_HW_PROC_GROUP, ids);
348 __kmp_free(ids);
391 int id1 = hw_threads[0].ids[top_index1];
392 int id2 = hw_threads[0].ids[top_index2];
396 if (hw_threads[hwidx].ids[top_index1] == id1 &&
397 hw_threads[hwidx].ids[top_index2] != id2) {
401 if (hw_threads[hwidx].ids[top_index2] != id2)
403 id1 = hw_threads[hwidx].ids[top_index1];
404 id2 = hw_threads[hwidx].ids[top_index2];
420 // e.g., all are zero, then make sure to keep the first layer's ids
429 hw_thread.ids[d] = hw_thread.ids[d + 1];
484 int id = hw_thread.ids[layer];
488 if (hw_thread.ids[l] != kmp_hw_thread_t::UNKNOWN_ID)
492 if (hw_thread.ids[layer] != kmp_hw_thread_t::UNKNOWN_ID)
526 previous_id[layer] = hw_thread.ids[layer];
550 if (!find_all && hw_thread.ids[above_level] != previous_id[above_level]) {
556 if (hw_thread.ids[level] != previous_id[level]) {
564 previous_id[level] = hw_thread.ids[level];
592 if (hw_thread.ids[j] != previous_id[j]) {
602 previous_id[j] = hw_thread.ids[j];
678 // Assume ids have been sorted
686 if (previous_thread.ids[j] != current_thread.ids[j]) {
858 if (hw_threads[i].ids[level] == kmp_hw_thread_t::UNKNOWN_ID)
862 __kmp_str_buf_print(&buf, "%d ", hw_threads[i].ids[level]);
1303 // Figure out the absolute sub ids and core eff/type sub ids
1381 if (hw_thread.ids[level] == kmp_hw_thread_t::UNKNOWN_ID ||
1420 if (t1.ids[i] != t2.ids[i])
1693 // internal topology object and set the layer ids for it. Each routine
1934 hw_thread.ids[index] = pu->logical_index;
1973 hw_thread.ids[index] = memory->logical_index;
1974 hw_thread.ids[index + 1] = sub_id;
1985 hw_thread.ids[index] = obj->logical_index;
1986 hw_thread.ids[index + 1] = sub_id;
2051 hw_thread.ids[0] = i;
2052 hw_thread.ids[1] = 0;
2053 hw_thread.ids[2] = 0;
2100 hw_thread.ids[0] = i / BITS_PER_GROUP;
2101 hw_thread.ids[1] = hw_thread.ids[2] = i % BITS_PER_GROUP;
2535 hw_thread.ids[idx++] = threadInfo[i].pkgId;
2538 hw_thread.ids[idx++] = threadInfo[i].coreId;
2541 hw_thread.ids[idx++] = threadInfo[i].threadId;
2951 hw_thread.ids[idx] = kmp_hw_thread_t::UNKNOWN_ID;
2953 hw_thread.ids[idx] = apic_id & proc_info[i].levels[j].mask;
2955 hw_thread.ids[idx] >>= proc_info[i].levels[j - 1].mask_width;
2968 int prev_id = __kmp_topology->at(0).ids[j];
2969 int curr_id = __kmp_topology->at(0).ids[j + 1];
2970 __kmp_topology->at(0).ids[j + 1] = new_id;
2973 if (hw_thread.ids[j] == prev_id && hw_thread.ids[j + 1] == curr_id) {
2974 hw_thread.ids[j + 1] = new_id;
2975 } else if (hw_thread.ids[j] == prev_id &&
2976 hw_thread.ids[j + 1] != curr_id) {
2977 curr_id = hw_thread.ids[j + 1];
2978 hw_thread.ids[j + 1] = ++new_id;
2980 prev_id = hw_thread.ids[j];
2981 curr_id = hw_thread.ids[j + 1];
2982 hw_thread.ids[j + 1] = ++new_id;
3034 int *ids = (int *)__kmp_allocate(sizeof(int) * num_hw_threads);
3041 ids[i] = kmp_hw_thread_t::UNKNOWN_ID;
3047 ids[i] = info.mask & proc_info[original_idx].apic_id;
3049 __kmp_topology->insert_layer(cache_type, ids);
3755 // If thread ids were specified, it is an error if they are not unique.
3768 // If the thread ids were not specified and we see entries that
3769 // are duplicates, start the loop over and assign the thread ids manually.
3870 hw_thread.ids[pkgLevel] = threadInfo[i][src_index];
3872 hw_thread.ids[coreLevel] = threadInfo[i][src_index];
3874 hw_thread.ids[threadLevel] = threadInfo[i][src_index];
3889 // If the thread level does not have ids, then put them in.
3890 if (__kmp_topology->at(0).ids[tlevel] == kmp_hw_thread_t::UNKNOWN_ID) {
3891 __kmp_topology->at(0).ids[tlevel] = 0;
3895 if (hw_thread.ids[tlevel] != kmp_hw_thread_t::UNKNOWN_ID)
3899 // If the ids did change, then restart thread id at 0
3902 if (hw_thread.ids[j] != prev_hw_thread.ids[j]) {
3903 hw_thread.ids[tlevel] = 0;
3907 if (hw_thread.ids[tlevel] == kmp_hw_thread_t::UNKNOWN_ID)
3908 hw_thread.ids[tlevel] = prev_hw_thread.ids[tlevel] + 1;
4586 if (hw_thread.ids[j] > 0) {
4635 kmp_affinity_ids_t &ids,
4640 // Initialize ids and attrs thread data
4642 ids.ids[i] = kmp_hw_thread_t::UNKNOWN_ID;
4651 ids.os_id = cpu;
4656 if (ids.ids[type] == kmp_hw_thread_t::UNKNOWN_ID || ids.ids[type] == id) {
4657 ids.ids[type] = id;
4661 ids.ids[type] = kmp_hw_thread_t::MULTIPLE_ID;
4664 ids.ids[type] = kmp_hw_thread_t::MULTIPLE_ID;
4686 kmp_affinity_ids_t &ids = th->th.th_topology_ids;
4688 __kmp_affinity_get_mask_topology_info(mask, ids, attrs);
4709 if (!affinity.ids) {
4710 affinity.ids = (kmp_affinity_ids_t *)__kmp_allocate(
4731 kmp_affinity_ids_t &ids = affinity.ids[i];
4734 __kmp_affinity_get_mask_topology_info(mask, ids, attrs);
5070 (gran < depth && __kmp_topology->at(i).ids[gran_level] !=
5245 // Sort the topology back using ids
5283 if (affinity->ids != NULL)
5284 __kmp_free(affinity->ids);
5350 th->th.th_topology_ids.ids[id] = kmp_hw_thread_t::UNKNOWN_ID;
5365 // one that has all of the OS proc ids set, or if
5421 th->th.th_topology_ids = __kmp_affinity.ids[i];
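Below is a minimal, self-contained sketch of the level-by-level ids comparison suggested by the matches at source lines 217-227 and 1420. The hw_thread_t struct, kUnknownId, kDepth, and the rule that an unknown id sorts after a known one are illustrative assumptions, not the actual kmp_hw_thread_t layout or ordering.

// Sketch only: level-by-level comparison of per-level topology ids.
#include <array>
#include <cstdio>

constexpr int kUnknownId = -1; // stand-in for kmp_hw_thread_t::UNKNOWN_ID (assumed value)
constexpr int kDepth = 3;      // e.g. package / core / thread (assumed depth)

struct hw_thread_t {
  std::array<int, kDepth> ids; // one id per topology level
};

// Compare two hardware threads from the outermost level inward;
// a tie at one level falls through to the next level.
int compare_ids(const hw_thread_t &a, const hw_thread_t &b) {
  for (int level = 0; level < kDepth; ++level) {
    if (a.ids[level] == b.ids[level])
      continue;                 // tie: look at the next level
    if (a.ids[level] == kUnknownId)
      return 1;                 // assumed: unknown id sorts after known ids
    if (b.ids[level] == kUnknownId)
      return -1;
    return (a.ids[level] < b.ids[level]) ? -1 : 1;
  }
  return 0;                     // identical at every level
}

int main() {
  hw_thread_t t1{{0, 2, kUnknownId}};
  hw_thread_t t2{{0, 2, 1}};
  std::printf("compare_ids(t1, t2) = %d\n", compare_ids(t1, t2));
  return 0;
}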