Lines Matching +defs:lo +defs:size
2095 size_t KMP_EXPAND_NAME(ompc_get_affinity_format)(char *buffer, size_t size) {
2101 if (buffer && size) {
2102 __kmp_strncpy_truncate(buffer, size, __kmp_affinity_format,
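The matches at 2095-2102 sit in the entry point behind the public omp_get_affinity_format() routine, which copies the current affinity format string into a caller-supplied buffer, truncating when size is too small. A minimal usage sketch of the public API (the format string and buffer size below are arbitrary choices, not taken from the source):

#include <omp.h>
#include <stdio.h>

int main(void) {
  char fmt[128];                     /* arbitrary buffer size */
  omp_set_affinity_format("host=%H pid=%P tid=%i thread=%n");
  /* returns the length of the full format string; the copy into fmt is
     truncated if it does not fit in sizeof(fmt) */
  size_t needed = omp_get_affinity_format(fmt, sizeof(fmt));
  printf("format (%zu chars): %s\n", needed, fmt);
  return 0;
}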
2232 @param cpy_size size of the cpy_data buffer
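The single match at 2232 is from the doxygen comment of __kmpc_copyprivate; cpy_size is the byte count of the data that the thread executing the single region broadcasts to the rest of the team. At the source level this corresponds to the copyprivate clause; a minimal sketch:

#include <omp.h>
#include <stdio.h>

int main(void) {
  int token;
  #pragma omp parallel private(token)
  {
    /* one thread produces the value; copyprivate broadcasts its
       sizeof(int)-byte buffer to the other threads' private copies,
       which the runtime sees as cpy_data/cpy_size */
    #pragma omp single copyprivate(token)
    token = 42;
    printf("thread %d sees token=%d\n", omp_get_thread_num(), token);
  }
  return 0;
}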
3619 @param reduce_size size of data in bytes to be reduced
3699 // usage: if team size == 1, no synchronization is required ( Intel
3809 // usage: if team size == 1, no synchronization is required ( on Intel
3849 @param reduce_size size of data in bytes to be reduced
3911 // usage: if team size == 1, no synchronization is required ( Intel
4030 // usage: if team size==1, no synchronization is required (Intel platforms only)
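The matches between 3619 and 4030 fall in the __kmpc_reduce_nowait/__kmpc_reduce entry points: reduce_size is the byte size of the packed reduction data, and the "team size == 1" comments mark the fast path where no synchronization is needed. A sketch of the source-level construct these entry points implement:

#include <omp.h>
#include <stdio.h>

int main(void) {
  double sum = 0.0;
  /* the compiler packs `sum` into reduce_data and passes its byte size
     as reduce_size to the reduction entry points */
  #pragma omp parallel for reduction(+ : sum)
  for (int i = 0; i < 1000; i++)
    sum += 1.0 / (i + 1);
  printf("sum = %f\n", sum);
  return 0;
}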
4143 e.g. for(i=2;i<9;i+=2) lo=2, up=8, st=2.
4182 pr_buf->th_doacross_info[2] = dims[0].lo;
4191 range_length = dims[j].up - dims[j].lo + 1;
4194 KMP_DEBUG_ASSERT(dims[j].up > dims[j].lo);
4195 range_length = (kmp_uint64)(dims[j].up - dims[j].lo) / dims[j].st + 1;
4197 KMP_DEBUG_ASSERT(dims[j].lo > dims[j].up);
4199 (kmp_uint64)(dims[j].lo - dims[j].up) / (-dims[j].st) + 1;
4203 pr_buf->th_doacross_info[last++] = dims[j].lo;
4211 trace_count = dims[0].up - dims[0].lo + 1;
4213 KMP_DEBUG_ASSERT(dims[0].up > dims[0].lo);
4214 trace_count = (kmp_uint64)(dims[0].up - dims[0].lo) / dims[0].st + 1;
4216 KMP_DEBUG_ASSERT(dims[0].lo > dims[0].up);
4217 trace_count = (kmp_uint64)(dims[0].lo - dims[0].up) / (-dims[0].st) + 1;
4243 size_t size =
4245 flags = (kmp_uint32 *)__kmp_thread_calloc(th, size, 1);
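Lines 4143-4245 are in __kmpc_doacross_init. Each loop dimension is described by a lower bound (lo), upper bound (up) and stride (st), and the per-dimension iteration count is derived from them, with the sign of st selecting which assert and division are used. A small illustration of the bounds for the example loop quoted at 4143, using a hypothetical mirror of the runtime's per-dimension descriptor (the struct name and exact field types here are assumptions; only the lo/up/st fields appear in the matches):

/* hypothetical mirror of the runtime's per-dimension descriptor */
typedef struct {
  long long lo; /* lower bound */
  long long up; /* upper bound */
  long long st; /* loop stride */
} dim_desc;

/* for (i = 2; i < 9; i += 2)  ->  lo = 2, up = 8, st = 2 (example at 4143);
   trip count = (up - lo) / st + 1 = 4, matching the range_length math in
   the matched lines */
static const dim_desc dims[1] = {{2, 8, 2}};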
4276 kmp_int64 lo, up, st;
4288 lo = pr_buf->th_doacross_info[2];
4295 if (vec[0] < lo || vec[0] > up) {
4298 gtid, vec[0], lo, up));
4301 iter_number = vec[0] - lo;
4303 if (vec[0] < lo || vec[0] > up) {
4306 gtid, vec[0], lo, up));
4309 iter_number = (kmp_uint64)(vec[0] - lo) / st;
4311 if (vec[0] > lo || vec[0] < up) {
4314 gtid, vec[0], lo, up));
4317 iter_number = (kmp_uint64)(lo - vec[0]) / (-st);
4327 lo = pr_buf->th_doacross_info[j + 2];
4331 if (vec[i] < lo || vec[i] > up) {
4334 gtid, vec[i], lo, up));
4337 iter = vec[i] - lo;
4339 if (vec[i] < lo || vec[i] > up) {
4342 gtid, vec[i], lo, up));
4345 iter = (kmp_uint64)(vec[i] - lo) / st;
4347 if (vec[i] > lo || vec[i] < up) {
4350 gtid, vec[i], lo, up));
4353 iter = (kmp_uint64)(lo - vec[i]) / (-st);
4388 kmp_int64 lo, st;
4401 lo = pr_buf->th_doacross_info[2];
4407 iter_number = vec[0] - lo;
4409 iter_number = (kmp_uint64)(vec[0] - lo) / st;
4411 iter_number = (kmp_uint64)(lo - vec[0]) / (-st);
4421 lo = pr_buf->th_doacross_info[j + 2];
4424 iter = vec[i] - lo;
4426 iter = (kmp_uint64)(vec[i] - lo) / st;
4428 iter = (kmp_uint64)(lo - vec[i]) / (-st);
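The wait/post matches (4276-4428) convert a sink/source iteration vector vec[] back into a linear iteration number with the same lo/st arithmetic, and the vec[0] < lo || vec[0] > up checks discard out-of-range sink dependences. At the source level this is the ordered(n)/depend(sink)/depend(source) machinery; a minimal sketch using the stride-2 loop shape from the example at 4143:

#include <stdio.h>

#define N 9

int main(void) {
  int a[N] = {0};
  /* ordered(1) tells the runtime to track one dimension of cross-iteration
     dependences; depend(sink: i - 2) waits on iteration i - 2 (ignored when
     it falls outside [lo, up]), depend(source) posts the current iteration */
  #pragma omp parallel for ordered(1)
  for (int i = 2; i < N; i += 2) {
    #pragma omp ordered depend(sink : i - 2)
    a[i] = a[i - 2] + 1;
    #pragma omp ordered depend(source)
  }
  printf("a[8] = %d\n", a[8]);
  return 0;
}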
4489 void *omp_alloc(size_t size, omp_allocator_handle_t allocator) {
4490 return __kmp_alloc(__kmp_entry_gtid(), 0, size, allocator);
4493 void *omp_aligned_alloc(size_t align, size_t size,
4495 return __kmp_alloc(__kmp_entry_gtid(), align, size, allocator);
4498 void *omp_calloc(size_t nmemb, size_t size, omp_allocator_handle_t allocator) {
4499 return __kmp_calloc(__kmp_entry_gtid(), 0, nmemb, size, allocator);
4502 void *omp_aligned_calloc(size_t align, size_t nmemb, size_t size,
4504 return __kmp_calloc(__kmp_entry_gtid(), align, nmemb, size, allocator);
4507 void *omp_realloc(void *ptr, size_t size, omp_allocator_handle_t allocator,
4509 return __kmp_realloc(__kmp_entry_gtid(), ptr, size, allocator,
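Lines 4489-4509 are thin wrappers that forward the OpenMP 5.x memory-allocation API to the internal allocator (__kmp_alloc and friends). A minimal usage sketch with the predefined default allocator:

#include <omp.h>
#include <stdio.h>

int main(void) {
  /* plain and aligned allocations from the predefined default allocator */
  double *a = omp_alloc(100 * sizeof(double), omp_default_mem_alloc);
  double *b = omp_aligned_alloc(64, 100 * sizeof(double), omp_default_mem_alloc);
  /* zero-initialized, like calloc */
  double *c = omp_calloc(100, sizeof(double), omp_default_mem_alloc);
  /* grow a; the last argument names the allocator that owns the old block */
  a = omp_realloc(a, 200 * sizeof(double), omp_default_mem_alloc,
                  omp_default_mem_alloc);
  printf("a=%p b=%p c=%p\n", (void *)a, (void *)b, (void *)c);
  omp_free(a, omp_default_mem_alloc);
  omp_free(b, omp_default_mem_alloc);
  omp_free(c, omp_default_mem_alloc);
  return 0;
}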