LLVM OpenMP* Runtime Library
kmp_taskdeps.cpp
/*
 * kmp_taskdeps.cpp
 */

//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

//#define KMP_SUPPORT_GRAPH_OUTPUT 1

#include "kmp.h"
#include "kmp_io.h"
#include "kmp_wait_release.h"
#include "kmp_taskdeps.h"
#if OMPT_SUPPORT
#include "ompt-specific.h"
#endif

// TODO: Improve memory allocation? keep a list of pre-allocated structures?
// allocate in blocks? re-use finished list entries?
// TODO: don't use atomic ref counters for stack-allocated nodes.
// TODO: find an alternate to atomic refs for heap-allocated nodes?
// TODO: Finish graph output support
// TODO: kmp_lock_t seems a tad too big (and heavyweight) for this. Check other
// runtime locks
// TODO: Any ITT support needed?

#ifdef KMP_SUPPORT_GRAPH_OUTPUT
static std::atomic<kmp_int32> kmp_node_id_seed = ATOMIC_VAR_INIT(0);
#endif

static void __kmp_init_node(kmp_depnode_t *node) {
  node->dn.successors = NULL;
  node->dn.task = NULL; // will point to the right task
  // once dependences have been processed
  for (int i = 0; i < MAX_MTX_DEPS; ++i)
    node->dn.mtx_locks[i] = NULL;
  node->dn.mtx_num_locks = 0;
  __kmp_init_lock(&node->dn.lock);
  KMP_ATOMIC_ST_RLX(&node->dn.nrefs, 1); // init creates the first reference
#ifdef KMP_SUPPORT_GRAPH_OUTPUT
  node->dn.id = KMP_ATOMIC_INC(&kmp_node_id_seed);
#endif
}

static inline kmp_depnode_t *__kmp_node_ref(kmp_depnode_t *node) {
  KMP_ATOMIC_INC(&node->dn.nrefs);
  return node;
}

enum { KMP_DEPHASH_OTHER_SIZE = 97, KMP_DEPHASH_MASTER_SIZE = 997 };

size_t sizes[] = {997, 2003, 4001, 8191, 16001, 32003, 64007, 131071, 270029};
const size_t MAX_GEN = 8;

static inline kmp_int32 __kmp_dephash_hash(kmp_intptr_t addr, size_t hsize) {
  // TODO alternate to try: set = (((Addr64)(addrUsefulBits * 9.618)) %
  // m_num_sets );
  return ((addr >> 6) ^ (addr >> 2)) % hsize;
}
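
/* Worked example (illustrative, not upstream text): with a table of
   KMP_DEPHASH_OTHER_SIZE = 97 buckets and a dependence address of 100,

     (100 >> 6) ^ (100 >> 2) = 1 ^ 25 = 24, and 24 % 97 = 24,

   so the entry lands in bucket 24. The two shifts fold higher address bits
   together so that addresses differing only in their low-order alignment
   bits do not all map to the same bucket. */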

static kmp_dephash_t *__kmp_dephash_extend(kmp_info_t *thread,
                                           kmp_dephash_t *current_dephash) {
  kmp_dephash_t *h;

  size_t gen = current_dephash->generation + 1;
  if (gen >= MAX_GEN)
    return current_dephash;
  size_t new_size = sizes[gen];

  kmp_int32 size_to_allocate =
      new_size * sizeof(kmp_dephash_entry_t *) + sizeof(kmp_dephash_t);

#if USE_FAST_MEMORY
  h = (kmp_dephash_t *)__kmp_fast_allocate(thread, size_to_allocate);
#else
  h = (kmp_dephash_t *)__kmp_thread_malloc(thread, size_to_allocate);
#endif

  h->size = new_size;
  h->nelements = current_dephash->nelements;
  h->buckets = (kmp_dephash_entry **)(h + 1);
  h->generation = gen;
  h->nconflicts = 0;
  // insert existing elements in the new table
  for (size_t i = 0; i < current_dephash->size; i++) {
    kmp_dephash_entry_t *next, *entry;
    for (entry = current_dephash->buckets[i]; entry; entry = next) {
      next = entry->next_in_bucket;
      // Compute the new hash using the new size, and insert the entry in
      // the new bucket.
      kmp_int32 new_bucket = __kmp_dephash_hash(entry->addr, h->size);
      entry->next_in_bucket = h->buckets[new_bucket];
      if (entry->next_in_bucket) {
        h->nconflicts++;
      }
      h->buckets[new_bucket] = entry;
    }
  }

  // Free old hash table
#if USE_FAST_MEMORY
  __kmp_fast_free(thread, current_dephash);
#else
  __kmp_thread_free(thread, current_dephash);
#endif

  return h;
}
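
/* Note (added commentary, not upstream text): a dephash created for an
   explicit task starts at 97 buckets with generation 0, one created for an
   implicit task at 997. Each successful extension advances the generation
   and takes the next entry of sizes[], so an explicit task's table grows
   97 -> 2003 -> 4001 -> ... -> 131071; once gen reaches MAX_GEN the table
   stops growing. The trigger is in __kmp_dephash_find below: the table is
   extended when the number of recorded conflicts reaches the bucket count. */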

static kmp_dephash_t *__kmp_dephash_create(kmp_info_t *thread,
                                           kmp_taskdata_t *current_task) {
  kmp_dephash_t *h;

  size_t h_size;

  if (current_task->td_flags.tasktype == TASK_IMPLICIT)
    h_size = KMP_DEPHASH_MASTER_SIZE;
  else
    h_size = KMP_DEPHASH_OTHER_SIZE;

  kmp_int32 size =
      h_size * sizeof(kmp_dephash_entry_t *) + sizeof(kmp_dephash_t);

#if USE_FAST_MEMORY
  h = (kmp_dephash_t *)__kmp_fast_allocate(thread, size);
#else
  h = (kmp_dephash_t *)__kmp_thread_malloc(thread, size);
#endif
  h->size = h_size;

  h->generation = 0;
  h->nelements = 0;
  h->nconflicts = 0;
  h->buckets = (kmp_dephash_entry **)(h + 1);

  for (size_t i = 0; i < h_size; i++)
    h->buckets[i] = 0;

  return h;
}

#define ENTRY_LAST_INS 0
#define ENTRY_LAST_MTXS 1

static kmp_dephash_entry *__kmp_dephash_find(kmp_info_t *thread,
                                             kmp_dephash_t **hash,
                                             kmp_intptr_t addr) {
  kmp_dephash_t *h = *hash;
  if (h->nelements != 0 && h->nconflicts / h->size >= 1) {
    *hash = __kmp_dephash_extend(thread, h);
    h = *hash;
  }
  kmp_int32 bucket = __kmp_dephash_hash(addr, h->size);

  kmp_dephash_entry_t *entry;
  for (entry = h->buckets[bucket]; entry; entry = entry->next_in_bucket)
    if (entry->addr == addr)
      break;

  if (entry == NULL) {
    // create entry. This is only done by one thread so no locking required
#if USE_FAST_MEMORY
    entry = (kmp_dephash_entry_t *)__kmp_fast_allocate(
        thread, sizeof(kmp_dephash_entry_t));
#else
    entry = (kmp_dephash_entry_t *)__kmp_thread_malloc(
        thread, sizeof(kmp_dephash_entry_t));
#endif
    entry->addr = addr;
    entry->last_out = NULL;
    entry->last_ins = NULL;
    entry->last_mtxs = NULL;
    entry->last_flag = ENTRY_LAST_INS;
    entry->mtx_lock = NULL;
    entry->next_in_bucket = h->buckets[bucket];
    h->buckets[bucket] = entry;
    h->nelements++;
    if (entry->next_in_bucket)
      h->nconflicts++;
  }
  return entry;
}

static kmp_depnode_list_t *__kmp_add_node(kmp_info_t *thread,
                                          kmp_depnode_list_t *list,
                                          kmp_depnode_t *node) {
  kmp_depnode_list_t *new_head;

#if USE_FAST_MEMORY
  new_head = (kmp_depnode_list_t *)__kmp_fast_allocate(
      thread, sizeof(kmp_depnode_list_t));
#else
  new_head = (kmp_depnode_list_t *)__kmp_thread_malloc(
      thread, sizeof(kmp_depnode_list_t));
#endif

  new_head->node = __kmp_node_ref(node);
  new_head->next = list;

  return new_head;
}

static inline void __kmp_track_dependence(kmp_int32 gtid,
                                          kmp_depnode_t *source,
                                          kmp_depnode_t *sink,
                                          kmp_task_t *sink_task) {
#ifdef KMP_SUPPORT_GRAPH_OUTPUT
  kmp_taskdata_t *task_source = KMP_TASK_TO_TASKDATA(source->dn.task);
  // do not use sink->dn.task as that is only filled after the dependences
  // are already processed!
  kmp_taskdata_t *task_sink = KMP_TASK_TO_TASKDATA(sink_task);

  __kmp_printf("%d(%s) -> %d(%s)\n", source->dn.id,
               task_source->td_ident->psource, sink->dn.id,
               task_sink->td_ident->psource);
#endif
#if OMPT_SUPPORT && OMPT_OPTIONAL
  /* OMPT tracks dependences between tasks (a=source, b=sink) in which
     task a blocks the execution of b through the ompt_new_dependence_callback
  */
  if (ompt_enabled.ompt_callback_task_dependence) {
    kmp_taskdata_t *task_source = KMP_TASK_TO_TASKDATA(source->dn.task);
    ompt_data_t *sink_data;
    if (sink_task)
      sink_data = &(KMP_TASK_TO_TASKDATA(sink_task)->ompt_task_info.task_data);
    else
      sink_data = &__kmp_threads[gtid]->th.ompt_thread_info.task_data;

    ompt_callbacks.ompt_callback(ompt_callback_task_dependence)(
        &(task_source->ompt_task_info.task_data), sink_data);
  }
#endif /* OMPT_SUPPORT && OMPT_OPTIONAL */
}

static inline kmp_int32
__kmp_depnode_link_successor(kmp_int32 gtid, kmp_info_t *thread,
                             kmp_task_t *task, kmp_depnode_t *node,
                             kmp_depnode_list_t *plist) {
  if (!plist)
    return 0;
  kmp_int32 npredecessors = 0;
  // link node as successor of list elements
  for (kmp_depnode_list_t *p = plist; p; p = p->next) {
    kmp_depnode_t *dep = p->node;
    if (dep->dn.task) {
      KMP_ACQUIRE_DEPNODE(gtid, dep);
      if (dep->dn.task) {
        __kmp_track_dependence(gtid, dep, node, task);
        dep->dn.successors = __kmp_add_node(thread, dep->dn.successors, node);
        KA_TRACE(40, ("__kmp_process_deps: T#%d adding dependence from %p to "
                      "%p\n",
                      gtid, KMP_TASK_TO_TASKDATA(dep->dn.task),
                      KMP_TASK_TO_TASKDATA(task)));
        npredecessors++;
      }
      KMP_RELEASE_DEPNODE(gtid, dep);
    }
  }
  return npredecessors;
}

static inline kmp_int32 __kmp_depnode_link_successor(kmp_int32 gtid,
                                                     kmp_info_t *thread,
                                                     kmp_task_t *task,
                                                     kmp_depnode_t *source,
                                                     kmp_depnode_t *sink) {
  if (!sink)
    return 0;
  kmp_int32 npredecessors = 0;
  if (sink->dn.task) {
    // synchronously add source to sink's list of successors
    KMP_ACQUIRE_DEPNODE(gtid, sink);
    if (sink->dn.task) {
      __kmp_track_dependence(gtid, sink, source, task);
      sink->dn.successors = __kmp_add_node(thread, sink->dn.successors, source);
      KA_TRACE(40, ("__kmp_process_deps: T#%d adding dependence from %p to "
                    "%p\n",
                    gtid, KMP_TASK_TO_TASKDATA(sink->dn.task),
                    KMP_TASK_TO_TASKDATA(task)));
      npredecessors++;
    }
    KMP_RELEASE_DEPNODE(gtid, sink);
  }
  return npredecessors;
}
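
/* Note (added commentary, not upstream text): both overloads use a
   check / lock / re-check pattern on dn.task. A predecessor whose dn.task is
   already NULL has finished (or is finishing) and its successor list has
   already been claimed by the completion path (__kmp_release_deps), so
   linking to it would record a predecessor that can never signal this task;
   re-reading dn.task under KMP_ACQUIRE_DEPNODE closes that race. */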

template <bool filter>
static inline kmp_int32
__kmp_process_deps(kmp_int32 gtid, kmp_depnode_t *node, kmp_dephash_t **hash,
                   bool dep_barrier, kmp_int32 ndeps,
                   kmp_depend_info_t *dep_list, kmp_task_t *task) {
  KA_TRACE(30, ("__kmp_process_deps<%d>: T#%d processing %d dependencies : "
                "dep_barrier = %d\n",
                filter, gtid, ndeps, dep_barrier));

  kmp_info_t *thread = __kmp_threads[gtid];
  kmp_int32 npredecessors = 0;
  for (kmp_int32 i = 0; i < ndeps; i++) {
    const kmp_depend_info_t *dep = &dep_list[i];

    if (filter && dep->base_addr == 0)
      continue; // skip filtered entries

    kmp_dephash_entry_t *info =
        __kmp_dephash_find(thread, hash, dep->base_addr);
    kmp_depnode_t *last_out = info->last_out;
    kmp_depnode_list_t *last_ins = info->last_ins;
    kmp_depnode_list_t *last_mtxs = info->last_mtxs;

    if (dep->flags.out) { // out --> clean lists of ins and mtxs if any
      if (last_ins || last_mtxs) {
        if (info->last_flag == ENTRY_LAST_INS) { // INS were last
          npredecessors +=
              __kmp_depnode_link_successor(gtid, thread, task, node, last_ins);
        } else { // MTXS were last
          npredecessors +=
              __kmp_depnode_link_successor(gtid, thread, task, node, last_mtxs);
        }
        __kmp_depnode_list_free(thread, last_ins);
        __kmp_depnode_list_free(thread, last_mtxs);
        info->last_ins = NULL;
        info->last_mtxs = NULL;
      } else {
        npredecessors +=
            __kmp_depnode_link_successor(gtid, thread, task, node, last_out);
      }
      __kmp_node_deref(thread, last_out);
      if (dep_barrier) {
        // if this is a sync point in the serial sequence, then the previous
        // outputs are guaranteed to be completed after the execution of this
        // task so the previous output nodes can be cleared.
        info->last_out = NULL;
      } else {
        info->last_out = __kmp_node_ref(node);
      }
    } else if (dep->flags.in) {
      // in --> link node to either last_out or last_mtxs, clean earlier deps
      if (last_mtxs) {
        npredecessors +=
            __kmp_depnode_link_successor(gtid, thread, task, node, last_mtxs);
        __kmp_node_deref(thread, last_out);
        info->last_out = NULL;
        if (info->last_flag == ENTRY_LAST_MTXS && last_ins) { // MTXS were last
          // clean old INS before creating new list
          __kmp_depnode_list_free(thread, last_ins);
          info->last_ins = NULL;
        }
      } else {
        // link node as successor of the last_out if any
        npredecessors +=
            __kmp_depnode_link_successor(gtid, thread, task, node, last_out);
      }
      info->last_flag = ENTRY_LAST_INS;
      info->last_ins = __kmp_add_node(thread, info->last_ins, node);
    } else {
      KMP_DEBUG_ASSERT(dep->flags.mtx == 1);
      // mtx --> link node to either last_out or last_ins, clean earlier deps
      if (last_ins) {
        npredecessors +=
            __kmp_depnode_link_successor(gtid, thread, task, node, last_ins);
        __kmp_node_deref(thread, last_out);
        info->last_out = NULL;
        if (info->last_flag == ENTRY_LAST_INS && last_mtxs) { // INS were last
          // clean old MTXS before creating new list
          __kmp_depnode_list_free(thread, last_mtxs);
          info->last_mtxs = NULL;
        }
      } else {
        // link node as successor of the last_out if any
        npredecessors +=
            __kmp_depnode_link_successor(gtid, thread, task, node, last_out);
      }
      info->last_flag = ENTRY_LAST_MTXS;
      info->last_mtxs = __kmp_add_node(thread, info->last_mtxs, node);
      if (info->mtx_lock == NULL) {
        info->mtx_lock = (kmp_lock_t *)__kmp_allocate(sizeof(kmp_lock_t));
        __kmp_init_lock(info->mtx_lock);
      }
      KMP_DEBUG_ASSERT(node->dn.mtx_num_locks < MAX_MTX_DEPS);
      kmp_int32 m;
      // Save lock in node's array
      for (m = 0; m < MAX_MTX_DEPS; ++m) {
        // sort pointers in decreasing order to avoid potential livelock
        if (node->dn.mtx_locks[m] < info->mtx_lock) {
          KMP_DEBUG_ASSERT(node->dn.mtx_locks[node->dn.mtx_num_locks] == NULL);
          for (int n = node->dn.mtx_num_locks; n > m; --n) {
            // shift right all lesser non-NULL pointers
            KMP_DEBUG_ASSERT(node->dn.mtx_locks[n - 1] != NULL);
            node->dn.mtx_locks[n] = node->dn.mtx_locks[n - 1];
          }
          node->dn.mtx_locks[m] = info->mtx_lock;
          break;
        }
      }
      KMP_DEBUG_ASSERT(m < MAX_MTX_DEPS); // must break from loop
      node->dn.mtx_num_locks++;
    }
  }
  KA_TRACE(30, ("__kmp_process_deps<%d>: T#%d found %d predecessors\n", filter,
                gtid, npredecessors));
  return npredecessors;
}
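
/* Worked example (illustrative, not upstream text): for a single address x
   and the program-order task sequence

     task A: out(x)   // no ins/mtxs yet -> A becomes last_out
     task B: in(x)    // linked after last_out A; last_ins = {B}
     task C: in(x)    // linked after last_out A; last_ins = {C, B}
     task D: out(x)   // ins present -> linked after B and C; ins list freed,
                      // D becomes last_out

   the edges are A->B, A->C, B->D, C->D: readers are ordered against writers
   but not against each other. mtx (mutexinoutset) dependences are handled
   the same way as a third class: mutually unordered in the graph, but
   serialized at execution time through info->mtx_lock. */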

#define NO_DEP_BARRIER (false)
#define DEP_BARRIER (true)

// returns true if the task has any outstanding dependence
static bool __kmp_check_deps(kmp_int32 gtid, kmp_depnode_t *node,
                             kmp_task_t *task, kmp_dephash_t **hash,
                             bool dep_barrier, kmp_int32 ndeps,
                             kmp_depend_info_t *dep_list,
                             kmp_int32 ndeps_noalias,
                             kmp_depend_info_t *noalias_dep_list) {
  int i, n_mtxs = 0;
#if KMP_DEBUG
  kmp_taskdata_t *taskdata = KMP_TASK_TO_TASKDATA(task);
#endif
  KA_TRACE(20, ("__kmp_check_deps: T#%d checking dependencies for task %p : %d "
                "possibly aliased dependencies, %d non-aliased dependencies : "
                "dep_barrier=%d .\n",
                gtid, taskdata, ndeps, ndeps_noalias, dep_barrier));

  // Filter deps in dep_list
  // TODO: Different algorithm for large dep_list ( > 10 ? )
  for (i = 0; i < ndeps; i++) {
    if (dep_list[i].base_addr != 0) {
      for (int j = i + 1; j < ndeps; j++) {
        if (dep_list[i].base_addr == dep_list[j].base_addr) {
          dep_list[i].flags.in |= dep_list[j].flags.in;
          dep_list[i].flags.out |=
              (dep_list[j].flags.out ||
               (dep_list[i].flags.in && dep_list[j].flags.mtx) ||
               (dep_list[i].flags.mtx && dep_list[j].flags.in));
          dep_list[i].flags.mtx =
              (dep_list[i].flags.mtx | dep_list[j].flags.mtx) &&
              !dep_list[i].flags.out;
          dep_list[j].base_addr = 0; // Mark j element as void
        }
      }
      if (dep_list[i].flags.mtx) {
        // limit number of mtx deps to MAX_MTX_DEPS per node
        if (n_mtxs < MAX_MTX_DEPS && task != NULL) {
          ++n_mtxs;
        } else {
          dep_list[i].flags.in = 1; // downgrade mutexinoutset to inout
          dep_list[i].flags.out = 1;
          dep_list[i].flags.mtx = 0;
        }
      }
    }
  }

  // doesn't need to be atomic as no other thread is going to be accessing this
  // node just yet.
  // npredecessors is set -1 to ensure that none of the releasing tasks queues
  // this task before we have finished processing all the dependencies
  node->dn.npredecessors = -1;

  // used to pack all npredecessors additions into a single atomic operation at
  // the end
  int npredecessors;

  npredecessors = __kmp_process_deps<true>(gtid, node, hash, dep_barrier, ndeps,
                                           dep_list, task);
  npredecessors += __kmp_process_deps<false>(
      gtid, node, hash, dep_barrier, ndeps_noalias, noalias_dep_list, task);

  node->dn.task = task;
  KMP_MB();

  // Account for our initial fake value
  npredecessors++;

  // Update predecessors and obtain current value to check if there are still
  // any outstanding dependences (some tasks may have finished while we
  // processed the dependences)
  npredecessors =
      node->dn.npredecessors.fetch_add(npredecessors) + npredecessors;

  KA_TRACE(20, ("__kmp_check_deps: T#%d found %d predecessors for task %p \n",
                gtid, npredecessors, taskdata));

  // beyond this point the task could be queued (and executed) by a releasing
  // task...
  return npredecessors > 0 ? true : false;
}
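
/* Worked example (illustrative, not upstream text): if a task names the same
   address twice, e.g.

     #pragma omp task depend(in : x) depend(out : x)

   the filtering loop above folds entry j into entry i (in = 1, out = 1, an
   inout dependence) and voids the duplicate by zeroing its base_addr, so
   __kmp_process_deps<true> later skips it. A mutexinoutset combined with in
   or out on the same address is likewise promoted to inout, and any
   mutexinoutset entries beyond MAX_MTX_DEPS are downgraded to inout. */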

kmp_int32 __kmpc_omp_task_with_deps(ident_t *loc_ref, kmp_int32 gtid,
                                    kmp_task_t *new_task, kmp_int32 ndeps,
                                    kmp_depend_info_t *dep_list,
                                    kmp_int32 ndeps_noalias,
                                    kmp_depend_info_t *noalias_dep_list) {

  kmp_taskdata_t *new_taskdata = KMP_TASK_TO_TASKDATA(new_task);
  KA_TRACE(10, ("__kmpc_omp_task_with_deps(enter): T#%d loc=%p task=%p\n", gtid,
                loc_ref, new_taskdata));

  kmp_info_t *thread = __kmp_threads[gtid];
  kmp_taskdata_t *current_task = thread->th.th_current_task;

#if OMPT_SUPPORT
  if (ompt_enabled.enabled) {
    OMPT_STORE_RETURN_ADDRESS(gtid);
    if (!current_task->ompt_task_info.frame.enter_frame.ptr)
      current_task->ompt_task_info.frame.enter_frame.ptr =
          OMPT_GET_FRAME_ADDRESS(0);
    if (ompt_enabled.ompt_callback_task_create) {
      ompt_data_t task_data = ompt_data_none;
      ompt_callbacks.ompt_callback(ompt_callback_task_create)(
          current_task ? &(current_task->ompt_task_info.task_data) : &task_data,
          current_task ? &(current_task->ompt_task_info.frame) : NULL,
          &(new_taskdata->ompt_task_info.task_data),
          ompt_task_explicit | TASK_TYPE_DETAILS_FORMAT(new_taskdata), 1,
          OMPT_LOAD_RETURN_ADDRESS(gtid));
    }

    new_taskdata->ompt_task_info.frame.enter_frame.ptr =
        OMPT_GET_FRAME_ADDRESS(0);
  }

#if OMPT_OPTIONAL
  /* OMPT grab all dependences if requested by the tool */
  if (ndeps + ndeps_noalias > 0 && ompt_enabled.ompt_callback_dependences) {
    kmp_int32 i;

    int ompt_ndeps = ndeps + ndeps_noalias;
    ompt_dependence_t *ompt_deps = (ompt_dependence_t *)KMP_OMPT_DEPS_ALLOC(
        thread, (ndeps + ndeps_noalias) * sizeof(ompt_dependence_t));

    KMP_ASSERT(ompt_deps != NULL);

    for (i = 0; i < ndeps; i++) {
      ompt_deps[i].variable.ptr = (void *)dep_list[i].base_addr;
      if (dep_list[i].flags.in && dep_list[i].flags.out)
        ompt_deps[i].dependence_type = ompt_dependence_type_inout;
      else if (dep_list[i].flags.out)
        ompt_deps[i].dependence_type = ompt_dependence_type_out;
      else if (dep_list[i].flags.in)
        ompt_deps[i].dependence_type = ompt_dependence_type_in;
      else if (dep_list[i].flags.mtx)
        ompt_deps[i].dependence_type = ompt_dependence_type_mutexinoutset;
    }
    for (i = 0; i < ndeps_noalias; i++) {
      ompt_deps[ndeps + i].variable.ptr = (void *)noalias_dep_list[i].base_addr;
      if (noalias_dep_list[i].flags.in && noalias_dep_list[i].flags.out)
        ompt_deps[ndeps + i].dependence_type = ompt_dependence_type_inout;
      else if (noalias_dep_list[i].flags.out)
        ompt_deps[ndeps + i].dependence_type = ompt_dependence_type_out;
      else if (noalias_dep_list[i].flags.in)
        ompt_deps[ndeps + i].dependence_type = ompt_dependence_type_in;
      else if (noalias_dep_list[i].flags.mtx)
        ompt_deps[ndeps + i].dependence_type =
            ompt_dependence_type_mutexinoutset;
    }
    ompt_callbacks.ompt_callback(ompt_callback_dependences)(
        &(new_taskdata->ompt_task_info.task_data), ompt_deps, ompt_ndeps);
    /* We can now free the allocated memory for the dependencies */
    /* For OMPD we might want to delay the free until end of this function */
    KMP_OMPT_DEPS_FREE(thread, ompt_deps);
  }
#endif /* OMPT_OPTIONAL */
#endif /* OMPT_SUPPORT */

  bool serial = current_task->td_flags.team_serial ||
                current_task->td_flags.tasking_ser ||
                current_task->td_flags.final;
  kmp_task_team_t *task_team = thread->th.th_task_team;
  serial = serial && !(task_team && task_team->tt.tt_found_proxy_tasks);

  if (!serial && (ndeps > 0 || ndeps_noalias > 0)) {
    /* if no dependencies have been tracked yet, create the dependence hash */
    if (current_task->td_dephash == NULL)
      current_task->td_dephash = __kmp_dephash_create(thread, current_task);

#if USE_FAST_MEMORY
    kmp_depnode_t *node =
        (kmp_depnode_t *)__kmp_fast_allocate(thread, sizeof(kmp_depnode_t));
#else
    kmp_depnode_t *node =
        (kmp_depnode_t *)__kmp_thread_malloc(thread, sizeof(kmp_depnode_t));
#endif

    __kmp_init_node(node);
    new_taskdata->td_depnode = node;

    if (__kmp_check_deps(gtid, node, new_task, &current_task->td_dephash,
                         NO_DEP_BARRIER, ndeps, dep_list, ndeps_noalias,
                         noalias_dep_list)) {
      KA_TRACE(10, ("__kmpc_omp_task_with_deps(exit): T#%d task had blocking "
                    "dependencies: "
                    "loc=%p task=%p, return: TASK_CURRENT_NOT_QUEUED\n",
                    gtid, loc_ref, new_taskdata));
#if OMPT_SUPPORT
      if (ompt_enabled.enabled) {
        current_task->ompt_task_info.frame.enter_frame = ompt_data_none;
      }
#endif
      return TASK_CURRENT_NOT_QUEUED;
    }
  } else {
    KA_TRACE(10, ("__kmpc_omp_task_with_deps(exit): T#%d ignored dependencies "
                  "for task (serialized) "
                  "loc=%p task=%p\n",
                  gtid, loc_ref, new_taskdata));
  }

  KA_TRACE(10, ("__kmpc_omp_task_with_deps(exit): T#%d task had no blocking "
                "dependencies : "
                "loc=%p task=%p, transferring to __kmp_omp_task\n",
                gtid, loc_ref, new_taskdata));

  kmp_int32 ret = __kmp_omp_task(gtid, new_task, true);
#if OMPT_SUPPORT
  if (ompt_enabled.enabled) {
    current_task->ompt_task_info.frame.enter_frame = ompt_data_none;
  }
#endif
  return ret;
}
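
/* Illustration (a hedged sketch, not part of the runtime): user code such as

     #include <omp.h>
     int x = 0, y = 0;
     void f(void) {
     #pragma omp parallel
     #pragma omp single
       {
     #pragma omp task depend(out : x) // producer
         x = 42;
     #pragma omp task depend(in : x) depend(out : y) // consumer
         y = x + 1;
       }
     }

   is lowered by the compiler so that each task directive with a depend
   clause calls __kmpc_omp_task_with_deps, passing the depend items as an
   array of kmp_depend_info_t (base_addr = (kmp_intptr_t)&x, with flags.in /
   flags.out set accordingly). The consumer's call finds the producer
   registered as last_out for &x; if the producer has not finished, the
   consumer is deferred and TASK_CURRENT_NOT_QUEUED is returned. */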

#if OMPT_SUPPORT
void __ompt_taskwait_dep_finish(kmp_taskdata_t *current_task,
                                ompt_data_t *taskwait_task_data) {
  if (ompt_enabled.ompt_callback_task_schedule) {
    ompt_data_t task_data = ompt_data_none;
    ompt_callbacks.ompt_callback(ompt_callback_task_schedule)(
        current_task ? &(current_task->ompt_task_info.task_data) : &task_data,
        ompt_task_switch, taskwait_task_data);
    ompt_callbacks.ompt_callback(ompt_callback_task_schedule)(
        taskwait_task_data, ompt_task_complete,
        current_task ? &(current_task->ompt_task_info.task_data) : &task_data);
  }
  current_task->ompt_task_info.frame.enter_frame.ptr = NULL;
  *taskwait_task_data = ompt_data_none;
}
#endif /* OMPT_SUPPORT */

void __kmpc_omp_wait_deps(ident_t *loc_ref, kmp_int32 gtid, kmp_int32 ndeps,
                          kmp_depend_info_t *dep_list, kmp_int32 ndeps_noalias,
                          kmp_depend_info_t *noalias_dep_list) {
  KA_TRACE(10, ("__kmpc_omp_wait_deps(enter): T#%d loc=%p\n", gtid, loc_ref));

  if (ndeps == 0 && ndeps_noalias == 0) {
    KA_TRACE(10, ("__kmpc_omp_wait_deps(exit): T#%d has no dependencies to "
                  "wait upon : loc=%p\n",
                  gtid, loc_ref));
    return;
  }

  kmp_info_t *thread = __kmp_threads[gtid];
  kmp_taskdata_t *current_task = thread->th.th_current_task;

#if OMPT_SUPPORT
  // this function represents a taskwait construct with depend clause
  // We signal 4 events:
  //  - creation of the taskwait task
  //  - dependences of the taskwait task
  //  - schedule and finish of the taskwait task
  ompt_data_t *taskwait_task_data = &thread->th.ompt_thread_info.task_data;
  KMP_ASSERT(taskwait_task_data->ptr == NULL);
  if (ompt_enabled.enabled) {
    if (!current_task->ompt_task_info.frame.enter_frame.ptr)
      current_task->ompt_task_info.frame.enter_frame.ptr =
          OMPT_GET_FRAME_ADDRESS(0);
    if (ompt_enabled.ompt_callback_task_create) {
      ompt_data_t task_data = ompt_data_none;
      ompt_callbacks.ompt_callback(ompt_callback_task_create)(
          current_task ? &(current_task->ompt_task_info.task_data) : &task_data,
          current_task ? &(current_task->ompt_task_info.frame) : NULL,
          taskwait_task_data,
          ompt_task_explicit | ompt_task_undeferred | ompt_task_mergeable, 1,
          OMPT_GET_RETURN_ADDRESS(0));
    }
  }

#if OMPT_OPTIONAL
  /* OMPT grab all dependences if requested by the tool */
  if (ndeps + ndeps_noalias > 0 && ompt_enabled.ompt_callback_dependences) {
    kmp_int32 i;

    int ompt_ndeps = ndeps + ndeps_noalias;
    ompt_dependence_t *ompt_deps = (ompt_dependence_t *)KMP_OMPT_DEPS_ALLOC(
        thread, (ndeps + ndeps_noalias) * sizeof(ompt_dependence_t));

    KMP_ASSERT(ompt_deps != NULL);

    for (i = 0; i < ndeps; i++) {
      ompt_deps[i].variable.ptr = (void *)dep_list[i].base_addr;
      if (dep_list[i].flags.in && dep_list[i].flags.out)
        ompt_deps[i].dependence_type = ompt_dependence_type_inout;
      else if (dep_list[i].flags.out)
        ompt_deps[i].dependence_type = ompt_dependence_type_out;
      else if (dep_list[i].flags.in)
        ompt_deps[i].dependence_type = ompt_dependence_type_in;
      else if (dep_list[i].flags.mtx)
        ompt_deps[i].dependence_type = ompt_dependence_type_mutexinoutset;
    }
    for (i = 0; i < ndeps_noalias; i++) {
      ompt_deps[ndeps + i].variable.ptr = (void *)noalias_dep_list[i].base_addr;
      if (noalias_dep_list[i].flags.in && noalias_dep_list[i].flags.out)
        ompt_deps[ndeps + i].dependence_type = ompt_dependence_type_inout;
      else if (noalias_dep_list[i].flags.out)
        ompt_deps[ndeps + i].dependence_type = ompt_dependence_type_out;
      else if (noalias_dep_list[i].flags.in)
        ompt_deps[ndeps + i].dependence_type = ompt_dependence_type_in;
      else if (noalias_dep_list[i].flags.mtx)
        ompt_deps[ndeps + i].dependence_type =
            ompt_dependence_type_mutexinoutset;
    }
    ompt_callbacks.ompt_callback(ompt_callback_dependences)(
        taskwait_task_data, ompt_deps, ompt_ndeps);
    /* We can now free the allocated memory for the dependencies */
    /* For OMPD we might want to delay the free until end of this function */
    KMP_OMPT_DEPS_FREE(thread, ompt_deps);
    ompt_deps = NULL;
  }
#endif /* OMPT_OPTIONAL */
#endif /* OMPT_SUPPORT */

  // We can return immediately as:
  //  - dependences are not computed in serial teams (except with proxy tasks)
  //  - if the dephash is not yet created it means we have nothing to wait for
  bool ignore = current_task->td_flags.team_serial ||
                current_task->td_flags.tasking_ser ||
                current_task->td_flags.final;
  ignore = ignore && thread->th.th_task_team != NULL &&
           thread->th.th_task_team->tt.tt_found_proxy_tasks == FALSE;
  ignore = ignore || current_task->td_dephash == NULL;

  if (ignore) {
    KA_TRACE(10, ("__kmpc_omp_wait_deps(exit): T#%d has no blocking "
                  "dependencies : loc=%p\n",
                  gtid, loc_ref));
#if OMPT_SUPPORT
    __ompt_taskwait_dep_finish(current_task, taskwait_task_data);
#endif /* OMPT_SUPPORT */
    return;
  }

  kmp_depnode_t node = {0};
  __kmp_init_node(&node);

  if (!__kmp_check_deps(gtid, &node, NULL, &current_task->td_dephash,
                        DEP_BARRIER, ndeps, dep_list, ndeps_noalias,
                        noalias_dep_list)) {
    KA_TRACE(10, ("__kmpc_omp_wait_deps(exit): T#%d has no blocking "
                  "dependencies : loc=%p\n",
                  gtid, loc_ref));
#if OMPT_SUPPORT
    __ompt_taskwait_dep_finish(current_task, taskwait_task_data);
#endif /* OMPT_SUPPORT */
    return;
  }

  int thread_finished = FALSE;
  kmp_flag_32 flag((std::atomic<kmp_uint32> *)&node.dn.npredecessors, 0U);
  while (node.dn.npredecessors > 0) {
    flag.execute_tasks(thread, gtid, FALSE,
                       &thread_finished USE_ITT_BUILD_ARG(NULL),
                       __kmp_task_stealing_constraint);
  }

#if OMPT_SUPPORT
  __ompt_taskwait_dep_finish(current_task, taskwait_task_data);
#endif /* OMPT_SUPPORT */
  KA_TRACE(10, ("__kmpc_omp_wait_deps(exit): T#%d finished waiting : loc=%p\n",
                gtid, loc_ref));
}
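
/* Illustration (a hedged sketch, not part of the runtime): a construct such
   as

     #pragma omp task depend(out : x)
     x = compute(); // compute() is a hypothetical helper
     #pragma omp taskwait depend(in : x) // OpenMP 5.0 taskwait with depend

   may be lowered into a call of the form

     __kmpc_omp_wait_deps(&loc, gtid, 1, deps, 0, NULL);

   with deps[0].base_addr = (kmp_intptr_t)&x and deps[0].flags.in = 1. The
   runtime then builds the stack-allocated depnode above with DEP_BARRIER
   semantics, and the encountering thread executes other tasks through
   flag.execute_tasks() until every predecessor writing x has completed. */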