1//===------ omptarget.cpp - Target independent OpenMP target RTL -- C++ -*-===//
2//
3// The LLVM Compiler Infrastructure
4//
5// This file is dual licensed under the MIT and the University of Illinois Open
6// Source Licenses. See LICENSE.txt for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// Implementation of the interface to be used by Clang during the codegen of a
11// target region.
12//
13//===----------------------------------------------------------------------===//
14
15#include <algorithm>
16#include <cassert>
17#include <climits>
18#include <cstdlib>
19#include <cstring>
20#include <dlfcn.h>
21#include <list>
22#include <map>
23#include <mutex>
24#include <string>
25#include <vector>
26
27// Header file global to this project
28#include "omptarget.h"
29
30#ifdef OMPTARGET_DEBUG
31static int DebugLevel = 0;
32
33#define DP(...) \
34 do { \
35 if (DebugLevel > 0) { \
36 DEBUGP("Libomptarget", __VA_ARGS__); \
37 } \
38 } while (false)
39#else // OMPTARGET_DEBUG
40#define DP(...) {}
41#endif // OMPTARGET_DEBUG
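
// Usage sketch (illustrative): in an OMPTARGET_DEBUG build, setting the
// LIBOMPTARGET_DEBUG environment variable to a positive integer (parsed in
// RTLsTy::LoadRTLs() below) enables traces such as
//   DP("Entry %2d: Base=" DPxMOD ", Begin=" DPxMOD "\n", i, DPxPTR(base),
//      DPxPTR(begin));
// which are emitted with the "Libomptarget" prefix; without OMPTARGET_DEBUG,
// DP() expands to an empty block.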
42
43#define INF_REF_CNT (LONG_MAX>>1) // leave room for additions/subtractions
44#define CONSIDERED_INF(x) (x > (INF_REF_CNT>>1))
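
// Illustrative check (not required by the runtime): both macros above are
// plain constant expressions, so their intended relationship can be stated
// directly. An entry pinned with INF_REF_CNT keeps being CONSIDERED_INF even
// after a large number of reference-count updates.
static_assert(CONSIDERED_INF(INF_REF_CNT), "pinned entries are detected");
static_assert(CONSIDERED_INF(INF_REF_CNT - (1 << 20)),
              "and remain detected after many decrements");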
45
46// List of all plugins that can support offloading.
47static const char *RTLNames[] = {
48 /* PowerPC target */ "libomptarget.rtl.ppc64.so",
49 /* x86_64 target */ "libomptarget.rtl.x86_64.so",
50 /* CUDA target */ "libomptarget.rtl.cuda.so",
51 /* AArch64 target */ "libomptarget.rtl.aarch64.so"};
52
53// forward declarations
54struct RTLInfoTy;
55static int target(int32_t device_id, void *host_ptr, int32_t arg_num,
56 void **args_base, void **args, int64_t *arg_sizes, int64_t *arg_types,
57 int32_t team_num, int32_t thread_limit, int IsTeamConstruct);
58
59/// Map between host data and target data.
60struct HostDataToTargetTy {
61 uintptr_t HstPtrBase; // host info.
62 uintptr_t HstPtrBegin;
63 uintptr_t HstPtrEnd; // non-inclusive.
64
65 uintptr_t TgtPtrBegin; // target info.
66
67 long RefCount;
68
69 HostDataToTargetTy()
70 : HstPtrBase(0), HstPtrBegin(0), HstPtrEnd(0),
71 TgtPtrBegin(0), RefCount(0) {}
72 HostDataToTargetTy(uintptr_t BP, uintptr_t B, uintptr_t E, uintptr_t TB)
73 : HstPtrBase(BP), HstPtrBegin(B), HstPtrEnd(E),
74 TgtPtrBegin(TB), RefCount(1) {}
75 HostDataToTargetTy(uintptr_t BP, uintptr_t B, uintptr_t E, uintptr_t TB,
76 long RF)
77 : HstPtrBase(BP), HstPtrBegin(B), HstPtrEnd(E),
78 TgtPtrBegin(TB), RefCount(RF) {}
79};
80
81typedef std::list<HostDataToTargetTy> HostDataToTargetListTy;
82
83struct LookupResult {
84 struct {
85 unsigned IsContained : 1;
86 unsigned ExtendsBefore : 1;
87 unsigned ExtendsAfter : 1;
88 } Flags;
89
90 HostDataToTargetListTy::iterator Entry;
91
92 LookupResult() : Flags({0,0,0}), Entry() {}
93};
94
95/// Map for shadow pointers
96struct ShadowPtrValTy {
97 void *HstPtrVal;
98 void *TgtPtrAddr;
99 void *TgtPtrVal;
100};
101typedef std::map<void *, ShadowPtrValTy> ShadowPtrListTy;
102
103/// Lists of pending constructors and destructors for a registered library.
104struct PendingCtorDtorListsTy {
105 std::list<void *> PendingCtors;
106 std::list<void *> PendingDtors;
107};
108typedef std::map<__tgt_bin_desc *, PendingCtorDtorListsTy>
109 PendingCtorsDtorsPerLibrary;
110
111struct DeviceTy {
112 int32_t DeviceID;
113 RTLInfoTy *RTL;
114 int32_t RTLDeviceID;
115
116 bool IsInit;
117 std::once_flag InitFlag;
118 bool HasPendingGlobals;
119
120 HostDataToTargetListTy HostDataToTargetMap;
121 PendingCtorsDtorsPerLibrary PendingCtorsDtors;
122
123 ShadowPtrListTy ShadowPtrMap;
124
125 std::mutex DataMapMtx, PendingGlobalsMtx, ShadowMtx;
126
127 uint64_t loopTripCnt;
128
129 DeviceTy(RTLInfoTy *RTL)
130 : DeviceID(-1), RTL(RTL), RTLDeviceID(-1), IsInit(false), InitFlag(),
131 HasPendingGlobals(false), HostDataToTargetMap(),
132 PendingCtorsDtors(), ShadowPtrMap(), DataMapMtx(), PendingGlobalsMtx(),
133 ShadowMtx(), loopTripCnt(0) {}
134
135 // The existence of mutexes makes DeviceTy non-copyable. We need to
136 // provide a copy constructor and an assignment operator explicitly.
137 DeviceTy(const DeviceTy &d)
138 : DeviceID(d.DeviceID), RTL(d.RTL), RTLDeviceID(d.RTLDeviceID),
139 IsInit(d.IsInit), InitFlag(), HasPendingGlobals(d.HasPendingGlobals),
140 HostDataToTargetMap(d.HostDataToTargetMap),
141 PendingCtorsDtors(d.PendingCtorsDtors), ShadowPtrMap(d.ShadowPtrMap),
142 DataMapMtx(), PendingGlobalsMtx(),
143 ShadowMtx(), loopTripCnt(d.loopTripCnt) {}
144
145 DeviceTy& operator=(const DeviceTy &d) {
146 DeviceID = d.DeviceID;
147 RTL = d.RTL;
148 RTLDeviceID = d.RTLDeviceID;
149 IsInit = d.IsInit;
150 HasPendingGlobals = d.HasPendingGlobals;
151 HostDataToTargetMap = d.HostDataToTargetMap;
152 PendingCtorsDtors = d.PendingCtorsDtors;
153 ShadowPtrMap = d.ShadowPtrMap;
154 loopTripCnt = d.loopTripCnt;
155
156 return *this;
157 }
158
159 long getMapEntryRefCnt(void *HstPtrBegin);
160 LookupResult lookupMapping(void *HstPtrBegin, int64_t Size);
161 void *getOrAllocTgtPtr(void *HstPtrBegin, void *HstPtrBase, int64_t Size,
162 bool &IsNew, bool IsImplicit, bool UpdateRefCount = true);
163 void *getTgtPtrBegin(void *HstPtrBegin, int64_t Size);
164 void *getTgtPtrBegin(void *HstPtrBegin, int64_t Size, bool &IsLast,
165 bool UpdateRefCount);
166 int deallocTgtPtr(void *TgtPtrBegin, int64_t Size, bool ForceDelete);
167 int associatePtr(void *HstPtrBegin, void *TgtPtrBegin, int64_t Size);
168 int disassociatePtr(void *HstPtrBegin);
169
170 // calls to RTL
171 int32_t initOnce();
172 __tgt_target_table *load_binary(void *Img);
173
174 int32_t data_submit(void *TgtPtrBegin, void *HstPtrBegin, int64_t Size);
175 int32_t data_retrieve(void *HstPtrBegin, void *TgtPtrBegin, int64_t Size);
176
177 int32_t run_region(void *TgtEntryPtr, void **TgtVarsPtr,
178 ptrdiff_t *TgtOffsets, int32_t TgtVarsSize);
179 int32_t run_team_region(void *TgtEntryPtr, void **TgtVarsPtr,
180 ptrdiff_t *TgtOffsets, int32_t TgtVarsSize, int32_t NumTeams,
181 int32_t ThreadLimit, uint64_t LoopTripCount);
182
183private:
184 // Call to RTL
185 void init(); // To be called only via DeviceTy::initOnce()
186};
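
#if 0
// Illustrative (non-compiled) sketch of how libomptarget drives a DeviceTy
// when mapping one buffer and launching one target region; error handling is
// omitted. 'Dev', 'HostBuf', 'Size' and 'EntryPtr' are hypothetical
// placeholders, not names used elsewhere in this file.
void sketch_map_and_run(DeviceTy &Dev, void *HostBuf, int64_t Size,
                        void *EntryPtr) {
  bool IsNew;
  void *TgtPtr = Dev.getOrAllocTgtPtr(HostBuf, HostBuf, Size, IsNew,
                                      /*IsImplicit=*/false);
  if (IsNew)
    Dev.data_submit(TgtPtr, HostBuf, Size);       // copy host -> device
  ptrdiff_t Offset = 0;
  Dev.run_region(EntryPtr, &TgtPtr, &Offset, 1);  // launch target region
  bool IsLast;
  Dev.getTgtPtrBegin(HostBuf, Size, IsLast, /*UpdateRefCount=*/true);
  if (IsLast) {
    Dev.data_retrieve(HostBuf, TgtPtr, Size);     // copy device -> host
    Dev.deallocTgtPtr(HostBuf, Size, /*ForceDelete=*/false);
  }
}
#endif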
187
188/// Map between Device ID (i.e. openmp device id) and its DeviceTy.
189typedef std::vector<DeviceTy> DevicesTy;
190static DevicesTy Devices;
191
192struct RTLInfoTy {
193 typedef int32_t(is_valid_binary_ty)(void *);
194 typedef int32_t(number_of_devices_ty)();
195 typedef int32_t(init_device_ty)(int32_t);
196 typedef __tgt_target_table *(load_binary_ty)(int32_t, void *);
197 typedef void *(data_alloc_ty)(int32_t, int64_t, void *);
198 typedef int32_t(data_submit_ty)(int32_t, void *, void *, int64_t);
199 typedef int32_t(data_retrieve_ty)(int32_t, void *, void *, int64_t);
200 typedef int32_t(data_delete_ty)(int32_t, void *);
201 typedef int32_t(run_region_ty)(int32_t, void *, void **, ptrdiff_t *,
202 int32_t);
203 typedef int32_t(run_team_region_ty)(int32_t, void *, void **, ptrdiff_t *,
204 int32_t, int32_t, int32_t, uint64_t);
205
206 int32_t Idx; // RTL index: equals the number of devices
207 // registered by RTLs that were loaded before this
208 // one, i.e. the OpenMP device ID of the first
209 // device to be registered with this RTL.
210 int32_t NumberOfDevices; // Number of devices this RTL deals with.
211 std::vector<DeviceTy *> Devices; // one per device (NumberOfDevices in total).
212
213 void *LibraryHandler;
214
215#ifdef OMPTARGET_DEBUG
216 std::string RTLName;
217#endif
218
219 // Functions implemented in the RTL.
220 is_valid_binary_ty *is_valid_binary;
221 number_of_devices_ty *number_of_devices;
222 init_device_ty *init_device;
223 load_binary_ty *load_binary;
224 data_alloc_ty *data_alloc;
225 data_submit_ty *data_submit;
226 data_retrieve_ty *data_retrieve;
227 data_delete_ty *data_delete;
228 run_region_ty *run_region;
229 run_team_region_ty *run_team_region;
230
231 // Whether there are images associated with this RTL.
232 bool isUsed;
233
234 // Mutex for thread-safety when calling RTL interface functions.
235 // It is easier to enforce thread-safety at the libomptarget level,
236 // so that developers of new RTLs do not have to worry about it.
237 std::mutex Mtx;
238
239 // The existence of the mutex above makes RTLInfoTy non-copyable.
240 // We need to provide a copy constructor explicitly.
241 RTLInfoTy()
242 : Idx(-1), NumberOfDevices(-1), Devices(), LibraryHandler(0),
243#ifdef OMPTARGET_DEBUG
244 RTLName(),
245#endif
246 is_valid_binary(0), number_of_devices(0), init_device(0),
247 load_binary(0), data_alloc(0), data_submit(0), data_retrieve(0),
248 data_delete(0), run_region(0), run_team_region(0), isUsed(false),
249 Mtx() {}
250
251 RTLInfoTy(const RTLInfoTy &r) : Mtx() {
252 Idx = r.Idx;
253 NumberOfDevices = r.NumberOfDevices;
254 Devices = r.Devices;
255 LibraryHandler = r.LibraryHandler;
256#ifdef OMPTARGET_DEBUG
257 RTLName = r.RTLName;
258#endif
259 is_valid_binary = r.is_valid_binary;
260 number_of_devices = r.number_of_devices;
261 init_device = r.init_device;
262 load_binary = r.load_binary;
263 data_alloc = r.data_alloc;
264 data_submit = r.data_submit;
265 data_retrieve = r.data_retrieve;
266 data_delete = r.data_delete;
267 run_region = r.run_region;
268 run_team_region = r.run_team_region;
269 isUsed = r.isUsed;
270 }
271};
272
273/// RTLs identified in the system.
274class RTLsTy {
275private:
276 // Mutex-like object to guarantee thread-safety and unique initialization
277 // (i.e. the library attempts to load the RTLs (plugins) only once).
278 std::once_flag initFlag;
279 void LoadRTLs(); // not thread-safe
280
281public:
282 // List of the detected runtime libraries.
283 std::list<RTLInfoTy> AllRTLs;
284
285 // Array of pointers to the detected runtime libraries that have compatible
286 // binaries.
287 std::vector<RTLInfoTy *> UsedRTLs;
288
289 explicit RTLsTy() {}
290
291 // Load all the runtime libraries (plugins) if not done before.
292 void LoadRTLsOnce();
293};
294
295void RTLsTy::LoadRTLs() {
296#ifdef OMPTARGET_DEBUG
297 if (char *envStr = getenv("LIBOMPTARGET_DEBUG")) {
298 DebugLevel = std::stoi(envStr);
299 }
300#endif // OMPTARGET_DEBUG
301
302 // Parse environment variable OMP_TARGET_OFFLOAD (if set)
303 char *envStr = getenv("OMP_TARGET_OFFLOAD");
304 if (envStr && !strcmp(envStr, "DISABLED")) {
305 DP("Target offloading disabled by environment\n");
306 return;
307 }
308
309 DP("Loading RTLs...\n");
310
311 // Attempt to open all the plugins and, if they exist, check that the
312 // interface is correct and that they support at least one device.
313 for (auto *Name : RTLNames) {
314 DP("Loading library '%s'...\n", Name);
315 void *dynlib_handle = dlopen(Name, RTLD_NOW);
316
317 if (!dynlib_handle) {
318 // Library does not exist or cannot be found.
319 DP("Unable to load library '%s': %s!\n", Name, dlerror());
320 continue;
321 }
322
323 DP("Successfully loaded library '%s'!\n", Name);
324
325 // Retrieve the RTL information from the runtime library.
326 RTLInfoTy R;
327
328 R.LibraryHandler = dynlib_handle;
329 R.isUsed = false;
330
331#ifdef OMPTARGET_DEBUG
332 R.RTLName = Name;
333#endif
334
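    // Note on the pattern below (illustrative): each required entry point is
    // resolved with dlsym() and stored through the function-pointer member's
    // address cast to 'void **'. ISO C++ does not define conversions between
    // object and function pointers, but POSIX dlsym() returns 'void *', so
    //   *((void**) &R.init_device) = dlsym(dynlib_handle, "__tgt_rtl_init_device");
    // is the usual way to keep the assignment well-formed for the compilers
    // this runtime targets. A plugin missing any mandatory symbol is skipped.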
335 if (!(*((void**) &R.is_valid_binary) = dlsym(
336 dynlib_handle, "__tgt_rtl_is_valid_binary")))
337 continue;
338 if (!(*((void**) &R.number_of_devices) = dlsym(
339 dynlib_handle, "__tgt_rtl_number_of_devices")))
340 continue;
341 if (!(*((void**) &R.init_device) = dlsym(
342 dynlib_handle, "__tgt_rtl_init_device")))
343 continue;
344 if (!(*((void**) &R.load_binary) = dlsym(
345 dynlib_handle, "__tgt_rtl_load_binary")))
346 continue;
347 if (!(*((void**) &R.data_alloc) = dlsym(
348 dynlib_handle, "__tgt_rtl_data_alloc")))
349 continue;
350 if (!(*((void**) &R.data_submit) = dlsym(
351 dynlib_handle, "__tgt_rtl_data_submit")))
352 continue;
353 if (!(*((void**) &R.data_retrieve) = dlsym(
354 dynlib_handle, "__tgt_rtl_data_retrieve")))
355 continue;
356 if (!(*((void**) &R.data_delete) = dlsym(
357 dynlib_handle, "__tgt_rtl_data_delete")))
358 continue;
359 if (!(*((void**) &R.run_region) = dlsym(
360 dynlib_handle, "__tgt_rtl_run_target_region")))
361 continue;
362 if (!(*((void**) &R.run_team_region) = dlsym(
363 dynlib_handle, "__tgt_rtl_run_target_team_region")))
364 continue;
365
366 // No devices are supported by this RTL?
367 if (!(R.NumberOfDevices = R.number_of_devices())) {
368 DP("No devices supported in this RTL\n");
369 continue;
370 }
371
372 DP("Registering RTL %s supporting %d devices!\n",
373 R.RTLName.c_str(), R.NumberOfDevices);
374
375 // The RTL is valid! Will save the information in the RTLs list.
376 AllRTLs.push_back(R);
377 }
378
379 DP("RTLs loaded!\n");
380
381 return;
382}
383
384void RTLsTy::LoadRTLsOnce() {
385 // RTL.LoadRTLs() is called only once in a thread-safe fashion.
386 std::call_once(initFlag, &RTLsTy::LoadRTLs, this);
387}
388
389static RTLsTy RTLs;
390static std::mutex RTLsMtx;
391
392/// Map between the host entry begin and the translation table. Each
393/// registered library gets one TranslationTable. Use the map from
394/// __tgt_offload_entry so that we may quickly determine whether we
395/// are trying to (re)register an existing lib or really have a new one.
396struct TranslationTable {
397 __tgt_target_table HostTable;
398
399 // Image assigned to a given device.
400 std::vector<__tgt_device_image *> TargetsImages; // One image per device ID.
401
402 // Table of entry points or NULL if it was not already computed.
403 std::vector<__tgt_target_table *> TargetsTable; // One table per device ID.
404};
405typedef std::map<__tgt_offload_entry *, TranslationTable>
406 HostEntriesBeginToTransTableTy;
407static HostEntriesBeginToTransTableTy HostEntriesBeginToTransTable;
408static std::mutex TrlTblMtx;
409
410/// Map between the host ptr and a table index
411struct TableMap {
412 TranslationTable *Table; // table associated with the host ptr.
413 uint32_t Index; // index in which the host ptr translated entry is found.
414 TableMap() : Table(0), Index(0) {}
415 TableMap(TranslationTable *table, uint32_t index)
416 : Table(table), Index(index) {}
417};
418typedef std::map<void *, TableMap> HostPtrToTableMapTy;
419static HostPtrToTableMapTy HostPtrToTableMap;
420static std::mutex TblMapMtx;
421
422/// Check whether a device has an associated RTL and initialize it if it's not
423/// already initialized.
424static bool device_is_ready(int device_num) {
425 DP("Checking whether device %d is ready.\n", device_num);
426 // Devices.size() can only change while registering a new
427 // library, so acquire the RTLs' mutex to read it consistently.
428 RTLsMtx.lock();
429 size_t Devices_size = Devices.size();
430 RTLsMtx.unlock();
431 if (Devices_size <= (size_t)device_num) {
432 DP("Device ID %d does not have a matching RTL\n", device_num);
433 return false;
434 }
435
436 // Get device info
437 DeviceTy &Device = Devices[device_num];
438
439 DP("Is the device %d (local ID %d) initialized? %d\n", device_num,
440 Device.RTLDeviceID, Device.IsInit);
441
442 // Init the device if not done before
443 if (!Device.IsInit && Device.initOnce() != OFFLOAD_SUCCESS) {
444 DP("Failed to init device %d\n", device_num);
445 return false;
446 }
447
448 DP("Device %d is ready to use.\n", device_num);
449
450 return true;
451}
452
453////////////////////////////////////////////////////////////////////////////////
454// Target API functions
455//
456EXTERN int omp_get_num_devices(void) {
457 RTLsMtx.lock();
458 size_t Devices_size = Devices.size();
459 RTLsMtx.unlock();
460
461 DP("Call to omp_get_num_devices returning %zd\n", Devices_size);
462
463 return Devices_size;
464}
465
466EXTERN int omp_get_initial_device(void) {
467 DP("Call to omp_get_initial_device returning %d\n", HOST_DEVICE);
468 return HOST_DEVICE;
469}
470
471EXTERN void *omp_target_alloc(size_t size, int device_num) {
472 DP("Call to omp_target_alloc for device %d requesting %zu bytes\n",
473 device_num, size);
474
475 if (size <= 0) {
476 DP("Call to omp_target_alloc with non-positive length\n");
477 return NULL;
478 }
479
480 void *rc = NULL;
481
482 if (device_num == omp_get_initial_device()) {
483 rc = malloc(size);
484 DP("omp_target_alloc returns host ptr " DPxMOD "\n", DPxPTR(rc));
485 return rc;
486 }
487
488 if (!device_is_ready(device_num)) {
489 DP("omp_target_alloc returns NULL ptr\n");
490 return NULL;
491 }
492
493 DeviceTy &Device = Devices[device_num];
494 rc = Device.RTL->data_alloc(Device.RTLDeviceID, size, NULL);
495 DP("omp_target_alloc returns device ptr " DPxMOD "\n", DPxPTR(rc));
496 return rc;
497}
498
499EXTERN void omp_target_free(void *device_ptr, int device_num) {
500 DP("Call to omp_target_free for device %d and address " DPxMOD "\n",
501 device_num, DPxPTR(device_ptr));
502
503 if (!device_ptr) {
504 DP("Call to omp_target_free with NULL ptr\n");
505 return;
506 }
507
508 if (device_num == omp_get_initial_device()) {
509 free(device_ptr);
510 DP("omp_target_free deallocated host ptr\n");
511 return;
512 }
513
514 if (!device_is_ready(device_num)) {
515 DP("omp_target_free returns, nothing to do\n");
516 return;
517 }
518
519 DeviceTy &Device = Devices[device_num];
520 Device.RTL->data_delete(Device.RTLDeviceID, (void *)device_ptr);
521 DP("omp_target_free deallocated device ptr\n");
522}
523
524EXTERN int omp_target_is_present(void *ptr, int device_num) {
525 DP("Call to omp_target_is_present for device %d and address " DPxMOD "\n",
526 device_num, DPxPTR(ptr));
527
528 if (!ptr) {
529 DP("Call to omp_target_is_present with NULL ptr, returning false\n");
530 return false;
531 }
532
533 if (device_num == omp_get_initial_device()) {
534 DP("Call to omp_target_is_present on host, returning true\n");
535 return true;
536 }
537
538 RTLsMtx.lock();
539 size_t Devices_size = Devices.size();
540 RTLsMtx.unlock();
541 if (Devices_size <= (size_t)device_num) {
542 DP("Call to omp_target_is_present with invalid device ID, returning "
543 "false\n");
544 return false;
545 }
546
547 DeviceTy& Device = Devices[device_num];
548 bool IsLast; // not used
549 int rc = (Device.getTgtPtrBegin(ptr, 0, IsLast, false) != NULL);
550 DP("Call to omp_target_is_present returns %d\n", rc);
551 return rc;
552}
553
554EXTERN int omp_target_memcpy(void *dst, void *src, size_t length,
555 size_t dst_offset, size_t src_offset, int dst_device, int src_device) {
556 DP("Call to omp_target_memcpy, dst device %d, src device %d, "
557 "dst addr " DPxMOD ", src addr " DPxMOD ", dst offset %zu, "
558 "src offset %zu, length %zu\n", dst_device, src_device, DPxPTR(dst),
559 DPxPTR(src), dst_offset, src_offset, length);
560
561 if (!dst || !src || length <= 0) {
562 DP("Call to omp_target_memcpy with invalid arguments\n");
563 return OFFLOAD_FAIL;
564 }
565
566 if (src_device != omp_get_initial_device() && !device_is_ready(src_device)) {
567 DP("omp_target_memcpy returns OFFLOAD_FAIL\n");
568 return OFFLOAD_FAIL;
569 }
570
571 if (dst_device != omp_get_initial_device() && !device_is_ready(dst_device)) {
572 DP("omp_target_memcpy returns OFFLOAD_FAIL\n");
573 return OFFLOAD_FAIL;
574 }
575
576 int rc = OFFLOAD_SUCCESS;
577 void *srcAddr = (char *)src + src_offset;
578 void *dstAddr = (char *)dst + dst_offset;
579
580 if (src_device == omp_get_initial_device() &&
581 dst_device == omp_get_initial_device()) {
582 DP("copy from host to host\n");
583 const void *p = memcpy(dstAddr, srcAddr, length);
584 if (p == NULL)
585 rc = OFFLOAD_FAIL;
586 } else if (src_device == omp_get_initial_device()) {
587 DP("copy from host to device\n");
588 DeviceTy& DstDev = Devices[dst_device];
589 rc = DstDev.data_submit(dstAddr, srcAddr, length);
590 } else if (dst_device == omp_get_initial_device()) {
591 DP("copy from device to host\n");
592 DeviceTy& SrcDev = Devices[src_device];
593 rc = SrcDev.data_retrieve(dstAddr, srcAddr, length);
594 } else {
595 DP("copy from device to device\n");
596 void *buffer = malloc(length);
597 DeviceTy& SrcDev = Devices[src_device];
598 DeviceTy& DstDev = Devices[dst_device];
599 rc = SrcDev.data_retrieve(buffer, srcAddr, length);
600 if (rc == OFFLOAD_SUCCESS)
601 rc = DstDev.data_submit(dstAddr, buffer, length);
602 }
603
604 DP("omp_target_memcpy returns %d\n", rc);
605 return rc;
606}
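
#if 0
// Illustrative (non-compiled) host-side use of the device memory routines
// defined above: allocate on a device, copy a host array in, copy it back,
// and free the allocation. 'dev' is assumed to be a valid device ID.
void sketch_device_memcpy(void) {
  const size_t N = 1024;
  double host[N];
  int dev = 0;
  void *devp = omp_target_alloc(N * sizeof(double), dev);
  if (!devp)
    return;
  // host -> device
  omp_target_memcpy(devp, host, N * sizeof(double), /*dst_offset=*/0,
                    /*src_offset=*/0, /*dst_device=*/dev,
                    /*src_device=*/omp_get_initial_device());
  // device -> host
  omp_target_memcpy(host, devp, N * sizeof(double), 0, 0,
                    omp_get_initial_device(), dev);
  omp_target_free(devp, dev);
}
#endif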
607
608EXTERN int omp_target_memcpy_rect(void *dst, void *src, size_t element_size,
609 int num_dims, const size_t *volume, const size_t *dst_offsets,
610 const size_t *src_offsets, const size_t *dst_dimensions,
611 const size_t *src_dimensions, int dst_device, int src_device) {
612 DP("Call to omp_target_memcpy_rect, dst device %d, src device %d, "
613 "dst addr " DPxMOD ", src addr " DPxMOD ", dst offsets " DPxMOD ", "
614 "src offsets " DPxMOD ", dst dims " DPxMOD ", src dims " DPxMOD ", "
615 "volume " DPxMOD ", element size %zu, num_dims %d\n", dst_device,
616 src_device, DPxPTR(dst), DPxPTR(src), DPxPTR(dst_offsets),
617 DPxPTR(src_offsets), DPxPTR(dst_dimensions), DPxPTR(src_dimensions),
618 DPxPTR(volume), element_size, num_dims);
619
620 if (!(dst || src)) {
621 DP("Call to omp_target_memcpy_rect returns max supported dimensions %d\n",
622 INT_MAX);
623 return INT_MAX;
624 }
625
626 if (!dst || !src || element_size < 1 || num_dims < 1 || !volume ||
627 !dst_offsets || !src_offsets || !dst_dimensions || !src_dimensions) {
628 DP("Call to omp_target_memcpy_rect with invalid arguments\n");
629 return OFFLOAD_FAIL;
630 }
631
632 int rc;
633 if (num_dims == 1) {
634 rc = omp_target_memcpy(dst, src, element_size * volume[0],
635 element_size * dst_offsets[0], element_size * src_offsets[0],
636 dst_device, src_device);
637 } else {
638 size_t dst_slice_size = element_size;
639 size_t src_slice_size = element_size;
640 for (int i=1; i<num_dims; ++i) {
641 dst_slice_size *= dst_dimensions[i];
642 src_slice_size *= src_dimensions[i];
643 }
644
645 size_t dst_off = dst_offsets[0] * dst_slice_size;
646 size_t src_off = src_offsets[0] * src_slice_size;
647 for (size_t i=0; i<volume[0]; ++i) {
648 rc = omp_target_memcpy_rect((char *) dst + dst_off + dst_slice_size * i,
649 (char *) src + src_off + src_slice_size * i, element_size,
650 num_dims - 1, volume + 1, dst_offsets + 1, src_offsets + 1,
651 dst_dimensions + 1, src_dimensions + 1, dst_device, src_device);
652
653 if (rc) {
654 DP("Recursive call to omp_target_memcpy_rect returns unsuccessfully\n");
655 return rc;
656 }
657 }
658 }
659
660 DP("omp_target_memcpy_rect returns %d\n", rc);
661 return rc;
662}
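
#if 0
// Illustrative (non-compiled) 2-D use of omp_target_memcpy_rect: copy a 2x3
// sub-block of a 4x5 host matrix into a 2x3 region of a 6x6 device matrix,
// starting at row 1 / column 2 on both sides. All names are hypothetical.
void sketch_memcpy_rect(void *dst6x6_dev, double src4x5_host[4][5], int dev) {
  size_t volume[2]         = {2, 3};   // rows, columns to copy
  size_t dst_offsets[2]    = {1, 2};
  size_t src_offsets[2]    = {1, 2};
  size_t dst_dimensions[2] = {6, 6};
  size_t src_dimensions[2] = {4, 5};
  omp_target_memcpy_rect(dst6x6_dev, src4x5_host, sizeof(double),
                         /*num_dims=*/2, volume, dst_offsets, src_offsets,
                         dst_dimensions, src_dimensions,
                         /*dst_device=*/dev,
                         /*src_device=*/omp_get_initial_device());
}
#endif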
663
664EXTERN int omp_target_associate_ptr(void *host_ptr, void *device_ptr,
665 size_t size, size_t device_offset, int device_num) {
666 DP("Call to omp_target_associate_ptr with host_ptr " DPxMOD ", "
667 "device_ptr " DPxMOD ", size %zu, device_offset %zu, device_num %d\n",
668 DPxPTR(host_ptr), DPxPTR(device_ptr), size, device_offset, device_num);
669
670 if (!host_ptr || !device_ptr || size <= 0) {
671 DP("Call to omp_target_associate_ptr with invalid arguments\n");
672 return OFFLOAD_FAIL;
673 }
674
675 if (device_num == omp_get_initial_device()) {
676 DP("omp_target_associate_ptr: no association possible on the host\n");
677 return OFFLOAD_FAIL;
678 }
679
680 if (!device_is_ready(device_num)) {
681 DP("omp_target_associate_ptr returns OFFLOAD_FAIL\n");
682 return OFFLOAD_FAIL;
683 }
684
685 DeviceTy& Device = Devices[device_num];
686 void *device_addr = (void *)((uint64_t)device_ptr + (uint64_t)device_offset);
687 int rc = Device.associatePtr(host_ptr, device_addr, size);
688 DP("omp_target_associate_ptr returns %d\n", rc);
689 return rc;
690}
691
692EXTERN int omp_target_disassociate_ptr(void *host_ptr, int device_num) {
693 DP("Call to omp_target_disassociate_ptr with host_ptr " DPxMOD ", "
694 "device_num %d\n", DPxPTR(host_ptr), device_num);
695
696 if (!host_ptr) {
697 DP("Call to omp_target_associate_ptr with invalid host_ptr\n");
698 return OFFLOAD_FAIL;
699 }
700
701 if (device_num == omp_get_initial_device()) {
702 DP("omp_target_disassociate_ptr: no association possible on the host\n");
703 return OFFLOAD_FAIL;
704 }
705
706 if (!device_is_ready(device_num)) {
707 DP("omp_target_disassociate_ptr returns OFFLOAD_FAIL\n");
708 return OFFLOAD_FAIL;
709 }
710
711 DeviceTy& Device = Devices[device_num];
712 int rc = Device.disassociatePtr(host_ptr);
713 DP("omp_target_disassociate_ptr returns %d\n", rc);
714 return rc;
715}
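
#if 0
// Illustrative (non-compiled) pairing of the two association routines above:
// a buffer obtained from omp_target_alloc() is associated with a host pointer
// so that later map clauses reuse it, and disassociated before release.
// 'host_buf', 'size' and 'dev' are hypothetical placeholders.
void sketch_associate(void *host_buf, size_t size, int dev) {
  void *dev_buf = omp_target_alloc(size, dev);
  if (!dev_buf)
    return;
  omp_target_associate_ptr(host_buf, dev_buf, size, /*device_offset=*/0, dev);
  // ... target regions mapping host_buf now reuse dev_buf; the entry's
  // RefCount is pinned to INF_REF_CNT by DeviceTy::associatePtr below ...
  omp_target_disassociate_ptr(host_buf, dev);
  omp_target_free(dev_buf, dev);
}
#endif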
716
717////////////////////////////////////////////////////////////////////////////////
718// functionality for device
719
720int DeviceTy::associatePtr(void *HstPtrBegin, void *TgtPtrBegin, int64_t Size) {
721 DataMapMtx.lock();
722
723 // Check if entry exists
724 for (auto &HT : HostDataToTargetMap) {
725 if ((uintptr_t)HstPtrBegin == HT.HstPtrBegin) {
726 // Mapping already exists
727 bool isValid = HT.HstPtrBegin == (uintptr_t) HstPtrBegin &&
728 HT.HstPtrEnd == (uintptr_t) HstPtrBegin + Size &&
729 HT.TgtPtrBegin == (uintptr_t) TgtPtrBegin;
730 DataMapMtx.unlock();
731 if (isValid) {
732 DP("Attempt to re-associate the same device ptr+offset with the same "
733 "host ptr, nothing to do\n");
734 return OFFLOAD_SUCCESS;
735 } else {
736 DP("Not allowed to re-associate a different device ptr+offset with the "
737 "same host ptr\n");
738 return OFFLOAD_FAIL;
739 }
740 }
741 }
742
743 // Mapping does not exist, allocate it
744 HostDataToTargetTy newEntry;
745
746 // Set up missing fields
747 newEntry.HstPtrBase = (uintptr_t) HstPtrBegin;
748 newEntry.HstPtrBegin = (uintptr_t) HstPtrBegin;
749 newEntry.HstPtrEnd = (uintptr_t) HstPtrBegin + Size;
750 newEntry.TgtPtrBegin = (uintptr_t) TgtPtrBegin;
751 // refCount must be infinite
752 newEntry.RefCount = INF_REF_CNT;
753
754 DP("Creating new map entry: HstBase=" DPxMOD ", HstBegin=" DPxMOD ", HstEnd="
755 DPxMOD ", TgtBegin=" DPxMOD "\n", DPxPTR(newEntry.HstPtrBase),
756 DPxPTR(newEntry.HstPtrBegin), DPxPTR(newEntry.HstPtrEnd),
757 DPxPTR(newEntry.TgtPtrBegin));
758 HostDataToTargetMap.push_front(newEntry);
759
760 DataMapMtx.unlock();
761
762 return OFFLOAD_SUCCESS;
763}
764
765int DeviceTy::disassociatePtr(void *HstPtrBegin) {
766 DataMapMtx.lock();
767
768 // Check if entry exists
769 for (HostDataToTargetListTy::iterator ii = HostDataToTargetMap.begin();
770 ii != HostDataToTargetMap.end(); ++ii) {
771 if ((uintptr_t)HstPtrBegin == ii->HstPtrBegin) {
772 // Mapping exists
773 if (CONSIDERED_INF(ii->RefCount)) {
774 DP("Association found, removing it\n");
775 HostDataToTargetMap.erase(ii);
776 DataMapMtx.unlock();
777 return OFFLOAD_SUCCESS;
778 } else {
779 DP("Trying to disassociate a pointer which was not mapped via "
780 "omp_target_associate_ptr\n");
781 break;
782 }
783 }
784 }
785
786 // Mapping not found
787 DataMapMtx.unlock();
788 DP("Association not found\n");
789 return OFFLOAD_FAIL;
790}
791
792// Get ref count of map entry containing HstPtrBegin
793long DeviceTy::getMapEntryRefCnt(void *HstPtrBegin) {
794 uintptr_t hp = (uintptr_t)HstPtrBegin;
795 long RefCnt = -1;
796
797 DataMapMtx.lock();
798 for (auto &HT : HostDataToTargetMap) {
799 if (hp >= HT.HstPtrBegin && hp < HT.HstPtrEnd) {
800 DP("DeviceTy::getMapEntry: requested entry found\n");
801 RefCnt = HT.RefCount;
802 break;
803 }
804 }
805 DataMapMtx.unlock();
806
807 if (RefCnt < 0) {
808 DP("DeviceTy::getMapEntry: requested entry not found\n");
809 }
810
811 return RefCnt;
812}
813
814LookupResult DeviceTy::lookupMapping(void *HstPtrBegin, int64_t Size) {
815 uintptr_t hp = (uintptr_t)HstPtrBegin;
816 LookupResult lr;
817
818 DP("Looking up mapping(HstPtrBegin=" DPxMOD ", Size=%ld)...\n", DPxPTR(hp),
819 Size);
820 for (lr.Entry = HostDataToTargetMap.begin();
821 lr.Entry != HostDataToTargetMap.end(); ++lr.Entry) {
822 auto &HT = *lr.Entry;
823 // Is it contained?
824 lr.Flags.IsContained = hp >= HT.HstPtrBegin && hp < HT.HstPtrEnd &&
825 (hp+Size) <= HT.HstPtrEnd;
826 // Does it extend into an already mapped region?
827 lr.Flags.ExtendsBefore = hp < HT.HstPtrBegin && (hp+Size) > HT.HstPtrBegin;
828 // Does it extend beyond the mapped region?
829 lr.Flags.ExtendsAfter = hp < HT.HstPtrEnd && (hp+Size) > HT.HstPtrEnd;
830
831 if (lr.Flags.IsContained || lr.Flags.ExtendsBefore ||
832 lr.Flags.ExtendsAfter) {
833 break;
834 }
835 }
836
837 if (lr.Flags.ExtendsBefore) {
838 DP("WARNING: Pointer is not mapped but section extends into already "
839 "mapped data\n");
840 }
841 if (lr.Flags.ExtendsAfter) {
842 DP("WARNING: Pointer is already mapped but section extends beyond mapped "
843 "region\n");
844 }
845
846 return lr;
847}
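
// Worked example of the classification above (illustrative): with an existing
// entry covering host addresses [0x1000, 0x1100):
//   lookupMapping((void*)0x1020, 0x40) -> IsContained
//   lookupMapping((void*)0x0FF0, 0x20) -> ExtendsBefore
//   lookupMapping((void*)0x10F0, 0x20) -> ExtendsAfter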
848
849// Used by target_data_begin
850// Return the target pointer begin (where the data will be moved).
851// Allocate memory if this is the first occurrence of this mapping.
852// Increment the reference counter.
853// If NULL is returned, then either data allocation failed or the user tried
854// to do an illegal mapping.
855void *DeviceTy::getOrAllocTgtPtr(void *HstPtrBegin, void *HstPtrBase,
856 int64_t Size, bool &IsNew, bool IsImplicit, bool UpdateRefCount) {
857 void *rc = NULL;
858 DataMapMtx.lock();
859 LookupResult lr = lookupMapping(HstPtrBegin, Size);
860
861 // Check if the pointer is contained.
862 if (lr.Flags.IsContained ||
863 ((lr.Flags.ExtendsBefore || lr.Flags.ExtendsAfter) && IsImplicit)) {
864 auto &HT = *lr.Entry;
865 IsNew = false;
866
867 if (UpdateRefCount)
868 ++HT.RefCount;
869
870 uintptr_t tp = HT.TgtPtrBegin + ((uintptr_t)HstPtrBegin - HT.HstPtrBegin);
871 DP("Mapping exists%s with HstPtrBegin=" DPxMOD ", TgtPtrBegin=" DPxMOD ", "
872 "Size=%ld,%s RefCount=%s\n", (IsImplicit ? " (implicit)" : ""),
873 DPxPTR(HstPtrBegin), DPxPTR(tp), Size,
874 (UpdateRefCount ? " updated" : ""),
875 (CONSIDERED_INF(HT.RefCount)) ? "INF" :
876 std::to_string(HT.RefCount).c_str());
877 rc = (void *)tp;
878 } else if ((lr.Flags.ExtendsBefore || lr.Flags.ExtendsAfter) && !IsImplicit) {
879 // Explicit extension of mapped data - not allowed.
880 DP("Explicit extension of mapping is not allowed.\n");
881 } else if (Size) {
882 // If it is not contained and Size > 0 we should create a new entry for it.
883 IsNew = true;
884 uintptr_t tp = (uintptr_t)RTL->data_alloc(RTLDeviceID, Size, HstPtrBegin);
885 DP("Creating new map entry: HstBase=" DPxMOD ", HstBegin=" DPxMOD ", "
886 "HstEnd=" DPxMOD ", TgtBegin=" DPxMOD "\n", DPxPTR(HstPtrBase),
887 DPxPTR(HstPtrBegin), DPxPTR((uintptr_t)HstPtrBegin + Size), DPxPTR(tp));
888 HostDataToTargetMap.push_front(HostDataToTargetTy((uintptr_t)HstPtrBase,
889 (uintptr_t)HstPtrBegin, (uintptr_t)HstPtrBegin + Size, tp));
890 rc = (void *)tp;
891 }
892
893 DataMapMtx.unlock();
894 return rc;
895}
896
897// Used by target_data_begin, target_data_end, target_data_update and target.
898// Return the target pointer begin (where the data will be moved).
899// Decrement the reference counter if called from target_data_end.
900void *DeviceTy::getTgtPtrBegin(void *HstPtrBegin, int64_t Size, bool &IsLast,
901 bool UpdateRefCount) {
902 void *rc = NULL;
903 DataMapMtx.lock();
904 LookupResult lr = lookupMapping(HstPtrBegin, Size);
905
906 if (lr.Flags.IsContained || lr.Flags.ExtendsBefore || lr.Flags.ExtendsAfter) {
907 auto &HT = *lr.Entry;
908 IsLast = !(HT.RefCount > 1);
909
910 if (HT.RefCount > 1 && UpdateRefCount)
911 --HT.RefCount;
912
913 uintptr_t tp = HT.TgtPtrBegin + ((uintptr_t)HstPtrBegin - HT.HstPtrBegin);
914 DP("Mapping exists with HstPtrBegin=" DPxMOD ", TgtPtrBegin=" DPxMOD ", "
915 "Size=%ld,%s RefCount=%s\n", DPxPTR(HstPtrBegin), DPxPTR(tp), Size,
916 (UpdateRefCount ? " updated" : ""),
917 (CONSIDERED_INF(HT.RefCount)) ? "INF" :
918 std::to_string(HT.RefCount).c_str());
919 rc = (void *)tp;
920 } else {
921 IsLast = false;
922 }
923
924 DataMapMtx.unlock();
925 return rc;
926}
927
928// Return the target pointer begin (where the data will be moved).
929// Lock-free version called when loading global symbols from the fat binary.
930void *DeviceTy::getTgtPtrBegin(void *HstPtrBegin, int64_t Size) {
931 uintptr_t hp = (uintptr_t)HstPtrBegin;
932 LookupResult lr = lookupMapping(HstPtrBegin, Size);
933 if (lr.Flags.IsContained || lr.Flags.ExtendsBefore || lr.Flags.ExtendsAfter) {
934 auto &HT = *lr.Entry;
935 uintptr_t tp = HT.TgtPtrBegin + (hp - HT.HstPtrBegin);
936 return (void *)tp;
937 }
938
939 return NULL;
940}
941
942int DeviceTy::deallocTgtPtr(void *HstPtrBegin, int64_t Size, bool ForceDelete) {
943 // Check whether the pointer is contained in an existing map entry.
944 int rc;
945 DataMapMtx.lock();
946 LookupResult lr = lookupMapping(HstPtrBegin, Size);
947 if (lr.Flags.IsContained || lr.Flags.ExtendsBefore || lr.Flags.ExtendsAfter) {
948 auto &HT = *lr.Entry;
949 if (ForceDelete)
950 HT.RefCount = 1;
951 if (--HT.RefCount <= 0) {
952 assert(HT.RefCount == 0 && "did not expect a negative ref count");
953 DP("Deleting tgt data " DPxMOD " of size %ld\n",
954 DPxPTR(HT.TgtPtrBegin), Size);
955 RTL->data_delete(RTLDeviceID, (void *)HT.TgtPtrBegin);
956 DP("Removing%s mapping with HstPtrBegin=" DPxMOD ", TgtPtrBegin=" DPxMOD
957 ", Size=%ld\n", (ForceDelete ? " (forced)" : ""),
958 DPxPTR(HT.HstPtrBegin), DPxPTR(HT.TgtPtrBegin), Size);
959 HostDataToTargetMap.erase(lr.Entry);
960 }
961 rc = OFFLOAD_SUCCESS;
962 } else {
963 DP("Section to delete (hst addr " DPxMOD ") does not exist in the allocated"
964 " memory\n", DPxPTR(HstPtrBegin));
965 rc = OFFLOAD_FAIL;
966 }
967
968 DataMapMtx.unlock();
969 return rc;
970}
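
// Reference-count example for the routines above (illustrative): mapping the
// same buffer from two nested regions leaves RefCount at 1 after the first
// getOrAllocTgtPtr() and at 2 after the second. When the inner region ends,
// getTgtPtrBegin(..., UpdateRefCount=true) sees RefCount > 1, reports
// IsLast == false and drops the count back to 1; when the outer region ends
// it reports IsLast == true, and deallocTgtPtr() then takes the count to 0
// and releases the device memory via data_delete().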
971
972/// Init device, should not be called directly.
973void DeviceTy::init() {
974 int32_t rc = RTL->init_device(RTLDeviceID);
975 if (rc == OFFLOAD_SUCCESS) {
976 IsInit = true;
977 }
978}
979
980/// Thread-safe method to initialize the device only once.
981int32_t DeviceTy::initOnce() {
982 std::call_once(InitFlag, &DeviceTy::init, this);
983
984 // At this point, if IsInit is true, then either this thread or some other
985 // thread in the past successfully initialized the device, so we can return
986 // OFFLOAD_SUCCESS. If this thread executed init() via call_once() and it
987 // failed, return OFFLOAD_FAIL. If call_once did not invoke init(), it means
988 // that some other thread already attempted to execute init() and if IsInit
989 // is still false, return OFFLOAD_FAIL.
990 if (IsInit)
991 return OFFLOAD_SUCCESS;
992 else
993 return OFFLOAD_FAIL;
994}
995
996// Load binary to device.
997__tgt_target_table *DeviceTy::load_binary(void *Img) {
998 RTL->Mtx.lock();
999 __tgt_target_table *rc = RTL->load_binary(RTLDeviceID, Img);
1000 RTL->Mtx.unlock();
1001 return rc;
1002}
1003
1004// Submit data to device.
1005int32_t DeviceTy::data_submit(void *TgtPtrBegin, void *HstPtrBegin,
1006 int64_t Size) {
1007 return RTL->data_submit(RTLDeviceID, TgtPtrBegin, HstPtrBegin, Size);
1008}
1009
1010// Retrieve data from device.
1011int32_t DeviceTy::data_retrieve(void *HstPtrBegin, void *TgtPtrBegin,
1012 int64_t Size) {
1013 return RTL->data_retrieve(RTLDeviceID, HstPtrBegin, TgtPtrBegin, Size);
1014}
1015
1016// Run region on device
1017int32_t DeviceTy::run_region(void *TgtEntryPtr, void **TgtVarsPtr,
1018 ptrdiff_t *TgtOffsets, int32_t TgtVarsSize) {
1019 return RTL->run_region(RTLDeviceID, TgtEntryPtr, TgtVarsPtr, TgtOffsets,
1020 TgtVarsSize);
1021}
1022
1023// Run team region on device.
1024int32_t DeviceTy::run_team_region(void *TgtEntryPtr, void **TgtVarsPtr,
1025 ptrdiff_t *TgtOffsets, int32_t TgtVarsSize, int32_t NumTeams,
1026 int32_t ThreadLimit, uint64_t LoopTripCount) {
1027 return RTL->run_team_region(RTLDeviceID, TgtEntryPtr, TgtVarsPtr, TgtOffsets,
1028 TgtVarsSize, NumTeams, ThreadLimit, LoopTripCount);
1029}
1030
1031////////////////////////////////////////////////////////////////////////////////
1032// Functionality for registering libs
1033
1034static void RegisterImageIntoTranslationTable(TranslationTable &TT,
1035 RTLInfoTy &RTL, __tgt_device_image *image) {
1036
1037 // same size, as when we increase one, we also increase the other.
1038 assert(TT.TargetsTable.size() == TT.TargetsImages.size() &&
1039 "We should have as many images as we have tables!");
1040
1041 // Resize the Targets Table and Images to accommodate the new targets if
1042 // required
1043 unsigned TargetsTableMinimumSize = RTL.Idx + RTL.NumberOfDevices;
1044
1045 if (TT.TargetsTable.size() < TargetsTableMinimumSize) {
1046 TT.TargetsImages.resize(TargetsTableMinimumSize, 0);
1047 TT.TargetsTable.resize(TargetsTableMinimumSize, 0);
1048 }
1049
1050 // Register the image in all devices for this target type.
1051 for (int32_t i = 0; i < RTL.NumberOfDevices; ++i) {
1052 // If we are changing the image we are also invalidating the target table.
1053 if (TT.TargetsImages[RTL.Idx + i] != image) {
1054 TT.TargetsImages[RTL.Idx + i] = image;
1055 TT.TargetsTable[RTL.Idx + i] = 0; // lazy initialization of target table.
1056 }
1057 }
1058}
1059
1060////////////////////////////////////////////////////////////////////////////////
1061// Functionality for registering Ctors/Dtors
1062
1063static void RegisterGlobalCtorsDtorsForImage(__tgt_bin_desc *desc,
1064 __tgt_device_image *img, RTLInfoTy *RTL) {
1065
1066 for (int32_t i = 0; i < RTL->NumberOfDevices; ++i) {
1067 DeviceTy &Device = Devices[RTL->Idx + i];
1068 Device.PendingGlobalsMtx.lock();
1069 Device.HasPendingGlobals = true;
1070 for (__tgt_offload_entry *entry = img->EntriesBegin;
1071 entry != img->EntriesEnd; ++entry) {
1072 if (entry->flags & OMP_DECLARE_TARGET_CTOR) {
1073 DP("Adding ctor " DPxMOD " to the pending list.\n",
1074 DPxPTR(entry->addr));
1075 Device.PendingCtorsDtors[desc].PendingCtors.push_back(entry->addr);
1076 } else if (entry->flags & OMP_DECLARE_TARGET_DTOR) {
1077 // Dtors are pushed in reverse order so they are executed from end
1078 // to beginning when unregistering the library!
1079 DP("Adding dtor " DPxMOD " to the pending list.\n",
1080 DPxPTR(entry->addr));
1081 Device.PendingCtorsDtors[desc].PendingDtors.push_front(entry->addr);
1082 }
1083
1084 if (entry->flags & OMP_DECLARE_TARGET_LINK) {
1085 DP("The \"link\" attribute is not yet supported!\n");
1086 }
1087 }
1088 Device.PendingGlobalsMtx.unlock();
1089 }
1090}
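
// Ordering example for the registration above (illustrative): if an image
// carries ctors C1, C2 and dtors D1, D2 in entry order, the pending lists
// become PendingCtors = [C1, C2] (push_back) and PendingDtors = [D2, D1]
// (push_front), so destructors later run in reverse construction order.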
1091
1092////////////////////////////////////////////////////////////////////////////////
1093/// adds a target shared library to the target execution image
1094EXTERN void __tgt_register_lib(__tgt_bin_desc *desc) {
1095
1096 // Attempt to load all plugins available in the system.
1097 RTLs.LoadRTLsOnce();
1098
1099 RTLsMtx.lock();
1100 // Register the images with the RTLs that understand them, if any.
1101 for (int32_t i = 0; i < desc->NumDeviceImages; ++i) {
1102 // Obtain the image.
1103 __tgt_device_image *img = &desc->DeviceImages[i];
1104
1105 RTLInfoTy *FoundRTL = NULL;
1106
1107 // Scan the RTLs that have associated images until we find one that supports
1108 // the current image.
1109 for (auto &R : RTLs.AllRTLs) {
1110 if (!R.is_valid_binary(img)) {
1111 DP("Image " DPxMOD " is NOT compatible with RTL %s!\n",
1112 DPxPTR(img->ImageStart), R.RTLName.c_str());
1113 continue;
1114 }
1115
1116 DP("Image " DPxMOD " is compatible with RTL %s!\n",
1117 DPxPTR(img->ImageStart), R.RTLName.c_str());
1118
1119 // If this RTL is not already in use, initialize it.
1120 if (!R.isUsed) {
1121 // Initialize the device information for the RTL we are about to use.
1122 DeviceTy device(&R);
1123
1124 size_t start = Devices.size();
1125 Devices.resize(start + R.NumberOfDevices, device);
1126 for (int32_t device_id = 0; device_id < R.NumberOfDevices;
1127 device_id++) {
1128 // global device ID
1129 Devices[start + device_id].DeviceID = start + device_id;
1130 // RTL local device ID
1131 Devices[start + device_id].RTLDeviceID = device_id;
1132
1133 // Save pointer to device in RTL in case we want to unregister the RTL
1134 R.Devices.push_back(&Devices[start + device_id]);
1135 }
1136
1137 // Initialize the index of this RTL and save it in the used RTLs.
1138 R.Idx = (RTLs.UsedRTLs.empty())
1139 ? 0
1140 : RTLs.UsedRTLs.back()->Idx +
1141 RTLs.UsedRTLs.back()->NumberOfDevices;
1142 assert((size_t) R.Idx == start &&
1143 "RTL index should equal the number of devices used so far.");
1144 R.isUsed = true;
1145 RTLs.UsedRTLs.push_back(&R);
1146
1147 DP("RTL " DPxMOD " has index %d!\n", DPxPTR(R.LibraryHandler), R.Idx);
1148 }
1149
1150 // Initialize (if necessary) translation table for this library.
1151 TrlTblMtx.lock();
1152 if(!HostEntriesBeginToTransTable.count(desc->HostEntriesBegin)){
1153 TranslationTable &tt =
1154 HostEntriesBeginToTransTable[desc->HostEntriesBegin];
1155 tt.HostTable.EntriesBegin = desc->HostEntriesBegin;
1156 tt.HostTable.EntriesEnd = desc->HostEntriesEnd;
1157 }
1158
1159 // Retrieve translation table for this library.
1160 TranslationTable &TransTable =
1161 HostEntriesBeginToTransTable[desc->HostEntriesBegin];
1162
1163 DP("Registering image " DPxMOD " with RTL %s!\n",
1164 DPxPTR(img->ImageStart), R.RTLName.c_str());
1165 RegisterImageIntoTranslationTable(TransTable, R, img);
1166 TrlTblMtx.unlock();
1167 FoundRTL = &R;
1168
1169 // Load ctors/dtors for static objects
1170 RegisterGlobalCtorsDtorsForImage(desc, img, FoundRTL);
1171
1172 // if an RTL was found we are done - proceed to register the next image
1173 break;
1174 }
1175
1176 if (!FoundRTL) {
1177 DP("No RTL found for image " DPxMOD "!\n", DPxPTR(img->ImageStart));
1178 }
1179 }
1180 RTLsMtx.unlock();
1181
1182
1183 DP("Done registering entries!\n");
1184}
1185
1186////////////////////////////////////////////////////////////////////////////////
1187/// unloads a target shared library
1188EXTERN void __tgt_unregister_lib(__tgt_bin_desc *desc) {
1189 DP("Unloading target library!\n");
1190
1191 RTLsMtx.lock();
1192 // Find which RTL understands each image, if any.
1193 for (int32_t i = 0; i < desc->NumDeviceImages; ++i) {
1194 // Obtain the image.
1195 __tgt_device_image *img = &desc->DeviceImages[i];
1196
1197 RTLInfoTy *FoundRTL = NULL;
1198
1199 // Scan the RTLs that have associated images until we find one that supports
1200 // the current image. We only need to scan RTLs that are already being used.
1201 for (auto *R : RTLs.UsedRTLs) {
1202
1203 assert(R->isUsed && "Expecting used RTLs.");
1204
1205 if (!R->is_valid_binary(img)) {
1206 DP("Image " DPxMOD " is NOT compatible with RTL " DPxMOD "!\n",
1207 DPxPTR(img->ImageStart), DPxPTR(R->LibraryHandler));
1208 continue;
1209 }
1210
1211 DP("Image " DPxMOD " is compatible with RTL " DPxMOD "!\n",
1212 DPxPTR(img->ImageStart), DPxPTR(R->LibraryHandler));
1213
1214 FoundRTL = R;
1215
1216 // Execute dtors for static objects if the device has been used, i.e.
1217 // if its PendingCtors list has been emptied.
1218 for (int32_t i = 0; i < FoundRTL->NumberOfDevices; ++i) {
1219 DeviceTy &Device = Devices[FoundRTL->Idx + i];
1220 Device.PendingGlobalsMtx.lock();
1221 if (Device.PendingCtorsDtors[desc].PendingCtors.empty()) {
1222 for (auto &dtor : Device.PendingCtorsDtors[desc].PendingDtors) {
1223 int rc = target(Device.DeviceID, dtor, 0, NULL, NULL, NULL, NULL, 1,
1224 1, true /*team*/);
1225 if (rc != OFFLOAD_SUCCESS) {
1226 DP("Running destructor " DPxMOD " failed.\n", DPxPTR(dtor));
1227 }
1228 }
1229 // Remove this library's entry from PendingCtorsDtors
1230 Device.PendingCtorsDtors.erase(desc);
1231 }
1232 Device.PendingGlobalsMtx.unlock();
1233 }
1234
1235 DP("Unregistered image " DPxMOD " from RTL " DPxMOD "!\n",
1236 DPxPTR(img->ImageStart), DPxPTR(R->LibraryHandler));
1237
1238 break;
1239 }
1240
1241 // if no RTL was found proceed to unregister the next image
1242 if (!FoundRTL){
1243 DP("No RTLs in use support the image " DPxMOD "!\n",
1244 DPxPTR(img->ImageStart));
1245 }
1246 }
1247 RTLsMtx.unlock();
1248 DP("Done unregistering images!\n");
1249
1250 // Remove entries from HostPtrToTableMap
1251 TblMapMtx.lock();
1252 for (__tgt_offload_entry *cur = desc->HostEntriesBegin;
1253 cur < desc->HostEntriesEnd; ++cur) {
1254 HostPtrToTableMap.erase(cur->addr);
1255 }
1256
1257 // Remove translation table for this descriptor.
1258 auto tt = HostEntriesBeginToTransTable.find(desc->HostEntriesBegin);
1259 if (tt != HostEntriesBeginToTransTable.end()) {
1260 DP("Removing translation table for descriptor " DPxMOD "\n",
1261 DPxPTR(desc->HostEntriesBegin));
1262 HostEntriesBeginToTransTable.erase(tt);
1263 } else {
1264 DP("Translation table for descriptor " DPxMOD " cannot be found, probably "
1265 "it has been already removed.\n", DPxPTR(desc->HostEntriesBegin));
1266 }
1267
1268 TblMapMtx.unlock();
1269
1270 // TODO: Remove RTL and the devices it manages if it's not used anymore?
1271 // TODO: Write some RTL->unload_image(...) function?
1272
1273 DP("Done unregistering library!\n");
1274}
1275
1276/// Map global data and execute pending ctors
1277static int InitLibrary(DeviceTy& Device) {
1278 /*
1279 * Map global data
1280 */
1281 int32_t device_id = Device.DeviceID;
1282 int rc = OFFLOAD_SUCCESS;
1283
1284 Device.PendingGlobalsMtx.lock();
1285 TrlTblMtx.lock();
1286 for (HostEntriesBeginToTransTableTy::iterator
1287 ii = HostEntriesBeginToTransTable.begin();
1288 ii != HostEntriesBeginToTransTable.end(); ++ii) {
1289 TranslationTable *TransTable = &ii->second;
1290 if (TransTable->TargetsTable[device_id] != 0) {
1291 // Library entries have already been processed
1292 continue;
1293 }
1294
1295 // 1) get image.
1296 assert(TransTable->TargetsImages.size() > (size_t)device_id &&
1297 "Not expecting a device ID outside the table's bounds!");
1298 __tgt_device_image *img = TransTable->TargetsImages[device_id];
1299 if (!img) {
1300 DP("No image loaded for device id %d.\n", device_id);
1301 rc = OFFLOAD_FAIL;
1302 break;
1303 }
1304 // 2) load image into the target table.
1305 __tgt_target_table *TargetTable =
1306 TransTable->TargetsTable[device_id] = Device.load_binary(img);
1307 // Unable to get table for this image: invalidate image and fail.
1308 if (!TargetTable) {
1309 DP("Unable to generate entries table for device id %d.\n", device_id);
1310 TransTable->TargetsImages[device_id] = 0;
1311 rc = OFFLOAD_FAIL;
1312 break;
1313 }
1314
1315 // Verify whether the two table sizes match.
1316 size_t hsize =
1317 TransTable->HostTable.EntriesEnd - TransTable->HostTable.EntriesBegin;
1318 size_t tsize = TargetTable->EntriesEnd - TargetTable->EntriesBegin;
1319
1320 // Invalid image for these host entries!
1321 if (hsize != tsize) {
1322 DP("Host and Target tables mismatch for device id %d [%zx != %zx].\n",
1323 device_id, hsize, tsize);
1324 TransTable->TargetsImages[device_id] = 0;
1325 TransTable->TargetsTable[device_id] = 0;
1326 rc = OFFLOAD_FAIL;
1327 break;
1328 }
1329
1330 // process global data that needs to be mapped.
1331 Device.DataMapMtx.lock();
1332 __tgt_target_table *HostTable = &TransTable->HostTable;
1333 for (__tgt_offload_entry *CurrDeviceEntry = TargetTable->EntriesBegin,
1334 *CurrHostEntry = HostTable->EntriesBegin,
1335 *EntryDeviceEnd = TargetTable->EntriesEnd;
1336 CurrDeviceEntry != EntryDeviceEnd;
1337 CurrDeviceEntry++, CurrHostEntry++) {
1338 if (CurrDeviceEntry->size != 0) {
1339 // has data.
1340 assert(CurrDeviceEntry->size == CurrHostEntry->size &&
1341 "data size mismatch");
1342
1343 // Fortran may use multiple weak declarations for the same symbol,
1344 // therefore we must allow for multiple weak symbols to be loaded from
1345 // the fat binary. Treat these mappings as any other "regular" mapping.
1346 // Add entry to map.
1347 if (Device.getTgtPtrBegin(CurrHostEntry->addr, CurrHostEntry->size))
1348 continue;
1349 DP("Add mapping from host " DPxMOD " to device " DPxMOD " with size %zu"
1350 "\n", DPxPTR(CurrHostEntry->addr), DPxPTR(CurrDeviceEntry->addr),
1351 CurrDeviceEntry->size);
1352 Device.HostDataToTargetMap.push_front(HostDataToTargetTy(
1353 (uintptr_t)CurrHostEntry->addr /*HstPtrBase*/,
1354 (uintptr_t)CurrHostEntry->addr /*HstPtrBegin*/,
1355 (uintptr_t)CurrHostEntry->addr + CurrHostEntry->size /*HstPtrEnd*/,
1356 (uintptr_t)CurrDeviceEntry->addr /*TgtPtrBegin*/,
1357 INF_REF_CNT /*RefCount*/));
1358 }
1359 }
1360 Device.DataMapMtx.unlock();
1361 }
1362 TrlTblMtx.unlock();
1363
1364 if (rc != OFFLOAD_SUCCESS) {
1365 Device.PendingGlobalsMtx.unlock();
1366 return rc;
1367 }
1368
1369 /*
1370 * Run ctors for static objects
1371 */
1372 if (!Device.PendingCtorsDtors.empty()) {
1373 // Call all ctors for all libraries registered so far
1374 for (auto &lib : Device.PendingCtorsDtors) {
1375 if (!lib.second.PendingCtors.empty()) {
1376 DP("Has pending ctors... call now\n");
1377 for (auto &entry : lib.second.PendingCtors) {
1378 void *ctor = entry;
1379 int rc = target(device_id, ctor, 0, NULL, NULL, NULL,
1380 NULL, 1, 1, true /*team*/);
1381 if (rc != OFFLOAD_SUCCESS) {
1382 DP("Running ctor " DPxMOD " failed.\n", DPxPTR(ctor));
1383 Device.PendingGlobalsMtx.unlock();
1384 return OFFLOAD_FAIL;
1385 }
1386 }
1387 // Clear the list to indicate that this device has been used
1388 lib.second.PendingCtors.clear();
1389 DP("Done with pending ctors for lib " DPxMOD "\n", DPxPTR(lib.first));
1390 }
1391 }
1392 }
1393 Device.HasPendingGlobals = false;
1394 Device.PendingGlobalsMtx.unlock();
1395
1396 return OFFLOAD_SUCCESS;
1397}
1398
1399// Check whether a device has been initialized, global ctors have been
1400// executed and global data has been mapped; do so if not already done.
1401static int CheckDevice(int32_t device_id) {
1402 // Is device ready?
1403 if (!device_is_ready(device_id)) {
1404 DP("Device %d is not ready.\n", device_id);
1405 return OFFLOAD_FAIL;
1406 }
1407
1408 // Get device info.
1409 DeviceTy &Device = Devices[device_id];
1410
1411 // Check whether global data has been mapped for this device
1412 Device.PendingGlobalsMtx.lock();
1413 bool hasPendingGlobals = Device.HasPendingGlobals;
1414 Device.PendingGlobalsMtx.unlock();
1415 if (hasPendingGlobals && InitLibrary(Device) != OFFLOAD_SUCCESS) {
1416 DP("Failed to init globals on device %d\n", device_id);
1417 return OFFLOAD_FAIL;
1418 }
1419
1420 return OFFLOAD_SUCCESS;
1421}
1422
1423// Following datatypes and functions (tgt_oldmap_type, combined_entry_t,
1424// translate_map, cleanup_map) will be removed once the compiler starts using
1425// the new map types.
1426
1427// Old map types
1428enum tgt_oldmap_type {
1429 OMP_TGT_OLDMAPTYPE_TO = 0x001, // copy data from host to device
1430 OMP_TGT_OLDMAPTYPE_FROM = 0x002, // copy data from device to host
1431 OMP_TGT_OLDMAPTYPE_ALWAYS = 0x004, // copy regardless of the ref. count
1432 OMP_TGT_OLDMAPTYPE_DELETE = 0x008, // force unmapping of data
1433 OMP_TGT_OLDMAPTYPE_MAP_PTR = 0x010, // map pointer as well as pointee
1434 OMP_TGT_OLDMAPTYPE_FIRST_MAP = 0x020, // first occurrence of mapped variable
1435 OMP_TGT_OLDMAPTYPE_RETURN_PTR = 0x040, // return TgtBase addr of mapped data
1436 OMP_TGT_OLDMAPTYPE_PRIVATE_PTR = 0x080, // private variable - not mapped
1437 OMP_TGT_OLDMAPTYPE_PRIVATE_VAL = 0x100 // copy by value - not mapped
1438};
1439
1440// Temporary functions for map translation and cleanup
1441struct combined_entry_t {
1442 int num_members; // number of members in combined entry
1443 void *base_addr; // base address of combined entry
1444 void *begin_addr; // begin address of combined entry
1445 void *end_addr; // end address of combined entry
1446};
1447
1448static void translate_map(int32_t arg_num, void **args_base, void **args,
1449 int64_t *arg_sizes, int32_t *arg_types, int32_t &new_arg_num,
1450 void **&new_args_base, void **&new_args, int64_t *&new_arg_sizes,
1451 int64_t *&new_arg_types, bool is_target_construct) {
1452 if (arg_num <= 0) {
1453 DP("Nothing to translate\n");
1454 new_arg_num = 0;
1455 return;
1456 }
1457
1458 // array of combined entries
1459 combined_entry_t *cmb_entries =
1460 (combined_entry_t *) alloca(arg_num * sizeof(combined_entry_t));
1461 // number of combined entries
1462 long num_combined = 0;
1463 // old entry is MAP_PTR?
1464 bool *is_ptr_old = (bool *) alloca(arg_num * sizeof(bool));
1465 // old entry is member of member_of[old] cmb_entry
1466 int *member_of = (int *) alloca(arg_num * sizeof(int));
1467 // temporary storage for modifications of the original arg_types
1468 int32_t *mod_arg_types = (int32_t *) alloca(arg_num *sizeof(int32_t));
1469
1470 DP("Translating %d map entries\n", arg_num);
1471 for (int i = 0; i < arg_num; ++i) {
1472 member_of[i] = -1;
1473 is_ptr_old[i] = false;
1474 mod_arg_types[i] = arg_types[i];
1475 // Scan previous entries to see whether this entry shares the same base
1476 for (int j = 0; j < i; ++j) {
1477 void *new_begin_addr = NULL;
1478 void *new_end_addr = NULL;
1479
1480 if (mod_arg_types[i] & OMP_TGT_OLDMAPTYPE_MAP_PTR) {
1481 if (args_base[i] == args[j]) {
1482 if (!(mod_arg_types[j] & OMP_TGT_OLDMAPTYPE_MAP_PTR)) {
1483 DP("Entry %d has the same base as entry %d's begin address\n", i,
1484 j);
1485 new_begin_addr = args_base[i];
1486 new_end_addr = (char *)args_base[i] + sizeof(void *);
1487 assert(arg_sizes[j] == sizeof(void *));
1488 is_ptr_old[j] = true;
1489 } else {
1490 DP("Entry %d has the same base as entry %d's begin address, but "
1491 "%d's base was a MAP_PTR too\n", i, j, j);
1492 int32_t to_from_always_delete =
1493 OMP_TGT_OLDMAPTYPE_TO | OMP_TGT_OLDMAPTYPE_FROM |
1494 OMP_TGT_OLDMAPTYPE_ALWAYS | OMP_TGT_OLDMAPTYPE_DELETE;
1495 if (mod_arg_types[j] & to_from_always_delete) {
1496 DP("Resetting to/from/always/delete flags for entry %d because "
1497 "it is only a pointer to pointer\n", j);
1498 mod_arg_types[j] &= ~to_from_always_delete;
1499 }
1500 }
1501 }
1502 } else {
1503 if (!(mod_arg_types[i] & OMP_TGT_OLDMAPTYPE_FIRST_MAP) &&
1504 args_base[i] == args_base[j]) {
1505 DP("Entry %d has the same base address as entry %d\n", i, j);
1506 new_begin_addr = args[i];
1507 new_end_addr = (char *)args[i] + arg_sizes[i];
1508 }
1509 }
1510
1511 // If we have combined the entry with a previous one
1512 if (new_begin_addr) {
1513 int id;
1514 if(member_of[j] == -1) {
1515 // We have a new entry
1516 id = num_combined++;
1517 DP("Creating new combined entry %d for old entry %d\n", id, j);
1518 // Initialize new entry
1519 cmb_entries[id].num_members = 1;
1520 cmb_entries[id].base_addr = args_base[j];
1521 if (mod_arg_types[j] & OMP_TGT_OLDMAPTYPE_MAP_PTR) {
1522 cmb_entries[id].begin_addr = args_base[j];
1523 cmb_entries[id].end_addr = (char *)args_base[j] + arg_sizes[j];
1524 } else {
1525 cmb_entries[id].begin_addr = args[j];
1526 cmb_entries[id].end_addr = (char *)args[j] + arg_sizes[j];
1527 }
1528 member_of[j] = id;
1529 } else {
1530 // Reuse existing combined entry
1531 DP("Reusing existing combined entry %d\n", member_of[j]);
1532 id = member_of[j];
1533 }
1534
1535 // Update combined entry
1536 DP("Adding entry %d to combined entry %d\n", i, id);
1537 cmb_entries[id].num_members++;
1538 // base_addr stays the same
1539 cmb_entries[id].begin_addr =
1540 std::min(cmb_entries[id].begin_addr, new_begin_addr);
1541 cmb_entries[id].end_addr =
1542 std::max(cmb_entries[id].end_addr, new_end_addr);
1543 member_of[i] = id;
1544 break;
1545 }
1546 }
1547 }
1548
1549 DP("New entries: %ld combined + %d original\n", num_combined, arg_num);
1550 new_arg_num = arg_num + num_combined;
1551 new_args_base = (void **) malloc(new_arg_num * sizeof(void *));
1552 new_args = (void **) malloc(new_arg_num * sizeof(void *));
1553 new_arg_sizes = (int64_t *) malloc(new_arg_num * sizeof(int64_t));
1554 new_arg_types = (int64_t *) malloc(new_arg_num * sizeof(int64_t));
1555
1556 const int64_t alignment = 8;
1557
1558 int next_id = 0; // next ID
1559 int next_cid = 0; // next combined ID
1560 int *combined_to_new_id = (int *) alloca(num_combined * sizeof(int));
1561 for (int i = 0; i < arg_num; ++i) {
1562 // If this old entry is the first member of a combined entry that has not
1562 // been emitted yet, emit the combined entry first.
1563 if (member_of[i] == next_cid) {
1564 int cid = next_cid++; // ID of this combined entry
1565 int nid = next_id++; // ID of the new (global) entry
1566 combined_to_new_id[cid] = nid;
1567 DP("Combined entry %3d will become new entry %3d\n", cid, nid);
1568
1569 int64_t padding = (int64_t)cmb_entries[cid].begin_addr % alignment;
1570 if (padding) {
1571 DP("Using a padding of %" PRId64 " for begin address " DPxMOD "\n",
1572 padding, DPxPTR(cmb_entries[cid].begin_addr));
1573 cmb_entries[cid].begin_addr =
1574 (char *)cmb_entries[cid].begin_addr - padding;
1575 }
1576
1577 new_args_base[nid] = cmb_entries[cid].base_addr;
1578 new_args[nid] = cmb_entries[cid].begin_addr;
1579 new_arg_sizes[nid] = (int64_t) ((char *)cmb_entries[cid].end_addr -
1580 (char *)cmb_entries[cid].begin_addr);
1581 new_arg_types[nid] = OMP_TGT_MAPTYPE_TARGET_PARAM;
1582 DP("Entry %3d: base_addr " DPxMOD ", begin_addr " DPxMOD ", "
1583 "size %" PRId64 ", type 0x%" PRIx64 "\n", nid,
1584 DPxPTR(new_args_base[nid]), DPxPTR(new_args[nid]), new_arg_sizes[nid],
1585 new_arg_types[nid]);
1586 } else if (member_of[i] != -1) {
1587 DP("Combined entry %3d has been encountered before, do nothing\n",
1588 member_of[i]);
1589 }
1590
1591 // Now that the combined entry (the one the old entry was a member of) has
1592 // been inserted into the new arguments list, proceed with the old entry.
1593 int nid = next_id++;
1594 DP("Old entry %3d will become new entry %3d\n", i, nid);
1595
1596 new_args_base[nid] = args_base[i];
1597 new_args[nid] = args[i];
1598 new_arg_sizes[nid] = arg_sizes[i];
George Rokos15a6e7d2017-02-15 20:45:37 +00001599 int64_t old_type = mod_arg_types[i];
George Rokos2467df62017-01-25 21:27:24 +00001600
1601 if (is_ptr_old[i]) {
1602 // Reset TO and FROM flags
1603 old_type &= ~(OMP_TGT_OLDMAPTYPE_TO | OMP_TGT_OLDMAPTYPE_FROM);
1604 }
1605
1606 if (member_of[i] == -1) {
1607 if (!is_target_construct)
1608 old_type &= ~OMP_TGT_MAPTYPE_TARGET_PARAM;
1609 new_arg_types[nid] = old_type;
1610 DP("Entry %3d: base_addr " DPxMOD ", begin_addr " DPxMOD ", size %" PRId64
1611 ", type 0x%" PRIx64 " (old entry %d not MEMBER_OF)\n", nid,
1612 DPxPTR(new_args_base[nid]), DPxPTR(new_args[nid]), new_arg_sizes[nid],
1613 new_arg_types[nid], i);
1614 } else {
1615 // Old entry is not FIRST_MAP
1616 old_type &= ~OMP_TGT_OLDMAPTYPE_FIRST_MAP;
1617 // Add MEMBER_OF
1618 int new_member_of = combined_to_new_id[member_of[i]];
1619 old_type |= ((int64_t)new_member_of + 1) << 48;
1620 new_arg_types[nid] = old_type;
1621 DP("Entry %3d: base_addr " DPxMOD ", begin_addr " DPxMOD ", size %" PRId64
1622 ", type 0x%" PRIx64 " (old entry %d MEMBER_OF %d)\n", nid,
1623 DPxPTR(new_args_base[nid]), DPxPTR(new_args[nid]), new_arg_sizes[nid],
1624 new_arg_types[nid], i, new_member_of);
1625 }
1626 }
1627}
1628
1629static void cleanup_map(int32_t new_arg_num, void **new_args_base,
1630 void **new_args, int64_t *new_arg_sizes, int64_t *new_arg_types,
1631 int32_t arg_num, void **args_base) {
1632 if (new_arg_num > 0) {
1633 int offset = new_arg_num - arg_num;
1634 for (int32_t i = 0; i < arg_num; ++i) {
1635 // Restore old base address
1636 args_base[i] = new_args_base[i+offset];
1637 }
1638 free(new_args_base);
1639 free(new_args);
1640 free(new_arg_sizes);
1641 free(new_arg_types);
1642 }
1643}
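
// Worked illustration of the translation above (a reading of the code, not a
// normative description): suppose the compiler emits two old-style entries for
// two fields of the same struct, e.g. map(to: s.x) and map(from: s.y), both
// with args_base[] == &s. translate_map() folds them into one new combined
// entry marked OMP_TGT_MAPTYPE_TARGET_PARAM, whose begin address is the lower
// of the two field addresses (rounded down to 8-byte alignment) and whose size
// reaches the upper end of the two sections. The two original entries are then
// re-emitted right after it with their to/from flags kept, TARGET_PARAM
// cleared on non-target constructs, and a MEMBER_OF field in bits 48..63
// pointing at the combined entry. cleanup_map() later copies base addresses
// back into the caller's args_base[] and frees the four temporary arrays.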
1644
1645static short member_of(int64_t type) {
1646 return ((type & OMP_TGT_MAPTYPE_MEMBER_OF) >> 48) - 1;
1647}
1648
1649/// Internal function to do the mapping and transfer the data to the device
1650static int target_data_begin(DeviceTy &Device, int32_t arg_num,
1651 void **args_base, void **args, int64_t *arg_sizes, int64_t *arg_types) {
1652 // process each input.
1653 int rc = OFFLOAD_SUCCESS;
1654 for (int32_t i = 0; i < arg_num; ++i) {
1655 // Ignore private variables and arrays - there is no mapping for them.
1656 if ((arg_types[i] & OMP_TGT_MAPTYPE_LITERAL) ||
1657 (arg_types[i] & OMP_TGT_MAPTYPE_PRIVATE))
1658 continue;
1659
1660 void *HstPtrBegin = args[i];
1661 void *HstPtrBase = args_base[i];
1662 // Address of pointer on the host and device, respectively.
1663 void *Pointer_HstPtrBegin, *Pointer_TgtPtrBegin;
1664 bool IsNew, Pointer_IsNew;
1665 bool IsImplicit = arg_types[i] & OMP_TGT_MAPTYPE_IMPLICIT;
1666 bool UpdateRef = !(arg_types[i] & OMP_TGT_MAPTYPE_MEMBER_OF);
1667 if (arg_types[i] & OMP_TGT_MAPTYPE_PTR_AND_OBJ) {
1668 DP("Has a pointer entry: \n");
1669 // base is address of pointer.
1670 Pointer_TgtPtrBegin = Device.getOrAllocTgtPtr(HstPtrBase, HstPtrBase,
1671 sizeof(void *), Pointer_IsNew, IsImplicit, UpdateRef);
1672 if (!Pointer_TgtPtrBegin) {
1673 DP("Call to getOrAllocTgtPtr returned null pointer (device failure or "
1674 "illegal mapping).\n");
1675 }
1676 DP("There are %zu bytes allocated at target address " DPxMOD " - is%s new"
1677 "\n", sizeof(void *), DPxPTR(Pointer_TgtPtrBegin),
1678 (Pointer_IsNew ? "" : " not"));
1679 Pointer_HstPtrBegin = HstPtrBase;
1680 // modify current entry.
1681 HstPtrBase = *(void **)HstPtrBase;
1682 UpdateRef = true; // subsequently update ref count of pointee
1683 }
1684
1685 void *TgtPtrBegin = Device.getOrAllocTgtPtr(HstPtrBegin, HstPtrBase,
1686 arg_sizes[i], IsNew, IsImplicit, UpdateRef);
1687 if (!TgtPtrBegin && arg_sizes[i]) {
1688 // If arg_sizes[i]==0, then the argument is a pointer to NULL, so
1689 // getOrAllocTgtPtr() returning NULL is not an error.
1690 DP("Call to getOrAllocTgtPtr returned null pointer (device failure or "
1691 "illegal mapping).\n");
1692 }
1693 DP("There are %" PRId64 " bytes allocated at target address " DPxMOD
1694 " - is%s new\n", arg_sizes[i], DPxPTR(TgtPtrBegin),
1695 (IsNew ? "" : " not"));
1696
1697 if (arg_types[i] & OMP_TGT_MAPTYPE_RETURN_PARAM) {
1698 void *ret_ptr;
1699 if (arg_types[i] & OMP_TGT_MAPTYPE_PTR_AND_OBJ)
1700 ret_ptr = Pointer_TgtPtrBegin;
1701 else {
1702 bool IsLast; // not used
1703 ret_ptr = Device.getTgtPtrBegin(HstPtrBegin, 0, IsLast, false);
1704 }
1705
1706 DP("Returning device pointer " DPxMOD "\n", DPxPTR(ret_ptr));
1707 args_base[i] = ret_ptr;
1708 }
1709
1710 if (arg_types[i] & OMP_TGT_MAPTYPE_TO) {
1711 bool copy = false;
1712 if (IsNew || (arg_types[i] & OMP_TGT_MAPTYPE_ALWAYS)) {
1713 copy = true;
1714 } else if (arg_types[i] & OMP_TGT_MAPTYPE_MEMBER_OF) {
1715 // Copy data only if the "parent" struct has RefCount==1.
1716 short parent_idx = member_of(arg_types[i]);
1717 long parent_rc = Device.getMapEntryRefCnt(args[parent_idx]);
1718 assert(parent_rc > 0 && "parent struct not found");
1719 if (parent_rc == 1) {
1720 copy = true;
1721 }
1722 }
1723
1724 if (copy) {
1725 DP("Moving %" PRId64 " bytes (hst:" DPxMOD ") -> (tgt:" DPxMOD ")\n",
1726 arg_sizes[i], DPxPTR(HstPtrBegin), DPxPTR(TgtPtrBegin));
1727 int rt = Device.data_submit(TgtPtrBegin, HstPtrBegin, arg_sizes[i]);
1728 if (rt != OFFLOAD_SUCCESS) {
1729 DP("Copying data to device failed.\n");
1730 rc = OFFLOAD_FAIL;
1731 }
1732 }
1733 }
1734
1735 if (arg_types[i] & OMP_TGT_MAPTYPE_PTR_AND_OBJ) {
1736 DP("Update pointer (" DPxMOD ") -> [" DPxMOD "]\n",
1737 DPxPTR(Pointer_TgtPtrBegin), DPxPTR(TgtPtrBegin));
1738 uint64_t Delta = (uint64_t)HstPtrBegin - (uint64_t)HstPtrBase;
1739 void *TgtPtrBase = (void *)((uint64_t)TgtPtrBegin - Delta);
1740 int rt = Device.data_submit(Pointer_TgtPtrBegin, &TgtPtrBase,
1741 sizeof(void *));
1742 if (rt != OFFLOAD_SUCCESS) {
1743 DP("Copying data to device failed.\n");
1744 rc = OFFLOAD_FAIL;
1745 }
1746 // create shadow pointers for this entry
1747 Device.ShadowMtx.lock();
1748 Device.ShadowPtrMap[Pointer_HstPtrBegin] = {HstPtrBase,
1749 Pointer_TgtPtrBegin, TgtPtrBase};
1750 Device.ShadowMtx.unlock();
1751 }
1752 }
1753
1754 return rc;
1755}
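
// Worked illustration of the PTR_AND_OBJ path above (a reading of the code):
// for a hypothetical mapping such as map(tofrom: s.p[0:N]) where s.p is a host
// pointer, args_base[i] is &s.p and args[i] is s.p. The loop first maps the
// pointer storage itself (sizeof(void *) at &s.p), then the pointee section,
// then overwrites the device copy of the pointer so that it holds the device
// address of the pointee, and finally records a shadow entry so that
// target_data_end() and __tgt_target_data_update() can restore the original
// host value of s.p after copying the enclosing data back.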
1756
1757EXTERN void __tgt_target_data_begin_nowait(int32_t device_id, int32_t arg_num,
1758 void **args_base, void **args, int64_t *arg_sizes, int32_t *arg_types,
1759 int32_t depNum, void *depList, int32_t noAliasDepNum,
1760 void *noAliasDepList) {
1761 if (depNum + noAliasDepNum > 0)
1762 __kmpc_omp_taskwait(NULL, 0);
1763
1764 __tgt_target_data_begin(device_id, arg_num, args_base, args, arg_sizes,
1765 arg_types);
1766}
1767
1768/// Creates a host-to-target data mapping, stores it in the
1769/// libomptarget.so internal structure (an entry in a stack of data maps),
1770/// and passes the data to the device.
1771EXTERN void __tgt_target_data_begin(int32_t device_id, int32_t arg_num,
1772 void **args_base, void **args, int64_t *arg_sizes, int32_t *arg_types) {
1773 DP("Entering data begin region for device %d with %d mappings\n", device_id,
1774 arg_num);
1775
1776 // If no device was specified, use the default device.
1777 if (device_id == OFFLOAD_DEVICE_DEFAULT) {
1778 device_id = omp_get_default_device();
1779 DP("Use default device id %d\n", device_id);
1780 }
1781
1782 if (CheckDevice(device_id) != OFFLOAD_SUCCESS) {
1783 DP("Failed to get device %d ready\n", device_id);
1784 return;
1785 }
1786
1787 DeviceTy& Device = Devices[device_id];
1788
1789 // Translate maps
1790 int32_t new_arg_num;
1791 void **new_args_base;
1792 void **new_args;
1793 int64_t *new_arg_sizes;
1794 int64_t *new_arg_types;
1795 translate_map(arg_num, args_base, args, arg_sizes, arg_types, new_arg_num,
1796 new_args_base, new_args, new_arg_sizes, new_arg_types, false);
1797
1798 //target_data_begin(Device, arg_num, args_base, args, arg_sizes, arg_types);
1799 target_data_begin(Device, new_arg_num, new_args_base, new_args, new_arg_sizes,
1800 new_arg_types);
1801
1802 // Cleanup translation memory
1803 cleanup_map(new_arg_num, new_args_base, new_args, new_arg_sizes,
1804 new_arg_types, arg_num, args_base);
1805}
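
// A minimal sketch (illustration only; the function and variable names below
// are hypothetical and the flag set is simplified) of how compiler-generated
// code for a directive such as
//   #pragma omp target enter data map(to: a[0:N])
// is expected to reach the entry point above. Real compiler output also
// encodes additional map-type bits that are omitted here.
#if 0
static void example_enter_data(int32_t device_id, double *a, int64_t N) {
  void *base[]  = {a};                             // base address of the section
  void *begin[] = {a};                             // begin address of the section
  int64_t size[] = {N * (int64_t)sizeof(double)};  // size in bytes
  int32_t type[] = {OMP_TGT_OLDMAPTYPE_TO};        // copy host -> device on entry
  __tgt_target_data_begin(device_id, /*arg_num=*/1, base, begin, size, type);
}
#endif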
1806
1807/// Internal function to undo the mapping and retrieve the data from the device.
1808static int target_data_end(DeviceTy &Device, int32_t arg_num, void **args_base,
1809 void **args, int64_t *arg_sizes, int64_t *arg_types) {
1810 int rc = OFFLOAD_SUCCESS;
1811 // process each input.
1812 for (int32_t i = arg_num - 1; i >= 0; --i) {
1813 // Ignore private variables and arrays - there is no mapping for them.
1814 // Also ignore the use_device_ptr clause; it has no effect here.
1815 if ((arg_types[i] & OMP_TGT_MAPTYPE_LITERAL) ||
1816 (arg_types[i] & OMP_TGT_MAPTYPE_PRIVATE))
1817 continue;
1818
1819 void *HstPtrBegin = args[i];
1820 bool IsLast;
1821 bool UpdateRef = !(arg_types[i] & OMP_TGT_MAPTYPE_MEMBER_OF) ||
1822 (arg_types[i] & OMP_TGT_MAPTYPE_PTR_AND_OBJ);
1823 bool ForceDelete = arg_types[i] & OMP_TGT_MAPTYPE_DELETE;
1824
1825 // If PTR_AND_OBJ, HstPtrBegin is address of pointee
1826 void *TgtPtrBegin = Device.getTgtPtrBegin(HstPtrBegin, arg_sizes[i], IsLast,
1827 UpdateRef);
1828 DP("There are %" PRId64 " bytes allocated at target address " DPxMOD
1829 " - is%s last\n", arg_sizes[i], DPxPTR(TgtPtrBegin),
1830 (IsLast ? "" : " not"));
1831
George Rokos15a6e7d2017-02-15 20:45:37 +00001832 bool DelEntry = IsLast || ForceDelete;
1833
George Rokos2467df62017-01-25 21:27:24 +00001834 if ((arg_types[i] & OMP_TGT_MAPTYPE_MEMBER_OF) &&
1835 !(arg_types[i] & OMP_TGT_MAPTYPE_PTR_AND_OBJ)) {
George Rokos15a6e7d2017-02-15 20:45:37 +00001836 DelEntry = false; // protect parent struct from being deallocated
George Rokos2467df62017-01-25 21:27:24 +00001837 }
1838
George Rokos2467df62017-01-25 21:27:24 +00001839 if ((arg_types[i] & OMP_TGT_MAPTYPE_FROM) || DelEntry) {
1840 // Move data back to the host
1841 if (arg_types[i] & OMP_TGT_MAPTYPE_FROM) {
1842 bool Always = arg_types[i] & OMP_TGT_MAPTYPE_ALWAYS;
1843 bool CopyMember = false;
1844 if ((arg_types[i] & OMP_TGT_MAPTYPE_MEMBER_OF) &&
1845 !(arg_types[i] & OMP_TGT_MAPTYPE_PTR_AND_OBJ)) {
1846 // Copy data only if the "parent" struct has RefCount==1.
1847 short parent_idx = member_of(arg_types[i]);
1848 long parent_rc = Device.getMapEntryRefCnt(args[parent_idx]);
1849 assert(parent_rc > 0 && "parent struct not found");
1850 if (parent_rc == 1) {
1851 CopyMember = true;
1852 }
1853 }
1854
1855 if (DelEntry || Always || CopyMember) {
1856 DP("Moving %" PRId64 " bytes (tgt:" DPxMOD ") -> (hst:" DPxMOD ")\n",
1857 arg_sizes[i], DPxPTR(TgtPtrBegin), DPxPTR(HstPtrBegin));
1858 int rt = Device.data_retrieve(HstPtrBegin, TgtPtrBegin, arg_sizes[i]);
1859 if (rt != OFFLOAD_SUCCESS) {
1860 DP("Copying data from device failed.\n");
1861 rc = OFFLOAD_FAIL;
1862 }
1863 }
1864 }
1865
1866 // If we copied back to the host a struct/array containing pointers, we
1867 // need to restore the original host pointer values from their shadow
1868 // copies. If the struct is going to be deallocated, remove any remaining
1869 // shadow pointer entries for this struct.
1870 uintptr_t lb = (uintptr_t) HstPtrBegin;
1871 uintptr_t ub = (uintptr_t) HstPtrBegin + arg_sizes[i];
1872 Device.ShadowMtx.lock();
1873 for (ShadowPtrListTy::iterator it = Device.ShadowPtrMap.begin();
1874 it != Device.ShadowPtrMap.end(); ++it) {
1875 void **ShadowHstPtrAddr = (void**) it->first;
1876
1877 // An STL map is sorted on its keys; use this property
1878 // to quickly determine when to break out of the loop.
1879 if ((uintptr_t) ShadowHstPtrAddr < lb)
1880 continue;
1881 if ((uintptr_t) ShadowHstPtrAddr >= ub)
1882 break;
1883
1884 // If we copied the struct to the host, we need to restore the pointer.
1885 if (arg_types[i] & OMP_TGT_MAPTYPE_FROM) {
1886 DP("Restoring original host pointer value " DPxMOD " for host "
1887 "pointer " DPxMOD "\n", DPxPTR(it->second.HstPtrVal),
1888 DPxPTR(ShadowHstPtrAddr));
1889 *ShadowHstPtrAddr = it->second.HstPtrVal;
1890 }
1891 // If the struct is to be deallocated, remove the shadow entry.
1892 if (DelEntry) {
1893 DP("Removing shadow pointer " DPxMOD "\n", DPxPTR(ShadowHstPtrAddr));
1894 Device.ShadowPtrMap.erase(it);
1895 }
1896 }
1897 Device.ShadowMtx.unlock();
1898
1899 // Deallocate map
1900 if (DelEntry) {
1901 int rt = Device.deallocTgtPtr(HstPtrBegin, arg_sizes[i], ForceDelete);
1902 if (rt != OFFLOAD_SUCCESS) {
1903 DP("Deallocating data from device failed.\n");
1904 rc = OFFLOAD_FAIL;
1905 }
1906 }
1907 }
1908 }
1909
1910 return rc;
1911}
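
// Note on the deallocation decision above (a reading of the code): a mapping
// is released only when its reference count drops to zero (IsLast) or when the
// user forces it with a delete map-type modifier (ForceDelete). With nested
// data regions mapping the same host range, the inner region's end therefore
// only decrements the count, and the data stays resident until the outermost
// region ends.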
1912
1913/// Passes data back from the target, releases target memory, and destroys
1914/// the host-target mapping (the top entry of the stack of data maps)
1915/// created by the last __tgt_target_data_begin.
1916EXTERN void __tgt_target_data_end(int32_t device_id, int32_t arg_num,
1917 void **args_base, void **args, int64_t *arg_sizes, int32_t *arg_types) {
1918 DP("Entering data end region with %d mappings\n", arg_num);
1919
1920 // If no device was specified, use the default device.
1921 if (device_id == OFFLOAD_DEVICE_DEFAULT) {
1922 device_id = omp_get_default_device();
1923 }
1924
1925 RTLsMtx.lock();
1926 size_t Devices_size = Devices.size();
1927 RTLsMtx.unlock();
1928 if (Devices_size <= (size_t)device_id) {
1929 DP("Device ID %d does not have a matching RTL.\n", device_id);
1930 return;
1931 }
1932
1933 DeviceTy &Device = Devices[device_id];
1934 if (!Device.IsInit) {
1935 DP("uninit device: ignore");
1936 return;
1937 }
1938
1939 // Translate maps
1940 int32_t new_arg_num;
1941 void **new_args_base;
1942 void **new_args;
1943 int64_t *new_arg_sizes;
1944 int64_t *new_arg_types;
1945 translate_map(arg_num, args_base, args, arg_sizes, arg_types, new_arg_num,
1946 new_args_base, new_args, new_arg_sizes, new_arg_types, false);
1947
1948 //target_data_end(Device, arg_num, args_base, args, arg_sizes, arg_types);
1949 target_data_end(Device, new_arg_num, new_args_base, new_args, new_arg_sizes,
1950 new_arg_types);
1951
1952 // Cleanup translation memory
1953 cleanup_map(new_arg_num, new_args_base, new_args, new_arg_sizes,
1954 new_arg_types, arg_num, args_base);
1955}
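
// A minimal sketch (illustration only; names are hypothetical, flags are
// simplified) of how compiler-generated code for
//   #pragma omp target exit data map(from: a[0:N])
// is expected to reach the entry point above: the FROM flag makes
// target_data_end() copy the section back when the mapping is released.
#if 0
static void example_exit_data(int32_t device_id, double *a, int64_t N) {
  void *base[]  = {a};
  void *begin[] = {a};
  int64_t size[] = {N * (int64_t)sizeof(double)};
  int32_t type[] = {OMP_TGT_OLDMAPTYPE_FROM};      // copy device -> host on exit
  __tgt_target_data_end(device_id, /*arg_num=*/1, base, begin, size, type);
}
#endif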
1956
1957EXTERN void __tgt_target_data_end_nowait(int32_t device_id, int32_t arg_num,
1958 void **args_base, void **args, int64_t *arg_sizes, int32_t *arg_types,
1959 int32_t depNum, void *depList, int32_t noAliasDepNum,
1960 void *noAliasDepList) {
1961 if (depNum + noAliasDepNum > 0)
1962 __kmpc_omp_taskwait(NULL, 0);
1963
1964 __tgt_target_data_end(device_id, arg_num, args_base, args, arg_sizes,
1965 arg_types);
1966}
1967
1968/// Passes data to/from the target.
1969EXTERN void __tgt_target_data_update(int32_t device_id, int32_t arg_num,
1970 void **args_base, void **args, int64_t *arg_sizes, int32_t *arg_types) {
1971 DP("Entering data update with %d mappings\n", arg_num);
1972
1973 // If no device was specified, use the default device.
1974 if (device_id == OFFLOAD_DEVICE_DEFAULT) {
1975 device_id = omp_get_default_device();
1976 }
1977
1978 if (CheckDevice(device_id) != OFFLOAD_SUCCESS) {
1979 DP("Failed to get device %d ready\n", device_id);
1980 return;
1981 }
1982
1983 DeviceTy& Device = Devices[device_id];
1984
1985 // process each input.
1986 for (int32_t i = 0; i < arg_num; ++i) {
1987 if ((arg_types[i] & OMP_TGT_MAPTYPE_LITERAL) ||
1988 (arg_types[i] & OMP_TGT_MAPTYPE_PRIVATE))
1989 continue;
1990
1991 void *HstPtrBegin = args[i];
1992 int64_t MapSize = arg_sizes[i];
1993 bool IsLast;
1994 void *TgtPtrBegin = Device.getTgtPtrBegin(HstPtrBegin, MapSize, IsLast,
1995 false);
1996
1997 if (arg_types[i] & OMP_TGT_MAPTYPE_FROM) {
1998 DP("Moving %" PRId64 " bytes (tgt:" DPxMOD ") -> (hst:" DPxMOD ")\n",
1999 arg_sizes[i], DPxPTR(TgtPtrBegin), DPxPTR(HstPtrBegin));
2000 Device.data_retrieve(HstPtrBegin, TgtPtrBegin, MapSize);
2001
2002 uintptr_t lb = (uintptr_t) HstPtrBegin;
2003 uintptr_t ub = (uintptr_t) HstPtrBegin + MapSize;
2004 Device.ShadowMtx.lock();
2005 for (ShadowPtrListTy::iterator it = Device.ShadowPtrMap.begin();
2006 it != Device.ShadowPtrMap.end(); ++it) {
2007 void **ShadowHstPtrAddr = (void**) it->first;
2008 if ((uintptr_t) ShadowHstPtrAddr < lb)
2009 continue;
2010 if ((uintptr_t) ShadowHstPtrAddr >= ub)
2011 break;
2012 DP("Restoring original host pointer value " DPxMOD " for host pointer "
2013 DPxMOD "\n", DPxPTR(it->second.HstPtrVal),
2014 DPxPTR(ShadowHstPtrAddr));
2015 *ShadowHstPtrAddr = it->second.HstPtrVal;
2016 }
2017 Device.ShadowMtx.unlock();
2018 }
2019
2020 if (arg_types[i] & OMP_TGT_MAPTYPE_TO) {
2021 DP("Moving %" PRId64 " bytes (hst:" DPxMOD ") -> (tgt:" DPxMOD ")\n",
2022 arg_sizes[i], DPxPTR(HstPtrBegin), DPxPTR(TgtPtrBegin));
2023 Device.data_submit(TgtPtrBegin, HstPtrBegin, MapSize);
2024
2025 uintptr_t lb = (uintptr_t) HstPtrBegin;
2026 uintptr_t ub = (uintptr_t) HstPtrBegin + MapSize;
2027 Device.ShadowMtx.lock();
2028 for (ShadowPtrListTy::iterator it = Device.ShadowPtrMap.begin();
2029 it != Device.ShadowPtrMap.end(); ++it) {
2030 void **ShadowHstPtrAddr = (void**) it->first;
2031 if ((uintptr_t) ShadowHstPtrAddr < lb)
2032 continue;
2033 if ((uintptr_t) ShadowHstPtrAddr >= ub)
2034 break;
2035 DP("Restoring original target pointer value " DPxMOD " for target "
2036 "pointer " DPxMOD "\n", DPxPTR(it->second.TgtPtrVal),
2037 DPxPTR(it->second.TgtPtrAddr));
2038 Device.data_submit(it->second.TgtPtrAddr,
2039 &it->second.TgtPtrVal, sizeof(void *));
2040 }
2041 Device.ShadowMtx.unlock();
2042 }
2043 }
2044}
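
// A minimal sketch (illustration only; names are hypothetical) of how a
//   #pragma omp target update from(a[0:N])
// directive is expected to reach the entry point above. The section must
// already be mapped by an enclosing data region, otherwise getTgtPtrBegin()
// finds no device copy.
#if 0
static void example_update_from(int32_t device_id, double *a, int64_t N) {
  void *base[]  = {a};
  void *begin[] = {a};
  int64_t size[] = {N * (int64_t)sizeof(double)};
  int32_t type[] = {OMP_TGT_MAPTYPE_FROM};         // motion clause: device -> host
  __tgt_target_data_update(device_id, /*arg_num=*/1, base, begin, size, type);
}
#endif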
2045
2046EXTERN void __tgt_target_data_update_nowait(
2047 int32_t device_id, int32_t arg_num, void **args_base, void **args,
2048 int64_t *arg_sizes, int32_t *arg_types, int32_t depNum, void *depList,
2049 int32_t noAliasDepNum, void *noAliasDepList) {
2050 if (depNum + noAliasDepNum > 0)
2051 __kmpc_omp_taskwait(NULL, 0);
2052
2053 __tgt_target_data_update(device_id, arg_num, args_base, args, arg_sizes,
2054 arg_types);
2055}
2056
2057/// performs the same actions as data_begin when arg_num is non-zero and then
2058/// initiates the run of the offloaded region on the target platform; if
2059/// arg_num is non-zero, it also performs the same actions as data_update and
2060/// data_end above once the region execution is done. This function returns
2061/// zero if it was able to transfer the execution to a target and a non-zero
2062/// value otherwise.
2063static int target(int32_t device_id, void *host_ptr, int32_t arg_num,
2064 void **args_base, void **args, int64_t *arg_sizes, int64_t *arg_types,
2065 int32_t team_num, int32_t thread_limit, int IsTeamConstruct) {
2066 DeviceTy &Device = Devices[device_id];
2067
2068 // Find the table information in the map or look it up in the translation
2069 // tables.
2070 TableMap *TM = 0;
2071 TblMapMtx.lock();
2072 HostPtrToTableMapTy::iterator TableMapIt = HostPtrToTableMap.find(host_ptr);
2073 if (TableMapIt == HostPtrToTableMap.end()) {
2074 // We don't have a map. So search all the registered libraries.
2075 TrlTblMtx.lock();
2076 for (HostEntriesBeginToTransTableTy::iterator
2077 ii = HostEntriesBeginToTransTable.begin(),
2078 ie = HostEntriesBeginToTransTable.end();
2079 !TM && ii != ie; ++ii) {
2080 // get the translation table (which contains all the good info).
2081 TranslationTable *TransTable = &ii->second;
2082 // iterate over all the host table entries to see if we can locate the
2083 // host_ptr.
2084 __tgt_offload_entry *begin = TransTable->HostTable.EntriesBegin;
2085 __tgt_offload_entry *end = TransTable->HostTable.EntriesEnd;
2086 __tgt_offload_entry *cur = begin;
2087 for (uint32_t i = 0; cur < end; ++cur, ++i) {
2088 if (cur->addr != host_ptr)
2089 continue;
2090 // we got a match, now fill the HostPtrToTableMap so that we
2091 // may avoid this search next time.
2092 TM = &HostPtrToTableMap[host_ptr];
2093 TM->Table = TransTable;
2094 TM->Index = i;
2095 break;
2096 }
2097 }
2098 TrlTblMtx.unlock();
2099 } else {
2100 TM = &TableMapIt->second;
2101 }
2102 TblMapMtx.unlock();
2103
2104 // No map for this host pointer found!
2105 if (!TM) {
2106 DP("Host ptr " DPxMOD " does not have a matching target pointer.\n",
2107 DPxPTR(host_ptr));
2108 return OFFLOAD_FAIL;
2109 }
2110
2111 // get target table.
2112 TrlTblMtx.lock();
2113 assert(TM->Table->TargetsTable.size() > (size_t)device_id &&
2114 "Not expecting a device ID outside the table's bounds!");
2115 __tgt_target_table *TargetTable = TM->Table->TargetsTable[device_id];
2116 TrlTblMtx.unlock();
2117 assert(TargetTable && "Global data has not been mapped\n");
2118
2119 // Move data to device.
2120 int rc = target_data_begin(Device, arg_num, args_base, args, arg_sizes,
2121 arg_types);
2122
2123 if (rc != OFFLOAD_SUCCESS) {
2124 DP("Call to target_data_begin failed, skipping target execution.\n");
2125 // Call target_data_end to dealloc whatever target_data_begin allocated
2126 // and return OFFLOAD_FAIL.
2127 target_data_end(Device, arg_num, args_base, args, arg_sizes, arg_types);
2128 return OFFLOAD_FAIL;
2129 }
2130
2131 std::vector<void *> tgt_args;
George Rokos1546d312017-05-10 14:12:36 +00002132 std::vector<ptrdiff_t> tgt_offsets;
George Rokos2467df62017-01-25 21:27:24 +00002133
2134 // List of (first-)private arrays allocated for this target region
2135 std::vector<void *> fpArrays;
2136
2137 for (int32_t i = 0; i < arg_num; ++i) {
2138 if (!(arg_types[i] & OMP_TGT_MAPTYPE_TARGET_PARAM)) {
2139 // This is not a target parameter, do not push it into tgt_args.
2140 continue;
2141 }
2142 void *HstPtrBegin = args[i];
2143 void *HstPtrBase = args_base[i];
George Rokos1546d312017-05-10 14:12:36 +00002144 void *TgtPtrBegin;
2145 ptrdiff_t TgtBaseOffset;
George Rokos2467df62017-01-25 21:27:24 +00002146 bool IsLast; // unused.
2147 if (arg_types[i] & OMP_TGT_MAPTYPE_LITERAL) {
2148 DP("Forwarding first-private value " DPxMOD " to the target construct\n",
2149 DPxPTR(HstPtrBase));
George Rokos1546d312017-05-10 14:12:36 +00002150 TgtPtrBegin = HstPtrBase;
2151 TgtBaseOffset = 0;
George Rokos2467df62017-01-25 21:27:24 +00002152 } else if (arg_types[i] & OMP_TGT_MAPTYPE_PRIVATE) {
2153 // Allocate memory for (first-)private array
George Rokos1546d312017-05-10 14:12:36 +00002154 TgtPtrBegin = Device.RTL->data_alloc(Device.RTLDeviceID,
2155 arg_sizes[i], HstPtrBegin);
George Rokos2467df62017-01-25 21:27:24 +00002156 if (!TgtPtrBegin) {
2157 DP ("Data allocation for %sprivate array " DPxMOD " failed\n",
2158 (arg_types[i] & OMP_TGT_MAPTYPE_TO ? "first-" : ""),
2159 DPxPTR(HstPtrBegin));
2160 rc = OFFLOAD_FAIL;
2161 break;
2162 } else {
2163 fpArrays.push_back(TgtPtrBegin);
George Rokos1546d312017-05-10 14:12:36 +00002164 TgtBaseOffset = (intptr_t)HstPtrBase - (intptr_t)HstPtrBegin;
Samuel Antao8933ffb2017-06-09 16:46:07 +00002165#ifdef OMPTARGET_DEBUG
George Rokos1546d312017-05-10 14:12:36 +00002166 void *TgtPtrBase = (void *)((intptr_t)TgtPtrBegin + TgtBaseOffset);
George Rokos2467df62017-01-25 21:27:24 +00002167 DP("Allocated %" PRId64 " bytes of target memory at " DPxMOD " for "
2168 "%sprivate array " DPxMOD " - pushing target argument " DPxMOD "\n",
2169 arg_sizes[i], DPxPTR(TgtPtrBegin),
2170 (arg_types[i] & OMP_TGT_MAPTYPE_TO ? "first-" : ""),
2171 DPxPTR(HstPtrBegin), DPxPTR(TgtPtrBase));
Samuel Antao8933ffb2017-06-09 16:46:07 +00002172#endif
George Rokos2467df62017-01-25 21:27:24 +00002173 // If first-private, copy data from host
2174 if (arg_types[i] & OMP_TGT_MAPTYPE_TO) {
2175 int rt = Device.data_submit(TgtPtrBegin, HstPtrBegin, arg_sizes[i]);
2176 if (rt != OFFLOAD_SUCCESS) {
2177 DP ("Copying data to device failed.\n");
2178 rc = OFFLOAD_FAIL;
2179 break;
2180 }
2181 }
2182 }
2183 } else if (arg_types[i] & OMP_TGT_MAPTYPE_PTR_AND_OBJ) {
George Rokos1546d312017-05-10 14:12:36 +00002184 TgtPtrBegin = Device.getTgtPtrBegin(HstPtrBase, sizeof(void *), IsLast,
2185 false);
2186 TgtBaseOffset = 0; // no offset for ptrs.
George Rokos2467df62017-01-25 21:27:24 +00002187 DP("Obtained target argument " DPxMOD " from host pointer " DPxMOD " to "
2188 "object " DPxMOD "\n", DPxPTR(TgtPtrBegin), DPxPTR(HstPtrBase),
2189 DPxPTR(HstPtrBase));
2190 } else {
George Rokos1546d312017-05-10 14:12:36 +00002191 TgtPtrBegin = Device.getTgtPtrBegin(HstPtrBegin, arg_sizes[i], IsLast,
2192 false);
2193 TgtBaseOffset = (intptr_t)HstPtrBase - (intptr_t)HstPtrBegin;
Samuel Antao8933ffb2017-06-09 16:46:07 +00002194#ifdef OMPTARGET_DEBUG
George Rokos1546d312017-05-10 14:12:36 +00002195 void *TgtPtrBase = (void *)((intptr_t)TgtPtrBegin + TgtBaseOffset);
George Rokos2467df62017-01-25 21:27:24 +00002196 DP("Obtained target argument " DPxMOD " from host pointer " DPxMOD "\n",
2197 DPxPTR(TgtPtrBase), DPxPTR(HstPtrBegin));
Samuel Antao8933ffb2017-06-09 16:46:07 +00002198#endif
George Rokos2467df62017-01-25 21:27:24 +00002199 }
George Rokos1546d312017-05-10 14:12:36 +00002200 tgt_args.push_back(TgtPtrBegin);
2201 tgt_offsets.push_back(TgtBaseOffset);
George Rokos2467df62017-01-25 21:27:24 +00002202 }
George Rokos1546d312017-05-10 14:12:36 +00002203
2204 assert(tgt_args.size() == tgt_offsets.size() &&
2205 "Size mismatch in arguments and offsets");
George Rokos2467df62017-01-25 21:27:24 +00002206
2207 // Pop loop trip count
2208 uint64_t ltc = Device.loopTripCnt;
2209 Device.loopTripCnt = 0;
2210
2211 // Launch device execution.
2212 if (rc == OFFLOAD_SUCCESS) {
2213 DP("Launching target execution %s with pointer " DPxMOD " (index=%d).\n",
2214 TargetTable->EntriesBegin[TM->Index].name,
2215 DPxPTR(TargetTable->EntriesBegin[TM->Index].addr), TM->Index);
2216 if (IsTeamConstruct) {
2217 rc = Device.run_team_region(TargetTable->EntriesBegin[TM->Index].addr,
George Rokos1546d312017-05-10 14:12:36 +00002218 &tgt_args[0], &tgt_offsets[0], tgt_args.size(), team_num,
2219 thread_limit, ltc);
George Rokos2467df62017-01-25 21:27:24 +00002220 } else {
2221 rc = Device.run_region(TargetTable->EntriesBegin[TM->Index].addr,
George Rokos1546d312017-05-10 14:12:36 +00002222 &tgt_args[0], &tgt_offsets[0], tgt_args.size());
George Rokos2467df62017-01-25 21:27:24 +00002223 }
2224 } else {
2225 DP("Errors occurred while obtaining target arguments, skipping kernel "
2226 "execution\n");
2227 }
2228
2229 // Deallocate (first-)private arrays
2230 for (auto it : fpArrays) {
2231 int rt = Device.RTL->data_delete(Device.RTLDeviceID, it);
2232 if (rt != OFFLOAD_SUCCESS) {
2233 DP("Deallocation of (first-)private arrays failed.\n");
2234 rc = OFFLOAD_FAIL;
2235 }
2236 }
2237
2238 // Move data from device.
2239 int rt = target_data_end(Device, arg_num, args_base, args, arg_sizes,
2240 arg_types);
2241
2242 if (rt != OFFLOAD_SUCCESS) {
2243 DP("Call to target_data_end failed.\n");
2244 rc = OFFLOAD_FAIL;
2245 }
2246
2247 return rc;
2248}
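
// Note on the argument/offset pairs gathered above (a reading of the code, not
// additional behavior): tgt_args[] holds the device begin addresses and
// tgt_offsets[] the distance from each entry's begin to its base
// (HstPtrBase - HstPtrBegin). Before launching, the plugin is expected to
// reconstruct each kernel argument as tgt_args[k] + tgt_offsets[k], which is
// the value the OMPTARGET_DEBUG prints above compute as TgtPtrBase.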
2249
2250EXTERN int __tgt_target(int32_t device_id, void *host_ptr, int32_t arg_num,
2251 void **args_base, void **args, int64_t *arg_sizes, int32_t *arg_types) {
George Rokos2467df62017-01-25 21:27:24 +00002252 DP("Entering target region with entry point " DPxMOD " and device Id %d\n",
2253 DPxPTR(host_ptr), device_id);
2254
2255 if (device_id == OFFLOAD_DEVICE_DEFAULT) {
2256 device_id = omp_get_default_device();
2257 }
2258
2259 if (CheckDevice(device_id) != OFFLOAD_SUCCESS) {
2260 DP("Failed to get device %d ready\n", device_id);
2261 return OFFLOAD_FAIL;
2262 }
2263
2264 // Translate maps
2265 int32_t new_arg_num;
2266 void **new_args_base;
2267 void **new_args;
2268 int64_t *new_arg_sizes;
2269 int64_t *new_arg_types;
2270 translate_map(arg_num, args_base, args, arg_sizes, arg_types, new_arg_num,
2271 new_args_base, new_args, new_arg_sizes, new_arg_types, true);
2272
2273 //return target(device_id, host_ptr, arg_num, args_base, args, arg_sizes,
2274 // arg_types, 0, 0, false /*team*/, false /*recursive*/);
2275 int rc = target(device_id, host_ptr, new_arg_num, new_args_base, new_args,
2276 new_arg_sizes, new_arg_types, 0, 0, false /*team*/);
2277
2278 // Cleanup translation memory
2279 cleanup_map(new_arg_num, new_args_base, new_args, new_arg_sizes,
2280 new_arg_types, arg_num, args_base);
2281
2282 return rc;
2283}
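
// A minimal sketch (illustration only) of how a combined construct such as
//   #pragma omp target map(tofrom: x)
// is expected to reach __tgt_target() above. The outlined-region symbol and
// helper name are hypothetical; host_ptr must be the address of an entry
// previously registered through __tgt_register_lib, and the TARGET_PARAM bit
// marks arguments that target() forwards to the launched kernel. The flag set
// below is simplified and assumes the bit layout the code above relies on.
#if 0
extern void __omp_offloading_example_region();     // hypothetical host entry

static int example_target(int32_t device_id, int *x) {
  void *base[]  = {x};
  void *begin[] = {x};
  int64_t size[] = {(int64_t)sizeof(int)};
  int32_t type[] = {OMP_TGT_OLDMAPTYPE_TO | OMP_TGT_OLDMAPTYPE_FROM |
                    OMP_TGT_MAPTYPE_TARGET_PARAM}; // simplified flag set
  return __tgt_target(device_id, (void *)&__omp_offloading_example_region,
                      /*arg_num=*/1, base, begin, size, type);
}
#endif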
2284
2285EXTERN int __tgt_target_nowait(int32_t device_id, void *host_ptr,
2286 int32_t arg_num, void **args_base, void **args, int64_t *arg_sizes,
2287 int32_t *arg_types, int32_t depNum, void *depList, int32_t noAliasDepNum,
2288 void *noAliasDepList) {
2289 if (depNum + noAliasDepNum > 0)
2290 __kmpc_omp_taskwait(NULL, 0);
2291
2292 return __tgt_target(device_id, host_ptr, arg_num, args_base, args, arg_sizes,
2293 arg_types);
2294}
2295
2296EXTERN int __tgt_target_teams(int32_t device_id, void *host_ptr,
2297 int32_t arg_num, void **args_base, void **args, int64_t *arg_sizes,
2298 int32_t *arg_types, int32_t team_num, int32_t thread_limit) {
George Rokos2467df62017-01-25 21:27:24 +00002299 DP("Entering target region with entry point " DPxMOD " and device Id %d\n",
2300 DPxPTR(host_ptr), device_id);
2301
2302 if (device_id == OFFLOAD_DEVICE_DEFAULT) {
2303 device_id = omp_get_default_device();
2304 }
2305
2306 if (CheckDevice(device_id) != OFFLOAD_SUCCESS) {
2307 DP("Failed to get device %d ready\n", device_id);
2308 return OFFLOAD_FAIL;
2309 }
2310
2311 // Translate maps
2312 int32_t new_arg_num;
2313 void **new_args_base;
2314 void **new_args;
2315 int64_t *new_arg_sizes;
2316 int64_t *new_arg_types;
2317 translate_map(arg_num, args_base, args, arg_sizes, arg_types, new_arg_num,
2318 new_args_base, new_args, new_arg_sizes, new_arg_types, true);
2319
2320 //return target(device_id, host_ptr, arg_num, args_base, args, arg_sizes,
2321 // arg_types, team_num, thread_limit, true /*team*/,
2322 // false /*recursive*/);
2323 int rc = target(device_id, host_ptr, new_arg_num, new_args_base, new_args,
2324 new_arg_sizes, new_arg_types, team_num, thread_limit, true /*team*/);
2325
2326 // Cleanup translation memory
2327 cleanup_map(new_arg_num, new_args_base, new_args, new_arg_sizes,
2328 new_arg_types, arg_num, args_base);
2329
2330 return rc;
2331}
2332
2333EXTERN int __tgt_target_teams_nowait(int32_t device_id, void *host_ptr,
2334 int32_t arg_num, void **args_base, void **args, int64_t *arg_sizes,
2335 int32_t *arg_types, int32_t team_num, int32_t thread_limit, int32_t depNum,
2336 void *depList, int32_t noAliasDepNum, void *noAliasDepList) {
2337 if (depNum + noAliasDepNum > 0)
2338 __kmpc_omp_taskwait(NULL, 0);
2339
2340 return __tgt_target_teams(device_id, host_ptr, arg_num, args_base, args,
2341 arg_sizes, arg_types, team_num, thread_limit);
2342}
2343
2344
2345// The trip count mechanism will be revised - this scheme is not thread-safe.
2346EXTERN void __kmpc_push_target_tripcount(int32_t device_id,
2347 uint64_t loop_tripcount) {
2348 if (device_id == OFFLOAD_DEVICE_DEFAULT) {
2349 device_id = omp_get_default_device();
2350 }
2351
2352 if (CheckDevice(device_id) != OFFLOAD_SUCCESS) {
2353 DP("Failed to get device %d ready\n", device_id);
2354 return;
2355 }
2356
2357 DP("__kmpc_push_target_tripcount(%d, %" PRIu64 ")\n", device_id,
2358 loop_tripcount);
2359 Devices[device_id].loopTripCnt = loop_tripcount;
2360}
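
// A minimal sketch (illustration only; names hypothetical, flags simplified)
// of the expected call sequence for a construct like
//   #pragma omp target teams distribute parallel for num_teams(4) thread_limit(64)
// when the loop trip count is known: the compiler first pushes the trip count
// for the chosen device and then launches the teams region.
#if 0
extern void __omp_offloading_example_teams_region(); // hypothetical host entry

static int example_target_teams(int32_t device_id, double *a, int64_t N) {
  __kmpc_push_target_tripcount(device_id, (uint64_t)N);

  void *base[]  = {a};
  void *begin[] = {a};
  int64_t size[] = {N * (int64_t)sizeof(double)};
  int32_t type[] = {OMP_TGT_OLDMAPTYPE_TO | OMP_TGT_OLDMAPTYPE_FROM |
                    OMP_TGT_MAPTYPE_TARGET_PARAM};  // simplified flag set
  return __tgt_target_teams(device_id,
                            (void *)&__omp_offloading_example_teams_region,
                            /*arg_num=*/1, base, begin, size, type,
                            /*team_num=*/4, /*thread_limit=*/64);
}
#endif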
2361