OpenCL C++ Bindings
Loading...
Searching...
No Matches
opencl.hpp
Go to the documentation of this file.
1//
2// Copyright (c) 2008-2024 The Khronos Group Inc.
3//
4// Licensed under the Apache License, Version 2.0 (the "License");
5// you may not use this file except in compliance with the License.
6// You may obtain a copy of the License at
7//
8// http://www.apache.org/licenses/LICENSE-2.0
9//
10// Unless required by applicable law or agreed to in writing, software
11// distributed under the License is distributed on an "AS IS" BASIS,
12// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13// See the License for the specific language governing permissions and
14// limitations under the License.
15//
16
55
329 // SVM allocations
330
331 auto anSVMInt = cl::allocate_svm<int, cl::SVMTraitCoarse<>>();
332 *anSVMInt = 5;
333 cl::SVMAllocator<Foo, cl::SVMTraitCoarse<cl::SVMTraitReadOnly<>>> svmAllocReadOnly;
334 auto fooPointer = cl::allocate_pointer<Foo>(svmAllocReadOnly);
335 fooPointer->bar = anSVMInt.get();
336 cl::SVMAllocator<int, cl::SVMTraitCoarse<>> svmAlloc;
337 std::vector<int, cl::SVMAllocator<int, cl::SVMTraitCoarse<>>> inputA(numElements, 1, svmAlloc);
338 cl::coarse_svm_vector<int> inputB(numElements, 2, svmAlloc);
339
341 // Traditional cl_mem allocations
342
343 std::vector<int> output(numElements, 0xdeadbeef);
344 cl::Buffer outputBuffer(output.begin(), output.end(), false);
345 cl::Pipe aPipe(sizeof(cl_int), numElements / 2);
346
347 // Default command queue, also passed in as a parameter
348 cl::DeviceCommandQueue defaultDeviceQueue = cl::DeviceCommandQueue::makeDefault(
349 cl::Context::getDefault(), cl::Device::getDefault());
350
351 auto vectorAddKernel =
352 cl::KernelFunctor<
353 decltype(fooPointer)&,
354 int*,
355 cl::coarse_svm_vector<int>&,
356 cl::Buffer,
357 int,
358 cl::Pipe&,
359 cl::DeviceCommandQueue
360 >(vectorAddProgram, "vectorAdd");
361
362 // Ensure that the additional SVM pointer is available to the kernel
363 // This one was not passed as a parameter
364 vectorAddKernel.setSVMPointers(anSVMInt);
365
366 cl_int error;
367 vectorAddKernel(
368 cl::EnqueueArgs(
369 cl::NDRange(numElements/2),
370 cl::NDRange(numElements/2)),
371 fooPointer,
372 inputA.data(),
373 inputB,
374 outputBuffer,
375 3,
376 aPipe,
377 defaultDeviceQueue,
378 error
379 );
380
381 cl::copy(outputBuffer, output.begin(), output.end());
382
383 cl::Device d = cl::Device::getDefault();
384
385 std::cout << "Output:\n";
386 for (int i = 1; i < numElements; ++i) {
387 std::cout << "\t" << output[i] << "\n";
388 }
389 std::cout << "\n\n";
390
391 return 0;
392 }
393 *
394 * \endcode
395 *
396 */
397#ifndef CL_HPP_
398#define CL_HPP_
399
400#ifdef CL_HPP_OPENCL_API_WRAPPER
401#define CL_(name) CL_HPP_OPENCL_API_WRAPPER(name)
402#else
403#define CL_(name) ::name
404#endif
405
406/* Handle deprecated preprocessor definitions. In each case, we only check for
407 * the old name if the new name is not defined, so that user code can define
408 * both and hence work with either version of the bindings.
409 */
410#if !defined(CL_HPP_USE_DX_INTEROP) && defined(USE_DX_INTEROP)
411# pragma message("opencl.hpp: USE_DX_INTEROP is deprecated. Define CL_HPP_USE_DX_INTEROP instead")
412# define CL_HPP_USE_DX_INTEROP
413#endif
414#if !defined(CL_HPP_ENABLE_EXCEPTIONS) && defined(__CL_ENABLE_EXCEPTIONS)
415# pragma message("opencl.hpp: __CL_ENABLE_EXCEPTIONS is deprecated. Define CL_HPP_ENABLE_EXCEPTIONS instead")
416# define CL_HPP_ENABLE_EXCEPTIONS
417#endif
418#if !defined(CL_HPP_NO_STD_VECTOR) && defined(__NO_STD_VECTOR)
419# pragma message("opencl.hpp: __NO_STD_VECTOR is deprecated. Define CL_HPP_NO_STD_VECTOR instead")
420# define CL_HPP_NO_STD_VECTOR
421#endif
422#if !defined(CL_HPP_NO_STD_STRING) && defined(__NO_STD_STRING)
423# pragma message("opencl.hpp: __NO_STD_STRING is deprecated. Define CL_HPP_NO_STD_STRING instead")
424# define CL_HPP_NO_STD_STRING
425#endif
426#if defined(VECTOR_CLASS)
427# pragma message("opencl.hpp: VECTOR_CLASS is deprecated. Alias cl::vector instead")
428#endif
429#if defined(STRING_CLASS)
430# pragma message("opencl.hpp: STRING_CLASS is deprecated. Alias cl::string instead.")
431#endif
432#if !defined(CL_HPP_USER_OVERRIDE_ERROR_STRINGS) && defined(__CL_USER_OVERRIDE_ERROR_STRINGS)
433# pragma message("opencl.hpp: __CL_USER_OVERRIDE_ERROR_STRINGS is deprecated. Define CL_HPP_USER_OVERRIDE_ERROR_STRINGS instead")
434# define CL_HPP_USER_OVERRIDE_ERROR_STRINGS
435#endif
436
437/* Warn about features that are no longer supported
438 */
439#if defined(__USE_DEV_VECTOR)
440# pragma message("opencl.hpp: __USE_DEV_VECTOR is no longer supported. Expect compilation errors")
441#endif
442#if defined(__USE_DEV_STRING)
443# pragma message("opencl.hpp: __USE_DEV_STRING is no longer supported. Expect compilation errors")
444#endif
445
446/* Detect which version to target */
447#if !defined(CL_HPP_TARGET_OPENCL_VERSION)
448# pragma message("opencl.hpp: CL_HPP_TARGET_OPENCL_VERSION is not defined. It will default to 300 (OpenCL 3.0)")
449# define CL_HPP_TARGET_OPENCL_VERSION 300
450#endif
451#if CL_HPP_TARGET_OPENCL_VERSION != 100 && \
452 CL_HPP_TARGET_OPENCL_VERSION != 110 && \
453 CL_HPP_TARGET_OPENCL_VERSION != 120 && \
454 CL_HPP_TARGET_OPENCL_VERSION != 200 && \
455 CL_HPP_TARGET_OPENCL_VERSION != 210 && \
456 CL_HPP_TARGET_OPENCL_VERSION != 220 && \
457 CL_HPP_TARGET_OPENCL_VERSION != 300
458# pragma message("opencl.hpp: CL_HPP_TARGET_OPENCL_VERSION is not a valid value (100, 110, 120, 200, 210, 220 or 300). It will be set to 300 (OpenCL 3.0).")
459# undef CL_HPP_TARGET_OPENCL_VERSION
460# define CL_HPP_TARGET_OPENCL_VERSION 300
461#endif
462
463/* Forward target OpenCL version to C headers if necessary */
464#if defined(CL_TARGET_OPENCL_VERSION)
465/* Warn if prior definition of CL_TARGET_OPENCL_VERSION is lower than
466 * requested C++ bindings version */
467#if CL_TARGET_OPENCL_VERSION < CL_HPP_TARGET_OPENCL_VERSION
468# pragma message("CL_TARGET_OPENCL_VERSION is already defined and is lower than CL_HPP_TARGET_OPENCL_VERSION")
469#endif
470#else
471# define CL_TARGET_OPENCL_VERSION CL_HPP_TARGET_OPENCL_VERSION
472#endif
473
474#if !defined(CL_HPP_MINIMUM_OPENCL_VERSION)
475# define CL_HPP_MINIMUM_OPENCL_VERSION 200
476#endif
477#if CL_HPP_MINIMUM_OPENCL_VERSION != 100 && \
478 CL_HPP_MINIMUM_OPENCL_VERSION != 110 && \
479 CL_HPP_MINIMUM_OPENCL_VERSION != 120 && \
480 CL_HPP_MINIMUM_OPENCL_VERSION != 200 && \
481 CL_HPP_MINIMUM_OPENCL_VERSION != 210 && \
482 CL_HPP_MINIMUM_OPENCL_VERSION != 220 && \
483 CL_HPP_MINIMUM_OPENCL_VERSION != 300
484# pragma message("opencl.hpp: CL_HPP_MINIMUM_OPENCL_VERSION is not a valid value (100, 110, 120, 200, 210, 220 or 300). It will be set to 100")
485# undef CL_HPP_MINIMUM_OPENCL_VERSION
486# define CL_HPP_MINIMUM_OPENCL_VERSION 100
487#endif
488#if CL_HPP_MINIMUM_OPENCL_VERSION > CL_HPP_TARGET_OPENCL_VERSION
489# error "CL_HPP_MINIMUM_OPENCL_VERSION must not be greater than CL_HPP_TARGET_OPENCL_VERSION"
490#endif
491
492#if CL_HPP_MINIMUM_OPENCL_VERSION <= 100 && !defined(CL_USE_DEPRECATED_OPENCL_1_0_APIS)
493# define CL_USE_DEPRECATED_OPENCL_1_0_APIS
494#endif
495#if CL_HPP_MINIMUM_OPENCL_VERSION <= 110 && !defined(CL_USE_DEPRECATED_OPENCL_1_1_APIS)
496# define CL_USE_DEPRECATED_OPENCL_1_1_APIS
497#endif
498#if CL_HPP_MINIMUM_OPENCL_VERSION <= 120 && !defined(CL_USE_DEPRECATED_OPENCL_1_2_APIS)
499# define CL_USE_DEPRECATED_OPENCL_1_2_APIS
500#endif
501#if CL_HPP_MINIMUM_OPENCL_VERSION <= 200 && !defined(CL_USE_DEPRECATED_OPENCL_2_0_APIS)
502# define CL_USE_DEPRECATED_OPENCL_2_0_APIS
503#endif
504#if CL_HPP_MINIMUM_OPENCL_VERSION <= 210 && !defined(CL_USE_DEPRECATED_OPENCL_2_1_APIS)
505# define CL_USE_DEPRECATED_OPENCL_2_1_APIS
506#endif
507#if CL_HPP_MINIMUM_OPENCL_VERSION <= 220 && !defined(CL_USE_DEPRECATED_OPENCL_2_2_APIS)
508# define CL_USE_DEPRECATED_OPENCL_2_2_APIS
509#endif
510
511#ifdef _WIN32
512
513#include <malloc.h>
514
515#if defined(CL_HPP_USE_DX_INTEROP)
516#include <CL/cl_d3d10.h>
517#include <CL/cl_dx9_media_sharing.h>
518#endif
519#endif // _WIN32
520
521#if defined(_MSC_VER)
522#include <intrin.h>
523#endif // _MSC_VER
524
525 // Check for a valid C++ version
526
527// Need to do both tests here because for some reason __cplusplus is not
528// updated in visual studio
529#if (!defined(_MSC_VER) && __cplusplus < 201103L) || (defined(_MSC_VER) && _MSC_VER < 1700)
530#error Visual studio 2013 or another C++11-supporting compiler required
531#endif
532
533#if defined(__APPLE__) || defined(__MACOSX)
534#include <OpenCL/opencl.h>
535#else
536#include <CL/opencl.h>
537#endif // !__APPLE__
538
539#if __cplusplus >= 201703L
540# define CL_HPP_DEFINE_STATIC_MEMBER_ inline
541#elif defined(_MSC_VER)
542# define CL_HPP_DEFINE_STATIC_MEMBER_ __declspec(selectany)
543#elif defined(__MINGW32__)
544# define CL_HPP_DEFINE_STATIC_MEMBER_ __attribute__((selectany))
545#else
546# define CL_HPP_DEFINE_STATIC_MEMBER_ __attribute__((weak))
547#endif // !_MSC_VER
548
549// Define deprecated prefixes and suffixes to ensure compilation
550// in case they are not pre-defined
551#if !defined(CL_API_PREFIX__VERSION_1_1_DEPRECATED)
552#define CL_API_PREFIX__VERSION_1_1_DEPRECATED
553#endif // #if !defined(CL_API_PREFIX__VERSION_1_1_DEPRECATED)
554#if !defined(CL_API_SUFFIX__VERSION_1_1_DEPRECATED)
555#define CL_API_SUFFIX__VERSION_1_1_DEPRECATED
556#endif // #if !defined(CL_API_SUFFIX__VERSION_1_1_DEPRECATED)
557
558#if !defined(CL_API_PREFIX__VERSION_1_2_DEPRECATED)
559#define CL_API_PREFIX__VERSION_1_2_DEPRECATED
560#endif // #if !defined(CL_API_PREFIX__VERSION_1_2_DEPRECATED)
561#if !defined(CL_API_SUFFIX__VERSION_1_2_DEPRECATED)
562#define CL_API_SUFFIX__VERSION_1_2_DEPRECATED
563#endif // #if !defined(CL_API_SUFFIX__VERSION_1_2_DEPRECATED)
564
565#if !defined(CL_API_PREFIX__VERSION_2_2_DEPRECATED)
566#define CL_API_PREFIX__VERSION_2_2_DEPRECATED
567#endif // #if !defined(CL_API_PREFIX__VERSION_2_2_DEPRECATED)
568#if !defined(CL_API_SUFFIX__VERSION_2_2_DEPRECATED)
569#define CL_API_SUFFIX__VERSION_2_2_DEPRECATED
570#endif // #if !defined(CL_API_SUFFIX__VERSION_2_2_DEPRECATED)
571
572#if !defined(CL_CALLBACK)
573#define CL_CALLBACK
574#endif //CL_CALLBACK
575
576#include <utility>
577#include <limits>
578#include <iterator>
579#include <mutex>
580#include <cstring>
581#include <functional>
582
583
584// Define a size_type to represent a correctly resolved size_t
585#if defined(CL_HPP_ENABLE_SIZE_T_COMPATIBILITY)
586namespace cl {
587 using size_type = ::size_t;
588} // namespace cl
589#else // #if defined(CL_HPP_ENABLE_SIZE_T_COMPATIBILITY)
590namespace cl {
591 using size_type = size_t;
592} // namespace cl
593#endif // #if defined(CL_HPP_ENABLE_SIZE_T_COMPATIBILITY)
594
595
596#if defined(CL_HPP_ENABLE_EXCEPTIONS)
597#include <exception>
598#endif // #if defined(CL_HPP_ENABLE_EXCEPTIONS)
599
600#if !defined(CL_HPP_NO_STD_VECTOR)
601#include <vector>
602namespace cl {
603 template < class T, class Alloc = std::allocator<T> >
604 using vector = std::vector<T, Alloc>;
605} // namespace cl
606#endif // #if !defined(CL_HPP_NO_STD_VECTOR)
607
608#if !defined(CL_HPP_NO_STD_STRING)
609#include <string>
610namespace cl {
611 using string = std::string;
612} // namespace cl
613#endif // #if !defined(CL_HPP_NO_STD_STRING)
614
615#if CL_HPP_TARGET_OPENCL_VERSION >= 200
616
617#if !defined(CL_HPP_NO_STD_UNIQUE_PTR)
618#include <memory>
619namespace cl {
620 // Replace unique_ptr and allocate_pointer for internal use
621 // to allow user to replace them
622 template<class T, class D>
623 using pointer = std::unique_ptr<T, D>;
624} // namespace cl
625#endif
626#endif // #if CL_HPP_TARGET_OPENCL_VERSION >= 200
627#if !defined(CL_HPP_NO_STD_ARRAY)
628#include <array>
629namespace cl {
630 template < class T, size_type N >
631 using array = std::array<T, N>;
632} // namespace cl
633#endif // #if !defined(CL_HPP_NO_STD_ARRAY)
634
635// Define size_type appropriately to allow backward-compatibility
636// use of the old size_t interface class
637#if defined(CL_HPP_ENABLE_SIZE_T_COMPATIBILITY)
638namespace cl {
639 namespace compatibility {
644 template <int N>
645 class size_t
646 {
647 private:
648 size_type data_[N];
649
650 public:
652 size_t()
653 {
654 for (int i = 0; i < N; ++i) {
655 data_[i] = 0;
656 }
657 }
658
659 size_t(const array<size_type, N> &rhs)
660 {
661 for (int i = 0; i < N; ++i) {
662 data_[i] = rhs[i];
663 }
664 }
665
666 size_type& operator[](int index)
667 {
668 return data_[index];
669 }
670
671 const size_type& operator[](int index) const
672 {
673 return data_[index];
674 }
675
677 operator size_type* () { return data_; }
678
680 operator const size_type* () const { return data_; }
681
682 operator array<size_type, N>() const
683 {
684 array<size_type, N> ret;
685
686 for (int i = 0; i < N; ++i) {
687 ret[i] = data_[i];
688 }
689 return ret;
690 }
691 };
692 } // namespace compatibility
693
694 template<int N>
695 using size_t = compatibility::size_t<N>;
696} // namespace cl
697#endif // #if defined(CL_HPP_ENABLE_SIZE_T_COMPATIBILITY)
698
699// Helper alias to avoid confusing the macros
700namespace cl {
701 namespace detail {
702 using size_t_array = array<size_type, 3>;
703 } // namespace detail
704} // namespace cl
705
706
712namespace cl {
713
714#define CL_HPP_CREATE_CL_EXT_FCN_PTR_ALIAS_(name) \
715 using PFN_##name = name##_fn
716
717#define CL_HPP_INIT_CL_EXT_FCN_PTR_(name) \
718 if (!pfn_##name) { \
719 pfn_##name = (PFN_##name)CL_(clGetExtensionFunctionAddress)(#name); \
720 }
721
722#define CL_HPP_INIT_CL_EXT_FCN_PTR_PLATFORM_(platform, name) \
723 if (!pfn_##name) { \
724 pfn_##name = (PFN_##name) \
725 CL_(clGetExtensionFunctionAddressForPlatform)(platform, #name); \
726 }
727
728#ifdef cl_khr_external_memory
729 enum class ExternalMemoryType : cl_external_memory_handle_type_khr;
730#endif
731
732 class Memory;
733 class Platform;
734 class Program;
735 class Device;
736 class Context;
737 class CommandQueue;
738 class DeviceCommandQueue;
739 class Memory;
740 class Buffer;
741 class Pipe;
742#ifdef cl_khr_semaphore
743 class Semaphore;
744#endif
745#if defined(cl_khr_command_buffer)
746 class CommandBufferKhr;
747 class MutableCommandKhr;
748#endif // cl_khr_command_buffer
749
750#if defined(CL_HPP_ENABLE_EXCEPTIONS)
755 class Error : public std::exception
756 {
757 private:
758 cl_int err_;
759 const char * errStr_;
760 public:
770 Error(cl_int err, const char * errStr = nullptr) : err_(err), errStr_(errStr)
771 {}
772
777 const char * what() const noexcept override
778 {
779 if (errStr_ == nullptr) {
780 return "empty";
781 }
782 else {
783 return errStr_;
784 }
785 }
786
791 cl_int err(void) const { return err_; }
792 };
793#define CL_HPP_ERR_STR_(x) #x
794#else
795#define CL_HPP_ERR_STR_(x) nullptr
796#endif // CL_HPP_ENABLE_EXCEPTIONS
797
798
799namespace detail
800{
801#if defined(CL_HPP_ENABLE_EXCEPTIONS)
802static inline cl_int errHandler (
803 cl_int err,
804 const char * errStr = nullptr)
805{
806 if (err != CL_SUCCESS) {
807 throw Error(err, errStr);
808 }
809 return err;
810}
811#else
812static inline cl_int errHandler (cl_int err, const char * errStr = nullptr)
813{
814 (void) errStr; // suppress unused variable warning
815 return err;
816}
817#endif // CL_HPP_ENABLE_EXCEPTIONS
818}
819
820
821
823#if !defined(CL_HPP_USER_OVERRIDE_ERROR_STRINGS)
824#define __GET_DEVICE_INFO_ERR CL_HPP_ERR_STR_(clGetDeviceInfo)
825#define __GET_PLATFORM_INFO_ERR CL_HPP_ERR_STR_(clGetPlatformInfo)
826#define __GET_DEVICE_IDS_ERR CL_HPP_ERR_STR_(clGetDeviceIDs)
827#define __GET_PLATFORM_IDS_ERR CL_HPP_ERR_STR_(clGetPlatformIDs)
828#define __GET_CONTEXT_INFO_ERR CL_HPP_ERR_STR_(clGetContextInfo)
829#define __GET_EVENT_INFO_ERR CL_HPP_ERR_STR_(clGetEventInfo)
830#define __GET_EVENT_PROFILE_INFO_ERR CL_HPP_ERR_STR_(clGetEventProfileInfo)
831#define __GET_MEM_OBJECT_INFO_ERR CL_HPP_ERR_STR_(clGetMemObjectInfo)
832#define __GET_IMAGE_INFO_ERR CL_HPP_ERR_STR_(clGetImageInfo)
833#define __GET_SAMPLER_INFO_ERR CL_HPP_ERR_STR_(clGetSamplerInfo)
834#define __GET_KERNEL_INFO_ERR CL_HPP_ERR_STR_(clGetKernelInfo)
835#if CL_HPP_TARGET_OPENCL_VERSION >= 120
836#define __GET_KERNEL_ARG_INFO_ERR CL_HPP_ERR_STR_(clGetKernelArgInfo)
837#endif // CL_HPP_TARGET_OPENCL_VERSION >= 120
838#if CL_HPP_TARGET_OPENCL_VERSION >= 210
839#define __GET_KERNEL_SUB_GROUP_INFO_ERR CL_HPP_ERR_STR_(clGetKernelSubGroupInfo)
840#else
841#define __GET_KERNEL_SUB_GROUP_INFO_ERR CL_HPP_ERR_STR_(clGetKernelSubGroupInfoKHR)
842#endif // CL_HPP_TARGET_OPENCL_VERSION >= 210
843#define __GET_KERNEL_WORK_GROUP_INFO_ERR CL_HPP_ERR_STR_(clGetKernelWorkGroupInfo)
844#define __GET_PROGRAM_INFO_ERR CL_HPP_ERR_STR_(clGetProgramInfo)
845#define __GET_PROGRAM_BUILD_INFO_ERR CL_HPP_ERR_STR_(clGetProgramBuildInfo)
846#define __GET_COMMAND_QUEUE_INFO_ERR CL_HPP_ERR_STR_(clGetCommandQueueInfo)
847
848#define __CREATE_CONTEXT_ERR CL_HPP_ERR_STR_(clCreateContext)
849#define __CREATE_CONTEXT_FROM_TYPE_ERR CL_HPP_ERR_STR_(clCreateContextFromType)
850#define __GET_SUPPORTED_IMAGE_FORMATS_ERR CL_HPP_ERR_STR_(clGetSupportedImageFormats)
851#if CL_HPP_TARGET_OPENCL_VERSION >= 300
852#define __SET_CONTEXT_DESCTRUCTOR_CALLBACK_ERR CL_HPP_ERR_STR_(clSetContextDestructorCallback)
853#endif // CL_HPP_TARGET_OPENCL_VERSION >= 300
854
855#define __CREATE_BUFFER_ERR CL_HPP_ERR_STR_(clCreateBuffer)
856#define __COPY_ERR CL_HPP_ERR_STR_(cl::copy)
857#define __CREATE_SUBBUFFER_ERR CL_HPP_ERR_STR_(clCreateSubBuffer)
858#define __CREATE_GL_BUFFER_ERR CL_HPP_ERR_STR_(clCreateFromGLBuffer)
859#define __CREATE_GL_RENDER_BUFFER_ERR CL_HPP_ERR_STR_(clCreateFromGLBuffer)
860#define __GET_GL_OBJECT_INFO_ERR CL_HPP_ERR_STR_(clGetGLObjectInfo)
861#if CL_HPP_TARGET_OPENCL_VERSION >= 120
862#define __CREATE_IMAGE_ERR CL_HPP_ERR_STR_(clCreateImage)
863#define __CREATE_GL_TEXTURE_ERR CL_HPP_ERR_STR_(clCreateFromGLTexture)
864#define __IMAGE_DIMENSION_ERR CL_HPP_ERR_STR_(Incorrect image dimensions)
865#endif // CL_HPP_TARGET_OPENCL_VERSION >= 120
866#define __SET_MEM_OBJECT_DESTRUCTOR_CALLBACK_ERR CL_HPP_ERR_STR_(clSetMemObjectDestructorCallback)
867
868#define __CREATE_USER_EVENT_ERR CL_HPP_ERR_STR_(clCreateUserEvent)
869#define __SET_USER_EVENT_STATUS_ERR CL_HPP_ERR_STR_(clSetUserEventStatus)
870#define __SET_EVENT_CALLBACK_ERR CL_HPP_ERR_STR_(clSetEventCallback)
871#define __WAIT_FOR_EVENTS_ERR CL_HPP_ERR_STR_(clWaitForEvents)
872
873#define __CREATE_KERNEL_ERR CL_HPP_ERR_STR_(clCreateKernel)
874#define __SET_KERNEL_ARGS_ERR CL_HPP_ERR_STR_(clSetKernelArg)
875#define __CREATE_PROGRAM_WITH_SOURCE_ERR CL_HPP_ERR_STR_(clCreateProgramWithSource)
876#define __CREATE_PROGRAM_WITH_BINARY_ERR CL_HPP_ERR_STR_(clCreateProgramWithBinary)
877#if CL_HPP_TARGET_OPENCL_VERSION >= 210
878#define __CREATE_PROGRAM_WITH_IL_ERR CL_HPP_ERR_STR_(clCreateProgramWithIL)
879#else
880#define __CREATE_PROGRAM_WITH_IL_ERR CL_HPP_ERR_STR_(clCreateProgramWithILKHR)
881#endif // CL_HPP_TARGET_OPENCL_VERSION >= 210
882#if CL_HPP_TARGET_OPENCL_VERSION >= 120
883#define __CREATE_PROGRAM_WITH_BUILT_IN_KERNELS_ERR CL_HPP_ERR_STR_(clCreateProgramWithBuiltInKernels)
884#endif // CL_HPP_TARGET_OPENCL_VERSION >= 120
885#define __BUILD_PROGRAM_ERR CL_HPP_ERR_STR_(clBuildProgram)
886#if CL_HPP_TARGET_OPENCL_VERSION >= 120
887#define __COMPILE_PROGRAM_ERR CL_HPP_ERR_STR_(clCompileProgram)
888#define __LINK_PROGRAM_ERR CL_HPP_ERR_STR_(clLinkProgram)
889#endif // CL_HPP_TARGET_OPENCL_VERSION >= 120
890#define __CREATE_KERNELS_IN_PROGRAM_ERR CL_HPP_ERR_STR_(clCreateKernelsInProgram)
891
892#if CL_HPP_TARGET_OPENCL_VERSION >= 200
893#define __CREATE_COMMAND_QUEUE_WITH_PROPERTIES_ERR CL_HPP_ERR_STR_(clCreateCommandQueueWithProperties)
894#define __CREATE_SAMPLER_WITH_PROPERTIES_ERR CL_HPP_ERR_STR_(clCreateSamplerWithProperties)
895#endif // CL_HPP_TARGET_OPENCL_VERSION >= 200
896#define __SET_COMMAND_QUEUE_PROPERTY_ERR CL_HPP_ERR_STR_(clSetCommandQueueProperty)
897#define __ENQUEUE_READ_BUFFER_ERR CL_HPP_ERR_STR_(clEnqueueReadBuffer)
898#define __ENQUEUE_READ_BUFFER_RECT_ERR CL_HPP_ERR_STR_(clEnqueueReadBufferRect)
899#define __ENQUEUE_WRITE_BUFFER_ERR CL_HPP_ERR_STR_(clEnqueueWriteBuffer)
900#define __ENQUEUE_WRITE_BUFFER_RECT_ERR CL_HPP_ERR_STR_(clEnqueueWriteBufferRect)
901#define __ENQEUE_COPY_BUFFER_ERR CL_HPP_ERR_STR_(clEnqueueCopyBuffer)
902#define __ENQEUE_COPY_BUFFER_RECT_ERR CL_HPP_ERR_STR_(clEnqueueCopyBufferRect)
903#define __ENQUEUE_FILL_BUFFER_ERR CL_HPP_ERR_STR_(clEnqueueFillBuffer)
904#define __ENQUEUE_READ_IMAGE_ERR CL_HPP_ERR_STR_(clEnqueueReadImage)
905#define __ENQUEUE_WRITE_IMAGE_ERR CL_HPP_ERR_STR_(clEnqueueWriteImage)
906#define __ENQUEUE_COPY_IMAGE_ERR CL_HPP_ERR_STR_(clEnqueueCopyImage)
907#define __ENQUEUE_FILL_IMAGE_ERR CL_HPP_ERR_STR_(clEnqueueFillImage)
908#define __ENQUEUE_COPY_IMAGE_TO_BUFFER_ERR CL_HPP_ERR_STR_(clEnqueueCopyImageToBuffer)
909#define __ENQUEUE_COPY_BUFFER_TO_IMAGE_ERR CL_HPP_ERR_STR_(clEnqueueCopyBufferToImage)
910#define __ENQUEUE_MAP_BUFFER_ERR CL_HPP_ERR_STR_(clEnqueueMapBuffer)
911#define __ENQUEUE_MAP_SVM_ERR CL_HPP_ERR_STR_(clEnqueueSVMMap)
912#define __ENQUEUE_FILL_SVM_ERR CL_HPP_ERR_STR_(clEnqueueSVMMemFill)
913#define __ENQUEUE_COPY_SVM_ERR CL_HPP_ERR_STR_(clEnqueueSVMMemcpy)
914#define __ENQUEUE_UNMAP_SVM_ERR CL_HPP_ERR_STR_(clEnqueueSVMUnmap)
915#define __ENQUEUE_MAP_IMAGE_ERR CL_HPP_ERR_STR_(clEnqueueMapImage)
916#define __ENQUEUE_UNMAP_MEM_OBJECT_ERR CL_HPP_ERR_STR_(clEnqueueUnMapMemObject)
917#define __ENQUEUE_NDRANGE_KERNEL_ERR CL_HPP_ERR_STR_(clEnqueueNDRangeKernel)
918#define __ENQUEUE_NATIVE_KERNEL CL_HPP_ERR_STR_(clEnqueueNativeKernel)
919#if CL_HPP_TARGET_OPENCL_VERSION >= 120
920#define __ENQUEUE_MIGRATE_MEM_OBJECTS_ERR CL_HPP_ERR_STR_(clEnqueueMigrateMemObjects)
921#endif // CL_HPP_TARGET_OPENCL_VERSION >= 120
922#if CL_HPP_TARGET_OPENCL_VERSION >= 210
923#define __ENQUEUE_MIGRATE_SVM_ERR CL_HPP_ERR_STR_(clEnqueueSVMMigrateMem)
924#define __SET_DEFAULT_DEVICE_COMMAND_QUEUE_ERR CL_HPP_ERR_STR_(clSetDefaultDeviceCommandQueue)
925#endif // CL_HPP_TARGET_OPENCL_VERSION >= 210
926
927
928#define __ENQUEUE_ACQUIRE_GL_ERR CL_HPP_ERR_STR_(clEnqueueAcquireGLObjects)
929#define __ENQUEUE_RELEASE_GL_ERR CL_HPP_ERR_STR_(clEnqueueReleaseGLObjects)
930
931#define __CREATE_PIPE_ERR CL_HPP_ERR_STR_(clCreatePipe)
932#define __GET_PIPE_INFO_ERR CL_HPP_ERR_STR_(clGetPipeInfo)
933
934#define __RETAIN_ERR CL_HPP_ERR_STR_(Retain Object)
935#define __RELEASE_ERR CL_HPP_ERR_STR_(Release Object)
936#define __FLUSH_ERR CL_HPP_ERR_STR_(clFlush)
937#define __FINISH_ERR CL_HPP_ERR_STR_(clFinish)
938#define __VECTOR_CAPACITY_ERR CL_HPP_ERR_STR_(Vector capacity error)
939
940#if CL_HPP_TARGET_OPENCL_VERSION >= 210
941#define __GET_HOST_TIMER_ERR CL_HPP_ERR_STR_(clGetHostTimer)
942#define __GET_DEVICE_AND_HOST_TIMER_ERR CL_HPP_ERR_STR_(clGetDeviceAndHostTimer)
943#endif
944#if CL_HPP_TARGET_OPENCL_VERSION >= 220
945#define __SET_PROGRAM_RELEASE_CALLBACK_ERR CL_HPP_ERR_STR_(clSetProgramReleaseCallback)
946#define __SET_PROGRAM_SPECIALIZATION_CONSTANT_ERR CL_HPP_ERR_STR_(clSetProgramSpecializationConstant)
947#endif
948
949#ifdef cl_khr_external_memory
950#define __ENQUEUE_ACQUIRE_EXTERNAL_MEMORY_ERR CL_HPP_ERR_STR_(clEnqueueAcquireExternalMemObjectsKHR)
951#define __ENQUEUE_RELEASE_EXTERNAL_MEMORY_ERR CL_HPP_ERR_STR_(clEnqueueReleaseExternalMemObjectsKHR)
952#endif
953
954#ifdef cl_khr_semaphore
955#define __GET_SEMAPHORE_KHR_INFO_ERR CL_HPP_ERR_STR_(clGetSemaphoreInfoKHR)
956#define __CREATE_SEMAPHORE_KHR_WITH_PROPERTIES_ERR CL_HPP_ERR_STR_(clCreateSemaphoreWithPropertiesKHR)
957#define __ENQUEUE_WAIT_SEMAPHORE_KHR_ERR CL_HPP_ERR_STR_(clEnqueueWaitSemaphoresKHR)
958#define __ENQUEUE_SIGNAL_SEMAPHORE_KHR_ERR CL_HPP_ERR_STR_(clEnqueueSignalSemaphoresKHR)
959#define __RETAIN_SEMAPHORE_KHR_ERR CL_HPP_ERR_STR_(clRetainSemaphoreKHR)
960#define __RELEASE_SEMAPHORE_KHR_ERR CL_HPP_ERR_STR_(clReleaseSemaphoreKHR)
961#endif
962
963#ifdef cl_khr_external_semaphore
964#define __GET_SEMAPHORE_HANDLE_FOR_TYPE_KHR_ERR CL_HPP_ERR_STR_(clGetSemaphoreHandleForTypeKHR)
965#endif // cl_khr_external_semaphore
966
967#if defined(cl_khr_command_buffer)
968#define __CREATE_COMMAND_BUFFER_KHR_ERR CL_HPP_ERR_STR_(clCreateCommandBufferKHR)
969#define __GET_COMMAND_BUFFER_INFO_KHR_ERR CL_HPP_ERR_STR_(clGetCommandBufferInfoKHR)
970#define __FINALIZE_COMMAND_BUFFER_KHR_ERR CL_HPP_ERR_STR_(clFinalizeCommandBufferKHR)
971#define __ENQUEUE_COMMAND_BUFFER_KHR_ERR CL_HPP_ERR_STR_(clEnqueueCommandBufferKHR)
972#define __COMMAND_BARRIER_WITH_WAIT_LIST_KHR_ERR CL_HPP_ERR_STR_(clCommandBarrierWithWaitListKHR)
973#define __COMMAND_COPY_BUFFER_KHR_ERR CL_HPP_ERR_STR_(clCommandCopyBufferKHR)
974#define __COMMAND_COPY_BUFFER_RECT_KHR_ERR CL_HPP_ERR_STR_(clCommandCopyBufferRectKHR)
975#define __COMMAND_COPY_BUFFER_TO_IMAGE_KHR_ERR CL_HPP_ERR_STR_(clCommandCopyBufferToImageKHR)
976#define __COMMAND_COPY_IMAGE_KHR_ERR CL_HPP_ERR_STR_(clCommandCopyImageKHR)
977#define __COMMAND_COPY_IMAGE_TO_BUFFER_KHR_ERR CL_HPP_ERR_STR_(clCommandCopyImageToBufferKHR)
978#define __COMMAND_FILL_BUFFER_KHR_ERR CL_HPP_ERR_STR_(clCommandFillBufferKHR)
979#define __COMMAND_FILL_IMAGE_KHR_ERR CL_HPP_ERR_STR_(clCommandFillImageKHR)
980#define __COMMAND_NDRANGE_KERNEL_KHR_ERR CL_HPP_ERR_STR_(clCommandNDRangeKernelKHR)
981#define __UPDATE_MUTABLE_COMMANDS_KHR_ERR CL_HPP_ERR_STR_(clUpdateMutableCommandsKHR)
982#define __GET_MUTABLE_COMMAND_INFO_KHR_ERR CL_HPP_ERR_STR_(clGetMutableCommandInfoKHR)
983#define __RETAIN_COMMAND_BUFFER_KHR_ERR CL_HPP_ERR_STR_(clRetainCommandBufferKHR)
984#define __RELEASE_COMMAND_BUFFER_KHR_ERR CL_HPP_ERR_STR_(clReleaseCommandBufferKHR)
985#endif // cl_khr_command_buffer
986
987#if defined(cl_ext_image_requirements_info)
988#define __GET_IMAGE_REQUIREMENT_INFO_EXT_ERR CL_HPP_ERR_STR_(clGetImageRequirementsInfoEXT)
989#endif //cl_ext_image_requirements_info
990
994#if CL_HPP_TARGET_OPENCL_VERSION >= 120
995#define __CREATE_SUB_DEVICES_ERR CL_HPP_ERR_STR_(clCreateSubDevices)
996#else
997#define __CREATE_SUB_DEVICES_ERR CL_HPP_ERR_STR_(clCreateSubDevicesEXT)
998#endif // CL_HPP_TARGET_OPENCL_VERSION >= 120
999
1003#if defined(CL_USE_DEPRECATED_OPENCL_1_1_APIS)
1004#define __ENQUEUE_MARKER_ERR CL_HPP_ERR_STR_(clEnqueueMarker)
1005#define __ENQUEUE_WAIT_FOR_EVENTS_ERR CL_HPP_ERR_STR_(clEnqueueWaitForEvents)
1006#define __ENQUEUE_BARRIER_ERR CL_HPP_ERR_STR_(clEnqueueBarrier)
1007#define __UNLOAD_COMPILER_ERR CL_HPP_ERR_STR_(clUnloadCompiler)
1008#define __CREATE_GL_TEXTURE_2D_ERR CL_HPP_ERR_STR_(clCreateFromGLTexture2D)
1009#define __CREATE_GL_TEXTURE_3D_ERR CL_HPP_ERR_STR_(clCreateFromGLTexture3D)
1010#define __CREATE_IMAGE2D_ERR CL_HPP_ERR_STR_(clCreateImage2D)
1011#define __CREATE_IMAGE3D_ERR CL_HPP_ERR_STR_(clCreateImage3D)
1012#endif // #if defined(CL_USE_DEPRECATED_OPENCL_1_1_APIS)
1013
1017#if defined(CL_USE_DEPRECATED_OPENCL_1_2_APIS)
1018#define __CREATE_COMMAND_QUEUE_ERR CL_HPP_ERR_STR_(clCreateCommandQueue)
1019#define __ENQUEUE_TASK_ERR CL_HPP_ERR_STR_(clEnqueueTask)
1020#define __CREATE_SAMPLER_ERR CL_HPP_ERR_STR_(clCreateSampler)
1021#endif // #if defined(CL_USE_DEPRECATED_OPENCL_1_2_APIS)
1022
1026#if CL_HPP_TARGET_OPENCL_VERSION >= 120
1027#define __ENQUEUE_MARKER_WAIT_LIST_ERR CL_HPP_ERR_STR_(clEnqueueMarkerWithWaitList)
1028#define __ENQUEUE_BARRIER_WAIT_LIST_ERR CL_HPP_ERR_STR_(clEnqueueBarrierWithWaitList)
1029#endif // CL_HPP_TARGET_OPENCL_VERSION >= 120
1030
1031#if CL_HPP_TARGET_OPENCL_VERSION >= 210
1032#define __CLONE_KERNEL_ERR CL_HPP_ERR_STR_(clCloneKernel)
1033#endif // CL_HPP_TARGET_OPENCL_VERSION >= 210
1034
1035#endif // CL_HPP_USER_OVERRIDE_ERROR_STRINGS
1037
1038#ifdef cl_khr_external_memory
1039CL_HPP_CREATE_CL_EXT_FCN_PTR_ALIAS_(clEnqueueAcquireExternalMemObjectsKHR);
1040CL_HPP_CREATE_CL_EXT_FCN_PTR_ALIAS_(clEnqueueReleaseExternalMemObjectsKHR);
1041
1042CL_HPP_DEFINE_STATIC_MEMBER_ PFN_clEnqueueAcquireExternalMemObjectsKHR pfn_clEnqueueAcquireExternalMemObjectsKHR = nullptr;
1043CL_HPP_DEFINE_STATIC_MEMBER_ PFN_clEnqueueReleaseExternalMemObjectsKHR pfn_clEnqueueReleaseExternalMemObjectsKHR = nullptr;
1044#endif // cl_khr_external_memory
1045
1046#ifdef cl_khr_semaphore
1047CL_HPP_CREATE_CL_EXT_FCN_PTR_ALIAS_(clCreateSemaphoreWithPropertiesKHR);
1048CL_HPP_CREATE_CL_EXT_FCN_PTR_ALIAS_(clReleaseSemaphoreKHR);
1049CL_HPP_CREATE_CL_EXT_FCN_PTR_ALIAS_(clRetainSemaphoreKHR);
1050CL_HPP_CREATE_CL_EXT_FCN_PTR_ALIAS_(clEnqueueWaitSemaphoresKHR);
1051CL_HPP_CREATE_CL_EXT_FCN_PTR_ALIAS_(clEnqueueSignalSemaphoresKHR);
1052CL_HPP_CREATE_CL_EXT_FCN_PTR_ALIAS_(clGetSemaphoreInfoKHR);
1053
1054CL_HPP_DEFINE_STATIC_MEMBER_ PFN_clCreateSemaphoreWithPropertiesKHR pfn_clCreateSemaphoreWithPropertiesKHR = nullptr;
1055CL_HPP_DEFINE_STATIC_MEMBER_ PFN_clReleaseSemaphoreKHR pfn_clReleaseSemaphoreKHR = nullptr;
1056CL_HPP_DEFINE_STATIC_MEMBER_ PFN_clRetainSemaphoreKHR pfn_clRetainSemaphoreKHR = nullptr;
1057CL_HPP_DEFINE_STATIC_MEMBER_ PFN_clEnqueueWaitSemaphoresKHR pfn_clEnqueueWaitSemaphoresKHR = nullptr;
1058CL_HPP_DEFINE_STATIC_MEMBER_ PFN_clEnqueueSignalSemaphoresKHR pfn_clEnqueueSignalSemaphoresKHR = nullptr;
1059CL_HPP_DEFINE_STATIC_MEMBER_ PFN_clGetSemaphoreInfoKHR pfn_clGetSemaphoreInfoKHR = nullptr;
1060#endif // cl_khr_semaphore
1061
1062#ifdef cl_khr_external_semaphore
1063CL_HPP_CREATE_CL_EXT_FCN_PTR_ALIAS_(clGetSemaphoreHandleForTypeKHR);
1064CL_HPP_DEFINE_STATIC_MEMBER_ PFN_clGetSemaphoreHandleForTypeKHR pfn_clGetSemaphoreHandleForTypeKHR = nullptr;
1065#endif // cl_khr_external_semaphore
1066
1067#if defined(cl_khr_command_buffer)
1068CL_HPP_CREATE_CL_EXT_FCN_PTR_ALIAS_(clCreateCommandBufferKHR);
1069CL_HPP_CREATE_CL_EXT_FCN_PTR_ALIAS_(clFinalizeCommandBufferKHR);
1070CL_HPP_CREATE_CL_EXT_FCN_PTR_ALIAS_(clRetainCommandBufferKHR);
1071CL_HPP_CREATE_CL_EXT_FCN_PTR_ALIAS_(clReleaseCommandBufferKHR);
1072CL_HPP_CREATE_CL_EXT_FCN_PTR_ALIAS_(clGetCommandBufferInfoKHR);
1073CL_HPP_CREATE_CL_EXT_FCN_PTR_ALIAS_(clEnqueueCommandBufferKHR);
1074CL_HPP_CREATE_CL_EXT_FCN_PTR_ALIAS_(clCommandBarrierWithWaitListKHR);
1075CL_HPP_CREATE_CL_EXT_FCN_PTR_ALIAS_(clCommandCopyBufferKHR);
1076CL_HPP_CREATE_CL_EXT_FCN_PTR_ALIAS_(clCommandCopyBufferRectKHR);
1077CL_HPP_CREATE_CL_EXT_FCN_PTR_ALIAS_(clCommandCopyBufferToImageKHR);
1078CL_HPP_CREATE_CL_EXT_FCN_PTR_ALIAS_(clCommandCopyImageKHR);
1079CL_HPP_CREATE_CL_EXT_FCN_PTR_ALIAS_(clCommandCopyImageToBufferKHR);
1080CL_HPP_CREATE_CL_EXT_FCN_PTR_ALIAS_(clCommandFillBufferKHR);
1081CL_HPP_CREATE_CL_EXT_FCN_PTR_ALIAS_(clCommandFillImageKHR);
1082CL_HPP_CREATE_CL_EXT_FCN_PTR_ALIAS_(clCommandNDRangeKernelKHR);
1083
1084CL_HPP_DEFINE_STATIC_MEMBER_ PFN_clCreateCommandBufferKHR pfn_clCreateCommandBufferKHR = nullptr;
1085CL_HPP_DEFINE_STATIC_MEMBER_ PFN_clFinalizeCommandBufferKHR pfn_clFinalizeCommandBufferKHR = nullptr;
1086CL_HPP_DEFINE_STATIC_MEMBER_ PFN_clRetainCommandBufferKHR pfn_clRetainCommandBufferKHR = nullptr;
1087CL_HPP_DEFINE_STATIC_MEMBER_ PFN_clReleaseCommandBufferKHR pfn_clReleaseCommandBufferKHR = nullptr;
1088CL_HPP_DEFINE_STATIC_MEMBER_ PFN_clGetCommandBufferInfoKHR pfn_clGetCommandBufferInfoKHR = nullptr;
1089CL_HPP_DEFINE_STATIC_MEMBER_ PFN_clEnqueueCommandBufferKHR pfn_clEnqueueCommandBufferKHR = nullptr;
1090CL_HPP_DEFINE_STATIC_MEMBER_ PFN_clCommandBarrierWithWaitListKHR pfn_clCommandBarrierWithWaitListKHR = nullptr;
1091CL_HPP_DEFINE_STATIC_MEMBER_ PFN_clCommandCopyBufferKHR pfn_clCommandCopyBufferKHR = nullptr;
1092CL_HPP_DEFINE_STATIC_MEMBER_ PFN_clCommandCopyBufferRectKHR pfn_clCommandCopyBufferRectKHR = nullptr;
1093CL_HPP_DEFINE_STATIC_MEMBER_ PFN_clCommandCopyBufferToImageKHR pfn_clCommandCopyBufferToImageKHR = nullptr;
1094CL_HPP_DEFINE_STATIC_MEMBER_ PFN_clCommandCopyImageKHR pfn_clCommandCopyImageKHR = nullptr;
1095CL_HPP_DEFINE_STATIC_MEMBER_ PFN_clCommandCopyImageToBufferKHR pfn_clCommandCopyImageToBufferKHR = nullptr;
1096CL_HPP_DEFINE_STATIC_MEMBER_ PFN_clCommandFillBufferKHR pfn_clCommandFillBufferKHR = nullptr;
1097CL_HPP_DEFINE_STATIC_MEMBER_ PFN_clCommandFillImageKHR pfn_clCommandFillImageKHR = nullptr;
1098CL_HPP_DEFINE_STATIC_MEMBER_ PFN_clCommandNDRangeKernelKHR pfn_clCommandNDRangeKernelKHR = nullptr;
1099#endif /* cl_khr_command_buffer */
1100
#if defined(cl_khr_command_buffer_mutable_dispatch)
// Lazily-resolved entry points for cl_khr_command_buffer_mutable_dispatch.
CL_HPP_CREATE_CL_EXT_FCN_PTR_ALIAS_(clUpdateMutableCommandsKHR);
CL_HPP_CREATE_CL_EXT_FCN_PTR_ALIAS_(clGetMutableCommandInfoKHR);

// Static storage for the function pointers; populated on first use.
CL_HPP_DEFINE_STATIC_MEMBER_ PFN_clUpdateMutableCommandsKHR pfn_clUpdateMutableCommandsKHR = nullptr;
CL_HPP_DEFINE_STATIC_MEMBER_ PFN_clGetMutableCommandInfoKHR pfn_clGetMutableCommandInfoKHR = nullptr;
#endif /* cl_khr_command_buffer_mutable_dispatch */

#if defined(cl_ext_image_requirements_info)
// Entry point for image-requirement queries (cl_ext_image_requirements_info).
CL_HPP_CREATE_CL_EXT_FCN_PTR_ALIAS_(clGetImageRequirementsInfoEXT);
CL_HPP_DEFINE_STATIC_MEMBER_ PFN_clGetImageRequirementsInfoEXT pfn_clGetImageRequirementsInfoEXT = nullptr;
#endif

#if defined(cl_ext_device_fission)
// Entry point for device fission (cl_ext_device_fission).
CL_HPP_CREATE_CL_EXT_FCN_PTR_ALIAS_(clCreateSubDevicesEXT);
CL_HPP_DEFINE_STATIC_MEMBER_ PFN_clCreateSubDevicesEXT
    pfn_clCreateSubDevicesEXT = nullptr;
#endif
1119
1120namespace detail {
1121
// Generic getInfoHelper. The final parameter is used to guide overload
// resolution: the actual parameter passed is an int, which makes this
// a worse conversion sequence than a specialization that declares the
// parameter as an int.
template<typename Functor, typename T>
inline cl_int getInfoHelper(Functor f, cl_uint name, T* param, long)
{
    // Fixed-size query: write exactly sizeof(T) bytes straight into *param.
    return f(name, sizeof(T), param, nullptr);
}
1131
1132// Specialized for getInfo<CL_PROGRAM_BINARIES>
1133// Assumes that the output vector was correctly resized on the way in
1134template <typename Func>
1135inline cl_int getInfoHelper(Func f, cl_uint name, vector<vector<unsigned char>>* param, int)
1136{
1137 if (name != CL_PROGRAM_BINARIES) {
1138 return CL_INVALID_VALUE;
1139 }
1140 if (param) {
1141 // Create array of pointers, calculate total size and pass pointer array in
1142 size_type numBinaries = param->size();
1143 vector<unsigned char*> binariesPointers(numBinaries);
1144
1145 for (size_type i = 0; i < numBinaries; ++i)
1146 {
1147 binariesPointers[i] = (*param)[i].data();
1148 }
1149
1150 cl_int err = f(name, numBinaries * sizeof(unsigned char*), binariesPointers.data(), nullptr);
1151
1152 if (err != CL_SUCCESS) {
1153 return err;
1154 }
1155 }
1156
1157 return CL_SUCCESS;
1158}
1159
1160// Specialized getInfoHelper for vector params
1161template <typename Func, typename T>
1162inline cl_int getInfoHelper(Func f, cl_uint name, vector<T>* param, long)
1163{
1164 size_type required;
1165 cl_int err = f(name, 0, nullptr, &required);
1166 if (err != CL_SUCCESS) {
1167 return err;
1168 }
1169 const size_type elements = required / sizeof(T);
1170
1171 // Temporary to avoid changing param on an error
1172 vector<T> localData(elements);
1173 err = f(name, required, localData.data(), nullptr);
1174 if (err != CL_SUCCESS) {
1175 return err;
1176 }
1177 if (param) {
1178 *param = std::move(localData);
1179 }
1180
1181 return CL_SUCCESS;
1182}
1183
1184/* Specialization for reference-counted types. This depends on the
1185 * existence of Wrapper<T>::cl_type, and none of the other types having the
1186 * cl_type member. Note that simplify specifying the parameter as Wrapper<T>
1187 * does not work, because when using a derived type (e.g. Context) the generic
1188 * template will provide a better match.
1189 */
1190template <typename Func, typename T>
1191inline cl_int getInfoHelper(
1192 Func f, cl_uint name, vector<T>* param, int, typename T::cl_type = 0)
1193{
1194 size_type required;
1195 cl_int err = f(name, 0, nullptr, &required);
1196 if (err != CL_SUCCESS) {
1197 return err;
1198 }
1199
1200 const size_type elements = required / sizeof(typename T::cl_type);
1201
1202 vector<typename T::cl_type> value(elements);
1203 err = f(name, required, value.data(), nullptr);
1204 if (err != CL_SUCCESS) {
1205 return err;
1206 }
1207
1208 if (param) {
1209 // Assign to convert CL type to T for each element
1210 param->resize(elements);
1211
1212 // Assign to param, constructing with retain behaviour
1213 // to correctly capture each underlying CL object
1214 for (size_type i = 0; i < elements; i++) {
1215 (*param)[i] = T(value[i], true);
1216 }
1217 }
1218 return CL_SUCCESS;
1219}
1220
1221// Specialized GetInfoHelper for string params
1222template <typename Func>
1223inline cl_int getInfoHelper(Func f, cl_uint name, string* param, long)
1224{
1225 size_type required;
1226 cl_int err = f(name, 0, nullptr, &required);
1227 if (err != CL_SUCCESS) {
1228 return err;
1229 }
1230
1231 // std::string has a constant data member
1232 // a char vector does not
1233 if (required > 0) {
1234 vector<char> value(required);
1235 err = f(name, required, value.data(), nullptr);
1236 if (err != CL_SUCCESS) {
1237 return err;
1238 }
1239 if (param) {
1240 param->assign(value.begin(), value.end() - 1);
1241 }
1242 }
1243 else if (param) {
1244 param->assign("");
1245 }
1246 return CL_SUCCESS;
1247}
1248
1249// Specialized GetInfoHelper for clsize_t params
1250template <typename Func, size_type N>
1251inline cl_int getInfoHelper(Func f, cl_uint name, array<size_type, N>* param, long)
1252{
1253 size_type required;
1254 cl_int err = f(name, 0, nullptr, &required);
1255 if (err != CL_SUCCESS) {
1256 return err;
1257 }
1258
1259 size_type elements = required / sizeof(size_type);
1260 vector<size_type> value(elements, 0);
1261
1262 err = f(name, required, value.data(), nullptr);
1263 if (err != CL_SUCCESS) {
1264 return err;
1265 }
1266
1267 // Bound the copy with N to prevent overruns
1268 // if passed N > than the amount copied
1269 if (elements > N) {
1270 elements = N;
1271 }
1272 for (size_type i = 0; i < elements; ++i) {
1273 (*param)[i] = value[i];
1274 }
1275
1276 return CL_SUCCESS;
1277}
1278
// Forward declaration: per-handle retain/release traits, defined later in this file.
template<typename T> struct ReferenceHandler;
1280
/* Specialization for reference-counted types. This depends on the
 * existence of Wrapper<T>::cl_type, and none of the other types having the
 * cl_type member. Note that simply specifying the parameter as Wrapper<T>
 * does not work, because when using a derived type (e.g. Context) the generic
 * template will provide a better match.
 */
template<typename Func, typename T>
inline cl_int getInfoHelper(Func f, cl_uint name, T* param, int, typename T::cl_type = 0)
{
    // Read the raw OpenCL handle for this query.
    typename T::cl_type value;
    cl_int err = f(name, sizeof(value), &value, nullptr);
    if (err != CL_SUCCESS) {
        return err;
    }
    // Wrap the handle, then add a reference explicitly: the info query
    // itself does not retain the object on the caller's behalf.
    *param = value;
    if (value != nullptr)
    {
        err = param->retain();
        if (err != CL_SUCCESS) {
            return err;
        }
    }
    return CL_SUCCESS;
}
1305
// X-macro table: one F(enum-type, query-token, result-type) entry per
// OpenCL 1.0 query. Expanded below with CL_HPP_DECLARE_PARAM_TRAITS_ to
// generate the param_traits specializations used by getInfo<>().
#define CL_HPP_PARAM_NAME_INFO_1_0_(F) \
    F(cl_platform_info, CL_PLATFORM_PROFILE, string) \
    F(cl_platform_info, CL_PLATFORM_VERSION, string) \
    F(cl_platform_info, CL_PLATFORM_NAME, string) \
    F(cl_platform_info, CL_PLATFORM_VENDOR, string) \
    F(cl_platform_info, CL_PLATFORM_EXTENSIONS, string) \
    \
    F(cl_device_info, CL_DEVICE_TYPE, cl_device_type) \
    F(cl_device_info, CL_DEVICE_VENDOR_ID, cl_uint) \
    F(cl_device_info, CL_DEVICE_MAX_COMPUTE_UNITS, cl_uint) \
    F(cl_device_info, CL_DEVICE_MAX_WORK_ITEM_DIMENSIONS, cl_uint) \
    F(cl_device_info, CL_DEVICE_MAX_WORK_GROUP_SIZE, size_type) \
    F(cl_device_info, CL_DEVICE_MAX_WORK_ITEM_SIZES, cl::vector<size_type>) \
    F(cl_device_info, CL_DEVICE_PREFERRED_VECTOR_WIDTH_CHAR, cl_uint) \
    F(cl_device_info, CL_DEVICE_PREFERRED_VECTOR_WIDTH_SHORT, cl_uint) \
    F(cl_device_info, CL_DEVICE_PREFERRED_VECTOR_WIDTH_INT, cl_uint) \
    F(cl_device_info, CL_DEVICE_PREFERRED_VECTOR_WIDTH_LONG, cl_uint) \
    F(cl_device_info, CL_DEVICE_PREFERRED_VECTOR_WIDTH_FLOAT, cl_uint) \
    F(cl_device_info, CL_DEVICE_PREFERRED_VECTOR_WIDTH_DOUBLE, cl_uint) \
    F(cl_device_info, CL_DEVICE_MAX_CLOCK_FREQUENCY, cl_uint) \
    F(cl_device_info, CL_DEVICE_ADDRESS_BITS, cl_uint) \
    F(cl_device_info, CL_DEVICE_MAX_READ_IMAGE_ARGS, cl_uint) \
    F(cl_device_info, CL_DEVICE_MAX_WRITE_IMAGE_ARGS, cl_uint) \
    F(cl_device_info, CL_DEVICE_MAX_MEM_ALLOC_SIZE, cl_ulong) \
    F(cl_device_info, CL_DEVICE_IMAGE2D_MAX_WIDTH, size_type) \
    F(cl_device_info, CL_DEVICE_IMAGE2D_MAX_HEIGHT, size_type) \
    F(cl_device_info, CL_DEVICE_IMAGE3D_MAX_WIDTH, size_type) \
    F(cl_device_info, CL_DEVICE_IMAGE3D_MAX_HEIGHT, size_type) \
    F(cl_device_info, CL_DEVICE_IMAGE3D_MAX_DEPTH, size_type) \
    F(cl_device_info, CL_DEVICE_IMAGE_SUPPORT, cl_bool) \
    F(cl_device_info, CL_DEVICE_MAX_PARAMETER_SIZE, size_type) \
    F(cl_device_info, CL_DEVICE_MAX_SAMPLERS, cl_uint) \
    F(cl_device_info, CL_DEVICE_MEM_BASE_ADDR_ALIGN, cl_uint) \
    F(cl_device_info, CL_DEVICE_MIN_DATA_TYPE_ALIGN_SIZE, cl_uint) \
    F(cl_device_info, CL_DEVICE_SINGLE_FP_CONFIG, cl_device_fp_config) \
    F(cl_device_info, CL_DEVICE_DOUBLE_FP_CONFIG, cl_device_fp_config) \
    F(cl_device_info, CL_DEVICE_HALF_FP_CONFIG, cl_device_fp_config) \
    F(cl_device_info, CL_DEVICE_GLOBAL_MEM_CACHE_TYPE, cl_device_mem_cache_type) \
    F(cl_device_info, CL_DEVICE_GLOBAL_MEM_CACHELINE_SIZE, cl_uint) \
    F(cl_device_info, CL_DEVICE_GLOBAL_MEM_CACHE_SIZE, cl_ulong) \
    F(cl_device_info, CL_DEVICE_GLOBAL_MEM_SIZE, cl_ulong) \
    F(cl_device_info, CL_DEVICE_MAX_CONSTANT_BUFFER_SIZE, cl_ulong) \
    F(cl_device_info, CL_DEVICE_MAX_CONSTANT_ARGS, cl_uint) \
    F(cl_device_info, CL_DEVICE_LOCAL_MEM_TYPE, cl_device_local_mem_type) \
    F(cl_device_info, CL_DEVICE_LOCAL_MEM_SIZE, cl_ulong) \
    F(cl_device_info, CL_DEVICE_ERROR_CORRECTION_SUPPORT, cl_bool) \
    F(cl_device_info, CL_DEVICE_PROFILING_TIMER_RESOLUTION, size_type) \
    F(cl_device_info, CL_DEVICE_ENDIAN_LITTLE, cl_bool) \
    F(cl_device_info, CL_DEVICE_AVAILABLE, cl_bool) \
    F(cl_device_info, CL_DEVICE_COMPILER_AVAILABLE, cl_bool) \
    F(cl_device_info, CL_DEVICE_EXECUTION_CAPABILITIES, cl_device_exec_capabilities) \
    F(cl_device_info, CL_DEVICE_PLATFORM, cl::Platform) \
    F(cl_device_info, CL_DEVICE_NAME, string) \
    F(cl_device_info, CL_DEVICE_VENDOR, string) \
    F(cl_device_info, CL_DRIVER_VERSION, string) \
    F(cl_device_info, CL_DEVICE_PROFILE, string) \
    F(cl_device_info, CL_DEVICE_VERSION, string) \
    F(cl_device_info, CL_DEVICE_EXTENSIONS, string) \
    \
    F(cl_context_info, CL_CONTEXT_REFERENCE_COUNT, cl_uint) \
    F(cl_context_info, CL_CONTEXT_DEVICES, cl::vector<Device>) \
    F(cl_context_info, CL_CONTEXT_PROPERTIES, cl::vector<cl_context_properties>) \
    \
    F(cl_event_info, CL_EVENT_COMMAND_QUEUE, cl::CommandQueue) \
    F(cl_event_info, CL_EVENT_COMMAND_TYPE, cl_command_type) \
    F(cl_event_info, CL_EVENT_REFERENCE_COUNT, cl_uint) \
    F(cl_event_info, CL_EVENT_COMMAND_EXECUTION_STATUS, cl_int) \
    \
    F(cl_profiling_info, CL_PROFILING_COMMAND_QUEUED, cl_ulong) \
    F(cl_profiling_info, CL_PROFILING_COMMAND_SUBMIT, cl_ulong) \
    F(cl_profiling_info, CL_PROFILING_COMMAND_START, cl_ulong) \
    F(cl_profiling_info, CL_PROFILING_COMMAND_END, cl_ulong) \
    \
    F(cl_mem_info, CL_MEM_TYPE, cl_mem_object_type) \
    F(cl_mem_info, CL_MEM_FLAGS, cl_mem_flags) \
    F(cl_mem_info, CL_MEM_SIZE, size_type) \
    F(cl_mem_info, CL_MEM_HOST_PTR, void*) \
    F(cl_mem_info, CL_MEM_MAP_COUNT, cl_uint) \
    F(cl_mem_info, CL_MEM_REFERENCE_COUNT, cl_uint) \
    F(cl_mem_info, CL_MEM_CONTEXT, cl::Context) \
    \
    F(cl_image_info, CL_IMAGE_FORMAT, cl_image_format) \
    F(cl_image_info, CL_IMAGE_ELEMENT_SIZE, size_type) \
    F(cl_image_info, CL_IMAGE_ROW_PITCH, size_type) \
    F(cl_image_info, CL_IMAGE_SLICE_PITCH, size_type) \
    F(cl_image_info, CL_IMAGE_WIDTH, size_type) \
    F(cl_image_info, CL_IMAGE_HEIGHT, size_type) \
    F(cl_image_info, CL_IMAGE_DEPTH, size_type) \
    \
    F(cl_sampler_info, CL_SAMPLER_REFERENCE_COUNT, cl_uint) \
    F(cl_sampler_info, CL_SAMPLER_CONTEXT, cl::Context) \
    F(cl_sampler_info, CL_SAMPLER_NORMALIZED_COORDS, cl_bool) \
    F(cl_sampler_info, CL_SAMPLER_ADDRESSING_MODE, cl_addressing_mode) \
    F(cl_sampler_info, CL_SAMPLER_FILTER_MODE, cl_filter_mode) \
    \
    F(cl_program_info, CL_PROGRAM_REFERENCE_COUNT, cl_uint) \
    F(cl_program_info, CL_PROGRAM_CONTEXT, cl::Context) \
    F(cl_program_info, CL_PROGRAM_NUM_DEVICES, cl_uint) \
    F(cl_program_info, CL_PROGRAM_DEVICES, cl::vector<Device>) \
    F(cl_program_info, CL_PROGRAM_SOURCE, string) \
    F(cl_program_info, CL_PROGRAM_BINARY_SIZES, cl::vector<size_type>) \
    F(cl_program_info, CL_PROGRAM_BINARIES, cl::vector<cl::vector<unsigned char>>) \
    \
    F(cl_program_build_info, CL_PROGRAM_BUILD_STATUS, cl_build_status) \
    F(cl_program_build_info, CL_PROGRAM_BUILD_OPTIONS, string) \
    F(cl_program_build_info, CL_PROGRAM_BUILD_LOG, string) \
    \
    F(cl_kernel_info, CL_KERNEL_FUNCTION_NAME, string) \
    F(cl_kernel_info, CL_KERNEL_NUM_ARGS, cl_uint) \
    F(cl_kernel_info, CL_KERNEL_REFERENCE_COUNT, cl_uint) \
    F(cl_kernel_info, CL_KERNEL_CONTEXT, cl::Context) \
    F(cl_kernel_info, CL_KERNEL_PROGRAM, cl::Program) \
    \
    F(cl_kernel_work_group_info, CL_KERNEL_WORK_GROUP_SIZE, size_type) \
    F(cl_kernel_work_group_info, CL_KERNEL_COMPILE_WORK_GROUP_SIZE, cl::detail::size_t_array) \
    F(cl_kernel_work_group_info, CL_KERNEL_LOCAL_MEM_SIZE, cl_ulong) \
    \
    F(cl_command_queue_info, CL_QUEUE_CONTEXT, cl::Context) \
    F(cl_command_queue_info, CL_QUEUE_DEVICE, cl::Device) \
    F(cl_command_queue_info, CL_QUEUE_REFERENCE_COUNT, cl_uint) \
    F(cl_command_queue_info, CL_QUEUE_PROPERTIES, cl_command_queue_properties)
1427
1428
// OpenCL 1.1 additions to the query/result-type table.
#define CL_HPP_PARAM_NAME_INFO_1_1_(F) \
    F(cl_context_info, CL_CONTEXT_NUM_DEVICES, cl_uint) \
    F(cl_device_info, CL_DEVICE_PREFERRED_VECTOR_WIDTH_HALF, cl_uint) \
    F(cl_device_info, CL_DEVICE_NATIVE_VECTOR_WIDTH_CHAR, cl_uint) \
    F(cl_device_info, CL_DEVICE_NATIVE_VECTOR_WIDTH_SHORT, cl_uint) \
    F(cl_device_info, CL_DEVICE_NATIVE_VECTOR_WIDTH_INT, cl_uint) \
    F(cl_device_info, CL_DEVICE_NATIVE_VECTOR_WIDTH_LONG, cl_uint) \
    F(cl_device_info, CL_DEVICE_NATIVE_VECTOR_WIDTH_FLOAT, cl_uint) \
    F(cl_device_info, CL_DEVICE_NATIVE_VECTOR_WIDTH_DOUBLE, cl_uint) \
    F(cl_device_info, CL_DEVICE_NATIVE_VECTOR_WIDTH_HALF, cl_uint) \
    F(cl_device_info, CL_DEVICE_OPENCL_C_VERSION, string) \
    \
    F(cl_mem_info, CL_MEM_ASSOCIATED_MEMOBJECT, cl::Memory) \
    F(cl_mem_info, CL_MEM_OFFSET, size_type) \
    \
    F(cl_kernel_work_group_info, CL_KERNEL_PREFERRED_WORK_GROUP_SIZE_MULTIPLE, size_type) \
    F(cl_kernel_work_group_info, CL_KERNEL_PRIVATE_MEM_SIZE, cl_ulong) \
    \
    F(cl_event_info, CL_EVENT_CONTEXT, cl::Context)
// OpenCL 1.2 additions to the query/result-type table.
#define CL_HPP_PARAM_NAME_INFO_1_2_(F) \
    F(cl_program_info, CL_PROGRAM_NUM_KERNELS, size_type) \
    F(cl_program_info, CL_PROGRAM_KERNEL_NAMES, string) \
    \
    F(cl_program_build_info, CL_PROGRAM_BINARY_TYPE, cl_program_binary_type) \
    \
    F(cl_kernel_info, CL_KERNEL_ATTRIBUTES, string) \
    \
    F(cl_kernel_arg_info, CL_KERNEL_ARG_ADDRESS_QUALIFIER, cl_kernel_arg_address_qualifier) \
    F(cl_kernel_arg_info, CL_KERNEL_ARG_ACCESS_QUALIFIER, cl_kernel_arg_access_qualifier) \
    F(cl_kernel_arg_info, CL_KERNEL_ARG_TYPE_NAME, string) \
    F(cl_kernel_arg_info, CL_KERNEL_ARG_NAME, string) \
    F(cl_kernel_arg_info, CL_KERNEL_ARG_TYPE_QUALIFIER, cl_kernel_arg_type_qualifier) \
    \
    F(cl_kernel_work_group_info, CL_KERNEL_GLOBAL_WORK_SIZE, cl::detail::size_t_array) \
    \
    F(cl_device_info, CL_DEVICE_LINKER_AVAILABLE, cl_bool) \
    F(cl_device_info, CL_DEVICE_IMAGE_MAX_BUFFER_SIZE, size_type) \
    F(cl_device_info, CL_DEVICE_IMAGE_MAX_ARRAY_SIZE, size_type) \
    F(cl_device_info, CL_DEVICE_PARENT_DEVICE, cl::Device) \
    F(cl_device_info, CL_DEVICE_PARTITION_MAX_SUB_DEVICES, cl_uint) \
    F(cl_device_info, CL_DEVICE_PARTITION_PROPERTIES, cl::vector<cl_device_partition_property>) \
    F(cl_device_info, CL_DEVICE_PARTITION_TYPE, cl::vector<cl_device_partition_property>) \
    F(cl_device_info, CL_DEVICE_REFERENCE_COUNT, cl_uint) \
    F(cl_device_info, CL_DEVICE_PREFERRED_INTEROP_USER_SYNC, cl_bool) \
    F(cl_device_info, CL_DEVICE_PARTITION_AFFINITY_DOMAIN, cl_device_affinity_domain) \
    F(cl_device_info, CL_DEVICE_BUILT_IN_KERNELS, string) \
    F(cl_device_info, CL_DEVICE_PRINTF_BUFFER_SIZE, size_type) \
    \
    F(cl_image_info, CL_IMAGE_ARRAY_SIZE, size_type) \
    F(cl_image_info, CL_IMAGE_NUM_MIP_LEVELS, cl_uint) \
    F(cl_image_info, CL_IMAGE_NUM_SAMPLES, cl_uint)
1481
// OpenCL 2.0 additions to the query/result-type table.
#define CL_HPP_PARAM_NAME_INFO_2_0_(F) \
    F(cl_device_info, CL_DEVICE_QUEUE_ON_HOST_PROPERTIES, cl_command_queue_properties) \
    F(cl_device_info, CL_DEVICE_QUEUE_ON_DEVICE_PROPERTIES, cl_command_queue_properties) \
    F(cl_device_info, CL_DEVICE_QUEUE_ON_DEVICE_PREFERRED_SIZE, cl_uint) \
    F(cl_device_info, CL_DEVICE_QUEUE_ON_DEVICE_MAX_SIZE, cl_uint) \
    F(cl_device_info, CL_DEVICE_MAX_ON_DEVICE_QUEUES, cl_uint) \
    F(cl_device_info, CL_DEVICE_MAX_ON_DEVICE_EVENTS, cl_uint) \
    F(cl_device_info, CL_DEVICE_MAX_PIPE_ARGS, cl_uint) \
    F(cl_device_info, CL_DEVICE_PIPE_MAX_ACTIVE_RESERVATIONS, cl_uint) \
    F(cl_device_info, CL_DEVICE_PIPE_MAX_PACKET_SIZE, cl_uint) \
    F(cl_device_info, CL_DEVICE_SVM_CAPABILITIES, cl_device_svm_capabilities) \
    F(cl_device_info, CL_DEVICE_PREFERRED_PLATFORM_ATOMIC_ALIGNMENT, cl_uint) \
    F(cl_device_info, CL_DEVICE_PREFERRED_GLOBAL_ATOMIC_ALIGNMENT, cl_uint) \
    F(cl_device_info, CL_DEVICE_PREFERRED_LOCAL_ATOMIC_ALIGNMENT, cl_uint) \
    F(cl_device_info, CL_DEVICE_IMAGE_PITCH_ALIGNMENT, cl_uint) \
    F(cl_device_info, CL_DEVICE_IMAGE_BASE_ADDRESS_ALIGNMENT, cl_uint) \
    F(cl_device_info, CL_DEVICE_MAX_READ_WRITE_IMAGE_ARGS, cl_uint) \
    F(cl_device_info, CL_DEVICE_MAX_GLOBAL_VARIABLE_SIZE, size_type) \
    F(cl_device_info, CL_DEVICE_GLOBAL_VARIABLE_PREFERRED_TOTAL_SIZE, size_type) \
    F(cl_profiling_info, CL_PROFILING_COMMAND_COMPLETE, cl_ulong) \
    F(cl_kernel_exec_info, CL_KERNEL_EXEC_INFO_SVM_FINE_GRAIN_SYSTEM, cl_bool) \
    F(cl_kernel_exec_info, CL_KERNEL_EXEC_INFO_SVM_PTRS, void**) \
    F(cl_command_queue_info, CL_QUEUE_SIZE, cl_uint) \
    F(cl_mem_info, CL_MEM_USES_SVM_POINTER, cl_bool) \
    F(cl_program_build_info, CL_PROGRAM_BUILD_GLOBAL_VARIABLE_TOTAL_SIZE, size_type) \
    F(cl_pipe_info, CL_PIPE_PACKET_SIZE, cl_uint) \
    F(cl_pipe_info, CL_PIPE_MAX_PACKETS, cl_uint)
1509
// cl_khr_subgroups queries (pre-2.1 KHR tokens).
#define CL_HPP_PARAM_NAME_INFO_SUBGROUP_KHR_(F) \
    F(cl_kernel_sub_group_info, CL_KERNEL_MAX_SUB_GROUP_SIZE_FOR_NDRANGE_KHR, size_type) \
    F(cl_kernel_sub_group_info, CL_KERNEL_SUB_GROUP_COUNT_FOR_NDRANGE_KHR, size_type)

// cl_khr_il_program queries (pre-2.1 KHR tokens).
#define CL_HPP_PARAM_NAME_INFO_IL_KHR_(F) \
    F(cl_device_info, CL_DEVICE_IL_VERSION_KHR, string) \
    F(cl_program_info, CL_PROGRAM_IL_KHR, cl::vector<unsigned char>)

// OpenCL 2.1 additions to the query/result-type table.
#define CL_HPP_PARAM_NAME_INFO_2_1_(F) \
    F(cl_platform_info, CL_PLATFORM_HOST_TIMER_RESOLUTION, cl_ulong) \
    F(cl_program_info, CL_PROGRAM_IL, cl::vector<unsigned char>) \
    F(cl_device_info, CL_DEVICE_MAX_NUM_SUB_GROUPS, cl_uint) \
    F(cl_device_info, CL_DEVICE_IL_VERSION, string) \
    F(cl_device_info, CL_DEVICE_SUB_GROUP_INDEPENDENT_FORWARD_PROGRESS, cl_bool) \
    F(cl_command_queue_info, CL_QUEUE_DEVICE_DEFAULT, cl::DeviceCommandQueue) \
    F(cl_kernel_sub_group_info, CL_KERNEL_MAX_SUB_GROUP_SIZE_FOR_NDRANGE, size_type) \
    F(cl_kernel_sub_group_info, CL_KERNEL_SUB_GROUP_COUNT_FOR_NDRANGE, size_type) \
    F(cl_kernel_sub_group_info, CL_KERNEL_LOCAL_SIZE_FOR_SUB_GROUP_COUNT, cl::detail::size_t_array) \
    F(cl_kernel_sub_group_info, CL_KERNEL_MAX_NUM_SUB_GROUPS, size_type) \
    F(cl_kernel_sub_group_info, CL_KERNEL_COMPILE_NUM_SUB_GROUPS, size_type)

// OpenCL 2.2 additions to the query/result-type table.
#define CL_HPP_PARAM_NAME_INFO_2_2_(F) \
    F(cl_program_info, CL_PROGRAM_SCOPE_GLOBAL_CTORS_PRESENT, cl_bool) \
    F(cl_program_info, CL_PROGRAM_SCOPE_GLOBAL_DTORS_PRESENT, cl_bool)
1534
// cl_ext_device_fission queries.
#define CL_HPP_PARAM_NAME_DEVICE_FISSION_EXT_(F) \
    F(cl_device_info, CL_DEVICE_PARENT_DEVICE_EXT, cl::Device) \
    F(cl_device_info, CL_DEVICE_PARTITION_TYPES_EXT, cl::vector<cl_device_partition_property_ext>) \
    F(cl_device_info, CL_DEVICE_AFFINITY_DOMAINS_EXT, cl::vector<cl_device_partition_property_ext>) \
    F(cl_device_info, CL_DEVICE_REFERENCE_COUNT_EXT, cl_uint) \
    F(cl_device_info, CL_DEVICE_PARTITION_STYLE_EXT, cl::vector<cl_device_partition_property_ext>)

// cl_khr_extended_versioning queries shared with core OpenCL 3.0.
#define CL_HPP_PARAM_NAME_CL_KHR_EXTENDED_VERSIONING_CL3_SHARED_(F) \
    F(cl_platform_info, CL_PLATFORM_NUMERIC_VERSION_KHR, cl_version_khr) \
    F(cl_platform_info, CL_PLATFORM_EXTENSIONS_WITH_VERSION_KHR, cl::vector<cl_name_version_khr>) \
    \
    F(cl_device_info, CL_DEVICE_NUMERIC_VERSION_KHR, cl_version_khr) \
    F(cl_device_info, CL_DEVICE_EXTENSIONS_WITH_VERSION_KHR, cl::vector<cl_name_version_khr>) \
    F(cl_device_info, CL_DEVICE_ILS_WITH_VERSION_KHR, cl::vector<cl_name_version_khr>) \
    F(cl_device_info, CL_DEVICE_BUILT_IN_KERNELS_WITH_VERSION_KHR, cl::vector<cl_name_version_khr>)

// cl_khr_extended_versioning queries that remain KHR-only.
#define CL_HPP_PARAM_NAME_CL_KHR_EXTENDED_VERSIONING_KHRONLY_(F) \
    F(cl_device_info, CL_DEVICE_OPENCL_C_NUMERIC_VERSION_KHR, cl_version_khr)

// Note: the query for CL_SEMAPHORE_DEVICE_HANDLE_LIST_KHR is handled specially!
#define CL_HPP_PARAM_NAME_CL_KHR_SEMAPHORE_(F) \
    F(cl_semaphore_info_khr, CL_SEMAPHORE_CONTEXT_KHR, cl::Context) \
    F(cl_semaphore_info_khr, CL_SEMAPHORE_REFERENCE_COUNT_KHR, cl_uint) \
    F(cl_semaphore_info_khr, CL_SEMAPHORE_PROPERTIES_KHR, cl::vector<cl_semaphore_properties_khr>) \
    F(cl_semaphore_info_khr, CL_SEMAPHORE_TYPE_KHR, cl_semaphore_type_khr) \
    F(cl_semaphore_info_khr, CL_SEMAPHORE_PAYLOAD_KHR, cl_semaphore_payload_khr) \
    F(cl_platform_info, CL_PLATFORM_SEMAPHORE_TYPES_KHR, cl::vector<cl_semaphore_type_khr>) \
    F(cl_device_info, CL_DEVICE_SEMAPHORE_TYPES_KHR, cl::vector<cl_semaphore_type_khr>)

// cl_khr_external_memory queries.
#define CL_HPP_PARAM_NAME_CL_KHR_EXTERNAL_MEMORY_(F) \
    F(cl_device_info, CL_DEVICE_EXTERNAL_MEMORY_IMPORT_HANDLE_TYPES_KHR, cl::vector<cl::ExternalMemoryType>) \
    F(cl_platform_info, CL_PLATFORM_EXTERNAL_MEMORY_IMPORT_HANDLE_TYPES_KHR, cl::vector<cl::ExternalMemoryType>)

// cl_khr_external_semaphore queries.
#define CL_HPP_PARAM_NAME_CL_KHR_EXTERNAL_SEMAPHORE_(F) \
    F(cl_platform_info, CL_PLATFORM_SEMAPHORE_IMPORT_HANDLE_TYPES_KHR, cl::vector<cl_external_semaphore_handle_type_khr>) \
    F(cl_platform_info, CL_PLATFORM_SEMAPHORE_EXPORT_HANDLE_TYPES_KHR, cl::vector<cl_external_semaphore_handle_type_khr>) \
    F(cl_device_info, CL_DEVICE_SEMAPHORE_IMPORT_HANDLE_TYPES_KHR, cl::vector<cl_external_semaphore_handle_type_khr>) \
    F(cl_device_info, CL_DEVICE_SEMAPHORE_EXPORT_HANDLE_TYPES_KHR, cl::vector<cl_external_semaphore_handle_type_khr>) \
    F(cl_semaphore_info_khr, CL_SEMAPHORE_EXPORT_HANDLE_TYPES_KHR, cl::vector<cl_external_semaphore_handle_type_khr>)

// External-semaphore handle-type payload mappings, one macro per handle family.
#define CL_HPP_PARAM_NAME_CL_KHR_EXTERNAL_SEMAPHORE_OPAQUE_FD_EXT(F) \
    F(cl_external_semaphore_handle_type_khr, CL_SEMAPHORE_HANDLE_OPAQUE_FD_KHR, int)

#define CL_HPP_PARAM_NAME_CL_KHR_EXTERNAL_SEMAPHORE_SYNC_FD_EXT(F) \
    F(cl_external_semaphore_handle_type_khr, CL_SEMAPHORE_HANDLE_SYNC_FD_KHR, int)

#define CL_HPP_PARAM_NAME_CL_KHR_EXTERNAL_SEMAPHORE_WIN32_EXT(F) \
    F(cl_external_semaphore_handle_type_khr, CL_SEMAPHORE_HANDLE_OPAQUE_WIN32_KHR, void*) \
    F(cl_external_semaphore_handle_type_khr, CL_SEMAPHORE_HANDLE_OPAQUE_WIN32_KMT_KHR, void*)
1584
// OpenCL 3.0 additions to the query/result-type table.
#define CL_HPP_PARAM_NAME_INFO_3_0_(F) \
    F(cl_platform_info, CL_PLATFORM_NUMERIC_VERSION, cl_version) \
    F(cl_platform_info, CL_PLATFORM_EXTENSIONS_WITH_VERSION, cl::vector<cl_name_version>) \
    \
    F(cl_device_info, CL_DEVICE_NUMERIC_VERSION, cl_version) \
    F(cl_device_info, CL_DEVICE_EXTENSIONS_WITH_VERSION, cl::vector<cl_name_version>) \
    F(cl_device_info, CL_DEVICE_ILS_WITH_VERSION, cl::vector<cl_name_version>) \
    F(cl_device_info, CL_DEVICE_BUILT_IN_KERNELS_WITH_VERSION, cl::vector<cl_name_version>) \
    F(cl_device_info, CL_DEVICE_ATOMIC_MEMORY_CAPABILITIES, cl_device_atomic_capabilities) \
    F(cl_device_info, CL_DEVICE_ATOMIC_FENCE_CAPABILITIES, cl_device_atomic_capabilities) \
    F(cl_device_info, CL_DEVICE_NON_UNIFORM_WORK_GROUP_SUPPORT, cl_bool) \
    F(cl_device_info, CL_DEVICE_OPENCL_C_ALL_VERSIONS, cl::vector<cl_name_version>) \
    F(cl_device_info, CL_DEVICE_PREFERRED_WORK_GROUP_SIZE_MULTIPLE, size_type) \
    F(cl_device_info, CL_DEVICE_WORK_GROUP_COLLECTIVE_FUNCTIONS_SUPPORT, cl_bool) \
    F(cl_device_info, CL_DEVICE_GENERIC_ADDRESS_SPACE_SUPPORT, cl_bool) \
    F(cl_device_info, CL_DEVICE_OPENCL_C_FEATURES, cl::vector<cl_name_version>) \
    F(cl_device_info, CL_DEVICE_DEVICE_ENQUEUE_CAPABILITIES, cl_device_device_enqueue_capabilities) \
    F(cl_device_info, CL_DEVICE_PIPE_SUPPORT, cl_bool) \
    F(cl_device_info, CL_DEVICE_LATEST_CONFORMANCE_VERSION_PASSED, string) \
    \
    F(cl_command_queue_info, CL_QUEUE_PROPERTIES_ARRAY, cl::vector<cl_queue_properties>) \
    F(cl_mem_info, CL_MEM_PROPERTIES, cl::vector<cl_mem_properties>) \
    F(cl_pipe_info, CL_PIPE_PROPERTIES, cl::vector<cl_pipe_properties>) \
    F(cl_sampler_info, CL_SAMPLER_PROPERTIES, cl::vector<cl_sampler_properties>)

// cl_ext_image_requirements_info queries.
#define CL_HPP_PARAM_NAME_CL_IMAGE_REQUIREMENTS_EXT(F) \
    F(cl_image_requirements_info_ext, CL_IMAGE_REQUIREMENTS_ROW_PITCH_ALIGNMENT_EXT, size_type) \
    F(cl_image_requirements_info_ext, CL_IMAGE_REQUIREMENTS_BASE_ADDRESS_ALIGNMENT_EXT, size_type) \
    F(cl_image_requirements_info_ext, CL_IMAGE_REQUIREMENTS_SIZE_EXT, size_type) \
    F(cl_image_requirements_info_ext, CL_IMAGE_REQUIREMENTS_MAX_WIDTH_EXT, cl_uint) \
    F(cl_image_requirements_info_ext, CL_IMAGE_REQUIREMENTS_MAX_HEIGHT_EXT, cl_uint) \
    F(cl_image_requirements_info_ext, CL_IMAGE_REQUIREMENTS_MAX_DEPTH_EXT, cl_uint) \
    F(cl_image_requirements_info_ext, CL_IMAGE_REQUIREMENTS_MAX_ARRAY_SIZE_EXT, cl_uint)

// Slice-pitch alignment query, gated separately as some headers omit it.
#define CL_HPP_PARAM_NAME_CL_IMAGE_REQUIREMENTS_SLICE_PITCH_ALIGNMENT_EXT(F) \
    F(cl_image_requirements_info_ext, CL_IMAGE_REQUIREMENTS_SLICE_PITCH_ALIGNMENT_EXT, size_type)
1621
// cl_intel_command_queue_families queries.
#define CL_HPP_PARAM_NAME_CL_INTEL_COMMAND_QUEUE_FAMILIES_(F) \
    F(cl_device_info, CL_DEVICE_QUEUE_FAMILY_PROPERTIES_INTEL, cl::vector<cl_queue_family_properties_intel>) \
    \
    F(cl_command_queue_info, CL_QUEUE_FAMILY_INTEL, cl_uint) \
    F(cl_command_queue_info, CL_QUEUE_INDEX_INTEL, cl_uint)

// cl_intel_unified_shared_memory capability queries.
#define CL_HPP_PARAM_NAME_CL_INTEL_UNIFIED_SHARED_MEMORY_(F) \
    F(cl_device_info, CL_DEVICE_HOST_MEM_CAPABILITIES_INTEL, cl_device_unified_shared_memory_capabilities_intel) \
    F(cl_device_info, CL_DEVICE_DEVICE_MEM_CAPABILITIES_INTEL, cl_device_unified_shared_memory_capabilities_intel) \
    F(cl_device_info, CL_DEVICE_SINGLE_DEVICE_SHARED_MEM_CAPABILITIES_INTEL, cl_device_unified_shared_memory_capabilities_intel) \
    F(cl_device_info, CL_DEVICE_CROSS_DEVICE_SHARED_MEM_CAPABILITIES_INTEL, cl_device_unified_shared_memory_capabilities_intel) \
    F(cl_device_info, CL_DEVICE_SHARED_SYSTEM_MEM_CAPABILITIES_INTEL, cl_device_unified_shared_memory_capabilities_intel)
1634
// Primary template, left empty: specializations generated below (via
// CL_HPP_DECLARE_PARAM_TRAITS_) map each query token to its result type.
template <typename enum_type, cl_int Name>
struct param_traits {};

// Declares one param_traits specialization for (token-type, query) -> T.
// The leading forward declaration lets the enum-type name act as a tag.
#define CL_HPP_DECLARE_PARAM_TRAITS_(token, param_name, T) \
struct token; \
template<> \
struct param_traits<detail:: token,param_name> \
{ \
    enum { value = param_name }; \
    typedef T param_type; \
};
1646
// Instantiate param_traits specializations for every query supported by
// the targeted OpenCL version.
CL_HPP_PARAM_NAME_INFO_1_0_(CL_HPP_DECLARE_PARAM_TRAITS_)
#if CL_HPP_TARGET_OPENCL_VERSION >= 110
CL_HPP_PARAM_NAME_INFO_1_1_(CL_HPP_DECLARE_PARAM_TRAITS_)
#endif // CL_HPP_TARGET_OPENCL_VERSION >= 110
#if CL_HPP_TARGET_OPENCL_VERSION >= 120
CL_HPP_PARAM_NAME_INFO_1_2_(CL_HPP_DECLARE_PARAM_TRAITS_)
#endif // CL_HPP_TARGET_OPENCL_VERSION >= 120
#if CL_HPP_TARGET_OPENCL_VERSION >= 200
CL_HPP_PARAM_NAME_INFO_2_0_(CL_HPP_DECLARE_PARAM_TRAITS_)
#endif // CL_HPP_TARGET_OPENCL_VERSION >= 200
#if CL_HPP_TARGET_OPENCL_VERSION >= 210
CL_HPP_PARAM_NAME_INFO_2_1_(CL_HPP_DECLARE_PARAM_TRAITS_)
#endif // CL_HPP_TARGET_OPENCL_VERSION >= 210
#if CL_HPP_TARGET_OPENCL_VERSION >= 220
CL_HPP_PARAM_NAME_INFO_2_2_(CL_HPP_DECLARE_PARAM_TRAITS_)
#endif // CL_HPP_TARGET_OPENCL_VERSION >= 220
#if CL_HPP_TARGET_OPENCL_VERSION >= 300
CL_HPP_PARAM_NAME_INFO_3_0_(CL_HPP_DECLARE_PARAM_TRAITS_)
#endif // CL_HPP_TARGET_OPENCL_VERSION >= 300

// KHR forms of the sub-group / IL queries: only needed when the core
// (>= 2.1) equivalents are not available.
#if defined(cl_khr_subgroups) && CL_HPP_TARGET_OPENCL_VERSION < 210
CL_HPP_PARAM_NAME_INFO_SUBGROUP_KHR_(CL_HPP_DECLARE_PARAM_TRAITS_)
#endif // #if defined(cl_khr_subgroups) && CL_HPP_TARGET_OPENCL_VERSION < 210

#if defined(cl_khr_il_program) && CL_HPP_TARGET_OPENCL_VERSION < 210
CL_HPP_PARAM_NAME_INFO_IL_KHR_(CL_HPP_DECLARE_PARAM_TRAITS_)
#endif // #if defined(cl_khr_il_program) && CL_HPP_TARGET_OPENCL_VERSION < 210
1675
// Flags deprecated in OpenCL 2.0
#define CL_HPP_PARAM_NAME_INFO_1_0_DEPRECATED_IN_2_0_(F) \
    F(cl_device_info, CL_DEVICE_QUEUE_PROPERTIES, cl_command_queue_properties)

#define CL_HPP_PARAM_NAME_INFO_1_1_DEPRECATED_IN_2_0_(F) \
    F(cl_device_info, CL_DEVICE_HOST_UNIFIED_MEMORY, cl_bool)

#define CL_HPP_PARAM_NAME_INFO_1_2_DEPRECATED_IN_2_0_(F) \
    F(cl_image_info, CL_IMAGE_BUFFER, cl::Buffer)

// Include deprecated query flags based on versions.
// Only include deprecated 1.0 flags if 2.0 not active as there is an enum clash.
#if CL_HPP_TARGET_OPENCL_VERSION > 100 && CL_HPP_MINIMUM_OPENCL_VERSION < 200 && CL_HPP_TARGET_OPENCL_VERSION < 200
CL_HPP_PARAM_NAME_INFO_1_0_DEPRECATED_IN_2_0_(CL_HPP_DECLARE_PARAM_TRAITS_)
#endif // CL_HPP_TARGET_OPENCL_VERSION > 100 && CL_HPP_MINIMUM_OPENCL_VERSION < 200 && CL_HPP_TARGET_OPENCL_VERSION < 200
#if CL_HPP_TARGET_OPENCL_VERSION > 110 && CL_HPP_MINIMUM_OPENCL_VERSION < 200
CL_HPP_PARAM_NAME_INFO_1_1_DEPRECATED_IN_2_0_(CL_HPP_DECLARE_PARAM_TRAITS_)
#endif // CL_HPP_TARGET_OPENCL_VERSION > 110 && CL_HPP_MINIMUM_OPENCL_VERSION < 200
#if CL_HPP_TARGET_OPENCL_VERSION > 120 && CL_HPP_MINIMUM_OPENCL_VERSION < 200
CL_HPP_PARAM_NAME_INFO_1_2_DEPRECATED_IN_2_0_(CL_HPP_DECLARE_PARAM_TRAITS_)
#endif // CL_HPP_TARGET_OPENCL_VERSION > 120 && CL_HPP_MINIMUM_OPENCL_VERSION < 200
1697
// Instantiate param_traits for extension-provided queries, each gated on
// the presence of the corresponding extension macro in the C headers.
#if defined(cl_ext_device_fission)
CL_HPP_PARAM_NAME_DEVICE_FISSION_EXT_(CL_HPP_DECLARE_PARAM_TRAITS_)
#endif // cl_ext_device_fission

#if defined(cl_khr_extended_versioning)
#if CL_HPP_TARGET_OPENCL_VERSION < 300
// The KHR aliases duplicate core 3.0 queries; only declare them pre-3.0.
CL_HPP_PARAM_NAME_CL_KHR_EXTENDED_VERSIONING_CL3_SHARED_(CL_HPP_DECLARE_PARAM_TRAITS_)
#endif // CL_HPP_TARGET_OPENCL_VERSION < 300
CL_HPP_PARAM_NAME_CL_KHR_EXTENDED_VERSIONING_KHRONLY_(CL_HPP_DECLARE_PARAM_TRAITS_)
#endif // cl_khr_extended_versioning

#if defined(cl_khr_semaphore)
CL_HPP_PARAM_NAME_CL_KHR_SEMAPHORE_(CL_HPP_DECLARE_PARAM_TRAITS_)
#if defined(CL_SEMAPHORE_DEVICE_HANDLE_LIST_KHR)
CL_HPP_DECLARE_PARAM_TRAITS_(cl_semaphore_info_khr, CL_SEMAPHORE_DEVICE_HANDLE_LIST_KHR, cl::vector<cl::Device>)
#endif // defined(CL_SEMAPHORE_DEVICE_HANDLE_LIST_KHR)
#endif // defined(cl_khr_semaphore)

#ifdef cl_khr_external_memory
CL_HPP_PARAM_NAME_CL_KHR_EXTERNAL_MEMORY_(CL_HPP_DECLARE_PARAM_TRAITS_)
#endif // cl_khr_external_memory

#if defined(cl_khr_external_semaphore)
CL_HPP_PARAM_NAME_CL_KHR_EXTERNAL_SEMAPHORE_(CL_HPP_DECLARE_PARAM_TRAITS_)
#endif // cl_khr_external_semaphore

#if defined(cl_khr_external_semaphore_opaque_fd)
CL_HPP_PARAM_NAME_CL_KHR_EXTERNAL_SEMAPHORE_OPAQUE_FD_EXT(CL_HPP_DECLARE_PARAM_TRAITS_)
#endif // cl_khr_external_semaphore_opaque_fd
#if defined(cl_khr_external_semaphore_sync_fd)
CL_HPP_PARAM_NAME_CL_KHR_EXTERNAL_SEMAPHORE_SYNC_FD_EXT(CL_HPP_DECLARE_PARAM_TRAITS_)
#endif // cl_khr_external_semaphore_sync_fd
#if defined(cl_khr_external_semaphore_win32)
CL_HPP_PARAM_NAME_CL_KHR_EXTERNAL_SEMAPHORE_WIN32_EXT(CL_HPP_DECLARE_PARAM_TRAITS_)
#endif // cl_khr_external_semaphore_win32

#if defined(cl_khr_device_uuid)
// Fixed-size byte arrays for the device/driver UUID and LUID queries.
using uuid_array = array<cl_uchar, CL_UUID_SIZE_KHR>;
using luid_array = array<cl_uchar, CL_LUID_SIZE_KHR>;
CL_HPP_DECLARE_PARAM_TRAITS_(cl_device_info, CL_DEVICE_UUID_KHR, uuid_array)
CL_HPP_DECLARE_PARAM_TRAITS_(cl_device_info, CL_DRIVER_UUID_KHR, uuid_array)
CL_HPP_DECLARE_PARAM_TRAITS_(cl_device_info, CL_DEVICE_LUID_VALID_KHR, cl_bool)
CL_HPP_DECLARE_PARAM_TRAITS_(cl_device_info, CL_DEVICE_LUID_KHR, luid_array)
CL_HPP_DECLARE_PARAM_TRAITS_(cl_device_info, CL_DEVICE_NODE_MASK_KHR, cl_uint)
#endif

#if defined(cl_khr_pci_bus_info)
CL_HPP_DECLARE_PARAM_TRAITS_(cl_device_info, CL_DEVICE_PCI_BUS_INFO_KHR, cl_device_pci_bus_info_khr)
#endif

// Note: some headers do not define cl_khr_image2d_from_buffer
#if CL_HPP_TARGET_OPENCL_VERSION < 200
#if defined(CL_DEVICE_IMAGE_PITCH_ALIGNMENT_KHR)
CL_HPP_DECLARE_PARAM_TRAITS_(cl_device_info, CL_DEVICE_IMAGE_PITCH_ALIGNMENT_KHR, cl_uint)
#endif
#if defined(CL_DEVICE_IMAGE_BASE_ADDRESS_ALIGNMENT_KHR)
CL_HPP_DECLARE_PARAM_TRAITS_(cl_device_info, CL_DEVICE_IMAGE_BASE_ADDRESS_ALIGNMENT_KHR, cl_uint)
#endif
#endif // CL_HPP_TARGET_OPENCL_VERSION < 200
1757
1758#if defined(cl_khr_integer_dot_product)
1759CL_HPP_DECLARE_PARAM_TRAITS_(cl_device_info, CL_DEVICE_INTEGER_DOT_PRODUCT_CAPABILITIES_KHR, cl_device_integer_dot_product_capabilities_khr)
1760#if defined(CL_DEVICE_INTEGER_DOT_PRODUCT_ACCELERATION_PROPERTIES_8BIT_KHR)
1761CL_HPP_DECLARE_PARAM_TRAITS_(cl_device_info, CL_DEVICE_INTEGER_DOT_PRODUCT_ACCELERATION_PROPERTIES_8BIT_KHR, cl_device_integer_dot_product_acceleration_properties_khr)
1762CL_HPP_DECLARE_PARAM_TRAITS_(cl_device_info, CL_DEVICE_INTEGER_DOT_PRODUCT_ACCELERATION_PROPERTIES_4x8BIT_PACKED_KHR, cl_device_integer_dot_product_acceleration_properties_khr)
1763#endif // defined(CL_DEVICE_INTEGER_DOT_PRODUCT_ACCELERATION_PROPERTIES_8BIT_KHR)
1764#endif // defined(cl_khr_integer_dot_product)
1765
1766#if defined(cl_ext_image_requirements_info)
1767CL_HPP_PARAM_NAME_CL_IMAGE_REQUIREMENTS_EXT(CL_HPP_DECLARE_PARAM_TRAITS_)
1768#endif // cl_ext_image_requirements_info
1769
1770#if defined(cl_ext_image_from_buffer)
1771CL_HPP_PARAM_NAME_CL_IMAGE_REQUIREMENTS_SLICE_PITCH_ALIGNMENT_EXT(CL_HPP_DECLARE_PARAM_TRAITS_)
1772#endif // cl_ext_image_from_buffer
1773
1774#ifdef CL_PLATFORM_ICD_SUFFIX_KHR
1775CL_HPP_DECLARE_PARAM_TRAITS_(cl_platform_info, CL_PLATFORM_ICD_SUFFIX_KHR, string)
1776#endif
1777
1778#ifdef CL_DEVICE_PROFILING_TIMER_OFFSET_AMD
1779CL_HPP_DECLARE_PARAM_TRAITS_(cl_device_info, CL_DEVICE_PROFILING_TIMER_OFFSET_AMD, cl_ulong)
1780#endif
1781#ifdef CL_DEVICE_GLOBAL_FREE_MEMORY_AMD
1782CL_HPP_DECLARE_PARAM_TRAITS_(cl_device_info, CL_DEVICE_GLOBAL_FREE_MEMORY_AMD, vector<size_type>)
1783#endif
1784#ifdef CL_DEVICE_SIMD_PER_COMPUTE_UNIT_AMD
1785CL_HPP_DECLARE_PARAM_TRAITS_(cl_device_info, CL_DEVICE_SIMD_PER_COMPUTE_UNIT_AMD, cl_uint)
1786#endif
1787#ifdef CL_DEVICE_SIMD_WIDTH_AMD
1788CL_HPP_DECLARE_PARAM_TRAITS_(cl_device_info, CL_DEVICE_SIMD_WIDTH_AMD, cl_uint)
1789#endif
1790#ifdef CL_DEVICE_SIMD_INSTRUCTION_WIDTH_AMD
1791CL_HPP_DECLARE_PARAM_TRAITS_(cl_device_info, CL_DEVICE_SIMD_INSTRUCTION_WIDTH_AMD, cl_uint)
1792#endif
1793#ifdef CL_DEVICE_WAVEFRONT_WIDTH_AMD
1794CL_HPP_DECLARE_PARAM_TRAITS_(cl_device_info, CL_DEVICE_WAVEFRONT_WIDTH_AMD, cl_uint)
1795#endif
1796#ifdef CL_DEVICE_GLOBAL_MEM_CHANNELS_AMD
1797CL_HPP_DECLARE_PARAM_TRAITS_(cl_device_info, CL_DEVICE_GLOBAL_MEM_CHANNELS_AMD, cl_uint)
1798#endif
1799#ifdef CL_DEVICE_GLOBAL_MEM_CHANNEL_BANKS_AMD
1800CL_HPP_DECLARE_PARAM_TRAITS_(cl_device_info, CL_DEVICE_GLOBAL_MEM_CHANNEL_BANKS_AMD, cl_uint)
1801#endif
1802#ifdef CL_DEVICE_GLOBAL_MEM_CHANNEL_BANK_WIDTH_AMD
1803CL_HPP_DECLARE_PARAM_TRAITS_(cl_device_info, CL_DEVICE_GLOBAL_MEM_CHANNEL_BANK_WIDTH_AMD, cl_uint)
1804#endif
1805#ifdef CL_DEVICE_LOCAL_MEM_SIZE_PER_COMPUTE_UNIT_AMD
1806CL_HPP_DECLARE_PARAM_TRAITS_(cl_device_info, CL_DEVICE_LOCAL_MEM_SIZE_PER_COMPUTE_UNIT_AMD, cl_uint)
1807#endif
1808#ifdef CL_DEVICE_LOCAL_MEM_BANKS_AMD
1809CL_HPP_DECLARE_PARAM_TRAITS_(cl_device_info, CL_DEVICE_LOCAL_MEM_BANKS_AMD, cl_uint)
1810#endif
1811#ifdef CL_DEVICE_BOARD_NAME_AMD
1812CL_HPP_DECLARE_PARAM_TRAITS_(cl_device_info, CL_DEVICE_BOARD_NAME_AMD, string)
1813#endif
1814
1815#ifdef CL_DEVICE_COMPUTE_UNITS_BITFIELD_ARM
1816CL_HPP_DECLARE_PARAM_TRAITS_(cl_device_info, CL_DEVICE_COMPUTE_UNITS_BITFIELD_ARM, cl_ulong)
1817#endif
1818#ifdef CL_DEVICE_JOB_SLOTS_ARM
1819CL_HPP_DECLARE_PARAM_TRAITS_(cl_device_info, CL_DEVICE_JOB_SLOTS_ARM, cl_uint)
1820#endif
1821#ifdef CL_DEVICE_SCHEDULING_CONTROLS_CAPABILITIES_ARM
1822CL_HPP_DECLARE_PARAM_TRAITS_(cl_device_info, CL_DEVICE_SCHEDULING_CONTROLS_CAPABILITIES_ARM, cl_bitfield)
1823#endif
1824#ifdef CL_DEVICE_SUPPORTED_REGISTER_ALLOCATIONS_ARM
1825CL_HPP_DECLARE_PARAM_TRAITS_(cl_device_info, CL_DEVICE_SUPPORTED_REGISTER_ALLOCATIONS_ARM, vector<cl_uint>)
1826#endif
1827#ifdef CL_DEVICE_MAX_WARP_COUNT_ARM
1828CL_HPP_DECLARE_PARAM_TRAITS_(cl_device_info, CL_DEVICE_MAX_WARP_COUNT_ARM, cl_uint)
1829#endif
1830#ifdef CL_KERNEL_MAX_WARP_COUNT_ARM
1831CL_HPP_DECLARE_PARAM_TRAITS_(cl_kernel_info, CL_KERNEL_MAX_WARP_COUNT_ARM, cl_uint)
1832#endif
1833#ifdef CL_KERNEL_EXEC_INFO_WORKGROUP_BATCH_SIZE_ARM
1834CL_HPP_DECLARE_PARAM_TRAITS_(cl_kernel_exec_info, CL_KERNEL_EXEC_INFO_WORKGROUP_BATCH_SIZE_ARM, cl_uint)
1835#endif
1836#ifdef CL_KERNEL_EXEC_INFO_WORKGROUP_BATCH_SIZE_MODIFIER_ARM
1837CL_HPP_DECLARE_PARAM_TRAITS_(cl_kernel_exec_info, CL_KERNEL_EXEC_INFO_WORKGROUP_BATCH_SIZE_MODIFIER_ARM, cl_int)
1838#endif
1839#ifdef CL_KERNEL_EXEC_INFO_WARP_COUNT_LIMIT_ARM
1840CL_HPP_DECLARE_PARAM_TRAITS_(cl_kernel_exec_info, CL_KERNEL_EXEC_INFO_WARP_COUNT_LIMIT_ARM, cl_uint)
1841#endif
1842#ifdef CL_KERNEL_EXEC_INFO_COMPUTE_UNIT_MAX_QUEUED_BATCHES_ARM
1843CL_HPP_DECLARE_PARAM_TRAITS_(cl_kernel_exec_info, CL_KERNEL_EXEC_INFO_COMPUTE_UNIT_MAX_QUEUED_BATCHES_ARM, cl_uint)
1844#endif
1845
1846#ifdef CL_DEVICE_COMPUTE_CAPABILITY_MAJOR_NV
1847CL_HPP_DECLARE_PARAM_TRAITS_(cl_device_info, CL_DEVICE_COMPUTE_CAPABILITY_MAJOR_NV, cl_uint)
1848#endif
1849#ifdef CL_DEVICE_COMPUTE_CAPABILITY_MINOR_NV
1850CL_HPP_DECLARE_PARAM_TRAITS_(cl_device_info, CL_DEVICE_COMPUTE_CAPABILITY_MINOR_NV, cl_uint)
1851#endif
1852#ifdef CL_DEVICE_REGISTERS_PER_BLOCK_NV
1853CL_HPP_DECLARE_PARAM_TRAITS_(cl_device_info, CL_DEVICE_REGISTERS_PER_BLOCK_NV, cl_uint)
1854#endif
1855#ifdef CL_DEVICE_WARP_SIZE_NV
1856CL_HPP_DECLARE_PARAM_TRAITS_(cl_device_info, CL_DEVICE_WARP_SIZE_NV, cl_uint)
1857#endif
1858#ifdef CL_DEVICE_GPU_OVERLAP_NV
1859CL_HPP_DECLARE_PARAM_TRAITS_(cl_device_info, CL_DEVICE_GPU_OVERLAP_NV, cl_bool)
1860#endif
1861#ifdef CL_DEVICE_KERNEL_EXEC_TIMEOUT_NV
1862CL_HPP_DECLARE_PARAM_TRAITS_(cl_device_info, CL_DEVICE_KERNEL_EXEC_TIMEOUT_NV, cl_bool)
1863#endif
1864#ifdef CL_DEVICE_INTEGRATED_MEMORY_NV
1865CL_HPP_DECLARE_PARAM_TRAITS_(cl_device_info, CL_DEVICE_INTEGRATED_MEMORY_NV, cl_bool)
1866#endif
1867
1868#if defined(cl_khr_command_buffer)
1869CL_HPP_DECLARE_PARAM_TRAITS_(cl_device_info, CL_DEVICE_COMMAND_BUFFER_CAPABILITIES_KHR, cl_device_command_buffer_capabilities_khr)
1870#if CL_KHR_COMMAND_BUFFER_EXTENSION_VERSION > CL_MAKE_VERSION(0, 9, 5)
1871CL_HPP_DECLARE_PARAM_TRAITS_(
1872 cl_device_info, CL_DEVICE_COMMAND_BUFFER_SUPPORTED_QUEUE_PROPERTIES_KHR,
1873 cl_command_queue_properties)
1874#endif
1875CL_HPP_DECLARE_PARAM_TRAITS_(cl_device_info, CL_DEVICE_COMMAND_BUFFER_REQUIRED_QUEUE_PROPERTIES_KHR, cl_command_queue_properties)
1876CL_HPP_DECLARE_PARAM_TRAITS_(cl_command_buffer_info_khr, CL_COMMAND_BUFFER_QUEUES_KHR, cl::vector<CommandQueue>)
1877CL_HPP_DECLARE_PARAM_TRAITS_(cl_command_buffer_info_khr, CL_COMMAND_BUFFER_NUM_QUEUES_KHR, cl_uint)
1878CL_HPP_DECLARE_PARAM_TRAITS_(cl_command_buffer_info_khr, CL_COMMAND_BUFFER_REFERENCE_COUNT_KHR, cl_uint)
1879CL_HPP_DECLARE_PARAM_TRAITS_(cl_command_buffer_info_khr, CL_COMMAND_BUFFER_STATE_KHR, cl_command_buffer_state_khr)
1880CL_HPP_DECLARE_PARAM_TRAITS_(cl_command_buffer_info_khr, CL_COMMAND_BUFFER_PROPERTIES_ARRAY_KHR, cl::vector<cl_command_buffer_properties_khr>)
1881#endif /* cl_khr_command_buffer */
1882
1883#if defined(cl_khr_command_buffer_mutable_dispatch)
1884CL_HPP_DECLARE_PARAM_TRAITS_(cl_mutable_command_info_khr, CL_MUTABLE_COMMAND_COMMAND_QUEUE_KHR, CommandQueue)
1885CL_HPP_DECLARE_PARAM_TRAITS_(cl_mutable_command_info_khr, CL_MUTABLE_COMMAND_COMMAND_BUFFER_KHR, CommandBufferKhr)
1886CL_HPP_DECLARE_PARAM_TRAITS_(cl_mutable_command_info_khr, CL_MUTABLE_COMMAND_COMMAND_TYPE_KHR, cl_command_type)
1887
1888#if CL_KHR_COMMAND_BUFFER_MUTABLE_DISPATCH_EXTENSION_VERSION > CL_MAKE_VERSION(0, 9, 2)
1889CL_HPP_DECLARE_PARAM_TRAITS_(cl_mutable_command_info_khr, CL_MUTABLE_COMMAND_PROPERTIES_ARRAY_KHR, cl::vector<cl_command_properties_khr>)
1890#else
1891CL_HPP_DECLARE_PARAM_TRAITS_(cl_mutable_command_info_khr, CL_MUTABLE_DISPATCH_PROPERTIES_ARRAY_KHR, cl::vector<cl_ndrange_kernel_command_properties_khr>)
1892#endif
1893CL_HPP_DECLARE_PARAM_TRAITS_(cl_mutable_command_info_khr, CL_MUTABLE_DISPATCH_KERNEL_KHR, cl_kernel)
1894CL_HPP_DECLARE_PARAM_TRAITS_(cl_mutable_command_info_khr, CL_MUTABLE_DISPATCH_DIMENSIONS_KHR, cl_uint)
1895CL_HPP_DECLARE_PARAM_TRAITS_(cl_mutable_command_info_khr, CL_MUTABLE_DISPATCH_GLOBAL_WORK_OFFSET_KHR, cl::vector<size_type>)
1896CL_HPP_DECLARE_PARAM_TRAITS_(cl_mutable_command_info_khr, CL_MUTABLE_DISPATCH_GLOBAL_WORK_SIZE_KHR, cl::vector<size_type>)
1897CL_HPP_DECLARE_PARAM_TRAITS_(cl_mutable_command_info_khr, CL_MUTABLE_DISPATCH_LOCAL_WORK_SIZE_KHR, cl::vector<size_type>)
1898#endif /* cl_khr_command_buffer_mutable_dispatch */
1899
1900#if defined(cl_khr_kernel_clock)
1901CL_HPP_DECLARE_PARAM_TRAITS_(cl_device_info, CL_DEVICE_KERNEL_CLOCK_CAPABILITIES_KHR, cl_device_kernel_clock_capabilities_khr)
1902#endif /* cl_khr_kernel_clock */
1903
1904#if defined(cl_khr_spirv_queries)
1905CL_HPP_DECLARE_PARAM_TRAITS_(cl_device_info, CL_DEVICE_SPIRV_EXTENDED_INSTRUCTION_SETS_KHR, cl::vector<const char*>)
1906CL_HPP_DECLARE_PARAM_TRAITS_(cl_device_info, CL_DEVICE_SPIRV_EXTENSIONS_KHR, cl::vector<const char*>)
1907CL_HPP_DECLARE_PARAM_TRAITS_(cl_device_info, CL_DEVICE_SPIRV_CAPABILITIES_KHR, cl::vector<cl_uint>)
1908#endif /* cl_khr_spirv_queries */
1909
1910#if defined(cl_ext_float_atomics)
1911CL_HPP_DECLARE_PARAM_TRAITS_(cl_device_info, CL_DEVICE_SINGLE_FP_ATOMIC_CAPABILITIES_EXT, cl_device_fp_atomic_capabilities_ext)
1912CL_HPP_DECLARE_PARAM_TRAITS_(cl_device_info, CL_DEVICE_DOUBLE_FP_ATOMIC_CAPABILITIES_EXT, cl_device_fp_atomic_capabilities_ext)
1913CL_HPP_DECLARE_PARAM_TRAITS_(cl_device_info, CL_DEVICE_HALF_FP_ATOMIC_CAPABILITIES_EXT, cl_device_fp_atomic_capabilities_ext)
1914#endif /* cl_ext_float_atomics */
1915
1916#if defined(cl_intel_command_queue_families)
1917CL_HPP_PARAM_NAME_CL_INTEL_COMMAND_QUEUE_FAMILIES_(CL_HPP_DECLARE_PARAM_TRAITS_)
1918#endif // cl_intel_command_queue_families
1919
1920#if defined(cl_intel_device_attribute_query)
1921CL_HPP_DECLARE_PARAM_TRAITS_(cl_device_info, CL_DEVICE_IP_VERSION_INTEL, cl_uint)
1922CL_HPP_DECLARE_PARAM_TRAITS_(cl_device_info, CL_DEVICE_ID_INTEL, cl_uint)
1923CL_HPP_DECLARE_PARAM_TRAITS_(cl_device_info, CL_DEVICE_NUM_SLICES_INTEL, cl_uint)
1924CL_HPP_DECLARE_PARAM_TRAITS_(cl_device_info, CL_DEVICE_NUM_SUB_SLICES_PER_SLICE_INTEL, cl_uint)
1925CL_HPP_DECLARE_PARAM_TRAITS_(cl_device_info, CL_DEVICE_NUM_EUS_PER_SUB_SLICE_INTEL, cl_uint)
1926CL_HPP_DECLARE_PARAM_TRAITS_(cl_device_info, CL_DEVICE_NUM_THREADS_PER_EU_INTEL, cl_uint)
1927CL_HPP_DECLARE_PARAM_TRAITS_(cl_device_info, CL_DEVICE_FEATURE_CAPABILITIES_INTEL, cl_device_feature_capabilities_intel)
1928#endif // cl_intel_device_attribute_query
1929
1930#if defined(cl_intel_required_subgroup_size)
1931CL_HPP_DECLARE_PARAM_TRAITS_(cl_device_info, CL_DEVICE_SUB_GROUP_SIZES_INTEL, cl::vector<size_type>)
1932CL_HPP_DECLARE_PARAM_TRAITS_(cl_kernel_work_group_info, CL_KERNEL_SPILL_MEM_SIZE_INTEL, cl_ulong)
1933#endif // cl_intel_required_subgroup_size
1934
1935#if defined(cl_intel_unified_shared_memory)
1936CL_HPP_PARAM_NAME_CL_INTEL_UNIFIED_SHARED_MEMORY_(CL_HPP_DECLARE_PARAM_TRAITS_)
1937#endif // cl_intel_unified_shared_memory
1938
1939// Convenience functions
1940
// Convenience wrapper: queries a single parameter by calling an OpenCL
// clGet*Info-style callable `f` and writes the result into *param.
// All sizing/dispatch logic lives in getInfoHelper; the trailing 0 is the
// overload-selection tag it expects.
template <typename Func, typename T>
inline cl_int
getInfo(Func f, cl_uint name, T* param)
{
    return getInfoHelper(f, name, param, 0);
}
1947
// Binds one leading argument (e.g. a cl_device_id) in front of an OpenCL
// info-query entry point, exposing the four-argument call signature that
// getInfoHelper expects.
// NOTE(review): arg0_ is stored by reference — the functor must not outlive
// the bound argument (it doesn't in the getInfo() helpers below).
template <typename Func, typename Arg0>
struct GetInfoFunctor0
{
    Func f_; const Arg0& arg0_;
    cl_int operator ()(
        cl_uint param, size_type size, void* value, size_type* size_ret)
    { return f_(arg0_, param, size, value, size_ret); }
};
1956
// Binds two leading arguments (e.g. program + device for
// clGetProgramBuildInfo) in front of an OpenCL info-query entry point,
// exposing the four-argument call signature that getInfoHelper expects.
// NOTE(review): arguments are stored by reference; same lifetime caveat
// as GetInfoFunctor0.
template <typename Func, typename Arg0, typename Arg1>
struct GetInfoFunctor1
{
    Func f_; const Arg0& arg0_; const Arg1& arg1_;
    cl_int operator ()(
        cl_uint param, size_type size, void* value, size_type* size_ret)
    { return f_(arg0_, arg1_, param, size, value, size_ret); }
};
1965
// Queries a parameter from an OpenCL object addressed by one leading
// argument (e.g. clGetDeviceInfo with a cl_device_id), writing the result
// into *param.
template <typename Func, typename Arg0, typename T>
inline cl_int
getInfo(Func f, const Arg0& arg0, cl_uint name, T* param)
{
    GetInfoFunctor0<Func, Arg0> f0 = { f, arg0 };
    return getInfoHelper(f0, name, param, 0);
}
1973
1974template <typename Func, typename Arg0, typename Arg1, typename T>
1975inline cl_int
1976getInfo(Func f, const Arg0& arg0, const Arg1& arg1, cl_uint name, T* param)
1977{
1978 GetInfoFunctor1<Func, Arg0, Arg1> f0 = { f, arg0, arg1 };
1979 return getInfoHelper(f0, name, param, 0);
1981
1982
// Primary template: specialized below for each OpenCL handle type to
// provide the retain()/release() operations used by detail::Wrapper for
// reference counting. The unspecialized form is intentionally empty.
template<typename T>
struct ReferenceHandler
{ };
1986
#if CL_HPP_TARGET_OPENCL_VERSION >= 120
/*! \brief ReferenceHandler<cl_device_id>
 *  OpenCL 1.2 devices do have retain/release.
 */
template <>
struct ReferenceHandler<cl_device_id>
{
    /*! \brief Retains the device via clRetainDevice().
     *  Possible error codes (see the OpenCL specification):
     *  CL_INVALID_DEVICE if device was not a valid subdevice
     *  CL_OUT_OF_RESOURCES
     *  CL_OUT_OF_HOST_MEMORY
     */
    static cl_int retain(cl_device_id device)
    { return CL_(clRetainDevice)(device); }
    /*! \brief Releases the device via clReleaseDevice().
     *  Possible error codes (see the OpenCL specification):
     *  CL_INVALID_DEVICE if device was not a valid subdevice
     *  CL_OUT_OF_RESOURCES
     *  CL_OUT_OF_HOST_MEMORY
     */
    static cl_int release(cl_device_id device)
    { return CL_(clReleaseDevice)(device); }
};
2017#else // CL_HPP_TARGET_OPENCL_VERSION >= 120
2021template <>
2022struct ReferenceHandler<cl_device_id>
2023{
2024 // cl_device_id does not have retain().
2025 static cl_int retain(cl_device_id)
2026 { return CL_SUCCESS; }
2027 // cl_device_id does not have release().
2028 static cl_int release(cl_device_id)
2029 { return CL_SUCCESS; }
2031#endif // ! (CL_HPP_TARGET_OPENCL_VERSION >= 120)
2032
// ReferenceHandler specializations for the core OpenCL 1.x handle types.
// Each forwards retain()/release() to the matching clRetain*/clRelease*
// entry point. cl_platform_id is not reference counted, so its handlers
// are success-returning no-ops.

template <>
struct ReferenceHandler<cl_platform_id>
{
    // cl_platform_id does not have retain().
    static cl_int retain(cl_platform_id)
    { return CL_SUCCESS; }
    // cl_platform_id does not have release().
    static cl_int release(cl_platform_id)
    { return CL_SUCCESS; }
};

// Contexts: clRetainContext / clReleaseContext.
template <>
struct ReferenceHandler<cl_context>
{
    static cl_int retain(cl_context context)
    { return CL_(clRetainContext)(context); }
    static cl_int release(cl_context context)
    { return CL_(clReleaseContext)(context); }
};

// Command queues: clRetainCommandQueue / clReleaseCommandQueue.
template <>
struct ReferenceHandler<cl_command_queue>
{
    static cl_int retain(cl_command_queue queue)
    { return CL_(clRetainCommandQueue)(queue); }
    static cl_int release(cl_command_queue queue)
    { return CL_(clReleaseCommandQueue)(queue); }
};

// Memory objects (buffers/images/pipes): clRetainMemObject / clReleaseMemObject.
template <>
struct ReferenceHandler<cl_mem>
{
    static cl_int retain(cl_mem memory)
    { return CL_(clRetainMemObject)(memory); }
    static cl_int release(cl_mem memory)
    { return CL_(clReleaseMemObject)(memory); }
};

// Samplers: clRetainSampler / clReleaseSampler.
template <>
struct ReferenceHandler<cl_sampler>
{
    static cl_int retain(cl_sampler sampler)
    { return CL_(clRetainSampler)(sampler); }
    static cl_int release(cl_sampler sampler)
    { return CL_(clReleaseSampler)(sampler); }
};

// Programs: clRetainProgram / clReleaseProgram.
template <>
struct ReferenceHandler<cl_program>
{
    static cl_int retain(cl_program program)
    { return CL_(clRetainProgram)(program); }
    static cl_int release(cl_program program)
    { return CL_(clReleaseProgram)(program); }
};

// Kernels: clRetainKernel / clReleaseKernel.
template <>
struct ReferenceHandler<cl_kernel>
{
    static cl_int retain(cl_kernel kernel)
    { return CL_(clRetainKernel)(kernel); }
    static cl_int release(cl_kernel kernel)
    { return CL_(clReleaseKernel)(kernel); }
};

// Events: clRetainEvent / clReleaseEvent.
template <>
struct ReferenceHandler<cl_event>
{
    static cl_int retain(cl_event event)
    { return CL_(clRetainEvent)(event); }
    static cl_int release(cl_event event)
    { return CL_(clReleaseEvent)(event); }
};
2106
#ifdef cl_khr_semaphore
// Semaphores (cl_khr_semaphore) are retained/released through dynamically
// loaded extension entry points. If the pfn_* pointer has not been
// resolved, the operation reports CL_INVALID_OPERATION.
template <>
struct ReferenceHandler<cl_semaphore_khr>
{
    static cl_int retain(cl_semaphore_khr semaphore)
    {
        if (pfn_clRetainSemaphoreKHR != nullptr) {
            return pfn_clRetainSemaphoreKHR(semaphore);
        }

        return CL_INVALID_OPERATION;
    }

    static cl_int release(cl_semaphore_khr semaphore)
    {
        if (pfn_clReleaseSemaphoreKHR != nullptr) {
            return pfn_clReleaseSemaphoreKHR(semaphore);
        }

        return CL_INVALID_OPERATION;
    }
};
#endif // cl_khr_semaphore
#if defined(cl_khr_command_buffer)
// Command buffers (cl_khr_command_buffer) also go through dynamically
// loaded entry points; unlike the semaphore handlers above, a missing
// entry point is routed through detail::errHandler so it can throw when
// exceptions are enabled.
template <>
struct ReferenceHandler<cl_command_buffer_khr>
{
    static cl_int retain(cl_command_buffer_khr cmdBufferKhr)
    {
        if (pfn_clRetainCommandBufferKHR == nullptr) {
            return detail::errHandler(CL_INVALID_OPERATION, __RETAIN_COMMAND_BUFFER_KHR_ERR);
        }
        return pfn_clRetainCommandBufferKHR(cmdBufferKhr);
    }

    static cl_int release(cl_command_buffer_khr cmdBufferKhr)
    {
        if (pfn_clReleaseCommandBufferKHR == nullptr) {
            return detail::errHandler(CL_INVALID_OPERATION, __RELEASE_COMMAND_BUFFER_KHR_ERR);
        }
        return pfn_clReleaseCommandBufferKHR(cmdBufferKhr);
    }
};

// Mutable commands are owned by their command buffer and are not
// independently reference counted.
template <>
struct ReferenceHandler<cl_mutable_command_khr>
{
    // cl_mutable_command_khr does not have retain().
    static cl_int retain(cl_mutable_command_khr)
    { return CL_SUCCESS; }
    // cl_mutable_command_khr does not have release().
    static cl_int release(cl_mutable_command_khr)
    { return CL_SUCCESS; }
};
#endif // cl_khr_command_buffer
2162
2163
2164#if (CL_HPP_TARGET_OPENCL_VERSION >= 120 && CL_HPP_MINIMUM_OPENCL_VERSION < 120) || \
2165 (CL_HPP_TARGET_OPENCL_VERSION >= 200 && CL_HPP_MINIMUM_OPENCL_VERSION < 200)
2166// Extracts version number with major in the upper 16 bits, minor in the lower 16
// Parses a CL_PLATFORM_VERSION-style string out of `versionInfo` and packs
// it as (major << 16) | minor.
// The string has the form "OpenCL <major>.<minor> <vendor-specific>", so
// parsing starts at index 7, immediately past the 7-character "OpenCL "
// prefix. Assumes the buffer is well-formed (contains a '.' and terminates
// with ' ' or '\0') — no bounds checking is performed.
static cl_uint getVersion(const vector<char> &versionInfo)
{
    int highVersion = 0;
    int lowVersion = 0;
    int index = 7;
    // Accumulate decimal digits of the major version up to the '.'.
    while(versionInfo[index] != '.' ) {
        highVersion *= 10;
        highVersion += versionInfo[index]-'0';
        ++index;
    }
    ++index; // step over the '.'
    // Accumulate decimal digits of the minor version up to a space or NUL.
    while(versionInfo[index] != ' ' && versionInfo[index] != '\0') {
        lowVersion *= 10;
        lowVersion += versionInfo[index]-'0';
        ++index;
    }
    return (highVersion << 16) | lowVersion;
}
2185
// Queries CL_PLATFORM_VERSION for `platform` (size query, then data query)
// and returns it packed as (major << 16) | minor via getVersion().
static cl_uint getPlatformVersion(cl_platform_id platform)
{
    size_type size = 0;
    CL_(clGetPlatformInfo)(platform, CL_PLATFORM_VERSION, 0, nullptr, &size);

    vector<char> versionInfo(size);
    CL_(clGetPlatformInfo)(platform, CL_PLATFORM_VERSION, size, versionInfo.data(), &size);
    return getVersion(versionInfo);
}
2195
// Returns the packed OpenCL version of the platform that `device`
// belongs to (via CL_DEVICE_PLATFORM, then getPlatformVersion()).
static cl_uint getDevicePlatformVersion(cl_device_id device)
{
    cl_platform_id platform;
    CL_(clGetDeviceInfo)(device, CL_DEVICE_PLATFORM, sizeof(platform), &platform, nullptr);
    return getPlatformVersion(platform);
}
2202
2203static cl_uint getContextPlatformVersion(cl_context context)
2204{
2205 // The platform cannot be queried directly, so we first have to grab a
2206 // device and obtain its context
2207 size_type size = 0;
2208 CL_(clGetContextInfo)(context, CL_CONTEXT_DEVICES, 0, nullptr, &size);
2209 if (size == 0)
2210 return 0;
2211 vector<cl_device_id> devices(size/sizeof(cl_device_id));
2212 CL_(clGetContextInfo)(context, CL_CONTEXT_DEVICES, size, devices.data(), nullptr);
2213 return getDevicePlatformVersion(devices[0]);
2215#endif // CL_HPP_TARGET_OPENCL_VERSION && CL_HPP_MINIMUM_OPENCL_VERSION
2216
2217template <typename T>
2218class Wrapper
2219{
2220public:
2221 typedef T cl_type;
2222
2223protected:
2224 cl_type object_;
2225
2226public:
2227 Wrapper() : object_(nullptr) { }
2228
2229 Wrapper(const cl_type &obj, bool retainObject) : object_(obj)
2230 {
2231 if (retainObject) {
2232 detail::errHandler(retain(), __RETAIN_ERR);
2233 }
2234 }
2235
2236 ~Wrapper()
2237 {
2238 if (object_ != nullptr) { release(); }
2239 }
2240
2241 Wrapper(const Wrapper<cl_type>& rhs)
2242 {
2243 object_ = rhs.object_;
2244 detail::errHandler(retain(), __RETAIN_ERR);
2245 }
2246
2247 Wrapper(Wrapper<cl_type>&& rhs) noexcept
2248 {
2249 object_ = rhs.object_;
2250 rhs.object_ = nullptr;
2251 }
2252
2253 Wrapper<cl_type>& operator = (const Wrapper<cl_type>& rhs)
2254 {
2255 if (this != &rhs) {
2256 detail::errHandler(release(), __RELEASE_ERR);
2257 object_ = rhs.object_;
2258 detail::errHandler(retain(), __RETAIN_ERR);
2259 }
2260 return *this;
2261 }
2262
2263 Wrapper<cl_type>& operator = (Wrapper<cl_type>&& rhs)
2264 {
2265 if (this != &rhs) {
2266 detail::errHandler(release(), __RELEASE_ERR);
2267 object_ = rhs.object_;
2268 rhs.object_ = nullptr;
2269 }
2270 return *this;
2271 }
2272
2273 Wrapper<cl_type>& operator = (const cl_type &rhs)
2274 {
2275 detail::errHandler(release(), __RELEASE_ERR);
2276 object_ = rhs;
2277 return *this;
2278 }
2279
2280 const cl_type& operator ()() const { return object_; }
2281
2282 cl_type& operator ()() { return object_; }
2283
2284 cl_type get() const { return object_; }
2285
2286protected:
2287 template<typename Func, typename U>
2288 friend inline cl_int getInfoHelper(Func, cl_uint, U*, int, typename U::cl_type);
2289
2290 cl_int retain() const
2291 {
2292 if (object_ != nullptr) {
2293 return ReferenceHandler<cl_type>::retain(object_);
2294 }
2295 else {
2296 return CL_SUCCESS;
2297 }
2298 }
2299
2300 cl_int release() const
2301 {
2302 if (object_ != nullptr) {
2303 return ReferenceHandler<cl_type>::release(object_);
2304 }
2305 else {
2306 return CL_SUCCESS;
2307 }
2309};
2310
/*! \brief Specialization of Wrapper for cl_device_id.
 *
 *  Devices are only reference counted on OpenCL 1.2+ platforms (root
 *  devices are never destroyed; subdevices are). This specialization
 *  therefore tracks, per wrapped handle, whether retain/release should be
 *  forwarded at all (referenceCountable_).
 */
template <>
class Wrapper<cl_device_id>
{
public:
    typedef cl_device_id cl_type;

protected:
    cl_type object_;
    bool referenceCountable_;

    // True when the device's platform supports clRetain/ReleaseDevice.
    // When the build must support pre-1.2 platforms this is decided at
    // runtime by querying the platform version; otherwise it is a
    // compile-time constant.
    static bool isReferenceCountable(cl_device_id device)
    {
        bool retVal = false;
#if CL_HPP_TARGET_OPENCL_VERSION >= 120 && CL_HPP_MINIMUM_OPENCL_VERSION < 120
        if (device != nullptr) {
            int version = getDevicePlatformVersion(device);
            // Packed version > 1.1 means the platform is at least 1.2.
            if(version > ((1 << 16) + 1)) {
                retVal = true;
            }
        }
#elif CL_HPP_TARGET_OPENCL_VERSION >= 120
        retVal = true;
#endif // CL_HPP_TARGET_OPENCL_VERSION
        (void)device;
        return retVal;
    }

public:
    Wrapper() : object_(nullptr), referenceCountable_(false)
    {
    }

    // Wraps obj; retains only when retainObject is true and the device is
    // reference countable.
    Wrapper(const cl_type &obj, bool retainObject) :
        object_(obj),
        referenceCountable_(false)
    {
        referenceCountable_ = isReferenceCountable(obj);

        if (retainObject) {
            detail::errHandler(retain(), __RETAIN_ERR);
        }
    }

    ~Wrapper()
    {
        release(); // null-safe; no-op for non-countable devices
    }

    Wrapper(const Wrapper<cl_type>& rhs)
    {
        object_ = rhs.object_;
        referenceCountable_ = isReferenceCountable(object_);
        detail::errHandler(retain(), __RETAIN_ERR);
    }

    Wrapper(Wrapper<cl_type>&& rhs) noexcept
    {
        object_ = rhs.object_;
        referenceCountable_ = rhs.referenceCountable_;
        rhs.object_ = nullptr;
        rhs.referenceCountable_ = false;
    }

    Wrapper<cl_type>& operator = (const Wrapper<cl_type>& rhs)
    {
        if (this != &rhs) {
            detail::errHandler(release(), __RELEASE_ERR);
            object_ = rhs.object_;
            referenceCountable_ = rhs.referenceCountable_;
            detail::errHandler(retain(), __RETAIN_ERR);
        }
        return *this;
    }

    Wrapper<cl_type>& operator = (Wrapper<cl_type>&& rhs)
    {
        if (this != &rhs) {
            detail::errHandler(release(), __RELEASE_ERR);
            object_ = rhs.object_;
            referenceCountable_ = rhs.referenceCountable_;
            rhs.object_ = nullptr;
            rhs.referenceCountable_ = false;
        }
        return *this;
    }

    // Takes over a raw handle WITHOUT retaining it.
    Wrapper<cl_type>& operator = (const cl_type &rhs)
    {
        detail::errHandler(release(), __RELEASE_ERR);
        object_ = rhs;
        referenceCountable_ = isReferenceCountable(object_);
        return *this;
    }

    const cl_type& operator ()() const { return object_; }

    cl_type& operator ()() { return object_; }

    cl_type get() const { return object_; }

protected:
    template<typename Func, typename U>
    friend inline cl_int getInfoHelper(Func, cl_uint, U*, int, typename U::cl_type);

    template<typename Func, typename U>
    friend inline cl_int getInfoHelper(Func, cl_uint, vector<U>*, int, typename U::cl_type);

    // retain/release forward to ReferenceHandler only for non-null,
    // reference-countable handles; otherwise they succeed as no-ops.
    cl_int retain() const
    {
        if( object_ != nullptr && referenceCountable_ ) {
            return ReferenceHandler<cl_type>::retain(object_);
        }
        else {
            return CL_SUCCESS;
        }
    }

    cl_int release() const
    {
        if (object_ != nullptr && referenceCountable_) {
            return ReferenceHandler<cl_type>::release(object_);
        }
        else {
            return CL_SUCCESS;
        }
    }
};
2438
2439template <typename T>
2440inline bool operator==(const Wrapper<T> &lhs, const Wrapper<T> &rhs)
2441{
2442 return lhs() == rhs();
2443}
2444
2445template <typename T>
2446inline bool operator!=(const Wrapper<T> &lhs, const Wrapper<T> &rhs)
2447{
2448 return !operator==(lhs, rhs);
2449}
2450
2451} // namespace detail
2453
2454
2455
2456
2457
2459 * \brief Adds constructors and member functions for cl_image_format.
2460 *
2461 * \see cl_image_format
2463struct ImageFormat : public cl_image_format
2464{
2465 //! \brief Default constructor - performs no initialization.
2466 ImageFormat(){}
2467
2469 ImageFormat(cl_channel_order order, cl_channel_type type)
2470 {
2471 image_channel_order = order;
2472 image_channel_data_type = type;
2473 }
2474
2475 //! \brief Copy constructor.
2476 ImageFormat(const ImageFormat &other) { *this = other; }
2477
2480 {
2481 if (this != &rhs) {
2482 this->image_channel_data_type = rhs.image_channel_data_type;
2483 this->image_channel_order = rhs.image_channel_order;
2484 }
2485 return *this;
2486 }
2487};
2488
2492 * any underlying resources or data structures.
2493 *
2494 * \see cl_device_id
2495 */
2496class Device : public detail::Wrapper<cl_device_id>
2497{
2498private:
2499 static std::once_flag default_initialized_;
2500 static Device default_;
2501 static cl_int default_error_;
2502
2508 static void makeDefault();
2509
2515 static void makeDefaultProvided(const Device &p) {
2516 default_ = p;
2517 }
2518
2519public:
2520#ifdef CL_HPP_UNIT_TEST_ENABLE
2527 static void unitTestClearDefault() {
2528 default_ = Device();
2530#endif // #ifdef CL_HPP_UNIT_TEST_ENABLE
2531
2533 Device() : detail::Wrapper<cl_type>() { }
2534
2535 /*! \brief Constructor from cl_device_id.
2536 *
2537 * This simply copies the device ID value, which is an inexpensive operation.
2538 */
2539 explicit Device(const cl_device_id &device, bool retainObject = false) :
2540 detail::Wrapper<cl_type>(device, retainObject) { }
2541
2542 /*! \brief Returns the first device on the default context.
2543 *
2544 * \see Context::getDefault()
2545 */
2546 static Device getDefault(
2547 cl_int *errResult = nullptr)
2548 {
2549 std::call_once(default_initialized_, makeDefault);
2550 detail::errHandler(default_error_);
2551 if (errResult != nullptr) {
2552 *errResult = default_error_;
2553 }
2554 return default_;
2555 }
2556
2560 * Will only set the default if no default was previously created.
2561 * @return updated default device.
2562 * Should be compared to the passed value to ensure that it was updated.
2563 */
2564 static Device setDefault(const Device &default_device)
2565 {
2566 std::call_once(default_initialized_, makeDefaultProvided, std::cref(default_device));
2567 detail::errHandler(default_error_);
2568 return default_;
2569 }
2570
2571 /*! \brief Assignment operator from cl_device_id.
2572 *
2573 * This simply copies the device ID value, which is an inexpensive operation.
2574 */
2575 Device& operator = (const cl_device_id& rhs)
2576 {
2577 detail::Wrapper<cl_type>::operator=(rhs);
2578 return *this;
2579 }
2581
2583 template <typename T>
2584 cl_int getInfo(cl_device_info name, T* param) const
2585 {
2586 return detail::errHandler(
2587 detail::getInfo(CL_(clGetDeviceInfo), object_, name, param),
2588 __GET_DEVICE_INFO_ERR);
2589 }
2592 template <cl_device_info name> typename
2594 getInfo(cl_int* err = nullptr) const
2595 {
2596 typename detail::param_traits<
2597 detail::cl_device_info, name>::param_type param;
2598 cl_int result = getInfo(name, &param);
2599 if (err != nullptr) {
2600 *err = result;
2601 }
2602 return param;
2603 }
2604
2605#if CL_HPP_TARGET_OPENCL_VERSION >= 210
2608 * The resolution of the device timer may be queried with the
2609 * CL_DEVICE_PROFILING_TIMER_RESOLUTION query.
2610 * @return The host timer value.
2611 */
2612 cl_ulong getHostTimer(cl_int *error = nullptr)
2613 {
2614 cl_ulong retVal = 0;
2615 cl_int err =
2616 CL_(clGetHostTimer)(this->get(), &retVal);
2617 detail::errHandler(
2618 err,
2619 __GET_HOST_TIMER_ERR);
2620 if (error) {
2621 *error = err;
2622 }
2623 return retVal;
2624 }
2625
2632 * The resolution of the device timer may be queried with the
2633 * CL_DEVICE_PROFILING_TIMER_RESOLUTION query.
2634 * @return A pair of (device timer, host timer) timer values.
2635 */
2636 std::pair<cl_ulong, cl_ulong> getDeviceAndHostTimer(cl_int *error = nullptr)
2637 {
2638 std::pair<cl_ulong, cl_ulong> retVal;
2639 cl_int err =
2640 CL_(clGetDeviceAndHostTimer)(this->get(), &(retVal.first), &(retVal.second));
2641 detail::errHandler(
2642 err,
2643 __GET_DEVICE_AND_HOST_TIMER_ERR);
2644 if (error) {
2645 *error = err;
2646 }
2647 return retVal;
2648 }
2649#endif // #if CL_HPP_TARGET_OPENCL_VERSION >= 210
2650
2651#if CL_HPP_TARGET_OPENCL_VERSION >= 120
2653 cl_int createSubDevices(const cl_device_partition_property* properties,
2654 vector<Device>* devices);
2655#endif // defined (CL_HPP_TARGET_OPENCL_VERSION >= 120)
2656
2657#if defined(cl_ext_device_fission)
2659 cl_int createSubDevices(const cl_device_partition_property_ext* properties,
2660 vector<Device>* devices);
2661#endif // defined(cl_ext_device_fission)
2662};
2663
// One (device, build log) entry per device, as collected after a program
// build; the log type is whatever param_traits maps CL_PROGRAM_BUILD_LOG to.
using BuildLogType = vector<std::pair<cl::Device, typename detail::param_traits<detail::cl_program_build_info, CL_PROGRAM_BUILD_LOG>::param_type>>;
2665#if defined(CL_HPP_ENABLE_EXCEPTIONS)
/*! \brief Exception thrown when an OpenCL program build fails, carrying
 *  the per-device build logs captured at the point of failure.
 */
class BuildError : public Error
{
private:
    BuildLogType buildLogs;
public:
    BuildError(cl_int err, const char * errStr, const BuildLogType &vec) : Error(err, errStr), buildLogs(vec)
    {
    }

    //! \brief Returns a copy of the per-device build logs.
    BuildLogType getBuildLog() const
    {
        return buildLogs;
    }
};
2683namespace detail {
2684 static inline cl_int buildErrHandler(
2685 cl_int err,
2686 const char * errStr,
2687 const BuildLogType &buildLogs)
2688 {
2689 if (err != CL_SUCCESS) {
2690 throw BuildError(err, errStr, buildLogs);
2691 }
2692 return err;
2693 }
2694} // namespace detail
2695
2696#else
2697namespace detail {
2698 static inline cl_int buildErrHandler(
2699 cl_int err,
2700 const char * errStr,
2701 const BuildLogType &buildLogs)
2702 {
2703 (void)buildLogs; // suppress unused variable warning
2704 (void)errStr;
2705 return err;
2706 }
2707} // namespace detail
2708#endif // #if defined(CL_HPP_ENABLE_EXCEPTIONS)
2709
2710CL_HPP_DEFINE_STATIC_MEMBER_ std::once_flag Device::default_initialized_;
2711CL_HPP_DEFINE_STATIC_MEMBER_ Device Device::default_;
2712CL_HPP_DEFINE_STATIC_MEMBER_ cl_int Device::default_error_ = CL_SUCCESS;
2713
2717 * any underlying resources or data structures.
2718 *
2719 * \see cl_platform_id
2720 */
2721class Platform : public detail::Wrapper<cl_platform_id>
2722{
2723private:
2724 static std::once_flag default_initialized_;
2725 static Platform default_;
2726 static cl_int default_error_;
2727
    /*! \brief call_once target that lazily builds the default platform.
     *
     * Picks the first platform reported by clGetPlatformIDs(). Any
     * failure (including "no platforms") is recorded in default_error_
     * rather than thrown, because an exception escaping a call_once
     * invocation would not behave as we want.
     */
    static void makeDefault() {
        /* Throwing an exception from a call_once invocation does not do
         * what we wish, so we catch it and save the error.
         */
#if defined(CL_HPP_ENABLE_EXCEPTIONS)
        try
#endif
        {
            // If a default wasn't provided, generate one;
            // otherwise set the provided one.
            cl_uint n = 0;

            // First query: how many platforms exist?
            cl_int err = CL_(clGetPlatformIDs)(0, nullptr, &n);
            if (err != CL_SUCCESS) {
                default_error_ = err;
                return;
            }
            if (n == 0) {
                // No platforms at all: report as an invalid-platform error.
                default_error_ = CL_INVALID_PLATFORM;
                return;
            }

            // Second query: fetch the platform ids themselves.
            vector<cl_platform_id> ids(n);
            err = CL_(clGetPlatformIDs)(n, ids.data(), nullptr);
            if (err != CL_SUCCESS) {
                default_error_ = err;
                return;
            }

            // The first reported platform becomes the default.
            default_ = Platform(ids[0]);
        }
#if defined(CL_HPP_ENABLE_EXCEPTIONS)
        catch (cl::Error &e) {
            default_error_ = e.err();
        }
#endif
    }
2770
    /*! \brief call_once target that installs a user-supplied default platform. */
    static void makeDefaultProvided(const Platform &p) {
        default_ = p;
    }
2779
2780public:
2781#ifdef CL_HPP_UNIT_TEST_ENABLE
2788 static void unitTestClearDefault() {
2789 default_ = Platform();
2791#endif // #ifdef CL_HPP_UNIT_TEST_ENABLE
2792
2794 Platform() : detail::Wrapper<cl_type>() { }
2795
2799 * Defaults to false to maintain compatibility with
2800 * earlier versions.
2801 * This simply copies the platform ID value, which is an inexpensive operation.
2802 */
2803 explicit Platform(const cl_platform_id &platform, bool retainObject = false) :
2804 detail::Wrapper<cl_type>(platform, retainObject) { }
2805
2806 /*! \brief Assignment operator from cl_platform_id.
2807 *
2808 * This simply copies the platform ID value, which is an inexpensive operation.
2809 */
    Platform& operator = (const cl_platform_id& rhs)
    {
        // Delegates to the Wrapper assignment; platform ids are plain
        // handles, so this is an inexpensive copy.
        detail::Wrapper<cl_type>::operator=(rhs);
        return *this;
    }
2815
2816 static Platform getDefault(
2817 cl_int *errResult = nullptr)
2818 {
2819 std::call_once(default_initialized_, makeDefault);
2820 detail::errHandler(default_error_);
2821 if (errResult != nullptr) {
2822 *errResult = default_error_;
2823 }
2824 return default_;
2825 }
2826
2830 * Will only set the default if no default was previously created.
2831 * @return updated default platform.
2832 * Should be compared to the passed value to ensure that it was updated.
2833 */
2834 static Platform setDefault(const Platform &default_platform)
2835 {
2836 std::call_once(default_initialized_, makeDefaultProvided, std::cref(default_platform));
2837 detail::errHandler(default_error_);
2838 return default_;
2840
    //! \brief Wrapper for clGetPlatformInfo() writing the result into *param.
    template <typename T>
    cl_int getInfo(cl_platform_info name, T* param) const
    {
        return detail::errHandler(
            detail::getInfo(CL_(clGetPlatformInfo), object_, name, param),
            __GET_PLATFORM_INFO_ERR);
    }
2851 template <cl_platform_info name> typename
2853 getInfo(cl_int* err = nullptr) const
2854 {
2855 typename detail::param_traits<
2856 detail::cl_platform_info, name>::param_type param;
2857 cl_int result = getInfo(name, &param);
2858 if (err != nullptr) {
2859 *err = result;
2860 }
2861 return param;
2862 }
2863
2864 /*! \brief Gets a list of devices for this platform.
2865 *
2866 * Wraps clGetDeviceIDs().
2867 */
2868 cl_int getDevices(
2869 cl_device_type type,
2870 vector<Device>* devices) const
2871 {
2872 cl_uint n = 0;
2873 if( devices == nullptr ) {
2874 return detail::errHandler(CL_INVALID_ARG_VALUE, __GET_DEVICE_IDS_ERR);
2875 }
2876 cl_int err = CL_(clGetDeviceIDs)(object_, type, 0, nullptr, &n);
2877 if (err != CL_SUCCESS && err != CL_DEVICE_NOT_FOUND) {
2878 return detail::errHandler(err, __GET_DEVICE_IDS_ERR);
2879 }
2880
2881 vector<cl_device_id> ids(n);
2882 if (n>0) {
2883 err = CL_(clGetDeviceIDs)(object_, type, n, ids.data(), nullptr);
2884 if (err != CL_SUCCESS) {
2885 return detail::errHandler(err, __GET_DEVICE_IDS_ERR);
2886 }
2887 }
2888
2889 // Cannot trivially assign because we need to capture intermediates
2890 // with safe construction
2891 // We must retain things we obtain from the API to avoid releasing
2892 // API-owned objects.
2893 if (devices) {
2894 devices->resize(ids.size());
2895
2896 // Assign to param, constructing with retain behaviour
2897 // to correctly capture each underlying CL object
2898 for (size_type i = 0; i < ids.size(); i++) {
2899 (*devices)[i] = Device(ids[i], true);
2900 }
2901 }
2902 return CL_SUCCESS;
2903 }
2904
2905#if defined(CL_HPP_USE_DX_INTEROP)
    /*! \brief Gets a list of devices sharing the given Direct3D 10 object.
     *
     * Wraps clGetDeviceIDsFromD3D10KHR(). The extension entry point is
     * resolved via the CL_HPP_INIT_CL_EXT_FCN_PTR macros and cached in a
     * function-local static.
     */
    cl_int getDevices(
        cl_d3d10_device_source_khr d3d_device_source,
        void * d3d_object,
        cl_d3d10_device_set_khr d3d_device_set,
        vector<Device>* devices) const
    {
        typedef CL_API_ENTRY cl_int (CL_API_CALL *PFN_clGetDeviceIDsFromD3D10KHR)(
            cl_platform_id platform,
            cl_d3d10_device_source_khr d3d_device_source,
            void * d3d_object,
            cl_d3d10_device_set_khr d3d_device_set,
            cl_uint num_entries,
            cl_device_id * devices,
            cl_uint* num_devices);

        if( devices == nullptr ) {
            return detail::errHandler(CL_INVALID_ARG_VALUE, __GET_DEVICE_IDS_ERR);
        }

        // Resolve the extension entry point once; cached across calls.
        static PFN_clGetDeviceIDsFromD3D10KHR pfn_clGetDeviceIDsFromD3D10KHR = nullptr;
#if CL_HPP_TARGET_OPENCL_VERSION >= 120
        CL_HPP_INIT_CL_EXT_FCN_PTR_PLATFORM_(object_, clGetDeviceIDsFromD3D10KHR);
#endif
#if CL_HPP_MINIMUM_OPENCL_VERSION < 120
        CL_HPP_INIT_CL_EXT_FCN_PTR_(clGetDeviceIDsFromD3D10KHR);
#endif

        // First call: query the number of matching devices.
        cl_uint n = 0;
        cl_int err = pfn_clGetDeviceIDsFromD3D10KHR(
            object_,
            d3d_device_source,
            d3d_object,
            d3d_device_set,
            0,
            nullptr,
            &n);
        if (err != CL_SUCCESS) {
            return detail::errHandler(err, __GET_DEVICE_IDS_ERR);
        }

        // Second call: fetch the device ids themselves.
        vector<cl_device_id> ids(n);
        err = pfn_clGetDeviceIDsFromD3D10KHR(
            object_,
            d3d_device_source,
            d3d_object,
            d3d_device_set,
            n,
            ids.data(),
            nullptr);
        if (err != CL_SUCCESS) {
            return detail::errHandler(err, __GET_DEVICE_IDS_ERR);
        }

        // Cannot trivially assign because we need to capture intermediates
        // with safe construction
        // We must retain things we obtain from the API to avoid releasing
        // API-owned objects.
        if (devices) {
            devices->resize(ids.size());

            // Assign to param, constructing with retain behaviour
            // to correctly capture each underlying CL object
            for (size_type i = 0; i < ids.size(); i++) {
                (*devices)[i] = Device(ids[i], true);
            }
        }
        return CL_SUCCESS;
    }
2997#endif
2998
2999 /*! \brief Gets a list of available platforms.
3000 *
3001 * Wraps clGetPlatformIDs().
3002 */
3003 static cl_int get(
3004 vector<Platform>* platforms)
3005 {
3006 cl_uint n = 0;
3007
3008 if( platforms == nullptr ) {
3009 return detail::errHandler(CL_INVALID_ARG_VALUE, __GET_PLATFORM_IDS_ERR);
3010 }
3011
3012 cl_int err = CL_(clGetPlatformIDs)(0, nullptr, &n);
3013 if (err != CL_SUCCESS) {
3014 return detail::errHandler(err, __GET_PLATFORM_IDS_ERR);
3015 }
3016
3017 vector<cl_platform_id> ids(n);
3018 err = CL_(clGetPlatformIDs)(n, ids.data(), nullptr);
3019 if (err != CL_SUCCESS) {
3020 return detail::errHandler(err, __GET_PLATFORM_IDS_ERR);
3021 }
3022
3023 if (platforms) {
3024 platforms->resize(ids.size());
3025
3026 // Platforms don't reference count
3027 for (size_type i = 0; i < ids.size(); i++) {
3028 (*platforms)[i] = Platform(ids[i]);
3029 }
3030 }
3031 return CL_SUCCESS;
3032 }
3033
3034 /*! \brief Gets the first available platform.
3035 *
3036 * Wraps clGetPlatformIDs(), returning the first result.
3037 */
3038 static cl_int get(
3039 Platform * platform)
3040 {
3041 cl_int err;
3042 Platform default_platform = Platform::getDefault(&err);
3043 if (platform) {
3044 *platform = default_platform;
3045 }
3046 return err;
3047 }
3048
3053 * Throws an exception if no platforms are available
3054 * or an error condition occurs.
3055 * Wraps clGetPlatformIDs(), returning the first result.
3056 */
3057 static Platform get(
3058 cl_int * errResult = nullptr)
3059 {
3060 cl_int err;
3061 Platform default_platform = Platform::getDefault(&err);
3062 if (errResult) {
3063 *errResult = err;
3064 }
3065 return default_platform;
3066 }
3068#if CL_HPP_TARGET_OPENCL_VERSION >= 120
3070 cl_int
3072 {
3073 return CL_(clUnloadPlatformCompiler)(object_);
3074 }
3075#endif // CL_HPP_TARGET_OPENCL_VERSION >= 120
3076}; // class Platform
3077
3078#if CL_HPP_TARGET_OPENCL_VERSION >= 120
3080inline cl_int Device::createSubDevices(const cl_device_partition_property* properties,
3081 vector<Device>* devices)
3082{
3083 cl_uint n = 0;
3084 cl_int err = CL_(clCreateSubDevices)(object_, properties, 0, nullptr, &n);
3085 if (err != CL_SUCCESS)
3086 {
3087 return detail::errHandler(err, __CREATE_SUB_DEVICES_ERR);
3088 }
3089
3090 vector<cl_device_id> ids(n);
3091 err = CL_(clCreateSubDevices)(object_, properties, n, ids.data(), nullptr);
3092 if (err != CL_SUCCESS)
3093 {
3094 return detail::errHandler(err, __CREATE_SUB_DEVICES_ERR);
3095 }
3096
3097 // Cannot trivially assign because we need to capture intermediates
3098 // with safe construction
3099 if (devices)
3100 {
3101 devices->resize(ids.size());
3102
3103 // Assign to param, constructing with retain behaviour
3104 // to correctly capture each underlying CL object
3105 for (size_type i = 0; i < ids.size(); i++)
3106 {
3107 // We do not need to retain because this device is being created
3108 // by the runtime
3109 (*devices)[i] = Device(ids[i], false);
3110 }
3111 }
3112
3113 return CL_SUCCESS;
3114}
3115#endif // defined (CL_HPP_TARGET_OPENCL_VERSION >= 120)
3116
3117#if defined(cl_ext_device_fission)
/*! \brief Partitions the device using the cl_ext_device_fission extension.
 *
 * Wraps clCreateSubDevicesEXT(); the extension entry point is resolved
 * through the device's platform where the target OpenCL version allows.
 */
inline cl_int Device::createSubDevices(const cl_device_partition_property_ext* properties,
    vector<Device>* devices)
{
#if CL_HPP_TARGET_OPENCL_VERSION >= 120
    // Resolve the extension entry point from this device's platform.
    cl::Device device(object_);
    cl_platform_id platform = device.getInfo<CL_DEVICE_PLATFORM>()();
    CL_HPP_INIT_CL_EXT_FCN_PTR_PLATFORM_(platform, clCreateSubDevicesEXT);
#endif
#if CL_HPP_MINIMUM_OPENCL_VERSION < 120
    CL_HPP_INIT_CL_EXT_FCN_PTR_(clCreateSubDevicesEXT);
#endif

    // Two-call pattern: count the sub-devices, then create them.
    cl_uint n = 0;
    cl_int err = pfn_clCreateSubDevicesEXT(object_, properties, 0, nullptr, &n);
    if (err != CL_SUCCESS)
    {
        return detail::errHandler(err, __CREATE_SUB_DEVICES_ERR);
    }

    vector<cl_device_id> ids(n);
    err =
        pfn_clCreateSubDevicesEXT(object_, properties, n, ids.data(), nullptr);
    if (err != CL_SUCCESS)
    {
        return detail::errHandler(err, __CREATE_SUB_DEVICES_ERR);
    }
    // Cannot trivially assign because we need to capture intermediates
    // with safe construction
    if (devices)
    {
        devices->resize(ids.size());

        // Assign to param, constructing with retain behaviour
        // to correctly capture each underlying CL object
        for (size_type i = 0; i < ids.size(); i++)
        {
            // We do not need to retain because this device is being created
            // by the runtime
            (*devices)[i] = Device(ids[i], false);
        }
    }

    return CL_SUCCESS;
}
3163#endif // defined(cl_ext_device_fission)
3164
3165CL_HPP_DEFINE_STATIC_MEMBER_ std::once_flag Platform::default_initialized_;
3166CL_HPP_DEFINE_STATIC_MEMBER_ Platform Platform::default_;
3167CL_HPP_DEFINE_STATIC_MEMBER_ cl_int Platform::default_error_ = CL_SUCCESS;
3168
3169
3173#if defined(CL_USE_DEPRECATED_OPENCL_1_1_APIS)
3178inline CL_API_PREFIX__VERSION_1_1_DEPRECATED cl_int
3179UnloadCompiler() CL_API_SUFFIX__VERSION_1_1_DEPRECATED;
3180inline cl_int
3182{
3183 return CL_(clUnloadCompiler)();
3184}
3185#endif // #if defined(CL_USE_DEPRECATED_OPENCL_1_1_APIS)
3186
3187
3188#if defined(cl_ext_image_requirements_info)
/*! \brief Scoped aliases for the cl_ext_image_requirements_info query names,
 * usable with Context::getImageRequirementsInfoExt().
 */
enum ImageRequirementsInfoExt : cl_image_requirements_info_ext
{
    RowPitchAlign = CL_IMAGE_REQUIREMENTS_ROW_PITCH_ALIGNMENT_EXT,
    BaseAddAlign = CL_IMAGE_REQUIREMENTS_BASE_ADDRESS_ALIGNMENT_EXT,
    Size = CL_IMAGE_REQUIREMENTS_SIZE_EXT,
    MaxWidth = CL_IMAGE_REQUIREMENTS_MAX_WIDTH_EXT,
    MaxHeight = CL_IMAGE_REQUIREMENTS_MAX_HEIGHT_EXT,
    MaxDepth = CL_IMAGE_REQUIREMENTS_MAX_DEPTH_EXT,
    MaxArraySize = CL_IMAGE_REQUIREMENTS_MAX_ARRAY_SIZE_EXT,
#if defined(cl_ext_image_from_buffer)
    SlicePitchAlign = CL_IMAGE_REQUIREMENTS_SLICE_PITCH_ALIGNMENT_EXT,
#endif
};
3202
3203#endif // cl_ext_image_requirements_info
3204
3205
3210 * clRetainContext() and clReleaseContext().
3211 *
3212 * \see cl_context
3213 */
3214class Context
3215 : public detail::Wrapper<cl_context>
3216{
3217private:
3218 static std::once_flag default_initialized_;
3219 static Context default_;
3220 static cl_int default_error_;
3221
    /*! \brief call_once target that lazily builds the default context.
     *
     * Creates a context over CL_DEVICE_TYPE_DEFAULT on the default
     * platform (no explicit platform on Apple builds). Errors are
     * recorded in default_error_ rather than thrown, because an
     * exception escaping call_once would not behave as we want.
     */
    static void makeDefault() {
        /* Throwing an exception from a call_once invocation does not do
         * what we wish, so we catch it and save the error.
         */
#if defined(CL_HPP_ENABLE_EXCEPTIONS)
        try
#endif
        {
#if !defined(__APPLE__) && !defined(__MACOS)
            // Anchor the context to the default platform explicitly.
            const Platform &p = Platform::getDefault();
            cl_platform_id defaultPlatform = p();
            cl_context_properties properties[3] = {
                CL_CONTEXT_PLATFORM, (cl_context_properties)defaultPlatform, 0
            };
#else // #if !defined(__APPLE__) && !defined(__MACOS)
            // On Apple platforms a null properties list selects the
            // implementation's default.
            cl_context_properties *properties = nullptr;
#endif // #if !defined(__APPLE__) && !defined(__MACOS)

            default_ = Context(
                CL_DEVICE_TYPE_DEFAULT,
                properties,
                nullptr,
                nullptr,
                &default_error_);
        }
#if defined(CL_HPP_ENABLE_EXCEPTIONS)
        catch (cl::Error &e) {
            default_error_ = e.err();
        }
#endif
    }
3258
3259
    /*! \brief call_once target that installs a user-supplied default context. */
    static void makeDefaultProvided(const Context &c) {
        default_ = c;
    }
3268
3269#if defined(cl_ext_image_requirements_info)
3270 struct ImageRequirementsInfo {
3271
3272 ImageRequirementsInfo(cl_mem_flags f, const cl_mem_properties* mem_properties, const ImageFormat* format, const cl_image_desc* desc)
3273 {
3274 flags = f;
3275 properties = mem_properties;
3276 image_format = format;
3277 image_desc = desc;
3278 }
3279
3280 cl_mem_flags flags = 0;
3281 const cl_mem_properties* properties;
3282 const ImageFormat* image_format;
3283 const cl_image_desc* image_desc;
3284 };
3285
    /*! \brief Adapter used by getImageRequirementsInfoExt() so that
     *  detail::getInfo can drive clGetImageRequirementsInfoEXT().
     *
     * Resolves the extension entry point (via the context's first device's
     * platform where the target version allows) and forwards the query.
     * Returns CL_INVALID_OPERATION if the entry point is unavailable.
     */
    static cl_int getImageRequirementsInfoExtHelper(const Context &context,
        const ImageRequirementsInfo &info,
        cl_image_requirements_info_ext param_name,
        size_type param_value_size,
        void* param_value,
        size_type* param_value_size_ret)
    {

#if CL_HPP_TARGET_OPENCL_VERSION >= 120
        // Use the platform of the context's first device to resolve the
        // extension function pointer.
        Device device = context.getInfo<CL_CONTEXT_DEVICES>().at(0);
        cl_platform_id platform = device.getInfo<CL_DEVICE_PLATFORM>()();
        CL_HPP_INIT_CL_EXT_FCN_PTR_PLATFORM_(platform, clGetImageRequirementsInfoEXT);
#else
        CL_HPP_INIT_CL_EXT_FCN_PTR_(clGetImageRequirementsInfoEXT);
#endif

        if (pfn_clGetImageRequirementsInfoEXT == nullptr) {
            return detail::errHandler(CL_INVALID_OPERATION, __GET_IMAGE_REQUIREMENT_INFO_EXT_ERR);
        }

        return detail::errHandler(
            pfn_clGetImageRequirementsInfoEXT(context(), info.properties,
                info.flags, info.image_format, info.image_desc, param_name,
                param_value_size, param_value, param_value_size_ret),
            __GET_IMAGE_REQUIREMENT_INFO_EXT_ERR);
    }
3312#endif // cl_ext_image_requirements_info
3313
3314public:
3315#ifdef CL_HPP_UNIT_TEST_ENABLE
    /*! \brief Reset the default context so unit tests can re-initialize it. */
    static void unitTestClearDefault() {
        default_ = Context();
    }
3325#endif // #ifdef CL_HPP_UNIT_TEST_ENABLE
3326
3327 /*! \brief Constructs a context including a list of specified devices.
3328 *
3329 * Wraps clCreateContext().
3330 */
3331 Context(
3332 const vector<Device>& devices,
3333 const cl_context_properties* properties = nullptr,
3334 void (CL_CALLBACK * notifyFptr)(
3335 const char *,
3336 const void *,
3337 size_type,
3338 void *) = nullptr,
3339 void* data = nullptr,
3340 cl_int* err = nullptr)
3341 {
3342 cl_int error;
3343
3344 size_type numDevices = devices.size();
3345 vector<cl_device_id> deviceIDs(numDevices);
3346
3347 for( size_type deviceIndex = 0; deviceIndex < numDevices; ++deviceIndex ) {
3348 deviceIDs[deviceIndex] = (devices[deviceIndex])();
3349 }
3350
3351 object_ = CL_(clCreateContext)(
3352 properties, (cl_uint) numDevices,
3353 deviceIDs.data(),
3354 notifyFptr, data, &error);
3355
3356 detail::errHandler(error, __CREATE_CONTEXT_ERR);
3357 if (err != nullptr) {
3358 *err = error;
3359 }
3360 }
3361
3362 /*! \brief Constructs a context including a specific device.
3363 *
3364 * Wraps clCreateContext().
3365 */
3366 Context(
3367 const Device& device,
3368 const cl_context_properties* properties = nullptr,
3369 void (CL_CALLBACK * notifyFptr)(
3370 const char *,
3371 const void *,
3372 size_type,
3373 void *) = nullptr,
3374 void* data = nullptr,
3375 cl_int* err = nullptr)
3376 {
3377 cl_int error;
3378
3379 cl_device_id deviceID = device();
3380
3381 object_ = CL_(clCreateContext)(
3382 properties, 1,
3383 &deviceID,
3384 notifyFptr, data, &error);
3385
3386 detail::errHandler(error, __CREATE_CONTEXT_ERR);
3387 if (err != nullptr) {
3388 *err = error;
3389 }
3390 }
3391
3392 /*! \brief Constructs a context including all or a subset of devices of a specified type.
3393 *
3394 * Wraps clCreateContextFromType().
3395 */
    /*! \brief Constructs a context including all or a subset of devices of a
     * specified type.
     *
     * Wraps clCreateContextFromType(). When no properties are supplied
     * (non-Apple builds), probes the available platforms for one exposing a
     * device of the requested type and anchors the context to it, since the
     * C API cannot be given an empty platform property.
     */
    Context(
        cl_device_type type,
        const cl_context_properties* properties = nullptr,
        void (CL_CALLBACK * notifyFptr)(
            const char *,
            const void *,
            size_type,
            void *) = nullptr,
        void* data = nullptr,
        cl_int* err = nullptr)
    {
        cl_int error;

#if !defined(__APPLE__) && !defined(__MACOS)
        cl_context_properties prop[4] = {CL_CONTEXT_PLATFORM, 0, 0, 0 };

        if (properties == nullptr) {
            // Get a valid platform ID as we cannot send in a blank one
            vector<Platform> platforms;
            error = Platform::get(&platforms);
            if (error != CL_SUCCESS) {
                detail::errHandler(error, __CREATE_CONTEXT_FROM_TYPE_ERR);
                if (err != nullptr) {
                    *err = error;
                }
                return;
            }

            // Check the platforms we found for a device of our specified type
            cl_context_properties platform_id = 0;
            for (unsigned int i = 0; i < platforms.size(); i++) {

                vector<Device> devices;

#if defined(CL_HPP_ENABLE_EXCEPTIONS)
                try {
#endif

                    error = platforms[i].getDevices(type, &devices);

#if defined(CL_HPP_ENABLE_EXCEPTIONS)
                } catch (cl::Error& e) {
                    error = e.err();
                }
                // Catch if exceptions are enabled as we don't want to exit if first platform has no devices of type
                // We do error checking next anyway, and can throw there if needed
#endif

                // Only squash CL_SUCCESS and CL_DEVICE_NOT_FOUND; any other
                // error is reported but the search continues.
                if (error != CL_SUCCESS && error != CL_DEVICE_NOT_FOUND) {
                    detail::errHandler(error, __CREATE_CONTEXT_FROM_TYPE_ERR);
                    if (err != nullptr) {
                        *err = error;
                    }
                }

                // First platform with at least one matching device wins.
                if (devices.size() > 0) {
                    platform_id = (cl_context_properties)platforms[i]();
                    break;
                }
            }

            // No platform had a device of the requested type.
            if (platform_id == 0) {
                detail::errHandler(CL_DEVICE_NOT_FOUND, __CREATE_CONTEXT_FROM_TYPE_ERR);
                if (err != nullptr) {
                    *err = CL_DEVICE_NOT_FOUND;
                }
                return;
            }

            prop[1] = platform_id;
            properties = &prop[0];
        }
#endif
        object_ = CL_(clCreateContextFromType)(
            properties, type, notifyFptr, data, &error);

        detail::errHandler(error, __CREATE_CONTEXT_FROM_TYPE_ERR);
        if (err != nullptr) {
            *err = error;
        }
    }
3478
3479
3480 /*! \brief Returns a singleton context including all devices of CL_DEVICE_TYPE_DEFAULT.
3481 *
3482 * \note All calls to this function return the same cl_context as the first.
3483 */
3484 static Context getDefault(cl_int * err = nullptr)
3485 {
3486 std::call_once(default_initialized_, makeDefault);
3487 detail::errHandler(default_error_);
3488 if (err != nullptr) {
3489 *err = default_error_;
3490 }
3491 return default_;
3492 }
3493
3497 * Will only set the default if no default was previously created.
3498 * @return updated default context.
3499 * Should be compared to the passed value to ensure that it was updated.
3500 */
    static Context setDefault(const Context &default_context)
    {
        // Only the first initialization wins; compare the returned context
        // with the argument to see whether the update took effect.
        std::call_once(default_initialized_, makeDefaultProvided, std::cref(default_context));
        detail::errHandler(default_error_);
        return default_;
    }
3507
3509 Context() : detail::Wrapper<cl_type>() { }
3510
3513 * This effectively transfers ownership of a refcount on the cl_context
3514 * into the new Context object.
3515 */
3516 explicit Context(const cl_context& context, bool retainObject = false) :
3517 detail::Wrapper<cl_type>(context, retainObject) { }
3518
3521 * This effectively transfers ownership of a refcount on the rhs and calls
3522 * clReleaseContext() on the value previously held by this instance.
3523 */
3524 Context& operator = (const cl_context& rhs)
3525 {
3526 detail::Wrapper<cl_type>::operator=(rhs);
3527 return *this;
3529
    //! \brief Wrapper for clGetContextInfo() writing the result into *param.
    template <typename T>
    cl_int getInfo(cl_context_info name, T* param) const
    {
        return detail::errHandler(
            detail::getInfo(CL_(clGetContextInfo), object_, name, param),
            __GET_CONTEXT_INFO_ERR);
    }
3540 template <cl_context_info name> typename
3542 getInfo(cl_int* err = nullptr) const
3543 {
3544 typename detail::param_traits<
3545 detail::cl_context_info, name>::param_type param;
3546 cl_int result = getInfo(name, &param);
3547 if (err != nullptr) {
3548 *err = result;
3549 }
3550 return param;
3551 }
3552
3553 /*! \brief Gets a list of supported image formats.
3554 *
3555 * Wraps clGetSupportedImageFormats().
3556 */
3558 cl_mem_flags flags,
3559 cl_mem_object_type type,
3560 vector<ImageFormat>* formats) const
3561 {
3562 cl_uint numEntries;
3563
3564 if (!formats) {
3565 return CL_SUCCESS;
3566 }
3567
3568 cl_int err = CL_(clGetSupportedImageFormats)(
3569 object_,
3570 flags,
3571 type,
3572 0,
3573 nullptr,
3574 &numEntries);
3575 if (err != CL_SUCCESS) {
3576 return detail::errHandler(err, __GET_SUPPORTED_IMAGE_FORMATS_ERR);
3577 }
3578
3579 if (numEntries > 0) {
3580 vector<ImageFormat> value(numEntries);
3581 err = CL_(clGetSupportedImageFormats)(
3582 object_,
3583 flags,
3584 type,
3585 numEntries,
3586 (cl_image_format*)value.data(),
3587 nullptr);
3588 if (err != CL_SUCCESS) {
3589 return detail::errHandler(err, __GET_SUPPORTED_IMAGE_FORMATS_ERR);
3590 }
3591
3592 formats->assign(value.begin(), value.end());
3593 }
3594 else {
3595 // If no values are being returned, ensure an empty vector comes back
3596 formats->clear();
3597 }
3598
3599 return CL_SUCCESS;
3600 }
3601
3602#if defined(cl_ext_image_requirements_info)
    /*! \brief Queries one cl_ext_image_requirements_info property for a
     * prospective image, writing the result into *param.
     *
     * Bundles the image description into an ImageRequirementsInfo and
     * forwards through detail::getInfo to the extension helper.
     */
    template <typename T>
    cl_int getImageRequirementsInfoExt(cl_image_requirements_info_ext name,
        T* param,
        cl_mem_flags flags = 0,
        const cl_mem_properties* properties = nullptr,
        const ImageFormat* image_format = nullptr,
        const cl_image_desc* image_desc = nullptr) const
    {
        ImageRequirementsInfo imageInfo = {flags, properties, image_format, image_desc};

        return detail::errHandler(
            detail::getInfo(
                Context::getImageRequirementsInfoExtHelper, *this, imageInfo, name, param),
            __GET_IMAGE_REQUIREMENT_INFO_EXT_ERR);
    }
3618
    /*! \brief Typed convenience overload: deduces the result type from the
     * template parameter and reports the error via *err when provided.
     */
    template <cl_image_requirements_info_ext type> typename
    detail::param_traits<detail::cl_image_requirements_info_ext, type>::param_type
    getImageRequirementsInfoExt(cl_mem_flags flags = 0,
        const cl_mem_properties* properties = nullptr,
        const ImageFormat* image_format = nullptr,
        const cl_image_desc* image_desc = nullptr,
        cl_int* err = nullptr) const
    {
        typename detail::param_traits<
        detail::cl_image_requirements_info_ext, type>::param_type param;
        cl_int result = getImageRequirementsInfoExt(type, &param, flags, properties, image_format, image_desc);
        if (err != nullptr) {
            *err = result;
        }
        return param;
    }
3635#endif // cl_ext_image_requirements_info
3636
3637#if CL_HPP_TARGET_OPENCL_VERSION >= 300
3644 * callback functions are called in the reverse order in which they were registered.
3645 * If a context callback function was specified when context was created,
3646 * it will not be called after any context destructor callback is called.
3647 */
    /*! \brief Registers a destructor callback on the underlying cl_context.
     *
     * Wraps clSetContextDestructorCallback() (OpenCL 3.0).
     */
    cl_int setDestructorCallback(
        void (CL_CALLBACK * pfn_notify)(cl_context, void *),
        void * user_data = nullptr)
    {
        return detail::errHandler(
            CL_(clSetContextDestructorCallback)(
                object_,
                pfn_notify,
                user_data),
            __SET_CONTEXT_DESCTRUCTOR_CALLBACK_ERR);
    }
3659#endif // CL_HPP_TARGET_OPENCL_VERSION >= 300
3660};
3661
/*! \brief call_once target that lazily selects the default device.
 *
 * Takes the first device of the default context. Errors are recorded in
 * Device::default_error_ rather than thrown, because an exception escaping
 * a call_once invocation would not behave as we want.
 */
inline void Device::makeDefault()
{
    /* Throwing an exception from a call_once invocation does not do
     * what we wish, so we catch it and save the error.
     */
#if defined(CL_HPP_ENABLE_EXCEPTIONS)
    try
#endif
    {
        cl_int error = 0;

        Context context = Context::getDefault(&error);
        detail::errHandler(error, __CREATE_CONTEXT_ERR);

        if (error != CL_SUCCESS) {
            default_error_ = error;
        }
        else {
            // First device of the default context becomes the default device.
            default_ = context.getInfo<CL_CONTEXT_DEVICES>()[0];
            default_error_ = CL_SUCCESS;
        }
    }
#if defined(CL_HPP_ENABLE_EXCEPTIONS)
    catch (cl::Error &e) {
        default_error_ = e.err();
    }
#endif
}
3690
3691CL_HPP_DEFINE_STATIC_MEMBER_ std::once_flag Context::default_initialized_;
3692CL_HPP_DEFINE_STATIC_MEMBER_ Context Context::default_;
3693CL_HPP_DEFINE_STATIC_MEMBER_ cl_int Context::default_error_ = CL_SUCCESS;
3694
3699 * clRetainEvent() and clReleaseEvent().
3700 *
3701 * \see cl_event
3702 */
3703class Event : public detail::Wrapper<cl_event>
3704{
3705public:
3708
3713 * earlier versions.
3714 * This effectively transfers ownership of a refcount on the cl_event
3715 * into the new Event object.
3716 */
3717 explicit Event(const cl_event& event, bool retainObject = false) :
3718 detail::Wrapper<cl_type>(event, retainObject) { }
3719
3722 * This effectively transfers ownership of a refcount on the rhs and calls
3723 * clReleaseEvent() on the value previously held by this instance.
3724 */
3725 Event& operator = (const cl_event& rhs)
3726 {
3727 detail::Wrapper<cl_type>::operator=(rhs);
3728 return *this;
3730
    //! \brief Wrapper for clGetEventInfo() writing the result into *param.
    template <typename T>
    cl_int getInfo(cl_event_info name, T* param) const
    {
        return detail::errHandler(
            detail::getInfo(CL_(clGetEventInfo), object_, name, param),
            __GET_EVENT_INFO_ERR);
    }
3741 template <cl_event_info name> typename
3743 getInfo(cl_int* err = nullptr) const
3744 {
3745 typename detail::param_traits<
3746 detail::cl_event_info, name>::param_type param;
3747 cl_int result = getInfo(name, &param);
3748 if (err != nullptr) {
3749 *err = result;
3750 }
3751 return param;
3753
    //! \brief Wrapper for clGetEventProfilingInfo() writing the result into *param.
    template <typename T>
    cl_int getProfilingInfo(cl_profiling_info name, T* param) const
    {
        return detail::errHandler(detail::getInfo(
            CL_(clGetEventProfilingInfo), object_, name, param),
            __GET_EVENT_PROFILE_INFO_ERR);
    }
3764 template <cl_profiling_info name> typename
3766 getProfilingInfo(cl_int* err = nullptr) const
3767 {
3768 typename detail::param_traits<
3769 detail::cl_profiling_info, name>::param_type param;
3770 cl_int result = getProfilingInfo(name, &param);
3771 if (err != nullptr) {
3772 *err = result;
3773 }
3774 return param;
3775 }
3776
3777 /*! \brief Blocks the calling thread until this event completes.
3778 *
3779 * Wraps clWaitForEvents().
3780 */
    cl_int wait() const
    {
        // Waits on just this one event.
        return detail::errHandler(
            CL_(clWaitForEvents)(1, &object_),
            __WAIT_FOR_EVENTS_ERR);
    }
3787
3788#if CL_HPP_TARGET_OPENCL_VERSION >= 110
3789 /*! \brief Registers a user callback function for a specific command execution status.
3790 *
3791 * Wraps clSetEventCallback().
3792 */
    /*! \brief Registers a user callback for a command execution status.
     *
     * Wraps clSetEventCallback() (OpenCL 1.1). \a type is the execution
     * status the callback fires on; \a user_data is passed through to it.
     */
    cl_int setCallback(
        cl_int type,
        void (CL_CALLBACK * pfn_notify)(cl_event, cl_int, void *),
        void * user_data = nullptr)
    {
        return detail::errHandler(
            CL_(clSetEventCallback)(
                object_,
                type,
                pfn_notify,
                user_data),
            __SET_EVENT_CALLBACK_ERR);
    }
3806#endif // CL_HPP_TARGET_OPENCL_VERSION >= 110
3807
3810 * Wraps clWaitForEvents().
3811 */
3812 static cl_int
3813 waitForEvents(const vector<Event>& events)
3814 {
3815 static_assert(sizeof(cl::Event) == sizeof(cl_event),
3816 "Size of cl::Event must be equal to size of cl_event");
3817
3818 return detail::errHandler(
3819 CL_(clWaitForEvents)(
3820 (cl_uint) events.size(), (events.size() > 0) ? (const cl_event*)&events.front() : nullptr),
3821 __WAIT_FOR_EVENTS_ERR);
3822 }
3823};
3824
3825#if CL_HPP_TARGET_OPENCL_VERSION >= 110
3826/*! \brief Class interface for user events (a subset of cl_event's).
3827 *
3828 * See Event for details about copy semantics, etc.
3829 */
3830class UserEvent : public Event
3831{
3832public:
3833 /*! \brief Constructs a user event on a given context.
3834 *
3835 * Wraps clCreateUserEvent().
3836 */
3837 UserEvent(
3838 const Context& context,
3839 cl_int * err = nullptr)
3840 {
3841 cl_int error;
3842 object_ = CL_(clCreateUserEvent)(
3843 context(),
3844 &error);
3845
3846 detail::errHandler(error, __CREATE_USER_EVENT_ERR);
3847 if (err != nullptr) {
3848 *err = error;
3850 }
3851
3853 UserEvent() : Event() { }
3854
3855 /*! \brief Sets the execution status of a user event object.
3856 *
3857 * Wraps clSetUserEventStatus().
3858 */
    cl_int setStatus(cl_int status)
    {
        // Transitions the user event to the given execution status.
        return detail::errHandler(
            CL_(clSetUserEventStatus)(object_,status),
            __SET_USER_EVENT_STATUS_ERR);
    }
3865};
3866#endif // CL_HPP_TARGET_OPENCL_VERSION >= 110
3867
3872inline static cl_int
3873WaitForEvents(const vector<Event>& events)
3874{
3875 return detail::errHandler(
3876 CL_(clWaitForEvents)(
3877 (cl_uint) events.size(), (events.size() > 0) ? (const cl_event*)&events.front() : nullptr),
3878 __WAIT_FOR_EVENTS_ERR);
3879}
3880
3885 * clRetainMemObject() and clReleaseMemObject().
3886 *
3887 * \see cl_mem
3888 */
3889class Memory : public detail::Wrapper<cl_mem>
3890{
3891public:
3894
3902 * earlier versions.
3903 *
3904 * See Memory for further details.
3905 */
3906 explicit Memory(const cl_mem& memory, bool retainObject) :
3907 detail::Wrapper<cl_type>(memory, retainObject) { }
3908
3911 * This effectively transfers ownership of a refcount on the rhs and calls
3912 * clReleaseMemObject() on the value previously held by this instance.
3913 */
3914 Memory& operator = (const cl_mem& rhs)
3915 {
3916 detail::Wrapper<cl_type>::operator=(rhs);
3917 return *this;
3919
    //! \brief Wrapper for clGetMemObjectInfo() writing the result into *param.
    template <typename T>
    cl_int getInfo(cl_mem_info name, T* param) const
    {
        return detail::errHandler(
            detail::getInfo(CL_(clGetMemObjectInfo), object_, name, param),
            __GET_MEM_OBJECT_INFO_ERR);
    }
3930 template <cl_mem_info name> typename
3932 getInfo(cl_int* err = nullptr) const
3933 {
3934 typename detail::param_traits<
3935 detail::cl_mem_info, name>::param_type param;
3936 cl_int result = getInfo(name, &param);
3937 if (err != nullptr) {
3938 *err = result;
3939 }
3940 return param;
3941 }
3942
3943#if CL_HPP_TARGET_OPENCL_VERSION >= 110
3953 * \note
3954 * The registered callbacks are associated with the underlying cl_mem
3955 * value - not the Memory class instance.
3956 */
    /*! \brief Registers a destructor callback on the underlying cl_mem.
     *
     * Wraps clSetMemObjectDestructorCallback() (OpenCL 1.1). The callback
     * is associated with the cl_mem value, not this wrapper instance.
     */
    cl_int setDestructorCallback(
        void (CL_CALLBACK * pfn_notify)(cl_mem, void *),
        void * user_data = nullptr)
    {
        return detail::errHandler(
            CL_(clSetMemObjectDestructorCallback)(
                object_,
                pfn_notify,
                user_data),
            __SET_MEM_OBJECT_DESTRUCTOR_CALLBACK_ERR);
    }
3968#endif // CL_HPP_TARGET_OPENCL_VERSION >= 110
3969
3970};
3971
// Pre-declare copy functions
class Buffer;
// Host range -> buffer, using the default command queue.
template< typename IteratorType >
cl_int copy( IteratorType startIterator, IteratorType endIterator, cl::Buffer &buffer );
// Buffer -> host range, using the default command queue.
template< typename IteratorType >
cl_int copy( const cl::Buffer &buffer, IteratorType startIterator, IteratorType endIterator );
// Host range -> buffer, using the supplied command queue.
template< typename IteratorType >
cl_int copy( const CommandQueue &queue, IteratorType startIterator, IteratorType endIterator, cl::Buffer &buffer );
// Buffer -> host range, using the supplied command queue.
template< typename IteratorType >
cl_int copy( const CommandQueue &queue, const cl::Buffer &buffer, IteratorType startIterator, IteratorType endIterator );
3982
3984#if CL_HPP_TARGET_OPENCL_VERSION >= 200
3985namespace detail
3986{
3987 class SVMTraitNull
3988 {
3989 public:
3990 static cl_svm_mem_flags getSVMMemFlags()
3991 {
3992 return 0;
3993 }
3995} // namespace detail
3996
3997template<class Trait = detail::SVMTraitNull>
3998class SVMTraitReadWrite
3999{
4000public:
4001 static cl_svm_mem_flags getSVMMemFlags()
4002 {
4003 return CL_MEM_READ_WRITE |
4004 Trait::getSVMMemFlags();
4006};
4007
4008template<class Trait = detail::SVMTraitNull>
4009class SVMTraitReadOnly
4010{
4011public:
4012 static cl_svm_mem_flags getSVMMemFlags()
4013 {
4014 return CL_MEM_READ_ONLY |
4015 Trait::getSVMMemFlags();
4017};
4018
4019template<class Trait = detail::SVMTraitNull>
4020class SVMTraitWriteOnly
4021{
4022public:
4023 static cl_svm_mem_flags getSVMMemFlags()
4024 {
4025 return CL_MEM_WRITE_ONLY |
4026 Trait::getSVMMemFlags();
4028};
4029
4030template<class Trait = SVMTraitReadWrite<>>
4031class SVMTraitCoarse
4032{
4033public:
4034 static cl_svm_mem_flags getSVMMemFlags()
4035 {
4036 return Trait::getSVMMemFlags();
4038};
4039
4040template<class Trait = SVMTraitReadWrite<>>
4041class SVMTraitFine
4042{
4043public:
4044 static cl_svm_mem_flags getSVMMemFlags()
4045 {
4046 return CL_MEM_SVM_FINE_GRAIN_BUFFER |
4047 Trait::getSVMMemFlags();
4049};
4050
4051template<class Trait = SVMTraitReadWrite<>>
4052class SVMTraitAtomic
4053{
4054public:
4055 static cl_svm_mem_flags getSVMMemFlags()
4056 {
4057 return
4058 CL_MEM_SVM_FINE_GRAIN_BUFFER |
4059 CL_MEM_SVM_ATOMICS |
4060 Trait::getSVMMemFlags();
4061 }
4062};
4063
// Pre-declare SVM map function
// Maps an SVM pointer for host access on the default queue; defined later
// in this file. Needed here so SVMAllocator::allocate can map coarse-grained
// allocations.
template<typename T>
inline cl_int enqueueMapSVM(
    T* ptr,
    cl_bool blocking,
    cl_map_flags flags,
    size_type size,
    const vector<Event>* events = nullptr,
    Event* event = nullptr);
4073
/*! \brief STL-compatible allocator backed by OpenCL shared virtual memory.
 *
 *  Rather than supporting shared_ptr directly, the allocator embeds a
 *  Deleter which may be used with unique_ptr and is used
 *  with the allocate_shared and allocate_ptr supplied operations.
 */
4085template<typename T, class SVMTrait>
4086class SVMAllocator {
4087private:
4088 Context context_;
4089
4090public:
4091 typedef T value_type;
4092 typedef value_type* pointer;
4093 typedef const value_type* const_pointer;
4094 typedef value_type& reference;
4095 typedef const value_type& const_reference;
4096 typedef std::size_t size_type;
4097 typedef std::ptrdiff_t difference_type;
4098
4099 template<typename U>
4100 struct rebind
4101 {
4102 typedef SVMAllocator<U, SVMTrait> other;
4103 };
4104
4105 template<typename U, typename V>
4106 friend class SVMAllocator;
4107
4108 SVMAllocator() :
4109 context_(Context::getDefault())
4110 {
4111 }
4112
4113 explicit SVMAllocator(cl::Context context) :
4114 context_(context)
4115 {
4116 }
4117
4118
4119 SVMAllocator(const SVMAllocator &other) :
4120 context_(other.context_)
4121 {
4122 }
4123
4124 template<typename U>
4125 SVMAllocator(const SVMAllocator<U, SVMTrait> &other) :
4126 context_(other.context_)
4127 {
4128 }
4129
4130 ~SVMAllocator()
4131 {
4132 }
4133
4134 pointer address(reference r) noexcept
4135 {
4136 return std::addressof(r);
4137 }
4138
4139 const_pointer address(const_reference r) noexcept
4140 {
4141 return std::addressof(r);
4142 }
4143
4147 * If the allocator is coarse-grained, this will take ownership to allow
4148 * containers to correctly construct data in place.
4149 */
4150 pointer allocate(
4151 size_type size,
4152 typename cl::SVMAllocator<void, SVMTrait>::const_pointer = 0,
4153 bool map = true)
4154 {
4155 // Allocate memory with default alignment matching the size of the type
4156 void* voidPointer =
4157 CL_(clSVMAlloc)(
4158 context_(),
4159 SVMTrait::getSVMMemFlags(),
4160 size*sizeof(T),
4161 0);
4162 pointer retValue = reinterpret_cast<pointer>(
4163 voidPointer);
4164#if defined(CL_HPP_ENABLE_EXCEPTIONS)
4165 if (!retValue) {
4166 std::bad_alloc excep;
4167 throw excep;
4168 }
4169#endif // #if defined(CL_HPP_ENABLE_EXCEPTIONS)
4170
4171 // If allocation was coarse-grained then map it
4172 if (map && !(SVMTrait::getSVMMemFlags() & CL_MEM_SVM_FINE_GRAIN_BUFFER)) {
4173 cl_int err = enqueueMapSVM(retValue, CL_TRUE, CL_MAP_READ | CL_MAP_WRITE, size*sizeof(T));
4174 if (err != CL_SUCCESS) {
4175 CL_(clSVMFree)(context_(), retValue);
4176 retValue = nullptr;
4177#if defined(CL_HPP_ENABLE_EXCEPTIONS)
4178 std::bad_alloc excep;
4179 throw excep;
4180#endif
4181 }
4182 }
4183
4184 // If exceptions disabled, return null pointer from allocator
4185 return retValue;
4186 }
4187
4188 void deallocate(pointer p, size_type)
4189 {
4190 CL_(clSVMFree)(context_(), p);
4191 }
4192
4193 /**
4194 * Return the maximum possible allocation size.
4195 * This is the minimum of the maximum sizes of all devices in the context.
4196 */
4197 size_type max_size() const noexcept
4198 {
4199 size_type maxSize = std::numeric_limits<size_type>::max() / sizeof(T);
4200
4201 for (const Device &d : context_.getInfo<CL_CONTEXT_DEVICES>()) {
4202 maxSize = std::min(
4203 maxSize,
4204 static_cast<size_type>(d.getInfo<CL_DEVICE_MAX_MEM_ALLOC_SIZE>()));
4205 }
4206
4207 return maxSize;
4208 }
4209
4210 template< class U, class... Args >
4211 void construct(U* p, Args&&... args)
4212 {
4213 new(p)T(args...);
4214 }
4215
4216 template< class U >
4217 void destroy(U* p)
4218 {
4219 p->~U();
4220 }
4225 inline bool operator==(SVMAllocator const& rhs)
4226 {
4227 return (context_==rhs.context_);
4228 }
4229
4230 inline bool operator!=(SVMAllocator const& a)
4231 {
4232 return !operator==(a);
4233 }
4234}; // class SVMAllocator return cl::pointer<T>(tmp, detail::Deleter<T, Alloc>{alloc, copies});
4235
4236
4237template<class SVMTrait>
4238class SVMAllocator<void, SVMTrait> {
4239public:
4240 typedef void value_type;
4241 typedef value_type* pointer;
4242 typedef const value_type* const_pointer;
4243
4244 template<typename U>
4245 struct rebind
4246 {
4247 typedef SVMAllocator<U, SVMTrait> other;
4248 };
4249
4250 template<typename U, typename V>
4251 friend class SVMAllocator;
4252};
4253
#if !defined(CL_HPP_NO_STD_UNIQUE_PTR)
namespace detail
{
    // Deleter for use with cl::pointer / unique_ptr: destroys the pointee
    // and returns its storage to (a copy of) the allocator that produced it.
    template<class Alloc>
    class Deleter {
    private:
        Alloc alloc_;       // allocator used for the original allocation
        size_type copies_;  // element count passed to allocate()

    public:
        typedef typename std::allocator_traits<Alloc>::pointer pointer;

        Deleter(const Alloc &alloc, size_type copies) : alloc_{ alloc }, copies_{ copies }
        {
        }

        // Destroy then deallocate via a local allocator copy, since
        // allocator_traits operations require a non-const allocator.
        void operator()(pointer ptr) const {
            Alloc tmpAlloc{ alloc_ };
            std::allocator_traits<Alloc>::destroy(tmpAlloc, std::addressof(*ptr));
            std::allocator_traits<Alloc>::deallocate(tmpAlloc, ptr, copies_);
        }
    };
} // namespace detail
4277
/**
 * Allocate a single object of type T with the supplied allocator and return
 * it as a cl::pointer owning it via an embedded Deleter. The allocator must
 * allocate host-accessible memory.
 * This requirement is to ensure that the control block is not
 * allocated in memory inaccessible to the host.
 */
template <class T, class Alloc, class... Args>
cl::pointer<T, detail::Deleter<Alloc>> allocate_pointer(const Alloc &alloc_, Args&&... args)
{
    // Work on a local copy: allocator_traits operations need a mutable allocator.
    Alloc alloc(alloc_);
    static const size_type copies = 1;

    // Ensure that creation of the management block and the
    // object are dealt with separately such that we only provide a deleter

    T* tmp = std::allocator_traits<Alloc>::allocate(alloc, copies);
    if (!tmp) {
#if defined(CL_HPP_ENABLE_EXCEPTIONS)
        std::bad_alloc excep;
        throw excep;
#else
        // Exceptions disabled: signal allocation failure with a null pointer.
        return nullptr;
#endif
    }

#if defined(CL_HPP_ENABLE_EXCEPTIONS)
    try
#endif
    {
        std::allocator_traits<Alloc>::construct(
            alloc,
            std::addressof(*tmp),
            std::forward<Args>(args)...);

        return cl::pointer<T, detail::Deleter<Alloc>>(tmp, detail::Deleter<Alloc>{alloc, copies});
    }
#if defined(CL_HPP_ENABLE_EXCEPTIONS)
    // If construction throws, return the raw storage before propagating.
    catch (std::bad_alloc&)
    {
        std::allocator_traits<Alloc>::deallocate(alloc, tmp, copies);
        throw;
    }
#endif
}
4322
4323template< class T, class SVMTrait, class... Args >
4324cl::pointer<T, detail::Deleter<SVMAllocator<T, SVMTrait>>> allocate_svm(Args... args)
4325{
4326 SVMAllocator<T, SVMTrait> alloc;
4327 return cl::allocate_pointer<T>(alloc, args...);
4328}
4329
4330template< class T, class SVMTrait, class... Args >
4331cl::pointer<T, detail::Deleter<SVMAllocator<T, SVMTrait>>> allocate_svm(const cl::Context &c, Args... args)
4332{
4334 return cl::allocate_pointer<T>(alloc, args...);
4335}
4336#endif // #if !defined(CL_HPP_NO_STD_UNIQUE_PTR)
4337
4341template < class T >
4342using coarse_svm_vector = vector<T, cl::SVMAllocator<int, cl::SVMTraitCoarse<>>>;
4343
4347template < class T >
4348using fine_svm_vector = vector<T, cl::SVMAllocator<int, cl::SVMTraitFine<>>>;
4349
4353template < class T >
4354using atomic_svm_vector = vector<T, cl::SVMAllocator<int, cl::SVMTraitAtomic<>>>;
4355
4356#endif // #if CL_HPP_TARGET_OPENCL_VERSION >= 200
4357
4358
/*! \brief Class interface for Buffer Memory Objects.
 *
 *  See Memory for details about copy semantics, etc.
 *
 *  \see Memory
 */
4365class Buffer : public Memory
4366{
4367public:
4368
4373 * \param host_ptr Storage to be used if the CL_MEM_USE_HOST_PTR flag was
4374 * specified. Note alignment & exclusivity requirements.
4375 */
4376 Buffer(
4377 const Context& context,
4378 cl_mem_flags flags,
4379 size_type size,
4380 void* host_ptr = nullptr,
4381 cl_int* err = nullptr)
4382 {
4383 cl_int error;
4384 object_ = CL_(clCreateBuffer)(context(), flags, size, host_ptr, &error);
4385
4386 detail::errHandler(error, __CREATE_BUFFER_ERR);
4387 if (err != nullptr) {
4388 *err = error;
4389 }
4390 }
4391
4392#if CL_HPP_TARGET_OPENCL_VERSION >= 300
4399 * end with 0.
4400 * \param host_ptr Storage to be used if the CL_MEM_USE_HOST_PTR flag was
4401 * specified. Note alignment & exclusivity requirements.
4402 */
4403 Buffer(
4404 const Context& context,
4405 const vector<cl_mem_properties>& properties,
4406 cl_mem_flags flags,
4407 size_type size,
4408 void* host_ptr = nullptr,
4409 cl_int* err = nullptr)
4410 {
4411 cl_int error;
4412
4413 if (properties.empty()) {
4414 object_ = CL_(clCreateBufferWithProperties)(context(), nullptr, flags,
4415 size, host_ptr, &error);
4416 }
4417 else {
4418 object_ = CL_(clCreateBufferWithProperties)(
4419 context(), properties.data(), flags, size, host_ptr, &error);
4420 }
4421
4422 detail::errHandler(error, __CREATE_BUFFER_ERR);
4423 if (err != nullptr) {
4424 *err = error;
4425 }
4426 }
4427#endif
4428
4434 * specified. Note alignment & exclusivity requirements.
4435 *
4436 * \see Context::getDefault()
4437 */
4438 Buffer(
4439 cl_mem_flags flags,
4440 size_type size,
4441 void* host_ptr = nullptr,
4442 cl_int* err = nullptr) : Buffer(Context::getDefault(err), flags, size, host_ptr, err) { }
4443
4444#if CL_HPP_TARGET_OPENCL_VERSION >= 300
4453 * specified. Note alignment & exclusivity requirements.
4454 *
4455 * \see Context::getDefault()
4456 */
4457 Buffer(
4458 const vector<cl_mem_properties>& properties,
4459 cl_mem_flags flags,
4460 size_type size,
4461 void* host_ptr = nullptr,
4462 cl_int* err = nullptr) : Buffer(Context::getDefault(err), properties, flags, size, host_ptr, err) { }
4463#endif
4464
4467 * IteratorType must be random access.
4468 * If useHostPtr is specified iterators must represent contiguous data.
4469 */
4470 template< typename IteratorType >
4471 Buffer(
4472 IteratorType startIterator,
4473 IteratorType endIterator,
4474 bool readOnly,
4475 bool useHostPtr = false,
4476 cl_int* err = nullptr)
4477 {
4478 typedef typename std::iterator_traits<IteratorType>::value_type DataType;
4479 cl_int error;
4480
4481 cl_mem_flags flags = 0;
4482 if( readOnly ) {
4483 flags |= CL_MEM_READ_ONLY;
4484 }
4485 else {
4486 flags |= CL_MEM_READ_WRITE;
4487 }
4488 if( useHostPtr ) {
4489 flags |= CL_MEM_USE_HOST_PTR;
4490 }
4491
4492 size_type size = sizeof(DataType)*(endIterator - startIterator);
4493
4494 Context context = Context::getDefault(err);
4495
4496 if( useHostPtr ) {
4497 object_ = CL_(clCreateBuffer)(context(), flags, size, const_cast<DataType*>(&*startIterator), &error);
4498 } else {
4499 object_ = CL_(clCreateBuffer)(context(), flags, size, 0, &error);
4500 }
4501
4502 detail::errHandler(error, __CREATE_BUFFER_ERR);
4503 if (err != nullptr) {
4504 *err = error;
4505 }
4506
4507 if( !useHostPtr ) {
4508 error = cl::copy(startIterator, endIterator, *this);
4509 detail::errHandler(error, __CREATE_BUFFER_ERR);
4510 if (err != nullptr) {
4511 *err = error;
4512 }
4513 }
4514 }
4515
4521 template< typename IteratorType >
4522 Buffer(const Context &context, IteratorType startIterator, IteratorType endIterator,
4523 bool readOnly, bool useHostPtr = false, cl_int* err = nullptr);
4524
4529 template< typename IteratorType >
4530 Buffer(const CommandQueue &queue, IteratorType startIterator, IteratorType endIterator,
4531 bool readOnly, bool useHostPtr = false, cl_int* err = nullptr);
4532
4534 Buffer() : Memory() { }
4535
4539 * Defaults to false to maintain compatibility with earlier versions.
4540 *
4541 * See Memory for further details.
4542 */
4543 explicit Buffer(const cl_mem& buffer, bool retainObject = false) :
4544 Memory(buffer, retainObject) { }
4545
4546 /*! \brief Assignment from cl_mem - performs shallow copy.
4547 *
4548 * See Memory for further details.
4549 */
4550 Buffer& operator = (const cl_mem& rhs)
4551 {
4552 Memory::operator=(rhs);
4553 return *this;
4554 }
4555
4556
4557#if CL_HPP_TARGET_OPENCL_VERSION >= 110
4558 /*! \brief Creates a new buffer object from this.
4559 *
4560 * Wraps clCreateSubBuffer().
4561 */
4563 cl_mem_flags flags,
4564 cl_buffer_create_type buffer_create_type,
4565 const void * buffer_create_info,
4566 cl_int * err = nullptr)
4567 {
4568 Buffer result;
4569 cl_int error;
4570 result.object_ = CL_(clCreateSubBuffer)(
4571 object_,
4572 flags,
4573 buffer_create_type,
4574 buffer_create_info,
4575 &error);
4576
4577 detail::errHandler(error, __CREATE_SUBBUFFER_ERR);
4578 if (err != nullptr) {
4579 *err = error;
4580 }
4581
4582 return result;
4583 }
4584#endif // CL_HPP_TARGET_OPENCL_VERSION >= 110
4585};
4586
4587#if defined (CL_HPP_USE_DX_INTEROP)
/*! \brief Class interface for creating OpenCL buffers from D3D10 buffers.
 *
 *  Only compiled when CL_HPP_USE_DX_INTEROP is defined.
 */
class BufferD3D10 : public Buffer
{
public:


    /*! \brief Constructs a BufferD3D10 in a specified context from a given
     *         ID3D10Buffer, via the cl_khr_d3d10_sharing extension.
     *
     *  Wraps clCreateFromD3D10BufferKHR().
     */
    BufferD3D10(
        const Context& context,
        cl_mem_flags flags,
        ID3D10Buffer* bufobj,
        cl_int * err = nullptr) : pfn_clCreateFromD3D10BufferKHR(nullptr)
    {
        typedef CL_API_ENTRY cl_mem (CL_API_CALL *PFN_clCreateFromD3D10BufferKHR)(
            cl_context context, cl_mem_flags flags, ID3D10Buffer* buffer,
            cl_int* errcode_ret);
        // NOTE(review): this local shadows the member initialized above and
        // is set by the INIT macros below — confirm member is actually needed.
        PFN_clCreateFromD3D10BufferKHR pfn_clCreateFromD3D10BufferKHR;
#if CL_HPP_TARGET_OPENCL_VERSION >= 120
        // Locate the platform in the context properties so the extension
        // function pointer can be queried per-platform (required from 1.2).
        vector<cl_context_properties> props = context.getInfo<CL_CONTEXT_PROPERTIES>();
        cl_platform platform = nullptr;
        for( int i = 0; i < props.size(); ++i ) {
            if( props[i] == CL_CONTEXT_PLATFORM ) {
                platform = props[i+1];
            }
        }
        CL_HPP_INIT_CL_EXT_FCN_PTR_PLATFORM_(platform, clCreateFromD3D10BufferKHR);
#endif
#if CL_HPP_MINIMUM_OPENCL_VERSION < 120
        CL_HPP_INIT_CL_EXT_FCN_PTR_(clCreateFromD3D10BufferKHR);
#endif

        cl_int error;
        object_ = pfn_clCreateFromD3D10BufferKHR(
            context(),
            flags,
            bufobj,
            &error);

        // TODO: This should really have a D3D10 error code!
        detail::errHandler(error, __CREATE_GL_BUFFER_ERR);
        if (err != nullptr) {
            *err = error;
        }
    }

    //! \brief Default constructor - initializes to nullptr.
    BufferD3D10() : Buffer() { }

    /*! \brief Constructor from cl_mem - takes ownership.
     *
     *  \param retainObject will cause the constructor to retain its cl object.
     *         Defaults to false to maintain compatibility with earlier versions.
     */
    explicit BufferD3D10(const cl_mem& buffer, bool retainObject = false) :
        Buffer(buffer, retainObject) { }

    /*! \brief Assignment from cl_mem - performs shallow copy.
     *
     *  See Memory for further details.
     */
    BufferD3D10& operator = (const cl_mem& rhs)
    {
        Buffer::operator=(rhs);
        return *this;
    }
};
4667#endif
4668
/*! \brief Class interface for GL Buffer Memory Objects.
 *
 *  See Memory for details about copy semantics, etc.
 *
 *  \see Memory
 */
4677class BufferGL : public Buffer
4678{
4679public:
4681 * GL buffer.
4682 *
4683 * Wraps clCreateFromGLBuffer().
4684 */
4685 BufferGL(
4686 const Context& context,
4687 cl_mem_flags flags,
4688 cl_GLuint bufobj,
4689 cl_int * err = nullptr)
4690 {
4691 cl_int error;
4692 object_ = CL_(clCreateFromGLBuffer)(
4693 context(),
4694 flags,
4695 bufobj,
4696 &error);
4697
4698 detail::errHandler(error, __CREATE_GL_BUFFER_ERR);
4699 if (err != nullptr) {
4700 *err = error;
4702 }
4703
4705 BufferGL() : Buffer() { }
4706
4710 * Defaults to false to maintain compatibility with
4711 * earlier versions.
4712 * See Memory for further details.
4713 */
4714 explicit BufferGL(const cl_mem& buffer, bool retainObject = false) :
4715 Buffer(buffer, retainObject) { }
4716
4717 /*! \brief Assignment from cl_mem - performs shallow copy.
4718 *
4719 * See Memory for further details.
4720 */
4721 BufferGL& operator = (const cl_mem& rhs)
4722 {
4723 Buffer::operator=(rhs);
4724 return *this;
4726
4727
4729 cl_int getObjectInfo(
4730 cl_gl_object_type *type,
4731 cl_GLuint * gl_object_name)
4732 {
4733 return detail::errHandler(
4734 CL_(clGetGLObjectInfo)(object_,type,gl_object_name),
4735 __GET_GL_OBJECT_INFO_ERR);
4736 }
4737};
4738
/*! \brief Class interface for GL Render Buffer Memory Objects.
 *
 *  See Memory for details about copy semantics, etc.
 *
 *  \see Memory
 */
4747class BufferRenderGL : public Buffer
4748{
4749public:
4751 * GL Renderbuffer.
4752 *
4753 * Wraps clCreateFromGLRenderbuffer().
4754 */
4756 const Context& context,
4757 cl_mem_flags flags,
4758 cl_GLuint bufobj,
4759 cl_int * err = nullptr)
4760 {
4761 cl_int error;
4762 object_ = CL_(clCreateFromGLRenderbuffer)(
4763 context(),
4764 flags,
4765 bufobj,
4766 &error);
4767
4768 detail::errHandler(error, __CREATE_GL_RENDER_BUFFER_ERR);
4769 if (err != nullptr) {
4770 *err = error;
4772 }
4773
4775 BufferRenderGL() : Buffer() { }
4776
4780 * Defaults to false to maintain compatibility with
4781 * earlier versions.
4782 * See Memory for further details.
4783 */
4784 explicit BufferRenderGL(const cl_mem& buffer, bool retainObject = false) :
4785 Buffer(buffer, retainObject) { }
4786
4787 /*! \brief Assignment from cl_mem - performs shallow copy.
4788 *
4789 * See Memory for further details.
4790 */
4791 BufferRenderGL& operator = (const cl_mem& rhs)
4792 {
4793 Buffer::operator=(rhs);
4794 return *this;
4796
4797
4799 cl_int getObjectInfo(
4800 cl_gl_object_type *type,
4801 cl_GLuint * gl_object_name)
4802 {
4803 return detail::errHandler(
4804 CL_(clGetGLObjectInfo)(object_,type,gl_object_name),
4805 __GET_GL_OBJECT_INFO_ERR);
4806 }
4807};
4808
/*! \brief Base class for all image Memory objects.
 *
 *  See Memory for details about copy semantics, etc.
 *
 *  \see Memory
 */
4815class Image : public Memory
4816{
4817protected:
4819 Image() : Memory() { }
4820
4824 * Defaults to false to maintain compatibility with
4825 * earlier versions.
4826 * See Memory for further details.
4827 */
4828 explicit Image(const cl_mem& image, bool retainObject = false) :
4829 Memory(image, retainObject) { }
4830
4831 /*! \brief Assignment from cl_mem - performs shallow copy.
4832 *
4833 * See Memory for further details.
4834 */
4835 Image& operator = (const cl_mem& rhs)
4836 {
4837 Memory::operator=(rhs);
4838 return *this;
4839 }
4840
4842public:
4844 template <typename T>
4845 cl_int getImageInfo(cl_image_info name, T* param) const
4846 {
4847 return detail::errHandler(
4848 detail::getInfo(CL_(clGetImageInfo), object_, name, param),
4849 __GET_IMAGE_INFO_ERR);
4850 }
4853 template <cl_image_info name> typename
4855 getImageInfo(cl_int* err = nullptr) const
4856 {
4857 typename detail::param_traits<
4858 detail::cl_image_info, name>::param_type param;
4859 cl_int result = getImageInfo(name, &param);
4860 if (err != nullptr) {
4861 *err = result;
4862 }
4863 return param;
4864 }
4865};
4866
4867#if CL_HPP_TARGET_OPENCL_VERSION >= 120
/*! \brief Class interface for 1D Image Memory objects.
 *
 *  See Memory for details about copy semantics, etc.
 *
 *  \see Memory
 */
4874class Image1D : public Image
4875{
4876public:
4877 /*! \brief Constructs a 1D Image in a specified context.
4878 *
4879 * Wraps clCreateImage().
4880 */
4881 Image1D(
4882 const Context& context,
4883 cl_mem_flags flags,
4884 ImageFormat format,
4885 size_type width,
4886 void* host_ptr = nullptr,
4887 cl_int* err = nullptr)
4888 {
4889 cl_int error;
4890
4891 cl_image_desc desc = {};
4892 desc.image_type = CL_MEM_OBJECT_IMAGE1D;
4893 desc.image_width = width;
4894
4895 object_ = CL_(clCreateImage)(
4896 context(),
4897 flags,
4898 &format,
4899 &desc,
4900 host_ptr,
4901 &error);
4902
4903 detail::errHandler(error, __CREATE_IMAGE_ERR);
4904 if (err != nullptr) {
4905 *err = error;
4907 }
4908
4910 Image1D() { }
4911
4912#if CL_HPP_TARGET_OPENCL_VERSION >= 300
4919 * end with 0.
4920 * \param host_ptr Storage to be used if the CL_MEM_USE_HOST_PTR flag was
4921 * specified. Note alignment & exclusivity requirements.
4922 */
4923 Image1D(const Context &context, const vector<cl_mem_properties> &properties,
4924 cl_mem_flags flags, ImageFormat format, size_type width,
4925 void *host_ptr = nullptr, cl_int *err = nullptr) {
4926 cl_int error;
4927
4928 cl_image_desc desc = {};
4929 desc.image_type = CL_MEM_OBJECT_IMAGE1D;
4930 desc.image_width = width;
4931
4932 if (properties.empty()) {
4933 object_ = CL_(clCreateImageWithProperties)(
4934 context(), nullptr, flags, &format, &desc, host_ptr, &error);
4935 } else {
4936 object_ =
4937 CL_(clCreateImageWithProperties)(context(), properties.data(), flags,
4938 &format, &desc, host_ptr, &error);
4939 }
4940
4941 detail::errHandler(error, __CREATE_IMAGE_ERR);
4942 if (err != nullptr) {
4943 *err = error;
4944 }
4945 }
4946#endif //#if CL_HPP_TARGET_OPENCL_VERSION >= 300
4947
4951 * Defaults to false to maintain compatibility with
4952 * earlier versions.
4953 * See Memory for further details.
4954 */
4955 explicit Image1D(const cl_mem& image1D, bool retainObject = false) :
4956 Image(image1D, retainObject) { }
4957
4958 /*! \brief Assignment from cl_mem - performs shallow copy.
4959 *
4960 * See Memory for further details.
4961 */
4962 Image1D& operator = (const cl_mem& rhs)
4963 {
4964 Image::operator=(rhs);
4965 return *this;
4966 }
4967
4968
4969};
/*! \brief Class interface for 1D Image objects created from a buffer. */
class Image1DBuffer : public Image
{
public:
    /*! \brief Constructs a 1D image backed by an existing buffer object.
     *
     *  Wraps clCreateImage() with CL_MEM_OBJECT_IMAGE1D_BUFFER.
     */
    Image1DBuffer(
        const Context& context,
        cl_mem_flags flags,
        ImageFormat format,
        size_type width,
        const Buffer &buffer,
        cl_int* err = nullptr)
    {
        cl_int error;

        cl_image_desc desc = {};
        desc.image_type = CL_MEM_OBJECT_IMAGE1D_BUFFER;
        desc.image_width = width;
        desc.buffer = buffer();

        object_ = CL_(clCreateImage)(
            context(),
            flags,
            &format,
            &desc,
            nullptr,
            &error);

        detail::errHandler(error, __CREATE_IMAGE_ERR);
        if (err != nullptr) {
            *err = error;
        }
    }

    //! \brief Default constructor - initializes to nullptr.
    Image1DBuffer() { }

#if CL_HPP_TARGET_OPENCL_VERSION >= 300
    /*! \brief Constructs a 1D image from a buffer with specified properties.
     *
     *  Wraps clCreateImageWithProperties().
     *
     *  \param properties Optional list of properties for the image object and
     *         their corresponding values. The non-empty list must
     *         end with 0.
     *  \param buffer Refer to a valid buffer or image memory object.
     */
    Image1DBuffer(const Context &context,
                  const vector<cl_mem_properties> &properties,
                  cl_mem_flags flags, ImageFormat format, size_type width,
                  const Buffer &buffer, cl_int *err = nullptr) {
        cl_int error;

        cl_image_desc desc = {};
        desc.image_type = CL_MEM_OBJECT_IMAGE1D_BUFFER;
        desc.image_width = width;
        desc.buffer = buffer();

        if (properties.empty()) {
            object_ = CL_(clCreateImageWithProperties)(
                context(), nullptr, flags, &format, &desc, nullptr, &error);
        } else {
            object_ =
                CL_(clCreateImageWithProperties)(context(), properties.data(), flags,
                                                 &format, &desc, nullptr, &error);
        }

        detail::errHandler(error, __CREATE_IMAGE_ERR);
        if (err != nullptr) {
            *err = error;
        }
    }
#endif //#if CL_HPP_TARGET_OPENCL_VERSION >= 300

    /*! \brief Constructor from cl_mem - takes ownership.
     *
     *  \param retainObject will cause the constructor to retain its cl object.
     *         Defaults to false to maintain compatibility with
     *         earlier versions.
     *  See Memory for further details.
     */
    explicit Image1DBuffer(const cl_mem& image1D, bool retainObject = false) :
        Image(image1D, retainObject) { }

    //! \brief Assignment from cl_mem - performs shallow copy.
    Image1DBuffer& operator = (const cl_mem& rhs)
    {
        Image::operator=(rhs);
        return *this;
    }
};
/*! \brief Class interface for arrays of 1D Image Memory objects. */
class Image1DArray : public Image
{
public:
    /*! \brief Constructs an array of 1D images in a specified context.
     *
     *  Wraps clCreateImage() with CL_MEM_OBJECT_IMAGE1D_ARRAY.
     */
    Image1DArray(
        const Context& context,
        cl_mem_flags flags,
        ImageFormat format,
        size_type arraySize,
        size_type width,
        size_type rowPitch,
        void* host_ptr = nullptr,
        cl_int* err = nullptr)
    {
        cl_int error;

        cl_image_desc desc = {};
        desc.image_type = CL_MEM_OBJECT_IMAGE1D_ARRAY;
        desc.image_width = width;
        desc.image_array_size = arraySize;
        desc.image_row_pitch = rowPitch;

        object_ = CL_(clCreateImage)(
            context(),
            flags,
            &format,
            &desc,
            host_ptr,
            &error);

        detail::errHandler(error, __CREATE_IMAGE_ERR);
        if (err != nullptr) {
            *err = error;
        }
    }

    //! \brief Default constructor - initializes to nullptr.
    Image1DArray() { }

#if CL_HPP_TARGET_OPENCL_VERSION >= 300
    /*! \brief Constructs a 1D image array with specified properties.
     *
     *  Wraps clCreateImageWithProperties().
     *
     *  \param properties Optional list of properties for the image object and
     *         their corresponding values. The non-empty list must
     *         end with 0.
     *  \param host_ptr Storage to be used if the CL_MEM_USE_HOST_PTR flag was
     *         specified. Note alignment & exclusivity requirements.
     */
    Image1DArray(const Context &context,
                 const vector<cl_mem_properties> &properties,
                 cl_mem_flags flags, ImageFormat format, size_type arraySize,
                 size_type width, size_type rowPitch = 0,
                 void *host_ptr = nullptr, cl_int *err = nullptr) {
        cl_int error;

        cl_image_desc desc = {};
        desc.image_type = CL_MEM_OBJECT_IMAGE1D_ARRAY;
        desc.image_width = width;
        desc.image_array_size = arraySize;
        desc.image_row_pitch = rowPitch;

        if (properties.empty()) {
            object_ = CL_(clCreateImageWithProperties)(
                context(), nullptr, flags, &format, &desc, host_ptr, &error);
        } else {
            object_ =
                CL_(clCreateImageWithProperties)(context(), properties.data(), flags,
                                                 &format, &desc, host_ptr, &error);
        }

        detail::errHandler(error, __CREATE_IMAGE_ERR);
        if (err != nullptr) {
            *err = error;
        }
    }
#endif //#if CL_HPP_TARGET_OPENCL_VERSION >= 300

    /*! \brief Constructor from cl_mem - takes ownership.
     *
     *  \param retainObject will cause the constructor to retain its cl object.
     *         Defaults to false to maintain compatibility with
     *         earlier versions.
     *  See Memory for further details.
     */
    explicit Image1DArray(const cl_mem& imageArray, bool retainObject = false) :
        Image(imageArray, retainObject) { }


    //! \brief Assignment from cl_mem - performs shallow copy.
    Image1DArray& operator = (const cl_mem& rhs)
    {
        Image::operator=(rhs);
        return *this;
    }


};
5161#endif // #if CL_HPP_TARGET_OPENCL_VERSION >= 120
5162
5163
/*! \brief Class interface for 2D Image Memory objects.
 *
 *  See Memory for details about copy semantics, etc.
 *
 *  \see Memory
 */
5170class Image2D : public Image
5171{
5172public:
5173 /*! \brief Constructs a 2D Image in a specified context.
5174 *
5175 * Wraps clCreateImage().
5176 */
5177 Image2D(
5178 const Context& context,
5179 cl_mem_flags flags,
5180 ImageFormat format,
5181 size_type width,
5182 size_type height,
5183 size_type row_pitch = 0,
5184 void* host_ptr = nullptr,
5185 cl_int* err = nullptr)
5186 {
5187 cl_int error;
5188 bool useCreateImage;
5189
5190#if CL_HPP_TARGET_OPENCL_VERSION >= 120 && CL_HPP_MINIMUM_OPENCL_VERSION < 120
5191 // Run-time decision based on the actual platform
5192 {
5193 cl_uint version = detail::getContextPlatformVersion(context());
5194 useCreateImage = (version >= 0x10002); // OpenCL 1.2 or above
5195 }
5196#elif CL_HPP_TARGET_OPENCL_VERSION >= 120
5197 useCreateImage = true;
5198#else
5199 useCreateImage = false;
5200#endif
5201
5202#if CL_HPP_TARGET_OPENCL_VERSION >= 120
5203 if (useCreateImage)
5204 {
5205 cl_image_desc desc = {};
5206 desc.image_type = CL_MEM_OBJECT_IMAGE2D;
5207 desc.image_width = width;
5208 desc.image_height = height;
5209 desc.image_row_pitch = row_pitch;
5210
5211 object_ = CL_(clCreateImage)(
5212 context(),
5213 flags,
5214 &format,
5215 &desc,
5216 host_ptr,
5217 &error);
5218
5219 detail::errHandler(error, __CREATE_IMAGE_ERR);
5220 if (err != nullptr) {
5221 *err = error;
5222 }
5223 }
5224#endif // CL_HPP_TARGET_OPENCL_VERSION >= 120
5225#if CL_HPP_MINIMUM_OPENCL_VERSION < 120
5226 if (!useCreateImage)
5227 {
5228 object_ = CL_(clCreateImage2D)(
5229 context(), flags,&format, width, height, row_pitch, host_ptr, &error);
5230
5231 detail::errHandler(error, __CREATE_IMAGE2D_ERR);
5232 if (err != nullptr) {
5233 *err = error;
5234 }
5235 }
5236#endif // CL_HPP_MINIMUM_OPENCL_VERSION < 120
5237 }
5238
5239#if CL_HPP_TARGET_OPENCL_VERSION >= 120
5244 * cl_khr_image2d_from_buffer extension.
5245 *
5246 * Wraps clCreateImage().
5247 */
5248 Image2D(
5249 const Context& context,
5250 ImageFormat format,
5251 const Buffer &sourceBuffer,
5252 size_type width,
5253 size_type height,
5254 size_type row_pitch = 0,
5255 cl_int* err = nullptr)
5256 {
5257 cl_int error;
5258
5259 cl_image_desc desc = {};
5260 desc.image_type = CL_MEM_OBJECT_IMAGE2D;
5261 desc.image_width = width;
5262 desc.image_height = height;
5263 desc.image_row_pitch = row_pitch;
5264 desc.buffer = sourceBuffer();
5265
5266 object_ = CL_(clCreateImage)(
5267 context(),
5268 0, // flags inherited from buffer
5269 &format,
5270 &desc,
5271 nullptr,
5272 &error);
5273
5274 detail::errHandler(error, __CREATE_IMAGE_ERR);
5275 if (err != nullptr) {
5276 *err = error;
5277 }
5278 }
5279#endif // CL_HPP_TARGET_OPENCL_VERSION >= 120
5280
5281#if CL_HPP_TARGET_OPENCL_VERSION >= 200
5290 * 2.0 API specification.
5291 *
5292 * Wraps clCreateImage().
5293 */
5294 Image2D(
5295 const Context& context,
5296 cl_channel_order order,
5297 const Image &sourceImage,
5298 cl_int* err = nullptr)
5299 {
5300 cl_int error;
5301
5302 // Descriptor fields have to match source image
5303 size_type sourceWidth =
5304 sourceImage.getImageInfo<CL_IMAGE_WIDTH>();
5305 size_type sourceHeight =
5306 sourceImage.getImageInfo<CL_IMAGE_HEIGHT>();
5307 size_type sourceRowPitch =
5308 sourceImage.getImageInfo<CL_IMAGE_ROW_PITCH>();
5309 cl_uint sourceNumMIPLevels =
5310 sourceImage.getImageInfo<CL_IMAGE_NUM_MIP_LEVELS>();
5311 cl_uint sourceNumSamples =
5312 sourceImage.getImageInfo<CL_IMAGE_NUM_SAMPLES>();
5313 cl_image_format sourceFormat =
5314 sourceImage.getImageInfo<CL_IMAGE_FORMAT>();
5315
5316 // Update only the channel order.
5317 // Channel format inherited from source.
5318 sourceFormat.image_channel_order = order;
5319
5320 cl_image_desc desc = {};
5321 desc.image_type = CL_MEM_OBJECT_IMAGE2D;
5322 desc.image_width = sourceWidth;
5323 desc.image_height = sourceHeight;
5324 desc.image_row_pitch = sourceRowPitch;
5325 desc.num_mip_levels = sourceNumMIPLevels;
5326 desc.num_samples = sourceNumSamples;
5327 desc.buffer = sourceImage();
5328
5329 object_ = CL_(clCreateImage)(
5330 context(),
5331 0, // flags should be inherited from mem_object
5332 &sourceFormat,
5333 &desc,
5334 nullptr,
5335 &error);
5336
5337 detail::errHandler(error, __CREATE_IMAGE_ERR);
5338 if (err != nullptr) {
5339 *err = error;
5340 }
5341 }
5342#endif //#if CL_HPP_TARGET_OPENCL_VERSION >= 200
5343
5344#if CL_HPP_TARGET_OPENCL_VERSION >= 300
5351 * end with 0.
5352 * \param host_ptr Storage to be used if the CL_MEM_USE_HOST_PTR flag was
5353 * specified. Note alignment & exclusivity requirements.
5354 */
5355 Image2D(const Context &context, const vector<cl_mem_properties> &properties,
5356 cl_mem_flags flags, ImageFormat format, size_type width,
5357 size_type height, size_type row_pitch = 0, void *host_ptr = nullptr,
5358 cl_int *err = nullptr) {
5359 cl_int error;
5360
5361 cl_image_desc desc = {};
5362 desc.image_type = CL_MEM_OBJECT_IMAGE2D;
5363 desc.image_width = width;
5364 desc.image_height = height;
5365 desc.image_row_pitch = row_pitch;
5366
5367 if (properties.empty()) {
5368 object_ = CL_(clCreateImageWithProperties)(
5369 context(), nullptr, flags, &format, &desc, host_ptr, &error);
5370 } else {
5371 object_ =
5372 CL_(clCreateImageWithProperties)(context(), properties.data(), flags,
5373 &format, &desc, host_ptr, &error);
5374 }
5375
5376 detail::errHandler(error, __CREATE_IMAGE_ERR);
5377 if (err != nullptr) {
5378 *err = error;
5379 }
5380 }
5381
5387 * their corresponding values. The non-empty list must
5388 * end with 0.
5389 * \param buffer Refer to a valid buffer or image memory object.
5390 */
5391 Image2D(const Context &context, const vector<cl_mem_properties> &properties,
5392 cl_mem_flags flags, ImageFormat format, const Buffer &buffer,
5393 size_type width, size_type height, size_type row_pitch = 0,
5394 cl_int *err = nullptr) {
5395 cl_int error;
5396
5397 cl_image_desc desc = {};
5398 desc.image_type = CL_MEM_OBJECT_IMAGE2D;
5399 desc.image_width = width;
5400 desc.image_height = height;
5401 desc.image_row_pitch = row_pitch;
5402 desc.buffer = buffer();
5403
5404 if (properties.empty()) {
5405 object_ = CL_(clCreateImageWithProperties)(
5406 context(), nullptr, flags, &format, &desc, nullptr, &error);
5407 } else {
5408 object_ =
5409 CL_(clCreateImageWithProperties)(context(), properties.data(), flags,
5410 &format, &desc, nullptr, &error);
5411 }
5412
5413 detail::errHandler(error, __CREATE_IMAGE_ERR);
5414 if (err != nullptr) {
5415 *err = error;
5416 }
5417 }
5419#endif //#if CL_HPP_TARGET_OPENCL_VERSION >= 300
5420
    //! \brief Default constructor - initializes to nullptr.
    Image2D() { }

    /*! \brief Constructor from cl_mem - takes ownership.
     *
     * \param retainObject will cause the constructor to retain its cl object.
     *                     Defaults to false to maintain compatibility with
     *                     earlier versions.
     * See Memory for further details.
     */
    explicit Image2D(const cl_mem& image2D, bool retainObject = false) :
        Image(image2D, retainObject) { }

    /*! \brief Assignment from cl_mem - performs shallow copy.
     *
     * See Memory for further details.
     */
    Image2D& operator = (const cl_mem& rhs)
    {
        Image::operator=(rhs);
        return *this;
    }
5443};
5444
5445
5446#if defined(CL_USE_DEPRECATED_OPENCL_1_1_APIS)
5453 * \see Memory
5454 * \note Deprecated for OpenCL 1.2. Please use ImageGL instead.
5455 */
class CL_API_PREFIX__VERSION_1_1_DEPRECATED Image2DGL : public Image2D
{
public:
    /*! \brief Constructs an Image2DGL in a specified context, from a given
     * GL Texture.
     *
     * Wraps clCreateFromGLTexture2D().
     */
    Image2DGL(
        const Context& context,
        cl_mem_flags flags,
        cl_GLenum target,
        cl_GLint miplevel,
        cl_GLuint texobj,
        cl_int * err = nullptr)
    {
        cl_int error;
        object_ = CL_(clCreateFromGLTexture2D)(
            context(),
            flags,
            target,
            miplevel,
            texobj,
            &error);

        detail::errHandler(error, __CREATE_GL_TEXTURE_2D_ERR);
        if (err != nullptr) {
            *err = error;
        }

    }

    //! \brief Default constructor - initializes to nullptr.
    Image2DGL() : Image2D() { }

    /*! \brief Constructor from cl_mem - takes ownership.
     *
     * \param retainObject will cause the constructor to retain its cl object.
     *                     Defaults to false to maintain compatibility with
     *                     earlier versions.
     * See Memory for further details.
     */
    explicit Image2DGL(const cl_mem& image, bool retainObject = false) :
        Image2D(image, retainObject) { }

    /*! \brief Assignment from cl_mem - performs shallow copy.
     *
     * See Memory for further details.
     */
    Image2DGL& operator = (const cl_mem& rhs)
    {
        Image2D::operator=(rhs);
        return *this;
    }



} CL_API_SUFFIX__VERSION_1_1_DEPRECATED;
5514#endif // CL_USE_DEPRECATED_OPENCL_1_1_APIS
5515
5516#if CL_HPP_TARGET_OPENCL_VERSION >= 120
5520class Image2DArray : public Image
5521{
5522public:
5523 Image2DArray(
5524 const Context& context,
5525 cl_mem_flags flags,
5526 ImageFormat format,
5527 size_type arraySize,
5528 size_type width,
5529 size_type height,
5530 size_type rowPitch,
5531 size_type slicePitch,
5532 void* host_ptr = nullptr,
5533 cl_int* err = nullptr)
5534 {
5535 cl_int error;
5536
5537 cl_image_desc desc = {};
5538 desc.image_type = CL_MEM_OBJECT_IMAGE2D_ARRAY;
5539 desc.image_width = width;
5540 desc.image_height = height;
5541 desc.image_array_size = arraySize;
5542 desc.image_row_pitch = rowPitch;
5543 desc.image_slice_pitch = slicePitch;
5544
5545 object_ = CL_(clCreateImage)(
5546 context(),
5547 flags,
5548 &format,
5549 &desc,
5550 host_ptr,
5551 &error);
5552
5553 detail::errHandler(error, __CREATE_IMAGE_ERR);
5554 if (err != nullptr) {
5555 *err = error;
5556 }
5557 }
5558
5559#if CL_HPP_TARGET_OPENCL_VERSION >= 300
5566 * end with 0.
5567 * \param host_ptr Storage to be used if the CL_MEM_USE_HOST_PTR flag was
5568 * specified. Note alignment & exclusivity requirements.
5569 */
5570 Image2DArray(const Context &context,
5571 const vector<cl_mem_properties> &properties,
5572 cl_mem_flags flags, ImageFormat format, size_type arraySize,
5573 size_type width, size_type height, size_type rowPitch = 0,
5574 size_type slicePitch = 0, void *host_ptr = nullptr,
5575 cl_int *err = nullptr) {
5576 cl_int error;
5577
5578 cl_image_desc desc = {};
5579 desc.image_type = CL_MEM_OBJECT_IMAGE2D_ARRAY;
5580 desc.image_width = width;
5581 desc.image_height = height;
5582 desc.image_array_size = arraySize;
5583 desc.image_row_pitch = rowPitch;
5584 desc.image_slice_pitch = slicePitch;
5585
5586 if (properties.empty()) {
5587 object_ = CL_(clCreateImageWithProperties)(
5588 context(), nullptr, flags, &format, &desc, host_ptr, &error);
5589 } else {
5590 object_ =
5591 CL_(clCreateImageWithProperties)(context(), properties.data(), flags,
5592 &format, &desc, host_ptr, &error);
5593 }
5594
5595 detail::errHandler(error, __CREATE_IMAGE_ERR);
5596 if (err != nullptr) {
5597 *err = error;
5598 }
5599 }
5600#endif //#if CL_HPP_TARGET_OPENCL_VERSION >= 300
5601
5602 Image2DArray() { }
5603
5611 explicit Image2DArray(const cl_mem& imageArray, bool retainObject = false) : Image(imageArray, retainObject) { }
5612
5613 Image2DArray& operator = (const cl_mem& rhs)
5614 {
5615 Image::operator=(rhs);
5616 return *this;
5617 }
5618
5619};
5620#endif // #if CL_HPP_TARGET_OPENCL_VERSION >= 120
5621
5624 * See Memory for details about copy semantics, etc.
5625 *
5626 * \see Memory
5627 */
5628class Image3D : public Image
5629{
5630public:
5631 /*! \brief Constructs a 3D Image in a specified context.
5632 *
5633 * Wraps clCreateImage().
5634 */
5635 Image3D(
5636 const Context& context,
5637 cl_mem_flags flags,
5638 ImageFormat format,
5639 size_type width,
5640 size_type height,
5641 size_type depth,
5642 size_type row_pitch = 0,
5643 size_type slice_pitch = 0,
5644 void* host_ptr = nullptr,
5645 cl_int* err = nullptr)
5646 {
5647 cl_int error;
5648 bool useCreateImage;
5649
5650#if CL_HPP_TARGET_OPENCL_VERSION >= 120 && CL_HPP_MINIMUM_OPENCL_VERSION < 120
5651 // Run-time decision based on the actual platform
5652 {
5653 cl_uint version = detail::getContextPlatformVersion(context());
5654 useCreateImage = (version >= 0x10002); // OpenCL 1.2 or above
5655 }
5656#elif CL_HPP_TARGET_OPENCL_VERSION >= 120
5657 useCreateImage = true;
5658#else
5659 useCreateImage = false;
5660#endif
5661
5662#if CL_HPP_TARGET_OPENCL_VERSION >= 120
5663 if (useCreateImage)
5664 {
5665 cl_image_desc desc = {};
5666 desc.image_type = CL_MEM_OBJECT_IMAGE3D;
5667 desc.image_width = width;
5668 desc.image_height = height;
5669 desc.image_depth = depth;
5670 desc.image_row_pitch = row_pitch;
5671 desc.image_slice_pitch = slice_pitch;
5672
5673 object_ = CL_(clCreateImage)(
5674 context(),
5675 flags,
5676 &format,
5677 &desc,
5678 host_ptr,
5679 &error);
5680
5681 detail::errHandler(error, __CREATE_IMAGE_ERR);
5682 if (err != nullptr) {
5683 *err = error;
5684 }
5685 }
5686#endif // CL_HPP_TARGET_OPENCL_VERSION >= 120
5687#if CL_HPP_MINIMUM_OPENCL_VERSION < 120
5688 if (!useCreateImage)
5689 {
5690 object_ = CL_(clCreateImage3D)(
5691 context(), flags, &format, width, height, depth, row_pitch,
5692 slice_pitch, host_ptr, &error);
5693
5694 detail::errHandler(error, __CREATE_IMAGE3D_ERR);
5695 if (err != nullptr) {
5696 *err = error;
5697 }
5698 }
5699#endif // CL_HPP_MINIMUM_OPENCL_VERSION < 120
5700 }
5701
5702#if CL_HPP_TARGET_OPENCL_VERSION >= 300
5709 * end with 0.
5710 * \param host_ptr Storage to be used if the CL_MEM_USE_HOST_PTR flag was
5711 * specified. Note alignment & exclusivity requirements.
5712 */
5713 Image3D(const Context &context, const vector<cl_mem_properties> &properties,
5714 cl_mem_flags flags, ImageFormat format, size_type width,
5715 size_type height, size_type depth, size_type row_pitch = 0,
5716 size_type slice_pitch = 0, void *host_ptr = nullptr,
5717 cl_int *err = nullptr) {
5718 cl_int error;
5719
5720 cl_image_desc desc = {};
5721 desc.image_type = CL_MEM_OBJECT_IMAGE3D;
5722 desc.image_width = width;
5723 desc.image_height = height;
5724 desc.image_depth = depth;
5725 desc.image_row_pitch = row_pitch;
5726 desc.image_slice_pitch = slice_pitch;
5727
5728 if (properties.empty()) {
5729 object_ = CL_(clCreateImageWithProperties)(
5730 context(), nullptr, flags, &format, &desc, host_ptr, &error);
5731 } else {
5732 object_ =
5733 CL_(clCreateImageWithProperties)(context(), properties.data(), flags,
5734 &format, &desc, host_ptr, &error);
5735 }
5736
5737 detail::errHandler(error, __CREATE_IMAGE_ERR);
5738 if (err != nullptr) {
5739 *err = error;
5740 }
5742#endif //#if CL_HPP_TARGET_OPENCL_VERSION >= 300
5743
5745 Image3D() : Image() { }
5746
5750 * Defaults to false to maintain compatibility with
5751 * earlier versions.
5752 * See Memory for further details.
5753 */
5754 explicit Image3D(const cl_mem& image3D, bool retainObject = false) :
5755 Image(image3D, retainObject) { }
5756
5757 /*! \brief Assignment from cl_mem - performs shallow copy.
5758 *
5759 * See Memory for further details.
5760 */
5761 Image3D& operator = (const cl_mem& rhs)
5762 {
5763 Image::operator=(rhs);
5764 return *this;
5765 }
5766
5767};
5768
5769#if defined(CL_USE_DEPRECATED_OPENCL_1_1_APIS)
5774 * See Memory for details about copy semantics, etc.
5775 *
5776 * \see Memory
5777 */
5778class Image3DGL : public Image3D
5779{
5780public:
5782 * GL Texture.
5783 *
5784 * Wraps clCreateFromGLTexture3D().
5785 */
5786 Image3DGL(
5787 const Context& context,
5788 cl_mem_flags flags,
5789 cl_GLenum target,
5790 cl_GLint miplevel,
5791 cl_GLuint texobj,
5792 cl_int * err = nullptr)
5793 {
5794 cl_int error;
5795 object_ = CL_(clCreateFromGLTexture3D)(
5796 context(),
5797 flags,
5798 target,
5799 miplevel,
5800 texobj,
5801 &error);
5802
5803 detail::errHandler(error, __CREATE_GL_TEXTURE_3D_ERR);
5804 if (err != nullptr) {
5805 *err = error;
5807 }
5808
5810 Image3DGL() : Image3D() { }
5811
5815 * Defaults to false to maintain compatibility with
5816 * earlier versions.
5817 * See Memory for further details.
5818 */
5819 explicit Image3DGL(const cl_mem& image, bool retainObject = false) :
5820 Image3D(image, retainObject) { }
5821
5822 /*! \brief Assignment from cl_mem - performs shallow copy.
5823 *
5824 * See Memory for further details.
5825 */
5826 Image3DGL& operator = (const cl_mem& rhs)
5827 {
5828 Image3D::operator=(rhs);
5829 return *this;
5830 }
5831
5832};
5833#endif // CL_USE_DEPRECATED_OPENCL_1_1_APIS
5834
5835#if CL_HPP_TARGET_OPENCL_VERSION >= 120
5838 * We abstract the 2D and 3D GL images into a single instance here
5839 * that wraps all GL sourced images on the grounds that setup information
5840 * was performed by OpenCL anyway.
5841 */
5842class ImageGL : public Image
5843{
5844public:
5845 ImageGL(
5846 const Context& context,
5847 cl_mem_flags flags,
5848 cl_GLenum target,
5849 cl_GLint miplevel,
5850 cl_GLuint texobj,
5851 cl_int * err = nullptr)
5852 {
5853 cl_int error;
5854 object_ = CL_(clCreateFromGLTexture)(
5855 context(),
5856 flags,
5857 target,
5858 miplevel,
5859 texobj,
5860 &error);
5861
5862 detail::errHandler(error, __CREATE_GL_TEXTURE_ERR);
5863 if (err != nullptr) {
5864 *err = error;
5865 }
5866 }
5867
5868 ImageGL() : Image() { }
5869
5873 * Defaults to false to maintain compatibility with
5874 * earlier versions.
5875 * See Memory for further details.
5876 */
5877 explicit ImageGL(const cl_mem& image, bool retainObject = false) :
5878 Image(image, retainObject) { }
5879
5880 ImageGL& operator = (const cl_mem& rhs)
5881 {
5882 Image::operator=(rhs);
5883 return *this;
5884 }
5885
5886};
5887#endif // CL_HPP_TARGET_OPENCL_VERSION >= 120
5888
5889
5890
5891#if CL_HPP_TARGET_OPENCL_VERSION >= 200
5894* See Memory for details about copy semantics, etc.
5895*
5896* \see Memory
5897*/
5898class Pipe : public Memory
5899{
5900public:
5901
5907 * @param packet_size Size in bytes of a single packet of the pipe.
5908 * @param max_packets Number of packets that may be stored in the pipe.
5909 *
5910 */
5911 Pipe(
5912 const Context& context,
5913 cl_uint packet_size,
5914 cl_uint max_packets,
5915 cl_int* err = nullptr)
5916 {
5917 cl_int error;
5918
5919 cl_mem_flags flags = CL_MEM_READ_WRITE | CL_MEM_HOST_NO_ACCESS;
5920 object_ = CL_(clCreatePipe)(context(), flags, packet_size, max_packets, nullptr, &error);
5921
5922 detail::errHandler(error, __CREATE_PIPE_ERR);
5923 if (err != nullptr) {
5924 *err = error;
5925 }
5926 }
5927
5932 * @param packet_size Size in bytes of a single packet of the pipe.
5933 * @param max_packets Number of packets that may be stored in the pipe.
5934 *
5935 */
5936 Pipe(
5937 cl_uint packet_size,
5938 cl_uint max_packets,
5939 cl_int* err = nullptr)
5940 {
5941 cl_int error;
5942
5943 Context context = Context::getDefault(err);
5944
5945 cl_mem_flags flags = CL_MEM_READ_WRITE | CL_MEM_HOST_NO_ACCESS;
5946 object_ = CL_(clCreatePipe)(context(), flags, packet_size, max_packets, nullptr, &error);
5947
5948 detail::errHandler(error, __CREATE_PIPE_ERR);
5949 if (err != nullptr) {
5950 *err = error;
5952 }
5953
5955 Pipe() : Memory() { }
5956
5960 * Defaults to false to maintain compatibility with earlier versions.
5961 *
5962 * See Memory for further details.
5963 */
5964 explicit Pipe(const cl_mem& pipe, bool retainObject = false) :
5965 Memory(pipe, retainObject) { }
5966
5967 /*! \brief Assignment from cl_mem - performs shallow copy.
5968 *
5969 * See Memory for further details.
5970 */
5971 Pipe& operator = (const cl_mem& rhs)
5972 {
5973 Memory::operator=(rhs);
5974 return *this;
5975 }
5976
5978
5980 template <typename T>
5981 cl_int getInfo(cl_pipe_info name, T* param) const
5982 {
5983 return detail::errHandler(
5984 detail::getInfo(CL_(clGetPipeInfo), object_, name, param),
5985 __GET_PIPE_INFO_ERR);
5986 }
5989 template <cl_pipe_info name> typename
5991 getInfo(cl_int* err = nullptr) const
5992 {
5993 typename detail::param_traits<
5994 detail::cl_pipe_info, name>::param_type param;
5995 cl_int result = getInfo(name, &param);
5996 if (err != nullptr) {
5997 *err = result;
5998 }
5999 return param;
6000 }
6001}; // class Pipe
6002#endif // CL_HPP_TARGET_OPENCL_VERSION >= 200
6003
6004
6009 * clRetainSampler() and clReleaseSampler().
6010 *
6011 * \see cl_sampler
6012 */
6013class Sampler : public detail::Wrapper<cl_sampler>
6014{
6015public:
6017 Sampler() { }
6018
6019 /*! \brief Constructs a Sampler in a specified context.
6020 *
6021 * Wraps clCreateSampler().
6022 */
6023 Sampler(
6024 const Context& context,
6025 cl_bool normalized_coords,
6026 cl_addressing_mode addressing_mode,
6027 cl_filter_mode filter_mode,
6028 cl_int* err = nullptr)
6029 {
6030 cl_int error;
6031
6032#if CL_HPP_TARGET_OPENCL_VERSION >= 200
6033 cl_sampler_properties sampler_properties[] = {
6034 CL_SAMPLER_NORMALIZED_COORDS, normalized_coords,
6035 CL_SAMPLER_ADDRESSING_MODE, addressing_mode,
6036 CL_SAMPLER_FILTER_MODE, filter_mode,
6037 0 };
6038 object_ = CL_(clCreateSamplerWithProperties)(
6039 context(),
6040 sampler_properties,
6041 &error);
6042
6043 detail::errHandler(error, __CREATE_SAMPLER_WITH_PROPERTIES_ERR);
6044 if (err != nullptr) {
6045 *err = error;
6046 }
6047#else
6048 object_ = CL_(clCreateSampler)(
6049 context(),
6050 normalized_coords,
6051 addressing_mode,
6052 filter_mode,
6053 &error);
6054
6055 detail::errHandler(error, __CREATE_SAMPLER_ERR);
6056 if (err != nullptr) {
6057 *err = error;
6058 }
6059#endif
6060 }
6061
6066 * earlier versions.
6067 * This effectively transfers ownership of a refcount on the cl_sampler
6068 * into the new Sampler object.
6069 */
6070 explicit Sampler(const cl_sampler& sampler, bool retainObject = false) :
6071 detail::Wrapper<cl_type>(sampler, retainObject) { }
6072
6075 * This effectively transfers ownership of a refcount on the rhs and calls
6076 * clReleaseSampler() on the value previously held by this instance.
6077 */
6078 Sampler& operator = (const cl_sampler& rhs)
6079 {
6080 detail::Wrapper<cl_type>::operator=(rhs);
6081 return *this;
6082 }
6083
6085
6087 template <typename T>
6088 cl_int getInfo(cl_sampler_info name, T* param) const
6089 {
6090 return detail::errHandler(
6091 detail::getInfo(CL_(clGetSamplerInfo), object_, name, param),
6092 __GET_SAMPLER_INFO_ERR);
6093 }
6096 template <cl_sampler_info name> typename
6098 getInfo(cl_int* err = nullptr) const
6099 {
6100 typename detail::param_traits<
6101 detail::cl_sampler_info, name>::param_type param;
6102 cl_int result = getInfo(name, &param);
6103 if (err != nullptr) {
6104 *err = result;
6105 }
6106 return param;
6107 }
6108};
6109
6110class Program;
6111class CommandQueue;
6113class Kernel;
6114
6116class NDRange
6117{
6118private:
6119 size_type sizes_[3];
6120 cl_uint dimensions_;
6121
6122public:
6124 NDRange()
6125 : dimensions_(0)
6126 {
6127 sizes_[0] = 0;
6128 sizes_[1] = 0;
6129 sizes_[2] = 0;
6130 }
6131
6133 NDRange(size_type size0)
6134 : dimensions_(1)
6135 {
6136 sizes_[0] = size0;
6137 sizes_[1] = 1;
6138 sizes_[2] = 1;
6139 }
6140
6142 NDRange(size_type size0, size_type size1)
6143 : dimensions_(2)
6144 {
6145 sizes_[0] = size0;
6146 sizes_[1] = size1;
6147 sizes_[2] = 1;
6148 }
6149
6151 NDRange(size_type size0, size_type size1, size_type size2)
6152 : dimensions_(3)
6153 {
6154 sizes_[0] = size0;
6155 sizes_[1] = size1;
6156 sizes_[2] = size2;
6157 }
6158
6160 NDRange(array<size_type, 1> a) : NDRange(a[0]){}
6161
6163 NDRange(array<size_type, 2> a) : NDRange(a[0], a[1]){}
6164
6166 NDRange(array<size_type, 3> a) : NDRange(a[0], a[1], a[2]){}
6167
6168 /*! \brief Conversion operator to const size_type *.
6169 *
6170 * \returns a pointer to the size of the first dimension.
6171 */
6172 operator const size_type*() const {
6173 return sizes_;
6174 }
6175
6177 size_type dimensions() const
6178 {
6179 return dimensions_;
6181
6183 // runtime number of dimensions
6184 size_type size() const
6185 {
6186 return dimensions_*sizeof(size_type);
6187 }
6188
6189 size_type* get()
6190 {
6191 return sizes_;
6192 }
6193
6194 const size_type* get() const
6195 {
6196 return sizes_;
6197 }
6198};
6199
//! \brief A zero-dimensional range.
static const NDRange NullRange;

//! \brief Local address wrapper for use with Kernel::setArg.
struct LocalSpaceArg
{
    size_type size_;    // size in bytes of the local-memory allocation
};
6209namespace detail {
6210
6211template <typename T, class Enable = void>
6214// Enable for objects that are not subclasses of memory
6215// Pointers, constants etc
6216template <typename T>
6217struct KernelArgumentHandler<T, typename std::enable_if<!std::is_base_of<cl::Memory, T>::value>::type>
6218{
6219 static size_type size(const T&) { return sizeof(T); }
6220 static const T* ptr(const T& value) { return &value; }
6221};
6223// Enable for subclasses of memory where we want to get a reference to the cl_mem out
6224// and pass that in for safety
6225template <typename T>
6226struct KernelArgumentHandler<T, typename std::enable_if<std::is_base_of<cl::Memory, T>::value>::type>
6227{
6228 static size_type size(const T&) { return sizeof(cl_mem); }
6229 static const cl_mem* ptr(const T& value) { return &(value()); }
6230};
6232// Specialization for DeviceCommandQueue defined later
6233
6234template <>
6236{
6237 static size_type size(const LocalSpaceArg& value) { return value.size_; }
6238 static const void* ptr(const LocalSpaceArg&) { return nullptr; }
6239};
6240
6241}
6243
6244/*! Local
6245 * \brief Helper function for generating LocalSpaceArg objects.
6246 */
6247inline LocalSpaceArg
6248Local(size_type size)
6249{
6250 LocalSpaceArg ret = { size };
6251 return ret;
6252}
6253
6258 * clRetainKernel() and clReleaseKernel().
6259 *
6260 * \see cl_kernel
6261 */
6262class Kernel : public detail::Wrapper<cl_kernel>
6263{
6264public:
    // Construct a kernel by name from a built program; defined after Program.
    inline Kernel(const Program& program, const string& name, cl_int* err = nullptr);
    inline Kernel(const Program& program, const char* name, cl_int* err = nullptr);

    //! \brief Default constructor - initializes to nullptr.
    Kernel() { }

    /*! \brief Constructor from cl_kernel - takes ownership.
     *
     * \param retainObject will cause the constructor to retain its cl object.
     *                     Defaults to false to maintain compatibility with
     *                     earlier versions.
     * This effectively transfers ownership of a refcount on the cl_kernel
     * into the new Kernel object.
     */
    explicit Kernel(const cl_kernel& kernel, bool retainObject = false) :
        detail::Wrapper<cl_type>(kernel, retainObject) { }

    /*! \brief Assignment operator from cl_kernel - takes ownership.
     *
     * This effectively transfers ownership of a refcount on the rhs and calls
     * clReleaseKernel() on the value previously held by this instance.
     */
    Kernel& operator = (const cl_kernel& rhs)
    {
        detail::Wrapper<cl_type>::operator=(rhs);
        return *this;
    }
6292
6293
6294
6295
    //! \brief Wrapper for clGetKernelInfo().
    template <typename T>
    cl_int getInfo(cl_kernel_info name, T* param) const
    {
        return detail::errHandler(
            detail::getInfo(CL_(clGetKernelInfo), object_, name, param),
            __GET_KERNEL_INFO_ERR);
    }

    //! \brief Wrapper for clGetKernelInfo() that returns by value.
    template <cl_kernel_info name> typename
    detail::param_traits<detail::cl_kernel_info, name>::param_type
    getInfo(cl_int* err = nullptr) const
    {
        typename detail::param_traits<
            detail::cl_kernel_info, name>::param_type param;
        cl_int result = getInfo(name, &param);
        if (err != nullptr) {
            *err = result;
        }
        return param;
    }
6316
#if CL_HPP_TARGET_OPENCL_VERSION >= 120
    //! \brief Wrapper for clGetKernelArgInfo().
    template <typename T>
    cl_int getArgInfo(cl_uint argIndex, cl_kernel_arg_info name, T* param) const
    {
        return detail::errHandler(
            detail::getInfo(CL_(clGetKernelArgInfo), object_, argIndex, name, param),
            __GET_KERNEL_ARG_INFO_ERR);
    }

    //! \brief Wrapper for clGetKernelArgInfo() that returns by value.
    template <cl_kernel_arg_info name> typename
    detail::param_traits<detail::cl_kernel_arg_info, name>::param_type
    getArgInfo(cl_uint argIndex, cl_int* err = nullptr) const
    {
        typename detail::param_traits<
            detail::cl_kernel_arg_info, name>::param_type param;
        cl_int result = getArgInfo(argIndex, name, &param);
        if (err != nullptr) {
            *err = result;
        }
        return param;
    }
#endif // CL_HPP_TARGET_OPENCL_VERSION >= 120
6339
    //! \brief Wrapper for clGetKernelWorkGroupInfo().
    template <typename T>
    cl_int getWorkGroupInfo(
        const Device& device, cl_kernel_work_group_info name, T* param) const
    {
        return detail::errHandler(
            detail::getInfo(
                CL_(clGetKernelWorkGroupInfo), object_, device(), name, param),
            __GET_KERNEL_WORK_GROUP_INFO_ERR);
    }

    //! \brief Wrapper for clGetKernelWorkGroupInfo() that returns by value.
    template <cl_kernel_work_group_info name> typename
    detail::param_traits<detail::cl_kernel_work_group_info, name>::param_type
    getWorkGroupInfo(const Device& device, cl_int* err = nullptr) const
    {
        typename detail::param_traits<
            detail::cl_kernel_work_group_info, name>::param_type param;
        cl_int result = getWorkGroupInfo(device, name, &param);
        if (err != nullptr) {
            *err = result;
        }
        return param;
    }
6362
#if defined(CL_HPP_USE_CL_SUB_GROUPS_KHR) || CL_HPP_TARGET_OPENCL_VERSION >= 210
    //! \brief Wrapper for clGetKernelSubGroupInfo(). Uses the core entry
    //  point for OpenCL 2.1+, otherwise the KHR extension function pointer.
    cl_int getSubGroupInfo(const cl::Device &dev, cl_kernel_sub_group_info name, const cl::NDRange &range, size_type* param) const
    {
#if CL_HPP_TARGET_OPENCL_VERSION >= 210

        return detail::errHandler(
            CL_(clGetKernelSubGroupInfo)(object_, dev(), name, range.size(), range.get(), sizeof(size_type), param, nullptr),
            __GET_KERNEL_SUB_GROUP_INFO_ERR);

#else // #if CL_HPP_TARGET_OPENCL_VERSION >= 210

        // Lazily resolve the extension entry point once; the macro caches it
        // in the function-local static above.
        typedef clGetKernelSubGroupInfoKHR_fn PFN_clGetKernelSubGroupInfoKHR;
        static PFN_clGetKernelSubGroupInfoKHR pfn_clGetKernelSubGroupInfoKHR = nullptr;
        CL_HPP_INIT_CL_EXT_FCN_PTR_(clGetKernelSubGroupInfoKHR);

        return detail::errHandler(
            pfn_clGetKernelSubGroupInfoKHR(object_, dev(), name, range.size(), range.get(), sizeof(size_type), param, nullptr),
            __GET_KERNEL_SUB_GROUP_INFO_ERR);

#endif // #if CL_HPP_TARGET_OPENCL_VERSION >= 210
    }

    //! \brief Wrapper for clGetKernelSubGroupInfo() that returns by value.
    template <cl_kernel_sub_group_info name>
    size_type getSubGroupInfo(const cl::Device &dev, const cl::NDRange &range, cl_int* err = nullptr) const
    {
        size_type param;
        cl_int result = getSubGroupInfo(dev, name, range, &param);
        if (err != nullptr) {
            *err = result;
        }
        return param;
    }
#endif // defined(CL_HPP_USE_CL_SUB_GROUPS_KHR) || CL_HPP_TARGET_OPENCL_VERSION >= 210
6396
#if CL_HPP_TARGET_OPENCL_VERSION >= 200
    /*! \brief setArg overload taking a shared virtual memory smart pointer.
     */
    template<typename T, class D>
    cl_int setArg(cl_uint index, const cl::pointer<T, D> &argPtr)
    {
        return detail::errHandler(
            CL_(clSetKernelArgSVMPointer)(object_, index, argPtr.get()),
            __SET_KERNEL_ARGS_ERR);
    }

    /*! \brief setArg overload taking a vector whose storage is SVM-allocated;
     *  the data pointer is passed as an SVM pointer argument.
     */
    template<typename T, class Alloc>
    cl_int setArg(cl_uint index, const cl::vector<T, Alloc> &argPtr)
    {
        return detail::errHandler(
            CL_(clSetKernelArgSVMPointer)(object_, index, argPtr.data()),
            __SET_KERNEL_ARGS_ERR);
    }

    /*! \brief setArg overload taking a pointer type
     */
    template<typename T>
    typename std::enable_if<std::is_pointer<T>::value, cl_int>::type
    setArg(cl_uint index, const T argPtr)
    {
        return detail::errHandler(
            CL_(clSetKernelArgSVMPointer)(object_, index, argPtr),
            __SET_KERNEL_ARGS_ERR);
    }
#endif // #if CL_HPP_TARGET_OPENCL_VERSION >= 200
6429
6430 /*! \brief setArg overload taking a POD type
6431 */
6432 template <typename T>
6433 typename std::enable_if<!std::is_pointer<T>::value, cl_int>::type
6434 setArg(cl_uint index, const T &value)
6435 {
6436 return detail::errHandler(
6437 CL_(clSetKernelArg)(
6438 object_,
6439 index,
6442 __SET_KERNEL_ARGS_ERR);
6443 }
6444
6445 cl_int setArg(cl_uint index, size_type size, const void* argPtr)
6446 {
6447 return detail::errHandler(
6448 CL_(clSetKernelArg)(object_, index, size, argPtr),
6449 __SET_KERNEL_ARGS_ERR);
6450 }
6451
#if CL_HPP_TARGET_OPENCL_VERSION >= 200
    /*!
     * Specify a vector of SVM pointers that the kernel may access in
     * addition to its arguments.
     */
    cl_int setSVMPointers(const vector<void*> &pointerList)
    {
        return detail::errHandler(
            CL_(clSetKernelExecInfo)(
                object_,
                CL_KERNEL_EXEC_INFO_SVM_PTRS,
                sizeof(void*)*pointerList.size(),
                pointerList.data()));
    }

    /*!
     * Specify a std::array of SVM pointers that the kernel may access in
     * addition to its arguments.
     */
    template<int ArrayLength>
    cl_int setSVMPointers(const std::array<void*, ArrayLength> &pointerList)
    {
        return detail::errHandler(
            CL_(clSetKernelExecInfo)(
                object_,
                CL_KERNEL_EXEC_INFO_SVM_PTRS,
                sizeof(void*)*pointerList.size(),
                pointerList.data()));
    }

    /*! \brief Enable or disable use of fine-grained system SVM by the kernel.
     *
     * \param svmEnabled True if fine-grained system SVM is requested.
     *
     * Enabling this will fail (the runtime reports an error) if no devices
     * in the context support fine-grained system SVM.
     *
     * \see clSetKernelExecInfo
     */
    cl_int enableFineGrainedSystemSVM(bool svmEnabled)
    {
        cl_bool svmEnabled_ = svmEnabled ? CL_TRUE : CL_FALSE;
        return detail::errHandler(
            CL_(clSetKernelExecInfo)(
                object_,
                CL_KERNEL_EXEC_INFO_SVM_FINE_GRAIN_SYSTEM,
                sizeof(cl_bool),
                &svmEnabled_
                )
            );
    }

    // Recursive helpers that flatten a mixed pack of cl::pointer and raw
    // pointers into the void* array passed to clSetKernelExecInfo.
    template<int index, int ArrayLength, class D, typename T0, typename T1, typename... Ts>
    void setSVMPointersHelper(std::array<void*, ArrayLength> &pointerList, const pointer<T0, D> &t0, const pointer<T1, D> &t1, Ts & ... ts)
    {
        pointerList[index] = static_cast<void*>(t0.get());
        setSVMPointersHelper<index + 1, ArrayLength>(pointerList, t1, ts...);
    }

    // Overload selected when the head of the pack is a raw pointer.
    template<int index, int ArrayLength, typename T0, typename T1, typename... Ts>
    typename std::enable_if<std::is_pointer<T0>::value, void>::type
    setSVMPointersHelper(std::array<void*, ArrayLength> &pointerList, T0 t0, T1 t1, Ts... ts)
    {
        pointerList[index] = static_cast<void*>(t0);
        setSVMPointersHelper<index + 1, ArrayLength>(pointerList, t1, ts...);
    }

    // Base case: single remaining cl::pointer.
    template<int index, int ArrayLength, typename T0, class D>
    void setSVMPointersHelper(std::array<void*, ArrayLength> &pointerList, const pointer<T0, D> &t0)
    {
        pointerList[index] = static_cast<void*>(t0.get());
    }


    // Base case: single remaining raw pointer.
    template<int index, int ArrayLength, typename T0>
    typename std::enable_if<std::is_pointer<T0>::value, void>::type
    setSVMPointersHelper(std::array<void*, ArrayLength> &pointerList, T0 t0)
    {
        pointerList[index] = static_cast<void*>(t0);
    }

    /*!
     * Variadic overload: accepts any mix of cl::pointer and raw pointers and
     * registers them all as SVM pointers the kernel may access.
     */
    template<typename T0, typename... Ts>
    cl_int setSVMPointers(const T0 &t0, Ts & ... ts)
    {
        std::array<void*, 1 + sizeof...(Ts)> pointerList;

        setSVMPointersHelper<0, 1 + sizeof...(Ts)>(pointerList, t0, ts...);
        return detail::errHandler(
            CL_(clSetKernelExecInfo)(
                object_,
                CL_KERNEL_EXEC_INFO_SVM_PTRS,
                sizeof(void*)*(1 + sizeof...(Ts)),
                pointerList.data()));
    }

    //! \brief Generic wrapper for clSetKernelExecInfo() with a typed value.
    template<typename T>
    cl_int setExecInfo(cl_kernel_exec_info param_name, const T& val)
    {
        return detail::errHandler(
            CL_(clSetKernelExecInfo)(
                object_,
                param_name,
                sizeof(T),
                &val));
    }

    //! \brief Wrapper for clSetKernelExecInfo() deducing the value type
    //  from the compile-time parameter name.
    template<cl_kernel_exec_info name>
    cl_int setExecInfo(typename detail::param_traits<detail::cl_kernel_exec_info, name>::param_type& val)
    {
        return setExecInfo(name, val);
    }
#endif // #if CL_HPP_TARGET_OPENCL_VERSION >= 200
6566
#if CL_HPP_TARGET_OPENCL_VERSION >= 210
    /*!
     * Make a deep copy of the kernel object including its arguments.
     * @return A new kernel object with internal state entirely separate from that
     *         of the original but with any arguments set on the original intact.
     */
    Kernel clone()
    {
        cl_int error;
        Kernel retValue(CL_(clCloneKernel)(this->get(), &error));

        detail::errHandler(error, __CLONE_KERNEL_ERR);
        return retValue;
    }
#endif // #if CL_HPP_TARGET_OPENCL_VERSION >= 210
6582};
6587class Program : public detail::Wrapper<cl_program>
6588{
6589public:
6590#if !defined(CL_HPP_ENABLE_PROGRAM_CONSTRUCTION_FROM_ARRAY_COMPATIBILITY)
6591 typedef vector<vector<unsigned char>> Binaries;
6592 typedef vector<string> Sources;
6593#else // #if !defined(CL_HPP_ENABLE_PROGRAM_CONSTRUCTION_FROM_ARRAY_COMPATIBILITY)
6594 typedef vector<std::pair<const void*, size_type> > Binaries;
6595 typedef vector<std::pair<const char*, size_type> > Sources;
6596#endif // #if !defined(CL_HPP_ENABLE_PROGRAM_CONSTRUCTION_FROM_ARRAY_COMPATIBILITY)
6597
6598 Program(
6599 const string& source,
6600 bool build = false,
6601 cl_int* err = nullptr)
6602 {
6603 cl_int error;
6604
6605 const char * strings = source.c_str();
6606 const size_type length = source.size();
6607
6608 Context context = Context::getDefault(err);
6609
6610 object_ = CL_(clCreateProgramWithSource)(
6611 context(), (cl_uint)1, &strings, &length, &error);
6612
6613 detail::errHandler(error, __CREATE_PROGRAM_WITH_SOURCE_ERR);
6614
6615 if (error == CL_SUCCESS && build) {
6616
6617 error = CL_(clBuildProgram)(
6618 object_,
6619 0,
6620 nullptr,
6621#if !defined(CL_HPP_CL_1_2_DEFAULT_BUILD)
6622 "-cl-std=CL2.0",
6623#else
6624 "",
6625#endif // #if !defined(CL_HPP_CL_1_2_DEFAULT_BUILD)
6626 nullptr,
6627 nullptr);
6628
6629 detail::buildErrHandler(error, __BUILD_PROGRAM_ERR, getBuildInfo<CL_PROGRAM_BUILD_LOG>());
6630 }
6631
6632 if (err != nullptr) {
6633 *err = error;
6634 }
6635 }
6636
    //! \brief Construct a program from a single source string for a specific
    //! context, optionally building it immediately for all devices.
    //!
    //! \param context Context to create the program in.
    //! \param source  OpenCL C source text.
    //! \param build   When true, clBuildProgram is invoked after creation.
    //! \param err     Optional out-parameter receiving the resulting error code.
    Program(
        const Context& context,
        const string& source,
        bool build = false,
        cl_int* err = nullptr)
    {
        cl_int error;

        const char * strings = source.c_str();
        const size_type length = source.size();

        object_ = CL_(clCreateProgramWithSource)(
            context(), (cl_uint)1, &strings, &length, &error);

        detail::errHandler(error, __CREATE_PROGRAM_WITH_SOURCE_ERR);

        if (error == CL_SUCCESS && build) {
            error = CL_(clBuildProgram)(
                object_,
                0,
                nullptr,
#if !defined(CL_HPP_CL_1_2_DEFAULT_BUILD)
                "-cl-std=CL2.0",
#else
                "",
#endif // #if !defined(CL_HPP_CL_1_2_DEFAULT_BUILD)
                nullptr,
                nullptr);

            // Attach per-device build logs to any build failure report.
            detail::buildErrHandler(error, __BUILD_PROGRAM_ERR, getBuildInfo<CL_PROGRAM_BUILD_LOG>());
        }

        if (err != nullptr) {
            *err = error;
        }
    }
6673
    /**
     * Create a program from a vector of source strings and the default context.
     * Does not compile or link the program.
     *
     * \param sources Vector of source strings (or, in array-compatibility
     *                mode, (pointer, length) pairs).
     * \param err     Optional out-parameter receiving the resulting error code.
     */
    Program(
        const Sources& sources,
        cl_int* err = nullptr)
    {
        cl_int error;
        Context context = Context::getDefault(err);

        const size_type n = (size_type)sources.size();

        // Parallel arrays of string pointers and lengths for the C API.
        vector<size_type> lengths(n);
        vector<const char*> strings(n);

        for (size_type i = 0; i < n; ++i) {
#if !defined(CL_HPP_ENABLE_PROGRAM_CONSTRUCTION_FROM_ARRAY_COMPATIBILITY)
            // Sources elements are strings.
            strings[i] = sources[(int)i].data();
            lengths[i] = sources[(int)i].length();
#else // #if !defined(CL_HPP_ENABLE_PROGRAM_CONSTRUCTION_FROM_ARRAY_COMPATIBILITY)
            // Compatibility mode: Sources elements are (pointer, length) pairs.
            strings[i] = sources[(int)i].first;
            lengths[i] = sources[(int)i].second;
#endif // #if !defined(CL_HPP_ENABLE_PROGRAM_CONSTRUCTION_FROM_ARRAY_COMPATIBILITY)
        }

        object_ = CL_(clCreateProgramWithSource)(
            context(), (cl_uint)n, strings.data(), lengths.data(), &error);

        detail::errHandler(error, __CREATE_PROGRAM_WITH_SOURCE_ERR);
        if (err != nullptr) {
            *err = error;
        }
    }
6708
    /**
     * Create a program from a vector of source strings and a provided context.
     * Does not compile or link the program.
     *
     * \param context Context to create the program in.
     * \param sources Vector of source strings (or, in array-compatibility
     *                mode, (pointer, length) pairs).
     * \param err     Optional out-parameter receiving the resulting error code.
     */
    Program(
        const Context& context,
        const Sources& sources,
        cl_int* err = nullptr)
    {
        cl_int error;

        const size_type n = (size_type)sources.size();

        // Parallel arrays of string pointers and lengths for the C API.
        vector<size_type> lengths(n);
        vector<const char*> strings(n);

        for (size_type i = 0; i < n; ++i) {
#if !defined(CL_HPP_ENABLE_PROGRAM_CONSTRUCTION_FROM_ARRAY_COMPATIBILITY)
            // Sources elements are strings.
            strings[i] = sources[(int)i].data();
            lengths[i] = sources[(int)i].length();
#else // #if !defined(CL_HPP_ENABLE_PROGRAM_CONSTRUCTION_FROM_ARRAY_COMPATIBILITY)
            // Compatibility mode: Sources elements are (pointer, length) pairs.
            strings[i] = sources[(int)i].first;
            lengths[i] = sources[(int)i].second;
#endif // #if !defined(CL_HPP_ENABLE_PROGRAM_CONSTRUCTION_FROM_ARRAY_COMPATIBILITY)
        }

        object_ = CL_(clCreateProgramWithSource)(
            context(), (cl_uint)n, strings.data(), lengths.data(), &error);

        detail::errHandler(error, __CREATE_PROGRAM_WITH_SOURCE_ERR);
        if (err != nullptr) {
            *err = error;
        }
    }
6743
6744#if defined(CL_HPP_USE_IL_KHR) || CL_HPP_TARGET_OPENCL_VERSION >= 210
    /**
     * Program constructor to allow construction of program from SPIR-V or another IL.
     *
     * Requires OpenCL 2.1 or newer or the cl_khr_il_program extension.
     */
    Program(
        const vector<char>& IL,
        bool build = false,
        cl_int* err = nullptr)
    {
        cl_int error;

        // Uses the process-wide default context.
        Context context = Context::getDefault(err);

#if CL_HPP_TARGET_OPENCL_VERSION >= 210

        // Core entry point, available from OpenCL 2.1 onwards.
        object_ = CL_(clCreateProgramWithIL)(
            context(), static_cast<const void*>(IL.data()), IL.size(), &error);

#else // #if CL_HPP_TARGET_OPENCL_VERSION >= 210

        // Pre-2.1 targets: resolve the cl_khr_il_program extension entry point.
        typedef clCreateProgramWithILKHR_fn PFN_clCreateProgramWithILKHR;
        static PFN_clCreateProgramWithILKHR pfn_clCreateProgramWithILKHR = nullptr;
        CL_HPP_INIT_CL_EXT_FCN_PTR_(clCreateProgramWithILKHR);

        object_ = pfn_clCreateProgramWithILKHR(
            context(), static_cast<const void*>(IL.data()), IL.size(), &error);

#endif // #if CL_HPP_TARGET_OPENCL_VERSION >= 210

        detail::errHandler(error, __CREATE_PROGRAM_WITH_IL_ERR);

        // Optionally build immediately for all devices in the context.
        if (error == CL_SUCCESS && build) {

            error = CL_(clBuildProgram)(
                object_,
                0,
                nullptr,
#if !defined(CL_HPP_CL_1_2_DEFAULT_BUILD)
                "-cl-std=CL2.0",
#else
                "",
#endif // #if !defined(CL_HPP_CL_1_2_DEFAULT_BUILD)
                nullptr,
                nullptr);

            detail::buildErrHandler(error, __BUILD_PROGRAM_ERR, getBuildInfo<CL_PROGRAM_BUILD_LOG>());
        }

        if (err != nullptr) {
            *err = error;
        }
    }
6798
    /**
     * Program constructor to allow construction of program from SPIR-V or another IL
     * for a specific context.
     *
     * Requires OpenCL 2.1 or newer or the cl_khr_il_program extension.
     */
    Program(
        const Context& context,
        const vector<char>& IL,
        bool build = false,
        cl_int* err = nullptr)
    {
        cl_int error;

#if CL_HPP_TARGET_OPENCL_VERSION >= 210

        // Core entry point, available from OpenCL 2.1 onwards.
        object_ = CL_(clCreateProgramWithIL)(
            context(), static_cast<const void*>(IL.data()), IL.size(), &error);

#else // #if CL_HPP_TARGET_OPENCL_VERSION >= 210

        // Pre-2.1 targets: resolve the cl_khr_il_program extension entry point.
        typedef clCreateProgramWithILKHR_fn PFN_clCreateProgramWithILKHR;
        static PFN_clCreateProgramWithILKHR pfn_clCreateProgramWithILKHR = nullptr;
        CL_HPP_INIT_CL_EXT_FCN_PTR_(clCreateProgramWithILKHR);

        object_ = pfn_clCreateProgramWithILKHR(
            context(), static_cast<const void*>(IL.data()), IL.size(), &error);

#endif // #if CL_HPP_TARGET_OPENCL_VERSION >= 210

        detail::errHandler(error, __CREATE_PROGRAM_WITH_IL_ERR);

        // Optionally build immediately for all devices in the context.
        if (error == CL_SUCCESS && build) {
            error = CL_(clBuildProgram)(
                object_,
                0,
                nullptr,
#if !defined(CL_HPP_CL_1_2_DEFAULT_BUILD)
                "-cl-std=CL2.0",
#else
                "",
#endif // #if !defined(CL_HPP_CL_1_2_DEFAULT_BUILD)
                nullptr,
                nullptr);

            detail::buildErrHandler(error, __BUILD_PROGRAM_ERR, getBuildInfo<CL_PROGRAM_BUILD_LOG>());
        }

        if (err != nullptr) {
            *err = error;
        }
    }
6851#endif // defined(CL_HPP_USE_IL_KHR) || CL_HPP_TARGET_OPENCL_VERSION >= 210
6852
    /**
     * Construct a program object from pre-built program binaries, one binary
     * per device. Possible errors include:
     * CL_INVALID_DEVICE if OpenCL devices listed in devices are not in the list of devices associated with context.
     * CL_INVALID_BINARY if an invalid program binary was encountered for any device. binaryStatus will return specific status for each device.
     * CL_OUT_OF_HOST_MEMORY if there is a failure to allocate resources required by the OpenCL implementation on the host.
     */
6872 Program(
6873 const Context& context,
6874 const vector<Device>& devices,
6875 const Binaries& binaries,
6876 vector<cl_int>* binaryStatus = nullptr,
6877 cl_int* err = nullptr)
6878 {
6879 cl_int error;
6880
6881 const size_type numDevices = devices.size();
6882
6883 // Catch size mismatch early and return
6884 if(binaries.size() != numDevices) {
6885 error = CL_INVALID_VALUE;
6886 detail::errHandler(error, __CREATE_PROGRAM_WITH_BINARY_ERR);
6887 if (err != nullptr) {
6888 *err = error;
6889 }
6890 return;
6891 }
6892
6893 vector<size_type> lengths(numDevices);
6894 vector<const unsigned char*> images(numDevices);
6895#if !defined(CL_HPP_ENABLE_PROGRAM_CONSTRUCTION_FROM_ARRAY_COMPATIBILITY)
6896 for (size_type i = 0; i < numDevices; ++i) {
6897 images[i] = binaries[i].data();
6898 lengths[i] = binaries[(int)i].size();
6899 }
6900#else // #if !defined(CL_HPP_ENABLE_PROGRAM_CONSTRUCTION_FROM_ARRAY_COMPATIBILITY)
6901 for (size_type i = 0; i < numDevices; ++i) {
6902 images[i] = (const unsigned char*)binaries[i].first;
6903 lengths[i] = binaries[(int)i].second;
6904 }
6905#endif // #if !defined(CL_HPP_ENABLE_PROGRAM_CONSTRUCTION_FROM_ARRAY_COMPATIBILITY)
6906
6907 vector<cl_device_id> deviceIDs(numDevices);
6908 for( size_type deviceIndex = 0; deviceIndex < numDevices; ++deviceIndex ) {
6909 deviceIDs[deviceIndex] = (devices[deviceIndex])();
6910 }
6911
6912 if(binaryStatus) {
6913 binaryStatus->resize(numDevices);
6914 }
6915
6916 object_ = CL_(clCreateProgramWithBinary)(
6917 context(), (cl_uint) devices.size(),
6918 deviceIDs.data(),
6919 lengths.data(), images.data(), (binaryStatus != nullptr && numDevices > 0)
6920 ? &binaryStatus->front()
6921 : nullptr, &error);
6922
6923 detail::errHandler(error, __CREATE_PROGRAM_WITH_BINARY_ERR);
6924 if (err != nullptr) {
6925 *err = error;
6926 }
6927 }
6928
6929
6930#if CL_HPP_TARGET_OPENCL_VERSION >= 120
    /**
     * Create program using builtin kernels.
     * \param kernelNames Semi-colon separated list of builtin kernel names
     * \param err Optional out-parameter receiving the resulting error code.
     */
    Program(
        const Context& context,
        const vector<Device>& devices,
        const string& kernelNames,
        cl_int* err = nullptr)
    {
        cl_int error;


        // Flatten the device wrappers into raw handles for the C API.
        size_type numDevices = devices.size();
        vector<cl_device_id> deviceIDs(numDevices);
        for( size_type deviceIndex = 0; deviceIndex < numDevices; ++deviceIndex ) {
            deviceIDs[deviceIndex] = (devices[deviceIndex])();
        }

        object_ = CL_(clCreateProgramWithBuiltInKernels)(
            context(),
            (cl_uint) devices.size(),
            deviceIDs.data(),
            kernelNames.c_str(),
            &error);

        detail::errHandler(error, __CREATE_PROGRAM_WITH_BUILT_IN_KERNELS_ERR);
        if (err != nullptr) {
            *err = error;
        }
    }
6962#endif // CL_HPP_TARGET_OPENCL_VERSION >= 120
6963
    //! \brief Default constructor - initializes to a null (empty) program object.
    Program() { }
6965
6966
    /*! \brief Constructor from a raw cl_program handle.
     *
     * \param retainObject will cause the constructor to retain its cl object.
     *                     Defaults to false to maintain compatibility with
     *                     earlier versions.
     */
6973 explicit Program(const cl_program& program, bool retainObject = false) :
6974 detail::Wrapper<cl_type>(program, retainObject) { }
6975
    //! \brief Assign from a raw cl_program handle; the Wrapper assignment
    //! releases any program currently held by this object.
    Program& operator = (const cl_program& rhs)
    {
        detail::Wrapper<cl_type>::operator=(rhs);
        return *this;
    }
6981
6982 cl_int build(
6983 const vector<Device>& devices,
6984 const string& options,
6985 void (CL_CALLBACK * notifyFptr)(cl_program, void *) = nullptr,
6986 void* data = nullptr) const
6987 {
6988 return build(devices, options.c_str(), notifyFptr, data);
6989 }
6990
6991 cl_int build(
6992 const vector<Device>& devices,
6993 const char* options = nullptr,
6994 void (CL_CALLBACK * notifyFptr)(cl_program, void *) = nullptr,
6995 void* data = nullptr) const
6996 {
6997 size_type numDevices = devices.size();
6998 vector<cl_device_id> deviceIDs(numDevices);
6999
7000 for( size_type deviceIndex = 0; deviceIndex < numDevices; ++deviceIndex ) {
7001 deviceIDs[deviceIndex] = (devices[deviceIndex])();
7002 }
7003
7004 cl_int buildError = CL_(clBuildProgram)(
7005 object_,
7006 (cl_uint)
7007 devices.size(),
7008 deviceIDs.data(),
7009 options,
7010 notifyFptr,
7011 data);
7012
7013 return detail::buildErrHandler(buildError, __BUILD_PROGRAM_ERR, getBuildInfo<CL_PROGRAM_BUILD_LOG>());
7014 }
7015
7016 cl_int build(
7017 const Device& device,
7018 const string& options,
7019 void (CL_CALLBACK * notifyFptr)(cl_program, void *) = nullptr,
7020 void* data = nullptr) const
7021 {
7022 return build(device, options.c_str(), notifyFptr, data);
7023 }
7024
7025 cl_int build(
7026 const Device& device,
7027 const char* options = nullptr,
7028 void (CL_CALLBACK * notifyFptr)(cl_program, void *) = nullptr,
7029 void* data = nullptr) const
7030 {
7031 cl_device_id deviceID = device();
7032
7033 cl_int buildError = CL_(clBuildProgram)(
7034 object_,
7035 1,
7036 &deviceID,
7037 options,
7038 notifyFptr,
7039 data);
7040
7041 BuildLogType buildLog(0);
7042 buildLog.push_back(std::make_pair(device, getBuildInfo<CL_PROGRAM_BUILD_LOG>(device)));
7043 return detail::buildErrHandler(buildError, __BUILD_PROGRAM_ERR, buildLog);
7044 }
7045
7046 cl_int build(
7047 const string& options,
7048 void (CL_CALLBACK * notifyFptr)(cl_program, void *) = nullptr,
7049 void* data = nullptr) const
7050 {
7051 return build(options.c_str(), notifyFptr, data);
7052 }
7053
7054 cl_int build(
7055 const char* options = nullptr,
7056 void (CL_CALLBACK * notifyFptr)(cl_program, void *) = nullptr,
7057 void* data = nullptr) const
7058 {
7059 cl_int buildError = CL_(clBuildProgram)(
7060 object_,
7061 0,
7062 nullptr,
7063 options,
7064 notifyFptr,
7065 data);
7066
7067 return detail::buildErrHandler(buildError, __BUILD_PROGRAM_ERR, getBuildInfo<CL_PROGRAM_BUILD_LOG>());
7068 }
7069
7070#if CL_HPP_TARGET_OPENCL_VERSION >= 120
7071 cl_int compile(
7072 const string& options,
7073 void (CL_CALLBACK * notifyFptr)(cl_program, void *) = nullptr,
7074 void* data = nullptr) const
7075 {
7076 return compile(options.c_str(), notifyFptr, data);
7077 }
7078
7079 cl_int compile(
7080 const char* options = nullptr,
7081 void (CL_CALLBACK * notifyFptr)(cl_program, void *) = nullptr,
7082 void* data = nullptr) const
7083 {
7084 cl_int error = CL_(clCompileProgram)(
7085 object_,
7086 0,
7087 nullptr,
7088 options,
7089 0,
7090 nullptr,
7091 nullptr,
7092 notifyFptr,
7093 data);
7094 return detail::buildErrHandler(error, __COMPILE_PROGRAM_ERR, getBuildInfo<CL_PROGRAM_BUILD_LOG>());
7095 }
7096
7097 cl_int compile(
7098 const string& options,
7099 const vector<Program>& inputHeaders,
7100 const vector<string>& headerIncludeNames,
7101 void (CL_CALLBACK * notifyFptr)(cl_program, void *) = nullptr,
7102 void* data = nullptr) const
7103 {
7104 return compile(options.c_str(), inputHeaders, headerIncludeNames, notifyFptr, data);
7105 }
7106
7107 cl_int compile(
7108 const char* options,
7109 const vector<Program>& inputHeaders,
7110 const vector<string>& headerIncludeNames,
7111 void (CL_CALLBACK * notifyFptr)(cl_program, void *) = nullptr,
7112 void* data = nullptr) const
7113 {
7114 static_assert(sizeof(cl::Program) == sizeof(cl_program),
7115 "Size of cl::Program must be equal to size of cl_program");
7116 vector<const char*> headerIncludeNamesCStr;
7117 for(const string& name: headerIncludeNames) {
7118 headerIncludeNamesCStr.push_back(name.c_str());
7119 }
7120 cl_int error = CL_(clCompileProgram)(
7121 object_,
7122 0,
7123 nullptr,
7124 options,
7125 static_cast<cl_uint>(inputHeaders.size()),
7126 reinterpret_cast<const cl_program*>(inputHeaders.data()),
7127 reinterpret_cast<const char**>(headerIncludeNamesCStr.data()),
7128 notifyFptr,
7129 data);
7130 return detail::buildErrHandler(error, __COMPILE_PROGRAM_ERR, getBuildInfo<CL_PROGRAM_BUILD_LOG>());
7131 }
7132
7133 cl_int compile(
7134 const string& options,
7135 const vector<Device>& deviceList,
7136 const vector<Program>& inputHeaders = vector<Program>(),
7137 const vector<string>& headerIncludeNames = vector<string>(),
7138 void (CL_CALLBACK * notifyFptr)(cl_program, void *) = nullptr,
7139 void* data = nullptr) const
7140 {
7141 return compile(options.c_str(), deviceList, inputHeaders, headerIncludeNames, notifyFptr, data);
7142 }
7143
7144 cl_int compile(
7145 const char* options,
7146 const vector<Device>& deviceList,
7147 const vector<Program>& inputHeaders = vector<Program>(),
7148 const vector<string>& headerIncludeNames = vector<string>(),
7149 void (CL_CALLBACK * notifyFptr)(cl_program, void *) = nullptr,
7150 void* data = nullptr) const
7151 {
7152 static_assert(sizeof(cl::Program) == sizeof(cl_program),
7153 "Size of cl::Program must be equal to size of cl_program");
7154 vector<const char*> headerIncludeNamesCStr;
7155 for(const string& name: headerIncludeNames) {
7156 headerIncludeNamesCStr.push_back(name.c_str());
7157 }
7158 vector<cl_device_id> deviceIDList;
7159 for(const Device& device: deviceList) {
7160 deviceIDList.push_back(device());
7161 }
7162 cl_int error = CL_(clCompileProgram)(
7163 object_,
7164 static_cast<cl_uint>(deviceList.size()),
7165 reinterpret_cast<const cl_device_id*>(deviceIDList.data()),
7166 options,
7167 static_cast<cl_uint>(inputHeaders.size()),
7168 reinterpret_cast<const cl_program*>(inputHeaders.data()),
7169 reinterpret_cast<const char**>(headerIncludeNamesCStr.data()),
7170 notifyFptr,
7171 data);
7172 return detail::buildErrHandler(error, __COMPILE_PROGRAM_ERR, getBuildInfo<CL_PROGRAM_BUILD_LOG>());
7173 }
7174#endif // CL_HPP_TARGET_OPENCL_VERSION >= 120
7175
    //! \brief Query program information (wraps clGetProgramInfo).
    //! \param name  cl_program_info enumerant to query.
    //! \param param Out-parameter receiving the value.
    //! \return CL_SUCCESS or an OpenCL error code.
    template <typename T>
    cl_int getInfo(cl_program_info name, T* param) const
    {
        return detail::errHandler(
            detail::getInfo(CL_(clGetProgramInfo), object_, name, param),
            __GET_PROGRAM_INFO_ERR);
    }
7183
    //! \brief Query program information, returning the value by type deduced
    //! from the compile-time enumerant.
    //! \param err Optional out-parameter receiving the error code.
    template <cl_program_info name> typename
    detail::param_traits<detail::cl_program_info, name>::param_type
    getInfo(cl_int* err = nullptr) const
    {
        typename detail::param_traits<
            detail::cl_program_info, name>::param_type param;
        cl_int result = getInfo(name, &param);
        if (err != nullptr) {
            *err = result;
        }
        return param;
    }
7196
    //! \brief Query build information for one device
    //! (wraps clGetProgramBuildInfo).
    template <typename T>
    cl_int getBuildInfo(
        const Device& device, cl_program_build_info name, T* param) const
    {
        return detail::errHandler(
            detail::getInfo(
                CL_(clGetProgramBuildInfo), object_, device(), name, param),
            __GET_PROGRAM_BUILD_INFO_ERR);
    }
7206
    //! \brief Query build information for one device, returning the value by
    //! type deduced from the compile-time enumerant.
    //! \param err Optional out-parameter receiving the error code.
    template <cl_program_build_info name> typename
    detail::param_traits<detail::cl_program_build_info, name>::param_type
    getBuildInfo(const Device& device, cl_int* err = nullptr) const
    {
        typename detail::param_traits<
            detail::cl_program_build_info, name>::param_type param;
        cl_int result = getBuildInfo(device, name, &param);
        if (err != nullptr) {
            *err = result;
        }
        return param;
    }
7219
    /**
     * Build info for all devices associated with the program, returned as a
     * vector of (device, info) pairs.
     * On an error reading the info for any device, an empty vector of info will be returned.
     */
7225 template <cl_program_build_info name>
7226 vector<std::pair<cl::Device, typename detail::param_traits<detail::cl_program_build_info, name>::param_type>>
7227 getBuildInfo(cl_int *err = nullptr) const
7228 {
7229 cl_int result = CL_SUCCESS;
7230
7231 auto devs = getInfo<CL_PROGRAM_DEVICES>(&result);
7232 vector<std::pair<cl::Device, typename detail::param_traits<detail::cl_program_build_info, name>::param_type>>
7233 devInfo;
7234
7235 // If there was an initial error from getInfo return the error
7236 if (result != CL_SUCCESS) {
7237 if (err != nullptr) {
7238 *err = result;
7239 }
7240 return devInfo;
7241 }
7242
7243 for (const cl::Device &d : devs) {
7244 typename detail::param_traits<
7245 detail::cl_program_build_info, name>::param_type param;
7246 result = getBuildInfo(d, name, &param);
7247 devInfo.push_back(
7249 (d, param));
7250 if (result != CL_SUCCESS) {
7251 // On error, leave the loop and return the error code
7252 break;
7253 }
7254 }
7255 if (err != nullptr) {
7256 *err = result;
7257 }
7258 if (result != CL_SUCCESS) {
7259 devInfo.clear();
7260 }
7261 return devInfo;
7262 }
7263
7264 cl_int createKernels(vector<Kernel>* kernels)
7265 {
7266 cl_uint numKernels;
7267 cl_int err = CL_(clCreateKernelsInProgram)(object_, 0, nullptr, &numKernels);
7268 if (err != CL_SUCCESS) {
7269 return detail::errHandler(err, __CREATE_KERNELS_IN_PROGRAM_ERR);
7270 }
7271
7272 vector<cl_kernel> value(numKernels);
7273
7274 err = CL_(clCreateKernelsInProgram)(
7275 object_, numKernels, value.data(), nullptr);
7276 if (err != CL_SUCCESS) {
7277 return detail::errHandler(err, __CREATE_KERNELS_IN_PROGRAM_ERR);
7278 }
7279
7280 if (kernels) {
7281 kernels->resize(value.size());
7282
7283 // Assign to param, constructing with retain behaviour
7284 // to correctly capture each underlying CL object
7285 for (size_type i = 0; i < value.size(); i++) {
7286 // We do not need to retain because this kernel is being created
7287 // by the runtime
7288 (*kernels)[i] = Kernel(value[i], false);
7289 }
7290 }
7291 return CL_SUCCESS;
7292 }
7293
7294#if CL_HPP_TARGET_OPENCL_VERSION >= 220
7295#if defined(CL_USE_DEPRECATED_OPENCL_2_2_APIS)
    /*! \brief Registers a callback to be invoked when the program is released.
     *
     * Each call to this function registers the specified user callback function
     * on a callback stack associated with program. The registered user callback
     * functions are called in the reverse order in which they were registered.
     */
    CL_API_PREFIX__VERSION_2_2_DEPRECATED cl_int setReleaseCallback(
        void (CL_CALLBACK * pfn_notify)(cl_program program, void * user_data),
        void * user_data = nullptr) CL_API_SUFFIX__VERSION_2_2_DEPRECATED
    {
        // Deprecated with OpenCL 2.2; wraps clSetProgramReleaseCallback.
        return detail::errHandler(
            CL_(clSetProgramReleaseCallback)(
                object_,
                pfn_notify,
                user_data),
            __SET_PROGRAM_RELEASE_CALLBACK_ERR);
    }
7317#endif // #if defined(CL_USE_DEPRECATED_OPENCL_2_2_APIS)
7318
    /*! \brief Sets a SPIR-V specialization constant.
     *
     * Wraps clSetProgramSpecializationConstant().
     */
    // Enabled only for non-pointer T: the constant's object representation
    // (sizeof(value) bytes) is passed directly to the runtime.
    template <typename T>
    typename std::enable_if<!std::is_pointer<T>::value, cl_int>::type
    setSpecializationConstant(cl_uint index, const T &value)
    {
        return detail::errHandler(
            CL_(clSetProgramSpecializationConstant)(
                object_,
                index,
                sizeof(value),
                &value),
            __SET_PROGRAM_SPECIALIZATION_CONSTANT_ERR);
    }
7335
    /*! \brief Sets a SPIR-V specialization constant.
     *
     * Wraps clSetProgramSpecializationConstant().
     *
     * \param index SpecId of the constant in the SPIR-V module.
     * \param size  Size in bytes of the value.
     * \param value Pointer to the value's bytes.
     */
    cl_int setSpecializationConstant(cl_uint index, size_type size, const void* value)
    {
        return detail::errHandler(
            CL_(clSetProgramSpecializationConstant)(
                object_,
                index,
                size,
                value),
            __SET_PROGRAM_SPECIALIZATION_CONSTANT_ERR);
    }
7350#endif // CL_HPP_TARGET_OPENCL_VERSION >= 220
7351};
7352
7353#if CL_HPP_TARGET_OPENCL_VERSION >= 120
7354inline Program linkProgram(
7355 const Program& input1,
7356 const Program& input2,
7357 const char* options = nullptr,
7358 void (CL_CALLBACK * notifyFptr)(cl_program, void *) = nullptr,
7359 void* data = nullptr,
7360 cl_int* err = nullptr)
7361{
7362 cl_int error_local = CL_SUCCESS;
7363 cl_program programs[2] = { input1(), input2() };
7364
7365 Context ctx = input1.getInfo<CL_PROGRAM_CONTEXT>(&error_local);
7366 if(error_local!=CL_SUCCESS) {
7367 detail::errHandler(error_local, __LINK_PROGRAM_ERR);
7368 }
7369
7370 cl_program prog = CL_(clLinkProgram)(
7371 ctx(),
7372 0,
7373 nullptr,
7374 options,
7375 2,
7376 programs,
7377 notifyFptr,
7378 data,
7379 &error_local);
7380
7381 detail::errHandler(error_local,__COMPILE_PROGRAM_ERR);
7382 if (err != nullptr) {
7383 *err = error_local;
7384 }
7385
7386 return Program(prog);
7387}
7388
7389inline Program linkProgram(
7390 const Program& input1,
7391 const Program& input2,
7392 const string& options,
7393 void (CL_CALLBACK * notifyFptr)(cl_program, void *) = nullptr,
7394 void* data = nullptr,
7395 cl_int* err = nullptr)
7396{
7397 return linkProgram(input1, input2, options.c_str(), notifyFptr, data, err);
7398}
7399
7400inline Program linkProgram(
7401 const vector<Program>& inputPrograms,
7402 const char* options = nullptr,
7403 void (CL_CALLBACK * notifyFptr)(cl_program, void *) = nullptr,
7404 void* data = nullptr,
7405 cl_int* err = nullptr)
7406{
7407 cl_int error_local = CL_SUCCESS;
7408 Context ctx;
7409
7410 static_assert(sizeof(cl::Program) == sizeof(cl_program),
7411 "Size of cl::Program must be equal to size of cl_program");
7412
7413 if(inputPrograms.size() > 0) {
7414 ctx = inputPrograms[0].getInfo<CL_PROGRAM_CONTEXT>(&error_local);
7415 if(error_local!=CL_SUCCESS) {
7416 detail::errHandler(error_local, __LINK_PROGRAM_ERR);
7417 }
7418 }
7419
7420 cl_program prog = CL_(clLinkProgram)(
7421 ctx(),
7422 0,
7423 nullptr,
7424 options,
7425 static_cast<cl_uint>(inputPrograms.size()),
7426 reinterpret_cast<const cl_program *>(inputPrograms.data()),
7427 notifyFptr,
7428 data,
7429 &error_local);
7430
7431 detail::errHandler(error_local,__COMPILE_PROGRAM_ERR);
7432 if (err != nullptr) {
7433 *err = error_local;
7434 }
7435
7436 return Program(prog);
7437}
7438
7439inline Program linkProgram(
7440 const vector<Program>& inputPrograms,
7441 const string& options,
7442 void (CL_CALLBACK * notifyFptr)(cl_program, void *) = nullptr,
7443 void* data = nullptr,
7444 cl_int* err = nullptr)
7445{
7446 return linkProgram(inputPrograms, options.c_str(), notifyFptr, data, err);
7447}
7448#endif // CL_HPP_TARGET_OPENCL_VERSION >= 120
7449
7450// Template specialization for CL_PROGRAM_BINARIES
template <>
inline cl_int cl::Program::getInfo(cl_program_info name, vector<vector<unsigned char>>* param) const
{
    // This specialization is only meaningful for CL_PROGRAM_BINARIES.
    if (name != CL_PROGRAM_BINARIES) {
        return CL_INVALID_VALUE;
    }
    if (param) {
        // Resize the parameter array appropriately for each allocation
        // and pass down to the helper

        vector<size_type> sizes = getInfo<CL_PROGRAM_BINARY_SIZES>();
        size_type numBinaries = sizes.size();

        // Resize the parameter array and constituent arrays
        param->resize(numBinaries);
        for (size_type i = 0; i < numBinaries; ++i) {
            (*param)[i].resize(sizes[i]);
        }

        return detail::errHandler(
            detail::getInfo(CL_(clGetProgramInfo), object_, name, param),
            __GET_PROGRAM_INFO_ERR);
    }

    // A null output parameter makes the query a no-op.
    return CL_SUCCESS;
}
7477
7478template<>
7479inline vector<vector<unsigned char>> cl::Program::getInfo<CL_PROGRAM_BINARIES>(cl_int* err) const
7480{
7481 vector<vector<unsigned char>> binariesVectors;
7482
7483 cl_int result = getInfo(CL_PROGRAM_BINARIES, &binariesVectors);
7484 if (err != nullptr) {
7485 *err = result;
7486 }
7487 return binariesVectors;
7488}
7489
7490#if CL_HPP_TARGET_OPENCL_VERSION >= 220
7491// Template specialization for clSetProgramSpecializationConstant
// Booleans are marshalled as a cl_uchar (all-ones for true, zero for false)
// rather than passing sizeof(bool) bytes, which is implementation-defined.
template <>
inline cl_int cl::Program::setSpecializationConstant(cl_uint index, const bool &value)
{
    cl_uchar ucValue = value ? CL_UCHAR_MAX : 0;
    return detail::errHandler(
        CL_(clSetProgramSpecializationConstant)(
            object_,
            index,
            sizeof(ucValue),
            &ucValue),
        __SET_PROGRAM_SPECIALIZATION_CONSTANT_ERR);
}
7504#endif // CL_HPP_TARGET_OPENCL_VERSION >= 220
7505
7506inline Kernel::Kernel(const Program& program, const string& name, cl_int* err)
7507{
7508 cl_int error;
7509
7510 object_ = CL_(clCreateKernel)(program(), name.c_str(), &error);
7511 detail::errHandler(error, __CREATE_KERNEL_ERR);
7512
7513 if (err != nullptr) {
7514 *err = error;
7515 }
7516}
7517
7518inline Kernel::Kernel(const Program& program, const char* name, cl_int* err)
7519{
7520 cl_int error;
7521
7522 object_ = CL_(clCreateKernel)(program(), name, &error);
7523 detail::errHandler(error, __CREATE_KERNEL_ERR);
7524
7525 if (err != nullptr) {
7526 *err = error;
7527 }
7528}
7529
7530#ifdef cl_khr_external_memory
//! \brief Handle types usable for importing external memory
//! (cl_khr_external_memory); members exist only when the corresponding
//! sub-extension macro is defined by the OpenCL headers.
enum class ExternalMemoryType : cl_external_memory_handle_type_khr
{
    None = 0,
#ifdef cl_khr_external_memory_opaque_fd
    OpaqueFd = CL_EXTERNAL_MEMORY_HANDLE_OPAQUE_FD_KHR,
#endif // cl_khr_external_memory_opaque_fd
#ifdef cl_khr_external_memory_win32
    OpaqueWin32 = CL_EXTERNAL_MEMORY_HANDLE_OPAQUE_WIN32_KHR,
    OpaqueWin32Kmt = CL_EXTERNAL_MEMORY_HANDLE_OPAQUE_WIN32_KMT_KHR,
#endif // cl_khr_external_memory_win32
#ifdef cl_khr_external_memory_dma_buf
    DmaBuf = CL_EXTERNAL_MEMORY_HANDLE_DMA_BUF_KHR,
#endif // cl_khr_external_memory_dma_buf
};
7545#endif // cl_khr_external_memory
7546
//! \brief Type-safe flag set over cl_command_queue_properties; combine
//! values with the operator| and operator& overloads below.
enum class QueueProperties : cl_command_queue_properties
{
    None = 0,
    Profiling = CL_QUEUE_PROFILING_ENABLE,
    OutOfOrder = CL_QUEUE_OUT_OF_ORDER_EXEC_MODE_ENABLE,
};
7553
7554inline QueueProperties operator|(QueueProperties lhs, QueueProperties rhs)
7555{
7556 return static_cast<QueueProperties>(static_cast<cl_command_queue_properties>(lhs) | static_cast<cl_command_queue_properties>(rhs));
7557}
7558
7559inline QueueProperties operator&(QueueProperties lhs, QueueProperties rhs)
7560{
7561 return static_cast<QueueProperties>(static_cast<cl_command_queue_properties>(lhs) & static_cast<cl_command_queue_properties>(rhs));
7562}
7567class CommandQueue : public detail::Wrapper<cl_command_queue>
7568{
7569private:
7570 static std::once_flag default_initialized_;
7571 static CommandQueue default_;
7572 static cl_int default_error_;
7573
    //! \brief One-time initializer for the process-wide default queue,
    //! built on the default context and default device. Errors are stored
    //! in default_error_ rather than thrown.
    static void makeDefault()
    {
        /* We don't want to throw an error from this function, so we have to
         * catch and set the error flag.
         */
#if defined(CL_HPP_ENABLE_EXCEPTIONS)
        try
#endif
        {
            int error;
            Context context = Context::getDefault(&error);

            if (error != CL_SUCCESS) {
                default_error_ = error;
            }
            else {
                Device device = Device::getDefault();
                // In-order queue with no special properties.
                default_ = CommandQueue(context, device, 0, &default_error_);
            }
        }
#if defined(CL_HPP_ENABLE_EXCEPTIONS)
        catch (cl::Error &e) {
            default_error_ = e.err();
        }
#endif
    }
7605
    //! \brief One-time initializer that installs a caller-supplied queue as
    //! the process-wide default.
    static void makeDefaultProvided(const CommandQueue &c) {
        default_ = c;
    }
7614
7615#ifdef cl_khr_external_memory
7616 static std::once_flag ext_memory_initialized_;
7617
    //! \brief Resolve the cl_khr_external_memory entry points from the
    //! device's platform; reports an error if neither is available.
    static void initMemoryExtension(const cl::Device& device)
    {
        auto platform = device.getInfo<CL_DEVICE_PLATFORM>()();

        CL_HPP_INIT_CL_EXT_FCN_PTR_PLATFORM_(platform, clEnqueueAcquireExternalMemObjectsKHR);
        CL_HPP_INIT_CL_EXT_FCN_PTR_PLATFORM_(platform, clEnqueueReleaseExternalMemObjectsKHR);

        if ((pfn_clEnqueueAcquireExternalMemObjectsKHR == nullptr)
             && (pfn_clEnqueueReleaseExternalMemObjectsKHR == nullptr))
        {
            detail::errHandler(CL_INVALID_VALUE, __ENQUEUE_ACQUIRE_EXTERNAL_MEMORY_ERR);
        }
    }
7631#endif // cl_khr_external_memory
7632
7633public:
7634#ifdef CL_HPP_UNIT_TEST_ENABLE
    //! \brief Test hook: reset the cached default queue to a null object.
    static void unitTestClearDefault() {
        default_ = CommandQueue();
    }
7644#endif // #ifdef CL_HPP_UNIT_TEST_ENABLE
7645
7646
7647 /*!
7648 * \brief Constructs a CommandQueue based on passed properties.
7649 * Will return an CL_INVALID_QUEUE_PROPERTIES error if CL_QUEUE_ON_DEVICE is specified.
7650 */
7652 cl_command_queue_properties properties,
7653 cl_int* err = nullptr)
7654 {
7655 cl_int error;
7656
7657 Context context = Context::getDefault(&error);
7658 detail::errHandler(error, __CREATE_CONTEXT_ERR);
7659
7660 if (error != CL_SUCCESS) {
7661 if (err != nullptr) {
7662 *err = error;
7663 }
7664 }
7665 else {
7666 Device device = context.getInfo<CL_CONTEXT_DEVICES>()[0];
7667 bool useWithProperties;
7668
7669#if CL_HPP_TARGET_OPENCL_VERSION >= 200 && CL_HPP_MINIMUM_OPENCL_VERSION < 200
7670 // Run-time decision based on the actual platform
7671 {
7672 cl_uint version = detail::getContextPlatformVersion(context());
7673 useWithProperties = (version >= 0x20000); // OpenCL 2.0 or above
7674 }
7675#elif CL_HPP_TARGET_OPENCL_VERSION >= 200
7676 useWithProperties = true;
7677#else
7678 useWithProperties = false;
7679#endif
7680
7681#if CL_HPP_TARGET_OPENCL_VERSION >= 200
7682 if (useWithProperties) {
7683 cl_queue_properties queue_properties[] = {
7684 CL_QUEUE_PROPERTIES, properties, 0 };
7685 if ((properties & CL_QUEUE_ON_DEVICE) == 0) {
7686 object_ = CL_(clCreateCommandQueueWithProperties)(
7687 context(), device(), queue_properties, &error);
7688 }
7689 else {
7690 error = CL_INVALID_QUEUE_PROPERTIES;
7691 }
7692
7693 detail::errHandler(error, __CREATE_COMMAND_QUEUE_WITH_PROPERTIES_ERR);
7694 if (err != nullptr) {
7695 *err = error;
7696 }
7697 }
7698#endif // CL_HPP_TARGET_OPENCL_VERSION >= 200
7699#if CL_HPP_MINIMUM_OPENCL_VERSION < 200
7700 if (!useWithProperties) {
7701 object_ = CL_(clCreateCommandQueue)(
7702 context(), device(), properties, &error);
7703
7704 detail::errHandler(error, __CREATE_COMMAND_QUEUE_ERR);
7705 if (err != nullptr) {
7706 *err = error;
7707 }
7708 }
7709#endif // CL_HPP_MINIMUM_OPENCL_VERSION < 200
7710 }
7711 }
7712
7713 /*!
7714 * \brief Constructs a CommandQueue based on passed properties.
7715 * Will return an CL_INVALID_QUEUE_PROPERTIES error if CL_QUEUE_ON_DEVICE is specified.
7716 */
7718 QueueProperties properties,
7719 cl_int* err = nullptr)
7720 {
7721 cl_int error;
7722
7723 Context context = Context::getDefault(&error);
7724 detail::errHandler(error, __CREATE_CONTEXT_ERR);
7725
7726 if (error != CL_SUCCESS) {
7727 if (err != nullptr) {
7728 *err = error;
7729 }
7730 }
7731 else {
7732 Device device = context.getInfo<CL_CONTEXT_DEVICES>()[0];
7733 bool useWithProperties;
7734
7735#if CL_HPP_TARGET_OPENCL_VERSION >= 200 && CL_HPP_MINIMUM_OPENCL_VERSION < 200
7736 // Run-time decision based on the actual platform
7737 {
7738 cl_uint version = detail::getContextPlatformVersion(context());
7739 useWithProperties = (version >= 0x20000); // OpenCL 2.0 or above
7740 }
7741#elif CL_HPP_TARGET_OPENCL_VERSION >= 200
7742 useWithProperties = true;
7743#else
7744 useWithProperties = false;
7745#endif
7746
7747#if CL_HPP_TARGET_OPENCL_VERSION >= 200
7748 if (useWithProperties) {
7749 cl_queue_properties queue_properties[] = {
7750 CL_QUEUE_PROPERTIES, static_cast<cl_queue_properties>(properties), 0 };
7751
7752 object_ = CL_(clCreateCommandQueueWithProperties)(
7753 context(), device(), queue_properties, &error);
7754
7755 detail::errHandler(error, __CREATE_COMMAND_QUEUE_WITH_PROPERTIES_ERR);
7756 if (err != nullptr) {
7757 *err = error;
7758 }
7759 }
7760#endif // CL_HPP_TARGET_OPENCL_VERSION >= 200
7761#if CL_HPP_MINIMUM_OPENCL_VERSION < 200
7762 if (!useWithProperties) {
7763 object_ = CL_(clCreateCommandQueue)(
7764 context(), device(), static_cast<cl_command_queue_properties>(properties), &error);
7765
7766 detail::errHandler(error, __CREATE_COMMAND_QUEUE_ERR);
7767 if (err != nullptr) {
7768 *err = error;
7769 }
7770 }
7771#endif // CL_HPP_MINIMUM_OPENCL_VERSION < 200
7772
7773 }
7774 }
7775
7776 /*!
7777 * \brief Constructs a CommandQueue for an implementation defined device in the given context
 * Will return a CL_INVALID_QUEUE_PROPERTIES error if CL_QUEUE_ON_DEVICE is specified.
7779 */
    explicit CommandQueue(
        const Context& context,
        cl_command_queue_properties properties = 0,
        cl_int* err = nullptr)
    {
        cl_int error;
        bool useWithProperties;
        vector<cl::Device> devices;
        // The queue is created on the first device of the supplied context.
        error = context.getInfo(CL_CONTEXT_DEVICES, &devices);

        detail::errHandler(error, __CREATE_CONTEXT_ERR);

        if (error != CL_SUCCESS)
        {
            if (err != nullptr) {
                *err = error;
            }
            return;
        }

#if CL_HPP_TARGET_OPENCL_VERSION >= 200 && CL_HPP_MINIMUM_OPENCL_VERSION < 200
        // Run-time decision based on the actual platform
        {
            cl_uint version = detail::getContextPlatformVersion(context());
            useWithProperties = (version >= 0x20000); // OpenCL 2.0 or above
        }
#elif CL_HPP_TARGET_OPENCL_VERSION >= 200
        useWithProperties = true;
#else
        useWithProperties = false;
#endif

#if CL_HPP_TARGET_OPENCL_VERSION >= 200
        if (useWithProperties) {
            cl_queue_properties queue_properties[] = {
                CL_QUEUE_PROPERTIES, properties, 0 };
            // Host queues only: device-side queues need clCreateCommandQueueWithProperties
            // via the DeviceCommandQueue class, so CL_QUEUE_ON_DEVICE is rejected here.
            if ((properties & CL_QUEUE_ON_DEVICE) == 0) {
                object_ = CL_(clCreateCommandQueueWithProperties)(
                    context(), devices[0](), queue_properties, &error);
            }
            else {
                error = CL_INVALID_QUEUE_PROPERTIES;
            }

            detail::errHandler(error, __CREATE_COMMAND_QUEUE_WITH_PROPERTIES_ERR);
            if (err != nullptr) {
                *err = error;
            }
        }
#endif // CL_HPP_TARGET_OPENCL_VERSION >= 200
#if CL_HPP_MINIMUM_OPENCL_VERSION < 200
        if (!useWithProperties) {
            // Pre-2.0 path: properties are passed as a plain bitfield.
            object_ = CL_(clCreateCommandQueue)(
                context(), devices[0](), properties, &error);

            detail::errHandler(error, __CREATE_COMMAND_QUEUE_ERR);
            if (err != nullptr) {
                *err = error;
            }
        }
#endif // CL_HPP_MINIMUM_OPENCL_VERSION < 200
    }
7842
7843 /*!
7844 * \brief Constructs a CommandQueue for an implementation defined device in the given context
 * Will return a CL_INVALID_QUEUE_PROPERTIES error if CL_QUEUE_ON_DEVICE is specified.
7846 */
    explicit CommandQueue(
        const Context& context,
        QueueProperties properties,
        cl_int* err = nullptr)
    {
        cl_int error;
        bool useWithProperties;
        vector<cl::Device> devices;
        // The queue is created on the first device of the supplied context.
        error = context.getInfo(CL_CONTEXT_DEVICES, &devices);

        detail::errHandler(error, __CREATE_CONTEXT_ERR);

        if (error != CL_SUCCESS)
        {
            if (err != nullptr) {
                *err = error;
            }
            return;
        }

#if CL_HPP_TARGET_OPENCL_VERSION >= 200 && CL_HPP_MINIMUM_OPENCL_VERSION < 200
        // Run-time decision based on the actual platform
        {
            cl_uint version = detail::getContextPlatformVersion(context());
            useWithProperties = (version >= 0x20000); // OpenCL 2.0 or above
        }
#elif CL_HPP_TARGET_OPENCL_VERSION >= 200
        useWithProperties = true;
#else
        useWithProperties = false;
#endif

#if CL_HPP_TARGET_OPENCL_VERSION >= 200
        if (useWithProperties) {
            // OpenCL 2.0+ path: properties are passed as a zero-terminated list.
            cl_queue_properties queue_properties[] = {
                CL_QUEUE_PROPERTIES, static_cast<cl_queue_properties>(properties), 0 };
            object_ = CL_(clCreateCommandQueueWithProperties)(
                context(), devices[0](), queue_properties, &error);

            detail::errHandler(error, __CREATE_COMMAND_QUEUE_WITH_PROPERTIES_ERR);
            if (err != nullptr) {
                *err = error;
            }
        }
#endif // CL_HPP_TARGET_OPENCL_VERSION >= 200
#if CL_HPP_MINIMUM_OPENCL_VERSION < 200
        if (!useWithProperties) {
            // Pre-2.0 path: properties are passed as a plain bitfield.
            object_ = CL_(clCreateCommandQueue)(
                context(), devices[0](), static_cast<cl_command_queue_properties>(properties), &error);

            detail::errHandler(error, __CREATE_COMMAND_QUEUE_ERR);
            if (err != nullptr) {
                *err = error;
            }
        }
#endif // CL_HPP_MINIMUM_OPENCL_VERSION < 200
    }
7904
7905 /*!
7906 * \brief Constructs a CommandQueue for a passed device and context
 * Will return a CL_INVALID_QUEUE_PROPERTIES error if CL_QUEUE_ON_DEVICE is specified.
7908 */
7910 const Context& context,
7911 const Device& device,
7912 cl_command_queue_properties properties = 0,
7913 cl_int* err = nullptr)
7914 {
7915 cl_int error;
7916 bool useWithProperties;
7917
7918#if CL_HPP_TARGET_OPENCL_VERSION >= 200 && CL_HPP_MINIMUM_OPENCL_VERSION < 200
7919 // Run-time decision based on the actual platform
7920 {
7921 cl_uint version = detail::getContextPlatformVersion(context());
7922 useWithProperties = (version >= 0x20000); // OpenCL 2.0 or above
7923 }
7924#elif CL_HPP_TARGET_OPENCL_VERSION >= 200
7925 useWithProperties = true;
7926#else
7927 useWithProperties = false;
7928#endif
7929
7930#if CL_HPP_TARGET_OPENCL_VERSION >= 200
7931 if (useWithProperties) {
7932 cl_queue_properties queue_properties[] = {
7933 CL_QUEUE_PROPERTIES, properties, 0 };
7934 object_ = CL_(clCreateCommandQueueWithProperties)(
7935 context(), device(), queue_properties, &error);
7936
7937 detail::errHandler(error, __CREATE_COMMAND_QUEUE_WITH_PROPERTIES_ERR);
7938 if (err != nullptr) {
7939 *err = error;
7940 }
7941 }
7942#endif // CL_HPP_TARGET_OPENCL_VERSION >= 200
7943#if CL_HPP_MINIMUM_OPENCL_VERSION < 200
7944 if (!useWithProperties) {
7945 object_ = CL_(clCreateCommandQueue)(
7946 context(), device(), properties, &error);
7947
7948 detail::errHandler(error, __CREATE_COMMAND_QUEUE_ERR);
7949 if (err != nullptr) {
7950 *err = error;
7951 }
7952 }
7953#endif // CL_HPP_MINIMUM_OPENCL_VERSION < 200
7954 }
7955
7956 /*!
7957 * \brief Constructs a CommandQueue for a passed device and context
 * Will return a CL_INVALID_QUEUE_PROPERTIES error if CL_QUEUE_ON_DEVICE is specified.
7959 */
7961 const Context& context,
7962 const Device& device,
7963 QueueProperties properties,
7964 cl_int* err = nullptr)
7965 {
7966 cl_int error;
7967 bool useWithProperties;
7968
7969#if CL_HPP_TARGET_OPENCL_VERSION >= 200 && CL_HPP_MINIMUM_OPENCL_VERSION < 200
7970 // Run-time decision based on the actual platform
7971 {
7972 cl_uint version = detail::getContextPlatformVersion(context());
7973 useWithProperties = (version >= 0x20000); // OpenCL 2.0 or above
7974 }
7975#elif CL_HPP_TARGET_OPENCL_VERSION >= 200
7976 useWithProperties = true;
7977#else
7978 useWithProperties = false;
7979#endif
7980
7981#if CL_HPP_TARGET_OPENCL_VERSION >= 200
7982 if (useWithProperties) {
7983 cl_queue_properties queue_properties[] = {
7984 CL_QUEUE_PROPERTIES, static_cast<cl_queue_properties>(properties), 0 };
7985 object_ = CL_(clCreateCommandQueueWithProperties)(
7986 context(), device(), queue_properties, &error);
7987
7988 detail::errHandler(error, __CREATE_COMMAND_QUEUE_WITH_PROPERTIES_ERR);
7989 if (err != nullptr) {
7990 *err = error;
7991 }
7992 }
7993#endif // CL_HPP_TARGET_OPENCL_VERSION >= 200
7994#if CL_HPP_MINIMUM_OPENCL_VERSION < 200
7995 if (!useWithProperties) {
7996 object_ = CL_(clCreateCommandQueue)(
7997 context(), device(), static_cast<cl_command_queue_properties>(properties), &error);
7998
7999 detail::errHandler(error, __CREATE_COMMAND_QUEUE_ERR);
8000 if (err != nullptr) {
8001 *err = error;
8002 }
8003 }
8004#endif // CL_HPP_MINIMUM_OPENCL_VERSION < 200
8005 }
8006
    // Returns the process-wide default command queue, creating it on first use.
    // Thread-safe: creation is guarded by std::call_once.
    static CommandQueue getDefault(cl_int * err = nullptr)
    {
        std::call_once(default_initialized_, makeDefault);
#if CL_HPP_TARGET_OPENCL_VERSION >= 200
        detail::errHandler(default_error_, __CREATE_COMMAND_QUEUE_WITH_PROPERTIES_ERR);
#else // CL_HPP_TARGET_OPENCL_VERSION >= 200
        detail::errHandler(default_error_, __CREATE_COMMAND_QUEUE_ERR);
#endif // CL_HPP_TARGET_OPENCL_VERSION >= 200
        if (err != nullptr) {
            // Reports the error recorded when the default queue was first created.
            *err = default_error_;
        }
        return default_;
    }
8020
8024 * Will only set the default if no default was previously created.
8025 * @return updated default command queue.
8026 * Should be compared to the passed value to ensure that it was updated.
8027 */
    // Installs default_queue as the process-wide default, but only if no
    // default has been created yet; always returns the (possibly pre-existing)
    // default so the caller can detect whether the update took effect.
    static CommandQueue setDefault(const CommandQueue &default_queue)
    {
        std::call_once(default_initialized_, makeDefaultProvided, std::cref(default_queue));
        detail::errHandler(default_error_);
        return default_;
    }
8034
8035 CommandQueue() { }
8036
8037
8040 * \param retainObject will cause the constructor to retain its cl object.
8041 * Defaults to false to maintain compatibility with
8042 * earlier versions.
8043 */
    // Wraps an existing cl_command_queue handle; retains it only when requested.
    explicit CommandQueue(const cl_command_queue& commandQueue, bool retainObject = false) :
        detail::Wrapper<cl_type>(commandQueue, retainObject) { }
8046
    // Rebinds this wrapper to a raw cl_command_queue handle via the base wrapper.
    CommandQueue& operator = (const cl_command_queue& rhs)
    {
        detail::Wrapper<cl_type>::operator=(rhs);
        return *this;
    }
8052
8053 template <typename T>
8054 cl_int getInfo(cl_command_queue_info name, T* param) const
8055 {
8056 return detail::errHandler(
8057 detail::getInfo(
8058 CL_(clGetCommandQueueInfo), object_, name, param),
8059 __GET_COMMAND_QUEUE_INFO_ERR);
8060 }
8061
8062 template <cl_command_queue_info name> typename
8063 detail::param_traits<detail::cl_command_queue_info, name>::param_type
8064 getInfo(cl_int* err = nullptr) const
8065 {
8066 typename detail::param_traits<
8067 detail::cl_command_queue_info, name>::param_type param;
8068 cl_int result = getInfo(name, &param);
8069 if (err != nullptr) {
8070 *err = result;
8071 }
8072 return param;
8073 }
8074
8075 cl_int enqueueReadBuffer(
8076 const Buffer& buffer,
8077 cl_bool blocking,
8078 size_type offset,
8079 size_type size,
8080 void* ptr,
8081 const vector<Event>* events = nullptr,
8082 Event* event = nullptr) const
8083 {
8084 cl_event tmp;
8085 cl_int err = detail::errHandler(
8086 CL_(clEnqueueReadBuffer)(
8087 object_, buffer(), blocking, offset, size,
8088 ptr,
8089 (events != nullptr) ? (cl_uint) events->size() : 0,
8090 (events != nullptr && events->size() > 0) ? (const cl_event*) &events->front() : nullptr,
8091 (event != nullptr) ? &tmp : nullptr),
8092 __ENQUEUE_READ_BUFFER_ERR);
8093
8094 if (event != nullptr && err == CL_SUCCESS)
8095 *event = tmp;
8096
8097 return err;
8098 }
8099
8100 cl_int enqueueWriteBuffer(
8101 const Buffer& buffer,
8102 cl_bool blocking,
8103 size_type offset,
8104 size_type size,
8105 const void* ptr,
8106 const vector<Event>* events = nullptr,
8107 Event* event = nullptr) const
8108 {
8109 cl_event tmp;
8110 cl_int err = detail::errHandler(
8111 CL_(clEnqueueWriteBuffer)(
8112 object_, buffer(), blocking, offset, size,
8113 ptr,
8114 (events != nullptr) ? (cl_uint) events->size() : 0,
8115 (events != nullptr && events->size() > 0) ? (const cl_event*) &events->front() : nullptr,
8116 (event != nullptr) ? &tmp : nullptr),
8117 __ENQUEUE_WRITE_BUFFER_ERR);
8118
8119 if (event != nullptr && err == CL_SUCCESS)
8120 *event = tmp;
8121
8122 return err;
8123 }
8124
8125 cl_int enqueueCopyBuffer(
8126 const Buffer& src,
8127 const Buffer& dst,
8128 size_type src_offset,
8129 size_type dst_offset,
8130 size_type size,
8131 const vector<Event>* events = nullptr,
8132 Event* event = nullptr) const
8133 {
8134 cl_event tmp;
8135 cl_int err = detail::errHandler(
8136 CL_(clEnqueueCopyBuffer)(
8137 object_, src(), dst(), src_offset, dst_offset, size,
8138 (events != nullptr) ? (cl_uint) events->size() : 0,
8139 (events != nullptr && events->size() > 0) ? (const cl_event*) &events->front() : nullptr,
8140 (event != nullptr) ? &tmp : nullptr),
8141 __ENQEUE_COPY_BUFFER_ERR);
8142
8143 if (event != nullptr && err == CL_SUCCESS)
8144 *event = tmp;
8145
8146 return err;
8147 }
8148#if CL_HPP_TARGET_OPENCL_VERSION >= 110
8149 cl_int enqueueReadBufferRect(
8150 const Buffer& buffer,
8151 cl_bool blocking,
8152 const array<size_type, 3>& buffer_offset,
8153 const array<size_type, 3>& host_offset,
8154 const array<size_type, 3>& region,
8155 size_type buffer_row_pitch,
8156 size_type buffer_slice_pitch,
8157 size_type host_row_pitch,
8158 size_type host_slice_pitch,
8159 void *ptr,
8160 const vector<Event>* events = nullptr,
8161 Event* event = nullptr) const
8162 {
8163 cl_event tmp;
8164 cl_int err = detail::errHandler(
8165 CL_(clEnqueueReadBufferRect)(
8166 object_,
8167 buffer(),
8168 blocking,
8169 buffer_offset.data(),
8170 host_offset.data(),
8171 region.data(),
8172 buffer_row_pitch,
8173 buffer_slice_pitch,
8174 host_row_pitch,
8175 host_slice_pitch,
8176 ptr,
8177 (events != nullptr) ? (cl_uint) events->size() : 0,
8178 (events != nullptr && events->size() > 0) ? (const cl_event*) &events->front() : nullptr,
8179 (event != nullptr) ? &tmp : nullptr),
8180 __ENQUEUE_READ_BUFFER_RECT_ERR);
8181
8182 if (event != nullptr && err == CL_SUCCESS)
8183 *event = tmp;
8184
8185 return err;
8186 }
8187
8188 cl_int enqueueReadBufferRect(
8189 const Buffer& buffer,
8190 cl_bool blocking,
8191 const array<size_type, 2>& buffer_offset,
8192 const array<size_type, 2>& host_offset,
8193 const array<size_type, 2>& region,
8194 size_type buffer_row_pitch,
8195 size_type buffer_slice_pitch,
8196 size_type host_row_pitch,
8197 size_type host_slice_pitch,
8198 void* ptr,
8199 const vector<Event>* events = nullptr,
8200 Event* event = nullptr) const
8201 {
8202 return enqueueReadBufferRect(
8203 buffer,
8204 blocking,
8205 { buffer_offset[0], buffer_offset[1], 0 },
8206 { host_offset[0], host_offset[1], 0 },
8207 { region[0], region[1], 1 },
8208 buffer_row_pitch,
8209 buffer_slice_pitch,
8210 host_row_pitch,
8211 host_slice_pitch,
8212 ptr,
8213 events,
8214 event);
8215 }
8216
8217 cl_int enqueueWriteBufferRect(
8218 const Buffer& buffer,
8219 cl_bool blocking,
8220 const array<size_type, 3>& buffer_offset,
8221 const array<size_type, 3>& host_offset,
8222 const array<size_type, 3>& region,
8223 size_type buffer_row_pitch,
8224 size_type buffer_slice_pitch,
8225 size_type host_row_pitch,
8226 size_type host_slice_pitch,
8227 const void *ptr,
8228 const vector<Event>* events = nullptr,
8229 Event* event = nullptr) const
8230 {
8231 cl_event tmp;
8232 cl_int err = detail::errHandler(
8233 CL_(clEnqueueWriteBufferRect)(
8234 object_,
8235 buffer(),
8236 blocking,
8237 buffer_offset.data(),
8238 host_offset.data(),
8239 region.data(),
8240 buffer_row_pitch,
8241 buffer_slice_pitch,
8242 host_row_pitch,
8243 host_slice_pitch,
8244 ptr,
8245 (events != nullptr) ? (cl_uint) events->size() : 0,
8246 (events != nullptr && events->size() > 0) ? (const cl_event*) &events->front() : nullptr,
8247 (event != nullptr) ? &tmp : nullptr),
8248 __ENQUEUE_WRITE_BUFFER_RECT_ERR);
8249
8250 if (event != nullptr && err == CL_SUCCESS)
8251 *event = tmp;
8252
8253 return err;
8254 }
8255
8256 cl_int enqueueWriteBufferRect(
8257 const Buffer& buffer,
8258 cl_bool blocking,
8259 const array<size_type, 2>& buffer_offset,
8260 const array<size_type, 2>& host_offset,
8261 const array<size_type, 2>& region,
8262 size_type buffer_row_pitch,
8263 size_type buffer_slice_pitch,
8264 size_type host_row_pitch,
8265 size_type host_slice_pitch,
8266 const void* ptr,
8267 const vector<Event>* events = nullptr,
8268 Event* event = nullptr) const
8269 {
8270 return enqueueWriteBufferRect(
8271 buffer,
8272 blocking,
8273 { buffer_offset[0], buffer_offset[1], 0 },
8274 { host_offset[0], host_offset[1], 0 },
8275 { region[0], region[1], 1 },
8276 buffer_row_pitch,
8277 buffer_slice_pitch,
8278 host_row_pitch,
8279 host_slice_pitch,
8280 ptr,
8281 events,
8282 event);
8283 }
8284
8285 cl_int enqueueCopyBufferRect(
8286 const Buffer& src,
8287 const Buffer& dst,
8288 const array<size_type, 3>& src_origin,
8289 const array<size_type, 3>& dst_origin,
8290 const array<size_type, 3>& region,
8291 size_type src_row_pitch,
8292 size_type src_slice_pitch,
8293 size_type dst_row_pitch,
8294 size_type dst_slice_pitch,
8295 const vector<Event>* events = nullptr,
8296 Event* event = nullptr) const
8297 {
8298 cl_event tmp;
8299 cl_int err = detail::errHandler(
8300 CL_(clEnqueueCopyBufferRect)(
8301 object_,
8302 src(),
8303 dst(),
8304 src_origin.data(),
8305 dst_origin.data(),
8306 region.data(),
8307 src_row_pitch,
8308 src_slice_pitch,
8309 dst_row_pitch,
8310 dst_slice_pitch,
8311 (events != nullptr) ? (cl_uint) events->size() : 0,
8312 (events != nullptr && events->size() > 0) ? (const cl_event*) &events->front() : nullptr,
8313 (event != nullptr) ? &tmp : nullptr),
8314 __ENQEUE_COPY_BUFFER_RECT_ERR);
8315
8316 if (event != nullptr && err == CL_SUCCESS)
8317 *event = tmp;
8318
8319 return err;
8320 }
8321
8322 cl_int enqueueCopyBufferRect(
8323 const Buffer& src,
8324 const Buffer& dst,
8325 const array<size_type, 2>& src_origin,
8326 const array<size_type, 2>& dst_origin,
8327 const array<size_type, 2>& region,
8328 size_type src_row_pitch,
8329 size_type src_slice_pitch,
8330 size_type dst_row_pitch,
8331 size_type dst_slice_pitch,
8332 const vector<Event>* events = nullptr,
8333 Event* event = nullptr) const
8334 {
8335 return enqueueCopyBufferRect(
8336 src,
8337 dst,
8338 { src_origin[0], src_origin[1], 0 },
8339 { dst_origin[0], dst_origin[1], 0 },
8340 { region[0], region[1], 1 },
8341 src_row_pitch,
8342 src_slice_pitch,
8343 dst_row_pitch,
8344 dst_slice_pitch,
8345 events,
8346 event);
8347 }
8348
8349#endif // CL_HPP_TARGET_OPENCL_VERSION >= 110
8350#if CL_HPP_TARGET_OPENCL_VERSION >= 120
8359 * \tparam size Is the size in bytes of the region to fill.
8360 * This must be a multiple of the pattern size.
8361 */
8362 template<typename PatternType>
8363 cl_int enqueueFillBuffer(
8364 const Buffer& buffer,
8365 PatternType pattern,
8366 size_type offset,
8367 size_type size,
8368 const vector<Event>* events = nullptr,
8369 Event* event = nullptr) const
8370 {
8371 cl_event tmp;
8372 cl_int err = detail::errHandler(
8373 CL_(clEnqueueFillBuffer)(
8374 object_,
8375 buffer(),
8376 static_cast<void*>(&pattern),
8377 sizeof(PatternType),
8378 offset,
8379 size,
8380 (events != nullptr) ? (cl_uint) events->size() : 0,
8381 (events != nullptr && events->size() > 0) ? (const cl_event*) &events->front() : nullptr,
8382 (event != nullptr) ? &tmp : nullptr),
8383 __ENQUEUE_FILL_BUFFER_ERR);
8384
8385 if (event != nullptr && err == CL_SUCCESS)
8386 *event = tmp;
8387
8388 return err;
8389 }
8390#endif // CL_HPP_TARGET_OPENCL_VERSION >= 120
8391
8392 cl_int enqueueReadImage(
8393 const Image& image,
8394 cl_bool blocking,
8395 const array<size_type, 3>& origin,
8396 const array<size_type, 3>& region,
8397 size_type row_pitch,
8398 size_type slice_pitch,
8399 void* ptr,
8400 const vector<Event>* events = nullptr,
8401 Event* event = nullptr) const
8402 {
8403 cl_event tmp;
8404 cl_int err = detail::errHandler(
8405 CL_(clEnqueueReadImage)(
8406 object_,
8407 image(),
8408 blocking,
8409 origin.data(),
8410 region.data(),
8411 row_pitch,
8412 slice_pitch,
8413 ptr,
8414 (events != nullptr) ? (cl_uint) events->size() : 0,
8415 (events != nullptr && events->size() > 0) ? (const cl_event*) &events->front() : nullptr,
8416 (event != nullptr) ? &tmp : nullptr),
8417 __ENQUEUE_READ_IMAGE_ERR);
8418
8419 if (event != nullptr && err == CL_SUCCESS)
8420 *event = tmp;
8421
8422 return err;
8423 }
8424
8425 cl_int enqueueReadImage(
8426 const Image& image,
8427 cl_bool blocking,
8428 const array<size_type, 2>& origin,
8429 const array<size_type, 2>& region,
8430 size_type row_pitch,
8431 size_type slice_pitch,
8432 void* ptr,
8433 const vector<Event>* events = nullptr,
8434 Event* event = nullptr) const
8435 {
8436 return enqueueReadImage(
8437 image,
8438 blocking,
8439 { origin[0], origin[1], 0 },
8440 { region[0], region[1], 1 },
8441 row_pitch,
8442 slice_pitch,
8443 ptr,
8444 events,
8445 event);
8446 }
8447
8448 cl_int enqueueWriteImage(
8449 const Image& image,
8450 cl_bool blocking,
8451 const array<size_type, 3>& origin,
8452 const array<size_type, 3>& region,
8453 size_type row_pitch,
8454 size_type slice_pitch,
8455 const void* ptr,
8456 const vector<Event>* events = nullptr,
8457 Event* event = nullptr) const
8458 {
8459 cl_event tmp;
8460 cl_int err = detail::errHandler(
8461 CL_(clEnqueueWriteImage)(
8462 object_,
8463 image(),
8464 blocking,
8465 origin.data(),
8466 region.data(),
8467 row_pitch,
8468 slice_pitch,
8469 ptr,
8470 (events != nullptr) ? (cl_uint) events->size() : 0,
8471 (events != nullptr && events->size() > 0) ? (const cl_event*) &events->front() : nullptr,
8472 (event != nullptr) ? &tmp : nullptr),
8473 __ENQUEUE_WRITE_IMAGE_ERR);
8474
8475 if (event != nullptr && err == CL_SUCCESS)
8476 *event = tmp;
8477
8478 return err;
8479 }
8480
8481 cl_int enqueueWriteImage(
8482 const Image& image,
8483 cl_bool blocking,
8484 const array<size_type, 2>& origin,
8485 const array<size_type, 2>& region,
8486 size_type row_pitch,
8487 size_type slice_pitch,
8488 const void* ptr,
8489 const vector<Event>* events = nullptr,
8490 Event* event = nullptr) const
8491 {
8492 return enqueueWriteImage(
8493 image,
8494 blocking,
8495 { origin[0], origin[1], 0 },
8496 { region[0], region[1], 1 },
8497 row_pitch,
8498 slice_pitch,
8499 ptr,
8500 events,
8501 event);
8502 }
8503
8504 cl_int enqueueCopyImage(
8505 const Image& src,
8506 const Image& dst,
8507 const array<size_type, 3>& src_origin,
8508 const array<size_type, 3>& dst_origin,
8509 const array<size_type, 3>& region,
8510 const vector<Event>* events = nullptr,
8511 Event* event = nullptr) const
8512 {
8513 cl_event tmp;
8514 cl_int err = detail::errHandler(
8515 CL_(clEnqueueCopyImage)(
8516 object_,
8517 src(),
8518 dst(),
8519 src_origin.data(),
8520 dst_origin.data(),
8521 region.data(),
8522 (events != nullptr) ? (cl_uint) events->size() : 0,
8523 (events != nullptr && events->size() > 0) ? (const cl_event*) &events->front() : nullptr,
8524 (event != nullptr) ? &tmp : nullptr),
8525 __ENQUEUE_COPY_IMAGE_ERR);
8526
8527 if (event != nullptr && err == CL_SUCCESS)
8528 *event = tmp;
8529
8530 return err;
8531 }
8532
8533 cl_int enqueueCopyImage(
8534 const Image& src,
8535 const Image& dst,
8536 const array<size_type, 2>& src_origin,
8537 const array<size_type, 2>& dst_origin,
8538 const array<size_type, 2>& region,
8539 const vector<Event>* events = nullptr,
8540 Event* event = nullptr) const
8541 {
8542 return enqueueCopyImage(
8543 src,
8544 dst,
8545 { src_origin[0], src_origin[1], 0 },
8546 { dst_origin[0], dst_origin[1], 0 },
8547 { region[0], region[1], 1 },
8548 events,
8549 event);
8550 }
8551
8552#if CL_HPP_TARGET_OPENCL_VERSION >= 120
8560 template <typename T>
8561 typename std::enable_if<std::is_same<T, cl_float4>::value ||
8562 std::is_same<T, cl_int4 >::value ||
8563 std::is_same<T, cl_uint4 >::value,
8564 cl_int>::type
8566 const Image& image,
8567 T fillColor,
8568 const array<size_type, 3>& origin,
8569 const array<size_type, 3>& region,
8570 const vector<Event>* events = nullptr,
8571 Event* event = nullptr) const
8572 {
8573 cl_event tmp;
8574 cl_int err = detail::errHandler(
8575 CL_(clEnqueueFillImage)(
8576 object_,
8577 image(),
8578 static_cast<void*>(&fillColor),
8579 origin.data(),
8580 region.data(),
8581 (events != nullptr) ? (cl_uint)events->size() : 0,
8582 (events != nullptr && events->size() > 0) ? (const cl_event*)&events->front() : NULL,
8583 (event != NULL) ? &tmp : nullptr),
8584 __ENQUEUE_FILL_IMAGE_ERR);
8585
8586 if (event != nullptr && err == CL_SUCCESS) *event = tmp;
8587
8588 return err;
8589 }
8590
8598 template <typename T>
8599 typename std::enable_if<std::is_same<T, cl_float4>::value ||
8600 std::is_same<T, cl_int4 >::value ||
8601 std::is_same<T, cl_uint4 >::value, cl_int>::type
8603 const Image& image,
8604 T fillColor,
8605 const array<size_type, 2>& origin,
8606 const array<size_type, 2>& region,
8607 const vector<Event>* events = nullptr,
8608 Event* event = nullptr) const
8609 {
8610 return enqueueFillImage(
8611 image,
8612 fillColor,
8613 { origin[0], origin[1], 0 },
8614 { region[0], region[1], 1 },
8615 events,
8616 event
8617 );
8618 }
8619#endif // CL_HPP_TARGET_OPENCL_VERSION >= 120
8620
8621 cl_int enqueueCopyImageToBuffer(
8622 const Image& src,
8623 const Buffer& dst,
8624 const array<size_type, 3>& src_origin,
8625 const array<size_type, 3>& region,
8626 size_type dst_offset,
8627 const vector<Event>* events = nullptr,
8628 Event* event = nullptr) const
8629 {
8630 cl_event tmp;
8631 cl_int err = detail::errHandler(
8632 CL_(clEnqueueCopyImageToBuffer)(
8633 object_,
8634 src(),
8635 dst(),
8636 src_origin.data(),
8637 region.data(),
8638 dst_offset,
8639 (events != nullptr) ? (cl_uint) events->size() : 0,
8640 (events != nullptr && events->size() > 0) ? (const cl_event*) &events->front() : nullptr,
8641 (event != nullptr) ? &tmp : nullptr),
8642 __ENQUEUE_COPY_IMAGE_TO_BUFFER_ERR);
8643
8644 if (event != nullptr && err == CL_SUCCESS)
8645 *event = tmp;
8646
8647 return err;
8648 }
8649
8650 cl_int enqueueCopyImageToBuffer(
8651 const Image& src,
8652 const Buffer& dst,
8653 const array<size_type, 2>& src_origin,
8654 const array<size_type, 2>& region,
8655 size_type dst_offset,
8656 const vector<Event>* events = nullptr,
8657 Event* event = nullptr) const
8658 {
8659 return enqueueCopyImageToBuffer(
8660 src,
8661 dst,
8662 { src_origin[0], src_origin[1], 0 },
8663 { region[0], region[1], 1 },
8664 dst_offset,
8665 events,
8666 event);
8667 }
8668
8669 cl_int enqueueCopyBufferToImage(
8670 const Buffer& src,
8671 const Image& dst,
8672 size_type src_offset,
8673 const array<size_type, 3>& dst_origin,
8674 const array<size_type, 3>& region,
8675 const vector<Event>* events = nullptr,
8676 Event* event = nullptr) const
8677 {
8678 cl_event tmp;
8679 cl_int err = detail::errHandler(
8680 CL_(clEnqueueCopyBufferToImage)(
8681 object_,
8682 src(),
8683 dst(),
8684 src_offset,
8685 dst_origin.data(),
8686 region.data(),
8687 (events != nullptr) ? (cl_uint) events->size() : 0,
8688 (events != nullptr && events->size() > 0) ? (const cl_event*) &events->front() : nullptr,
8689 (event != nullptr) ? &tmp : nullptr),
8690 __ENQUEUE_COPY_BUFFER_TO_IMAGE_ERR);
8691
8692 if (event != nullptr && err == CL_SUCCESS)
8693 *event = tmp;
8694
8695 return err;
8696 }
8697
8698 cl_int enqueueCopyBufferToImage(
8699 const Buffer& src,
8700 const Image& dst,
8701 size_type src_offset,
8702 const array<size_type, 2>& dst_origin,
8703 const array<size_type, 2>& region,
8704 const vector<Event>* events = nullptr,
8705 Event* event = nullptr) const
8706 {
8707 return enqueueCopyBufferToImage(
8708 src,
8709 dst,
8710 src_offset,
8711 { dst_origin[0], dst_origin[1], 0 },
8712 { region[0], region[1], 1 },
8713 events,
8714 event);
8715 }
8716
8717 void* enqueueMapBuffer(
8718 const Buffer& buffer,
8719 cl_bool blocking,
8720 cl_map_flags flags,
8721 size_type offset,
8722 size_type size,
8723 const vector<Event>* events = nullptr,
8724 Event* event = nullptr,
8725 cl_int* err = nullptr) const
8726 {
8727 cl_event tmp;
8728 cl_int error;
8729 void * result = CL_(clEnqueueMapBuffer)(
8730 object_, buffer(), blocking, flags, offset, size,
8731 (events != nullptr) ? (cl_uint) events->size() : 0,
8732 (events != nullptr && events->size() > 0) ? (const cl_event*) &events->front() : nullptr,
8733 (event != nullptr) ? &tmp : nullptr,
8734 &error);
8735
8736 detail::errHandler(error, __ENQUEUE_MAP_BUFFER_ERR);
8737 if (err != nullptr) {
8738 *err = error;
8739 }
8740 if (event != nullptr && error == CL_SUCCESS)
8741 *event = tmp;
8742
8743 return result;
8744 }
8745
8746 void* enqueueMapImage(
8747 const Image& image,
8748 cl_bool blocking,
8749 cl_map_flags flags,
8750 const array<size_type, 3>& origin,
8751 const array<size_type, 3>& region,
8752 size_type * row_pitch,
8753 size_type * slice_pitch,
8754 const vector<Event>* events = nullptr,
8755 Event* event = nullptr,
8756 cl_int* err = nullptr) const
8757 {
8758 cl_event tmp;
8759 cl_int error;
8760 void * result = CL_(clEnqueueMapImage)(
8761 object_, image(), blocking, flags,
8762 origin.data(),
8763 region.data(),
8764 row_pitch, slice_pitch,
8765 (events != nullptr) ? (cl_uint) events->size() : 0,
8766 (events != nullptr && events->size() > 0) ? (const cl_event*) &events->front() : nullptr,
8767 (event != nullptr) ? &tmp : nullptr,
8768 &error);
8769
8770 detail::errHandler(error, __ENQUEUE_MAP_IMAGE_ERR);
8771 if (err != nullptr) {
8772 *err = error;
8773 }
8774 if (event != nullptr && error == CL_SUCCESS)
8775 *event = tmp;
8776 return result;
8777 }
8778
8779 void* enqueueMapImage(
8780 const Image& image,
8781 cl_bool blocking,
8782 cl_map_flags flags,
8783 const array<size_type, 2>& origin,
8784 const array<size_type, 2>& region,
8785 size_type* row_pitch,
8786 size_type* slice_pitch,
8787 const vector<Event>* events = nullptr,
8788 Event* event = nullptr,
8789 cl_int* err = nullptr) const
8790 {
8791 return enqueueMapImage(image, blocking, flags,
8792 { origin[0], origin[1], 0 },
8793 { region[0], region[1], 1 }, row_pitch,
8794 slice_pitch, events, event, err);
8795 }
8796
8797#if CL_HPP_TARGET_OPENCL_VERSION >= 200
8798
8800 * Enqueues a command that copies a region of memory from the source pointer to the destination pointer.
8801 * This function is specifically for transferring data between the host and a coarse-grained SVM buffer.
8802 */
8803 template<typename T>
8804 cl_int enqueueMemcpySVM(
8805 T *dst_ptr,
8806 const T *src_ptr,
8807 cl_bool blocking,
8808 size_type size,
8809 const vector<Event> *events = nullptr,
8810 Event *event = nullptr) const {
8811 cl_event tmp;
8812 cl_int err = detail::errHandler(CL_(clEnqueueSVMMemcpy)(
8813 object_, blocking, static_cast<void *>(dst_ptr), static_cast<const void *>(src_ptr), size,
8814 (events != nullptr) ? (cl_uint) events->size() : 0,
8815 (events != nullptr && events->size() > 0) ? (const cl_event *) &events->front() : nullptr,
8816 (event != nullptr) ? &tmp : nullptr), __ENQUEUE_COPY_SVM_ERR);
8817
8818 if (event != nullptr && err == CL_SUCCESS)
8819 *event = tmp;
8820
8821 return err;
8822 }
8823
 * Enqueues a command that will copy data from one coarse-grained SVM buffer to another.
 * This function takes two cl::pointer instances representing the destination and source buffers.
8827 */
    template<typename T, class D>
    cl_int enqueueMemcpySVM(
        cl::pointer<T, D> &dst_ptr,
        const cl::pointer<T, D> &src_ptr,
        cl_bool blocking,
        size_type size,
        const vector<Event> *events = nullptr,
        Event *event = nullptr) const {
        // tmp receives the raw event handle only when the caller supplied an out-param.
        cl_event tmp;
        cl_int err = detail::errHandler(CL_(clEnqueueSVMMemcpy)(
            object_, blocking, static_cast<void *>(dst_ptr.get()), static_cast<const void *>(src_ptr.get()),
            size,
            (events != nullptr) ? (cl_uint) events->size() : 0,
            (events != nullptr && events->size() > 0) ? (const cl_event *) &events->front() : nullptr,
            (event != nullptr) ? &tmp : nullptr), __ENQUEUE_COPY_SVM_ERR);

        // Only publish the event on success; tmp is unwritten on failure.
        if (event != nullptr && err == CL_SUCCESS)
            *event = tmp;

        return err;
    }
8849
8851 * Enqueues a command that will allow the host to update a region of a coarse-grained SVM buffer.
8852 * This variant takes a cl::vector instance.
8853 */
8854 template<typename T, class Alloc>
8855 cl_int enqueueMemcpySVM(
8856 cl::vector<T, Alloc> &dst_container,
8857 const cl::vector<T, Alloc> &src_container,
8858 cl_bool blocking,
8859 const vector<Event> *events = nullptr,
8860 Event *event = nullptr) const {
8861 cl_event tmp;
8862 if(src_container.size() != dst_container.size()){
8863 return detail::errHandler(CL_INVALID_VALUE,__ENQUEUE_COPY_SVM_ERR);
8864 }
8865 cl_int err = detail::errHandler(CL_(clEnqueueSVMMemcpy)(
8866 object_, blocking, static_cast<void *>(dst_container.data()),
8867 static_cast<const void *>(src_container.data()),
8868 dst_container.size() * sizeof(T),
8869 (events != nullptr) ? (cl_uint) events->size() : 0,
8870 (events != nullptr && events->size() > 0) ? (const cl_event *) &events->front() : nullptr,
8871 (event != NULL) ? &tmp : nullptr), __ENQUEUE_COPY_SVM_ERR);
8872
8873 if (event != nullptr && err == CL_SUCCESS)
8874 *event = tmp;
8875
8876 return err;
8877 }
8878
8880 * Enqueues a command to fill a SVM buffer with a pattern.
8881 *
8882 */
    template<typename T, typename PatternType>
    cl_int enqueueMemFillSVM(
        T *ptr,
        PatternType pattern,
        size_type size,
        const vector<Event> *events = nullptr,
        Event *event = nullptr) const {
        cl_event tmp;
        // The pattern is passed by address with its size; per the
        // clEnqueueSVMMemFill contract, `size` must be a multiple of
        // sizeof(PatternType).
        cl_int err = detail::errHandler(CL_(clEnqueueSVMMemFill)(
            object_, static_cast<void *>(ptr), static_cast<void *>(&pattern),
            sizeof(PatternType), size,
            (events != nullptr) ? (cl_uint) events->size() : 0,
            (events != nullptr && events->size() > 0) ? (const cl_event *) &events->front() : nullptr,
            (event != nullptr) ? &tmp : nullptr), __ENQUEUE_FILL_SVM_ERR);

        // Only publish the event on success; tmp is unwritten on failure.
        if (event != nullptr && err == CL_SUCCESS)
            *event = tmp;

        return err;
    }
8903
8905 * Enqueues a command that fills a region of a coarse-grained SVM buffer with a specified pattern.
8906 * This variant takes a cl::pointer instance.
8907 */
    template<typename T, class D, typename PatternType>
    cl_int enqueueMemFillSVM(
        cl::pointer<T, D> &ptr,
        PatternType pattern,
        size_type size,
        const vector<Event> *events = nullptr,
        Event *event = nullptr) const {
        cl_event tmp;
        // Fill the SVM region behind the smart pointer; `size` is in bytes and,
        // per clEnqueueSVMMemFill, must be a multiple of sizeof(PatternType).
        cl_int err = detail::errHandler(CL_(clEnqueueSVMMemFill)(
            object_, static_cast<void *>(ptr.get()), static_cast<void *>(&pattern),
            sizeof(PatternType), size,
            (events != nullptr) ? (cl_uint) events->size() : 0,
            (events != nullptr && events->size() > 0) ? (const cl_event *) &events->front() : nullptr,
            (event != nullptr) ? &tmp : nullptr), __ENQUEUE_FILL_SVM_ERR);

        // Only publish the event on success.
        if (event != nullptr && err == CL_SUCCESS)
            *event = tmp;

        return err;
    }
8928
8930 * Enqueues a command that will allow the host to fill a region of a coarse-grained SVM buffer with a specified pattern.
8931 * This variant takes a cl::vector instance.
8932 */
8933 template<typename T, class Alloc, typename PatternType>
8934 cl_int enqueueMemFillSVM(
8935 cl::vector<T, Alloc> &container,
8936 PatternType pattern,
8937 const vector<Event> *events = nullptr,
8938 Event* event = nullptr) const
8939 {
8940 cl_event tmp;
8941 cl_int err = detail::errHandler(CL_(clEnqueueSVMMemFill)(
8942 object_, static_cast<void *>(container.data()), static_cast<void *>(&pattern),
8943 sizeof(PatternType), container.size() * sizeof(T),
8944 (events != nullptr) ? (cl_uint) events->size() : 0,
8945 (events != nullptr && events->size() > 0) ? (const cl_event *) &events->front() : nullptr,
8946 (event != nullptr) ? &tmp : NULL), __ENQUEUE_FILL_SVM_ERR);
8947
8948 if (event != nullptr && err == CL_SUCCESS)
8949 *event = tmp;
8950
8951 return err;
8952 }
8953
8955 * Enqueues a command that will allow the host to update a region of a coarse-grained SVM buffer.
8956 * This variant takes a raw SVM pointer.
8957 */
8958 template<typename T>
8959 cl_int enqueueMapSVM(
8960 T* ptr,
8961 cl_bool blocking,
8962 cl_map_flags flags,
8963 size_type size,
8964 const vector<Event>* events = nullptr,
8965 Event* event = nullptr) const
8966 {
8967 cl_event tmp;
8968 cl_int err = detail::errHandler(CL_(clEnqueueSVMMap)(
8969 object_, blocking, flags, static_cast<void*>(ptr), size,
8970 (events != nullptr) ? (cl_uint)events->size() : 0,
8971 (events != nullptr && events->size() > 0) ? (const cl_event*)&events->front() : nullptr,
8972 (event != nullptr) ? &tmp : nullptr),
8973 __ENQUEUE_MAP_SVM_ERR);
8974
8975 if (event != nullptr && err == CL_SUCCESS)
8976 *event = tmp;
8977
8978 return err;
8979 }
8980
8981
8983 * Enqueues a command that will allow the host to update a region of a coarse-grained SVM buffer.
8984 * This variant takes a cl::pointer instance.
8985 */
    template<typename T, class D>
    cl_int enqueueMapSVM(
        cl::pointer<T, D> &ptr,
        cl_bool blocking,
        cl_map_flags flags,
        size_type size,
        const vector<Event>* events = nullptr,
        Event* event = nullptr) const
    {
        cl_event tmp;
        // Maps the region behind the smart pointer for host access; the
        // matching enqueueUnmapSVM must be called before device use resumes.
        cl_int err = detail::errHandler(CL_(clEnqueueSVMMap)(
            object_, blocking, flags, static_cast<void*>(ptr.get()), size,
            (events != nullptr) ? (cl_uint)events->size() : 0,
            (events != nullptr && events->size() > 0) ? (const cl_event*)&events->front() : nullptr,
            (event != nullptr) ? &tmp : nullptr),
            __ENQUEUE_MAP_SVM_ERR);

        // Only publish the event on success.
        if (event != nullptr && err == CL_SUCCESS)
            *event = tmp;

        return err;
    }
9008
9010 * Enqueues a command that will allow the host to update a region of a coarse-grained SVM buffer.
9011 * This variant takes a cl::vector instance.
9012 */
    template<typename T, class Alloc>
    cl_int enqueueMapSVM(
        cl::vector<T, Alloc> &container,
        cl_bool blocking,
        cl_map_flags flags,
        const vector<Event>* events = nullptr,
        Event* event = nullptr) const
    {
        cl_event tmp;
        // Maps the container's entire backing store; the byte count is
        // derived from the container size, so no explicit size parameter.
        cl_int err = detail::errHandler(CL_(clEnqueueSVMMap)(
            object_, blocking, flags, static_cast<void*>(container.data()), container.size()*sizeof(T),
            (events != nullptr) ? (cl_uint)events->size() : 0,
            (events != nullptr && events->size() > 0) ? (const cl_event*)&events->front() : nullptr,
            (event != nullptr) ? &tmp : nullptr),
            __ENQUEUE_MAP_SVM_ERR);

        // Only publish the event on success.
        if (event != nullptr && err == CL_SUCCESS)
            *event = tmp;

        return err;
    }
9034#endif // #if CL_HPP_TARGET_OPENCL_VERSION >= 200
9035
9036 cl_int enqueueUnmapMemObject(
9037 const Memory& memory,
9038 void* mapped_ptr,
9039 const vector<Event>* events = nullptr,
9040 Event* event = nullptr) const
9041 {
9042 cl_event tmp;
9043 cl_int err = detail::errHandler(
9044 CL_(clEnqueueUnmapMemObject)(
9045 object_, memory(), mapped_ptr,
9046 (events != nullptr) ? (cl_uint) events->size() : 0,
9047 (events != nullptr && events->size() > 0) ? (const cl_event*) &events->front() : nullptr,
9048 (event != nullptr) ? &tmp : nullptr),
9049 __ENQUEUE_UNMAP_MEM_OBJECT_ERR);
9050
9051 if (event != nullptr && err == CL_SUCCESS)
9052 *event = tmp;
9053
9054 return err;
9055 }
9056
9057
9058#if CL_HPP_TARGET_OPENCL_VERSION >= 200
9060 * Enqueues a command that will release a coarse-grained SVM buffer back to the OpenCL runtime.
9061 * This variant takes a raw SVM pointer.
9062 */
    template<typename T>
    cl_int enqueueUnmapSVM(
        T* ptr,
        const vector<Event>* events = nullptr,
        Event* event = nullptr) const
    {
        cl_event tmp;
        // Releases a mapping previously established by enqueueMapSVM on the
        // same raw SVM pointer.
        cl_int err = detail::errHandler(
            CL_(clEnqueueSVMUnmap)(
            object_, static_cast<void*>(ptr),
            (events != nullptr) ? (cl_uint)events->size() : 0,
            (events != nullptr && events->size() > 0) ? (const cl_event*)&events->front() : nullptr,
            (event != nullptr) ? &tmp : nullptr),
            __ENQUEUE_UNMAP_SVM_ERR);

        // Only publish the event on success.
        if (event != nullptr && err == CL_SUCCESS)
            *event = tmp;

        return err;
    }
9083
9085 * Enqueues a command that will release a coarse-grained SVM buffer back to the OpenCL runtime.
9086 * This variant takes a cl::pointer instance.
9087 */
    template<typename T, class D>
    cl_int enqueueUnmapSVM(
        cl::pointer<T, D> &ptr,
        const vector<Event>* events = nullptr,
        Event* event = nullptr) const
    {
        cl_event tmp;
        // Releases a mapping previously established by enqueueMapSVM on the
        // same cl::pointer.
        cl_int err = detail::errHandler(
            CL_(clEnqueueSVMUnmap)(
            object_, static_cast<void*>(ptr.get()),
            (events != nullptr) ? (cl_uint)events->size() : 0,
            (events != nullptr && events->size() > 0) ? (const cl_event*)&events->front() : nullptr,
            (event != nullptr) ? &tmp : nullptr),
            __ENQUEUE_UNMAP_SVM_ERR);

        // Only publish the event on success.
        if (event != nullptr && err == CL_SUCCESS)
            *event = tmp;

        return err;
    }
9108
9110 * Enqueues a command that will release a coarse-grained SVM buffer back to the OpenCL runtime.
9111 * This variant takes a cl::vector instance.
9112 */
    template<typename T, class Alloc>
    cl_int enqueueUnmapSVM(
        cl::vector<T, Alloc> &container,
        const vector<Event>* events = nullptr,
        Event* event = nullptr) const
    {
        cl_event tmp;
        // Releases a mapping previously established by enqueueMapSVM on the
        // same container's backing store.
        cl_int err = detail::errHandler(
            CL_(clEnqueueSVMUnmap)(
            object_, static_cast<void*>(container.data()),
            (events != nullptr) ? (cl_uint)events->size() : 0,
            (events != nullptr && events->size() > 0) ? (const cl_event*)&events->front() : nullptr,
            (event != nullptr) ? &tmp : nullptr),
            __ENQUEUE_UNMAP_SVM_ERR);

        // Only publish the event on success.
        if (event != nullptr && err == CL_SUCCESS)
            *event = tmp;

        return err;
    }
9133#endif // #if CL_HPP_TARGET_OPENCL_VERSION >= 200
9134
9135#if CL_HPP_TARGET_OPENCL_VERSION >= 120
9143 * i.e. this event can be waited on to insure that all events either in the event_wait_list
9144 * or all previously enqueued commands, queued before this command to command_queue,
9145 * have completed.
9146 */
9148 const vector<Event> *events = nullptr,
9149 Event *event = nullptr) const
9150 {
9151 cl_event tmp;
9152 cl_int err = detail::errHandler(
9153 CL_(clEnqueueMarkerWithWaitList)(
9154 object_,
9155 (events != nullptr) ? (cl_uint) events->size() : 0,
9156 (events != nullptr && events->size() > 0) ? (const cl_event*) &events->front() : nullptr,
9157 (event != nullptr) ? &tmp : nullptr),
9158 __ENQUEUE_MARKER_WAIT_LIST_ERR);
9159
9160 if (event != nullptr && err == CL_SUCCESS)
9161 *event = tmp;
9162
9163 return err;
9164 }
9165
9173 * returns an event which can be waited on, i.e. this event can be waited on to insure that
9174 * all events either in the event_wait_list or all previously enqueued commands, queued
9175 * before this command to command_queue, have completed.
9176 */
9178 const vector<Event> *events = nullptr,
9179 Event *event = nullptr) const
9180 {
9181 cl_event tmp;
9182 cl_int err = detail::errHandler(
9183 CL_(clEnqueueBarrierWithWaitList)(
9184 object_,
9185 (events != nullptr) ? (cl_uint) events->size() : 0,
9186 (events != nullptr && events->size() > 0) ? (const cl_event*) &events->front() : nullptr,
9187 (event != nullptr) ? &tmp : nullptr),
9188 __ENQUEUE_BARRIER_WAIT_LIST_ERR);
9189
9190 if (event != nullptr && err == CL_SUCCESS)
9191 *event = tmp;
9192
9193 return err;
9194 }
9195
9196 /**
9197 * Enqueues a command to indicate with which device a set of memory objects
9198 * should be associated.
9199 */
9201 const vector<Memory> &memObjects,
9202 cl_mem_migration_flags flags,
9203 const vector<Event>* events = nullptr,
9204 Event* event = nullptr
9205 ) const
9206 {
9207 cl_event tmp;
9208
9209 vector<cl_mem> localMemObjects(memObjects.size());
9210
9211 for( int i = 0; i < (int)memObjects.size(); ++i ) {
9212 localMemObjects[i] = memObjects[i]();
9213 }
9214
9215 cl_int err = detail::errHandler(
9216 CL_(clEnqueueMigrateMemObjects)(
9217 object_,
9218 (cl_uint)memObjects.size(),
9219 localMemObjects.data(),
9220 flags,
9221 (events != nullptr) ? (cl_uint) events->size() : 0,
9222 (events != nullptr && events->size() > 0) ? (const cl_event*) &events->front() : nullptr,
9223 (event != nullptr) ? &tmp : nullptr),
9224 __ENQUEUE_UNMAP_MEM_OBJECT_ERR);
9225
9226 if (event != nullptr && err == CL_SUCCESS)
9227 *event = tmp;
9228
9229 return err;
9230 }
9231#endif // CL_HPP_TARGET_OPENCL_VERSION >= 120
9232
9233
9234#if CL_HPP_TARGET_OPENCL_VERSION >= 210
9237 * SVM allocations with a device.
9238 * @param sizes - The length from each pointer to migrate.
9239 */
    template<typename T>
    cl_int enqueueMigrateSVM(
        const cl::vector<T*> &svmRawPointers,
        const cl::vector<size_type> &sizes,
        cl_mem_migration_flags flags = 0,
        const vector<Event>* events = nullptr,
        Event* event = nullptr) const
    {
        cl_event tmp;
        // NOTE(review): data() on a const vector yields `T* const *`; confirm
        // this static_cast compiles when the template is instantiated.
        cl_int err = detail::errHandler(CL_(clEnqueueSVMMigrateMem)(
            object_,
            svmRawPointers.size(), static_cast<void**>(svmRawPointers.data()),
            sizes.data(), // one size per pointer; a zero size migrates the whole allocation (per clEnqueueSVMMigrateMem)
            flags,
            (events != nullptr) ? (cl_uint)events->size() : 0,
            (events != nullptr && events->size() > 0) ? (const cl_event*)&events->front() : nullptr,
            (event != nullptr) ? &tmp : nullptr),
            __ENQUEUE_MIGRATE_SVM_ERR);

        // Only publish the event on success.
        if (event != nullptr && err == CL_SUCCESS)
            *event = tmp;

        return err;
    }
9264
9266 * Enqueues a command that will allow the host associate a set of SVM allocations with
9267 * a device.
9268 */
9269 template<typename T>
9270 cl_int enqueueMigrateSVM(
9271 const cl::vector<T*> &svmRawPointers,
9272 cl_mem_migration_flags flags = 0,
9273 const vector<Event>* events = nullptr,
9274 Event* event = nullptr) const
9275 {
9276 return enqueueMigrateSVM(svmRawPointers, cl::vector<size_type>(svmRawPointers.size()), flags, events, event);
9277 }
9278
9279
9282 * SVM allocations with a device.
9283 * @param sizes - The length from each pointer to migrate.
9284 */
9285 template<typename T, class D>
9286 cl_int enqueueMigrateSVM(
9287 const cl::vector<cl::pointer<T, D>> &svmPointers,
9288 const cl::vector<size_type> &sizes,
9289 cl_mem_migration_flags flags = 0,
9290 const vector<Event>* events = nullptr,
9291 Event* event = nullptr) const
9292 {
9293 cl::vector<void*> svmRawPointers;
9294 svmRawPointers.reserve(svmPointers.size());
9295 for (auto p : svmPointers) {
9296 svmRawPointers.push_back(static_cast<void*>(p.get()));
9297 }
9298
9299 return enqueueMigrateSVM(svmRawPointers, sizes, flags, events, event);
9300 }
9301
9302
9304 * Enqueues a command that will allow the host associate a set of SVM allocations with
9305 * a device.
9306 */
9307 template<typename T, class D>
9308 cl_int enqueueMigrateSVM(
9309 const cl::vector<cl::pointer<T, D>> &svmPointers,
9310 cl_mem_migration_flags flags = 0,
9311 const vector<Event>* events = nullptr,
9312 Event* event = nullptr) const
9313 {
9314 return enqueueMigrateSVM(svmPointers, cl::vector<size_type>(svmPointers.size()), flags, events, event);
9315 }
9316
9319 * SVM allocations with a device.
9320 * @param sizes - The length from the beginning of each container to migrate.
9321 */
9322 template<typename T, class Alloc>
9323 cl_int enqueueMigrateSVM(
9324 const cl::vector<cl::vector<T, Alloc>> &svmContainers,
9325 const cl::vector<size_type> &sizes,
9326 cl_mem_migration_flags flags = 0,
9327 const vector<Event>* events = nullptr,
9328 Event* event = nullptr) const
9329 {
9330 cl::vector<void*> svmRawPointers;
9331 svmRawPointers.reserve(svmContainers.size());
9332 for (auto p : svmContainers) {
9333 svmRawPointers.push_back(static_cast<void*>(p.data()));
9334 }
9335
9336 return enqueueMigrateSVM(svmRawPointers, sizes, flags, events, event);
9337 }
9338
9340 * Enqueues a command that will allow the host associate a set of SVM allocations with
9341 * a device.
9342 */
9343 template<typename T, class Alloc>
9344 cl_int enqueueMigrateSVM(
9345 const cl::vector<cl::vector<T, Alloc>> &svmContainers,
9346 cl_mem_migration_flags flags = 0,
9347 const vector<Event>* events = nullptr,
9348 Event* event = nullptr) const
9349 {
9350 return enqueueMigrateSVM(svmContainers, cl::vector<size_type>(svmContainers.size()), flags, events, event);
9351 }
9352
9353#endif // #if CL_HPP_TARGET_OPENCL_VERSION >= 210
9354
    cl_int enqueueNDRangeKernel(
        const Kernel& kernel,
        const NDRange& offset,
        const NDRange& global,
        const NDRange& local = NullRange,
        const vector<Event>* events = nullptr,
        Event* event = nullptr) const
    {
        cl_event tmp;
        // NullRange has zero dimensions, which maps to a null pointer below:
        // a null offset means (0,0,0) and a null local size lets the runtime
        // choose the work-group size.
        cl_int err = detail::errHandler(
            CL_(clEnqueueNDRangeKernel)(
                object_, kernel(), (cl_uint) global.dimensions(),
                offset.dimensions() != 0 ? (const size_type*) offset : nullptr,
                (const size_type*) global,
                local.dimensions() != 0 ? (const size_type*) local : nullptr,
                (events != nullptr) ? (cl_uint) events->size() : 0,
                (events != nullptr && events->size() > 0) ? (const cl_event*) &events->front() : nullptr,
                (event != nullptr) ? &tmp : nullptr),
            __ENQUEUE_NDRANGE_KERNEL_ERR);

        // Only publish the event on success.
        if (event != nullptr && err == CL_SUCCESS)
            *event = tmp;

        return err;
    }
9380
9381#if defined(CL_USE_DEPRECATED_OPENCL_1_2_APIS)
    // Deprecated since OpenCL 1.2: equivalent to a 1x1x1 NDRange enqueue.
    CL_API_PREFIX__VERSION_1_2_DEPRECATED cl_int enqueueTask(
        const Kernel& kernel,
        const vector<Event>* events = nullptr,
        Event* event = nullptr) const CL_API_SUFFIX__VERSION_1_2_DEPRECATED
    {
        cl_event tmp;
        cl_int err = detail::errHandler(
            CL_(clEnqueueTask)(
                object_, kernel(),
                (events != nullptr) ? (cl_uint) events->size() : 0,
                (events != nullptr && events->size() > 0) ? (const cl_event*) &events->front() : nullptr,
                (event != nullptr) ? &tmp : nullptr),
            __ENQUEUE_TASK_ERR);

        // Only publish the event on success.
        if (event != nullptr && err == CL_SUCCESS)
            *event = tmp;

        return err;
    }
9401#endif // #if defined(CL_USE_DEPRECATED_OPENCL_1_2_APIS)
9402
9403 cl_int enqueueNativeKernel(
9404 void (CL_CALLBACK *userFptr)(void *),
9405 std::pair<void*, size_type> args,
9406 const vector<Memory>* mem_objects = nullptr,
9407 const vector<const void*>* mem_locs = nullptr,
9408 const vector<Event>* events = nullptr,
9409 Event* event = nullptr) const
9410 {
9411 cl_event tmp;
9412 cl_int err = detail::errHandler(
9413 CL_(clEnqueueNativeKernel)(
9414 object_, userFptr, args.first, args.second,
9415 (mem_objects != nullptr) ? (cl_uint) mem_objects->size() : 0,
9416 (mem_objects->size() > 0 ) ? reinterpret_cast<const cl_mem *>(mem_objects->data()) : nullptr,
9417 (mem_locs != nullptr && mem_locs->size() > 0) ? const_cast<const void**>(&mem_locs->front()) : nullptr,
9418 (events != nullptr) ? (cl_uint) events->size() : 0,
9419 (events != nullptr && events->size() > 0) ? (const cl_event*) &events->front() : nullptr,
9420 (event != nullptr) ? &tmp : nullptr),
9421 __ENQUEUE_NATIVE_KERNEL);
9422
9423 if (event != nullptr && err == CL_SUCCESS)
9424 *event = tmp;
9425
9426 return err;
9427 }
9428
9430 * Deprecated APIs for 1.2
9431 */
9432#if defined(CL_USE_DEPRECATED_OPENCL_1_1_APIS)
9433 CL_API_PREFIX__VERSION_1_1_DEPRECATED
9434 cl_int enqueueMarker(Event* event = nullptr) const CL_API_SUFFIX__VERSION_1_1_DEPRECATED
9435 {
9436 cl_event tmp;
9437 cl_int err = detail::errHandler(
9438 CL_(clEnqueueMarker)(
9439 object_,
9440 (event != nullptr) ? &tmp : nullptr),
9441 __ENQUEUE_MARKER_ERR);
9442
9443 if (event != nullptr && err == CL_SUCCESS)
9444 *event = tmp;
9445
9446 return err;
9447 }
9448
9449 CL_API_PREFIX__VERSION_1_1_DEPRECATED
9450 cl_int enqueueWaitForEvents(const vector<Event>& events) const CL_API_SUFFIX__VERSION_1_1_DEPRECATED
9451 {
9452 return detail::errHandler(
9453 CL_(clEnqueueWaitForEvents)(
9454 object_,
9455 (cl_uint) events.size(),
9456 events.size() > 0 ? (const cl_event*) &events.front() : nullptr),
9457 __ENQUEUE_WAIT_FOR_EVENTS_ERR);
9458 }
9459#endif // defined(CL_USE_DEPRECATED_OPENCL_1_1_APIS)
9460
    cl_int enqueueAcquireGLObjects(
        const vector<Memory>* mem_objects = nullptr,
        const vector<Event>* events = nullptr,
        Event* event = nullptr) const
    {
        cl_event tmp;
        // Acquire OpenGL-shared memory objects for OpenCL use; both optional
        // vectors collapse to (0, nullptr) when absent or empty.
        cl_int err = detail::errHandler(
             CL_(clEnqueueAcquireGLObjects)(
                 object_,
                 (mem_objects != nullptr) ? (cl_uint) mem_objects->size() : 0,
                 (mem_objects != nullptr && mem_objects->size() > 0) ? (const cl_mem *) &mem_objects->front(): nullptr,
                 (events != nullptr) ? (cl_uint) events->size() : 0,
                 (events != nullptr && events->size() > 0) ? (const cl_event*) &events->front() : nullptr,
                 (event != nullptr) ? &tmp : nullptr),
             __ENQUEUE_ACQUIRE_GL_ERR);

        // Only publish the event on success.
        if (event != nullptr && err == CL_SUCCESS)
            *event = tmp;

        return err;
    }
9482
    cl_int enqueueReleaseGLObjects(
        const vector<Memory>* mem_objects = nullptr,
        const vector<Event>* events = nullptr,
        Event* event = nullptr) const
    {
        cl_event tmp;
        // Release previously acquired OpenGL-shared memory objects back to GL.
        cl_int err = detail::errHandler(
             CL_(clEnqueueReleaseGLObjects)(
                 object_,
                 (mem_objects != nullptr) ? (cl_uint) mem_objects->size() : 0,
                 (mem_objects != nullptr && mem_objects->size() > 0) ? (const cl_mem *) &mem_objects->front(): nullptr,
                 (events != nullptr) ? (cl_uint) events->size() : 0,
                 (events != nullptr && events->size() > 0) ? (const cl_event*) &events->front() : nullptr,
                 (event != nullptr) ? &tmp : nullptr),
             __ENQUEUE_RELEASE_GL_ERR);

        // Only publish the event on success.
        if (event != nullptr && err == CL_SUCCESS)
            *event = tmp;

        return err;
    }
9504
9505#if defined (CL_HPP_USE_DX_INTEROP)
9506typedef CL_API_ENTRY cl_int (CL_API_CALL *PFN_clEnqueueAcquireD3D10ObjectsKHR)(
9507 cl_command_queue command_queue, cl_uint num_objects,
9508 const cl_mem* mem_objects, cl_uint num_events_in_wait_list,
9509 const cl_event* event_wait_list, cl_event* event);
9510typedef CL_API_ENTRY cl_int (CL_API_CALL *PFN_clEnqueueReleaseD3D10ObjectsKHR)(
9511 cl_command_queue command_queue, cl_uint num_objects,
9512 const cl_mem* mem_objects, cl_uint num_events_in_wait_list,
9513 const cl_event* event_wait_list, cl_event* event);
9514
9515 cl_int enqueueAcquireD3D10Objects(
9516 const vector<Memory>* mem_objects = nullptr,
9517 const vector<Event>* events = nullptr,
9518 Event* event = nullptr) const
9519 {
9520 static PFN_clEnqueueAcquireD3D10ObjectsKHR pfn_clEnqueueAcquireD3D10ObjectsKHR = nullptr;
9521#if CL_HPP_TARGET_OPENCL_VERSION >= 120
9522 cl_context context = getInfo<CL_QUEUE_CONTEXT>();
9523 cl::Device device(getInfo<CL_QUEUE_DEVICE>());
9524 cl_platform_id platform = device.getInfo<CL_DEVICE_PLATFORM>();
9525 CL_HPP_INIT_CL_EXT_FCN_PTR_PLATFORM_(platform, clEnqueueAcquireD3D10ObjectsKHR);
9526#endif
9527#if CL_HPP_MINIMUM_OPENCL_VERSION < 120
9528 CL_HPP_INIT_CL_EXT_FCN_PTR_(clEnqueueAcquireD3D10ObjectsKHR);
9529#endif
9530
9531 cl_event tmp;
9532 cl_int err = detail::errHandler(
9533 pfn_clEnqueueAcquireD3D10ObjectsKHR(
9534 object_,
9535 (mem_objects != nullptr) ? (cl_uint) mem_objects->size() : 0,
9536 (mem_objects != nullptr && mem_objects->size() > 0) ? (const cl_mem *) &mem_objects->front(): nullptr,
9537 (events != nullptr) ? (cl_uint) events->size() : 0,
9538 (events != nullptr) ? (const cl_event*) &events->front() : nullptr,
9539 (event != nullptr) ? &tmp : nullptr),
9540 __ENQUEUE_ACQUIRE_GL_ERR);
9541
9542 if (event != nullptr && err == CL_SUCCESS)
9543 *event = tmp;
9544
9545 return err;
9546 }
9547
    cl_int enqueueReleaseD3D10Objects(
        const vector<Memory>* mem_objects = nullptr,
        const vector<Event>* events = nullptr,
        Event* event = nullptr) const
    {
        // The extension entry point is resolved once and cached.
        static PFN_clEnqueueReleaseD3D10ObjectsKHR pfn_clEnqueueReleaseD3D10ObjectsKHR = nullptr;
#if CL_HPP_TARGET_OPENCL_VERSION >= 120
        cl_context context = getInfo<CL_QUEUE_CONTEXT>();
        cl::Device device(getInfo<CL_QUEUE_DEVICE>());
        cl_platform_id platform = device.getInfo<CL_DEVICE_PLATFORM>();
        CL_HPP_INIT_CL_EXT_FCN_PTR_PLATFORM_(platform, clEnqueueReleaseD3D10ObjectsKHR);
#endif
#if CL_HPP_MINIMUM_OPENCL_VERSION < 120
        CL_HPP_INIT_CL_EXT_FCN_PTR_(clEnqueueReleaseD3D10ObjectsKHR);
#endif

        cl_event tmp;
        // NOTE(review): reuses the GL error string for the D3D10 path.
        cl_int err = detail::errHandler(
            pfn_clEnqueueReleaseD3D10ObjectsKHR(
                object_,
                (mem_objects != nullptr) ? (cl_uint) mem_objects->size() : 0,
                (mem_objects != nullptr && mem_objects->size() > 0) ? (const cl_mem *) &mem_objects->front(): nullptr,
                (events != nullptr) ? (cl_uint) events->size() : 0,
                (events != nullptr && events->size() > 0) ? (const cl_event*) &events->front() : nullptr,
                (event != nullptr) ? &tmp : nullptr),
            __ENQUEUE_RELEASE_GL_ERR);

        // Only publish the event on success.
        if (event != nullptr && err == CL_SUCCESS)
            *event = tmp;

        return err;
    }
9580#endif
9581
9583 * Deprecated APIs for 1.2
9584 */
9585#if defined(CL_USE_DEPRECATED_OPENCL_1_1_APIS)
9586 CL_API_PREFIX__VERSION_1_1_DEPRECATED
9587 cl_int enqueueBarrier() const CL_API_SUFFIX__VERSION_1_1_DEPRECATED
9588 {
9589 return detail::errHandler(
9590 CL_(clEnqueueBarrier)(object_),
9591 __ENQUEUE_BARRIER_ERR);
9592 }
9593#endif // CL_USE_DEPRECATED_OPENCL_1_1_APIS
9594
9595 cl_int flush() const
9596 {
9597 return detail::errHandler(CL_(clFlush)(object_), __FLUSH_ERR);
9598 }
9599
9600 cl_int finish() const
9601 {
9602 return detail::errHandler(CL_(clFinish)(object_), __FINISH_ERR);
9603 }
9604
9605#ifdef cl_khr_external_memory
    cl_int enqueueAcquireExternalMemObjects(
        const vector<Memory>& mem_objects,
        const vector<Event>* events_wait = nullptr,
        Event *event = nullptr)
    {
        // If the extension entry point is unavailable the error below is returned.
        cl_int err = CL_INVALID_OPERATION;
        cl_event tmp;

        // Resolve the cl_khr_external_memory entry points exactly once per process.
        std::call_once(ext_memory_initialized_, initMemoryExtension, this->getInfo<CL_QUEUE_DEVICE>());

        if (pfn_clEnqueueAcquireExternalMemObjectsKHR)
        {
            err = pfn_clEnqueueAcquireExternalMemObjectsKHR(
                object_,
                static_cast<cl_uint>(mem_objects.size()),
                (mem_objects.size() > 0) ? reinterpret_cast<const cl_mem *>(mem_objects.data()) : nullptr,
                (events_wait != nullptr) ? static_cast<cl_uint>(events_wait->size()) : 0,
                (events_wait != nullptr && events_wait->size() > 0) ? reinterpret_cast<const cl_event*>(events_wait->data()) : nullptr,
                &tmp);
        }

        detail::errHandler(err, __ENQUEUE_ACQUIRE_EXTERNAL_MEMORY_ERR);

        // Only publish the event on success.
        if (event != nullptr && err == CL_SUCCESS)
            *event = tmp;

        return err;
    }
9634
    cl_int enqueueReleaseExternalMemObjects(
        const vector<Memory>& mem_objects,
        const vector<Event>* events_wait = nullptr,
        Event *event = nullptr)
    {
        // If the extension entry point is unavailable the error below is returned.
        cl_int err = CL_INVALID_OPERATION;
        cl_event tmp;

        // Resolve the cl_khr_external_memory entry points exactly once per process.
        std::call_once(ext_memory_initialized_, initMemoryExtension, this->getInfo<CL_QUEUE_DEVICE>());

        if (pfn_clEnqueueReleaseExternalMemObjectsKHR)
        {
            err = pfn_clEnqueueReleaseExternalMemObjectsKHR(
                object_,
                static_cast<cl_uint>(mem_objects.size()),
                (mem_objects.size() > 0) ? reinterpret_cast<const cl_mem *>(mem_objects.data()) : nullptr,
                (events_wait != nullptr) ? static_cast<cl_uint>(events_wait->size()) : 0,
                (events_wait != nullptr && events_wait->size() > 0) ? reinterpret_cast<const cl_event*>(events_wait->data()) : nullptr,
                &tmp);
        }

        detail::errHandler(err, __ENQUEUE_RELEASE_EXTERNAL_MEMORY_ERR);

        // Only publish the event on success.
        if (event != nullptr && err == CL_SUCCESS)
            *event = tmp;

        return err;
    }
9663#endif // cl_khr_external_memory && CL_HPP_TARGET_OPENCL_VERSION >= 300
9664
9665#ifdef cl_khr_semaphore
9666 cl_int enqueueWaitSemaphores(
9667 const vector<Semaphore> &sema_objects,
9668 const vector<cl_semaphore_payload_khr> &sema_payloads = {},
9669 const vector<Event>* events_wait_list = nullptr,
9670 Event *event = nullptr) const;
9671
9672 cl_int enqueueSignalSemaphores(
9673 const vector<Semaphore> &sema_objects,
9674 const vector<cl_semaphore_payload_khr>& sema_payloads = {},
9675 const vector<Event>* events_wait_list = nullptr,
9676 Event* event = nullptr);
9677#endif // cl_khr_semaphore
9678}; // CommandQueue
9679
9680#ifdef cl_khr_external_memory
9681CL_HPP_DEFINE_STATIC_MEMBER_ std::once_flag CommandQueue::ext_memory_initialized_;
9682#endif
9683
9684CL_HPP_DEFINE_STATIC_MEMBER_ std::once_flag CommandQueue::default_initialized_;
9685CL_HPP_DEFINE_STATIC_MEMBER_ CommandQueue CommandQueue::default_;
9686CL_HPP_DEFINE_STATIC_MEMBER_ cl_int CommandQueue::default_error_ = CL_SUCCESS;
9687
9688
9689#if CL_HPP_TARGET_OPENCL_VERSION >= 200
// Type-safe property flags for device-side command queues; combined with
// the overloaded operator| below.
enum class DeviceQueueProperties : cl_command_queue_properties
{
    None = 0,                                // no additional properties
    Profiling = CL_QUEUE_PROFILING_ENABLE,   // enable event profiling on the queue
};
9695
9696inline DeviceQueueProperties operator|(DeviceQueueProperties lhs, DeviceQueueProperties rhs)
9697{
9698 return static_cast<DeviceQueueProperties>(static_cast<cl_command_queue_properties>(lhs) | static_cast<cl_command_queue_properties>(rhs));
9699}
9704class DeviceCommandQueue : public detail::Wrapper<cl_command_queue>
9705{
9706public:
9711 DeviceCommandQueue() { }
9716 DeviceCommandQueue(DeviceQueueProperties properties, cl_int* err = nullptr)
9717 {
9718 cl_int error;
9721
9722 cl_command_queue_properties mergedProperties =
9723 CL_QUEUE_OUT_OF_ORDER_EXEC_MODE_ENABLE | CL_QUEUE_ON_DEVICE | static_cast<cl_command_queue_properties>(properties);
9724
9725 cl_queue_properties queue_properties[] = {
9726 CL_QUEUE_PROPERTIES, mergedProperties, 0 };
9727 object_ = CL_(clCreateCommandQueueWithProperties)(
9728 context(), device(), queue_properties, &error);
9729
9730 detail::errHandler(error, __CREATE_COMMAND_QUEUE_WITH_PROPERTIES_ERR);
9731 if (err != nullptr) {
9732 *err = error;
9733 }
9734 }
9740 const Context& context,
9741 const Device& device,
9742 DeviceQueueProperties properties = DeviceQueueProperties::None,
9743 cl_int* err = nullptr)
9744 {
9745 cl_int error;
9746
9747 cl_command_queue_properties mergedProperties =
9748 CL_QUEUE_OUT_OF_ORDER_EXEC_MODE_ENABLE | CL_QUEUE_ON_DEVICE | static_cast<cl_command_queue_properties>(properties);
9749 cl_queue_properties queue_properties[] = {
9750 CL_QUEUE_PROPERTIES, mergedProperties, 0 };
9751 object_ = CL_(clCreateCommandQueueWithProperties)(
9752 context(), device(), queue_properties, &error);
9753
9754 detail::errHandler(error, __CREATE_COMMAND_QUEUE_WITH_PROPERTIES_ERR);
9755 if (err != nullptr) {
9756 *err = error;
9757 }
9758 }
9764 const Context& context,
9765 const Device& device,
9766 cl_uint queueSize,
9767 DeviceQueueProperties properties = DeviceQueueProperties::None,
9768 cl_int* err = nullptr)
9769 {
9770 cl_int error;
9771
9772 cl_command_queue_properties mergedProperties =
9773 CL_QUEUE_OUT_OF_ORDER_EXEC_MODE_ENABLE | CL_QUEUE_ON_DEVICE | static_cast<cl_command_queue_properties>(properties);
9774 cl_queue_properties queue_properties[] = {
9775 CL_QUEUE_PROPERTIES, mergedProperties,
9776 CL_QUEUE_SIZE, queueSize,
9777 0 };
9778 object_ = CL_(clCreateCommandQueueWithProperties)(
9779 context(), device(), queue_properties, &error);
9780
9781 detail::errHandler(error, __CREATE_COMMAND_QUEUE_WITH_PROPERTIES_ERR);
9782 if (err != nullptr) {
9783 *err = error;
9784 }
9785 }
9786
9789 * \param retainObject will cause the constructor to retain its cl object.
9790 * Defaults to false to maintain compatibility with
9791 * earlier versions.
9792 */
    // Wraps an existing cl_command_queue handle; retains it only when requested.
    explicit DeviceCommandQueue(const cl_command_queue& commandQueue, bool retainObject = false) :
        detail::Wrapper<cl_type>(commandQueue, retainObject) { }
9795
    DeviceCommandQueue& operator = (const cl_command_queue& rhs)
    {
        // Delegates raw-handle assignment to the Wrapper base class.
        detail::Wrapper<cl_type>::operator=(rhs);
        return *this;
    }
9801
    // Queries a property of the device queue; thin wrapper over
    // clGetCommandQueueInfo with unified error handling.
    template <typename T>
    cl_int getInfo(cl_command_queue_info name, T* param) const
    {
        return detail::errHandler(
            detail::getInfo(
                CL_(clGetCommandQueueInfo), object_, name, param),
                __GET_COMMAND_QUEUE_INFO_ERR);
    }
9810
9811 template <cl_command_queue_info name> typename
9812 detail::param_traits<detail::cl_command_queue_info, name>::param_type
9813 getInfo(cl_int* err = nullptr) const
9814 {
9815 typename detail::param_traits<
9816 detail::cl_command_queue_info, name>::param_type param;
9817 cl_int result = getInfo(name, &param);
9818 if (err != nullptr) {
9819 *err = result;
9820 }
9821 return param;
9822 }
9823
     * in the default context and of the default size.
     * If there is already a default queue for the specified device this
     * function will return the pre-existing queue.
     */
        cl_int *err = nullptr)
    {
        cl_int error;

        // Mandatory flags for a default device queue: out-of-order,
        // on-device, and marked as the device-default queue.
        cl_command_queue_properties properties =
            CL_QUEUE_OUT_OF_ORDER_EXEC_MODE_ENABLE | CL_QUEUE_ON_DEVICE | CL_QUEUE_ON_DEVICE_DEFAULT;
        cl_queue_properties queue_properties[] = {
            CL_QUEUE_PROPERTIES, properties,
            0 };
        // No CL_QUEUE_SIZE entry: the implementation picks the default size.
        DeviceCommandQueue deviceQueue(
            CL_(clCreateCommandQueueWithProperties)(
                context(), device(), queue_properties, &error));

        detail::errHandler(error, __CREATE_COMMAND_QUEUE_WITH_PROPERTIES_ERR);
        if (err != nullptr) {
            *err = error;
        }

        return deviceQueue;
    }
9853
     * and of the default size.
     * If there is already a default queue for the specified device this
     * function will return the pre-existing queue.
     */
        const Context &context, const Device &device, cl_int *err = nullptr)
    {
        cl_int error;

        // Mandatory flags for a default device queue: out-of-order,
        // on-device, and marked as the device-default queue.
        cl_command_queue_properties properties =
            CL_QUEUE_OUT_OF_ORDER_EXEC_MODE_ENABLE | CL_QUEUE_ON_DEVICE | CL_QUEUE_ON_DEVICE_DEFAULT;
        cl_queue_properties queue_properties[] = {
            CL_QUEUE_PROPERTIES, properties,
            0 };
        // No CL_QUEUE_SIZE entry: the implementation picks the default size.
        DeviceCommandQueue deviceQueue(
            CL_(clCreateCommandQueueWithProperties)(
                context(), device(), queue_properties, &error));

        detail::errHandler(error, __CREATE_COMMAND_QUEUE_WITH_PROPERTIES_ERR);
        if (err != nullptr) {
            *err = error;
        }

        return deviceQueue;
    }
9881
     * and of the requested size in bytes.
     * If there is already a default queue for the specified device this
     * function will return the pre-existing queue.
     */
        const Context &context, const Device &device, cl_uint queueSize, cl_int *err = nullptr)
    {
        cl_int error;

        // Mandatory flags for a default device queue: out-of-order,
        // on-device, and marked as the device-default queue.
        cl_command_queue_properties properties =
            CL_QUEUE_OUT_OF_ORDER_EXEC_MODE_ENABLE | CL_QUEUE_ON_DEVICE | CL_QUEUE_ON_DEVICE_DEFAULT;
        // Caller-specified queue size, in bytes.
        cl_queue_properties queue_properties[] = {
            CL_QUEUE_PROPERTIES, properties,
            CL_QUEUE_SIZE, queueSize,
            0 };
        DeviceCommandQueue deviceQueue(
            CL_(clCreateCommandQueueWithProperties)(
                context(), device(), queue_properties, &error));

        detail::errHandler(error, __CREATE_COMMAND_QUEUE_WITH_PROPERTIES_ERR);
        if (err != nullptr) {
            *err = error;
        }

        return deviceQueue;
    }
9910
9911
9912
9913#if CL_HPP_TARGET_OPENCL_VERSION >= 210
9916 * This can update the default command queue for a device repeatedly to account
9917 * for kernels that rely on the default.
9918 * @return updated default device command queue.
9919 */
9920 static DeviceCommandQueue updateDefault(const Context &context, const Device &device, const DeviceCommandQueue &default_queue, cl_int *err = nullptr)
9921 {
9922 cl_int error;
9923 error = CL_(clSetDefaultDeviceCommandQueue)(context.get(), device.get(), default_queue.get());
9924
9925 detail::errHandler(error, __SET_DEFAULT_DEVICE_COMMAND_QUEUE_ERR);
9926 if (err != nullptr) {
9927 *err = error;
9928 }
9929 return default_queue;
9930 }
9935 static DeviceCommandQueue getDefault(const CommandQueue &queue, cl_int * err = nullptr)
9936 {
9937 return queue.getInfo<CL_QUEUE_DEVICE_DEFAULT>(err);
9938 }
9939
9940#endif // #if CL_HPP_TARGET_OPENCL_VERSION >= 210
9941}; // DeviceCommandQueue
9942
namespace detail
{
    // Specialization for device command queue
    // Marshals a cl::DeviceCommandQueue kernel argument as its underlying
    // cl_command_queue handle (pointer + size pair for clSetKernelArg).
    template <>
    {
        static size_type size(const cl::DeviceCommandQueue&) { return sizeof(cl_command_queue); }
        static const cl_command_queue* ptr(const cl::DeviceCommandQueue& value) { return &(value()); }
    };
} // namespace detail
9953
9954#endif // #if CL_HPP_TARGET_OPENCL_VERSION >= 200
9955
9956
template< typename IteratorType >
    const Context &context,
    IteratorType startIterator,
    IteratorType endIterator,
    bool readOnly,
    bool useHostPtr,
    cl_int* err)
{
    typedef typename std::iterator_traits<IteratorType>::value_type DataType;
    cl_int error;

    // Translate the boolean options into cl_mem_flags.
    cl_mem_flags flags = 0;
    if( readOnly ) {
        flags |= CL_MEM_READ_ONLY;
    }
    else {
        flags |= CL_MEM_READ_WRITE;
    }
    if( useHostPtr ) {
        flags |= CL_MEM_USE_HOST_PTR;
    }

    // Byte size of the range; assumes the iterators address contiguous
    // storage when useHostPtr is set -- TODO confirm at call sites.
    size_type size = sizeof(DataType)*(endIterator - startIterator);

    if( useHostPtr ) {
        // Host memory backs the buffer directly; no device-side copy here.
        object_ = CL_(clCreateBuffer)(context(), flags, size, const_cast<DataType*>(&*startIterator), &error);
    } else {
        object_ = CL_(clCreateBuffer)(context(), flags, size, 0, &error);
    }

    detail::errHandler(error, __CREATE_BUFFER_ERR);
    if (err != nullptr) {
        *err = error;
    }

    // Without CL_MEM_USE_HOST_PTR the host data must be copied in explicitly
    // through a temporary command queue on this context.
    if( !useHostPtr ) {
        CommandQueue queue(context, 0, &error);
        detail::errHandler(error, __CREATE_BUFFER_ERR);
        if (err != nullptr) {
            *err = error;
        }

        error = cl::copy(queue, startIterator, endIterator, *this);
        detail::errHandler(error, __CREATE_BUFFER_ERR);
        if (err != nullptr) {
            *err = error;
        }
}
10007
template< typename IteratorType >
    const CommandQueue &queue,
    IteratorType startIterator,
    IteratorType endIterator,
    bool readOnly,
    bool useHostPtr,
    cl_int* err)
{
    typedef typename std::iterator_traits<IteratorType>::value_type DataType;
    cl_int error;

    // Translate the boolean options into cl_mem_flags.
    cl_mem_flags flags = 0;
    if (readOnly) {
        flags |= CL_MEM_READ_ONLY;
    }
    else {
        flags |= CL_MEM_READ_WRITE;
    }
    if (useHostPtr) {
        flags |= CL_MEM_USE_HOST_PTR;
    }

    // Byte size of the range; assumes the iterators address contiguous
    // storage when useHostPtr is set -- TODO confirm at call sites.
    size_type size = sizeof(DataType)*(endIterator - startIterator);

    // The buffer is created on the queue's context.
    Context context = queue.getInfo<CL_QUEUE_CONTEXT>();

    if (useHostPtr) {
        // Host memory backs the buffer directly; no device-side copy here.
        object_ = CL_(clCreateBuffer)(context(), flags, size, const_cast<DataType*>(&*startIterator), &error);
    }
    else {
        object_ = CL_(clCreateBuffer)(context(), flags, size, 0, &error);
    }

    detail::errHandler(error, __CREATE_BUFFER_ERR);
    if (err != nullptr) {
        *err = error;
    }

    // Without CL_MEM_USE_HOST_PTR the host data must be copied in
    // explicitly, using the caller-supplied queue.
    if (!useHostPtr) {
        error = cl::copy(queue, startIterator, endIterator, *this);
        detail::errHandler(error, __CREATE_BUFFER_ERR);
        if (err != nullptr) {
            *err = error;
        }
    }
}
10055
10056inline cl_int enqueueReadBuffer(
10057 const Buffer& buffer,
10058 cl_bool blocking,
10059 size_type offset,
10060 size_type size,
10061 void* ptr,
10062 const vector<Event>* events = nullptr,
10063 Event* event = nullptr)
10064{
10065 cl_int error;
10066 CommandQueue queue = CommandQueue::getDefault(&error);
10067
10068 if (error != CL_SUCCESS) {
10069 return error;
10070 }
10071
10072 return queue.enqueueReadBuffer(buffer, blocking, offset, size, ptr, events, event);
10073}
10074
10075inline cl_int enqueueWriteBuffer(
10076 const Buffer& buffer,
10077 cl_bool blocking,
10078 size_type offset,
10079 size_type size,
10080 const void* ptr,
10081 const vector<Event>* events = nullptr,
10082 Event* event = nullptr)
10083{
10084 cl_int error;
10085 CommandQueue queue = CommandQueue::getDefault(&error);
10086
10087 if (error != CL_SUCCESS) {
10088 return error;
10089 }
10090
10091 return queue.enqueueWriteBuffer(buffer, blocking, offset, size, ptr, events, event);
10092}
10093
10094inline void* enqueueMapBuffer(
10095 const Buffer& buffer,
10096 cl_bool blocking,
10097 cl_map_flags flags,
10098 size_type offset,
10099 size_type size,
10100 const vector<Event>* events = nullptr,
10101 Event* event = nullptr,
10102 cl_int* err = nullptr)
10103{
10104 cl_int error;
10105 CommandQueue queue = CommandQueue::getDefault(&error);
10106 detail::errHandler(error, __ENQUEUE_MAP_BUFFER_ERR);
10107 if (err != nullptr) {
10108 *err = error;
10109 }
10110
10111 void * result = CL_(clEnqueueMapBuffer)(
10112 queue(), buffer(), blocking, flags, offset, size,
10113 (events != nullptr) ? (cl_uint) events->size() : 0,
10114 (events != nullptr && events->size() > 0) ? (const cl_event*) &events->front() : nullptr,
10115 (cl_event*) event,
10116 &error);
10117
10118 detail::errHandler(error, __ENQUEUE_MAP_BUFFER_ERR);
10119 if (err != nullptr) {
10120 *err = error;
10121 }
10122 return result;
10123}
10124
10125
10126#if CL_HPP_TARGET_OPENCL_VERSION >= 200
10129 * update a region of a coarse-grained SVM buffer.
10130 * This variant takes a raw SVM pointer.
10131 */
10132template<typename T>
10133inline cl_int enqueueMapSVM(
10134 T* ptr,
10135 cl_bool blocking,
10136 cl_map_flags flags,
10137 size_type size,
10138 const vector<Event>* events,
10139 Event* event)
10140{
10141 cl_int error;
10142 CommandQueue queue = CommandQueue::getDefault(&error);
10143 if (error != CL_SUCCESS) {
10144 return detail::errHandler(error, __ENQUEUE_MAP_SVM_ERR);
10145 }
10146
10147 return queue.enqueueMapSVM(
10148 ptr, blocking, flags, size, events, event);
10149}
10150
10153 * update a region of a coarse-grained SVM buffer.
10154 * This variant takes a cl::pointer instance.
10155 */
10156template<typename T, class D>
10157inline cl_int enqueueMapSVM(
10158 cl::pointer<T, D> &ptr,
10159 cl_bool blocking,
10160 cl_map_flags flags,
10161 size_type size,
10162 const vector<Event>* events = nullptr,
10163 Event* event = nullptr)
10164{
10165 cl_int error;
10166 CommandQueue queue = CommandQueue::getDefault(&error);
10167 if (error != CL_SUCCESS) {
10168 return detail::errHandler(error, __ENQUEUE_MAP_BUFFER_ERR);
10169 }
10170
10171 return queue.enqueueMapSVM(
10172 ptr, blocking, flags, size, events, event);
10173}
10174
10177 * update a region of a coarse-grained SVM buffer.
10178 * This variant takes a cl::vector instance.
10179 */
10180template<typename T, class Alloc>
10181inline cl_int enqueueMapSVM(
10182 cl::vector<T, Alloc> &container,
10183 cl_bool blocking,
10184 cl_map_flags flags,
10185 const vector<Event>* events = nullptr,
10186 Event* event = nullptr)
10187{
10188 cl_int error;
10189 CommandQueue queue = CommandQueue::getDefault(&error);
10190 if (error != CL_SUCCESS) {
10191 return detail::errHandler(error, __ENQUEUE_MAP_SVM_ERR);
10192 }
10193
10194 return queue.enqueueMapSVM(
10195 container, blocking, flags, events, event);
10196}
10197
10198#endif // #if CL_HPP_TARGET_OPENCL_VERSION >= 200
10199
10200inline cl_int enqueueUnmapMemObject(
10201 const Memory& memory,
10202 void* mapped_ptr,
10203 const vector<Event>* events = nullptr,
10204 Event* event = nullptr)
10205{
10206 cl_int error;
10207 CommandQueue queue = CommandQueue::getDefault(&error);
10208 detail::errHandler(error, __ENQUEUE_MAP_BUFFER_ERR);
10209 if (error != CL_SUCCESS) {
10210 return error;
10211 }
10212
10213 cl_event tmp;
10214 cl_int err = detail::errHandler(
10215 CL_(clEnqueueUnmapMemObject)(
10216 queue(), memory(), mapped_ptr,
10217 (events != nullptr) ? (cl_uint)events->size() : 0,
10218 (events != nullptr && events->size() > 0) ? (const cl_event*)&events->front() : nullptr,
10219 (event != nullptr) ? &tmp : nullptr),
10220 __ENQUEUE_UNMAP_MEM_OBJECT_ERR);
10221
10222 if (event != nullptr && err == CL_SUCCESS)
10223 *event = tmp;
10224
10225 return err;
10226}
10227
10228#if CL_HPP_TARGET_OPENCL_VERSION >= 200
10231 * SVM buffer back to the OpenCL runtime.
10232 * This variant takes a raw SVM pointer.
10233 */
10234template<typename T>
10235inline cl_int enqueueUnmapSVM(
10236 T* ptr,
10237 const vector<Event>* events = nullptr,
10238 Event* event = nullptr)
10239{
10240 cl_int error;
10241 CommandQueue queue = CommandQueue::getDefault(&error);
10242 if (error != CL_SUCCESS) {
10243 return detail::errHandler(error, __ENQUEUE_UNMAP_SVM_ERR);
10244 }
10245
10246 return detail::errHandler(queue.enqueueUnmapSVM(ptr, events, event),
10247 __ENQUEUE_UNMAP_SVM_ERR);
10248
10249}
10250
10253 * SVM buffer back to the OpenCL runtime.
10254 * This variant takes a cl::pointer instance.
10255 */
10256template<typename T, class D>
10257inline cl_int enqueueUnmapSVM(
10258 cl::pointer<T, D> &ptr,
10259 const vector<Event>* events = nullptr,
10260 Event* event = nullptr)
10261{
10262 cl_int error;
10263 CommandQueue queue = CommandQueue::getDefault(&error);
10264 if (error != CL_SUCCESS) {
10265 return detail::errHandler(error, __ENQUEUE_UNMAP_SVM_ERR);
10266 }
10267
10268 return detail::errHandler(queue.enqueueUnmapSVM(ptr, events, event),
10269 __ENQUEUE_UNMAP_SVM_ERR);
10270}
10271
10274 * SVM buffer back to the OpenCL runtime.
10275 * This variant takes a cl::vector instance.
10276 */
10277template<typename T, class Alloc>
10278inline cl_int enqueueUnmapSVM(
10279 cl::vector<T, Alloc> &container,
10280 const vector<Event>* events = nullptr,
10281 Event* event = nullptr)
10282{
10283 cl_int error;
10284 CommandQueue queue = CommandQueue::getDefault(&error);
10285 if (error != CL_SUCCESS) {
10286 return detail::errHandler(error, __ENQUEUE_UNMAP_SVM_ERR);
10287 }
10288
10289 return detail::errHandler(queue.enqueueUnmapSVM(container, events, event),
10290 __ENQUEUE_UNMAP_SVM_ERR);
10291}
10292
10293#endif // #if CL_HPP_TARGET_OPENCL_VERSION >= 200
10294
10295inline cl_int enqueueCopyBuffer(
10296 const Buffer& src,
10297 const Buffer& dst,
10298 size_type src_offset,
10299 size_type dst_offset,
10300 size_type size,
10301 const vector<Event>* events = nullptr,
10302 Event* event = nullptr)
10303{
10304 cl_int error;
10305 CommandQueue queue = CommandQueue::getDefault(&error);
10306
10307 if (error != CL_SUCCESS) {
10308 return error;
10309 }
10310
10311 return queue.enqueueCopyBuffer(src, dst, src_offset, dst_offset, size, events, event);
10312}
10313
10316 * Host to Device.
10317 * Uses default command queue.
10318 */
10319template< typename IteratorType >
10320inline cl_int copy( IteratorType startIterator, IteratorType endIterator, cl::Buffer &buffer )
10321{
10322 cl_int error;
10323 CommandQueue queue = CommandQueue::getDefault(&error);
10324 if (error != CL_SUCCESS)
10325 return error;
10326
10327 return cl::copy(queue, startIterator, endIterator, buffer);
10328}
10329
10332 * Device to Host.
10333 * Uses default command queue.
10334 */
10335template< typename IteratorType >
10336inline cl_int copy( const cl::Buffer &buffer, IteratorType startIterator, IteratorType endIterator )
10337{
10338 cl_int error;
10339 CommandQueue queue = CommandQueue::getDefault(&error);
10340 if (error != CL_SUCCESS)
10341 return error;
10342
10343 return cl::copy(queue, buffer, startIterator, endIterator);
10344}
10345
10348 * Host to Device.
10349 * Uses specified queue.
10350 */
template< typename IteratorType >
inline cl_int copy( const CommandQueue &queue, IteratorType startIterator, IteratorType endIterator, cl::Buffer &buffer )
{
    typedef typename std::iterator_traits<IteratorType>::value_type DataType;
    cl_int error;

    size_type length = endIterator-startIterator;
    size_type byteLength = length*sizeof(DataType);

    // Map the buffer for writing (blocking), copy the host range into the
    // mapped region, then unmap so the device sees the data.
    DataType *pointer =
        static_cast<DataType*>(queue.enqueueMapBuffer(buffer, CL_TRUE, CL_MAP_WRITE, 0, byteLength, 0, 0, &error));
    // if exceptions enabled, enqueueMapBuffer will throw
    if( error != CL_SUCCESS ) {
        return error;
    }
#if defined(_MSC_VER) && _MSC_VER < 1920
    // Older MSVC: wrap the destination pointer in a checked iterator to
    // satisfy the compiler's checked-iterator diagnostics for std::copy.
    std::copy(
        startIterator,
        endIterator,
        stdext::checked_array_iterator<DataType*>(
            pointer, length));
#else
    std::copy(startIterator, endIterator, pointer);
#endif // defined(_MSC_VER) && _MSC_VER < 1920
    Event endEvent;
    error = queue.enqueueUnmapMemObject(buffer, pointer, 0, &endEvent);
    // if exceptions enabled, enqueueUnmapMemObject will throw
    if( error != CL_SUCCESS ) {
        return error;
    }
    // Wait for the unmap to complete before returning to the caller.
    endEvent.wait();
    return CL_SUCCESS;
}
10384
10387 * Device to Host.
10388 * Uses specified queue.
10389 */
10390template< typename IteratorType >
10391inline cl_int copy( const CommandQueue &queue, const cl::Buffer &buffer, IteratorType startIterator, IteratorType endIterator )
10392{
10393 typedef typename std::iterator_traits<IteratorType>::value_type DataType;
10394 cl_int error;
10395
10396 size_type length = endIterator-startIterator;
10397 size_type byteLength = length*sizeof(DataType);
10398
10399 DataType *pointer =
10400 static_cast<DataType*>(queue.enqueueMapBuffer(buffer, CL_TRUE, CL_MAP_READ, 0, byteLength, 0, 0, &error));
10401 // if exceptions enabled, enqueueMapBuffer will throw
10402 if( error != CL_SUCCESS ) {
10403 return error;
10404 }
10405 std::copy(pointer, pointer + length, startIterator);
10406 Event endEvent;
10407 error = queue.enqueueUnmapMemObject(buffer, pointer, 0, &endEvent);
10408 // if exceptions enabled, enqueueUnmapMemObject will throw
10409 if( error != CL_SUCCESS ) {
10410 return error;
10411 }
10412 endEvent.wait();
10413 return CL_SUCCESS;
10414}
10415
10416
10417#if CL_HPP_TARGET_OPENCL_VERSION >= 200
10419 * Blocking SVM map operation - performs a blocking map underneath.
10420 */
10421template<typename T, class Alloc>
10422inline cl_int mapSVM(cl::vector<T, Alloc> &container)
10423{
10424 return enqueueMapSVM(container, CL_TRUE, CL_MAP_READ | CL_MAP_WRITE);
10425}
10426
10428* Blocking SVM map operation - performs a blocking map underneath.
10429*/
10430template<typename T, class Alloc>
10431inline cl_int unmapSVM(cl::vector<T, Alloc> &container)
10432{
10433 return enqueueUnmapSVM(container);
10434}
10435
10436#endif // #if CL_HPP_TARGET_OPENCL_VERSION >= 200
10437
10438#if CL_HPP_TARGET_OPENCL_VERSION >= 110
10439inline cl_int enqueueReadBufferRect(
10440 const Buffer& buffer,
10441 cl_bool blocking,
10442 const array<size_type, 3>& buffer_offset,
10443 const array<size_type, 3>& host_offset,
10444 const array<size_type, 3>& region,
10445 size_type buffer_row_pitch,
10446 size_type buffer_slice_pitch,
10447 size_type host_row_pitch,
10448 size_type host_slice_pitch,
10449 void *ptr,
10450 const vector<Event>* events = nullptr,
10451 Event* event = nullptr)
10452{
10453 cl_int error;
10454 CommandQueue queue = CommandQueue::getDefault(&error);
10455
10456 if (error != CL_SUCCESS) {
10457 return error;
10458 }
10459
10460 return queue.enqueueReadBufferRect(
10461 buffer,
10462 blocking,
10463 buffer_offset,
10464 host_offset,
10465 region,
10466 buffer_row_pitch,
10467 buffer_slice_pitch,
10468 host_row_pitch,
10469 host_slice_pitch,
10470 ptr,
10471 events,
10472 event);
10473}
10474
10475inline cl_int enqueueReadBufferRect(
10476 const Buffer& buffer,
10477 cl_bool blocking,
10478 const array<size_type, 2>& buffer_offset,
10479 const array<size_type, 2>& host_offset,
10480 const array<size_type, 2>& region,
10481 size_type buffer_row_pitch,
10482 size_type buffer_slice_pitch,
10483 size_type host_row_pitch,
10484 size_type host_slice_pitch,
10485 void* ptr,
10486 const vector<Event>* events = nullptr,
10487 Event* event = nullptr)
10488{
10489 return enqueueReadBufferRect(
10490 buffer,
10491 blocking,
10492 { buffer_offset[0], buffer_offset[1], 0 },
10493 { host_offset[0], host_offset[1], 0 },
10494 { region[0], region[1], 1 },
10495 buffer_row_pitch,
10496 buffer_slice_pitch,
10497 host_row_pitch,
10498 host_slice_pitch,
10499 ptr,
10500 events,
10501 event);
10502}
10503
10504inline cl_int enqueueWriteBufferRect(
10505 const Buffer& buffer,
10506 cl_bool blocking,
10507 const array<size_type, 3>& buffer_offset,
10508 const array<size_type, 3>& host_offset,
10509 const array<size_type, 3>& region,
10510 size_type buffer_row_pitch,
10511 size_type buffer_slice_pitch,
10512 size_type host_row_pitch,
10513 size_type host_slice_pitch,
10514 const void *ptr,
10515 const vector<Event>* events = nullptr,
10516 Event* event = nullptr)
10517{
10518 cl_int error;
10519 CommandQueue queue = CommandQueue::getDefault(&error);
10520
10521 if (error != CL_SUCCESS) {
10522 return error;
10523 }
10524
10525 return queue.enqueueWriteBufferRect(
10526 buffer,
10527 blocking,
10528 buffer_offset,
10529 host_offset,
10530 region,
10531 buffer_row_pitch,
10532 buffer_slice_pitch,
10533 host_row_pitch,
10534 host_slice_pitch,
10535 ptr,
10536 events,
10537 event);
10538}
10539
10540inline cl_int enqueueWriteBufferRect(
10541 const Buffer& buffer,
10542 cl_bool blocking,
10543 const array<size_type, 2>& buffer_offset,
10544 const array<size_type, 2>& host_offset,
10545 const array<size_type, 2>& region,
10546 size_type buffer_row_pitch,
10547 size_type buffer_slice_pitch,
10548 size_type host_row_pitch,
10549 size_type host_slice_pitch,
10550 const void* ptr,
10551 const vector<Event>* events = nullptr,
10552 Event* event = nullptr)
10553{
10554 return enqueueWriteBufferRect(
10555 buffer,
10556 blocking,
10557 { buffer_offset[0], buffer_offset[1], 0 },
10558 { host_offset[0], host_offset[1], 0 },
10559 { region[0], region[1], 1 },
10560 buffer_row_pitch,
10561 buffer_slice_pitch,
10562 host_row_pitch,
10563 host_slice_pitch,
10564 ptr,
10565 events,
10566 event);
10567}
10568
10569inline cl_int enqueueCopyBufferRect(
10570 const Buffer& src,
10571 const Buffer& dst,
10572 const array<size_type, 3>& src_origin,
10573 const array<size_type, 3>& dst_origin,
10574 const array<size_type, 3>& region,
10575 size_type src_row_pitch,
10576 size_type src_slice_pitch,
10577 size_type dst_row_pitch,
10578 size_type dst_slice_pitch,
10579 const vector<Event>* events = nullptr,
10580 Event* event = nullptr)
10581{
10582 cl_int error;
10583 CommandQueue queue = CommandQueue::getDefault(&error);
10584
10585 if (error != CL_SUCCESS) {
10586 return error;
10587 }
10588
10589 return queue.enqueueCopyBufferRect(
10590 src,
10591 dst,
10592 src_origin,
10593 dst_origin,
10594 region,
10595 src_row_pitch,
10596 src_slice_pitch,
10597 dst_row_pitch,
10598 dst_slice_pitch,
10599 events,
10600 event);
10601}
10602
10603inline cl_int enqueueCopyBufferRect(
10604 const Buffer& src,
10605 const Buffer& dst,
10606 const array<size_type, 2>& src_origin,
10607 const array<size_type, 2>& dst_origin,
10608 const array<size_type, 2>& region,
10609 size_type src_row_pitch,
10610 size_type src_slice_pitch,
10611 size_type dst_row_pitch,
10612 size_type dst_slice_pitch,
10613 const vector<Event>* events = nullptr,
10614 Event* event = nullptr)
10615{
10616 return enqueueCopyBufferRect(
10617 src,
10618 dst,
10619 { src_origin[0], src_origin[1], 0 },
10620 { dst_origin[0], dst_origin[1], 0 },
10621 { region[0], region[1], 1 },
10622 src_row_pitch,
10623 src_slice_pitch,
10624 dst_row_pitch,
10625 dst_slice_pitch,
10626 events,
10627 event);
10628}
10629#endif // CL_HPP_TARGET_OPENCL_VERSION >= 110
10630
10631inline cl_int enqueueReadImage(
10632 const Image& image,
10633 cl_bool blocking,
10634 const array<size_type, 3>& origin,
10635 const array<size_type, 3>& region,
10636 size_type row_pitch,
10637 size_type slice_pitch,
10638 void* ptr,
10639 const vector<Event>* events = nullptr,
10640 Event* event = nullptr)
10641{
10642 cl_int error;
10643 CommandQueue queue = CommandQueue::getDefault(&error);
10644
10645 if (error != CL_SUCCESS) {
10646 return error;
10647 }
10648
10649 return queue.enqueueReadImage(
10650 image,
10651 blocking,
10652 origin,
10653 region,
10654 row_pitch,
10655 slice_pitch,
10656 ptr,
10657 events,
10658 event);
10659}
10660
10661inline cl_int enqueueReadImage(
10662 const Image& image,
10663 cl_bool blocking,
10664 const array<size_type, 2>& origin,
10665 const array<size_type, 2>& region,
10666 size_type row_pitch,
10667 size_type slice_pitch,
10668 void* ptr,
10669 const vector<Event>* events = nullptr,
10670 Event* event = nullptr)
10671{
10672 return enqueueReadImage(
10673 image,
10674 blocking,
10675 { origin[0], origin[1], 0 },
10676 { region[0], region[1], 1 },
10677 row_pitch,
10678 slice_pitch,
10679 ptr,
10680 events,
10681 event);
10682}
10683
10684inline cl_int enqueueWriteImage(
10685 const Image& image,
10686 cl_bool blocking,
10687 const array<size_type, 3>& origin,
10688 const array<size_type, 3>& region,
10689 size_type row_pitch,
10690 size_type slice_pitch,
10691 const void* ptr,
10692 const vector<Event>* events = nullptr,
10693 Event* event = nullptr)
10694{
10695 cl_int error;
10696 CommandQueue queue = CommandQueue::getDefault(&error);
10697
10698 if (error != CL_SUCCESS) {
10699 return error;
10700 }
10701
10702 return queue.enqueueWriteImage(
10703 image,
10704 blocking,
10705 origin,
10706 region,
10707 row_pitch,
10708 slice_pitch,
10709 ptr,
10710 events,
10711 event);
10712}
10713
10714inline cl_int enqueueWriteImage(
10715 const Image& image,
10716 cl_bool blocking,
10717 const array<size_type, 2>& origin,
10718 const array<size_type, 2>& region,
10719 size_type row_pitch,
10720 size_type slice_pitch,
10721 const void* ptr,
10722 const vector<Event>* events = nullptr,
10723 Event* event = nullptr)
10724{
10725 return enqueueWriteImage(
10726 image,
10727 blocking,
10728 { origin[0], origin[1], 0 },
10729 { region[0], region[1], 1 },
10730 row_pitch,
10731 slice_pitch,
10732 ptr,
10733 events,
10734 event);
10735}
10736
10737inline cl_int enqueueCopyImage(
10738 const Image& src,
10739 const Image& dst,
10740 const array<size_type, 3>& src_origin,
10741 const array<size_type, 3>& dst_origin,
10742 const array<size_type, 3>& region,
10743 const vector<Event>* events = nullptr,
10744 Event* event = nullptr)
10745{
10746 cl_int error;
10747 CommandQueue queue = CommandQueue::getDefault(&error);
10748
10749 if (error != CL_SUCCESS) {
10750 return error;
10751 }
10752
10753 return queue.enqueueCopyImage(
10754 src,
10755 dst,
10756 src_origin,
10757 dst_origin,
10758 region,
10759 events,
10760 event);
10761}
10762
10763inline cl_int enqueueCopyImage(
10764 const Image& src,
10765 const Image& dst,
10766 const array<size_type, 2>& src_origin,
10767 const array<size_type, 2>& dst_origin,
10768 const array<size_type, 2>& region,
10769 const vector<Event>* events = nullptr,
10770 Event* event = nullptr)
10771{
10772 return enqueueCopyImage(
10773 src,
10774 dst,
10775 { src_origin[0], src_origin[1], 0 },
10776 { dst_origin[0], dst_origin[1], 0 },
10777 { region[0], region[1], 1 },
10778 events,
10779 event);
10780}
10781
10782inline cl_int enqueueCopyImageToBuffer(
10783 const Image& src,
10784 const Buffer& dst,
10785 const array<size_type, 3>& src_origin,
10786 const array<size_type, 3>& region,
10787 size_type dst_offset,
10788 const vector<Event>* events = nullptr,
10789 Event* event = nullptr)
10790{
10791 cl_int error;
10792 CommandQueue queue = CommandQueue::getDefault(&error);
10793
10794 if (error != CL_SUCCESS) {
10795 return error;
10796 }
10797
10798 return queue.enqueueCopyImageToBuffer(
10799 src,
10800 dst,
10801 src_origin,
10802 region,
10803 dst_offset,
10804 events,
10805 event);
10806}
10807
10808inline cl_int enqueueCopyImageToBuffer(
10809 const Image& src,
10810 const Buffer& dst,
10811 const array<size_type, 2>& src_origin,
10812 const array<size_type, 2>& region,
10813 size_type dst_offset,
10814 const vector<Event>* events = nullptr,
10815 Event* event = nullptr)
10816{
10817 return enqueueCopyImageToBuffer(
10818 src,
10819 dst,
10820 { src_origin[0], src_origin[1], 0 },
10821 { region[0], region[1], 1 },
10822 dst_offset,
10823 events,
10824 event);
10825}
10826
10827inline cl_int enqueueCopyBufferToImage(
10828 const Buffer& src,
10829 const Image& dst,
10830 size_type src_offset,
10831 const array<size_type, 3>& dst_origin,
10832 const array<size_type, 3>& region,
10833 const vector<Event>* events = nullptr,
10834 Event* event = nullptr)
10835{
10836 cl_int error;
10837 CommandQueue queue = CommandQueue::getDefault(&error);
10838
10839 if (error != CL_SUCCESS) {
10840 return error;
10841 }
10842
10843 return queue.enqueueCopyBufferToImage(
10844 src,
10845 dst,
10846 src_offset,
10847 dst_origin,
10848 region,
10849 events,
10850 event);
10851}
10852
10853inline cl_int enqueueCopyBufferToImage(
10854 const Buffer& src,
10855 const Image& dst,
10856 size_type src_offset,
10857 const array<size_type, 2>& dst_origin,
10858 const array<size_type, 2>& region,
10859 const vector<Event>* events = nullptr,
10860 Event* event = nullptr)
10861{
10862 cl_int error;
10863 CommandQueue queue = CommandQueue::getDefault(&error);
10864
10865 if (error != CL_SUCCESS) {
10866 return error;
10867 }
10868
10869 return enqueueCopyBufferToImage(
10870 src,
10871 dst,
10872 src_offset,
10873 { dst_origin[0], dst_origin[1], 0 },
10874 { region[0], region[1], 1 },
10875 events,
10876 event);
10877}
10878
10879inline cl_int flush(void)
10880{
10881 cl_int error;
10882 CommandQueue queue = CommandQueue::getDefault(&error);
10883
10884 if (error != CL_SUCCESS) {
10885 return error;
10886 }
10887
10888 return queue.flush();
10889}
10890
10891inline cl_int finish(void)
10892{
10893 cl_int error;
10894 CommandQueue queue = CommandQueue::getDefault(&error);
10895
10896 if (error != CL_SUCCESS) {
10897 return error;
10898 }
10899
10901 return queue.finish();
10902}
10903
10904class EnqueueArgs
10905{
10906private:
10907 CommandQueue queue_;
10908 const NDRange offset_;
10909 const NDRange global_;
10910 const NDRange local_;
10911 vector<Event> events_;
10912
10913 template<typename... Ts>
10914 friend class KernelFunctor;
10915
10916public:
10917 EnqueueArgs(NDRange global) :
10918 queue_(CommandQueue::getDefault()),
10919 offset_(NullRange),
10920 global_(global),
10921 local_(NullRange)
10922 {
10923
10924 }
10925
10926 EnqueueArgs(NDRange global, NDRange local) :
10927 queue_(CommandQueue::getDefault()),
10928 offset_(NullRange),
10929 global_(global),
10930 local_(local)
10931 {
10932
10933 }
10934
10935 EnqueueArgs(NDRange offset, NDRange global, NDRange local) :
10936 queue_(CommandQueue::getDefault()),
10937 offset_(offset),
10938 global_(global),
10939 local_(local)
10940 {
10941
10942 }
10943
10944 EnqueueArgs(Event e, NDRange global) :
10945 queue_(CommandQueue::getDefault()),
10946 offset_(NullRange),
10947 global_(global),
10948 local_(NullRange)
10949 {
10950 events_.push_back(e);
10951 }
10952
10953 EnqueueArgs(Event e, NDRange global, NDRange local) :
10954 queue_(CommandQueue::getDefault()),
10955 offset_(NullRange),
10956 global_(global),
10957 local_(local)
10958 {
10959 events_.push_back(e);
10960 }
10961
10962 EnqueueArgs(Event e, NDRange offset, NDRange global, NDRange local) :
10963 queue_(CommandQueue::getDefault()),
10964 offset_(offset),
10965 global_(global),
10966 local_(local)
10967 {
10968 events_.push_back(e);
10969 }
10970
10971 EnqueueArgs(const vector<Event> &events, NDRange global) :
10972 queue_(CommandQueue::getDefault()),
10973 offset_(NullRange),
10974 global_(global),
10975 local_(NullRange),
10976 events_(events)
10977 {
10978
10979 }
10980
10981 EnqueueArgs(const vector<Event> &events, NDRange global, NDRange local) :
10982 queue_(CommandQueue::getDefault()),
10983 offset_(NullRange),
10984 global_(global),
10985 local_(local),
10986 events_(events)
10987 {
10988
10989 }
10990
10991 EnqueueArgs(const vector<Event> &events, NDRange offset, NDRange global, NDRange local) :
10992 queue_(CommandQueue::getDefault()),
10993 offset_(offset),
10994 global_(global),
10995 local_(local),
10996 events_(events)
10997 {
10998
10999 }
11000
11001 EnqueueArgs(CommandQueue &queue, NDRange global) :
11002 queue_(queue),
11003 offset_(NullRange),
11004 global_(global),
11005 local_(NullRange)
11006 {
11007
11008 }
11009
11010 EnqueueArgs(CommandQueue &queue, NDRange global, NDRange local) :
11011 queue_(queue),
11012 offset_(NullRange),
11013 global_(global),
11014 local_(local)
11015 {
11016
11017 }
11018
11019 EnqueueArgs(CommandQueue &queue, NDRange offset, NDRange global, NDRange local) :
11020 queue_(queue),
11021 offset_(offset),
11022 global_(global),
11023 local_(local)
11024 {
11025
11026 }
11027
// Launch configuration on a caller-supplied queue, gated on a single event.
11028 EnqueueArgs(CommandQueue &queue, Event e, NDRange global) :
11029 queue_(queue),
11030 offset_(NullRange),
11031 global_(global),
11032 local_(NullRange)
11033 {
11034 events_.push_back(e);
11035 }
11036
// As above, with an explicit local work-group size.
11037 EnqueueArgs(CommandQueue &queue, Event e, NDRange global, NDRange local) :
11038 queue_(queue),
11039 offset_(NullRange),
11040 global_(global),
11041 local_(local)
11042 {
11043 events_.push_back(e);
11044 }
11045
// As above, with explicit offset, global and local sizes.
11046 EnqueueArgs(CommandQueue &queue, Event e, NDRange offset, NDRange global, NDRange local) :
11047 queue_(queue),
11048 offset_(offset),
11049 global_(global),
11050 local_(local)
11051 {
11052 events_.push_back(e);
11053 }
11054
// Launch configuration on a caller-supplied queue with a whole wait list
// (copied into events_): global size only.
11055 EnqueueArgs(CommandQueue &queue, const vector<Event> &events, NDRange global) :
11056 queue_(queue),
11057 offset_(NullRange),
11058 global_(global),
11059 local_(NullRange),
11060 events_(events)
11061 {
11062
11063 }
11064
// As above, with an explicit local work-group size.
11065 EnqueueArgs(CommandQueue &queue, const vector<Event> &events, NDRange global, NDRange local) :
11066 queue_(queue),
11067 offset_(NullRange),
11068 global_(global),
11069 local_(local),
11070 events_(events)
11071 {
11072
11073 }
11074
// As above, with explicit offset, global and local sizes. This is the most
// general overload; every other constructor is a subset of it.
11075 EnqueueArgs(CommandQueue &queue, const vector<Event> &events, NDRange offset, NDRange global, NDRange local) :
11076 queue_(queue),
11077 offset_(offset),
11078 global_(global),
11079 local_(local),
11080 events_(events)
11081 {
11082
11083 }
11084};
11085
11086
11087//----------------------------------------------------------------------------------------------
11088
11089
11090/**
11091 * Type safe kernel functor.
11092 *
11093 */
11094template<typename... Ts>
11095class KernelFunctor
11096{
11097private:
11098 Kernel kernel_;
11099
// Recursive variadic helper: binds argument `index`, then recurses for the rest.
11100 template<int index, typename T0, typename... T1s>
11101 void setArgs(T0&& t0, T1s&&... t1s)
11102 {
11103 kernel_.setArg(index, t0);
11104 setArgs<index + 1, T1s...>(std::forward<T1s>(t1s)...);
11105 }
11106
// Base case: one argument left.
11107 template<int index, typename T0>
11108 void setArgs(T0&& t0)
11109 {
11110 kernel_.setArg(index, t0);
11111 }
11112
// Base case: kernel takes no arguments at all.
11113 template<int index>
11114 void setArgs()
11115 {
11116 }
11117
11118
11119public:
11120 KernelFunctor(Kernel kernel) : kernel_(kernel)
11121 {}
11122
11123 KernelFunctor(
11124 const Program& program,
11125 const string name,
11126 cl_int * err = nullptr) :
11127 kernel_(program, name.c_str(), err)
11128 {}
11129
11130 //! \brief Return type of the functor
11131 typedef Event result_type;
11132
11133 /**
11134 * Enqueue kernel.
11135 * @param args Launch parameters of the kernel.
11136 * @param t0... List of kernel arguments based on the template type of the functor.
11137 */
11138 Event operator()(
11139 const EnqueueArgs& args,
11140 Ts... ts)
11141 {
11142 Event event;
11143 setArgs<0>(std::forward<Ts>(ts)...);
11144
11145 args.queue_.enqueueNDRangeKernel(
11146 kernel_,
11147 args.offset_,
11148 args.global_,
11149 args.local_,
11150 &args.events_,
11151 &event);
11152
11153 return event;
11154 }
11155
11156 /**
11157 * Enqueue kernel with support for error code.
11158 * @param args Launch parameters of the kernel.
11159 * @param t0... List of kernel arguments based on the template type of the functor.
11160 * @param error Out parameter returning the error code from the execution.
11161 */
11162 Event operator()(
11163 const EnqueueArgs& args,
11164 Ts... ts,
11165 cl_int &error)
11166 {
11167 Event event;
11168 setArgs<0>(std::forward<Ts>(ts)...);
11169
11170 error = args.queue_.enqueueNDRangeKernel(
11171 kernel_,
11172 args.offset_,
11173 args.global_,
11174 args.local_,
11175 &args.events_,
11176 &event);
11177
11178 return event;
11179 }
11180
11181#if CL_HPP_TARGET_OPENCL_VERSION >= 200
11182 cl_int setSVMPointers(const vector<void*> &pointerList)
11183 {
11184 return kernel_.setSVMPointers(pointerList);
11185 }
11186
11187 template<typename T0, typename... T1s>
11188 cl_int setSVMPointers(const T0 &t0, T1s &... ts)
11189 {
11190 return kernel_.setSVMPointers(t0, ts...);
11191 }
11192#endif // #if CL_HPP_TARGET_OPENCL_VERSION >= 200
11193
11194 Kernel getKernel()
11195 {
11196 return kernel_;
11197 }
11198};
11199
11200namespace compatibility {
/**
11202 * Backward compatibility class to ensure that cl.hpp code works with opencl.hpp.
11203 * Please use KernelFunctor directly.
11204 */
11205 template<typename... Ts>
11206 struct make_kernel
11207 {
11208 typedef KernelFunctor<Ts...> FunctorType;
11209
11210 FunctorType functor_;
11211
// Build the wrapped functor from a program + kernel name (forwards err).
11212 make_kernel(
11213 const Program& program,
11214 const string name,
11215 cl_int * err = nullptr) :
11216 functor_(FunctorType(program, name, err))
11217 {}
11218
// Wrap an already-created Kernel object.
11219 make_kernel(
11220 const Kernel kernel) :
11221 functor_(FunctorType(kernel))
11222 {}
11223
 //! \brief Return type of the functor
11225 typedef Event result_type;
11226
 //! \brief Function signature of the kernel functor.
11228 typedef Event type_(
11229 const EnqueueArgs&,
11230 Ts...);
11231
// Delegates the launch to the wrapped KernelFunctor.
11232 Event operator()(
11233 const EnqueueArgs& enqueueArgs,
11234 Ts... args)
11235 {
11236 return functor_(
11237 enqueueArgs, args...);
11238 }
11239 };
11240} // namespace compatibility
11241
11242#ifdef cl_khr_semaphore
11243
11244#ifdef cl_khr_external_semaphore
// Strongly-named values for the cl_khr_external_semaphore handle types; each
// enumerator is only available when the corresponding sub-extension macro is
// defined by the OpenCL headers.
11245enum ExternalSemaphoreType : cl_external_semaphore_handle_type_khr
11246{
11247 None = 0,
11248#ifdef cl_khr_external_semaphore_opaque_fd
11249 OpaqueFd = CL_SEMAPHORE_HANDLE_OPAQUE_FD_KHR,
11250#endif // cl_khr_external_semaphore_opaque_fd
11251#ifdef cl_khr_external_semaphore_sync_fd
11252 SyncFd = CL_SEMAPHORE_HANDLE_SYNC_FD_KHR,
11253#endif // cl_khr_external_semaphore_sync_fd
11254#ifdef cl_khr_external_semaphore_win32
11255 OpaqueWin32 = CL_SEMAPHORE_HANDLE_OPAQUE_WIN32_KHR,
11256 OpaqueWin32Kmt = CL_SEMAPHORE_HANDLE_OPAQUE_WIN32_KMT_KHR,
11257#endif // cl_khr_external_semaphore_win32
11258};
11259#endif // cl_khr_external_semaphore
11260
// RAII wrapper for a cl_semaphore_khr (cl_khr_semaphore extension). All entry
// points are resolved lazily through pfn_* function pointers because the
// extension is not part of the core dispatch table.
11261class Semaphore : public detail::Wrapper<cl_semaphore_khr>
11262{
11263public:
11264 Semaphore() : detail::Wrapper<cl_type>() {}
// Create a semaphore on `context` from a zero-terminated property list.
// Reports CL_INVALID_OPERATION via err/errHandler when the extension entry
// point could not be resolved.
11265 Semaphore(
11266 const Context &context,
11267 const vector<cl_semaphore_properties_khr>& sema_props,
11268 cl_int *err = nullptr)
11269 {
11270 /* initialization of addresses to extension functions (it is done only once) */
11271 std::call_once(ext_init_, initExtensions, context);
11272
11273 cl_int error = CL_INVALID_OPERATION;
11274
11275 if (pfn_clCreateSemaphoreWithPropertiesKHR)
11276 {
11277 object_ = pfn_clCreateSemaphoreWithPropertiesKHR(
11278 context(),
11279 sema_props.data(),
11280 &error);
11281 }
11282
11283 detail::errHandler(error, __CREATE_SEMAPHORE_KHR_WITH_PROPERTIES_ERR);
11284
11285 if (err != nullptr) {
11286 *err = error;
11287 }
11288 }
// Convenience overload: create on the default context.
11289 Semaphore(
11290 const vector<cl_semaphore_properties_khr>& sema_props,
11291 cl_int* err = nullptr):Semaphore(Context::getDefault(err), sema_props, err) {}
11292
// Wrap an existing raw handle; retains it only when retainObject is true.
11293 explicit Semaphore(const cl_semaphore_khr& semaphore, bool retainObject = false) :
11294 detail::Wrapper<cl_type>(semaphore, retainObject) {}
11295 Semaphore& operator = (const cl_semaphore_khr& rhs) {
11296 detail::Wrapper<cl_type>::operator=(rhs);
11297 return *this;
11298 }
// Query semaphore info into *param; CL_INVALID_OPERATION if the extension
// entry point was never resolved.
11299 template <typename T>
11300 cl_int getInfo(cl_semaphore_info_khr name, T* param) const
11301 {
11302 if (pfn_clGetSemaphoreInfoKHR == nullptr) {
11303 return detail::errHandler(CL_INVALID_OPERATION,
11304 __GET_SEMAPHORE_KHR_INFO_ERR);
11305 }
11306
11307 return detail::errHandler(
11308 detail::getInfo(pfn_clGetSemaphoreInfoKHR, object_, name, param),
11309 __GET_SEMAPHORE_KHR_INFO_ERR);
11310 }
// Typed convenience overload; the param_traits machinery maps the query enum
// to its result type at compile time.
11311 template <cl_semaphore_info_khr name> typename
11312 detail::param_traits<detail::cl_semaphore_info_khr, name>::param_type
11313 getInfo(cl_int* err = nullptr) const
11314 {
11315 typename detail::param_traits<
11316 detail::cl_semaphore_info_khr, name>::param_type param;
11317 cl_int result = getInfo(name, &param);
11318 if (err != nullptr) {
11319 *err = result;
11320 }
11321 return param;
11322 }
11323
11324#ifdef cl_khr_external_semaphore
// Export the semaphore as an external handle of the given type for `device`.
11325 template <typename T>
11326 cl_int getHandleForTypeKHR(
11327 const Device& device, cl_external_semaphore_handle_type_khr name, T* param) const
11328 {
11329 if (pfn_clGetSemaphoreHandleForTypeKHR == nullptr) {
11330 return detail::errHandler(CL_INVALID_OPERATION,
11331 __GET_SEMAPHORE_HANDLE_FOR_TYPE_KHR_ERR);
11332 }
11333
11334 return detail::errHandler(
11335 detail::getInfo(
11336 pfn_clGetSemaphoreHandleForTypeKHR, object_, device(), name, param),
11337 __GET_SEMAPHORE_HANDLE_FOR_TYPE_KHR_ERR);
11338 }
11339
// Typed convenience overload of the above.
11340 template <cl_external_semaphore_handle_type_khr type> typename
11341 detail::param_traits<detail::cl_external_semaphore_handle_type_khr, type>::param_type
11342 getHandleForTypeKHR(const Device& device, cl_int* err = nullptr) const
11343 {
11344 typename detail::param_traits<
11345 detail::cl_external_semaphore_handle_type_khr, type>::param_type param;
11346 cl_int result = getHandleForTypeKHR(device, type, &param);
11347 if (err != nullptr) {
11348 *err = result;
11349 }
11350 return param;
11351 }
11352#endif // cl_khr_external_semaphore
11353
11354 cl_int retain()
11355 {
11356 if (pfn_clRetainSemaphoreKHR == nullptr) {
11357 return detail::errHandler(CL_INVALID_OPERATION,
11358 __RETAIN_SEMAPHORE_KHR_ERR);
11359 }
11360 return pfn_clRetainSemaphoreKHR(object_);
11361 }
11362
11363 cl_int release()
11364 {
11365 if (pfn_clReleaseSemaphoreKHR == nullptr) {
11366 return detail::errHandler(CL_INVALID_OPERATION,
11367 __RELEASE_SEMAPHORE_KHR_ERR);
11368 }
11369 return pfn_clReleaseSemaphoreKHR(object_);
11370 }
11371
11372private:
// Guards one-time resolution of the extension function pointers.
11373 static std::once_flag ext_init_;
11374
// Resolve all cl_khr_semaphore entry points, platform-scoped when the target
// OpenCL version allows it. Reports CL_INVALID_VALUE only when NONE of the
// entry points resolved (partial availability is tolerated here).
11375 static void initExtensions(const Context& context)
11376 {
11377#if CL_HPP_TARGET_OPENCL_VERSION >= 120
// Uses the first device's platform to scope the lookup.
11378 Device device = context.getInfo<CL_CONTEXT_DEVICES>().at(0);
11379 cl_platform_id platform = device.getInfo<CL_DEVICE_PLATFORM>()();
11380 CL_HPP_INIT_CL_EXT_FCN_PTR_PLATFORM_(platform, clCreateSemaphoreWithPropertiesKHR);
11381 CL_HPP_INIT_CL_EXT_FCN_PTR_PLATFORM_(platform, clReleaseSemaphoreKHR);
11382 CL_HPP_INIT_CL_EXT_FCN_PTR_PLATFORM_(platform, clRetainSemaphoreKHR);
11383 CL_HPP_INIT_CL_EXT_FCN_PTR_PLATFORM_(platform, clEnqueueWaitSemaphoresKHR);
11384 CL_HPP_INIT_CL_EXT_FCN_PTR_PLATFORM_(platform, clEnqueueSignalSemaphoresKHR);
11385 CL_HPP_INIT_CL_EXT_FCN_PTR_PLATFORM_(platform, clGetSemaphoreInfoKHR);
11386#ifdef cl_khr_external_semaphore
11387 CL_HPP_INIT_CL_EXT_FCN_PTR_PLATFORM_(platform, clGetSemaphoreHandleForTypeKHR);
11388#endif // cl_khr_external_semaphore
11389
11390#else
11391 CL_HPP_INIT_CL_EXT_FCN_PTR_(clCreateSemaphoreWithPropertiesKHR);
11392 CL_HPP_INIT_CL_EXT_FCN_PTR_(clReleaseSemaphoreKHR);
11393 CL_HPP_INIT_CL_EXT_FCN_PTR_(clRetainSemaphoreKHR);
11394 CL_HPP_INIT_CL_EXT_FCN_PTR_(clEnqueueWaitSemaphoresKHR);
11395 CL_HPP_INIT_CL_EXT_FCN_PTR_(clEnqueueSignalSemaphoresKHR);
11396 CL_HPP_INIT_CL_EXT_FCN_PTR_(clGetSemaphoreInfoKHR);
11397#ifdef cl_khr_external_semaphore
11398 CL_HPP_INIT_CL_EXT_FCN_PTR_(clGetSemaphoreHandleForTypeKHR);
11399#endif // cl_khr_external_semaphore
11400
11401#endif
11402 if ((pfn_clCreateSemaphoreWithPropertiesKHR == nullptr) &&
11403 (pfn_clReleaseSemaphoreKHR == nullptr) &&
11404 (pfn_clRetainSemaphoreKHR == nullptr) &&
11405 (pfn_clEnqueueWaitSemaphoresKHR == nullptr) &&
11406 (pfn_clEnqueueSignalSemaphoresKHR == nullptr) &&
11407#ifdef cl_khr_external_semaphore
11408 (pfn_clGetSemaphoreHandleForTypeKHR == nullptr) &&
11409#endif // cl_khr_external_semaphore
11410 (pfn_clGetSemaphoreInfoKHR == nullptr))
11411 {
11412 detail::errHandler(CL_INVALID_VALUE, __CREATE_SEMAPHORE_KHR_WITH_PROPERTIES_ERR);
11413 }
11414 }
11415
11416};
11417
11418CL_HPP_DEFINE_STATIC_MEMBER_ std::once_flag Semaphore::ext_init_;
11419
// Enqueue a wait on a list of semaphores (cl_khr_semaphore), optionally with
// per-semaphore payloads and an event wait list. Returns the raw CL error;
// *event is only written on success when the caller asked for it.
11420inline cl_int CommandQueue::enqueueWaitSemaphores(
11421 const vector<Semaphore> &sema_objects,
11422 const vector<cl_semaphore_payload_khr> &sema_payloads,
11423 const vector<Event>* events_wait_list,
11424 Event *event) const
11425{
11426 cl_event tmp;
11427 cl_int err = CL_INVALID_OPERATION;
11428
11429 if (pfn_clEnqueueWaitSemaphoresKHR != nullptr) {
11430 err = pfn_clEnqueueWaitSemaphoresKHR(
11431 object_,
11432 (cl_uint)sema_objects.size(),
// Fix: guard front() like sema_payloads below — calling front() on an
// empty vector is undefined behavior.
11433 (sema_objects.size() > 0) ? (const cl_semaphore_khr *) &sema_objects.front() : nullptr,
11434 (sema_payloads.size() > 0) ? &sema_payloads.front() : nullptr,
11435 (events_wait_list != nullptr) ? (cl_uint) events_wait_list->size() : 0,
11436 (events_wait_list != nullptr && events_wait_list->size() > 0) ? (const cl_event*) &events_wait_list->front() : nullptr,
11437 (event != nullptr) ? &tmp : nullptr);
11438 }
11439
11440 detail::errHandler(err, __ENQUEUE_WAIT_SEMAPHORE_KHR_ERR);
11441
11442 if (event != nullptr && err == CL_SUCCESS)
11443 *event = tmp;
11444
11445 return err;
11446}
11447
// Enqueue a signal of a list of semaphores (cl_khr_semaphore); mirrors
// enqueueWaitSemaphores. *event is only written on success when requested.
11448inline cl_int CommandQueue::enqueueSignalSemaphores(
11449 const vector<Semaphore> &sema_objects,
11450 const vector<cl_semaphore_payload_khr>& sema_payloads,
11451 const vector<Event>* events_wait_list,
11452 Event* event)
11453{
11454 cl_event tmp;
11455 cl_int err = CL_INVALID_OPERATION;
11456
11457 if (pfn_clEnqueueSignalSemaphoresKHR != nullptr) {
11458 err = pfn_clEnqueueSignalSemaphoresKHR(
11459 object_,
11460 (cl_uint)sema_objects.size(),
// Fix: guard front() like sema_payloads below — calling front() on an
// empty vector is undefined behavior.
11461 (sema_objects.size() > 0) ? (const cl_semaphore_khr*) &sema_objects.front() : nullptr,
11462 (sema_payloads.size() > 0) ? &sema_payloads.front() : nullptr,
11463 (events_wait_list != nullptr) ? (cl_uint) events_wait_list->size() : 0,
11464 (events_wait_list != nullptr && events_wait_list->size() > 0) ? (const cl_event*) &events_wait_list->front() : nullptr,
11465 (event != nullptr) ? &tmp : nullptr);
11466 }
11467
11468 detail::errHandler(err, __ENQUEUE_SIGNAL_SEMAPHORE_KHR_ERR);
11469
11470 if (event != nullptr && err == CL_SUCCESS)
11471 *event = tmp;
11472
11473 return err;
11474}
11475
11476#endif // cl_khr_semaphore
11477
11478#if defined(cl_khr_command_buffer)
// RAII wrapper for a cl_command_buffer_khr (cl_khr_command_buffer extension).
// Entry points are resolved once via pfn_* pointers, scoped to the first
// queue's device/platform.
11482class CommandBufferKhr : public detail::Wrapper<cl_command_buffer_khr>
11483{
11484public:
11486 CommandBufferKhr() : detail::Wrapper<cl_type>() { }
11487
// Create a command buffer for a set of queues with optional flags.
// NOTE(review): assumes queues is non-empty — queues[0] and &queues.front()
// are undefined for an empty vector; confirm callers guarantee this.
11488 explicit CommandBufferKhr(const vector<CommandQueue> &queues,
11489 cl_command_buffer_properties_khr properties = 0,
11490 cl_int* errcode_ret = nullptr)
11491 {
// Zero-terminated property list: { FLAGS, value, 0 }.
11492 cl_command_buffer_properties_khr command_buffer_properties[] = {
11493 CL_COMMAND_BUFFER_FLAGS_KHR, properties, 0
11494 };
11495
11496 /* initialization of addresses to extension functions (it is done only once) */
11497 std::call_once(ext_init_, [&] { initExtensions(queues[0].getInfo<CL_QUEUE_DEVICE>()); });
11498 cl_int error = CL_INVALID_OPERATION;
11499
// Justifies the reinterpret-style cast of &queues.front() below.
11500 static_assert(sizeof(cl::CommandQueue) == sizeof(cl_command_queue),
11501 "Size of cl::CommandQueue must be equal to size of cl_command_queue");
11502
11503 if (pfn_clCreateCommandBufferKHR)
11504 {
11505 object_ = pfn_clCreateCommandBufferKHR((cl_uint) queues.size(),
11506 (const cl_command_queue *) &queues.front(),
11507 command_buffer_properties,
11508 &error);
11509 }
11510
11511 detail::errHandler(error, __CREATE_COMMAND_BUFFER_KHR_ERR);
11512 if (errcode_ret != nullptr) {
11513 *errcode_ret = error;
11514 }
11515 }
11516
// Wrap an existing raw handle; retains it only when retainObject is true.
11517 explicit CommandBufferKhr(const cl_command_buffer_khr& commandBufferKhr, bool retainObject = false) :
11518 detail::Wrapper<cl_type>(commandBufferKhr, retainObject) { }
11519
11520 CommandBufferKhr& operator=(const cl_command_buffer_khr& rhs)
11521 {
11522 detail::Wrapper<cl_type>::operator=(rhs);
11523 return *this;
11524 }
11525
11525
// Query command-buffer info into *param; CL_INVALID_OPERATION if the
// extension entry point was never resolved.
11526 template <typename T>
11527 cl_int getInfo(cl_command_buffer_info_khr name, T* param) const
11528 {
11529 if (pfn_clGetCommandBufferInfoKHR == nullptr) {
11530 return detail::errHandler(CL_INVALID_OPERATION,
11531 __GET_COMMAND_BUFFER_INFO_KHR_ERR);
11532 }
11533 return detail::errHandler(
11534 detail::getInfo(pfn_clGetCommandBufferInfoKHR, object_, name, param),
11535 __GET_COMMAND_BUFFER_INFO_KHR_ERR);
11536 }
11537
// Typed convenience overload; param_traits maps the query enum to its result
// type at compile time.
11538 template <cl_command_buffer_info_khr name> typename
11539 detail::param_traits<detail::cl_command_buffer_info_khr, name>::param_type
11540 getInfo(cl_int* err = nullptr) const
11541 {
11542 typename detail::param_traits<
11543 detail::cl_command_buffer_info_khr, name>::param_type param;
11544 cl_int result = getInfo(name, &param);
11545 if (err != nullptr) {
11546 *err = result;
11547 }
11548 return param;
11549 }
11550
// Finalize recording; the buffer becomes enqueueable and immutable
// (except via the mutable-dispatch extension).
11551 cl_int finalizeCommandBuffer() const
11552 {
11553 if (pfn_clFinalizeCommandBufferKHR == nullptr) {
11554 return detail::errHandler(CL_INVALID_OPERATION, __FINALIZE_COMMAND_BUFFER_KHR_ERR);
11555 }
11556 return detail::errHandler(pfn_clFinalizeCommandBufferKHR(object_), __FINALIZE_COMMAND_BUFFER_KHR_ERR);
11557 }
11558
// Enqueue the finalized command buffer on `queues`, with an optional event
// wait list and completion event.
11559 cl_int enqueueCommandBuffer(vector<CommandQueue> &queues,
11560 const vector<Event>* events = nullptr,
11561 Event* event = nullptr)
11562 {
11563 if (pfn_clEnqueueCommandBufferKHR == nullptr) {
11564 return detail::errHandler(CL_INVALID_OPERATION,
11565 __ENQUEUE_COMMAND_BUFFER_KHR_ERR);
11566 }
11567
11568 static_assert(sizeof(cl::CommandQueue) == sizeof(cl_command_queue),
11569 "Size of cl::CommandQueue must be equal to size of cl_command_queue");
11570
// NOTE(review): `(cl_event*) event` casts Event* directly to cl_event*,
// relying on Event wrapping exactly one cl_event; also assumes queues is
// non-empty (front() is UB otherwise) — confirm callers guarantee this.
11571 return detail::errHandler(pfn_clEnqueueCommandBufferKHR((cl_uint) queues.size(),
11572 (cl_command_queue *) &queues.front(),
11573 object_,
11574 (events != nullptr) ? (cl_uint) events->size() : 0,
11575 (events != nullptr && events->size() > 0) ? (const cl_event*) &events->front() : nullptr,
11576 (cl_event*) event),
11577 __ENQUEUE_COMMAND_BUFFER_KHR_ERR);
11578 }
11579
// Record a barrier command that waits on the given sync points. On success,
// optionally returns a new sync point and a mutable-command handle.
11580 cl_int commandBarrierWithWaitList(const vector<cl_sync_point_khr>* sync_points_vec = nullptr,
11581 cl_sync_point_khr* sync_point = nullptr,
11582 MutableCommandKhr* mutable_handle = nullptr,
11583 const CommandQueue* command_queue = nullptr)
11584 {
11585 if (pfn_clCommandBarrierWithWaitListKHR == nullptr) {
11586 return detail::errHandler(CL_INVALID_OPERATION,
11587 __COMMAND_BARRIER_WITH_WAIT_LIST_KHR_ERR);
11588 }
11589
11590 cl_sync_point_khr tmp_sync_point;
11591 cl_int error = detail::errHandler(
11592 pfn_clCommandBarrierWithWaitListKHR(object_,
11593 (command_queue != nullptr) ? (*command_queue)() : nullptr,
// Newer extension revisions add a properties parameter to the signature.
11594#if CL_KHR_COMMAND_BUFFER_EXTENSION_VERSION > CL_MAKE_VERSION(0, 9, 4)
11595 nullptr, // Properties
11596#endif
11597 (sync_points_vec != nullptr) ? (cl_uint) sync_points_vec->size() : 0,
11598 (sync_points_vec != nullptr && sync_points_vec->size() > 0) ? &sync_points_vec->front() : nullptr,
11599 (sync_point != nullptr) ? &tmp_sync_point : nullptr,
11600 (cl_mutable_command_khr*) mutable_handle),
11601 __COMMAND_BARRIER_WITH_WAIT_LIST_KHR_ERR);
11602
11603 if (sync_point != nullptr && error == CL_SUCCESS)
11604 *sync_point = tmp_sync_point;
11605
11606 return error;
11607 }
11608
// Record a buffer-to-buffer copy of `size` bytes from src_offset to
// dst_offset into the command buffer.
11609 cl_int commandCopyBuffer(const Buffer& src,
11610 const Buffer& dst,
11611 size_type src_offset,
11612 size_type dst_offset,
11613 size_type size,
11614 const vector<cl_sync_point_khr>* sync_points_vec = nullptr,
11615 cl_sync_point_khr* sync_point = nullptr,
11616 MutableCommandKhr* mutable_handle = nullptr,
11617 const CommandQueue* command_queue = nullptr)
11618 {
11619 if (pfn_clCommandCopyBufferKHR == nullptr) {
11620 return detail::errHandler(CL_INVALID_OPERATION,
11621 __COMMAND_COPY_BUFFER_KHR_ERR);
11622 }
11623
11624 cl_sync_point_khr tmp_sync_point;
11625 cl_int error = detail::errHandler(
11626 pfn_clCommandCopyBufferKHR(object_,
11627 (command_queue != nullptr) ? (*command_queue)() : nullptr,
11628#if CL_KHR_COMMAND_BUFFER_EXTENSION_VERSION > CL_MAKE_VERSION(0, 9, 4)
11629 nullptr, // Properties
11630#endif
11631 src(),
11632 dst(),
11633 src_offset,
11634 dst_offset,
11635 size,
11636 (sync_points_vec != nullptr) ? (cl_uint) sync_points_vec->size() : 0,
11637 (sync_points_vec != nullptr && sync_points_vec->size() > 0) ? &sync_points_vec->front() : nullptr,
11638 (sync_point != nullptr) ? &tmp_sync_point : nullptr,
11639 (cl_mutable_command_khr*) mutable_handle),
11640 __COMMAND_COPY_BUFFER_KHR_ERR);
11641
11642 if (sync_point != nullptr && error == CL_SUCCESS)
11643 *sync_point = tmp_sync_point;
11644
11645 return error;
11646 }
11647
// Record a rectangular (2D/3D) buffer-to-buffer copy with independent row and
// slice pitches for source and destination.
11648 cl_int commandCopyBufferRect(const Buffer& src,
11649 const Buffer& dst,
11650 const array<size_type, 3>& src_origin,
11651 const array<size_type, 3>& dst_origin,
11652 const array<size_type, 3>& region,
11653 size_type src_row_pitch,
11654 size_type src_slice_pitch,
11655 size_type dst_row_pitch,
11656 size_type dst_slice_pitch,
11657 const vector<cl_sync_point_khr>* sync_points_vec = nullptr,
11658 cl_sync_point_khr* sync_point = nullptr,
11659 MutableCommandKhr* mutable_handle = nullptr,
11660 const CommandQueue* command_queue = nullptr)
11661 {
11662 if (pfn_clCommandCopyBufferRectKHR == nullptr) {
11663 return detail::errHandler(CL_INVALID_OPERATION,
11664 __COMMAND_COPY_BUFFER_RECT_KHR_ERR);
11665 }
11666
11667 cl_sync_point_khr tmp_sync_point;
11668 cl_int error = detail::errHandler(
11669 pfn_clCommandCopyBufferRectKHR(object_,
11670 (command_queue != nullptr) ? (*command_queue)() : nullptr,
11671#if CL_KHR_COMMAND_BUFFER_EXTENSION_VERSION > CL_MAKE_VERSION(0, 9, 4)
11672 nullptr, // Properties
11673#endif
11674 src(),
11675 dst(),
11676 src_origin.data(),
11677 dst_origin.data(),
11678 region.data(),
11679 src_row_pitch,
11680 src_slice_pitch,
11681 dst_row_pitch,
11682 dst_slice_pitch,
11683 (sync_points_vec != nullptr) ? (cl_uint) sync_points_vec->size() : 0,
11684 (sync_points_vec != nullptr && sync_points_vec->size() > 0) ? &sync_points_vec->front() : nullptr,
11685 (sync_point != nullptr) ? &tmp_sync_point : nullptr,
11686 (cl_mutable_command_khr*) mutable_handle),
11687 __COMMAND_COPY_BUFFER_RECT_KHR_ERR);
11688
11689 if (sync_point != nullptr && error == CL_SUCCESS)
11690 *sync_point = tmp_sync_point;
11691
11692 return error;
11693 }
11694
// Record a copy from a buffer (starting at src_offset) into an image region.
11695 cl_int commandCopyBufferToImage(const Buffer& src,
11696 const Image& dst,
11697 size_type src_offset,
11698 const array<size_type, 3>& dst_origin,
11699 const array<size_type, 3>& region,
11700 const vector<cl_sync_point_khr>* sync_points_vec = nullptr,
11701 cl_sync_point_khr* sync_point = nullptr,
11702 MutableCommandKhr* mutable_handle = nullptr,
11703 const CommandQueue* command_queue = nullptr)
11704 {
11705 if (pfn_clCommandCopyBufferToImageKHR == nullptr) {
11706 return detail::errHandler(CL_INVALID_OPERATION,
11707 __COMMAND_COPY_BUFFER_TO_IMAGE_KHR_ERR);
11708 }
11709
11710 cl_sync_point_khr tmp_sync_point;
11711 cl_int error = detail::errHandler(
11712 pfn_clCommandCopyBufferToImageKHR(object_,
11713 (command_queue != nullptr) ? (*command_queue)() : nullptr,
11714#if CL_KHR_COMMAND_BUFFER_EXTENSION_VERSION > CL_MAKE_VERSION(0, 9, 4)
11715 nullptr, // Properties
11716#endif
11717 src(),
11718 dst(),
11719 src_offset,
11720 dst_origin.data(),
11721 region.data(),
11722 (sync_points_vec != nullptr) ? (cl_uint) sync_points_vec->size() : 0,
11723 (sync_points_vec != nullptr && sync_points_vec->size() > 0) ? &sync_points_vec->front() : nullptr,
11724 (sync_point != nullptr) ? &tmp_sync_point : nullptr,
11725 (cl_mutable_command_khr*) mutable_handle),
11726 __COMMAND_COPY_BUFFER_TO_IMAGE_KHR_ERR);
11727
11728 if (sync_point != nullptr && error == CL_SUCCESS)
11729 *sync_point = tmp_sync_point;
11730
11731 return error;
11732 }
11733
// Record an image-to-image copy of `region` from src_origin to dst_origin.
11734 cl_int commandCopyImage(const Image& src,
11735 const Image& dst,
11736 const array<size_type, 3>& src_origin,
11737 const array<size_type, 3>& dst_origin,
11738 const array<size_type, 3>& region,
11739 const vector<cl_sync_point_khr>* sync_points_vec = nullptr,
11740 cl_sync_point_khr* sync_point = nullptr,
11741 MutableCommandKhr* mutable_handle = nullptr,
11742 const CommandQueue* command_queue = nullptr)
11743 {
11744 if (pfn_clCommandCopyImageKHR == nullptr) {
11745 return detail::errHandler(CL_INVALID_OPERATION,
11746 __COMMAND_COPY_IMAGE_KHR_ERR);
11747 }
11748
11749 cl_sync_point_khr tmp_sync_point;
11750 cl_int error = detail::errHandler(
11751 pfn_clCommandCopyImageKHR(object_,
11752 (command_queue != nullptr) ? (*command_queue)() : nullptr,
11753#if CL_KHR_COMMAND_BUFFER_EXTENSION_VERSION > CL_MAKE_VERSION(0, 9, 4)
11754 nullptr, // Properties
11755#endif
11756 src(),
11757 dst(),
11758 src_origin.data(),
11759 dst_origin.data(),
11760 region.data(),
11761 (sync_points_vec != nullptr) ? (cl_uint) sync_points_vec->size() : 0,
11762 (sync_points_vec != nullptr && sync_points_vec->size() > 0) ? &sync_points_vec->front() : nullptr,
11763 (sync_point != nullptr) ? &tmp_sync_point : nullptr,
11764 (cl_mutable_command_khr*) mutable_handle),
11765 __COMMAND_COPY_IMAGE_KHR_ERR);
11766
11767 if (sync_point != nullptr && error == CL_SUCCESS)
11768 *sync_point = tmp_sync_point;
11769
11770 return error;
11771 }
11772
// Record a copy from an image region into a buffer at dst_offset.
11773 cl_int commandCopyImageToBuffer(const Image& src,
11774 const Buffer& dst,
11775 const array<size_type, 3>& src_origin,
11776 const array<size_type, 3>& region,
11777 size_type dst_offset,
11778 const vector<cl_sync_point_khr>* sync_points_vec = nullptr,
11779 cl_sync_point_khr* sync_point = nullptr,
11780 MutableCommandKhr* mutable_handle = nullptr,
11781 const CommandQueue* command_queue = nullptr)
11782 {
11783 if (pfn_clCommandCopyImageToBufferKHR == nullptr) {
11784 return detail::errHandler(CL_INVALID_OPERATION,
11785 __COMMAND_COPY_IMAGE_TO_BUFFER_KHR_ERR);
11786 }
11787
11788 cl_sync_point_khr tmp_sync_point;
11789 cl_int error = detail::errHandler(
11790 pfn_clCommandCopyImageToBufferKHR(object_,
11791 (command_queue != nullptr) ? (*command_queue)() : nullptr,
11792#if CL_KHR_COMMAND_BUFFER_EXTENSION_VERSION > CL_MAKE_VERSION(0, 9, 4)
11793 nullptr, // Properties
11794#endif
11795 src(),
11796 dst(),
11797 src_origin.data(),
11798 region.data(),
11799 dst_offset,
11800 (sync_points_vec != nullptr) ? (cl_uint) sync_points_vec->size() : 0,
11801 (sync_points_vec != nullptr && sync_points_vec->size() > 0) ? &sync_points_vec->front() : nullptr,
11802 (sync_point != nullptr) ? &tmp_sync_point : nullptr,
11803 (cl_mutable_command_khr*) mutable_handle),
11804 __COMMAND_COPY_IMAGE_TO_BUFFER_KHR_ERR);
11805
11806 if (sync_point != nullptr && error == CL_SUCCESS)
11807 *sync_point = tmp_sync_point;
11808
11809 return error;
11810 }
11811
// Record a fill of [offset, offset+size) in `buffer` with a repeated pattern.
// The pattern is passed by value; sizeof(PatternType) is its element size.
11812 template<typename PatternType>
11813 cl_int commandFillBuffer(const Buffer& buffer,
11814 PatternType pattern,
11815 size_type offset,
11816 size_type size,
11817 const vector<cl_sync_point_khr>* sync_points_vec = nullptr,
11818 cl_sync_point_khr* sync_point = nullptr,
11819 MutableCommandKhr* mutable_handle = nullptr,
11820 const CommandQueue* command_queue = nullptr)
11821 {
11822 if (pfn_clCommandFillBufferKHR == nullptr) {
11823 return detail::errHandler(CL_INVALID_OPERATION,
11824 __COMMAND_FILL_BUFFER_KHR_ERR);
11825 }
11826
11827 cl_sync_point_khr tmp_sync_point;
11828 cl_int error = detail::errHandler(
11829 pfn_clCommandFillBufferKHR(object_,
11830 (command_queue != nullptr) ? (*command_queue)() : nullptr,
11831#if CL_KHR_COMMAND_BUFFER_EXTENSION_VERSION > CL_MAKE_VERSION(0, 9, 4)
11832 nullptr, // Properties
11833#endif
11834 buffer(),
11835 static_cast<void*>(&pattern),
11836 sizeof(PatternType),
11837 offset,
11838 size,
11839 (sync_points_vec != nullptr) ? (cl_uint) sync_points_vec->size() : 0,
11840 (sync_points_vec != nullptr && sync_points_vec->size() > 0) ? &sync_points_vec->front() : nullptr,
11841 (sync_point != nullptr) ? &tmp_sync_point : nullptr,
11842 (cl_mutable_command_khr*) mutable_handle),
11843 __COMMAND_FILL_BUFFER_KHR_ERR);
11844
11845 if (sync_point != nullptr && error == CL_SUCCESS)
11846 *sync_point = tmp_sync_point;
11847
11848 return error;
11849 }
11850
// Record a fill of an image region with a solid color.
11851 cl_int commandFillImage(const Image& image,
11852 cl_float4 fillColor,
11853 const array<size_type, 3>& origin,
11854 const array<size_type, 3>& region,
11855 const vector<cl_sync_point_khr>* sync_points_vec = nullptr,
11856 cl_sync_point_khr* sync_point = nullptr,
11857 MutableCommandKhr* mutable_handle = nullptr,
11858 const CommandQueue* command_queue = nullptr)
11859 {
11860 if (pfn_clCommandFillImageKHR == nullptr) {
11861 return detail::errHandler(CL_INVALID_OPERATION,
11862 __COMMAND_FILL_IMAGE_KHR_ERR);
11863 }
11864
11865 cl_sync_point_khr tmp_sync_point;
11866 cl_int error = detail::errHandler(
11867 pfn_clCommandFillImageKHR(object_,
11868 (command_queue != nullptr) ? (*command_queue)() : nullptr,
11869#if CL_KHR_COMMAND_BUFFER_EXTENSION_VERSION > CL_MAKE_VERSION(0, 9, 4)
11870 nullptr, // Properties
11871#endif
11872 image(),
11873 static_cast<void*>(&fillColor),
11874 origin.data(),
11875 region.data(),
11876 (sync_points_vec != nullptr) ? (cl_uint) sync_points_vec->size() : 0,
11877 (sync_points_vec != nullptr && sync_points_vec->size() > 0) ? &sync_points_vec->front() : nullptr,
11878 (sync_point != nullptr) ? &tmp_sync_point : nullptr,
11879 (cl_mutable_command_khr*) mutable_handle),
11880 __COMMAND_FILL_IMAGE_KHR_ERR);
11881
11882 if (sync_point != nullptr && error == CL_SUCCESS)
11883 *sync_point = tmp_sync_point;
11884
11885 return error;
11886 }
11887
// Record an ND-range kernel launch. The properties parameter type changed
// name between extension revisions, hence the #if on the signature.
11888 cl_int commandNDRangeKernel(
11889#if CL_KHR_COMMAND_BUFFER_EXTENSION_VERSION > CL_MAKE_VERSION(0, 9, 4)
11890 const cl::vector<cl_command_properties_khr> &properties,
11891#else
11892 const cl::vector<cl_ndrange_kernel_command_properties_khr> &properties,
11893#endif
11894 const Kernel& kernel,
11895 const NDRange& offset,
11896 const NDRange& global,
11897 const NDRange& local = NullRange,
11898 const vector<cl_sync_point_khr>* sync_points_vec = nullptr,
11899 cl_sync_point_khr* sync_point = nullptr,
11900 MutableCommandKhr* mutable_handle = nullptr,
11901 const CommandQueue* command_queue = nullptr)
11902 {
11903 if (pfn_clCommandNDRangeKernelKHR == nullptr) {
11904 return detail::errHandler(CL_INVALID_OPERATION,
11905 __COMMAND_NDRANGE_KERNEL_KHR_ERR);
11906 }
11907
11908 cl_sync_point_khr tmp_sync_point;
11909 cl_int error = detail::errHandler(
11910 pfn_clCommandNDRangeKernelKHR(object_,
11911 (command_queue != nullptr) ? (*command_queue)() : nullptr,
// NOTE(review): &properties[0] is undefined for an empty vector, unlike the
// guarded sync-point arguments below — confirm callers always pass at least
// the zero terminator.
11912 &properties[0],
11913 kernel(),
11914 (cl_uint) global.dimensions(),
11915 offset.dimensions() != 0 ? (const size_type*) offset : nullptr,
11916 (const size_type*) global,
11917 local.dimensions() != 0 ? (const size_type*) local : nullptr,
11918 (sync_points_vec != nullptr) ? (cl_uint) sync_points_vec->size() : 0,
11919 (sync_points_vec != nullptr && sync_points_vec->size() > 0) ? &sync_points_vec->front() : nullptr,
11920 (sync_point != nullptr) ? &tmp_sync_point : nullptr,
11921 (cl_mutable_command_khr*) mutable_handle),
11922 __COMMAND_NDRANGE_KERNEL_KHR_ERR);
11923
11924 if (sync_point != nullptr && error == CL_SUCCESS)
11925 *sync_point = tmp_sync_point;
11926
11927 return error;
11928 }
11929
11930#if defined(cl_khr_command_buffer_mutable_dispatch)
11931#if CL_KHR_COMMAND_BUFFER_MUTABLE_DISPATCH_EXTENSION_VERSION < \
11932 CL_MAKE_VERSION(0, 9, 2)
// Pre-0.9.2 API: update recorded commands from a single base-config struct.
11933 cl_int updateMutableCommands(const cl_mutable_base_config_khr* mutable_config)
11934 {
11935 if (pfn_clUpdateMutableCommandsKHR == nullptr) {
11936 return detail::errHandler(CL_INVALID_OPERATION,
11937 __UPDATE_MUTABLE_COMMANDS_KHR_ERR);
11938 }
11939 return detail::errHandler(pfn_clUpdateMutableCommandsKHR(object_, mutable_config),
11940 __UPDATE_MUTABLE_COMMANDS_KHR_ERR);
11941 }
11942#else
// 0.9.2+ API: parallel arrays of update types and config pointers; the
// template length keeps the two arrays the same size at compile time.
11943 template <int ArrayLength>
11944 cl_int updateMutableCommands(std::array<cl_command_buffer_update_type_khr,
11945 ArrayLength> &config_types,
11946 std::array<const void *, ArrayLength> &configs) {
11947 if (pfn_clUpdateMutableCommandsKHR == nullptr) {
11948 return detail::errHandler(CL_INVALID_OPERATION,
11949 __UPDATE_MUTABLE_COMMANDS_KHR_ERR);
11950 }
11951 return detail::errHandler(
11952 pfn_clUpdateMutableCommandsKHR(object_, static_cast<cl_uint>(configs.size()),
11953 config_types.data(), configs.data()),
11954 __UPDATE_MUTABLE_COMMANDS_KHR_ERR);
11955 }
11956#endif /* CL_KHR_COMMAND_BUFFER_MUTABLE_DISPATCH_EXTENSION_VERSION */
11957#endif /* cl_khr_command_buffer_mutable_dispatch */
11958
11959private:
11960 static std::once_flag ext_init_;
11961
// Resolves every cl_khr_command_buffer (and, when enabled, mutable-dispatch)
// extension entry point into the pfn_* static function pointers.
// Intended to run once per process — the class holds a std::once_flag
// (ext_init_) presumably used by callers to guard this function; verify at
// the call sites.
// On OpenCL >= 1.2 the pointers are resolved per-platform (the platform is
// taken from the supplied device); on OpenCL 1.1 the platform-less
// clGetExtensionFunctionAddress path is used instead.
11962 static void initExtensions(const cl::Device& device)
11963 {
11964#if CL_HPP_TARGET_OPENCL_VERSION >= 120
// getInfo<CL_DEVICE_PLATFORM>() returns a cl::Platform wrapper; the
// trailing () extracts the raw cl_platform_id handle.
11965 cl_platform_id platform = device.getInfo<CL_DEVICE_PLATFORM>()();
11966 CL_HPP_INIT_CL_EXT_FCN_PTR_PLATFORM_(platform, clCreateCommandBufferKHR);
11967 CL_HPP_INIT_CL_EXT_FCN_PTR_PLATFORM_(platform, clFinalizeCommandBufferKHR);
11968 CL_HPP_INIT_CL_EXT_FCN_PTR_PLATFORM_(platform, clRetainCommandBufferKHR);
11969 CL_HPP_INIT_CL_EXT_FCN_PTR_PLATFORM_(platform, clReleaseCommandBufferKHR);
11970 CL_HPP_INIT_CL_EXT_FCN_PTR_PLATFORM_(platform, clGetCommandBufferInfoKHR);
11971 CL_HPP_INIT_CL_EXT_FCN_PTR_PLATFORM_(platform, clEnqueueCommandBufferKHR);
11972 CL_HPP_INIT_CL_EXT_FCN_PTR_PLATFORM_(platform, clCommandBarrierWithWaitListKHR);
11973 CL_HPP_INIT_CL_EXT_FCN_PTR_PLATFORM_(platform, clCommandCopyBufferKHR);
11974 CL_HPP_INIT_CL_EXT_FCN_PTR_PLATFORM_(platform, clCommandCopyBufferRectKHR);
11975 CL_HPP_INIT_CL_EXT_FCN_PTR_PLATFORM_(platform, clCommandCopyBufferToImageKHR);
11976 CL_HPP_INIT_CL_EXT_FCN_PTR_PLATFORM_(platform, clCommandCopyImageKHR);
11977 CL_HPP_INIT_CL_EXT_FCN_PTR_PLATFORM_(platform, clCommandCopyImageToBufferKHR);
11978 CL_HPP_INIT_CL_EXT_FCN_PTR_PLATFORM_(platform, clCommandFillBufferKHR);
11979 CL_HPP_INIT_CL_EXT_FCN_PTR_PLATFORM_(platform, clCommandFillImageKHR);
11980 CL_HPP_INIT_CL_EXT_FCN_PTR_PLATFORM_(platform, clCommandNDRangeKernelKHR);
11981#if defined(cl_khr_command_buffer_mutable_dispatch)
11982 CL_HPP_INIT_CL_EXT_FCN_PTR_PLATFORM_(platform, clUpdateMutableCommandsKHR);
11983 CL_HPP_INIT_CL_EXT_FCN_PTR_PLATFORM_(platform, clGetMutableCommandInfoKHR);
11984#endif /* cl_khr_command_buffer_mutable_dispatch */
11985#elif CL_HPP_TARGET_OPENCL_VERSION >= 110
11986 CL_HPP_INIT_CL_EXT_FCN_PTR_(clCreateCommandBufferKHR);
11987 CL_HPP_INIT_CL_EXT_FCN_PTR_(clFinalizeCommandBufferKHR);
11988 CL_HPP_INIT_CL_EXT_FCN_PTR_(clRetainCommandBufferKHR);
11989 CL_HPP_INIT_CL_EXT_FCN_PTR_(clReleaseCommandBufferKHR);
11990 CL_HPP_INIT_CL_EXT_FCN_PTR_(clGetCommandBufferInfoKHR);
11991 CL_HPP_INIT_CL_EXT_FCN_PTR_(clEnqueueCommandBufferKHR);
11992 CL_HPP_INIT_CL_EXT_FCN_PTR_(clCommandBarrierWithWaitListKHR);
11993 CL_HPP_INIT_CL_EXT_FCN_PTR_(clCommandCopyBufferKHR);
11994 CL_HPP_INIT_CL_EXT_FCN_PTR_(clCommandCopyBufferRectKHR);
11995 CL_HPP_INIT_CL_EXT_FCN_PTR_(clCommandCopyBufferToImageKHR);
11996 CL_HPP_INIT_CL_EXT_FCN_PTR_(clCommandCopyImageKHR);
11997 CL_HPP_INIT_CL_EXT_FCN_PTR_(clCommandCopyImageToBufferKHR);
11998 CL_HPP_INIT_CL_EXT_FCN_PTR_(clCommandFillBufferKHR);
11999 CL_HPP_INIT_CL_EXT_FCN_PTR_(clCommandFillImageKHR);
12000 CL_HPP_INIT_CL_EXT_FCN_PTR_(clCommandNDRangeKernelKHR);
12001#if defined(cl_khr_command_buffer_mutable_dispatch)
12002 CL_HPP_INIT_CL_EXT_FCN_PTR_(clUpdateMutableCommandsKHR);
12003 CL_HPP_INIT_CL_EXT_FCN_PTR_(clGetMutableCommandInfoKHR);
12004#endif /* cl_khr_command_buffer_mutable_dispatch */
12005#endif
// If every pointer is still null the extension is entirely absent; report
// it once via errHandler. Note this only fires when ALL lookups failed
// (&&), so a partially-exposed extension passes silently here and is
// diagnosed later by the per-call null checks in each wrapper method.
12006 if ((pfn_clCreateCommandBufferKHR == nullptr) &&
12007 (pfn_clFinalizeCommandBufferKHR == nullptr) &&
12008 (pfn_clRetainCommandBufferKHR == nullptr) &&
12009 (pfn_clReleaseCommandBufferKHR == nullptr) &&
12010 (pfn_clGetCommandBufferInfoKHR == nullptr) &&
12011 (pfn_clEnqueueCommandBufferKHR == nullptr) &&
12012 (pfn_clCommandBarrierWithWaitListKHR == nullptr) &&
12013 (pfn_clCommandCopyBufferKHR == nullptr) &&
12014 (pfn_clCommandCopyBufferRectKHR == nullptr) &&
12015 (pfn_clCommandCopyBufferToImageKHR == nullptr) &&
12016 (pfn_clCommandCopyImageKHR == nullptr) &&
12017 (pfn_clCommandCopyImageToBufferKHR == nullptr) &&
12018 (pfn_clCommandFillBufferKHR == nullptr) &&
12019 (pfn_clCommandFillImageKHR == nullptr) &&
12020 (pfn_clCommandNDRangeKernelKHR == nullptr)
12021#if defined(cl_khr_command_buffer_mutable_dispatch)
12022 && (pfn_clUpdateMutableCommandsKHR == nullptr)
12023 && (pfn_clGetMutableCommandInfoKHR == nullptr)
12024#endif /* cl_khr_command_buffer_mutable_dispatch */
12025 )
12026 {
12027 detail::errHandler(CL_INVALID_VALUE, __CREATE_COMMAND_BUFFER_KHR_ERR);
12028 }
12029 }
12030}; // CommandBufferKhr
12031
12032CL_HPP_DEFINE_STATIC_MEMBER_ std::once_flag CommandBufferKhr::ext_init_;
12033
12034#if defined(cl_khr_command_buffer_mutable_dispatch)
// Thin RAII wrapper for a cl_mutable_command_khr handle
// (cl_khr_command_buffer_mutable_dispatch extension). Reference counting /
// lifetime management is inherited from detail::Wrapper.
12038class MutableCommandKhr : public detail::Wrapper<cl_mutable_command_khr>
12039{
12040public:
// Default constructor — wraps a null handle.
12042 MutableCommandKhr() : detail::Wrapper<cl_type>() { }
12043
// Wraps an existing raw handle. When retainObject is false (the default)
// the wrapper takes over the caller's reference rather than retaining a
// new one.
12044 explicit MutableCommandKhr(const cl_mutable_command_khr& mutableCommandKhr, bool retainObject = false) :
12045 detail::Wrapper<cl_type>(mutableCommandKhr, retainObject) { }
12046
// Assignment from a raw handle — delegates to the Wrapper base, which
// defines the ownership semantics of the assignment.
12047 MutableCommandKhr& operator=(const cl_mutable_command_khr& rhs)
12048 {
12049 detail::Wrapper<cl_type>::operator=(rhs);
12050 return *this;
12051 }
12052
// Queries a property of the mutable command into *param via
// clGetMutableCommandInfoKHR. Returns CL_INVALID_OPERATION if the
// extension entry point was not resolved for this platform.
12053 template <typename T>
12054 cl_int getInfo(cl_mutable_command_info_khr name, T* param) const
12055 {
12056 if (pfn_clGetMutableCommandInfoKHR == nullptr) {
12057 return detail::errHandler(CL_INVALID_OPERATION,
12058 __GET_MUTABLE_COMMAND_INFO_KHR_ERR);
12059 }
12060 return detail::errHandler(
12061 detail::getInfo(pfn_clGetMutableCommandInfoKHR, object_, name, param),
12062 __GET_MUTABLE_COMMAND_INFO_KHR_ERR);
12063 }
12064
// Convenience overload: the query token is a template parameter and the
// result type is deduced from param_traits; the error code (if any) is
// reported through the optional err out-parameter instead of the return
// value.
12065 template <cl_mutable_command_info_khr name> typename
12066 detail::param_traits<detail::cl_mutable_command_info_khr, name>::param_type
12067 getInfo(cl_int* err = nullptr) const
12068 {
12069 typename detail::param_traits<
12070 detail::cl_mutable_command_info_khr, name>::param_type param;
12071 cl_int result = getInfo(name, &param);
12072 if (err != nullptr) {
12073 *err = result;
12074 }
12075 return param;
12076 }
12077}; // MutableCommandKhr
12078#endif /* cl_khr_command_buffer_mutable_dispatch */
12079
12080#endif // cl_khr_command_buffer
12081//----------------------------------------------------------------------------------------------------------------------
12082
12083#undef CL_HPP_ERR_STR_
12084#if !defined(CL_HPP_USER_OVERRIDE_ERROR_STRINGS)
12085#undef __GET_DEVICE_INFO_ERR
12086#undef __GET_PLATFORM_INFO_ERR
12087#undef __GET_DEVICE_IDS_ERR
12088#undef __GET_PLATFORM_IDS_ERR
12089#undef __GET_CONTEXT_INFO_ERR
12090#undef __GET_EVENT_INFO_ERR
12091#undef __GET_EVENT_PROFILE_INFO_ERR
12092#undef __GET_MEM_OBJECT_INFO_ERR
12093#undef __GET_IMAGE_INFO_ERR
12094#undef __GET_SAMPLER_INFO_ERR
12095#undef __GET_KERNEL_INFO_ERR
12096#undef __GET_KERNEL_ARG_INFO_ERR
12097#undef __GET_KERNEL_SUB_GROUP_INFO_ERR
12098#undef __GET_KERNEL_WORK_GROUP_INFO_ERR
12099#undef __GET_PROGRAM_INFO_ERR
12100#undef __GET_PROGRAM_BUILD_INFO_ERR
12101#undef __GET_COMMAND_QUEUE_INFO_ERR
12102#undef __CREATE_CONTEXT_ERR
12103#undef __CREATE_CONTEXT_FROM_TYPE_ERR
12104#undef __CREATE_COMMAND_BUFFER_KHR_ERR
12105#undef __GET_COMMAND_BUFFER_INFO_KHR_ERR
12106#undef __FINALIZE_COMMAND_BUFFER_KHR_ERR
12107#undef __ENQUEUE_COMMAND_BUFFER_KHR_ERR
12108#undef __COMMAND_BARRIER_WITH_WAIT_LIST_KHR_ERR
12109#undef __COMMAND_COPY_BUFFER_KHR_ERR
12110#undef __COMMAND_COPY_BUFFER_RECT_KHR_ERR
12111#undef __COMMAND_COPY_BUFFER_TO_IMAGE_KHR_ERR
12112#undef __COMMAND_COPY_IMAGE_KHR_ERR
12113#undef __COMMAND_COPY_IMAGE_TO_BUFFER_KHR_ERR
12114#undef __COMMAND_FILL_BUFFER_KHR_ERR
12115#undef __COMMAND_FILL_IMAGE_KHR_ERR
12116#undef __COMMAND_NDRANGE_KERNEL_KHR_ERR
12117#undef __UPDATE_MUTABLE_COMMANDS_KHR_ERR
12118#undef __GET_MUTABLE_COMMAND_INFO_KHR_ERR
12119#undef __RETAIN_COMMAND_BUFFER_KHR_ERR
12120#undef __RELEASE_COMMAND_BUFFER_KHR_ERR
12121#undef __GET_SUPPORTED_IMAGE_FORMATS_ERR
12122#undef __SET_CONTEXT_DESCTRUCTOR_CALLBACK_ERR
12123#undef __CREATE_BUFFER_ERR
12124#undef __COPY_ERR
12125#undef __CREATE_SUBBUFFER_ERR
12126#undef __CREATE_GL_BUFFER_ERR
12127#undef __CREATE_GL_RENDER_BUFFER_ERR
12128#undef __GET_GL_OBJECT_INFO_ERR
12129#undef __CREATE_IMAGE_ERR
12130#undef __CREATE_GL_TEXTURE_ERR
12131#undef __IMAGE_DIMENSION_ERR
12132#undef __SET_MEM_OBJECT_DESTRUCTOR_CALLBACK_ERR
12133#undef __CREATE_USER_EVENT_ERR
12134#undef __SET_USER_EVENT_STATUS_ERR
12135#undef __SET_EVENT_CALLBACK_ERR
12136#undef __WAIT_FOR_EVENTS_ERR
12137#undef __CREATE_KERNEL_ERR
12138#undef __SET_KERNEL_ARGS_ERR
12139#undef __CREATE_PROGRAM_WITH_SOURCE_ERR
12140#undef __CREATE_PROGRAM_WITH_BINARY_ERR
12141#undef __CREATE_PROGRAM_WITH_IL_ERR
12142#undef __CREATE_PROGRAM_WITH_BUILT_IN_KERNELS_ERR
12143#undef __BUILD_PROGRAM_ERR
12144#undef __COMPILE_PROGRAM_ERR
12145#undef __LINK_PROGRAM_ERR
12146#undef __CREATE_KERNELS_IN_PROGRAM_ERR
12147#undef __CREATE_COMMAND_QUEUE_WITH_PROPERTIES_ERR
12148#undef __CREATE_SAMPLER_WITH_PROPERTIES_ERR
12149#undef __SET_COMMAND_QUEUE_PROPERTY_ERR
12150#undef __ENQUEUE_READ_BUFFER_ERR
12151#undef __ENQUEUE_READ_BUFFER_RECT_ERR
12152#undef __ENQUEUE_WRITE_BUFFER_ERR
12153#undef __ENQUEUE_WRITE_BUFFER_RECT_ERR
12154#undef __ENQEUE_COPY_BUFFER_ERR
12155#undef __ENQEUE_COPY_BUFFER_RECT_ERR
12156#undef __ENQUEUE_FILL_BUFFER_ERR
12157#undef __ENQUEUE_READ_IMAGE_ERR
12158#undef __ENQUEUE_WRITE_IMAGE_ERR
12159#undef __ENQUEUE_COPY_IMAGE_ERR
12160#undef __ENQUEUE_FILL_IMAGE_ERR
12161#undef __ENQUEUE_COPY_IMAGE_TO_BUFFER_ERR
12162#undef __ENQUEUE_COPY_BUFFER_TO_IMAGE_ERR
12163#undef __ENQUEUE_MAP_BUFFER_ERR
12164#undef __ENQUEUE_MAP_IMAGE_ERR
12165#undef __ENQUEUE_MAP_SVM_ERR
12166#undef __ENQUEUE_FILL_SVM_ERR
12167#undef __ENQUEUE_COPY_SVM_ERR
12168#undef __ENQUEUE_UNMAP_SVM_ERR
12169#undef __ENQUEUE_MAP_IMAGE_ERR
12170#undef __ENQUEUE_UNMAP_MEM_OBJECT_ERR
12171#undef __ENQUEUE_NDRANGE_KERNEL_ERR
12172#undef __ENQUEUE_NATIVE_KERNEL
12173#undef __ENQUEUE_MIGRATE_MEM_OBJECTS_ERR
12174#undef __ENQUEUE_MIGRATE_SVM_ERR
12175#undef __ENQUEUE_ACQUIRE_GL_ERR
12176#undef __ENQUEUE_RELEASE_GL_ERR
12177#undef __CREATE_PIPE_ERR
12178#undef __GET_PIPE_INFO_ERR
12179#undef __RETAIN_ERR
12180#undef __RELEASE_ERR
12181#undef __FLUSH_ERR
12182#undef __FINISH_ERR
12183#undef __VECTOR_CAPACITY_ERR
12184#undef __CREATE_SUB_DEVICES_ERR
12185#undef __ENQUEUE_ACQUIRE_EXTERNAL_MEMORY_ERR
12186#undef __ENQUEUE_RELEASE_EXTERNAL_MEMORY_ERR
12187#undef __ENQUEUE_MARKER_ERR
12188#undef __ENQUEUE_WAIT_FOR_EVENTS_ERR
12189#undef __ENQUEUE_BARRIER_ERR
12190#undef __UNLOAD_COMPILER_ERR
12191#undef __CREATE_GL_TEXTURE_2D_ERR
12192#undef __CREATE_GL_TEXTURE_3D_ERR
12193#undef __CREATE_IMAGE2D_ERR
12194#undef __CREATE_IMAGE3D_ERR
12195#undef __CREATE_COMMAND_QUEUE_ERR
12196#undef __ENQUEUE_TASK_ERR
12197#undef __CREATE_SAMPLER_ERR
12198#undef __ENQUEUE_MARKER_WAIT_LIST_ERR
12199#undef __ENQUEUE_BARRIER_WAIT_LIST_ERR
12200#undef __CLONE_KERNEL_ERR
12201#undef __GET_HOST_TIMER_ERR
12202#undef __GET_DEVICE_AND_HOST_TIMER_ERR
12203#undef __GET_SEMAPHORE_KHR_INFO_ERR
12204#undef __CREATE_SEMAPHORE_KHR_WITH_PROPERTIES_ERR
12205#undef __GET_IMAGE_REQUIREMENT_INFO_EXT_ERR
12206#undef __ENQUEUE_WAIT_SEMAPHORE_KHR_ERR
12207#undef __ENQUEUE_SIGNAL_SEMAPHORE_KHR_ERR
12208#undef __RETAIN_SEMAPHORE_KHR_ERR
12209#undef __RELEASE_SEMAPHORE_KHR_ERR
12210#undef __GET_SEMAPHORE_HANDLE_FOR_TYPE_KHR_ERR
12211
12212#endif //CL_HPP_USER_OVERRIDE_ERROR_STRINGS
12213
12214// Extensions
12215#undef CL_HPP_CREATE_CL_EXT_FCN_PTR_ALIAS_
12216#undef CL_HPP_INIT_CL_EXT_FCN_PTR_
12217#undef CL_HPP_INIT_CL_EXT_FCN_PTR_PLATFORM_
12218
12219#undef CL_HPP_DEFINE_STATIC_MEMBER_
12220
12221#undef CL_
12222
12223} // namespace cl
12224
12225#endif // CL_HPP_
BufferGL(const Context &context, cl_mem_flags flags, cl_GLuint bufobj, cl_int *err=nullptr)
Constructs a BufferGL in a specified context, from a given GL buffer.
Definition opencl.hpp:4681
BufferGL & operator=(const cl_mem &rhs)
Assignment from cl_mem - performs shallow copy.
Definition opencl.hpp:4717
BufferGL()
Default constructor - initializes to nullptr.
Definition opencl.hpp:4701
cl_int getObjectInfo(cl_gl_object_type *type, cl_GLuint *gl_object_name)
Wrapper for clGetGLObjectInfo().
Definition opencl.hpp:4725
Class interface for Buffer Memory Objects.
Definition opencl.hpp:4362
Buffer()
Default constructor - initializes to nullptr.
Definition opencl.hpp:4530
Buffer createSubBuffer(cl_mem_flags flags, cl_buffer_create_type buffer_create_type, const void *buffer_create_info, cl_int *err=nullptr)
Creates a new buffer object from this.
Definition opencl.hpp:4558
Buffer & operator=(const cl_mem &rhs)
Assignment from cl_mem - performs shallow copy.
Definition opencl.hpp:4546
Buffer(const Context &context, cl_mem_flags flags, size_type size, void *host_ptr=nullptr, cl_int *err=nullptr)
Constructs a Buffer in a specified context.
Definition opencl.hpp:4372
cl_int getObjectInfo(cl_gl_object_type *type, cl_GLuint *gl_object_name)
Wrapper for clGetGLObjectInfo().
Definition opencl.hpp:4795
BufferRenderGL()
Default constructor - initializes to nullptr.
Definition opencl.hpp:4771
BufferRenderGL(const Context &context, cl_mem_flags flags, cl_GLuint bufobj, cl_int *err=nullptr)
Constructs a BufferRenderGL in a specified context, from a given GL Renderbuffer.
Definition opencl.hpp:4751
BufferRenderGL & operator=(const cl_mem &rhs)
Assignment from cl_mem - performs shallow copy.
Definition opencl.hpp:4787
CommandQueue interface for cl_command_queue.
Definition opencl.hpp:7564
cl_int enqueueUnmapSVM(T *ptr, const vector< Event > *events=nullptr, Event *event=nullptr) const
Definition opencl.hpp:9060
CommandQueue(cl_command_queue_properties properties, cl_int *err=nullptr)
Constructs a CommandQueue based on passed properties. Will return a CL_INVALID_QUEUE_PROPERTIES error...
Definition opencl.hpp:7647
cl_int enqueueMemcpySVM(T *dst_ptr, const T *src_ptr, cl_bool blocking, size_type size, const vector< Event > *events=nullptr, Event *event=nullptr) const
Definition opencl.hpp:8800
cl_int enqueueMigrateMemObjects(const vector< Memory > &memObjects, cl_mem_migration_flags flags, const vector< Event > *events=nullptr, Event *event=nullptr) const
Definition opencl.hpp:9196
cl_int enqueueMarkerWithWaitList(const vector< Event > *events=nullptr, Event *event=nullptr) const
Definition opencl.hpp:9143
CL_API_PREFIX__VERSION_1_1_DEPRECATED cl_int enqueueMarker(Event *event=nullptr) const CL_API_SUFFIX__VERSION_1_1_DEPRECATED
Definition opencl.hpp:9430
static CommandQueue setDefault(const CommandQueue &default_queue)
Definition opencl.hpp:8024
std::enable_if< std::is_same< T, cl_float4 >::value||std::is_same< T, cl_int4 >::value||std::is_same< T, cl_uint4 >::value, cl_int >::type enqueueFillImage(const Image &image, T fillColor, const array< size_type, 3 > &origin, const array< size_type, 3 > &region, const vector< Event > *events=nullptr, Event *event=nullptr) const
Definition opencl.hpp:8561
cl_int enqueueMapSVM(T *ptr, cl_bool blocking, cl_map_flags flags, size_type size, const vector< Event > *events=nullptr, Event *event=nullptr) const
Definition opencl.hpp:8955
cl_int enqueueMigrateSVM(const cl::vector< T * > &svmRawPointers, const cl::vector< size_type > &sizes, cl_mem_migration_flags flags=0, const vector< Event > *events=nullptr, Event *event=nullptr) const
Definition opencl.hpp:9237
cl_int enqueueMemFillSVM(T *ptr, PatternType pattern, size_type size, const vector< Event > *events=nullptr, Event *event=nullptr) const
Definition opencl.hpp:8880
CL_API_PREFIX__VERSION_1_1_DEPRECATED cl_int enqueueBarrier() const CL_API_SUFFIX__VERSION_1_1_DEPRECATED
Definition opencl.hpp:9583
cl_int enqueueBarrierWithWaitList(const vector< Event > *events=nullptr, Event *event=nullptr) const
Definition opencl.hpp:9173
cl_int enqueueFillBuffer(const Buffer &buffer, PatternType pattern, size_type offset, size_type size, const vector< Event > *events=nullptr, Event *event=nullptr) const
Definition opencl.hpp:8359
Class interface for cl_context.
Definition opencl.hpp:3212
Context & operator=(const cl_context &rhs)
Assignment operator from cl_context - takes ownership.
Definition opencl.hpp:3520
cl_int setDestructorCallback(void(CL_CALLBACK *pfn_notify)(cl_context, void *), void *user_data=nullptr)
Registers a destructor callback function with a context.
Definition opencl.hpp:3644
static Context setDefault(const Context &default_context)
Definition opencl.hpp:3497
static Context getDefault(cl_int *err=nullptr)
Returns a singleton context including all devices of CL_DEVICE_TYPE_DEFAULT.
Definition opencl.hpp:3480
cl_int getSupportedImageFormats(cl_mem_flags flags, cl_mem_object_type type, vector< ImageFormat > *formats) const
Gets a list of supported image formats.
Definition opencl.hpp:3553
Context()
Default constructor - initializes to nullptr.
Definition opencl.hpp:3505
cl_int getInfo(cl_context_info name, T *param) const
Wrapper for clGetContextInfo().
Definition opencl.hpp:3528
Context(const vector< Device > &devices, const cl_context_properties *properties=nullptr, void(CL_CALLBACK *notifyFptr)(const char *, const void *, size_type, void *)=nullptr, void *data=nullptr, cl_int *err=nullptr)
Constructs a context including a list of specified devices.
Definition opencl.hpp:3327
DeviceCommandQueue interface for device cl_command_queues.
Definition opencl.hpp:9701
static DeviceCommandQueue updateDefault(const Context &context, const Device &device, const DeviceCommandQueue &default_queue, cl_int *err=nullptr)
Definition opencl.hpp:9916
static DeviceCommandQueue getDefault(const CommandQueue &queue, cl_int *err=nullptr)
Definition opencl.hpp:9931
static DeviceCommandQueue makeDefault(cl_int *err=nullptr)
Definition opencl.hpp:9826
Class interface for cl_device_id.
Definition opencl.hpp:2493
static Device getDefault(cl_int *errResult=nullptr)
Returns the first device on the default context.
Definition opencl.hpp:2542
Device & operator=(const cl_device_id &rhs)
Assignment operator from cl_device_id.
Definition opencl.hpp:2571
cl_int getInfo(cl_device_info name, T *param) const
Wrapper for clGetDeviceInfo().
Definition opencl.hpp:2580
Device()
Default constructor - initializes to nullptr.
Definition opencl.hpp:2529
cl_ulong getHostTimer(cl_int *error=nullptr)
Definition opencl.hpp:2608
std::pair< cl_ulong, cl_ulong > getDeviceAndHostTimer(cl_int *error=nullptr)
Definition opencl.hpp:2632
cl_int createSubDevices(const cl_device_partition_property *properties, vector< Device > *devices)
Wrapper for clCreateSubDevices().
Definition opencl.hpp:3076
static Device setDefault(const Device &default_device)
Definition opencl.hpp:2560
Class interface for cl_event.
Definition opencl.hpp:3700
cl_int setCallback(cl_int type, void(CL_CALLBACK *pfn_notify)(cl_event, cl_int, void *), void *user_data=nullptr)
Registers a user callback function for a specific command execution status.
Definition opencl.hpp:3789
cl_int getProfilingInfo(cl_profiling_info name, T *param) const
Wrapper for clGetEventProfilingInfo().
Definition opencl.hpp:3752
cl_int getInfo(cl_event_info name, T *param) const
Wrapper for clGetEventInfo().
Definition opencl.hpp:3729
cl_int wait() const
Blocks the calling thread until this event completes.
Definition opencl.hpp:3777
Event()
Default constructor - initializes to nullptr.
Definition opencl.hpp:3703
Event & operator=(const cl_event &rhs)
Assignment operator from cl_event - takes ownership.
Definition opencl.hpp:3721
static cl_int waitForEvents(const vector< Event > &events)
Blocks the calling thread until every event specified is complete.
Definition opencl.hpp:3809
Image interface for arrays of 1D images.
Definition opencl.hpp:5062
Image interface for 1D buffer images.
Definition opencl.hpp:4971
Image1D()
Default constructor - initializes to nullptr.
Definition opencl.hpp:4906
Image1D & operator=(const cl_mem &rhs)
Assignment from cl_mem - performs shallow copy.
Definition opencl.hpp:4958
Image1D(const Context &context, cl_mem_flags flags, ImageFormat format, size_type width, void *host_ptr=nullptr, cl_int *err=nullptr)
Constructs a 1D Image in a specified context.
Definition opencl.hpp:4877
Image interface for arrays of 2D images.
Definition opencl.hpp:5517
Class interface for GL 2D Image Memory objects.
Definition opencl.hpp:5453
Image2DGL(const Context &context, cl_mem_flags flags, cl_GLenum target, cl_GLint miplevel, cl_GLuint texobj, cl_int *err=nullptr)
Constructs an Image2DGL in a specified context, from a given GL Texture.
Definition opencl.hpp:5460
Class interface for 2D Image Memory objects.
Definition opencl.hpp:5167
Image2D(const Context &context, cl_mem_flags flags, ImageFormat format, size_type width, size_type height, size_type row_pitch=0, void *host_ptr=nullptr, cl_int *err=nullptr)
Constructs a 2D Image in a specified context.
Definition opencl.hpp:5173
Image2D()
Default constructor - initializes to nullptr.
Definition opencl.hpp:5418
Image2D & operator=(const cl_mem &rhs)
Assignment from cl_mem - performs shallow copy.
Definition opencl.hpp:5434
Image3DGL()
Default constructor - initializes to nullptr.
Definition opencl.hpp:5806
Image3DGL & operator=(const cl_mem &rhs)
Assignment from cl_mem - performs shallow copy.
Definition opencl.hpp:5822
Image3DGL(const Context &context, cl_mem_flags flags, cl_GLenum target, cl_GLint miplevel, cl_GLuint texobj, cl_int *err=nullptr)
Constructs an Image3DGL in a specified context, from a given GL Texture.
Definition opencl.hpp:5782
Image3D & operator=(const cl_mem &rhs)
Assignment from cl_mem - performs shallow copy.
Definition opencl.hpp:5757
Image3D()
Default constructor - initializes to nullptr.
Definition opencl.hpp:5741
Image3D(const Context &context, cl_mem_flags flags, ImageFormat format, size_type width, size_type height, size_type depth, size_type row_pitch=0, size_type slice_pitch=0, void *host_ptr=nullptr, cl_int *err=nullptr)
Constructs a 3D Image in a specified context.
Definition opencl.hpp:5631
general image interface for GL interop. We abstract the 2D and 3D GL images into a single instance he...
Definition opencl.hpp:5839
C++ base class for Image Memory objects.
Definition opencl.hpp:4812
cl_int getImageInfo(cl_image_info name, T *param) const
Wrapper for clGetImageInfo().
Definition opencl.hpp:4841
Image & operator=(const cl_mem &rhs)
Assignment from cl_mem - performs shallow copy.
Definition opencl.hpp:4831
Image()
Default constructor - initializes to nullptr.
Definition opencl.hpp:4815
Event operator()(const EnqueueArgs &args, Ts... ts)
Definition opencl.hpp:11134
Event result_type
Return type of the functor.
Definition opencl.hpp:11127
Class interface for cl_kernel.
Definition opencl.hpp:6259
cl_int setSVMPointers(const vector< void * > &pointerList)
Definition opencl.hpp:6453
cl_int setArg(cl_uint index, const cl::pointer< T, D > &argPtr)
setArg overload taking a shared_ptr type
Definition opencl.hpp:6397
Kernel()
Default constructor - initializes to nullptr.
Definition opencl.hpp:6265
Kernel & operator=(const cl_kernel &rhs)
Assignment operator from cl_kernel - takes ownership.
Definition opencl.hpp:6283
Kernel clone()
Definition opencl.hpp:6569
cl_int enableFineGrainedSystemSVM(bool svmEnabled)
Enable fine-grained system SVM.
Definition opencl.hpp:6489
Class interface for cl_mem.
Definition opencl.hpp:3886
Memory()
Default constructor - initializes to nullptr.
Definition opencl.hpp:3889
Memory & operator=(const cl_mem &rhs)
Assignment operator from cl_mem - takes ownership.
Definition opencl.hpp:3910
cl_int getInfo(cl_mem_info name, T *param) const
Wrapper for clGetMemObjectInfo().
Definition opencl.hpp:3918
cl_int setDestructorCallback(void(CL_CALLBACK *pfn_notify)(cl_mem, void *), void *user_data=nullptr)
Registers a callback function to be called when the memory object is no longer needed.
Definition opencl.hpp:3953
Class interface for specifying NDRange values.
Definition opencl.hpp:6113
size_type dimensions() const
Queries the number of dimensions in the range.
Definition opencl.hpp:6173
size_type size() const
Returns the size of the object in bytes, based on the runtime number of dimensions.
Definition opencl.hpp:6180
NDRange()
Default constructor - resulting range has zero dimensions.
Definition opencl.hpp:6120
Class interface for Pipe Memory Objects.
Definition opencl.hpp:5895
Pipe()
Default constructor - initializes to nullptr.
Definition opencl.hpp:5951
cl_int getInfo(cl_pipe_info name, T *param) const
Wrapper for clGetMemObjectInfo().
Definition opencl.hpp:5977
Pipe & operator=(const cl_mem &rhs)
Assignment from cl_mem - performs shallow copy.
Definition opencl.hpp:5967
Pipe(const Context &context, cl_uint packet_size, cl_uint max_packets, cl_int *err=nullptr)
Constructs a Pipe in a specified context.
Definition opencl.hpp:5907
Class interface for cl_platform_id.
Definition opencl.hpp:2718
Platform()
Default constructor - initializes to nullptr.
Definition opencl.hpp:2790
cl_int unloadCompiler()
Wrapper for clUnloadCompiler().
Definition opencl.hpp:3067
cl_int getDevices(cl_device_type type, vector< Device > *devices) const
Gets a list of devices for this platform.
Definition opencl.hpp:2864
Platform & operator=(const cl_platform_id &rhs)
Assignment operator from cl_platform_id.
Definition opencl.hpp:2806
static cl_int get(vector< Platform > *platforms)
Gets a list of available platforms.
Definition opencl.hpp:2999
cl_int getInfo(cl_platform_info name, T *param) const
Wrapper for clGetPlatformInfo().
Definition opencl.hpp:2839
static Platform setDefault(const Platform &default_platform)
Definition opencl.hpp:2830
Program interface that implements cl_program.
Definition opencl.hpp:6584
CL_API_PREFIX__VERSION_2_2_DEPRECATED cl_int setReleaseCallback(void(CL_CALLBACK *pfn_notify)(cl_program program, void *user_data), void *user_data=nullptr) CL_API_SUFFIX__VERSION_2_2_DEPRECATED
Registers a callback function to be called when destructors for program scope global variables are co...
Definition opencl.hpp:7302
std::enable_if<!std::is_pointer< T >::value, cl_int >::type setSpecializationConstant(cl_uint index, const T &value)
Sets a SPIR-V specialization constant.
Definition opencl.hpp:7321
bool operator==(SVMAllocator const &rhs)
Definition opencl.hpp:4221
pointer allocate(size_type size, typename cl::SVMAllocator< void, SVMTrait >::const_pointer=0, bool map=true)
Definition opencl.hpp:4146
size_type max_size() const noexcept
Definition opencl.hpp:4193
Sampler()
Default constructor - initializes to nullptr.
Definition opencl.hpp:6013
Sampler & operator=(const cl_sampler &rhs)
Assignment operator from cl_sampler - takes ownership.
Definition opencl.hpp:6074
cl_int getInfo(cl_sampler_info name, T *param) const
Wrapper for clGetSamplerInfo().
Definition opencl.hpp:6084
UserEvent()
Default constructor - initializes to nullptr.
Definition opencl.hpp:3849
UserEvent(const Context &context, cl_int *err=nullptr)
Constructs a user event on a given context.
Definition opencl.hpp:3833
cl_int setStatus(cl_int status)
Sets the execution status of a user event object.
Definition opencl.hpp:3855
The OpenCL C++ bindings are defined within this namespace.
Definition opencl.hpp:586
cl_int copy(IteratorType startIterator, IteratorType endIterator, cl::Buffer &buffer)
Definition opencl.hpp:10316
vector< T, cl::SVMAllocator< int, cl::SVMTraitCoarse<> > > coarse_svm_vector
Vector alias to simplify construction of coarse-grained SVM containers.
Definition opencl.hpp:4338
LocalSpaceArg Local(size_type size)
Helper function for generating LocalSpaceArg objects.
Definition opencl.hpp:6244
CL_API_PREFIX__VERSION_1_1_DEPRECATED cl_int UnloadCompiler() CL_API_SUFFIX__VERSION_1_1_DEPRECATED
Definition opencl.hpp:3177
vector< T, cl::SVMAllocator< int, cl::SVMTraitFine<> > > fine_svm_vector
Vector alias to simplify construction of fine-grained SVM containers.
Definition opencl.hpp:4344
vector< T, cl::SVMAllocator< int, cl::SVMTraitAtomic<> > > atomic_svm_vector
Vector alias to simplify construction of fine-grained SVM containers that support platform atomics.
Definition opencl.hpp:4350
cl::pointer< T, detail::Deleter< Alloc > > allocate_pointer(const Alloc &alloc_, Args &&... args)
Definition opencl.hpp:4281
cl_int enqueueUnmapSVM(T *ptr, const vector< Event > *events=nullptr, Event *event=nullptr)
Definition opencl.hpp:10231
cl_int enqueueMapSVM(T *ptr, cl_bool blocking, cl_map_flags flags, size_type size, const vector< Event > *events=nullptr, Event *event=nullptr)
Definition opencl.hpp:10129
cl_int mapSVM(cl::vector< T, Alloc > &container)
Definition opencl.hpp:10418
cl_int unmapSVM(cl::vector< T, Alloc > &container)
Definition opencl.hpp:10427
Adds constructors and member functions for cl_image_format.
Definition opencl.hpp:2460
ImageFormat & operator=(const ImageFormat &rhs)
Assignment operator.
Definition opencl.hpp:2475
ImageFormat()
Default constructor - performs no initialization.
Definition opencl.hpp:2462
Local address wrapper for use with Kernel::setArg.
Definition opencl.hpp:6201
Event type_(const EnqueueArgs &, Ts...)
Function signature of kernel functor with no event dependency.
Definition opencl.hpp:11224
Event result_type
Return type of the functor.
Definition opencl.hpp:11221
static cl_int release(cl_device_id device)
Definition opencl.hpp:2010
static cl_int retain(cl_device_id device)
Definition opencl.hpp:1999