oneAPI Deep Neural Network Library (oneDNN)  1.95.0
Performance library for Deep Learning
example_utils.hpp
1 /*******************************************************************************
2 * Copyright 2019-2020 Intel Corporation
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 *******************************************************************************/
16 
17 #ifndef EXAMPLE_UTILS_HPP
18 #define EXAMPLE_UTILS_HPP
19 
20 #include <algorithm>
21 #include <cassert>
22 #include <functional>
23 #include <iostream>
24 #include <numeric>
25 #include <stdexcept>
26 #include <stdlib.h>
27 #include <string>
28 #include <initializer_list>
29 
30 #include "dnnl.hpp"
31 #include "dnnl_debug.h"
32 
#if DNNL_CPU_THREADING_RUNTIME == DNNL_RUNTIME_OMP

// Two-level expansion: PRAGMA_MACRO forwards to PRAGMA_MACRo so that a macro
// argument (e.g. the collapse count `n`) is fully expanded before being
// stringized by _Pragma / wrapped by MSVC's __pragma.
#ifdef _MSC_VER
#define PRAGMA_MACRo(x) __pragma(x)
#define PRAGMA_MACRO(x) PRAGMA_MACRo(x)
#else
#define PRAGMA_MACRo(x) _Pragma(#x)
#define PRAGMA_MACRO(x) PRAGMA_MACRo(x)
#endif

// MSVC doesn't support collapse clause in omp parallel
// (defining `collapse(x)` to nothing erases the clause from the pragma below).
#if defined(_MSC_VER) && !defined(__clang__) && !defined(__INTEL_COMPILER)
#define collapse(x)
#endif

#define PRAGMA_OMP_PARALLEL_FOR_COLLAPSE(n) PRAGMA_MACRO(omp parallel for collapse(n))
#else // DNNL_CPU_THREADING_RUNTIME == DNNL_RUNTIME_OMP
// Non-OpenMP threading runtimes: the parallel-for pragma expands to nothing.
#define PRAGMA_OMP_PARALLEL_FOR_COLLAPSE(n)
#endif
52 
53 dnnl::engine::kind validate_engine_kind(dnnl::engine::kind akind) {
54  // Checking if a GPU exists on the machine
55  if (akind == dnnl::engine::kind::gpu) {
57  std::cout << "Application couldn't find GPU, please run with CPU "
58  "instead.\n";
59  exit(0);
60  }
61  }
62  return akind;
63 }
64 
// Exception type signaling that the example exercises a feature that is not
// implemented on the current system. Catching it counts as a successful
// (skipped) run rather than an error, so the user is merely notified.
struct example_allows_unimplemented : public std::exception {
    // Explanatory text; expected to point at a string with static lifetime
    // (throw sites pass string literals).
    const char *message;
    example_allows_unimplemented(const char *message) noexcept
        : message(message) {}
    const char *what() const noexcept override { return message; }
};
74 
75 inline const char *engine_kind2str_upper(dnnl::engine::kind kind);
76 
77 // Runs example function with signature void() and catches errors.
78 // Returns `0` on success, `1` or oneDNN error, and `2` on example error.
79 inline int handle_example_errors(
80  std::initializer_list<dnnl::engine::kind> engine_kinds,
81  std::function<void()> example) {
82  int exit_code = 0;
83 
84  try {
85  example();
86  } catch (example_allows_unimplemented &e) {
87  std::cout << e.message << std::endl;
88  exit_code = 0;
89  } catch (dnnl::error &e) {
90  std::cout << "oneDNN error caught: " << std::endl
91  << "\tStatus: " << dnnl_status2str(e.status) << std::endl
92  << "\tMessage: " << e.what() << std::endl;
93  exit_code = 1;
94  } catch (std::exception &e) {
95  std::cout << "Error in the example: " << e.what() << "." << std::endl;
96  exit_code = 2;
97  }
98 
99  std::string engine_kind_str;
100  for (auto it = engine_kinds.begin(); it != engine_kinds.end(); ++it) {
101  if (it != engine_kinds.begin()) engine_kind_str += "/";
102  engine_kind_str += engine_kind2str_upper(*it);
103  }
104 
105  std::cout << "Example " << (exit_code ? "failed" : "passed") << " on "
106  << engine_kind_str << "." << std::endl;
107  return exit_code;
108 }
109 
110 // Same as above, but for functions with signature
111 // void(dnnl::engine::kind engine_kind, int argc, char **argv).
112 inline int handle_example_errors(
113  std::function<void(dnnl::engine::kind, int, char **)> example,
114  dnnl::engine::kind engine_kind, int argc, char **argv) {
115  return handle_example_errors(
116  {engine_kind}, [&]() { example(engine_kind, argc, argv); });
117 }
118 
119 // Same as above, but for functions with signature void(dnnl::engine::kind).
120 inline int handle_example_errors(
121  std::function<void(dnnl::engine::kind)> example,
122  dnnl::engine::kind engine_kind) {
123  return handle_example_errors(
124  {engine_kind}, [&]() { example(engine_kind); });
125 }
126 
127 inline dnnl::engine::kind parse_engine_kind(
128  int argc, char **argv, int extra_args = 0) {
129  // Returns default engine kind, i.e. CPU, if none given
130  if (argc == 1) {
131  return validate_engine_kind(dnnl::engine::kind::cpu);
132  } else if (argc <= extra_args + 2) {
133  std::string engine_kind_str = argv[1];
134  // Checking the engine type, i.e. CPU or GPU
135  if (engine_kind_str == "cpu") {
136  return validate_engine_kind(dnnl::engine::kind::cpu);
137  } else if (engine_kind_str == "gpu") {
138  return validate_engine_kind(dnnl::engine::kind::gpu);
139  }
140  }
141 
142  // If all above fails, the example should be ran properly
143  std::cout << "Inappropriate engine kind." << std::endl
144  << "Please run the example like this: " << argv[0] << " [cpu|gpu]"
145  << (extra_args ? " [extra arguments]" : "") << "." << std::endl;
146  exit(1);
147 }
148 
149 inline const char *engine_kind2str_upper(dnnl::engine::kind kind) {
150  if (kind == dnnl::engine::kind::cpu) return "CPU";
151  if (kind == dnnl::engine::kind::gpu) return "GPU";
152  assert(!"not expected");
153  return "<Unknown engine>";
154 }
155 
156 inline dnnl::memory::dim product(const dnnl::memory::dims &dims) {
157  return std::accumulate(dims.begin(), dims.end(), (dnnl::memory::dim)1,
158  std::multiplies<dnnl::memory::dim>());
159 }
160 
// Read from memory, write to handle
//
// Copies the raw bytes of a dnnl::memory object into the caller-provided
// buffer `handle`, dispatching on the runtime the memory's engine uses
// (SYCL buffer/USM, OpenCL, or plain CPU). `handle` must point to at least
// mem.get_desc().get_size() bytes. Every path completes the copy before
// returning; asserts if no path matches the engine kind.
inline void read_from_dnnl_memory(void *handle, dnnl::memory &mem) {
    dnnl::engine eng = mem.get_engine();
    size_t size = mem.get_desc().get_size();

#if DNNL_WITH_SYCL
    // SYCL applies only when the matching runtime (CPU or GPU) is built
    // as SYCL AND the engine is of that kind.
    bool is_cpu_sycl = (DNNL_CPU_RUNTIME == DNNL_RUNTIME_SYCL
            && eng.get_kind() == dnnl::engine::kind::cpu);
    bool is_gpu_sycl = (DNNL_GPU_RUNTIME == DNNL_RUNTIME_SYCL
            && eng.get_kind() == dnnl::engine::kind::gpu);
    if (is_cpu_sycl || is_gpu_sycl) {
#ifdef DNNL_USE_SYCL_BUFFERS
        // Buffer interop: a read accessor exposes the data to the host.
        auto buffer = mem.get_sycl_buffer<uint8_t>();
        auto src = buffer.get_access<cl::sycl::access::mode::read>();
        uint8_t *src_ptr = src.get_pointer();
#elif defined(DNNL_USE_DPCPP_USM)
        // USM interop: the data handle is directly dereferenceable.
        uint8_t *src_ptr = (uint8_t *)mem.get_data_handle();
#else
#error "Not expected"
#endif
        for (size_t i = 0; i < size; ++i)
            ((uint8_t *)handle)[i] = src_ptr[i];
        return;
    }
#endif
#if DNNL_GPU_RUNTIME == DNNL_RUNTIME_OCL
    if (eng.get_kind() == dnnl::engine::kind::gpu) {
        // OpenCL path: blocking read (CL_TRUE) via a temporary stream's
        // command queue.
        dnnl::stream s(eng);
        cl_command_queue q = s.get_ocl_command_queue();
        cl_mem m = mem.get_ocl_mem_object();

        cl_int ret = clEnqueueReadBuffer(
                q, m, CL_TRUE, 0, size, handle, 0, NULL, NULL);
        if (ret != CL_SUCCESS)
            throw std::runtime_error("clEnqueueReadBuffer failed.");
        return;
    }
#endif

    if (eng.get_kind() == dnnl::engine::kind::cpu) {
        // Plain CPU path: the data handle is a host pointer; byte-wise copy.
        uint8_t *src = static_cast<uint8_t *>(mem.get_data_handle());
        for (size_t i = 0; i < size; ++i)
            ((uint8_t *)handle)[i] = src[i];
        return;
    }

    assert(!"not expected");
}
209 
// Read from handle, write to memory
//
// Copies raw bytes from the caller-provided buffer `handle` into a
// dnnl::memory object, dispatching on the runtime the memory's engine uses
// (SYCL buffer/USM, OpenCL, or plain CPU). `handle` must point to at least
// mem.get_desc().get_size() bytes. Every path completes the copy before
// returning; asserts if no path matches the engine kind.
inline void write_to_dnnl_memory(void *handle, dnnl::memory &mem) {
    dnnl::engine eng = mem.get_engine();
    size_t size = mem.get_desc().get_size();

#if DNNL_WITH_SYCL
    // SYCL applies only when the matching runtime (CPU or GPU) is built
    // as SYCL AND the engine is of that kind.
    bool is_cpu_sycl = (DNNL_CPU_RUNTIME == DNNL_RUNTIME_SYCL
            && eng.get_kind() == dnnl::engine::kind::cpu);
    bool is_gpu_sycl = (DNNL_GPU_RUNTIME == DNNL_RUNTIME_SYCL
            && eng.get_kind() == dnnl::engine::kind::gpu);
    if (is_cpu_sycl || is_gpu_sycl) {
#ifdef DNNL_USE_SYCL_BUFFERS
        // Buffer interop: a write accessor exposes the data to the host.
        auto buffer = mem.get_sycl_buffer<uint8_t>();
        auto dst = buffer.get_access<cl::sycl::access::mode::write>();
        uint8_t *dst_ptr = dst.get_pointer();
#elif defined(DNNL_USE_DPCPP_USM)
        // USM interop: the data handle is directly dereferenceable.
        uint8_t *dst_ptr = (uint8_t *)mem.get_data_handle();
#else
#error "Not expected"
#endif
        for (size_t i = 0; i < size; ++i)
            dst_ptr[i] = ((uint8_t *)handle)[i];
        return;
    }
#endif
#if DNNL_GPU_RUNTIME == DNNL_RUNTIME_OCL
    if (eng.get_kind() == dnnl::engine::kind::gpu) {
        // OpenCL path: blocking write (CL_TRUE) via a temporary stream's
        // command queue.
        dnnl::stream s(eng);
        cl_command_queue q = s.get_ocl_command_queue();
        cl_mem m = mem.get_ocl_mem_object();

        cl_int ret = clEnqueueWriteBuffer(
                q, m, CL_TRUE, 0, size, handle, 0, NULL, NULL);
        if (ret != CL_SUCCESS)
            throw std::runtime_error("clEnqueueWriteBuffer failed.");
        return;
    }
#endif

    if (eng.get_kind() == dnnl::engine::kind::cpu) {
        // Plain CPU path: the data handle is a host pointer; byte-wise copy.
        uint8_t *dst = static_cast<uint8_t *>(mem.get_data_handle());
        for (size_t i = 0; i < size; ++i)
            dst[i] = ((uint8_t *)handle)[i];
        return;
    }

    assert(!"not expected");
}
258 
259 #endif
dnnl::memory::desc::get_size
size_t get_size() const
Returns size of the memory descriptor in bytes.
Definition: dnnl.hpp:2038
dnnl::memory::get_sycl_buffer
cl::sycl::buffer< T, ndims > get_sycl_buffer(size_t *offset=nullptr) const
Returns the underlying SYCL buffer object.
Definition: dnnl.hpp:2285
dnnl::error::what
const char * what() const noexcept override
Returns the explanatory string.
Definition: dnnl.hpp:108
dnnl::stream
An execution stream.
Definition: dnnl.hpp:1086
dnnl::engine
An execution engine.
Definition: dnnl.hpp:865
dnnl::engine::get_count
static size_t get_count(kind akind)
Returns the number of engines of a certain kind.
Definition: dnnl.hpp:889
dnnl::engine::kind
kind
Kinds of engines.
Definition: dnnl.hpp:870
DNNL_RUNTIME_SYCL
#define DNNL_RUNTIME_SYCL
SYCL runtime.
Definition: dnnl_types.h:2213
dnnl.hpp
dnnl::memory::get_desc
desc get_desc() const
Returns the associated memory descriptor.
Definition: dnnl.hpp:2139
dnnl::engine::kind::gpu
@ gpu
GPU engine.
dnnl::error
oneDNN exception class.
Definition: dnnl.hpp:96
dnnl::memory::dim
dnnl_dim_t dim
Integer type for representing dimension sizes and indices.
Definition: dnnl.hpp:1243
dnnl_debug.h
dnnl::memory
Memory object.
Definition: dnnl.hpp:1241
dnnl::memory::dims
std::vector< dim > dims
Vector of dimensions.
Definition: dnnl.hpp:1246
dnnl::memory::get_engine
engine get_engine() const
Returns the associated engine.
Definition: dnnl.hpp:2147
dnnl::memory::get_data_handle
void * get_data_handle() const
Returns the underlying memory buffer.
Definition: dnnl.hpp:2158
dnnl::memory::get_ocl_mem_object
cl_mem get_ocl_mem_object() const
Returns the OpenCL memory object associated with the memory.
Definition: dnnl.hpp:2257
dnnl::engine::kind::cpu
@ cpu
CPU engine.