clDNN
layout.hpp
1 /*
2 // Copyright (c) 2016 Intel Corporation
3 //
4 // Licensed under the Apache License, Version 2.0 (the "License");
5 // you may not use this file except in compliance with the License.
6 // You may obtain a copy of the License at
7 //
8 // http://www.apache.org/licenses/LICENSE-2.0
9 //
10 // Unless required by applicable law or agreed to in writing, software
11 // distributed under the License is distributed on an "AS IS" BASIS,
12 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 // See the License for the specific language governing permissions and
14 // limitations under the License.
15 */
16 
18 #pragma once
19 #include "tensor.hpp"
20 #include <cmath>
21 #include <cstdlib>
22 
23 namespace cldnn
24 {
27 
30 
/// @brief Possible data types could be stored in memory.
/// Enumerator values are taken verbatim from the C API constants so the enum
/// can be cast to/from the C API representation without translation.
enum class data_types : size_t
{
    i8 = cldnn_i8,   // signed 8-bit integer
    u8 = cldnn_u8,   // unsigned 8-bit integer
    f16 = cldnn_f16, // half-precision floating point (backed by half_t)
    f32 = cldnn_f32, // single-precision floating point
};
39 
/// Converts C++ type to @ref data_types.
/// Only the listed specializations exist; using any other type is a compile error.
template <typename T> struct type_to_data_type;
#ifndef DOXYGEN_SHOULD_SKIP_THIS
template<> struct type_to_data_type <int8_t> { static const data_types value = data_types::i8; };
template<> struct type_to_data_type <uint8_t> { static const data_types value = data_types::u8; };
template<> struct type_to_data_type <half_t> { static const data_types value = data_types::f16; };
template<> struct type_to_data_type <float> { static const data_types value = data_types::f32; };
#endif
48 
50 template<data_types Data_Type> struct data_type_to_type;
51 #ifndef DOXYGEN_SHOULD_SKIP_THIS
52 template<> struct data_type_to_type <data_types::i8> { typedef int8_t type; };
53 template<> struct data_type_to_type<data_types::f16> { typedef half_t type; };
54 template<> struct data_type_to_type<data_types::f32> { typedef float type; };
55 #endif
56 
57 
60 {
61  static size_t size_of(data_types data_type)
62  {
63  return (static_cast<uint32_t>(data_type) & ~(CLDNN_FLOAT_TYPE_MASK | CLDNN_UINT_TYPE_MASK));
64  }
65 
66  static bool is_floating_point(data_types data_type)
67  {
68  return (static_cast<uint32_t>(data_type) & CLDNN_FLOAT_TYPE_MASK) != 0;
69  }
70 
71  static size_t align_of(data_types data_type)
72  {
73  switch (data_type)
74  {
75  case data_types::i8:
77  case data_types::f16:
79  case data_types::f32:
81  default: return size_t(1);
82  }
83  }
84 
85  static std::string name(data_types data_type)
86  {
87  switch (data_type)
88  {
89  case data_types::i8:
90  return "i8";
91  case data_types::f16:
92  return "f16";
93  case data_types::f32:
94  return "f32";
95  default:
96  assert(0);
97  return std::string("invalid data type: " + std::to_string((int)data_type));
98  }
99  }
100 };
101 
103 template <typename T>
105 {
106  return data_type == type_to_data_type<T>::value;
107 }
108 
/// Helper function to get both data_types and format::type in a single, unique value. Usable in 'case' statement.
/// Layout of the fused value: data_type occupies the bits above the low 16,
/// format the low 16 bits. Both operands are range-checked first; the whole
/// body is a single conditional expression so the function remains a valid
/// C++11 constexpr function (the throw fires only when the check fails).
constexpr auto fuse(data_types dt, cldnn::format::type fmt) -> decltype(static_cast<std::underlying_type<data_types>::type>(dt) | static_cast<std::underlying_type<format::type>::type>(fmt))
{
    using dt_type = std::underlying_type<data_types>::type;
    using fmt_type = std::underlying_type<cldnn::format::type>::type;
    using fmt_narrow_type = int16_t;

    return static_cast<fmt_type>(fmt) <= std::numeric_limits<fmt_narrow_type>::max() &&
        static_cast<dt_type>(dt) <= (std::numeric_limits<dt_type>::max() >> (sizeof(fmt_narrow_type) * 8))
        ? (static_cast<dt_type>(dt) << (sizeof(fmt_narrow_type) * 8)) |
        (static_cast<fmt_type>(fmt) >= 0 ? static_cast<fmt_narrow_type>(fmt) : static_cast<fmt_narrow_type>(-1))
        : throw std::invalid_argument("data_type and/or format values are too big to be fused into single value");
}
122 
123 
/// @brief Represents data padding information.
struct padding
{
    /// @brief Filling value for an element of padding area.
    float filling_value() const { return _filling_value; }

    /// @brief Gets lower padding sizes. For spatials, it means size of left (X) and top (Y) padding.
    tensor lower_size() const { return _lower_size; }

    /// @brief Gets upper padding sizes. For spatials, it means size of right (X) and bottom (Y) padding.
    tensor upper_size() const { return _upper_size; }

    /// @brief Constructs padding from lower/upper size vectors.
    /// Negative entries are accepted; their absolute values are stored.
    padding(const std::vector<tensor::value_type>& lower_sizes, const std::vector<tensor::value_type>& upper_sizes, float filling_value = 0.0f)
        : _lower_size(to_abs(lower_sizes), 0), _upper_size(to_abs(upper_sizes), 0), _filling_value(filling_value)
    {}

    /// @brief Constructs symmetric padding (same lower and upper sizes).
    padding(const std::vector<tensor::value_type>& sizes, float filling_value = 0.0f)
        : padding(sizes, sizes, filling_value)
    {}

    /// @brief Constructs "zero-sized" padding.
    padding() : padding({ 0,0,0,0 }, 0) {}

    /// @brief Copy construction from C API @ref cldnn_padding.
    padding(const cldnn_padding& other)
        : _lower_size(other.lower_size), _upper_size(other.upper_size), _filling_value(other.filling_value)
    {}

    /// @brief Implicit conversion to C API @ref cldnn_padding.
    operator cldnn_padding() const
    {
        return{ static_cast<cldnn_tensor>(_lower_size),
            static_cast<cldnn_tensor>(_upper_size),
            _filling_value };
    }

    /// @brief True when any lower or upper padding size is non-zero.
    explicit operator bool() const
    {
        return std::any_of(_lower_size.raw.begin(), _lower_size.raw.end(), [](const tensor::value_type& el) { return el != 0; }) ||
            std::any_of(_upper_size.raw.begin(), _upper_size.raw.end(), [](const tensor::value_type& el) { return el != 0; });
    }

    friend bool operator ==(const padding& lhs, const padding& rhs)
    {
        return lhs._lower_size == rhs._lower_size
            && lhs._upper_size == rhs._upper_size
            && lhs._filling_value == rhs._filling_value;
    }

    friend bool operator !=(const padding& lhs, const padding& rhs)
    {
        return !(lhs == rhs);
    }

    // Lexicographic ordering: filling value, then lower size, then upper size.
    // Exact float comparison is deliberate here — orders identical bit patterns
    // consistently for use as an associative-container key.
    friend bool operator <(const padding& lhs, const padding& rhs)
    {
        if (lhs._filling_value != rhs._filling_value)
            return (lhs._filling_value < rhs._filling_value);
        if (lhs._lower_size != rhs._lower_size)
            return (lhs._lower_size < rhs._lower_size);
        return (lhs._upper_size < rhs._upper_size);
    }

    /// @brief Returns a padding covering both arguments (element-wise max of the sizes).
    static padding max(padding const& lhs, padding const& rhs, float filling_value = 0.0f)
    {
        auto lower = tensor::max(lhs.lower_size(), rhs.lower_size());
        auto upper = tensor::max(lhs.upper_size(), rhs.upper_size());
        return padding{ lower.sizes(), upper.sizes(), filling_value };
    }

private:
    tensor _lower_size; ///< Lower padding sizes (left/top for spatials).
    tensor _upper_size; ///< Upper padding sizes (right/bottom for spatials).
    // TODO: Add support for non-zero filling value (if necessary) or remove variable (if not necessary).
    float _filling_value; ///< Filling value for padding area.

    /// @brief Returns a copy of @p sizes with every element replaced by its absolute value.
    static std::vector<tensor::value_type> to_abs(const std::vector<tensor::value_type>& sizes)
    {
        std::vector<tensor::value_type> result;
        result.reserve(sizes.size());
        std::transform(sizes.cbegin(), sizes.cend(), std::back_inserter(result), [](const tensor::value_type& el) { return abs(el); });
        return result; // NRVO
    }
};
220 
223 struct layout
224 {
228  , format(fmt)
229  , size(size)
230  , data_padding(apadding)
231  {}
232 
    /// Construct C++ layout based on C API @ref cldnn_layout.
    layout(const cldnn_layout& other)
        : data_type(static_cast<data_types>(other.data_type))
        , format(static_cast<cldnn::format::type>(other.format))
        , size(other.size)
        , data_padding(other.padding)
    {}
240 
    /// Convert to C API @ref cldnn_layout.
    operator cldnn_layout() const
    {
        return{ static_cast<decltype(cldnn_layout::data_type)>(data_type), static_cast<decltype(cldnn_layout::format)>(format), size, data_padding };
    }
246 
247  layout(const layout& other) = default;
248 
249  layout& operator=(const layout& other)
250  {
251  if (this == &other)
252  return *this;
253  data_type = other.data_type;
254  format = other.format;
255  size = other.size;
256  data_padding = other.data_padding;
257  return *this;
258  }
259 
260  friend bool operator==(const layout& lhs, const layout& rhs)
261  {
262  return lhs.data_type == rhs.data_type
263  && lhs.format == rhs.format
264  && lhs.size == rhs.size
265  && lhs.data_padding == rhs.data_padding;
266  }
267 
268  friend bool operator!=(const layout& lhs, const layout& rhs)
269  {
270  return !(lhs == rhs);
271  }
272 
273  friend bool operator<(const layout& lhs, const layout& rhs)
274  {
275  if (lhs.data_type != rhs.data_type)
276  return (lhs.data_type < rhs.data_type);
277  if (lhs.format != rhs.format)
278  return (lhs.format < rhs.format);
279  if (lhs.size < rhs.size)
280  return (lhs.size < rhs.size);
281  return (lhs.data_padding < rhs.data_padding);
282  }
283 
285  size_t count() const { return size.count(); }
286 
289  {
291  }
292 
293  tensor get_pitches() const
294  {
295  auto sizes = get_buffer_size().sizes(format);
296  std::vector<tensor::value_type> pitches(sizes.size(), tensor::value_type(1));
297  std::partial_sum(sizes.rbegin(), sizes.rend() - 1, pitches.rbegin() + 1, std::multiplies<tensor::value_type>());
298  return{ format, pitches };
299  }
300 
301  // @brief Calculates position within buffer of the data element pointed by the provided tensor.
302  // element == { 0,0,0,0 } means first no-padding (i.e. data) element
    // @brief Calculates position within buffer of the data element pointed by the provided tensor.
    // element == { 0,0,0,0 } means first no-padding (i.e. data) element
    size_t get_linear_offset(tensor element = { 0,0,0,0 }) const
    {
        auto pitches = get_pitches();
        auto l_padd = data_padding.lower_size();
        auto u_padd = data_padding.upper_size();

        // Negative coordinates may address the lower-padding area, positive ones may
        // reach into the upper-padding area; anything beyond that lies outside the
        // allocated buffer and is rejected.
        if ((element.batch[0] < 0 && -element.batch[0] > l_padd.batch[0]) ||
            (element.feature[0] < 0 && -element.feature[0] > l_padd.feature[0]) ||
            (element.spatial[0] < 0 && -element.spatial[0] > l_padd.spatial[0]) ||
            (element.spatial[1] < 0 && -element.spatial[1] > l_padd.spatial[1]) ||
            (element.batch[0] >= size.batch[0] + u_padd.batch[0]) ||
            (element.feature[0] >= size.feature[0] + u_padd.feature[0]) ||
            (element.spatial[0] >= size.spatial[0] + u_padd.spatial[0]) ||
            (element.spatial[1] >= size.spatial[1] + u_padd.spatial[1]))
            throw std::invalid_argument("Requested to calculate linear offset for an element which lies outside of the buffer range.");

        // Shift each coordinate by the lower padding (so { 0,0,0,0 } lands on the
        // first data element) and take the dot product with the per-dimension pitches.
        size_t linear_offset =
            static_cast<size_t>(element.batch[0] + l_padd.batch[0]) * static_cast<size_t>(pitches.batch[0]) +
            static_cast<size_t>(element.feature[0] + l_padd.feature[0]) * static_cast<size_t>(pitches.feature[0]) +
            static_cast<size_t>(element.spatial[0] + l_padd.spatial[0]) * static_cast<size_t>(pitches.spatial[0]) +
            static_cast<size_t>(element.spatial[1] + l_padd.spatial[1]) * static_cast<size_t>(pitches.spatial[1]);

        return linear_offset;
    }
327 
    /// Get aligned linear size calculated as multiplication of all elements.
    /// Block-based formats require certain dimensions to be rounded up to the
    /// block size before the element count is computed.
    size_t get_linear_size() const
    {
        auto sizes = get_buffer_size().sizes();
        // os_iyx_osv16: output-slice dimension aligned to blocks of 16.
        if (this->format == cldnn::format::os_iyx_osv16 && !is_aligned_to(sizes[0], 16))
        {
            sizes[0] = align_to(sizes[0], 16);
        }
        // bs_xs_xsv8_bsv8: batch and X aligned to 8.
        else if (this->format == cldnn::format::bs_xs_xsv8_bsv8 && !(is_aligned_to(sizes[0], 8) && is_aligned_to(sizes[2], 8)))
        {
            sizes[0] = align_to(sizes[0], 8);
            sizes[2] = align_to(sizes[2], 8);
        }
        // bs_xs_xsv8_bsv16: batch aligned to 16, X aligned to 8.
        else if (this->format == cldnn::format::bs_xs_xsv8_bsv16 && !(is_aligned_to(sizes[0], 16) && is_aligned_to(sizes[2], 8)))
        {
            sizes[0] = align_to(sizes[0], 16);
            sizes[2] = align_to(sizes[2], 8);
        }
        // bs_x_bsv16: batch aligned to 16.
        else if (this->format == cldnn::format::bs_x_bsv16 && !is_aligned_to(sizes[0], 16))
        {
            sizes[0] = align_to(sizes[0], 16);
        }
        // bf8_xy16: sizes[1] aligned to 8; the sizes[2]*sizes[3] product is aligned
        // to 16 and folded into sizes[3], with sizes[2] collapsed to 1 so the final
        // product below stays correct.
        else if (this->format == cldnn::format::bf8_xy16 && !(is_aligned_to(sizes[1], 8) && is_aligned_to(sizes[2] * sizes[3], 16)))
        {
            sizes[1] = align_to(sizes[1], 8);
            sizes[3] = align_to(sizes[2]*sizes[3], 16);
            sizes[2] = 1;
        }
        return std::accumulate(
            sizes.begin(),
            sizes.end(),
            static_cast<size_t>(1),
            std::multiplies<size_t>()
        );
    }
363 
365  layout with_padding(padding const& padd) const
366  {
367  layout ret = *this;
368  ret.data_padding = padd;
369  return ret;
370  }
371 
374 
377 
380 
383 
    /// Number of bytes needed to store this layout (element size times aligned linear size).
    size_t bytes_count() const { return data_type_traits::size_of(data_type) * get_linear_size(); }

    /// @brief True when this layout has exactly the given data type and format.
    bool has_fused_format(data_types const& dt, cldnn::format const& fmt) const
    {
        return (data_type == dt && format == fmt);
    }

    /// @brief Single value combining this layout's data_type and format (see @ref fuse);
    /// usable in 'case' statements.
    auto fused_format() const -> decltype(fuse(data_type, format))
    {
        return fuse(data_type, format);
    }
396 };
397 
398 
401 }
Not supported in current HW.
mutable_array_ref< value_type > feature
Feature maps.
Definition: tensor.hpp:266
mutable_array_ref< value_type > raw
Raw representation of all dimensions.
Definition: tensor.hpp:264
padding()
Constructs "zero-sized" padding.
Definition: layout.hpp:154
int32_t format
Memory format (cldnn_format_type)
Definition: cldnn.h:298
Represents data padding information.
Definition: layout.hpp:125
layout(const cldnn_layout &other)
Construct C++ layout based on C API cldnn_layout.
Definition: layout.hpp:234
layout(data_types data_type, cldnn::format fmt, tensor size, padding apadding=padding())
Constructs layout based on data_type and size information described by tensor.
Definition: layout.hpp:226
size_t count() const
Returns tensor elements count calculated as multiplication of all elements.
Definition: tensor.hpp:564
tensor get_buffer_size() const
Layout size with padding included.
Definition: layout.hpp:288
Converts data_types to C++ type.
Definition: layout.hpp:50
static tensor max(tensor const &lhs, tensor const &rhs)
Returns a tensor containing values maximum from lhs and rhs.
Definition: tensor.hpp:689
mutable_array_ref< value_type > batch
Batch dimensions.
Definition: tensor.hpp:265
N-dimensional vector. Mostly used to represent memory size.
Definition: tensor.hpp:256
float filling_value() const
Filling value for padding area.
Definition: layout.hpp:128
Helper class to identify key properties for data_types.
Definition: layout.hpp:59
tensor upper_size() const
Gets upper padding sizes. For spatials, it means size of right (X) and bottom (Y) padding...
Definition: layout.hpp:136
size_t count() const
Number of elements to be stored in this memory layout.
Definition: layout.hpp:285
padding(const cldnn_padding &other)
Copy construction.
Definition: layout.hpp:157
size_t get_linear_size() const
Get aligned linear size calculated as multiplication of all elements.
Definition: layout.hpp:329
data_types data_type
Data type stored in memory (see. data_types)
Definition: layout.hpp:373
size_t data_type
data type (cldnn_data_type) stored in memory.
Definition: cldnn.h:297
mutable_array_ref< value_type > spatial
Spatial dimensions.
Definition: tensor.hpp:267
tensor size
The size of the memory (excluding padding)
Definition: layout.hpp:379
Memory layout description.
Definition: cldnn.h:295
data_types
Possible data types could be stored in memory.
Definition: layout.hpp:32
padding(const std::vector< tensor::value_type > &sizes, float filling_value=0.0f)
Constructs symmetric padding.
Definition: layout.hpp:149
int32_t value_type
Values type stored in tensor.
Definition: tensor.hpp:262
tensor add(const tensor &rhs) const
Returns a tensor with all elements added by appropriate elements of rhs.
Definition: tensor.hpp:504
padding data_padding
Explicit padding of the memory.
Definition: layout.hpp:382
Represents memory formats (orders). In CNN most of data is described as 4 dimensional blocks...
Definition: tensor.hpp:75
Converts C++ type to data_types .
Definition: layout.hpp:41
Padding information.
Definition: cldnn.h:275
cldnn::format format
Format stored in memory (see. format)
Definition: layout.hpp:376
size_t bytes_count() const
Number of bytes needed to store this layout.
Definition: layout.hpp:385
std::vector< value_type > sizes(cldnn::format fmt) const
Returns a vector of tensors values, ordered regarding to format.
Definition: tensor.hpp:537
constexpr auto fuse(data_types dt, cldnn::format::type fmt) -> decltype(static_cast< std::underlying_type< data_types >::type >(dt)|static_cast< std::underlying_type< format::type >::type >(fmt))
Helper function to get both data_types and format::type in a single, unique value. Usable in 'case' statement.
Definition: layout.hpp:110
tensor lower_size() const
Gets lower padding sizes. For spatials, it means size of left (X) and top (Y) padding.
Definition: layout.hpp:132
layout with_padding(padding const &padd) const
Modify padding in layout.
Definition: layout.hpp:365
bool data_type_match(data_types data_type)
Helper function to check if C++ type matches data_type.
Definition: layout.hpp:104
Describes memory layout.
Definition: layout.hpp:223
N-dimensional vector. Mostly used to represent memory size.
Definition: cldnn.h:266
padding(const std::vector< tensor::value_type > &lower_sizes, const std::vector< tensor::value_type > &upper_sizes, float filling_value=0.0f)
Definition: layout.hpp:142