#ifndef DOXYGEN_SHOULD_SKIP_THIS
template<> struct type_to_data_type<half_t> { static const data_types value = data_types::f16; };
template<> struct type_to_data_type<float>  { static const data_types value = data_types::f32; };
#endif

#ifndef DOXYGEN_SHOULD_SKIP_THIS
template<> struct data_type_to_type<data_types::f32> { typedef float type; };
#endif
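These trait specializations give a compile-time, two-way mapping between C++ element types and the data_types enumeration. A minimal illustration, assuming <type_traits> is available and using only the specializations shown above:

    static_assert(cldnn::type_to_data_type<float>::value == cldnn::data_types::f32,
                  "float maps to data_types::f32");
    static_assert(std::is_same<cldnn::data_type_to_type<cldnn::data_types::f32>::type, float>::value,
                  "data_types::f32 maps back to float");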
/// Helper class to identify key properties for data_types.
struct data_type_traits
{
    static size_t size_of(data_types data_type)
    {
        // element size in bytes is encoded in the low bits of the enum value
        return (static_cast<uint32_t>(data_type) & ~(CLDNN_FLOAT_TYPE_MASK | CLDNN_UINT_TYPE_MASK));
    }

    static bool is_floating_point(data_types data_type)
    {
        return (static_cast<uint32_t>(data_type) & CLDNN_FLOAT_TYPE_MASK) != 0;
    }

    static size_t align_of(data_types data_type)
    {
        switch (data_type) { /* per-type cases elided */ default: return size_t(1); }
    }

    static std::string name(data_types data_type)
    {
        switch (data_type) { /* per-type cases elided */ default: return std::string("invalid data type: " + std::to_string((int)data_type)); }
    }
};
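Because the enumerator values encode the element size and a float-type flag directly, the traits helpers are simple bit operations. A small usage sketch, assuming the size_of helper as reconstructed above; the staging-buffer scenario is hypothetical:

    // Compute the byte size of a host-side staging buffer for element_count
    // elements of a runtime-chosen data type: size_of() reads the size encoded
    // in the enum value itself (4 for f32, 2 for f16).
    inline size_t staging_buffer_bytes(cldnn::data_types dt, size_t element_count)
    {
        return cldnn::data_type_traits::size_of(dt) * element_count;
    }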
/// Helper function to check if C++ type matches @p data_type.
template <typename T>
bool data_type_match(data_types data_type)
{
    return data_type == type_to_data_type<T>::value;
}

/// Helper function to get both data_types and format::type in a single, unique value.
/// Usable in 'case' statement.
constexpr auto fuse(data_types dt, cldnn::format::type fmt) ->
    decltype(static_cast<std::underlying_type<data_types>::type>(dt) |
             static_cast<std::underlying_type<format::type>::type>(fmt))
{
    using dt_type = std::underlying_type<data_types>::type;
    using fmt_type = std::underlying_type<cldnn::format::type>::type;
    using fmt_narrow_type = int16_t;

    return static_cast<fmt_type>(fmt) <= std::numeric_limits<fmt_narrow_type>::max() &&
           static_cast<dt_type>(dt) <= (std::numeric_limits<dt_type>::max() >> (sizeof(fmt_narrow_type) * 8))
        ? (static_cast<dt_type>(dt) << (sizeof(fmt_narrow_type) * 8)) |
          (static_cast<fmt_type>(fmt) >= 0 ? static_cast<fmt_narrow_type>(fmt) : static_cast<fmt_narrow_type>(-1))
        : throw std::invalid_argument("data_type and/or format values are too big to be fused into single value");
}
/// Constructs padding from separate lower and upper size vectors.
padding(const std::vector<tensor::value_type>& lower_sizes,
        const std::vector<tensor::value_type>& upper_sizes,
        float filling_value = 0.0f)
    : _lower_size(to_abs(lower_sizes), 0)
    , _upper_size(to_abs(upper_sizes), 0)
    , _filling_value(filling_value)
{}
// ... (the upper size is converted to the C API cldnn_tensor here)
        static_cast<cldnn_tensor>(_upper_size),
// ...

/// Returns true if any padding size is non-zero.
explicit operator bool() const
{
    return std::any_of(_lower_size.raw.begin(), _lower_size.raw.end(), [](const tensor::value_type& el) { return el != 0; }) ||
           std::any_of(_upper_size.raw.begin(), _upper_size.raw.end(), [](const tensor::value_type& el) { return el != 0; });
}
friend bool operator==(const padding& lhs, const padding& rhs)
{
    return lhs._lower_size == rhs._lower_size
        && lhs._upper_size == rhs._upper_size
        && lhs._filling_value == rhs._filling_value;
}

friend bool operator!=(const padding& lhs, const padding& rhs)
{
    return !(lhs == rhs);
}

friend bool operator<(const padding& lhs, const padding& rhs)
{
    if (lhs._filling_value != rhs._filling_value)
        return (lhs._filling_value < rhs._filling_value);
    if (lhs._lower_size != rhs._lower_size)
        return (lhs._lower_size < rhs._lower_size);
    return (lhs._upper_size < rhs._upper_size);
}
// Combining two paddings: element-wise maximum of the lower and of the upper sizes.
auto lower = tensor::max(lhs.lower_size(), rhs.lower_size());
auto upper = tensor::max(lhs.upper_size(), rhs.upper_size());
// ...
float _filling_value;

static std::vector<tensor::value_type> to_abs(const std::vector<tensor::value_type>& sizes)
{
    std::vector<tensor::value_type> result;
    result.reserve(sizes.size());
    std::transform(sizes.cbegin(), sizes.cend(), std::back_inserter(result), [](const tensor::value_type& el) { return abs(el); });
    return result; // padding sizes are stored as absolute values
}
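A short sketch of how the padding class is typically constructed and queried; the {b, f, x, y} ordering of the size vectors is an assumption here, and the sizes are illustrative only:

    cldnn::padding symmetric({ 0, 0, 2, 2 });        // 2-element border on both spatial dims, filled with 0.0f
    cldnn::padding asymmetric({ 0, 0, 1, 1 },        // lower (left/top) sizes
                              { 0, 0, 3, 3 });       // upper (right/bottom) sizes

    if (asymmetric)                                  // explicit operator bool(): any non-zero size?
    {
        auto lower = asymmetric.lower_size();        // tensor of lower padding sizes
        auto upper = asymmetric.upper_size();        // tensor of upper padding sizes
    }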
friend bool operator==(const layout& lhs, const layout& rhs)
{
    return lhs.data_type == rhs.data_type
        && lhs.format == rhs.format
        && lhs.size == rhs.size
        && lhs.data_padding == rhs.data_padding;
}

friend bool operator!=(const layout& lhs, const layout& rhs)
{
    return !(lhs == rhs);
}

friend bool operator<(const layout& lhs, const layout& rhs)
{
    if (lhs.data_type != rhs.data_type)
        return (lhs.data_type < rhs.data_type);
    if (lhs.format != rhs.format)
        return (lhs.format < rhs.format);
    if (lhs.size < rhs.size)
        return (lhs.size < rhs.size);
    return (lhs.data_padding < rhs.data_padding);
}
/// Returns a tensor of element pitches: the distance, in elements, between consecutive
/// indices of each dimension of the padded buffer.
tensor get_pitches() const
{
    auto sizes = get_buffer_size().sizes(format);
    std::vector<tensor::value_type> pitches(sizes.size(), tensor::value_type(1));
    std::partial_sum(sizes.rbegin(), sizes.rend() - 1, pitches.rbegin() + 1, std::multiplies<tensor::value_type>());
    return{ format, pitches };
}

size_t get_linear_offset(tensor element = { 0,0,0,0 }) const
{
    auto pitches = get_pitches();
    auto l_padd = data_padding.lower_size();
    auto u_padd = data_padding.upper_size();

    if ((element.batch[0] < 0 && -element.batch[0] > l_padd.batch[0]) ||
        (element.feature[0] < 0 && -element.feature[0] > l_padd.feature[0]) ||
        (element.spatial[0] < 0 && -element.spatial[0] > l_padd.spatial[0]) ||
        (element.spatial[1] < 0 && -element.spatial[1] > l_padd.spatial[1]) ||
        (element.batch[0] >= size.batch[0] + u_padd.batch[0]) ||
        (element.feature[0] >= size.feature[0] + u_padd.feature[0]) ||
        (element.spatial[0] >= size.spatial[0] + u_padd.spatial[0]) ||
        (element.spatial[1] >= size.spatial[1] + u_padd.spatial[1]))
        throw std::invalid_argument("Requested to calculate linear offset for an element which lies outside of the buffer range.");

    size_t linear_offset =
        static_cast<size_t>(element.batch[0] + l_padd.batch[0]) * static_cast<size_t>(pitches.batch[0]) +
        static_cast<size_t>(element.feature[0] + l_padd.feature[0]) * static_cast<size_t>(pitches.feature[0]) +
        static_cast<size_t>(element.spatial[0] + l_padd.spatial[0]) * static_cast<size_t>(pitches.spatial[0]) +
        static_cast<size_t>(element.spatial[1] + l_padd.spatial[1]) * static_cast<size_t>(pitches.spatial[1]);

    return linear_offset;
}
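To make the pitch and padding arithmetic concrete, here is a worked example. It assumes the (batch, feature, x, y) argument order for tensor and the documented layout/padding constructors; the spatial sizes and coordinates are kept equal so the result does not depend on that ordering assumption:

    // f32 data, bfyx order, logical size b=1, f=2 and an 8 x 8 spatial area,
    // with a one-element border on both spatial dimensions.
    cldnn::layout padded_layout(
        cldnn::data_types::f32,
        cldnn::format::bfyx,
        cldnn::tensor(1, 2, 8, 8),
        cldnn::padding({ 0, 0, 1, 1 }));

    // The padded buffer is 1 x 2 x 10 x 10, so the pitches (in elements) are
    // 1 for the innermost spatial dim, 10 for the other spatial dim, 100 per
    // feature map and 200 per batch. For the element (b=0, f=1, x=2, y=2):
    //   (0+0)*200 + (1+0)*100 + (2+1)*10 + (2+1)*1 = 133
    size_t offset = padded_layout.get_linear_offset(cldnn::tensor(0, 1, 2, 2));  // == 133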
/// Get aligned linear size calculated as multiplication of all elements.
size_t get_linear_size() const
{
    // ... (the padded buffer sizes are gathered first; for some block-based
    //      formats selected dimensions are then rounded up)
    sizes[0] = align_to(sizes[0], 16);
    // ...
    sizes[0] = align_to(sizes[0], 8);
    sizes[2] = align_to(sizes[2], 8);
    // ...
    sizes[0] = align_to(sizes[0], 16);
    sizes[2] = align_to(sizes[2], 8);
    // ...
    sizes[0] = align_to(sizes[0], 16);
    // ...
    sizes[1] = align_to(sizes[1], 8);
    sizes[3] = align_to(sizes[2] * sizes[3], 16);
    // ...
    return std::accumulate(sizes.begin(), sizes.end(),
                           static_cast<size_t>(1),
                           std::multiplies<size_t>());
}
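align_to() is defined elsewhere in the API; for reading the branches above it is enough to know that it rounds a dimension up to the next multiple of the given alignment. A hypothetical stand-in with that behaviour, not the library's own definition:

    // Assumed semantics only: align_to(20, 16) == 32, align_to(32, 16) == 32.
    inline size_t align_to(size_t value, size_t align)
    {
        return (value % align == 0) ? value : value - value % align + align;
    }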
mutable_array_ref< value_type > feature
Feature maps.
mutable_array_ref< value_type > raw
Raw representation of all dimensions.
padding()
Constructs "zero-sized" padding.
int32_t format
Memory format (cldnn_format_type)
Represents data padding information.
layout(const cldnn_layout &other)
Construct C++ layout based on C API cldnn_layout.
layout(data_types data_type, cldnn::format fmt, tensor size, padding apadding=padding())
Constructs layout based on data_type and size information described by tensor.
size_t count() const
Returns the number of tensor elements, calculated as the product of all dimensions.
tensor get_buffer_size() const
Layout size with padding included.
Converts data_types to C++ type.
static tensor max(tensor const &lhs, tensor const &rhs)
Returns a tensor containing the element-wise maximum of lhs and rhs.
mutable_array_ref< value_type > batch
Batch dimensions.
N-dimensional vector. Mostly used to represent memory size.
float filling_value() const
Filling value for padding area.
Helper class to identify key properties for data_types.
tensor upper_size() const
Gets upper padding sizes. For spatials, it means size of right (X) and bottom (Y) padding...
size_t count() const
Number of elements to be stored in this memory layout.
padding(const cldnn_padding &other)
Copy construction.
size_t get_linear_size() const
Get aligned linear size calculated as multiplication of all elements.
data_types data_type
Data type stored in memory (see data_types)
size_t data_type
Data type (cldnn_data_type) stored in memory.
mutable_array_ref< value_type > spatial
Spatial dimensions.
tensor size
The size of the memory (excluding padding)
Memory layout description.
data_types
Possible data types that can be stored in memory.
padding(const std::vector< tensor::value_type > &sizes, float filling_value=0.0f)
Constructs symmetric padding.
int32_t value_type
Values type stored in tensor.
tensor add(const tensor &rhs) const
Returns a tensor with each element increased by the corresponding element of rhs.
padding data_padding
Explicit padding of the memory.
Converts C++ type to data_types.
cldnn::format format
Format stored in memory (see format)
size_t bytes_count() const
Number of bytes needed to store this layout.
std::vector< value_type > sizes(cldnn::format fmt) const
Returns a vector of tensor values, ordered according to the given format.
constexpr auto fuse(data_types dt, cldnn::format::type fmt) -> decltype(static_cast< std::underlying_type< data_types >::type >(dt)|static_cast< std::underlying_type< format::type >::type >(fmt))
Helper function to get both data_types and format::type in a single, unique value. Usable in a 'case' statement.
tensor lower_size() const
Gets lower padding sizes. For spatials, it means size of left (X) and top (Y) padding.
layout with_padding(padding const &padd) const
Modify padding in layout.
bool data_type_match(data_types data_type)
Helper function to check if C++ type matches data_type.
padding(const std::vector< tensor::value_type > &lower_sizes, const std::vector< tensor::value_type > &upper_sizes, float filling_value=0.0f)
Constructs asymmetric padding from separate lower and upper size vectors.
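Putting the documented pieces together, a typical construction of a layout might look like the following sketch; the (batch, feature, x, y) tensor argument order and the vector ordering for padding are assumptions:

    // A 1 x 3 x 224 x 224 fp32 image blob in bfyx order.
    cldnn::layout img_layout(
        cldnn::data_types::f32,
        cldnn::format::bfyx,
        cldnn::tensor(1, 3, 224, 224));

    size_t elements = img_layout.count();            // 1 * 3 * 224 * 224 = 150528
    size_t bytes    = img_layout.bytes_count();      // 4 bytes per f32 element = 602112

    // The same layout with a one-element border around the spatial area:
    cldnn::layout padded = img_layout.with_padding(cldnn::padding({ 0, 0, 1, 1 }));
    cldnn::tensor buffer = padded.get_buffer_size(); // 1 x 3 x 226 x 226 including padding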