Skip to content

Commit ba1f152

Browse files
[CLANG_FORMAT] Force west const (#29944)
It makes sense to have it aligned across the whole product. The change is not that big. This also helps clang-tidy apply and format fixes in a consistent style.
1 parent 706c472 commit ba1f152

File tree

44 files changed

+123
-122
lines changed

Some content is hidden

Large Commits have some content hidden by default. Use the searchbox below for content that may be hidden.

44 files changed

+123
-122
lines changed

src/.clang-format

+1
Original file line numberDiff line numberDiff line change
@@ -23,6 +23,7 @@ DerivePointerAlignment: false
2323
FixNamespaceComments: true
2424
IndentCaseLabels: false
2525
IndentPPDirectives: AfterHash
26+
QualifierAlignment: Left
2627
ForEachMacros:
2728
- foreach
2829
- FOREACH_CHILD

src/bindings/c/src/ov_common.cpp

+1-1
Original file line numberDiff line numberDiff line change
@@ -7,7 +7,7 @@
77
* @variable global value for error info.
88
* Don't change its order.
99
*/
10-
char const* error_infos[] = {"success",
10+
const char* error_infos[] = {"success",
1111
"general error",
1212
"it's not implement",
1313
"failed to network",

src/bindings/c/tests/ov_remote_context_test.cpp

+2-2
Original file line numberDiff line numberDiff line change
@@ -47,11 +47,11 @@ class ov_remote_context_ocl : public ov_capi_test_base {
4747
std::vector<cl_platform_id> platform_ids(n);
4848
err = clGetPlatformIDs(n, platform_ids.data(), NULL);
4949

50-
for (auto const& id : platform_ids) {
50+
for (const auto& id : platform_ids) {
5151
cl::Platform platform = cl::Platform(id);
5252
std::vector<cl::Device> devices;
5353
platform.getDevices(CL_DEVICE_TYPE_GPU, &devices);
54-
for (auto const& d : devices) {
54+
for (const auto& d : devices) {
5555
if (refVendorID == d.getInfo<CL_DEVICE_VENDOR_ID>()) {
5656
cl_device = d;
5757
cl_context = cl::Context(cl_device);

src/bindings/js/node/src/async_reader.cpp

+1-1
Original file line numberDiff line numberDiff line change
@@ -22,7 +22,7 @@ void ReaderWorker::OnOK() {
2222
_deferred.Resolve(model);
2323
}
2424

25-
void ReaderWorker::OnError(Napi::Error const& error) {
25+
void ReaderWorker::OnError(const Napi::Error& error) {
2626
_deferred.Reject(error.Value());
2727
}
2828

src/common/conditional_compilation/include/openvino/cc/factory.h

+2-2
Original file line numberDiff line numberDiff line change
@@ -20,8 +20,8 @@ class Factory;
2020

2121
template <typename Key, typename T, typename... Args>
2222
class Factory<Key, T(Args...)> {
23-
Factory(Factory const&) = delete;
24-
Factory& operator=(Factory const&) = delete;
23+
Factory(const Factory&) = delete;
24+
Factory& operator=(const Factory&) = delete;
2525

2626
public:
2727
using builder_t = std::function<T(Args...)>;

src/common/conditional_compilation/include/openvino/cc/selective_build.h

+2-2
Original file line numberDiff line numberDiff line change
@@ -132,7 +132,7 @@ case_wrapper<C, T> make_case_wrapper(C&& val, const char* name) {
132132
}
133133

134134
template <openvino::itt::domain_t (*domain)(), template <typename...> class Fn, typename Ctx, typename T, typename Case>
135-
bool match(char const* region, Ctx&& ctx, T&& val, Case&& cs) {
135+
bool match(const char* region, Ctx&& ctx, T&& val, Case&& cs) {
136136
const bool is_matched = val == cs.value;
137137
if (is_matched) {
138138
openvino::itt::ScopedTask<domain> task(openvino::itt::handle(std::string(region) + "$" + cs.name));
@@ -148,7 +148,7 @@ template <openvino::itt::domain_t (*domain)(),
148148
typename T,
149149
typename Case,
150150
typename... Cases>
151-
bool match(char const* region, Ctx&& ctx, T&& val, Case&& cs, Cases&&... cases) {
151+
bool match(const char* region, Ctx&& ctx, T&& val, Case&& cs, Cases&&... cases) {
152152
if (match<domain, Fn>(region, std::forward<Ctx>(ctx), std::forward<T>(val), std::forward<Case>(cs)))
153153
return true;
154154
return match<domain, Fn>(region, std::forward<Ctx>(ctx), std::forward<T>(val), std::forward<Cases>(cases)...);

src/common/itt/include/openvino/itt.hpp

+4-4
Original file line numberDiff line numberDiff line change
@@ -41,8 +41,8 @@ typedef struct handle_ {
4141
* @cond
4242
*/
4343
namespace internal {
44-
domain_t domain(char const* name);
45-
handle_t handle(char const* name);
44+
domain_t domain(const char* name);
45+
handle_t handle(const char* name);
4646
void taskBegin(domain_t d, handle_t t);
4747
void taskEnd(domain_t d);
4848
void threadName(const char* name);
@@ -65,7 +65,7 @@ inline void threadName(const std::string& name) {
6565
internal::threadName(name.c_str());
6666
}
6767

68-
inline handle_t handle(char const* name) {
68+
inline handle_t handle(const char* name) {
6969
return internal::handle(name);
7070
}
7171

@@ -81,7 +81,7 @@ inline handle_t handle(const std::string& name) {
8181
* @param name [in] The annotation name
8282
*/
8383
template <typename Tag>
84-
handle_t handle(char const* name) {
84+
handle_t handle(const char* name) {
8585
static auto h = internal::handle(name);
8686
return h;
8787
}

src/common/itt/src/itt.cpp

+4-4
Original file line numberDiff line numberDiff line change
@@ -24,11 +24,11 @@ static size_t callStackDepth() {
2424

2525
static thread_local uint32_t call_stack_depth = 0;
2626

27-
domain_t domain(char const* name) {
27+
domain_t domain(const char* name) {
2828
return reinterpret_cast<domain_t>(__itt_domain_create(name));
2929
}
3030

31-
handle_t handle(char const* name) {
31+
handle_t handle(const char* name) {
3232
return reinterpret_cast<handle_t>(__itt_string_handle_create(name));
3333
}
3434

@@ -51,11 +51,11 @@ void threadName(const char* name) {
5151

5252
#else
5353

54-
domain_t domain(char const*) {
54+
domain_t domain(const char*) {
5555
return nullptr;
5656
}
5757

58-
handle_t handle(char const*) {
58+
handle_t handle(const char*) {
5959
return nullptr;
6060
}
6161

src/common/util/include/openvino/util/common_util.hpp

+1-1
Original file line numberDiff line numberDiff line change
@@ -150,7 +150,7 @@ bool contains(const R& container, const V& value) {
150150
* @return result of multiplication
151151
*/
152152
template <typename T, typename A>
153-
T product(std::vector<T, A> const& vec) {
153+
T product(const std::vector<T, A>& vec) {
154154
return vec.empty() ? T{0} : std::accumulate(vec.begin(), vec.end(), T{1}, std::multiplies<T>());
155155
}
156156

src/core/reference/include/openvino/reference/autobroadcast_binop.hpp

+5-5
Original file line numberDiff line numberDiff line change
@@ -126,7 +126,7 @@ void numpy_broadcast_binop(const T* arg0,
126126
// [ 3, 2, 6]
127127
using namespace internal;
128128

129-
size_t const shape_rank = std::max(arg0_shape.size(), arg1_shape.size()) + 1;
129+
const size_t shape_rank = std::max(arg0_shape.size(), arg1_shape.size()) + 1;
130130

131131
// TODO: Use compiler-specific alloca() or variable-length array
132132
std::vector<size_t> tmp(shape_rank * 2);
@@ -137,16 +137,16 @@ void numpy_broadcast_binop(const T* arg0,
137137
row_major_strides(arg0_shape, strides0, shape_rank);
138138
row_major_strides(arg1_shape, strides1, shape_rank);
139139

140-
size_t const padding0 = shape_rank - arg0_shape.size();
141-
size_t const padding1 = shape_rank - arg1_shape.size();
140+
const size_t padding0 = shape_rank - arg0_shape.size();
141+
const size_t padding1 = shape_rank - arg1_shape.size();
142142

143143
Shape output_shape(shape_rank, 0);
144144

145145
size_t axis = 0;
146146

147147
for (size_t i = 0; i < shape_rank; ++i) {
148-
auto const dim0 = value_with_padding_or(arg0_shape, padding0, i, 1);
149-
auto const dim1 = value_with_padding_or(arg1_shape, padding1, i, 1);
148+
const auto dim0 = value_with_padding_or(arg0_shape, padding0, i, 1);
149+
const auto dim1 = value_with_padding_or(arg1_shape, padding1, i, 1);
150150

151151
output_shape[i] = std::max(dim0, dim1);
152152

src/core/reference/src/op/einsum.cpp

+1-1
Original file line numberDiff line numberDiff line change
@@ -1040,7 +1040,7 @@ void einsum_impl(const ov::TensorVector& inputs, ov::TensorVector& outputs, cons
10401040
fix_inputs_with_0d_ellipsis<T>(int_inputs, input_subscripts, output_subscript);
10411041

10421042
// contract inputs by Einsum until just one is remained
1043-
for (auto const& inds_pair : einsum_path) {
1043+
for (const auto& inds_pair : einsum_path) {
10441044
contract_two_inputs<T>(int_inputs, input_subscripts, output_subscript, inds_pair.first, inds_pair.second);
10451045
}
10461046

src/core/reference/src/utils/coordinate_transform.cpp

+1-1
Original file line numberDiff line numberDiff line change
@@ -35,7 +35,7 @@ CoordinateIterator::CoordinateIterator(const Shape& target_shape, bool is_end)
3535
m_coordinate(target_shape.size(), 0) {
3636
// The case where we have a zero-length axis is a bit special, in that
3737
// the iterator always starts out of bounds.
38-
bool const empty = std::find(target_shape.begin(), target_shape.end(), 0) != target_shape.end();
38+
const bool empty = std::find(target_shape.begin(), target_shape.end(), 0) != target_shape.end();
3939

4040
m_oob = is_end || empty;
4141
}

src/core/shape_inference/include/einsum_shape_inference.hpp

+3-3
Original file line numberDiff line numberDiff line change
@@ -49,7 +49,7 @@ std::vector<TRShape> shape_infer(const Einsum* op, const std::vector<T>& input_s
4949
"corresponding input subscript.");
5050
std::unordered_map<std::string, TRShape> single_input_label_to_shape;
5151
for (size_t label_ind = 0, dim_ind = 0; label_ind < labels.size() && dim_ind < input_rank; ++label_ind) {
52-
auto const& label = labels[label_ind];
52+
const auto& label = labels[label_ind];
5353
if (label.compare("...") == 0) {
5454
size_t num_broadcasted_dims = input_rank - labels.size() + 1;
5555
auto current_sub_pshape = T(std::vector<DimType>(pshape.begin() + dim_ind,
@@ -100,7 +100,7 @@ std::vector<TRShape> shape_infer(const Einsum* op, const std::vector<T>& input_s
100100
// Shape has dynamic rank and ellipsis
101101
return {pshape};
102102
}
103-
for (auto const& label : labels) {
103+
for (const auto& label : labels) {
104104
if (label_to_shape.find(label) == label_to_shape.end()) {
105105
label_to_shape[label] = ov::PartialShape{Dimension::dynamic()};
106106
}
@@ -112,7 +112,7 @@ std::vector<TRShape> shape_infer(const Einsum* op, const std::vector<T>& input_s
112112
auto output_shapes = std::vector<TRShape>(1);
113113
auto& output_shape = output_shapes[0];
114114

115-
for (auto const& output_label : output_labels) {
115+
for (const auto& output_label : output_labels) {
116116
if (output_label == "..." && label_to_shape.find(output_label) == label_to_shape.end()) {
117117
// Output labels may contain ellipsis that does not cover any dimensions.
118118
continue;

src/core/shape_inference/include/eye_shape_inference.hpp

+1-1
Original file line numberDiff line numberDiff line change
@@ -27,7 +27,7 @@ void check_1D_or_scalar_shape(const ov::op::v9::Eye* op, const T& input_shape, c
2727
} // namespace util
2828

2929
namespace eye {
30-
constexpr std::array<char const*, 4> shape_names{"'num_rows'", "'num_columns'", "'diagonal_index'", "'batch_shape'"};
30+
constexpr std::array<const char*, 4> shape_names{"'num_rows'", "'num_columns'", "'diagonal_index'", "'batch_shape'"};
3131
}
3232

3333
namespace v9 {

src/core/shape_inference/include/prior_box_shape_inference_util.hpp

+1-1
Original file line numberDiff line numberDiff line change
@@ -12,7 +12,7 @@
1212
namespace ov {
1313
namespace op {
1414
namespace prior_box {
15-
constexpr std::array<char const*, 2> input_names{"output size", "image"};
15+
constexpr std::array<const char*, 2> input_names{"output size", "image"};
1616

1717
namespace validate {
1818
inline std::vector<PartialShape> inputs_et(const Node* const op) {

src/core/shape_inference/include/slice_shape_inference.hpp

+1-1
Original file line numberDiff line numberDiff line change
@@ -15,7 +15,7 @@ namespace op {
1515

1616
namespace slice {
1717

18-
constexpr std::array<char const*, 4> shape_names{"start", "stop", "step", "axes"};
18+
constexpr std::array<const char*, 4> shape_names{"start", "stop", "step", "axes"};
1919

2020
struct AxesMap {
2121
bool is_valid{}; //!< Flag indicates current axes map has valid data (unique).

src/core/shape_inference/include/strided_slice_shape_inference.hpp

+1-1
Original file line numberDiff line numberDiff line change
@@ -20,7 +20,7 @@ std::vector<TRShape> shape_infer(const StridedSlice* op,
2020
const std::vector<T>& input_shapes,
2121
const ITensorAccessor& ta = make_tensor_accessor()) {
2222
using DimType = typename T::value_type;
23-
static constexpr std::array<char const*, 3> shape_names{"Begin", "End", "Strides"};
23+
static constexpr std::array<const char*, 3> shape_names{"Begin", "End", "Strides"};
2424

2525
NODE_VALIDATION_CHECK(op, (input_shapes.size() == 3 || input_shapes.size() == 4));
2626

src/core/src/op/batch_to_space.cpp

+1-1
Original file line numberDiff line numberDiff line change
@@ -72,7 +72,7 @@ bool batch_to_space_evaluate(TensorVector& outputs, const TensorVector& inputs)
7272

7373
auto data_shape = in.get_shape();
7474

75-
auto const block_values_size = shape_size(inputs[1].get_shape());
75+
const auto block_values_size = shape_size(inputs[1].get_shape());
7676

7777
const auto* block_values = inputs[1].data<int64_t>();
7878
const auto* crops_begin_values = inputs[2].data<int64_t>();

src/core/src/op/einsum.cpp

+2-2
Original file line numberDiff line numberDiff line change
@@ -120,8 +120,8 @@ void op::v7::Einsum::parse_equation(const std::string& equation,
120120
// equation is in implicit mode so recover output subscript
121121
output_subscript = "";
122122
for (size_t ind = 0; ind < input_subscripts.size(); ++ind) {
123-
auto const& input_subscript = input_subscripts[ind];
124-
for (auto const& label : extract_labels(input_subscript)) {
123+
const auto& input_subscript = input_subscripts[ind];
124+
for (const auto& label : extract_labels(input_subscript)) {
125125
if (label != ellipsis && (is_label_elsewhere(input_subscripts, label, {ind}) == false)) {
126126
output_subscript += label;
127127
}

src/core/src/pass/perf_counters.cpp

+1-1
Original file line numberDiff line numberDiff line change
@@ -5,7 +5,7 @@
55

66
namespace ov {
77
namespace pass {
8-
openvino::itt::handle_t PerfCounters::operator[](ov::Node::type_info_t const& type_inf) {
8+
openvino::itt::handle_t PerfCounters::operator[](const ov::Node::type_info_t& type_inf) {
99
std::lock_guard<std::mutex> guard(m_mutex);
1010
auto it = m_counters.find(&type_inf);
1111
if (it != m_counters.end())

src/core/src/pass/perf_counters.hpp

+4-4
Original file line numberDiff line numberDiff line change
@@ -12,16 +12,16 @@
1212
namespace ov {
1313
namespace pass {
1414
class PerfCounters {
15-
PerfCounters(PerfCounters const&) = delete;
16-
PerfCounters& operator=(PerfCounters const&) = delete;
15+
PerfCounters(const PerfCounters&) = delete;
16+
PerfCounters& operator=(const PerfCounters&) = delete;
1717

1818
public:
1919
PerfCounters() = default;
2020

21-
openvino::itt::handle_t operator[](ov::Node::type_info_t const& type_inf);
21+
openvino::itt::handle_t operator[](const ov::Node::type_info_t& type_inf);
2222

2323
private:
24-
using key = ov::Node::type_info_t const*;
24+
using key = const ov::Node::type_info_t*;
2525
using value = openvino::itt::handle_t;
2626
using counters_map = std::unordered_map<key, value>;
2727

src/core/src/pass/serialize.cpp

+2-2
Original file line numberDiff line numberDiff line change
@@ -88,7 +88,7 @@ class ConstantWriter {
8888
public:
8989
using FilePosition = int64_t;
9090
using HashValue = size_t;
91-
using ConstWritePositions = std::multimap<HashValue, std::pair<FilePosition, void const*>>;
91+
using ConstWritePositions = std::multimap<HashValue, std::pair<FilePosition, const void*>>;
9292

9393
ConstantWriter(std::ostream& bin_data, bool enable_compression = true)
9494
: m_binary_output(bin_data),
@@ -146,7 +146,7 @@ class ConstantWriter {
146146
if (!ptr_is_temporary) {
147147
// Since fp16_compressed data will be disposed at exit point and since we cannot reread it from the
148148
// ostream, we store pointer to the original uncompressed blob.
149-
m_hash_to_file_positions.insert({hash, {offset, static_cast<void const*>(ptr)}});
149+
m_hash_to_file_positions.insert({hash, {offset, static_cast<const void*>(ptr)}});
150150
}
151151
if (m_write_hash_value) {
152152
m_binary_output.write(reinterpret_cast<const char*>(&hash), sizeof(uint64_t));

src/core/tests/constant.cpp

+6-6
Original file line numberDiff line numberDiff line change
@@ -895,7 +895,7 @@ TEST(constant, uint2_string_broadcast) {
895895
}
896896

897897
TEST(constant, uint2_vector_less_than_single_byte) {
898-
auto const shape = Shape{3};
898+
const auto shape = Shape{3};
899899
const auto input = std::vector<uint8_t>{2, 3, 1};
900900

901901
op::v0::Constant c(element::u2, shape, input);
@@ -909,7 +909,7 @@ TEST(constant, uint2_vector_less_than_single_byte) {
909909
}
910910

911911
TEST(constant, uint2_vector_bigger_than_single_byte) {
912-
auto const shape = Shape{7};
912+
const auto shape = Shape{7};
913913
const auto input = std::vector<uint8_t>{2, 3, 1, 0, 1, 2, 0};
914914

915915
op::v0::Constant c(element::u2, shape, input);
@@ -1007,7 +1007,7 @@ TEST(constant, uint3_string_broadcast) {
10071007
}
10081008

10091009
TEST(constant, uint3_vector_less_than_one_storage_unit) {
1010-
auto const shape = Shape{3};
1010+
const auto shape = Shape{3};
10111011
const auto input = std::vector<uint8_t>{5, 3, 1};
10121012

10131013
op::v0::Constant c(element::u3, shape, input);
@@ -1023,7 +1023,7 @@ TEST(constant, uint3_vector_less_than_one_storage_unit) {
10231023
}
10241024

10251025
TEST(constant, uint3_vector_greater_than_one_storage_unit) {
1026-
auto const shape = Shape{10};
1026+
const auto shape = Shape{10};
10271027
const auto input = std::vector<uint8_t>{2, 3, 1, 0, 4, 5, 6, 7, 5, 2};
10281028

10291029
op::v0::Constant c(element::u3, shape, input);
@@ -1247,7 +1247,7 @@ TEST(constant, uint6_string_broadcast) {
12471247
}
12481248

12491249
TEST(constant, uint6_vector_less_than_one_storage_unit) {
1250-
auto const shape = Shape{3};
1250+
const auto shape = Shape{3};
12511251
const auto input = std::vector<uint8_t>{5, 23, 1};
12521252

12531253
op::v0::Constant c(element::u6, shape, input);
@@ -1263,7 +1263,7 @@ TEST(constant, uint6_vector_less_than_one_storage_unit) {
12631263
}
12641264

12651265
TEST(constant, uint6_vector_greater_than_one_storage_unit) {
1266-
auto const shape = Shape{6};
1266+
const auto shape = Shape{6};
12671267
const auto input = std::vector<uint8_t>{25, 3, 1, 0, 45, 5};
12681268

12691269
op::v0::Constant c(element::u6, shape, input);

src/frontends/ir/src/ir_deserializer.cpp

+2-2
Original file line numberDiff line numberDiff line change
@@ -558,7 +558,7 @@ std::shared_ptr<ov::Model> ov::XmlDeserializer::parse_function(const pugi::xml_n
558558
OPENVINO_THROW("Attempt to access node ", e.fromLayerId, " that not in graph.");
559559
}
560560
auto& p_output = params[e.fromLayerId].params;
561-
size_t const realInputPortId = p.params.get_real_input_port_id(e.toPortId);
561+
const size_t realInputPortId = p.params.get_real_input_port_id(e.toPortId);
562562
if (realInputPortId >= inputs.size())
563563
OPENVINO_THROW(p.params.type,
564564
" layer ",
@@ -911,7 +911,7 @@ std::shared_ptr<ov::Node> ov::XmlDeserializer::create_node(const std::vector<ov:
911911
}
912912
}
913913

914-
auto const& opset = opsetIt->second;
914+
const auto& opset = opsetIt->second;
915915

916916
ovNode = std::shared_ptr<ov::Node>(opset.create_insensitive(type_name));
917917
if (!ovNode) {

src/frontends/onnx/frontend/src/core/model.hpp

+1-1
Original file line numberDiff line numberDiff line change
@@ -103,7 +103,7 @@ class Model {
103103
std::int64_t get_opset_version(const std::string& domain) {
104104
try {
105105
return ov::frontend::onnx::get_opset_version(*this->m_model_proto, domain);
106-
} catch (ov::Exception const&) {
106+
} catch (const ov::Exception&) {
107107
return -1;
108108
}
109109
}

0 commit comments

Comments
 (0)