
Commit 0d5f7ed

ghehglanza authored and committed
[CIR][CIRGen][Builtin][Neon] Lower neon vqadd_v (#890)
As the title says. Also adds buildCommonNeonBuiltinExpr, modeled on OG's emitCommonNeonBuiltinExpr, which should help consolidate NEON cases and share common code. Notes:
- The skeleton of OG's emitCommonNeonBuiltinExpr is largely kept, at the cost of computing a few variables that are not used yet; they may be useful in the future.
- The purpose of buildCommonNeonBuiltinExpr is to reduce implementation duplication. So far only one intrinsic kind is implemented, so it is hard for CIR to be more generic yet, but different kinds of intrinsics should eventually be able to share a more generic code path.

Co-authored-by: Guojin He <[email protected]>
1 parent e4cc937 commit 0d5f7ed
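For context: vqadd_v selects between the AArch64 saturating-add intrinsics, llvm.aarch64.neon.uqadd for unsigned element types and llvm.aarch64.neon.sqadd for signed ones. A minimal scalar sketch of the per-lane saturating semantics (plain C++; the helper names are illustrative and not taken from the patch):

#include <algorithm>
#include <cassert>
#include <cstdint>

// Scalar reference for what one lane of vqadd_u8 / vqadd_s8 computes:
// the sum is clamped to the type's range instead of wrapping around.
static uint8_t uqadd8(uint8_t a, uint8_t b) {
  unsigned sum = unsigned(a) + unsigned(b);
  return sum > 255u ? uint8_t(255) : uint8_t(sum); // clamp to UINT8_MAX
}

static int8_t sqadd8(int8_t a, int8_t b) {
  int sum = int(a) + int(b);
  return int8_t(std::clamp(sum, -128, 127)); // clamp to [INT8_MIN, INT8_MAX]
}

int main() {
  assert(uqadd8(200, 100) == 255); // plain uint8_t addition would wrap to 44
  assert(sqadd8(100, 100) == 127);
  assert(sqadd8(-100, -100) == -128);
  return 0;
}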

6 files changed: +250 −4 lines changed

clang/include/clang/CIR/MissingFeatures.h

Lines changed: 3 additions & 0 deletions
@@ -136,6 +136,9 @@ struct MissingFeatures {
   // AArch64 Neon builtin related.
   static bool buildNeonShiftVector() { return false; }
 
+  // ABIInfo queries.
+  static bool useTargetLoweringABIInfo() { return false; }
+
   // Misc
   static bool cacheRecordLayouts() { return false; }
   static bool capturedByInit() { return false; }

clang/lib/CIR/CodeGen/ABIInfo.h

Lines changed: 2 additions & 0 deletions
@@ -35,6 +35,8 @@ class ABIInfo {
 
   virtual void computeInfo(CIRGenFunctionInfo &FI) const = 0;
 
+  virtual bool allowBFloatArgsAndRet() const { return false; }
+
   // Implement the Type::IsPromotableIntegerType for ABI specific needs. The
   // only difference is that this consideres bit-precise integer types as well.
   bool isPromotableIntegerTypeForABI(clang::QualType Ty) const;

clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp

Lines changed: 55 additions & 4 deletions
@@ -1603,7 +1603,7 @@ static mlir::Value buildArmLdrexNon128Intrinsic(unsigned int builtinID,
 
 mlir::Value buildNeonCall(unsigned int builtinID, CIRGenFunction &cgf,
                           llvm::SmallVector<mlir::Type> argTypes,
-                          llvm::SmallVector<mlir::Value, 4> args,
+                          llvm::SmallVectorImpl<mlir::Value> &args,
                           llvm::StringRef intrinsicName, mlir::Type funcResTy,
                           mlir::Location loc,
                           bool isConstrainedFPIntrinsic = false,
@@ -1640,6 +1640,55 @@ mlir::Value buildNeonCall(unsigned int builtinID, CIRGenFunction &cgf,
   }
 }
 
+mlir::Value CIRGenFunction::buildCommonNeonBuiltinExpr(
+    unsigned builtinID, unsigned llvmIntrinsic, unsigned altLLVMIntrinsic,
+    const char *nameHint, unsigned modifier, const CallExpr *e,
+    llvm::SmallVectorImpl<mlir::Value> &ops, cir::Address ptrOp0,
+    cir::Address ptrOp1, llvm::Triple::ArchType arch) {
+  // Get the last argument, which specifies the vector type.
+  const clang::Expr *arg = e->getArg(e->getNumArgs() - 1);
+  std::optional<llvm::APSInt> neonTypeConst =
+      arg->getIntegerConstantExpr(getContext());
+  if (!neonTypeConst)
+    return nullptr;
+
+  // Determine the type of this overloaded NEON intrinsic.
+  NeonTypeFlags neonType(neonTypeConst->getZExtValue());
+  bool isUnsigned = neonType.isUnsigned();
+  bool isQuad = neonType.isQuad();
+  const bool hasLegalHalfType = getTarget().hasLegalHalfType();
+  // The value of allowBFloatArgsAndRet is true for AArch64, but it should
+  // come from ABI info.
+  const bool allowBFloatArgsAndRet =
+      getTargetHooks().getABIInfo().allowBFloatArgsAndRet();
+
+  mlir::Type vTy = GetNeonType(this, neonType, hasLegalHalfType, false,
+                               allowBFloatArgsAndRet);
+  if (!vTy)
+    return nullptr;
+
+  unsigned intrinicId = llvmIntrinsic;
+  if ((modifier & UnsignedAlts) && !isUnsigned)
+    intrinicId = altLLVMIntrinsic;
+
+  switch (builtinID) {
+  default:
+    llvm_unreachable("NYI");
+  case NEON::BI__builtin_neon_vqadd_v:
+    mlir::Value res = buildNeonCall(builtinID, *this, {vTy, vTy}, ops,
+                                    (intrinicId != altLLVMIntrinsic)
+                                        ? "llvm.aarch64.neon.uqadd"
+                                        : "llvm.aarch64.neon.sqadd",
+                                    vTy, getLoc(e->getExprLoc()));
+    mlir::Type resultType = ConvertType(e->getType());
+    // AArch64 intrinsic one-element vector type cast to
+    // scalar type expected by the builtin
+    return builder.createBitcast(res, resultType);
+    break;
+  }
+  return nullptr;
+}
+
 mlir::Value
 CIRGenFunction::buildAArch64BuiltinExpr(unsigned BuiltinID, const CallExpr *E,
                                         ReturnValueSlot ReturnValue,
@@ -2359,9 +2408,11 @@ CIRGenFunction::buildAArch64BuiltinExpr(unsigned BuiltinID, const CallExpr *E,
   // defer to common code if it's been added to our special map.
   Builtin = findARMVectorIntrinsicInMap(AArch64SIMDIntrinsicMap, BuiltinID,
                                         AArch64SIMDIntrinsicsProvenSorted);
-  if (Builtin) {
-    llvm_unreachable("NYI");
-  }
+  if (Builtin)
+    return buildCommonNeonBuiltinExpr(
+        Builtin->BuiltinID, Builtin->LLVMIntrinsic, Builtin->AltLLVMIntrinsic,
+        Builtin->NameHint, Builtin->TypeModifier, E, Ops,
+        /*never use addresses*/ Address::invalid(), Address::invalid(), Arch);
 
   if (mlir::Value V =
           buildAArch64TblBuiltinExpr(*this, BuiltinID, E, Ops, Arch))

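The commit message anticipates more intrinsics sharing this common path. As a purely hypothetical sketch (not part of this change), a second UnsignedAlts-style builtin such as vqsub_v could reuse the same buildNeonCall helper by adding one more case to the switch in buildCommonNeonBuiltinExpr; the intrinsic names below are assumed, not taken from this patch:

  // Hypothetical follow-up case, shown for illustration only:
  case NEON::BI__builtin_neon_vqsub_v: {
    mlir::Value res = buildNeonCall(builtinID, *this, {vTy, vTy}, ops,
                                    (intrinicId != altLLVMIntrinsic)
                                        ? "llvm.aarch64.neon.uqsub"
                                        : "llvm.aarch64.neon.sqsub",
                                    vTy, getLoc(e->getExprLoc()));
    return builder.createBitcast(res, ConvertType(e->getType()));
  }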
clang/lib/CIR/CodeGen/CIRGenFunction.h

Lines changed: 5 additions & 0 deletions
@@ -980,6 +980,11 @@ class CIRGenFunction : public CIRGenTypeCache {
   mlir::Value buildARMCDEBuiltinExpr(unsigned BuiltinID, const CallExpr *E,
                                      ReturnValueSlot ReturnValue,
                                      llvm::Triple::ArchType Arch);
+  mlir::Value buildCommonNeonBuiltinExpr(
+      unsigned builtinID, unsigned llvmIntrinsic, unsigned altLLVMIntrinsic,
+      const char *nameHint, unsigned modifier, const CallExpr *e,
+      llvm::SmallVectorImpl<mlir::Value> &ops, cir::Address ptrOp0,
+      cir::Address ptrOp1, llvm::Triple::ArchType arch);
 
   mlir::Value buildAlignmentAssumption(mlir::Value ptrValue, QualType ty,
                                        SourceLocation loc,

clang/lib/CIR/CodeGen/TargetInfo.cpp

Lines changed: 6 additions & 0 deletions
@@ -5,6 +5,7 @@
 #include "CIRGenTypes.h"
 
 #include "clang/Basic/TargetInfo.h"
+#include "clang/CIR/MissingFeatures.h"
 #include "clang/CIR/Target/x86.h"
 
 using namespace cir;
@@ -103,6 +104,11 @@ class AArch64ABIInfo : public ABIInfo {
 
 public:
   AArch64ABIInfo(CIRGenTypes &CGT, ABIKind Kind) : ABIInfo(CGT), Kind(Kind) {}
+  virtual bool allowBFloatArgsAndRet() const override {
+    // TODO: Should query target info instead of hardcoding.
+    assert(!cir::MissingFeatures::useTargetLoweringABIInfo());
+    return true;
+  }
 
 private:
   ABIKind getABIKind() const { return Kind; }
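The TODO above hints at the eventual shape: classic CodeGen's AArch64 ABIInfo answers this query from the target rather than hardcoding it. A sketch of that direction, assuming a getTarget() accessor comparable to the one classic CodeGen's ABIInfo exposes:

  bool allowBFloatArgsAndRet() const override {
    // Assumed direction: ask the target whether bfloat16 is a legal
    // argument/return type instead of returning a hardcoded true.
    return getTarget().hasBFloat16Type();
  }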
Lines changed: 179 additions & 0 deletions
@@ -0,0 +1,179 @@
+// RUN: %clang_cc1 -triple aarch64-none-linux-android24 -fclangir \
+// RUN:   -emit-cir -target-feature +neon %s -o %t.cir
+// RUN: FileCheck --check-prefix=CIR --input-file=%t.cir %s
+// RUN: %clang_cc1 -triple aarch64-none-linux-android24 -fclangir \
+// RUN:   -emit-llvm -target-feature +neon %s -o %t.ll
+// RUN: FileCheck --check-prefix=LLVM --input-file=%t.ll %s
+
+// Testing the normal situation of the vqadd intrinsics.
+
+// REQUIRES: aarch64-registered-target || arm-registered-target
+#include <arm_neon.h>
+
+uint8x8_t test_vqadd_u8(uint8x8_t a, uint8x8_t b) {
+  return vqadd_u8(a,b);
+}
+
+// CIR-LABEL: vqadd_u8
+// CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.uqadd" {{%.*}}, {{%.*}} :
+// CIR-SAME: (!cir.vector<!u8i x 8>, !cir.vector<!u8i x 8>) -> !cir.vector<!u8i x 8>
+// CIR: cir.return
+
+// LLVM: {{.*}}test_vqadd_u8(<8 x i8>{{.*}} [[A:%.*]], <8 x i8>{{.*}} [[B:%.*]])
+// LLVM: store <8 x i8> [[A]], ptr [[A_ADDR:%.*]], align 8
+// LLVM: store <8 x i8> [[B]], ptr [[B_ADDR:%.*]], align 8
+// LLVM: [[TMP_A:%.*]] = load <8 x i8>, ptr [[A_ADDR]], align 8
+// LLVM: [[TMP_B:%.*]] = load <8 x i8>, ptr [[B_ADDR]], align 8
+// LLVM: store <8 x i8> [[TMP_A]], ptr [[P0_ADDR:%.*]], align 8
+// LLVM: store <8 x i8> [[TMP_B]], ptr [[P1_ADDR:%.*]], align 8
+// LLVM: [[INTRN_A:%.*]] = load <8 x i8>, ptr [[P0_ADDR]], align 8
+// LLVM: [[INTRN_B:%.*]] = load <8 x i8>, ptr [[P1_ADDR]], align 8
+// LLVM: {{%.*}} = call <8 x i8> @llvm.aarch64.neon.uqadd.v8i8(<8 x i8> [[INTRN_A]], <8 x i8> [[INTRN_B]])
+// LLVM: ret <8 x i8>
+
+int8x8_t test_vqadd_s8(int8x8_t a, int8x8_t b) {
+  return vqadd_s8(a,b);
+}
+
+// CIR-LABEL: vqadd_s8
+// CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.sqadd" {{%.*}}, {{%.*}} :
+// CIR-SAME: (!cir.vector<!s8i x 8>, !cir.vector<!s8i x 8>) -> !cir.vector<!s8i x 8>
+// CIR: cir.return
+
+// LLVM: {{.*}}test_vqadd_s8(<8 x i8>{{.*}} [[A:%.*]], <8 x i8>{{.*}} [[B:%.*]])
+// LLVM: store <8 x i8> [[A]], ptr [[A_ADDR:%.*]], align 8
+// LLVM: store <8 x i8> [[B]], ptr [[B_ADDR:%.*]], align 8
+// LLVM: [[TMP_A:%.*]] = load <8 x i8>, ptr [[A_ADDR]], align 8
+// LLVM: [[TMP_B:%.*]] = load <8 x i8>, ptr [[B_ADDR]], align 8
+// LLVM: store <8 x i8> [[TMP_A]], ptr [[P0_ADDR:%.*]], align 8
+// LLVM: store <8 x i8> [[TMP_B]], ptr [[P1_ADDR:%.*]], align 8
+// LLVM: [[INTRN_A:%.*]] = load <8 x i8>, ptr [[P0_ADDR]], align 8
+// LLVM: [[INTRN_B:%.*]] = load <8 x i8>, ptr [[P1_ADDR]], align 8
+// LLVM: {{%.*}} = call <8 x i8> @llvm.aarch64.neon.sqadd.v8i8(<8 x i8> [[INTRN_A]], <8 x i8> [[INTRN_B]])
+// LLVM: ret <8 x i8>
+
+uint16x4_t test_vqadd_u16(uint16x4_t a, uint16x4_t b) {
+  return vqadd_u16(a,b);
+}
+
+// CIR-LABEL: vqadd_u16
+// CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.uqadd" {{%.*}}, {{%.*}} :
+// CIR-SAME: (!cir.vector<!u16i x 4>, !cir.vector<!u16i x 4>) -> !cir.vector<!u16i x 4>
+// CIR: cir.return
+
+// LLVM: {{.*}}test_vqadd_u16(<4 x i16>{{.*}} [[A:%.*]], <4 x i16>{{.*}} [[B:%.*]])
+// LLVM: store <4 x i16> [[A]], ptr [[A_ADDR:%.*]], align 8
+// LLVM: store <4 x i16> [[B]], ptr [[B_ADDR:%.*]], align 8
+// LLVM: [[TMP_A:%.*]] = load <4 x i16>, ptr [[A_ADDR]], align 8
+// LLVM: [[TMP_B:%.*]] = load <4 x i16>, ptr [[B_ADDR]], align 8
+// LLVM: store <4 x i16> [[TMP_A]], ptr [[P0_ADDR:%.*]], align 8
+// LLVM: store <4 x i16> [[TMP_B]], ptr [[P1_ADDR:%.*]], align 8
+// LLVM: [[INTRN_A:%.*]] = load <4 x i16>, ptr [[P0_ADDR]], align 8
+// LLVM: [[INTRN_B:%.*]] = load <4 x i16>, ptr [[P1_ADDR]], align 8
+// LLVM: {{%.*}} = call <4 x i16> @llvm.aarch64.neon.uqadd.v4i16(<4 x i16> [[INTRN_A]], <4 x i16> [[INTRN_B]])
+// LLVM: ret <4 x i16>
+
+int16x4_t test_vqadd_s16(int16x4_t a, int16x4_t b) {
+  return vqadd_s16(a,b);
+}
+
+// CIR-LABEL: vqadd_s16
+// CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.sqadd" {{%.*}}, {{%.*}} :
+// CIR-SAME: (!cir.vector<!s16i x 4>, !cir.vector<!s16i x 4>) -> !cir.vector<!s16i x 4>
+// CIR: cir.return
+
+// LLVM: {{.*}}test_vqadd_s16(<4 x i16>{{.*}} [[A:%.*]], <4 x i16>{{.*}} [[B:%.*]])
+// LLVM: store <4 x i16> [[A]], ptr [[A_ADDR:%.*]], align 8
+// LLVM: store <4 x i16> [[B]], ptr [[B_ADDR:%.*]], align 8
+// LLVM: [[TMP_A:%.*]] = load <4 x i16>, ptr [[A_ADDR]], align 8
+// LLVM: [[TMP_B:%.*]] = load <4 x i16>, ptr [[B_ADDR]], align 8
+// LLVM: store <4 x i16> [[TMP_A]], ptr [[P0_ADDR:%.*]], align 8
+// LLVM: store <4 x i16> [[TMP_B]], ptr [[P1_ADDR:%.*]], align 8
+// LLVM: [[INTRN_A:%.*]] = load <4 x i16>, ptr [[P0_ADDR]], align 8
+// LLVM: [[INTRN_B:%.*]] = load <4 x i16>, ptr [[P1_ADDR]], align 8
+// LLVM: {{%.*}} = call <4 x i16> @llvm.aarch64.neon.sqadd.v4i16(<4 x i16> [[INTRN_A]], <4 x i16> [[INTRN_B]])
+// LLVM: ret <4 x i16>
+
+uint32x2_t test_vqadd_u32(uint32x2_t a, uint32x2_t b) {
+  return vqadd_u32(a,b);
+}
+
+// CIR-LABEL: vqadd_u32
+// CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.uqadd" {{%.*}}, {{%.*}} :
+// CIR-SAME: (!cir.vector<!u32i x 2>, !cir.vector<!u32i x 2>) -> !cir.vector<!u32i x 2>
+// CIR: cir.return
+
+// LLVM: {{.*}}test_vqadd_u32(<2 x i32>{{.*}} [[A:%.*]], <2 x i32>{{.*}} [[B:%.*]])
+// LLVM: store <2 x i32> [[A]], ptr [[A_ADDR:%.*]], align 8
+// LLVM: store <2 x i32> [[B]], ptr [[B_ADDR:%.*]], align 8
+// LLVM: [[TMP_A:%.*]] = load <2 x i32>, ptr [[A_ADDR]], align 8
+// LLVM: [[TMP_B:%.*]] = load <2 x i32>, ptr [[B_ADDR]], align 8
+// LLVM: store <2 x i32> [[TMP_A]], ptr [[P0_ADDR:%.*]], align 8
+// LLVM: store <2 x i32> [[TMP_B]], ptr [[P1_ADDR:%.*]], align 8
+// LLVM: [[INTRN_A:%.*]] = load <2 x i32>, ptr [[P0_ADDR]], align 8
+// LLVM: [[INTRN_B:%.*]] = load <2 x i32>, ptr [[P1_ADDR]], align 8
+// LLVM: {{%.*}} = call <2 x i32> @llvm.aarch64.neon.uqadd.v2i32(<2 x i32> [[INTRN_A]], <2 x i32> [[INTRN_B]])
+// LLVM: ret <2 x i32>
+
+int32x2_t test_vqadd_s32(int32x2_t a, int32x2_t b) {
+  return vqadd_s32(a,b);
+}
+
+// CIR-LABEL: vqadd_s32
+// CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.sqadd" {{%.*}}, {{%.*}} :
+// CIR-SAME: (!cir.vector<!s32i x 2>, !cir.vector<!s32i x 2>) -> !cir.vector<!s32i x 2>
+// CIR: cir.return
+
+// LLVM: {{.*}}test_vqadd_s32(<2 x i32>{{.*}} [[A:%.*]], <2 x i32>{{.*}} [[B:%.*]])
+// LLVM: store <2 x i32> [[A]], ptr [[A_ADDR:%.*]], align 8
+// LLVM: store <2 x i32> [[B]], ptr [[B_ADDR:%.*]], align 8
+// LLVM: [[TMP_A:%.*]] = load <2 x i32>, ptr [[A_ADDR]], align 8
+// LLVM: [[TMP_B:%.*]] = load <2 x i32>, ptr [[B_ADDR]], align 8
+// LLVM: store <2 x i32> [[TMP_A]], ptr [[P0_ADDR:%.*]], align 8
+// LLVM: store <2 x i32> [[TMP_B]], ptr [[P1_ADDR:%.*]], align 8
+// LLVM: [[INTRN_A:%.*]] = load <2 x i32>, ptr [[P0_ADDR]], align 8
+// LLVM: [[INTRN_B:%.*]] = load <2 x i32>, ptr [[P1_ADDR]], align 8
+// LLVM: {{%.*}} = call <2 x i32> @llvm.aarch64.neon.sqadd.v2i32(<2 x i32> [[INTRN_A]], <2 x i32> [[INTRN_B]])
+// LLVM: ret <2 x i32>
+
+uint64x1_t test_vqadd_u64(uint64x1_t a, uint64x1_t b) {
+  return vqadd_u64(a,b);
+}
+
+// CIR-LABEL: vqadd_u64
+// CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.uqadd" {{%.*}}, {{%.*}} :
+// CIR-SAME: (!cir.vector<!u64i x 1>, !cir.vector<!u64i x 1>) -> !cir.vector<!u64i x 1>
+// CIR: cir.return
+
+// LLVM: {{.*}}test_vqadd_u64(<1 x i64>{{.*}} [[A:%.*]], <1 x i64>{{.*}} [[B:%.*]])
+// LLVM: store <1 x i64> [[A]], ptr [[A_ADDR:%.*]], align 8
+// LLVM: store <1 x i64> [[B]], ptr [[B_ADDR:%.*]], align 8
+// LLVM: [[TMP_A:%.*]] = load <1 x i64>, ptr [[A_ADDR]], align 8
+// LLVM: [[TMP_B:%.*]] = load <1 x i64>, ptr [[B_ADDR]], align 8
+// LLVM: store <1 x i64> [[TMP_A]], ptr [[P0_ADDR:%.*]], align 8
+// LLVM: store <1 x i64> [[TMP_B]], ptr [[P1_ADDR:%.*]], align 8
+// LLVM: [[INTRN_A:%.*]] = load <1 x i64>, ptr [[P0_ADDR]], align 8
+// LLVM: [[INTRN_B:%.*]] = load <1 x i64>, ptr [[P1_ADDR]], align 8
+// LLVM: {{%.*}} = call <1 x i64> @llvm.aarch64.neon.uqadd.v1i64(<1 x i64> [[INTRN_A]], <1 x i64> [[INTRN_B]])
+// LLVM: ret <1 x i64>
+
+int64x1_t test_vqadd_s64(int64x1_t a, int64x1_t b) {
+  return vqadd_s64(a,b);
+}
+
+// CIR-LABEL: vqadd_s64
+// CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.sqadd" {{%.*}}, {{%.*}} :
+// CIR-SAME: (!cir.vector<!s64i x 1>, !cir.vector<!s64i x 1>) -> !cir.vector<!s64i x 1>
+// CIR: cir.return
+
+// LLVM: {{.*}}test_vqadd_s64(<1 x i64>{{.*}} [[A:%.*]], <1 x i64>{{.*}} [[B:%.*]])
+// LLVM: store <1 x i64> [[A]], ptr [[A_ADDR:%.*]], align 8
+// LLVM: store <1 x i64> [[B]], ptr [[B_ADDR:%.*]], align 8
+// LLVM: [[TMP_A:%.*]] = load <1 x i64>, ptr [[A_ADDR]], align 8
+// LLVM: [[TMP_B:%.*]] = load <1 x i64>, ptr [[B_ADDR]], align 8
+// LLVM: store <1 x i64> [[TMP_A]], ptr [[P0_ADDR:%.*]], align 8
+// LLVM: store <1 x i64> [[TMP_B]], ptr [[P1_ADDR:%.*]], align 8
+// LLVM: [[INTRN_A:%.*]] = load <1 x i64>, ptr [[P0_ADDR]], align 8
+// LLVM: [[INTRN_B:%.*]] = load <1 x i64>, ptr [[P1_ADDR]], align 8
+// LLVM: {{%.*}} = call <1 x i64> @llvm.aarch64.neon.sqadd.v1i64(<1 x i64> [[INTRN_A]], <1 x i64> [[INTRN_B]])
+// LLVM: ret <1 x i64>
