@@ -266,8 +266,8 @@ static void test_default()
| [SubPixelCNN](https://github.com/niazwazir/SUB_PIXEL_CNN) | 234K | *resolution* | [demo](https://github.com/DefTruth/lite.ai.toolkit/blob/main/examples/lite/cv/test_lite_subpixel_cnn.cpp) | ✅ | ✅ | / | ✅ | ✅ | ✔️ | ✔️ | ❔ |
| [SubPixelCNN](https://github.com/niazwazir/SUB_PIXEL_CNN) | 234K | *resolution* | [demo](https://github.com/DefTruth/lite.ai.toolkit/blob/main/examples/lite/cv/test_lite_subpixel_cnn.cpp) | ✅ | ✅ | / | ✅ | ✅ | ✔️ | ✔️ | ❔ |
| [InsectDet](https://github.com/quarrying/quarrying-insect-id) | 27M | *detection* | [demo](https://github.com/DefTruth/lite.ai.toolkit/blob/main/examples/lite/cv/test_lite_insectdet.cpp) | ✅ | ✅ | / | ✅ | ✅ | ✔️ | ✔️ | ❔ |
- | [InsectID](https://github.com/quarrying/quarrying-insect-id) | 22M | *classification* | [demo](https://github.com/DefTruth/lite.ai.toolkit/blob/main/examples/lite/cv/test_lite_insectid.cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✔️ | ✔️ | ❔ |
- | [PlantID](https://github.com/quarrying/quarrying-plant-id) | 30M | *classification* | [demo](https://github.com/DefTruth/lite.ai.toolkit/blob/main/examples/lite/cv/test_lite_plantid.cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✔️ | ✔️ | ❔ |
+ | [InsectID](https://github.com/quarrying/quarrying-insect-id) | 22M | *classification* | [demo](https://github.com/DefTruth/lite.ai.toolkit/blob/main/examples/lite/cv/test_lite_insectid.cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✔️ | ✔️ | ✔️ | ❔ |
+ | [PlantID](https://github.com/quarrying/quarrying-plant-id) | 30M | *classification* | [demo](https://github.com/DefTruth/lite.ai.toolkit/blob/main/examples/lite/cv/test_lite_plantid.cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✔️ | ✔️ | ✔️ | ❔ |
| [YOLOv5BlazeFace](https://github.com/deepcam-cn/yolov5-face) | 3.4M | *face::detect* | [demo](https://github.com/DefTruth/lite.ai.toolkit/blob/main/examples/lite/cv/test_lite_yolov5_blazeface.cpp) | ✅ | ✅ | / | / | ✅ | ✔️ | ✔️ | ❔ |
| [YoloV5_V_6_1](https://github.com/ultralytics/yolov5/releases/tag/v6.1) | 7.5M | *detection* | [demo](https://github.com/DefTruth/lite.ai.toolkit/blob/main/examples/lite/cv/test_lite_yolov5_v6.1.cpp) | ✅ | ✅ | / | / | ✅ | ✔️ | ✔️ | ❔ |
| [HeadSeg](https://github.com/minivision-ai/photo2cartoon) | 31M | *segmentation* | [demo](https://github.com/DefTruth/lite.ai.toolkit/blob/main/examples/lite/cv/test_lite_head_seg.cpp) | ✅ | ✅ | / | ✅ | ✅ | ✔️ | ✔️ | ❔ |
@@ -280,6 +280,8 @@ static void test_default()
| [MobileHumanMatting](https://github.com/lizhengwei1992/mobile_phone_human_matting) | 3M | *matting* | [demo](https://github.com/DefTruth/lite.ai.toolkit/blob/main/examples/lite/cv/test_lite_mobile_human_matting.cpp) | ✅ | ✅ | / | / | ✅ | ✔️ | ✔️ | ❔ |
| [MobileHairSeg](https://github.com/wonbeomjang/mobile-hair-segmentation-pytorch) | 14M | *segmentation* | [demo](https://github.com/DefTruth/lite.ai.toolkit/blob/main/examples/lite/cv/test_lite_mobile_hair_seg.cpp) | ✅ | ✅ | / | / | ✅ | ✔️ | ✔️ | ❔ |
| [YOLOv6](https://github.com/meituan/YOLOv6) | 17M | *detection* | [demo](https://github.com/DefTruth/lite.ai.toolkit/blob/main/examples/lite/cv/test_lite_yolov6.cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✔️ | ✔️ | ❔ |
+ | [FaceParsingBiSeNet](https://github.com/zllrunning/face-parsing.PyTorch) | 50M | *segmentation* | [demo](https://github.com/DefTruth/lite.ai.toolkit/blob/main/examples/lite/cv/test_lite_face_parsing_bisenet.cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✔️ | ✔️ | ❔ |
+ | [FaceParsingBiSeNetDyn](https://github.com/zllrunning/face-parsing.PyTorch) | 50M | *segmentation* | [demo](https://github.com/DefTruth/lite.ai.toolkit/blob/main/examples/lite/cv/test_lite_face_parsing_bisenet_dyn.cpp) | ✅ | / | / | / | / | ✔️ | ✔️ | ❔ |
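The two added rows register the BiSeNet face-parsing models: a fixed-shape build (`FaceParsingBiSeNet`) and a dynamic-shape build (`FaceParsingBiSeNetDyn`). For quick orientation, a minimal construction sketch follows; the full walkthrough is Example13 further down. The thread-count argument and the model path are borrowed from that example, and the same path is reused for the dynamic variant purely for illustration; the Model-Zoo may ship the dynamic-axes export under a different file name.

```c++
#include "lite/lite.h"

int main()
{
  // Model path as used by Example13 below; the dynamic build may need its own export.
  std::string onnx_path = "../../../hub/onnx/cv/face_parsing_512x512.onnx";

  // Fixed-shape face parser; the second argument (8) is the thread count used in Example13.
  auto *parser = new lite::cv::segmentation::FaceParsingBiSeNet(onnx_path, 8);
  // Dynamic-shape variant (see the model listing at the end of Example13).
  auto *parser_dyn = new lite::cv::segmentation::FaceParsingBiSeNetDyn(onnx_path);

  delete parser;
  delete parser_dyn;
  return 0;
}
```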
## 4. Build Documentation
@@ -877,8 +879,6 @@ static void test_default()
lite::utils::draw_boxes_with_landmarks_inplace(img_bgr, detected_boxes);
cv::imwrite(save_img_path, img_bgr);

- std::cout << "Default Version Done! Detected Face Num: " << detected_boxes.size() << std::endl;
-
delete scrfd;
}
```
@@ -970,7 +970,6 @@ static void test_default()
ssrnet->detect(img_bgr, age);
lite::utils::draw_age_inplace(img_bgr, age);
cv::imwrite(save_img_path, img_bgr);
- std::cout << "Default Version Done! Detected SSRNet Age: " << age.age << std::endl;

delete ssrnet;
}
@@ -1219,12 +1218,52 @@ static void test_default()
<img src='docs/resources/female_photo2cartoon_cartoon_1_out.jpg' height="180px" width="180px">
</div>

- More portrait stylization models
+ More portrait stylization models:
```c++
auto *transfer = new lite::cv::style::FemalePhoto2Cartoon(onnx_path);
+ ```
+
+ ****
+
+ #### Example13: Facial-feature segmentation with [FaceParsing](https://github.com/zllrunning/face-parsing.PyTorch). Please download the model files from Model-Zoo[<sup>2</sup>](#lite.ai.toolkit-2).
+ ```c++
+ #include "lite/lite.h"
+
+ static void test_default()
+ {
+   std::string onnx_path = "../../../hub/onnx/cv/face_parsing_512x512.onnx";
+   std::string test_img_path = "../../../examples/lite/resources/test_lite_face_parsing.png";
+   std::string save_img_path = "../../../logs/test_lite_face_parsing_bisenet.jpg";
+
+   auto *face_parsing_bisenet = new lite::cv::segmentation::FaceParsingBiSeNet(onnx_path, 8); // 8 threads
+
+   lite::types::FaceParsingContent content;
+   cv::Mat img_bgr = cv::imread(test_img_path);
+   face_parsing_bisenet->detect(img_bgr, content);
+
+   if (content.flag && !content.merge.empty())
+     cv::imwrite(save_img_path, content.merge);
+
+   delete face_parsing_bisenet;
+ }
+ ```
+ The output looks like this:
+
+ <div align='center'>
+ <img src='docs/resources/face_parsing.png' height="180px" width="180px">
+ <img src='docs/resources/face_parsing_merge.jpg' height="180px" width="180px">
+ <img src='docs/resources/face_parsing_1.png' height="180px" width="180px">
+ <img src='docs/resources/face_parsing_1_merge.jpg' height="180px" width="180px">
+ </div>
+
+ More facial-feature segmentation models (hair, eyes, nose, mouth, others):
+ ```c++
+ auto *segment = new lite::cv::segmentation::FaceParsingBiSeNet(onnx_path); // 50Mb
+ auto *segment = new lite::cv::segmentation::FaceParsingBiSeNetDyn(onnx_path); // Dynamic Shape Inference.
```
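Example13 above exercises the fixed-shape `FaceParsingBiSeNet` (512x512 ONNX export). The `FaceParsingBiSeNetDyn` entry advertises dynamic shape inference but is not demonstrated; below is a minimal sketch of how it would slot into the same flow, assuming it shares the `detect(cv::Mat, lite::types::FaceParsingContent&)` interface of the fixed-shape class. The function name and the dynamic ONNX file name are placeholders, not documented artifacts.

```c++
#include "lite/lite.h"

// Hedged sketch: the Example13 flow with the dynamic-shape parser swapped in.
// Assumptions: FaceParsingBiSeNetDyn exposes the same detect()/FaceParsingContent
// interface as FaceParsingBiSeNet above, and a dynamic-axes ONNX export is available.
static void test_face_parsing_dyn()
{
  std::string onnx_path = "../../../hub/onnx/cv/face_parsing_dynamic.onnx"; // placeholder name; check the Model-Zoo
  std::string test_img_path = "../../../examples/lite/resources/test_lite_face_parsing.png";
  std::string save_img_path = "../../../logs/test_lite_face_parsing_bisenet_dyn.jpg";

  auto *parser_dyn = new lite::cv::segmentation::FaceParsingBiSeNetDyn(onnx_path);

  lite::types::FaceParsingContent content;
  cv::Mat img_bgr = cv::imread(test_img_path); // no fixed 512x512 resize required if shapes are truly dynamic
  parser_dyn->detect(img_bgr, content);

  if (content.flag && !content.merge.empty())
    cv::imwrite(save_img_path, content.merge);

  delete parser_dyn;
}
```

Note that the matrix row added earlier in this diff flags the `Dyn` build for the first backend column only, so it likely runs on a single inference engine.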
+
## 7. License
<div id="lite.ai.toolkit-License"></div>