[AI] ACGPN
DeepFashion Try-On
Towards Photo-Realistic Virtual Try-On by Adaptively Generating↔Preserving Image Content, CVPR’20.
For running inference with ACGPN.
ACGPN repo: https://github.com/switchablenorms/DeepFashion_Try_On
This notebook is hard coded for inferencing one image at a time.
Notebook by Levin Dabhi
@InProceedings{Yang_2020_CVPR,
author = {Yang, Han and Zhang, Ruimao and Guo, Xiaobao and Liu, Wei and Zuo, Wangmeng and Luo, Ping},
title = {Towards Photo-Realistic Virtual Try-On by Adaptively Generating-Preserving Image Content},
booktitle = {IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)},
month = {June},
year = {2020}
}
@inproceedings{ge2021disentangled,
title={Disentangled Cycle Consistency for Highly-realistic Virtual Try-On},
author={Ge, Chongjian and Song, Yibing and Ge, Yuying and Yang, Han and Liu, Wei and Luo, Ping},
booktitle={Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition},
pages={16928--16937},
year={2021}
}
@inproceedings{yang2022full,
title = {Full-Range Virtual Try-On With Recurrent Tri-Level Transform},
author = {Yang, Han and Yu, Xinrui and Liu, Ziwei},
booktitle = {Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition},
pages = {3460--3469},
year = {2022}
}
ACGPN
- Original: https://github.com/levindabhi/ACGPN.git
- Modified: https://github.com/kairess/ACGPN.git
# Clone the (modified) ACGPN repo and make it the working directory;
# all later relative paths (Data_preprocessing/, inputs/, results/) assume this.
!git clone https://github.com/kairess/ACGPN.git
%cd ACGPN
Dependencies
# gdown: download pretrained weights from Google Drive; ninja: build backend
# used when compiling native extensions (e.g. custom CUDA/C++ ops).
!pip install -U --no-cache-dir gdown --pre -qq
!pip install ninja -qq
# Notebook-wide imports.
import gdown  # Google Drive downloads (pretrained checkpoints)
import numpy as np
from PIL import Image
import IPython
import gdown  # NOTE(review): duplicate of the import above — harmless, but removable
import os
import sys
import time
# Local module from the ACGPN repo: writes OpenPose-style keypoint JSON for an image.
from predict_pose import generate_pose_keypoints
!mkdir Data_preprocessing/test_color
!mkdir Data_preprocessing/test_colormask
!mkdir Data_preprocessing/test_edge
!mkdir Data_preprocessing/test_img
!mkdir Data_preprocessing/test_label
!mkdir Data_preprocessing/test_mask
!mkdir Data_preprocessing/test_pose
!mkdir inputs
!mkdir inputs/img
!mkdir inputs/cloth
!git clone https://github.com/levindabhi/Self-Correction-Human-Parsing-for-ACGPN.git # 분류
!git clone https://github.com/levindabhi/U-2-Net.git # 누끼
Pretrained models (사전학습 모델)
VITON dataset (VITON 데이터셋):
https://drive.google.com/uc?id=1tE7hcVFm8Td8kRh5iYRBSDFdvZIkbUIR
AI Hub fashion dataset (AI허브 패션 데이터셋):
https://www.aihub.or.kr/aihubdata/data/view.do?currMenu=115&topMenu=100&aihubDataSe=realm&dataSetSn=78
Cloth mask extraction (옷 마스크 추출)
# List candidate cloth images once; the alphabetically-first one is used.
cloth_files = sorted(os.listdir('inputs/cloth'))
if not cloth_files:
    # Fail with a clear message instead of an opaque IndexError.
    raise FileNotFoundError('No cloth image found in inputs/cloth — upload one first.')
cloth_files

# Timestamped name keeps repeated runs from clobbering earlier results.
cloth_name = f'cloth_{int(time.time())}.png'
cloth_path = os.path.join('inputs/cloth', cloth_files[0])

# ACGPN preprocessing expects 192x256 RGB images.
cloth = Image.open(cloth_path)
cloth = cloth.resize((192, 256), Image.BICUBIC).convert('RGB')
cloth.save(os.path.join('Data_preprocessing/test_color', cloth_name))

# U-2-Net extracts the cloth mask ("edge") consumed by the try-on network.
# NOTE(review): u2net_run / u2net are assumed to be loaded in an earlier cell — confirm.
u2net_run.infer(u2net, 'Data_preprocessing/test_color', 'Data_preprocessing/test_edge')

# Display the generated mask.
Image.open(f'Data_preprocessing/test_edge/{cloth_name}')
Person pose and segmentation (모델 포즈, 세그멘테이션)
sorted(os.listdir('inputs/img'))
img_name = f'img_{int(time.time())}.png'
img_path = os.path.join('inputs/img', sorted(os.listdir('inputs/img'))[0])
img = Image.open(img_path)
img = img.resize((192,256), Image.BICUBIC)
img_path = os.path.join('Data_preprocessing/test_img', img_name)
img.save(img_path)
!python3 Self-Correction-Human-Parsing-for-ACGPN/simple_extractor.py --dataset 'lip' --model-restore 'lip_final.pth' --input-dir 'Data_preprocessing/test_img' --output-dir 'Data_preprocessing/test_label'
pose_path = os.path.join('Data_preprocessing/test_pose', img_name.replace('.png', '_keypoints.json'))
generate_pose_keypoints(img_path, pose_path)
Inference (추론)
# Write the single (person, cloth) pair that test.py reads.
# NOTE(review): the rm is redundant — open(..., 'w') already truncates the file.
!rm -rf Data_preprocessing/test_pairs.txt
with open('Data_preprocessing/test_pairs.txt', 'w') as f:
    f.write(f'{img_name} {cloth_name}')
# Run ACGPN inference; results are written under results/test/try-on/.
!python test.py
Results (결과)
# Side-by-side comparison grid: person | cloth | try-on result.
# Each panel is converted to RGB so channel counts always match for
# horizontal concatenation (saved inputs may be grayscale or RGBA).
panels = [
    Image.open(f'Data_preprocessing/test_img/{img_name}').convert('RGB'),
    Image.open(f'Data_preprocessing/test_color/{cloth_name}').convert('RGB'),
    Image.open(f'results/test/try-on/{img_name}').convert('RGB'),
]
output_grid = np.concatenate([np.array(p) for p in panels], axis=1)
image_grid = Image.fromarray(output_grid)
image_grid
Leave a comment (댓글남기기)