AI Face Editing - Code Parameterization

Posted by HWCloudAI on 2022/12/05 15:48:28

High-Fidelity Image Editing

Note: this case must run on a GPU. Please see the ModelArts JupyterLab Hardware Specification Guide (《ModelArts JupyterLab 硬件规格使用指南》) for how to switch hardware specifications.

High-Fidelity GAN Inversion for Image Attribute Editing (CVPR 2022)
https://tengfei-wang.github.io/HFGI/

1 Download the Code and Data

import os
import moxing as mox

# Download and unpack the project from OBS on first run
if not os.path.exists("/home/ma-user/work/ma_share/HFGI/HFGI"):
    mox.file.copy_parallel('obs://modelarts-labs-bj4-v2/case_zoo/HFGI/HFGI.zip',
                           "/home/ma-user/work/ma_share/HFGI/HFGI.zip")
    os.system("cd /home/ma-user/work/ma_share/HFGI; unzip HFGI.zip; rm HFGI.zip")
    if os.path.exists("/home/ma-user/work/ma_share/HFGI/HFGI"):
        print('Download success')
    else:
        raise Exception('Download Failed')
else:
    print("Project already exists")

2 Install Dependencies

!pip install ninja
!pip install dlib
!pip uninstall -y torch
!pip uninstall -y torchvision
!pip install torch==1.6.0
!pip install torchvision==0.7.0

After the installation finishes, restart the kernel: click Restart the kernel in the toolbar above.
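
After restarting, it is worth verifying that the pinned versions took effect and that a GPU is visible (a quick sanity check added here, not part of the original notebook):

import torch
import torchvision
print(torch.__version__)          # expected: 1.6.0
print(torchvision.__version__)    # expected: 0.7.0
print(torch.cuda.is_available())  # must print True for this case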

%cd HFGI

3 Run the Code

#@title Setup Repository
import os
import sys
import time
from argparse import Namespace

import numpy as np
from PIL import Image
import torch
import torchvision.transforms as transforms


# from utils.common import tensor2im
from models.psp import pSp  # we use the pSp framework to load the e4e encoder.

%load_ext autoreload
%autoreload 2
def tensor2im(var):
    # var shape: (3, H, W), values in [-1, 1]; returns a PIL image in [0, 255]
    var = var.cpu().detach().transpose(0, 2).transpose(0, 1).numpy()  # -> (H, W, 3)
    var = (var + 1) / 2
    var[var < 0] = 0
    var[var > 1] = 1
    var = var * 255
    return Image.fromarray(var.astype('uint8'))

Step 1: Load the Pretrained Model

model_path = "checkpoint/ckpt.pt"
ckpt = torch.load(model_path, map_location='cpu')
opts = ckpt['opts']          # training options stored in the checkpoint
opts['is_train'] = False
opts['checkpoint_path'] = model_path
opts = Namespace(**opts)
net = pSp(opts)              # pSp framework wrapping the e4e encoder
net.eval()
net.cuda()
print('Model successfully loaded!')

Step 2: Set the Input Image

#@title Set the input image
# Setup required image transformations
input_img_path = "test_imgs/1919116757.jpg" #@param {type:"string", dropdown}
EXPERIMENT_ARGS = {"image_path": input_img_path}

EXPERIMENT_ARGS['transform'] = transforms.Compose([
    transforms.Resize((256, 256)),
    transforms.ToTensor(),
    transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])])
resize_dims = (256, 256)
image_path = EXPERIMENT_ARGS["image_path"]
original_image = Image.open(image_path)
original_image = original_image.convert("RGB")

run_align = True

Image Alignment

import numpy as np
import PIL
import PIL.Image
import scipy
import scipy.ndimage
import dlib


def get_landmark(filepath, predictor):
    """get landmark with dlib
    :return: np.array shape=(68, 2)
    """
    detector = dlib.get_frontal_face_detector()

    img = dlib.load_rgb_image(filepath)
    dets = detector(img, 1)
    if len(dets) == 0:
        raise RuntimeError('No face detected in {}'.format(filepath))

    # keep the landmarks of the last detected face
    for k, d in enumerate(dets):
        shape = predictor(img, d)

    lm = np.array([[pt.x, pt.y] for pt in shape.parts()])
    return lm


def align_face(filepath, predictor):
    """
    :param filepath: str
    :return: PIL Image
    """

    lm = get_landmark(filepath, predictor)

    lm_chin = lm[0: 17]  # left-right
    lm_eyebrow_left = lm[17: 22]  # left-right
    lm_eyebrow_right = lm[22: 27]  # left-right
    lm_nose = lm[27: 31]  # top-down
    lm_nostrils = lm[31: 36]  # top-down
    lm_eye_left = lm[36: 42]  # left-clockwise
    lm_eye_right = lm[42: 48]  # left-clockwise
    lm_mouth_outer = lm[48: 60]  # left-clockwise
    lm_mouth_inner = lm[60: 68]  # left-clockwise

    # Calculate auxiliary vectors.
    eye_left = np.mean(lm_eye_left, axis=0)
    eye_right = np.mean(lm_eye_right, axis=0)
    eye_avg = (eye_left + eye_right) * 0.5
    eye_to_eye = eye_right - eye_left
    mouth_left = lm_mouth_outer[0]
    mouth_right = lm_mouth_outer[6]
    mouth_avg = (mouth_left + mouth_right) * 0.5
    eye_to_mouth = mouth_avg - eye_avg

    # Choose oriented crop rectangle.
    x = eye_to_eye - np.flipud(eye_to_mouth) * [-1, 1]
    x /= np.hypot(*x)
    x *= max(np.hypot(*eye_to_eye) * 2.0, np.hypot(*eye_to_mouth) * 1.8)
    y = np.flipud(x) * [-1, 1]
    c = eye_avg + eye_to_mouth * 0.1
    quad = np.stack([c - x - y, c - x + y, c + x + y, c + x - y])
    qsize = np.hypot(*x) * 2

    # read image
    img = PIL.Image.open(filepath)

    output_size = 256
    transform_size = 256
    enable_padding = True

    # Shrink.
    shrink = int(np.floor(qsize / output_size * 0.5))
    if shrink > 1:
        rsize = (int(np.rint(float(img.size[0]) / shrink)), int(np.rint(float(img.size[1]) / shrink)))
        img = img.resize(rsize, PIL.Image.ANTIALIAS)
        quad /= shrink
        qsize /= shrink

    # Crop.
    border = max(int(np.rint(qsize * 0.1)), 3)
    crop = (int(np.floor(min(quad[:, 0]))), int(np.floor(min(quad[:, 1]))), int(np.ceil(max(quad[:, 0]))),
            int(np.ceil(max(quad[:, 1]))))
    crop = (max(crop[0] - border, 0), max(crop[1] - border, 0), min(crop[2] + border, img.size[0]),
            min(crop[3] + border, img.size[1]))
    if crop[2] - crop[0] < img.size[0] or crop[3] - crop[1] < img.size[1]:
        img = img.crop(crop)
        quad -= crop[0:2]

    # Pad.
    pad = (int(np.floor(min(quad[:, 0]))), int(np.floor(min(quad[:, 1]))), int(np.ceil(max(quad[:, 0]))),
           int(np.ceil(max(quad[:, 1]))))
    pad = (max(-pad[0] + border, 0), max(-pad[1] + border, 0), max(pad[2] - img.size[0] + border, 0),
           max(pad[3] - img.size[1] + border, 0))
    if enable_padding and max(pad) > border - 4:
        pad = np.maximum(pad, int(np.rint(qsize * 0.3)))
        img = np.pad(np.float32(img), ((pad[1], pad[3]), (pad[0], pad[2]), (0, 0)), 'reflect')
        h, w, _ = img.shape
        y, x, _ = np.ogrid[:h, :w, :1]
        mask = np.maximum(1.0 - np.minimum(np.float32(x) / pad[0], np.float32(w - 1 - x) / pad[2]),
                          1.0 - np.minimum(np.float32(y) / pad[1], np.float32(h - 1 - y) / pad[3]))
        blur = qsize * 0.02
        img += (scipy.ndimage.gaussian_filter(img, [blur, blur, 0]) - img) * np.clip(mask * 3.0 + 1.0, 0.0, 1.0)
        img += (np.median(img, axis=(0, 1)) - img) * np.clip(mask, 0.0, 1.0)
        img = PIL.Image.fromarray(np.uint8(np.clip(np.rint(img), 0, 255)), 'RGB')
        quad += pad[:2]

    # Transform.
    img = img.transform((transform_size, transform_size), PIL.Image.QUAD, (quad + 0.5).flatten(), PIL.Image.BILINEAR)
    if output_size < transform_size:
        img = img.resize((output_size, output_size), PIL.Image.ANTIALIAS)

    # Return aligned image.
    return img
if 'shape_predictor_68_face_landmarks.dat' not in os.listdir():
    # !wget http://dlib.net/files/shape_predictor_68_face_landmarks.dat.bz2
    !bzip2 -dk shape_predictor_68_face_landmarks.dat.bz2

def run_alignment(image_path):
    predictor = dlib.shape_predictor("shape_predictor_68_face_landmarks.dat")
    aligned_image = align_face(filepath=image_path, predictor=predictor)
    print("Aligned image has shape: {}".format(aligned_image.size))
    return aligned_image

if run_align:
    input_image = run_alignment(image_path)
else:
    input_image = original_image

input_image.resize(resize_dims)
Output: Aligned image has shape: (256, 256)

img_transforms = EXPERIMENT_ARGS['transform']
transformed_image = img_transforms(input_image)
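
As a quick sanity check (added here, not in the original notebook), the preprocessed tensor should be 3x256x256 with values normalized to roughly [-1, 1]:

print(transformed_image.shape)  # torch.Size([3, 256, 256])
print(transformed_image.min().item(), transformed_image.max().item())  # within [-1, 1]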

Step 3: High-Fidelity Inversion

HFGI first encodes the image into a latent code and reconstructs it with the StyleGAN decoder; the residual between the input and the reconstruction (the distortion map) is then aligned (ADA) and fused back in as extra conditions, which recovers fine details that a plain latent code loses.

def display_alongside_source_image(result_image, source_image):
    res = np.concatenate([np.array(source_image.resize(resize_dims)),
                          np.array(result_image.resize(resize_dims))], axis=1)
    return Image.fromarray(res)

def get_latents(net, x, is_cars=False):
    codes = net.encoder(x)
    if net.opts.start_from_latent_avg:
        if codes.ndim == 2:
            codes = codes + net.latent_avg.repeat(codes.shape[0], 1, 1)[:, 0, :]
        else:
            codes = codes + net.latent_avg.repeat(codes.shape[0], 1, 1)
    if codes.shape[1] == 18 and is_cars:
        codes = codes[:, :16, :]
    return codes
#@title Set the upsampling mode
mode = "bilinear" #@param {type:"string", dropdown}
with torch.no_grad():
    x = transformed_image.unsqueeze(0).cuda()

    tic = time.time()
    latent_codes = get_latents(net, x)

    # calculate the distortion map
    imgs, _ = net.decoder([latent_codes[0].unsqueeze(0).cuda()], None,
                          input_is_latent=True, randomize_noise=False, return_latents=True)

    res = x - torch.nn.functional.interpolate(torch.clamp(imgs, -1., 1.), size=(256, 256), mode=mode)

    # ADA (adaptive distortion alignment)
    img_edit = torch.nn.functional.interpolate(torch.clamp(imgs, -1., 1.), size=(256, 256), mode=mode)
    res_align = net.grid_align(torch.cat((res, img_edit), 1))

    # consultation fusion
    conditions = net.residue(res_align)

    result_image, _ = net.decoder([latent_codes], conditions,
                                  input_is_latent=True, randomize_noise=False, return_latents=True)
    toc = time.time()
    print('Inference took {:.4f} seconds.'.format(toc - tic))

# Display inversion:
display_alongside_source_image(tensor2im(result_image[0]), input_image)

Step 4: High-Fidelity Image Editing

The attributes that can be edited are as follows:

--edit_attribute: the attribute to edit ('inversion', 'age', 'smile', 'eyes', 'lip', 'beard')

--edit_degree: controls the editing strength (applies to 'age' and 'smile'). The sketch below illustrates what this factor does.
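
Conceptually, an InterFaceGAN edit is a linear move in latent space: the code is shifted along a learned attribute direction, and edit_degree scales the shift. A minimal sketch with hypothetical tensors (the real directions are loaded from .pt files below):

import torch

w = torch.randn(1, 18, 512)          # hypothetical W+ latent code
direction = torch.randn(1, 18, 512)  # hypothetical attribute direction (e.g. smile)
edit_degree = 0.6                    # larger magnitude -> stronger edit
w_edit = w + edit_degree * direction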

from editings import latent_editor
editor = latent_editor.LatentEditor(net.decoder)

#@title Image editing settings
# InterFaceGAN
age_model = "./editings/interfacegan_directions/age.pt" #@param {type:"string", dropdown}
smile_model = "./editings/interfacegan_directions/smile.pt" #@param {type:"string", dropdown}
interfacegan_directions = {
    'age': age_model,
    'smile': smile_model}
edit_status = "smile" #@param {type:"string", dropdown}
edit_direction = torch.load(interfacegan_directions[edit_status]).cuda()

# editing strength (here: degree of smile)
edit_degree = 0.6 #@param {type:"slider", min:-5, max:5, step:0.1}

#@title Upsampling mode settings
img_edit, edit_latents = editor.apply_interfacegan(latent_codes[0].unsqueeze(0).cuda(),
                                                   edit_direction, factor=edit_degree)  # apply the smile edit
# align the distortion map
mode = "bilinear" #@param {type:"string", dropdown}
img_edit = torch.nn.functional.interpolate(torch.clamp(img_edit, -1., 1.), size=(256, 256), mode=mode)
res_align = net.grid_align(torch.cat((res, img_edit), 1))

# fusion
conditions = net.residue(res_align)
result, _ = net.decoder([edit_latents], conditions, input_is_latent=True, randomize_noise=False, return_latents=True)

result = torch.nn.functional.interpolate(result, size=(256, 256), mode=mode)
display_alongside_source_image(tensor2im(result[0]), input_image)
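
To compare editing strengths side by side, you can sweep edit_degree and concatenate the outputs; this sketch simply reuses the calls from the cell above (latent_codes, res, editor, edit_direction, mode and net all come from earlier cells):

results = []
with torch.no_grad():
    for degree in [-2.0, -1.0, 0.0, 1.0, 2.0]:
        img_edit, edit_latents = editor.apply_interfacegan(
            latent_codes[0].unsqueeze(0).cuda(), edit_direction, factor=degree)
        img_edit = torch.nn.functional.interpolate(
            torch.clamp(img_edit, -1., 1.), size=(256, 256), mode=mode)
        conditions = net.residue(net.grid_align(torch.cat((res, img_edit), 1)))
        result, _ = net.decoder([edit_latents], conditions, input_is_latent=True,
                                randomize_noise=False, return_latents=True)
        result = torch.nn.functional.interpolate(result, size=(256, 256), mode=mode)
        results.append(np.array(tensor2im(result[0])))
Image.fromarray(np.concatenate(results, axis=1))  # one strip, weakest to strongest edit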

#@title GANSpace settings
# GANSpace
model = "./editings/ganspace_pca/ffhq_pca.pt" #@param {type:"string", dropdown}
ganspace_pca = torch.load(model)
eyes_param = (54,  7,  8,  20) #@param {type:"raw"}
beard_param = (58,  7,  9,  -20) #@param {type:"raw"}
lip_param = (34, 10, 11,  20) #@param {type:"raw"}
ganspace_directions = {
    'eyes': eyes_param,    # eyes
    'beard': beard_param,  # beard
    'lip': lip_param}      # lips
edit_option = "lip" #@param {type:"string", dropdown}
edit_direction = ganspace_directions[edit_option]

#@title Upsampling mode settings
img_edit, edit_latents = editor.apply_ganspace(latent_codes[0].unsqueeze(0).cuda(), ganspace_pca, [edit_direction])
# align the distortion map
mode = "bilinear" #@param {type:"string", dropdown}
img_edit = torch.nn.functional.interpolate(torch.clamp(img_edit, -1., 1.), size=(256, 256), mode=mode)
res_align = net.grid_align(torch.cat((res, img_edit), 1))
conditions = net.residue(res_align)
result, _ = net.decoder([edit_latents], conditions, input_is_latent=True, randomize_noise=False, return_latents=True)
result = torch.nn.functional.interpolate(result, size=(256, 256), mode=mode)
display_alongside_source_image(tensor2im(result[0]), input_image)
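
To keep an edited image, note that tensor2im returns an ordinary PIL image, so it can be saved directly (the output filename here is arbitrary):

tensor2im(result[0]).save("edited_result.jpg")  # any writable path works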

Training

If you need to train the model yourself, proceed as follows:

Preparation

  1. Download the datasets and modify the dataset paths in ./configs/paths_config.py accordingly (an illustrative sketch follows the model list below).
  2. Download the following pretrained models and put them into ./pretrained.
Model                          | Description
StyleGAN2 (FFHQ)               | Pretrained face generator on FFHQ, from rosinality.
e4e (FFHQ)                     | Pretrained initial encoder on FFHQ, from omertov.
Feature extractor (for face)   | Pretrained IR-SE50 model taken from TreB1eN, for ID loss calculation.
Feature extractor (for car)    | Pretrained ResNet-50 model taken from omertov, for ID loss calculation.
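
For step 1, the edit to ./configs/paths_config.py typically amounts to pointing a couple of dictionaries at your local copies; the key names below are illustrative assumptions, so check the actual file in the repository:

# ./configs/paths_config.py (illustrative sketch; actual key names may differ)
dataset_paths = {
    'ffhq': '/path/to/ffhq/images',         # training images
    'celeba_test': '/path/to/celeba/test',  # evaluation images
}
model_paths = {
    'stylegan_ffhq': './pretrained/stylegan2-ffhq-config-f.pt',  # generator from rosinality
    'ir_se50': './pretrained/model_ir_se50.pth',                 # IR-SE50 for ID loss
}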

Start Training

Modify the training options in train.sh, then run:

bash train.sh