This commit is contained in:
Davis King 2017-09-27 06:48:04 -04:00
commit 6283a4b51c
4 changed files with 196 additions and 12 deletions

View File

@ -0,0 +1,93 @@
#!/usr/bin/python
# The contents of this file are in the public domain. See LICENSE_FOR_EXAMPLE_PROGRAMS.txt
#
# This example shows how to use dlib's face recognition tool for image alignment.
#
# COMPILING/INSTALLING THE DLIB PYTHON INTERFACE
# You can install dlib using the command:
# pip install dlib
#
# Alternatively, if you want to compile dlib yourself then go into the dlib
# root folder and run:
# python setup.py install
# or
# python setup.py install --yes USE_AVX_INSTRUCTIONS
# if you have a CPU that supports AVX instructions, since this makes some
# things run faster. This code will also use CUDA if you have CUDA and cuDNN
# installed.
#
# Compiling dlib should work on any operating system so long as you have
# CMake and boost-python installed. On Ubuntu, this can be done easily by
# running the command:
# sudo apt-get install libboost-python-dev cmake
#
# Also note that this example requires OpenCV and Numpy which can be installed
# via the command:
# pip install opencv-python numpy
# Or downloaded from http://opencv.org/releases.html
import sys
import dlib
import cv2
import numpy as np

# Validate command line arguments before loading any models.
if len(sys.argv) != 3:
    print(
        "Call this program like this:\n"
        "   ./face_alignment.py shape_predictor_5_face_landmarks.dat ../examples/faces/bald_guys.jpg\n"
        "You can download a trained facial shape predictor from:\n"
        "    http://dlib.net/files/shape_predictor_5_face_landmarks.dat.bz2\n")
    # sys.exit() is the correct way to end a script (the builtin exit() is
    # meant for interactive sessions); a nonzero status signals the error.
    sys.exit(1)

predictor_path = sys.argv[1]
face_file_path = sys.argv[2]

# Load all the models we need: a detector to find the faces, a shape predictor
# to find face landmarks so we can precisely localize the face.
detector = dlib.get_frontal_face_detector()
sp = dlib.shape_predictor(predictor_path)

# Load the image using OpenCV.
bgr_img = cv2.imread(face_file_path)
if bgr_img is None:
    print("Sorry, we could not load '{}' as an image".format(face_file_path))
    sys.exit(1)

# Convert to RGB since dlib uses RGB images.
img = cv2.cvtColor(bgr_img, cv2.COLOR_BGR2RGB)

# Ask the detector to find the bounding boxes of each face. The 1 in the
# second argument indicates that we should upsample the image 1 time. This
# will make everything bigger and allow us to detect more faces.
dets = detector(img, 1)

num_faces = len(dets)
if num_faces == 0:
    print("Sorry, there were no faces found in '{}'".format(face_file_path))
    sys.exit(1)

# Find the 5 face landmarks we need to do the alignment.
faces = dlib.full_object_detections()
for detection in dets:
    faces.append(sp(img, detection))

# Get the aligned face images.
# Optionally:
# images = dlib.get_face_chips(img, faces, size=160, padding=0.25)
images = dlib.get_face_chips(img, faces, size=320)
for image in images:
    cv_rgb_image = np.array(image).astype(np.uint8)
    # OpenCV expects BGR channel ordering for display.
    cv_bgr_img = cv2.cvtColor(cv_rgb_image, cv2.COLOR_RGB2BGR)
    cv2.imshow('image', cv_bgr_img)
    cv2.waitKey(0)

# It is also possible to get a single chip.
image = dlib.get_face_chip(img, faces[0])
cv_rgb_image = np.array(image).astype(np.uint8)
cv_bgr_img = cv2.cvtColor(cv_rgb_image, cv2.COLOR_RGB2BGR)
cv2.imshow('image', cv_bgr_img)
cv2.waitKey(0)

cv2.destroyAllWindows()

View File

@ -119,7 +119,8 @@ print("Saving faces in largest cluster to output folder...")
# Save each face in the largest cluster to its own file.  The rendered diff
# left both the pre- and post-change call lines in place; only the new call
# with explicit size/padding belongs here.
for i, index in enumerate(indices):
    img, shape = images[index]
    file_path = os.path.join(output_folder_path, "face_" + str(i))
    # The size and padding arguments are optional with default size=150x150 and padding=0.25
    dlib.save_face_chip(img, shape, file_path, size=150, padding=0.25)

View File

@ -1 +1,3 @@
scikit-image>=0.9.3
opencv-python
numpy

View File

@ -177,13 +177,18 @@ boost::python::list chinese_whispers_clustering(boost::python::list descriptors,
void save_face_chips (
object img,
const std::vector<full_object_detection>& faces,
const std::string& chip_filename
const std::string& chip_filename,
size_t size = 150,
float padding = 0.25
)
{
if (!is_rgb_python_image(img))
throw dlib::error("Unsupported image type, must be RGB image.");
int num_faces = faces.size();
std::vector<chip_details> dets;
for (auto& f : faces)
dets.push_back(get_face_chip_details(f, 150, 0.25));
dets.push_back(get_face_chip_details(f, size, padding));
dlib::array<matrix<rgb_pixel>> face_chips;
extract_image_chips(numpy_rgb_image(img), dets, face_chips);
int i=0;
@ -206,14 +211,87 @@ void save_face_chips (
// Saves a single aligned, upright face chip extracted from img to disk using
// chip_filename as the file name prefix.  The chip is size x size pixels with
// the given relative padding around the detected face box (defaults match the
// historical 150x150 / 0.25 behavior).  Delegates to save_face_chips; see that
// function for the exact on-disk format.
// NOTE: the rendered diff contained both the old 3-argument and new 5-argument
// signature/call lines; this is the coherent post-change version.
void save_face_chip (
    object img,
    const full_object_detection& face,
    const std::string& chip_filename,
    size_t size = 150,
    float padding = 0.25
)
{
    // Wrap the single detection in a one-element vector and reuse the
    // batch implementation.
    std::vector<full_object_detection> faces(1, face);
    save_face_chips(img, faces, chip_filename, size, padding);
}
BOOST_PYTHON_FUNCTION_OVERLOADS(save_face_chip_with_defaults, save_face_chip, 3, 5)
BOOST_PYTHON_FUNCTION_OVERLOADS(save_face_chips_with_defaults, save_face_chips, 3, 5)
// ----------------------------------------------------------------------------------------
// Returns the aligned, upright face chips for every detection in faces as a
// python list of images.  Each image is a list of rows, each row a list of
// [r, g, b] pixel values.  Each chip is size x size pixels with the given
// relative padding around the face box.
// Throws dlib::error if img is not an RGB image or faces is empty.
boost::python::list get_face_chips (
    object img,
    const std::vector<full_object_detection>& faces,
    size_t size = 150,
    float padding = 0.25
)
{
    if (!is_rgb_python_image(img))
        throw dlib::error("Unsupported image type, must be RGB image.");

    if (faces.size() < 1) {
        throw dlib::error("No faces were specified in the faces array.");
    }

    // Compute the chip location for every face, then extract them all in
    // one pass over the image.
    std::vector<chip_details> dets;
    for (const auto& f : faces)
        dets.push_back(get_face_chip_details(f, size, padding));
    dlib::array<matrix<rgb_pixel>> face_chips;
    extract_image_chips(numpy_rgb_image(img), dets, face_chips);

    boost::python::list chips_list;
    const long side = static_cast<long>(size);
    for (const auto& chip : face_chips)
    {
        // Convert the chip into nested python lists of [r, g, b] triples.
        // Renamed from 'img', which shadowed the function parameter.
        boost::python::list chip_img;
        for (long row = 0; row < side; ++row) {
            boost::python::list row_list;
            for (long col = 0; col < side; ++col) {
                const rgb_pixel pixel = chip(row, col);
                boost::python::list item;
                item.append(pixel.red);
                item.append(pixel.green);
                item.append(pixel.blue);
                row_list.append(item);
            }
            chip_img.append(row_list);
        }
        chips_list.append(chip_img);
    }
    return chips_list;
}
// Returns a single aligned, upright face chip for one detection as a python
// list of rows of [r, g, b] pixel values, size x size pixels with the given
// relative padding.  Thin convenience wrapper around get_face_chips.
boost::python::list get_face_chip (
    object img,
    const full_object_detection& face,
    size_t size = 150,
    float padding = 0.25
)
{
    // Wrap the single detection and reuse the batch routine.
    const std::vector<full_object_detection> faces(1, face);
    boost::python::list chips = get_face_chips(img, faces, size, padding);
    // Exactly one chip should come back; anything else is an error.
    if (boost::python::len(chips) != 1)
        throw dlib::error("No face chips found!");
    return boost::python::extract<boost::python::list>(chips[0]);
}
BOOST_PYTHON_FUNCTION_OVERLOADS(get_face_chip_with_defaults, get_face_chip, 2, 4)
BOOST_PYTHON_FUNCTION_OVERLOADS(get_face_chips_with_defaults, get_face_chips, 2, 4)
// ----------------------------------------------------------------------------------------
@ -232,17 +310,27 @@ void bind_face_recognition()
);
}
def("save_face_chip", &save_face_chip, (arg("img"),arg("face"),arg("chip_filename")),
"Takes an image and a full_object_detection that references a face in that image and saves the face with the specified file name prefix. The face will be rotated upright and scaled to 150x150 pixels."
);
def("save_face_chips", &save_face_chips, (arg("img"),arg("faces"),arg("chip_filename")),
"Takes an image and a full_object_detections object that reference faces in that image and saves the faces with the specified file name prefix. The faces will be rotated upright and scaled to 150x150 pixels."
);
def("save_face_chip", &save_face_chip, save_face_chip_with_defaults(
"Takes an image and a full_object_detection that references a face in that image and saves the face with the specified file name prefix. The face will be rotated upright and scaled to 150x150 pixels or with the optional specified size and padding.",
(arg("img"), arg("face"), arg("chip_filename"), arg("size"), arg("padding"))
));
def("save_face_chips", &save_face_chips, save_face_chips_with_defaults(
"Takes an image and a full_object_detections object that reference faces in that image and saves the faces with the specified file name prefix. The faces will be rotated upright and scaled to 150x150 pixels or with the optional specified size and padding.",
(arg("img"), arg("faces"), arg("chip_filename"), arg("size"), arg("padding"))
));
def("get_face_chip", &get_face_chip, get_face_chip_with_defaults(
"Takes an image and a full_object_detection that references a face in that image and returns the face as a list of lists representing the image. The face will be rotated upright and scaled to 150x150 pixels or with the optional specified size and padding.",
(arg("img"), arg("face"), arg("size"), arg("padding"))
));
def("get_face_chips", &get_face_chips, get_face_chips_with_defaults(
"Takes an image and a full_object_detections object that reference faces in that image and returns the faces as a list of list of lists representing the image. The faces will be rotated upright and scaled to 150x150 pixels or with the optional specified size and padding.",
(arg("img"), arg("faces"), arg("size"), arg("padding"))
));
def("chinese_whispers_clustering", &chinese_whispers_clustering, (arg("descriptors"), arg("threshold")),
"Takes a list of descriptors and returns a list that contains a label for each descriptor. Clustering is done using dlib::chinese_whispers."
);
{
{
typedef std::vector<full_object_detection> type;
class_<type>("full_object_detections", "An array of full_object_detection objects.")
.def(vector_indexing_suite<type>())