import numpy as np
import cv2
import mediapipe as mp
from abc import ABC, abstractmethod
from tensorflow import keras
from IPython.display import Video
# Abstract Detector Class
class Detector(ABC):
    def __init__(self):
        self.load()

    @abstractmethod
    def load(self):
        pass

    @abstractmethod
    def findFaces(self, img):
        pass
# Deep Neural Network Face Detection
class DNN(Detector):
    def load(self):
        self.model = cv2.dnn.readNetFromCaffe("./Data/models/deploy.prototxt.txt",
                                              "./Data/models/classifier.caffemodel")

    def findFaces(self, img):
        faces = []
        (h, w) = img.shape[:2]
        # Preprocess: resize to the network's 300x300 input and subtract the
        # per-channel mean before forwarding through the network
        blob = cv2.dnn.blobFromImage(cv2.resize(img, (300, 300)), 1.0, (300, 300),
                                     (104.0, 177.0, 123.0))
        self.model.setInput(blob)
        detections = self.model.forward()
        # Each detection row holds the confidence at index 2 and the box
        # corners, normalized to [0, 1], at indices 3:7
        for i in range(detections.shape[2]):
            confidence = detections[0, 0, i, 2]
            if confidence > 0.5:
                # Scale the normalized box back to pixel coordinates
                box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
                faces.append(box.astype("int"))
        return faces
# Haar Cascade Classifier Face Detection
class HCC(Detector):
    def load(self):
        self.model = cv2.CascadeClassifier("./Data/models/haarcascade_frontalface_default.xml")

    def findFaces(self, img):
        faces = []
        detections = self.model.detectMultiScale(img, scaleFactor=1.1, minNeighbors=3)
        # detectMultiScale returns (x, y, width, height); convert to corner coordinates
        for (xmin, ymin, width, height) in detections:
            faces.append((int(xmin), int(ymin), int(xmin + width), int(ymin + height)))
        return faces
# MediaPipe Face Detection
class MPFD(Detector):
    def load(self):
        self.model = mp.solutions.face_detection.FaceDetection(min_detection_confidence=0.35)

    def findFaces(self, img):
        faces = []
        (h, w) = img.shape[:2]
        # MediaPipe expects RGB input, while OpenCV frames are BGR
        imgRGB = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        results = self.model.process(imgRGB)
        if results.detections:
            for detection in results.detections:
                # The bounding box is normalized; scale it to pixel coordinates
                bbox = detection.location_data.relative_bounding_box
                faces.append((int(bbox.xmin * w), int(bbox.ymin * h),
                              int((bbox.xmin + bbox.width) * w),
                              int((bbox.ymin + bbox.height) * h)))
        return faces
# Abstract Classifier Class
class Classifier(ABC):
    def __init__(self):
        self.load()

    @abstractmethod
    def load(self):
        pass

    @abstractmethod
    def classifyFace(self, img):
        pass
# Convolutional Neural Network Mask Classifier
class CNN(Classifier):
    def load(self):
        self.model = keras.models.load_model("./Data/models/classifier.h5")

    def classifyFace(self, img):
        # Match the network's training input: 32x32 grayscale, scaled to [0, 1]
        grayscale = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        resized = cv2.resize(grayscale, (32, 32))
        scaledInput = resized / 255.0
        # Add batch and channel dimensions -> shape (1, 32, 32, 1)
        test = np.stack(([scaledInput],), axis=-1)
        prediction = self.model.predict(test)
        # Class 0 = mask, class 1 = no mask (see MaskDetection below)
        return np.argmax(prediction[0])
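# A note on the reshape in classifyFace: np.stack(([x],), axis=-1) is equivalent
# to x[np.newaxis, ..., np.newaxis]; both add the batch and channel dimensions
# the model expects. A quick shape check (pure NumPy, no model required):
dummy = np.zeros((32, 32))
assert np.stack(([dummy],), axis=-1).shape == (1, 32, 32, 1)
assert dummy[np.newaxis, ..., np.newaxis].shape == (1, 32, 32, 1)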
# Mask Detection Function for Processing Single Images and Adding Annotations
def MaskDetection(img, detector, classifier):
    detections = detector.findFaces(img)
    for (xmin, ymin, xmax, ymax) in detections:
        # Clamp the box to the image bounds (the DNN detector can return
        # coordinates slightly outside the frame) and skip empty crops
        xmin, ymin = max(xmin, 0), max(ymin, 0)
        xmax, ymax = min(xmax, img.shape[1]), min(ymax, img.shape[0])
        face = img[ymin:ymax, xmin:xmax]
        if face.size == 0:
            continue
        prediction = classifier.classifyFace(face)
        # prediction == 1 means no mask was detected
        color = [0, 0, 255] if prediction else [0, 255, 0]
        cv2.rectangle(img, (xmin, ymin), (xmax, ymax), color, 2)
        text = "PLEASE PUT YOUR MASK ON!" if prediction else "Thank you for wearing a mask!"
        # Draw the label above the box when there is room, otherwise below
        y = ymin - 10 if ymin - 10 > 10 else ymin + 10
        cv2.putText(img, text, (xmin, y), cv2.FONT_HERSHEY_SIMPLEX, 0.45, color, 2)
    return img
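# MaskDetection also annotates single still images; a minimal sketch (both
# paths below are hypothetical) - uncomment to try it:
# frame = cv2.imread('./Data/images/crowd.jpg')  # hypothetical input path
# annotated = MaskDetection(frame, DNN(), CNN())
# cv2.imwrite('./Data/images/crowd_annotated.jpg', annotated)  # hypothetical output path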
# Demo Processing
def demo(inFile, outFile):
    print("Starting processing on " + inFile)
    detector = DNN()
    classifier = CNN()
    vid = cv2.VideoCapture(inFile)
    total = int(vid.get(cv2.CAP_PROP_FRAME_COUNT))
    frame_width = int(vid.get(cv2.CAP_PROP_FRAME_WIDTH))
    frame_height = int(vid.get(cv2.CAP_PROP_FRAME_HEIGHT))
    # VP9 keeps the output playable inside the notebook via IPython's Video widget
    fourcc = cv2.VideoWriter_fourcc(*'vp09')
    out = cv2.VideoWriter(outFile, fourcc, 30.0, (frame_width, frame_height))
    i = 0
    while i < total:
        success, frame = vid.read()
        if not success:
            # Stop instead of looping forever if the stream ends early
            break
        classified = MaskDetection(frame, detector, classifier)
        out.write(classified)
        i += 1
        if i % 50 == 0:
            print("Frame " + str(i) + " out of " + str(total) + " processed")
    out.release()
    vid.release()
    print("Finished processing; output available at " + outFile)
## Perform processing - may take a few minutes per video
demo('./Data/video/sample1.mp4', './Data/video/sample1_processed.mp4')
demo('./Data/video/sample2.mp4', './Data/video/sample2_processed.mp4')
demo('./Data/video/sample3.mp4', './Data/video/sample3_processed.mp4')
Starting processing on ./Data/video/sample1.mp4
Frame 50 out of 502 processed
Frame 100 out of 502 processed
Frame 150 out of 502 processed
Frame 200 out of 502 processed
Frame 250 out of 502 processed
Frame 300 out of 502 processed
Frame 350 out of 502 processed
Frame 400 out of 502 processed
Frame 450 out of 502 processed
Frame 500 out of 502 processed
Finished processing; output available at ./Data/video/sample1_processed.mp4
Starting processing on ./Data/video/sample2.mp4
Frame 50 out of 437 processed
Frame 100 out of 437 processed
Frame 150 out of 437 processed
Frame 200 out of 437 processed
Frame 250 out of 437 processed
Frame 300 out of 437 processed
Frame 350 out of 437 processed
Frame 400 out of 437 processed
Finished processing; output available at ./Data/video/sample2_processed.mp4
Starting processing on ./Data/video/sample3.mp4
Frame 50 out of 193 processed
Frame 100 out of 193 processed
Frame 150 out of 193 processed
Finished processing; output available at ./Data/video/sample3_processed.mp4
# Display Sample 1
Video('./Data/video/sample1_processed.mp4', embed=True)