Recognizing a Face and Sending Automated Mails and WhatsApp Messages, Along with Creating an EC2 Instance in AWS

Here is the code. It works in three stages: collect 100 face samples from the webcam, train an LBPH face recognizer on them, and then run live recognition. When the trained face is recognized, the script sends an automated email and a WhatsApp message; when an unknown face is seen, it launches an EC2 instance and attaches an EBS volume through the AWS CLI.

KARTHICK P
Aug 11, 2021

import cv2
import numpy

# Load the Haar cascade used to detect frontal faces
face_classifier = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')

# Return the cropped face region from a frame, or None if no face is found
def face_extractor(img):
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    faces = face_classifier.detectMultiScale(gray, 1.3, 5)

    if len(faces) == 0:
        return None

    for (x, y, w, h) in faces:
        cropped_face = img[y:y+h, x:x+w]

    return cropped_face

# Initialize the webcam
cap = cv2.VideoCapture(0)
count = 0

# Collect 100 samples of your face from webcam input
while True:
    ret, frame = cap.read()
    face = face_extractor(frame)
    if face is not None:
        count += 1
        face = cv2.resize(face, (200, 200))
        face = cv2.cvtColor(face, cv2.COLOR_BGR2GRAY)

        # Save the sample with a unique name (the ./faces/user/ directory must already exist)
        file_name_path = './faces/user/' + str(count) + '.jpg'
        cv2.imwrite(file_name_path, face)

        # Put the count on the image and display a live count
        cv2.putText(face, str(count), (50, 50), cv2.FONT_HERSHEY_COMPLEX, 1, (0, 255, 0), 2)
        cv2.imshow('Face Cropper', face)
    else:
        print("Face not found")

    # Stop on the Enter key or after 100 samples
    if cv2.waitKey(1) == 13 or count == 100:
        break

cap.release()
cv2.destroyAllWindows()
print("Collecting samples completed")
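
A side note on the cascade file: the scripts here assume haarcascade_frontalface_default.xml sits next to the script. If your OpenCV build ships the data module (the opencv-python wheels do), you can load the bundled copy instead; a minimal sketch:

import cv2

# Load the cascade bundled with opencv-python instead of a local XML copy
face_classifier = cv2.CascadeClassifier(
    cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')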

import cv2
import numpy
from os import listdir
from os.path import isfile, join

# Get the training data we previously collected
data_path = './faces/user/'
onlyfiles = [f for f in listdir(data_path) if isfile(join(data_path, f))]

# Create lists for the training data and labels
Training_Data, Labels = [], []

# Open each training image in the data path and build numpy arrays
for i, files in enumerate(onlyfiles):
    image_path = data_path + onlyfiles[i]
    images = cv2.imread(image_path, cv2.IMREAD_GRAYSCALE)
    Training_Data.append(numpy.asarray(images, dtype=numpy.uint8))
    Labels.append(i)

# Convert the labels to a numpy array
Labels = numpy.asarray(Labels, dtype=numpy.int32)

# Initialize the LBPH facial recognizer
# Note: this needs the contrib modules: pip install opencv-contrib-python
# (in OpenCV 3.x the factory was cv2.face.createLBPHFaceRecognizer())
Saketh_model = cv2.face.LBPHFaceRecognizer_create()

# Train the model on the collected samples
Saketh_model.train(numpy.asarray(Training_Data), numpy.asarray(Labels))
print("Model trained successfully")
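
If you do not want to retrain on every run, the trained recognizer can be written to disk and loaded back later; a minimal sketch (the lbph_model.yml filename is just an example):

# Save the trained LBPH model
Saketh_model.write('lbph_model.yml')

# Later: recreate the recognizer and load the saved state
loaded_model = cv2.face.LBPHFaceRecognizer_create()
loaded_model.read('lbph_model.yml')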

import cv2
import numpy
import smtplib
import pywhatkit as kt
import getpass as gp
import os
import subprocess
import time

# Function to send a mail automatically over Gmail's SMTP server
# (newer Gmail accounts typically require an app password for this login)
def sendemail():
    server = smtplib.SMTP_SSL("smtp.gmail.com", 465)
    server.login("mlops.sana@gmail.com", "Sana@Sana1")
    server.sendmail("mlops.sana@gmail.com",
                    "sweetbutpsyco486@gmail.com",
                    "Hello How are you?")
    server.quit()

# Function to send a WhatsApp message
def sendwhatsapp():
    print("HI")
    p_num = gp.getpass(prompt="Type your number here", stream=None)
    msg = 'Hii'
    kt.sendwhatmsg_instantly(p_num, msg)

# Create an EC2 instance, create an EBS volume, and attach the volume
def aws_pull():
    os.system('aws ec2 run-instances --image-id ami-0ad704c126371a549 --instance-type t2.micro --count 1 --subnet-id subnet-433dd728 --security-group-ids sg-0bcfecf55e07cb741 --tag-specifications ResourceType=instance,Tags=[{Key=Name,Value=Helo}] --key-name saketh')
    os.system('aws ec2 create-volume --volume-type gp2 --size 11 --availability-zone ap-south-1a --tag-specifications ResourceType=volume,Tags=[{Key=Name,Value=Hi}]')
    # Wait for the resources to come up before querying their IDs
    time.sleep(60)
    ins_id = subprocess.getoutput("aws ec2 describe-instances --filters Name=tag:Name,Values=Helo --query Reservations[*].Instances[*].[InstanceId] --output text")
    vol_id = subprocess.getoutput("aws ec2 describe-volumes --filters Name=tag:Name,Values=Hi --query Volumes[*].[VolumeId] --output text")
    os.system("aws ec2 attach-volume --device /dev/xvdg --volume-id {y} --instance-id {x}".format(x=ins_id, y=vol_id))

face_classifier = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')

# Detect a face in the frame; return the annotated frame and the cropped face ROI
def face_detector(img, size=0.5):
    # Convert the image to grayscale
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    faces = face_classifier.detectMultiScale(gray, 1.3, 5)
    if len(faces) == 0:
        return img, []

    for (x, y, w, h) in faces:
        cv2.rectangle(img, (x, y), (x+w, y+h), (0, 255, 255), 2)
        roi = img[y:y+h, x:x+w]
        roi = cv2.resize(roi, (200, 200))
    return img, roi
# Open the webcam
cap = cv2.VideoCapture(0)

while True:
    ret, frame = cap.read()
    image, face = face_detector(frame)

    try:
        face = cv2.cvtColor(face, cv2.COLOR_BGR2GRAY)

        # Pass the face to the prediction model;
        # "results" is a tuple of (label, confidence value)
        results = Saketh_model.predict(face)

        if results[1] < 500:
            confidence = int(100 * (1 - (results[1]) / 400))
            display_string = str(confidence) + '% confident it is User'
            cv2.putText(image, display_string, (100, 120), cv2.FONT_HERSHEY_COMPLEX, 1, (255, 120, 150), 2)

            if confidence > 95:
                cv2.putText(image, 'YO Saketh wassapp', (250, 450), cv2.FONT_HERSHEY_COMPLEX, 1, (0, 255, 0), 2)
                cv2.imshow('Face Recognition', image)
                # Known face: send the automated email and WhatsApp message
                sendemail()
                sendwhatsapp()
                break
            else:
                cv2.putText(image, "I don't know who this is", (250, 450), cv2.FONT_HERSHEY_COMPLEX, 1, (0, 255, 0), 2)
                cv2.imshow('Face Recognition', image)
                # Unknown face: launch an EC2 instance and attach an EBS volume
                aws_pull()
                break

    except:
        # No face ROI to predict on in this frame
        cv2.putText(image, "No face found", (220, 120), cv2.FONT_HERSHEY_COMPLEX, 1, (0, 0, 255), 2)
        cv2.putText(image, "Looking for face", (250, 450), cv2.FONT_HERSHEY_COMPLEX, 1, (0, 0, 255), 2)
        cv2.imshow('Face Recognition', image)
        pass

    if cv2.waitKey(1) == 13:
        break

cap.release()
cv2.destroyAllWindows()
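
As an aside, aws_pull() shells out to the AWS CLI. If you already have credentials configured, the same launch-and-attach flow can also be written with boto3 instead of os.system calls; a minimal sketch, reusing the same AMI, subnet, security group, key pair, and tags as above (boto3 must be installed, and the region is assumed to be ap-south-1):

import boto3

ec2 = boto3.resource('ec2', region_name='ap-south-1')
client = boto3.client('ec2', region_name='ap-south-1')

# Launch one t2.micro instance with the same parameters as the CLI call
instance = ec2.create_instances(
    ImageId='ami-0ad704c126371a549',
    InstanceType='t2.micro',
    MinCount=1, MaxCount=1,
    KeyName='saketh',
    SubnetId='subnet-433dd728',
    SecurityGroupIds=['sg-0bcfecf55e07cb741'],
    TagSpecifications=[{'ResourceType': 'instance',
                        'Tags': [{'Key': 'Name', 'Value': 'Helo'}]}])[0]
instance.wait_until_running()

# Create an 11 GiB gp2 volume in the same availability zone and attach it
volume = client.create_volume(AvailabilityZone='ap-south-1a', Size=11, VolumeType='gp2')
client.get_waiter('volume_available').wait(VolumeIds=[volume['VolumeId']])
client.attach_volume(Device='/dev/xvdg', InstanceId=instance.id, VolumeId=volume['VolumeId'])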
