diff --git a/.all-contributorsrc b/.all-contributorsrc
new file mode 100644
index 0000000..2062013
--- /dev/null
+++ b/.all-contributorsrc
@@ -0,0 +1,35 @@
+{
+ "files": [
+ "README.md"
+ ],
+ "imageSize": 100,
+ "commit": false,
+ "commitType": "docs",
+ "commitConvention": "angular",
+ "contributors": [
+ {
+ "login": "abhijitjadhav1998",
+ "name": "Abhijit Jadhav",
+ "avatar_url": "https://avatars.githubusercontent.com/u/38549908?v=4",
+ "profile": "https://www.linkedin.com/in/abhijitjadhav1998/",
+ "contributions": [
+ "projectManagement"
+ ]
+ },
+ {
+ "login": "vthonte",
+ "name": "Vishwesh Thonte",
+ "avatar_url": "https://avatars.githubusercontent.com/u/43621438?v=4",
+ "profile": "http://vthonte.vercel.app/",
+ "contributions": [
+ "maintenance"
+ ]
+ }
+ ],
+ "contributorsPerLine": 7,
+ "skipCi": true,
+ "repoType": "github",
+ "repoHost": "https://github.com",
+ "projectName": "Deepfake_detection_using_deep_learning",
+ "projectOwner": "abhijitjadhav1998"
+}
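> Reviewer note: this config drives the all-contributors tooling, which rewrites the table in the `README.md` listed under `files`. A minimal, hedged sanity check — the key names come straight from the file above, but the script itself is illustrative and not part of the repo:

```python
import json

# Illustrative check: every contributor entry carries the fields the
# all-contributors renderer expects (keys taken from the config above).
with open(".all-contributorsrc") as f:
    cfg = json.load(f)

required = {"login", "name", "avatar_url", "profile", "contributions"}
for person in cfg["contributors"]:
    missing = required - person.keys()
    assert not missing, f"{person.get('login', '?')} is missing {missing}"

print(f"{len(cfg['contributors'])} contributors render into {cfg['files']}")
```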
diff --git a/Django Application/README.md b/Django Application/README.md
index 227d811..2801a8e 100644
--- a/Django Application/README.md
+++ b/Django Application/README.md
@@ -36,7 +36,7 @@ docker run -p 80:80 --volumes-from deepfakeapplication -v static_volume:/home/ap
### Step 5: Star⭐ this repo 😉 and Star⭐ this image
-## We deserve a Coffee ☕
+## We deserve a Coffee ☕
Please note that we have currently only pushed the image of the 20-frames model. If you want to create your own image for another frames model, follow the steps given in the [blog](https://abhijithjadhav.medium.com/dockerise-deepfake-detection-django-application-using-nvidia-cuda-40cdda3b6d38).
diff --git a/Django Application/ml_app/views.py b/Django Application/ml_app/views.py
index ae8f9ed..6d4d827 100644
--- a/Django Application/ml_app/views.py
+++ b/Django Application/ml_app/views.py
@@ -25,12 +25,17 @@
index_template_name = 'index.html'
predict_template_name = 'predict.html'
+about_template_name = "about.html"
im_size = 112
mean=[0.485, 0.456, 0.406]
std=[0.229, 0.224, 0.225]
sm = nn.Softmax()
inv_normalize = transforms.Normalize(mean=-1*np.divide(mean,std),std=np.divide([1,1,1],std))
+if torch.cuda.is_available():
+    device = 'cuda'  # PyTorch has no 'gpu' device string; .to('gpu') raises a RuntimeError
+else:
+    device = 'cpu'
train_transforms = transforms.Compose([
transforms.ToPILImage(),
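> Reviewer note on the hunk above: PyTorch only accepts `'cuda'` (optionally `'cuda:0'`) or `'cpu'` as device strings, which is why the added lines use `'cuda'` rather than `'gpu'`. A minimal sketch of the usual idiom, assuming only that `torch` is installed:

```python
import torch

# Select the best available device; torch.device rejects the string 'gpu'.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

x = torch.randn(2, 3).to(device)  # modules and tensors share the same .to() API
print(device, x.device)
```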
@@ -124,12 +129,12 @@ def im_plot(tensor):
image = cv2.merge((r,g,b))
image = image*[0.22803, 0.22145, 0.216989] + [0.43216, 0.394666, 0.37645]
image = image*255.0
- plt.imshow(image.astype(int))
+ plt.imshow(image.astype('uint8'))
plt.show()
def predict(model,img,path = './', video_file_name=""):
- fmap,logits = model(img.to('cuda'))
+ fmap,logits = model(img.to(device))
img = im_convert(img[:,-1,:,:,:], video_file_name)
params = list(model.parameters())
weight_softmax = model.linear1.weight.detach().cpu().numpy()
@@ -140,7 +145,7 @@ def predict(model,img,path = './', video_file_name=""):
return [int(prediction.item()),confidence]
def plot_heat_map(i, model, img, path = './', video_file_name=''):
- fmap,logits = model(img.to('cuda'))
+ fmap,logits = model(img.to(device))
params = list(model.parameters())
weight_softmax = model.linear1.weight.detach().cpu().numpy()
logits = sm(logits)
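> For context, `predict` and `plot_heat_map` both follow the class-activation-map recipe: project the final feature maps through the classifier weights. A hedged sketch of that computation — the `(channels, h, w)` shape of `fmap` and the layout of `weight_softmax` are assumptions read off this hunk, not verified against the model definition:

```python
import numpy as np
import cv2  # opencv-python, already pinned in requirements.txt

def make_cam(fmap, weight_softmax, class_idx, size=(112, 112)):
    # fmap: final feature maps for one frame, shape (channels, h, w)
    c, h, w = fmap.shape
    cam = weight_softmax[class_idx].dot(fmap.reshape(c, h * w))  # weighted sum over channels
    cam = cam.reshape(h, w)
    cam = (cam - cam.min()) / (cam.max() - cam.min() + 1e-8)     # normalize to [0, 1]
    return cv2.resize(np.uint8(255 * cam), size)                 # upscale for overlay
```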
@@ -173,25 +178,30 @@ def get_accurate_model(sequence_length):
sequence_model = []
final_model = ""
list_models = glob.glob(os.path.join(settings.PROJECT_DIR, "models", "*.pt"))
- for i in list_models:
- model_name.append(i.split("\\")[-1])
- for i in model_name:
+
+ for model_path in list_models:
+ model_name.append(os.path.basename(model_path))
+
+ for model_filename in model_name:
try:
- seq = i.split("_")[3]
- if (int(seq) == sequence_length):
- sequence_model.append(i)
- except:
- pass
+ seq = model_filename.split("_")[3]
+ if int(seq) == sequence_length:
+ sequence_model.append(model_filename)
+        except (IndexError, ValueError):
+            pass  # skip filenames that don't match the expected pattern; int(seq) can also raise ValueError
if len(sequence_model) > 1:
accuracy = []
- for i in sequence_model:
- acc = i.split("_")[1]
- accuracy.append(acc)
+ for filename in sequence_model:
+ acc = filename.split("_")[1]
+ accuracy.append(acc) # Convert accuracy to float for proper comparison
max_index = accuracy.index(max(accuracy))
- final_model = sequence_model[max_index]
+ final_model = os.path.join(settings.PROJECT_DIR, "models", sequence_model[max_index])
+ elif len(sequence_model) == 1:
+ final_model = os.path.join(settings.PROJECT_DIR, "models", sequence_model[0])
else:
- final_model = sequence_model[0]
+ print("No model found for the specified sequence length.") # Handle no models found case
+
return final_model
ALLOWED_VIDEO_EXTENSIONS = set(['mp4','gif','webm','avi','3gp','wmv','flv','mkv'])
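> The refactored `get_accurate_model` above assumes checkpoint names like `model_93_acc_100_frames_final_data.pt` (the format shown in the accuracy table of the top-level README): `split("_")[1]` is the accuracy and `split("_")[3]` the frame count. A small hedged helper that makes that convention explicit:

```python
import os

def parse_model_filename(path):
    """Parse names like 'model_93_acc_100_frames_final_data.pt'.

    Returns (accuracy, sequence_length), or None when the name
    doesn't follow the expected convention."""
    parts = os.path.basename(path).split("_")
    try:
        return float(parts[1]), int(parts[3])  # parts[1]=accuracy, parts[3]=frames
    except (IndexError, ValueError):
        return None

print(parse_model_filename("model_93_acc_100_frames_final_data.pt"))  # (93.0, 100)
```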
@@ -248,6 +258,7 @@ def index(request):
def predict_page(request):
if request.method == "GET":
+ # Redirect to 'home' if 'file_name' is not in session
if 'file_name' not in request.session:
return redirect("ml_app:home")
if 'file_name' in request.session:
@@ -255,107 +266,122 @@ def predict_page(request):
if 'sequence_length' in request.session:
sequence_length = request.session['sequence_length']
path_to_videos = [video_file]
- video_file_name = video_file.split('\\')[-1]
- if settings.DEBUG == False:
- production_video_name = video_file_name.split('/')[3:]
- production_video_name = '/'.join([str(elem) for elem in production_video_name])
- print("Production file name",production_video_name)
- video_file_name_only = video_file_name.split('.')[0]
- video_dataset = validation_dataset(path_to_videos, sequence_length=sequence_length,transform= train_transforms)
- model = Model(2).cuda()
- model_name = os.path.join(settings.PROJECT_DIR,'models', get_accurate_model(sequence_length))
- models_location = os.path.join(settings.PROJECT_DIR,'models')
+ video_file_name = os.path.basename(video_file)
+ video_file_name_only = os.path.splitext(video_file_name)[0]
+ # Production environment adjustments
+ if not settings.DEBUG:
+                production_video_name = os.path.join('/home/app/staticfiles/', video_file_name)  # video_file_name is already a bare filename, so split('/')[3] would raise IndexError
+ print("Production file name", production_video_name)
+ else:
+ production_video_name = video_file_name
+
+ # Load validation dataset
+ video_dataset = validation_dataset(path_to_videos, sequence_length=sequence_length, transform=train_transforms)
+
+ # Load model
+ if(device == "gpu"):
+ model = Model(2).cuda() # Adjust the model instantiation according to your model structure
+ else:
+ model = Model(2).cpu() # Adjust the model instantiation according to your model structure
-        path_to_model = os.path.join(settings.PROJECT_DIR, model_name)
+        path_to_model = get_accurate_model(sequence_length)  # already returns the full model path, so no extra join is needed
- model.load_state_dict(torch.load(path_to_model))
+ model.load_state_dict(torch.load(path_to_model, map_location=torch.device('cpu')))
model.eval()
start_time = time.time()
- # Start: Displaying preprocessing images
+ # Display preprocessing images
print("<=== | Started Videos Splitting | ===>")
preprocessed_images = []
faces_cropped_images = []
cap = cv2.VideoCapture(video_file)
-
frames = []
- while(cap.isOpened()):
+ while cap.isOpened():
ret, frame = cap.read()
- if ret==True:
+ if ret:
frames.append(frame)
- if cv2.waitKey(1) & 0xFF == ord('q'):
- break
else:
break
cap.release()
- for i in range(1, sequence_length+1):
- frame = frames[i]
- image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
- img = pImage.fromarray(image, 'RGB')
- image_name = video_file_name_only+"_preprocessed_"+str(i)+'.png'
- if settings.DEBUG:
- image_path = os.path.join(settings.PROJECT_DIR, 'uploaded_images', image_name)
- else:
- print("image_name",image_name)
- image_path = "/home/app/staticfiles" + image_name
- img.save(image_path)
- preprocessed_images.append(image_name)
- print("<=== | Videos Splitting Done | ===>")
- print("--- %s seconds ---" % (time.time() - start_time))
- # End: Displaying preprocessing images
-
-
- # Start: Displaying Faces Cropped Images
- print("<=== | Started Face Cropping Each Frame | ===>")
+ print(f"Number of frames: {len(frames)}")
+ # Process each frame for preprocessing and face cropping
padding = 40
faces_found = 0
- for i in range(1, sequence_length+1):
+ for i in range(sequence_length):
+ if i >= len(frames):
+ break
frame = frames[i]
- #fig, ax = plt.subplots(1,1, figsize=(5, 5))
- face_locations = face_recognition.face_locations(frame)
+
+ # Convert BGR to RGB
+ rgb_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
+
+ # Save preprocessed image
+ image_name = f"{video_file_name_only}_preprocessed_{i+1}.png"
+ image_path = os.path.join(settings.PROJECT_DIR, 'uploaded_images', image_name)
+ img_rgb = pImage.fromarray(rgb_frame, 'RGB')
+ img_rgb.save(image_path)
+ preprocessed_images.append(image_name)
+
+ # Face detection and cropping
+ face_locations = face_recognition.face_locations(rgb_frame)
if len(face_locations) == 0:
continue
+
top, right, bottom, left = face_locations[0]
- frame_face = frame[top-padding:bottom+padding, left-padding:right+padding]
- image = cv2.cvtColor(frame_face, cv2.COLOR_BGR2RGB)
+                frame_face = frame[max(top - padding, 0):bottom + padding, max(left - padding, 0):right + padding]  # clamp so negative indices don't wrap around
- img = pImage.fromarray(image, 'RGB')
- image_name = video_file_name_only+"_cropped_faces_"+str(i)+'.png'
- if settings.DEBUG:
- image_path = os.path.join(settings.PROJECT_DIR, 'uploaded_images', video_file_name_only+"_cropped_faces_"+str(i)+'.png')
- else:
- image_path = "/home/app/staticfiles" + image_name
- img.save(image_path)
- faces_found = faces_found + 1
+ # Convert cropped face image to RGB and save
+ rgb_face = cv2.cvtColor(frame_face, cv2.COLOR_BGR2RGB)
+ img_face_rgb = pImage.fromarray(rgb_face, 'RGB')
+ image_name = f"{video_file_name_only}_cropped_faces_{i+1}.png"
+ image_path = os.path.join(settings.PROJECT_DIR, 'uploaded_images', image_name)
+ img_face_rgb.save(image_path)
+ faces_found += 1
faces_cropped_images.append(image_name)
- print("<=== | Face Cropping Each Frame Done | ===>")
+
+ print("<=== | Videos Splitting and Face Cropping Done | ===>")
print("--- %s seconds ---" % (time.time() - start_time))
- # No face is detected
+ # No face detected
if faces_found == 0:
- return render(request, predict_template_name, {"no_faces": True})
+            return render(request, predict_template_name, {"no_faces": True})
- # End: Displaying Faces Cropped Images
+ # Perform prediction
try:
heatmap_images = []
- for i in range(0, len(path_to_videos)):
- output = ""
- print("<=== | Started Predicition | ===>")
+ output = ""
+ confidence = 0.0
+
+ for i in range(len(path_to_videos)):
+ print("<=== | Started Prediction | ===>")
prediction = predict(model, video_dataset[i], './', video_file_name_only)
confidence = round(prediction[1], 1)
- print("<=== | Predicition Done | ===>")
- # print("<=== | Heat map creation started | ===>")
- # for j in range(0, sequence_length):
- # heatmap_images.append(plot_heat_map(j, model, video_dataset[i], './', video_file_name_only))
- if prediction[0] == 1:
- output = "REAL"
- else:
- output = "FAKE"
- print("Prediction : " , prediction[0],"==",output ,"Confidence : " , confidence)
+ output = "REAL" if prediction[0] == 1 else "FAKE"
+ print("Prediction:", prediction[0], "==", output, "Confidence:", confidence)
+ print("<=== | Prediction Done | ===>")
print("--- %s seconds ---" % (time.time() - start_time))
+
+ # Uncomment if you want to create heat map images
+ # for j in range(sequence_length):
+ # heatmap_images.append(plot_heat_map(j, model, video_dataset[i], './', video_file_name_only))
+
+ # Render results
+ context = {
+ 'preprocessed_images': preprocessed_images,
+ 'faces_cropped_images': faces_cropped_images,
+ 'heatmap_images': heatmap_images,
+ 'original_video': production_video_name,
+ 'models_location': os.path.join(settings.PROJECT_DIR, 'models'),
+ 'output': output,
+ 'confidence': confidence
+ }
+
-        if settings.DEBUG:
-            return render(request, predict_template_name, {'preprocessed_images': preprocessed_images, 'heatmap_images': heatmap_images, "faces_cropped_images": faces_cropped_images, "original_video": video_file_name, "models_location": models_location, "output": output, "confidence": confidence})
-        else:
-            return render(request, predict_template_name, {'preprocessed_images': preprocessed_images, 'heatmap_images': heatmap_images, "faces_cropped_images": faces_cropped_images, "original_video": production_video_name, "models_location": models_location, "output": output, "confidence": confidence})
-        except:
+            # DEBUG and production now render the identical context, so one return suffices
+            return render(request, predict_template_name, context)
+
+ except Exception as e:
+ print(f"Exception occurred during prediction: {e}")
return render(request, 'cuda_full.html')
def about(request):
return render(request, about_template_name)
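> The most important portability change in this file is the `map_location` argument to `torch.load`: checkpoints saved on a CUDA machine otherwise fail to load on CPU-only hosts. A minimal sketch of the pattern, assuming (as `views.py` does) that the `.pt` files store a plain `state_dict`:

```python
import torch

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

# Remap GPU-saved tensors to CPU at load time; copying into the model via
# load_state_dict afterwards places the weights wherever the model lives.
state_dict = torch.load('model_93_acc_100_frames_final_data.pt',
                        map_location='cpu')
# model = Model(2)              # the two-class detector defined in views.py
# model.load_state_dict(state_dict)
# model.to(device).eval()
```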
diff --git a/Django Application/requirements.txt b/Django Application/requirements.txt
index 467398f..e863264 100644
--- a/Django Application/requirements.txt
+++ b/Django Application/requirements.txt
@@ -1,101 +1,101 @@
-altair==4.1.0
-asgiref==3.2.7
+altair==5.3.0
+asgiref==3.8.1
astor==0.8.1
-attrs==19.3.0
-backcall==0.1.0
-base58==2.0.0
-bleach==3.1.4
-blinker==1.4
-cachetools==4.0.0
-certifi==2019.11.28
-chardet==3.0.4
-click==7.1.1
-cmake==3.16.3
-colorama==0.4.3
-cycler==0.10.0
-decorator==4.4.2
-defusedxml==0.6.0
-Django==3.0.5
-dlib==19.19.0
-docutils==0.15.2
-entrypoints==0.3
+attrs==23.2.0
+backcall==0.2.0
+base58==2.1.1
+bleach==6.1.0
+blinker==1.8.2
+cachetools==5.3.3
+certifi==2024.6.2
+chardet==5.2.0
+click==8.1.7
+cmake==3.29.6
+colorama==0.4.6
+cycler==0.12.1
+decorator==5.1.1
+defusedxml==0.7.1
+Django==5.0.6
+dlib==19.24.2
+docutils==0.21.2
+entrypoints==0.4
enum-compat==0.0.3
face-recognition==1.3.0
face-recognition-models==0.3.0
-future==0.18.2
-google==2.0.3
-google-api-core==1.16.0
-google-api-python-client==1.8.0
-google-auth==1.12.0
-google-auth-httplib2==0.0.3
-googleapis-common-protos==1.51.0
-httplib2==0.17.0
-idna==2.9
-ipykernel==5.2.0
-ipython==7.13.0
+future==1.0.0
+google==3.0.0
+google-api-core==2.19.1
+google-api-python-client==2.134.0
+google-auth==2.30.0
+google-auth-httplib2==0.2.0
+googleapis-common-protos==1.63.2
+httplib2==0.22.0
+idna==3.7
+ipykernel==6.29.4
+ipython==8.12.3
ipython-genutils==0.2.0
-ipywidgets==7.5.1
+ipywidgets==8.1.3
jedi==0.16.0
-Jinja2==2.11.1
-jmespath==0.9.5
-json5==0.9.4
-jsonschema==3.2.0
-jupyter-client==6.1.2
-jupyter-core==4.6.3
-jupyterlab==2.0.1
-jupyterlab-server==1.0.7
-kiwisolver==1.1.0
-MarkupSafe==1.1.1
-matplotlib==3.2.1
-mistune==0.8.4
-nbconvert==5.6.1
-nbformat==5.0.4
-notebook==6.0.3
-numpy==1.18.2
-opencv-python==4.2.0.32
-packaging==20.3
-pandas==1.0.3
-pandocfilters==1.4.2
-parso==0.6.2
+Jinja2==3.1.4
+jmespath==1.0.1
+json5==0.9.25
+jsonschema==4.22.0
+jupyter-client==8.6.2
+jupyter-core==5.7.2
+jupyterlab==4.2.2
+jupyterlab-server==2.27.2
+kiwisolver==1.4.5
+MarkupSafe==2.1.5
+matplotlib==3.9.0
+mistune==3.0.2
+nbconvert==7.16.4
+nbformat==5.10.4
+notebook==7.2.1
+numpy==1.26.4
+opencv-python==4.10.0.84
+packaging==24.1
+pandas==2.2.2
+pandocfilters==1.5.1
+parso==0.8.4
pathtools==0.1.2
pickleshare==0.7.5
-Pillow==7.0.0
-prometheus-client==0.7.1
-prompt-toolkit==3.0.5
-protobuf==3.11.3
-pyasn1==0.4.8
-pyasn1-modules==0.2.8
-pycodestyle==2.5.0
-pydeck==0.3.0b3
-Pygments==2.6.1
-pyparsing==2.4.6
-pyrsistent==0.16.0
-python-dateutil==2.8.1
+Pillow==10.3.0
+prometheus-client==0.20.0
+prompt-toolkit==3.0.47
+protobuf==5.27.2
+pyasn1==0.6.0
+pyasn1-modules==0.4.0
+pycodestyle==2.12.0
+pydeck==0.9.0b1
+Pygments==2.18.0
+pyparsing==3.1.2
+pyrsistent==0.20.0
+python-dateutil==2.9.0
# pytz==2019.3 Commenting for production deployment
# pywin32==227
pywinpty==0.5.7
-PyYAML==5.3.1
-pyzmq==19.0.0
-requests==2.23.0
-rsa==4.0
-s3transfer==0.3.3
-Send2Trash==1.5.0
-six==1.14.0
-soupsieve==2.0
-sqlparse==0.3.1
-terminado==0.8.3
-testpath==0.4.4
-toml==0.10.0
-toolz==0.10.0
-torch==1.4.0
-torchvision==0.5.0
-tornado==5.1.1
-traitlets==4.3.3
-tzlocal==2.0.0
-uritemplate==3.0.1
-urllib3==1.25.8
-validators==0.14.2
-watchdog==0.10.2
-wcwidth==0.1.9
+PyYAML==6.0.1
+pyzmq==26.0.3
+requests==2.32.3
+rsa==4.9
+s3transfer==0.10.2
+Send2Trash==1.8.3
+six==1.16.0
+soupsieve==2.5
+sqlparse==0.5.0
+terminado==0.18.1
+testpath==0.6.0
+toml==0.10.2
+toolz==0.12.1
+torch==2.3.1
+torchvision==0.18.1
+tornado==6.4.1
+traitlets==5.14.3
+tzlocal==5.2
+uritemplate==4.1.1
+urllib3==2.2.2
+validators==0.28.3
+watchdog==4.0.1
+wcwidth==0.2.13
webencodings==0.5.1
-widgetsnbextension==3.5.1
+widgetsnbextension==4.0.11
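> Every pin above moved in one sweep (Django 3.0→5.0, torch 1.4→2.3, and so on), so it is worth verifying that an environment actually matches before running the app. A small standard-library-only check; the four packages spot-checked here were chosen by the editor from the pins above, not by the diff:

```python
from importlib.metadata import PackageNotFoundError, version

# Spot-check a few pins the Django app depends on most heavily.
pins = {"Django": "5.0.6", "torch": "2.3.1", "torchvision": "0.18.1", "numpy": "1.26.4"}

for name, expected in pins.items():
    try:
        installed = version(name)
        status = "OK" if installed == expected else f"MISMATCH (installed {installed})"
    except PackageNotFoundError:
        status = "MISSING"
    print(f"{name}=={expected}: {status}")
```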
diff --git a/README.md b/README.md
index b45f5f6..2002370 100644
--- a/README.md
+++ b/README.md
@@ -57,10 +57,35 @@ Deepfake_detection_using_deep_learning
|model_93_acc_100_frames_final_data.pt| 6000 | 100 | 93.58794|
## 6. Contributors
- 1. Abhijit Jadhav
- 2. Jay Patel
- 3. Hitendra Patil
- 4. Abhishek Patange
+
+<!-- ALL-CONTRIBUTORS-LIST:START - Do not remove or modify this section -->
+<!-- prettier-ignore-start -->
+<!-- markdownlint-disable -->
+<table>
+  <tbody>
+    <tr>
+      <td align="center"><a href="https://www.linkedin.com/in/abhijitjadhav1998/"><img src="https://avatars.githubusercontent.com/u/38549908?v=4" width="100px;" alt="Abhijit Jadhav"/><br /><sub><b>Abhijit Jadhav</b></sub></a><br /><a href="#projectManagement-abhijitjadhav1998" title="Project Management">📆</a></td>
+      <td align="center"><a href="http://vthonte.vercel.app/"><img src="https://avatars.githubusercontent.com/u/43621438?v=4" width="100px;" alt="Vishwesh Thonte"/><br /><sub><b>Vishwesh Thonte</b></sub></a><br /><a href="#maintenance-vthonte" title="Maintenance">🚧</a></td>
+    </tr>
+  </tbody>
+</table>
+
+<!-- markdownlint-restore -->
+<!-- prettier-ignore-end -->
+<!-- ALL-CONTRIBUTORS-LIST:END -->