diff --git a/Lab4/1/output_faces.mp4 b/Lab4/1/output_faces.mp4
new file mode 100644
index 0000000000000000000000000000000000000000..56949b1825df311e693764bc33cb2ea8fa1f633c
Binary files /dev/null and b/Lab4/1/output_faces.mp4 differ
diff --git a/Lab4/2/streamlit.webm b/Lab4/2/streamlit.webm
new file mode 100644
index 0000000000000000000000000000000000000000..b339ab618506b012059be7e6da268f8e01f04ca5
Binary files /dev/null and b/Lab4/2/streamlit.webm differ
diff --git a/Lab4/3/archive/arc1.png b/Lab4/3/archive/arc1.png
new file mode 100644
index 0000000000000000000000000000000000000000..69f8653272fff8f46e3d264fe5c97d1c720baa0e
Binary files /dev/null and b/Lab4/3/archive/arc1.png differ
diff --git a/Lab4/3/archive/arc10.jpg b/Lab4/3/archive/arc10.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..3b4795fb82f533ff88f3126e1669feb4999fbc8b
Binary files /dev/null and b/Lab4/3/archive/arc10.jpg differ
diff --git a/Lab4/3/archive/arc11.jpg b/Lab4/3/archive/arc11.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..8962bbd52fc0646591e4c5468a6a4346e918c2c3
Binary files /dev/null and b/Lab4/3/archive/arc11.jpg differ
diff --git a/Lab4/3/archive/arc12.jpg b/Lab4/3/archive/arc12.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..ecc11c6f9b1b29e11af7e194ccef484ae058c44a
Binary files /dev/null and b/Lab4/3/archive/arc12.jpg differ
diff --git a/Lab4/3/archive/arc13.jpg b/Lab4/3/archive/arc13.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..edceba13b9c9993d23d7351e3b9c96db39f96a6d
Binary files /dev/null and b/Lab4/3/archive/arc13.jpg differ
diff --git a/Lab4/3/archive/arc14.jpg b/Lab4/3/archive/arc14.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..0b15fc8e2ff7f5bc02ddaa58a207ff70c5de078a
Binary files /dev/null and b/Lab4/3/archive/arc14.jpg differ
diff --git a/Lab4/3/archive/arc15.jpg b/Lab4/3/archive/arc15.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..efce0477cb0232909f64ffed66122cd554640398
Binary files /dev/null and b/Lab4/3/archive/arc15.jpg differ
diff --git a/Lab4/3/archive/arc16.jpg b/Lab4/3/archive/arc16.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..3a380f4f46dfda7f228eed3e8e70b976e6b341c9
Binary files /dev/null and b/Lab4/3/archive/arc16.jpg differ
diff --git a/Lab4/3/archive/arc17.jpg b/Lab4/3/archive/arc17.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..83c85a1e118a094fc70fa031a1ce7b57a223424c
Binary files /dev/null and b/Lab4/3/archive/arc17.jpg differ
diff --git a/Lab4/3/archive/arc18.jpg b/Lab4/3/archive/arc18.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..cf47ffec0ffbfae89a08ba26f82dad9b44214f68
Binary files /dev/null and b/Lab4/3/archive/arc18.jpg differ
diff --git a/Lab4/3/archive/arc19.jpg b/Lab4/3/archive/arc19.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..a8e8c789d70857c6e3bcd9ad07421b6b8cbde09c
Binary files /dev/null and b/Lab4/3/archive/arc19.jpg differ
diff --git a/Lab4/3/archive/arc2.jpg b/Lab4/3/archive/arc2.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..6dc2597ba1733b1bd36928569ef61166cf2eb188
Binary files /dev/null and b/Lab4/3/archive/arc2.jpg differ
diff --git a/Lab4/3/archive/arc20.jpg b/Lab4/3/archive/arc20.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..6e40b6caa22dc393d39eda1d2540ab0f0b140829
Binary files /dev/null and b/Lab4/3/archive/arc20.jpg differ
diff --git a/Lab4/3/archive/arc3.jpg b/Lab4/3/archive/arc3.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..b7c3aa02415736d07720fbdde1e2ae3e67a545fb
Binary files /dev/null and b/Lab4/3/archive/arc3.jpg differ
diff --git a/Lab4/3/archive/arc4.jpg b/Lab4/3/archive/arc4.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..d7e939c9f87fc41c5214b35aa8f180ad2baf24d8
Binary files /dev/null and b/Lab4/3/archive/arc4.jpg differ
diff --git a/Lab4/3/archive/arc5.jpg b/Lab4/3/archive/arc5.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..04650508d1a486be77951070280d1c3ec432e6ca
Binary files /dev/null and b/Lab4/3/archive/arc5.jpg differ
diff --git a/Lab4/3/archive/arc6.jpg b/Lab4/3/archive/arc6.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..9b1df2908746de83f6fb7b4610f7bd4c9662684e
Binary files /dev/null and b/Lab4/3/archive/arc6.jpg differ
diff --git a/Lab4/3/archive/arc7.jpg b/Lab4/3/archive/arc7.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..2bce5fbfc4a43b38a8d3ed985959ba4ca0efe8b6
Binary files /dev/null and b/Lab4/3/archive/arc7.jpg differ
diff --git a/Lab4/3/archive/arc8.jpg b/Lab4/3/archive/arc8.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..9fe7f1e132fabd54fa2078f098e922e6f2c023d6
Binary files /dev/null and b/Lab4/3/archive/arc8.jpg differ
diff --git a/Lab4/3/archive/arc9.jpg b/Lab4/3/archive/arc9.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..e34df6fa7d842335fcd526b2b6985a2da9f62a48
Binary files /dev/null and b/Lab4/3/archive/arc9.jpg differ
diff --git "a/Lab4/\320\220\320\273\320\264\320\260\321\200\320\276\320\262\320\220\320\232_\320\221\320\230\320\222211_\320\237\320\2404.ipynb" "b/Lab4/\320\220\320\273\320\264\320\260\321\200\320\276\320\262\320\220\320\232_\320\221\320\230\320\222211_\320\237\320\2404.ipynb"
new file mode 100644
index 0000000000000000000000000000000000000000..9319eb14a279ac995ae02e8f0c434e45d1addd6e
--- /dev/null
+++ "b/Lab4/\320\220\320\273\320\264\320\260\321\200\320\276\320\262\320\220\320\232_\320\221\320\230\320\222211_\320\237\320\2404.ipynb"
@@ -0,0 +1,890 @@
+{
+  "cells": [
+    {
+      "cell_type": "markdown",
+      "metadata": {
+        "id": "V1EHcMnf0YCM"
+      },
+      "source": [
+        "# 1 Детекция + верификация (3 балла) DONE"
+      ]
+    },
+    {
+      "cell_type": "markdown",
+      "metadata": {
+        "id": "86yB1xI5-8ng"
+      },
+      "source": [
+        "Часть 1: Обнаружение лиц в реальном времени или на видео (3 балла)\n",
+        "Задача:\n",
+        "\n",
+        "Используя библиотеку OpenCV и метод [YuNET](https://docs.opencv.org/4.x/d0/dd4/tutorial_dnn_face.html) (или другой нейросетевой метод обнаружения лиц на ваше усмотрение), реализуйте обнаружение лиц на видео (или веб-камере).\n",
+        "Программа должна обрабатывать кадры с вебкамеры и корректно распознавать ваше лицо. Ваше лицо должно быть выделено зелёным прямоугольником, а чужие - красными."
+      ]
+    },
+    {
+      "cell_type": "markdown",
+      "metadata": {
+        "id": "YXULvH2Vlxn0"
+      },
+      "source": [
+        "## Main program"
+      ]
+    },
+    {
+      "cell_type": "code",
+      "execution_count": null,
+      "metadata": {
+        "id": "D9yGlaFSgPcJ"
+      },
+      "outputs": [],
+      "source": [
+        "!pip install deepface -q"
+      ]
+    },
+    {
+      "cell_type": "code",
+      "execution_count": null,
+      "metadata": {
+        "id": "EcJJP7S0k9iD"
+      },
+      "outputs": [],
+      "source": [
+        "import cv2\n",
+        "import numpy as np\n",
+        "from deepface import DeepFace\n",
+        "from google.colab.patches import cv2_imshow\n",
+        "\n",
+        "cap = cv2.VideoCapture('v5.mp4')\n",
+        "\n",
+        "out = cv2.VideoWriter(\n",
+        "    'outpy.mp4',\n",
+        "    cv2.VideoWriter_fourcc(*'MP4V'),\n",
+        "    20,\n",
+        "    (int(cap.get(3)), int(cap.get(4)))\n",
+        ")\n",
+        "\n",
+        "cap.set(cv2.CAP_PROP_POS_FRAMES, 400)\n",
+        "\n",
+        "filt_keys = ['x', 'y', 'w', 'h']\n",
+        "for i in range(120):\n",
+        "    ret, img = cap.read()\n",
+        "    if not ret:\n",
+        "        break\n",
+        "    faces = DeepFace.extract_faces(img, enforce_detection=False)\n",
+        "\n",
+        "    if len(faces) > 0:\n",
+        "        box_vrf = []\n",
+        "\n",
+        "        result = DeepFace.verify(\n",
+        "            'photo.jpg',\n",
+        "            img,\n",
+        "            enforce_detection=False\n",
+        "        )\n",
+        "        if result['verified']:\n",
+        "            box_vrf = [result['facial_areas']['img2'][key] for key in filt_keys]\n",
+        "\n",
+        "        for face in faces:\n",
+        "            box = [face['facial_area'][key] for key in filt_keys]\n",
+        "            color = (0, 255, 0) if box == box_vrf else (0, 0, 255)\n",
+        "            cv2.rectangle(img, box, color, 2)\n",
+        "    out.write(img)\n",
+        "cap.release()\n",
+        "out.release()"
+      ]
+    },
+    {
+      "cell_type": "markdown",
+      "metadata": {
+        "id": "MjhODhxJ0kGJ"
+      },
+      "source": [
+        "# 2 MIEM Lookalike (2 + 1 балл) DONE"
+      ]
+    },
+    {
+      "cell_type": "markdown",
+      "metadata": {
+        "id": "knOMSWM60mKg"
+      },
+      "source": [
+        "Реализовать задачу поиска лиц с использованием эмбеддингов и FAISS\n",
+        "\n",
+        "Задача:\n",
+        "Используйте предобученную модель распознавания лиц (например, FaceNet из библиотеки DeepFace) для создания эмбеддингов (векторных представлений) для всех изображений лиц из [выгрузки фотографий](https://drive.google.com/drive/folders/1I7EzyoJYYiLuOiPyjAHeSdTgzmDUAp2X?usp=sharing) сотрудников МИЭМ.\n",
+        "Создайте [FAISS](https://habr.com/ru/companies/okkamgroup/articles/509204/)-базу данных для хранения эмбеддингов лиц.\n",
+        "Реализуйте функцию поиска:\n",
+        "* Загрузите изображение нового лица.\n",
+        "* Извлеките эмбеддинг и найдите ближайший эмбеддинг в FAISS.\n",
+        "* Реализуйте выдачу имени человека с наиболее похожим лицом и его расстояние до текущего лица.\n",
+        "\n",
+        "По-умолчанию предполагается выполнение решения в виде консольного скрипта или внутри блокнота Jupyter. Дополнительно можно реализовать веб-интерфейс с помощью библиотеки streamlit или другого веб-фреймворка на ваше усмотрение (+1 балл).\n"
+      ]
+    },
+    {
+      "cell_type": "code",
+      "execution_count": null,
+      "metadata": {
+        "collapsed": true,
+        "id": "wUQ3bMfGUqHv"
+      },
+      "outputs": [],
+      "source": [
+        "!pip install faiss-cpu streamlit"
+      ]
+    },
+    {
+      "cell_type": "code",
+      "source": [
+        "! unzip hse_faces_miem.zip"
+      ],
+      "metadata": {
+        "id": "351IeaIih5xa"
+      },
+      "execution_count": null,
+      "outputs": []
+    },
+    {
+      "cell_type": "code",
+      "execution_count": null,
+      "metadata": {
+        "collapsed": true,
+        "id": "4Ou5ihKzWv_U"
+      },
+      "outputs": [],
+      "source": [
+        "import pandas as pd\n",
+        "\n",
+        "# Загружаем метаданные\n",
+        "metadata = pd.read_csv('/content/hse_faces_miem/staff_photo.csv')"
+      ]
+    },
+    {
+      "cell_type": "code",
+      "execution_count": null,
+      "metadata": {
+        "id": "Q55zHlyHW-yj"
+      },
+      "outputs": [],
+      "source": [
+        "from deepface import DeepFace\n",
+        "import numpy as np\n",
+        "import faiss\n",
+        "import cv2\n",
+        "import os\n",
+        "from natsort import natsorted\n",
+        "\n",
+        "# Функция для извлечения эмбеддинга\n",
+        "def get_embedding(img_path):\n",
+        "    img = cv2.imread(img_path)\n",
+        "    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n",
+        "    embedding = DeepFace.represent(img_path=img_path, model_name='Facenet', enforce_detection=False)\n",
+        "    return np.array(embedding)\n",
+        "\n",
+        "# Создание списка эмбеддингов и имен\n",
+        "embeddings = []\n",
+        "names = []\n",
+        "\n",
+        "for filename in natsorted(os.listdir('hse_faces_miem')):\n",
+        "    if not filename.endswith(\"jpeg\"):\n",
+        "        continue\n",
+        "    img_path = f\"/content/hse_faces_miem/{filename}\"\n",
+        "    embedding = get_embedding(img_path)\n",
+        "    embeddings.append(embedding)\n",
+        "    names.append(filename)\n",
+        "\n",
+        "# Преобразование в numpy массив\n",
+        "for i, embedding in enumerate(embeddings):\n",
+        "    embeddings[i] = embedding[0]['embedding']\n",
+        "embeddings = np.array(embeddings)\n",
+        "\n",
+        "# Создание индекса FAISS\n",
+        "dimension = embeddings.shape[1]\n",
+        "index = faiss.IndexFlatL2(dimension)\n",
+        "index.add(embeddings)"
+      ]
+    },
+    {
+      "cell_type": "code",
+      "execution_count": null,
+      "metadata": {
+        "id": "ROt5XbuYMTGz"
+      },
+      "outputs": [],
+      "source": [
+        "img_path = f\"my_face.jpg\"\n",
+        "query_embedding = get_embedding(img_path)[0]['embedding']\n",
+        "\n",
+        "D, I = index.search(np.array([query_embedding]), 2)\n",
+        "distance = D[0]\n",
+        "closest_name = [names[i] for i in I[0]]\n",
+        "\n",
+        "print(f\"Наиболее похожие 2 человека: {', '.join(metadata.name[metadata.filename.isin(closest_name)].values)}\")\n",
+        "print(f\"Расстояние до текущего лица: {', '.join(str(i) for i in distance)}\")"
+      ]
+    },
+    {
+      "cell_type": "code",
+      "execution_count": null,
+      "metadata": {
+        "id": "-AE7dRXfSpcA"
+      },
+      "outputs": [],
+      "source": [
+        "closest_name = [names[i] for i in I[0]]\n",
+        "metadata.name[metadata.filename.isin(closest_name)].tolist()"
+      ]
+    },
+    {
+      "cell_type": "markdown",
+      "metadata": {
+        "id": "iM95XzbfOh2e"
+      },
+      "source": [
+        "## Web GUI"
+      ]
+    },
+    {
+      "cell_type": "code",
+      "execution_count": null,
+      "metadata": {
+        "colab": {
+          "base_uri": "https://localhost:8080/"
+        },
+        "id": "W42_FcbMW_0y",
+        "outputId": "c4bb0943-ffe8-46b6-95da-3230197e7cfc"
+      },
+      "outputs": [
+        {
+          "output_type": "stream",
+          "name": "stdout",
+          "text": [
+            "Writing app.py\n"
+          ]
+        }
+      ],
+      "source": [
+        "%%writefile app.py\n",
+        "import streamlit as st\n",
+        "from deepface import DeepFace\n",
+        "import numpy as np\n",
+        "import faiss, cv2, os\n",
+        "import pandas as pd\n",
+        "from natsort import natsorted\n",
+        "\n",
+        "metadata = pd.read_csv('/content/hse_faces_miem/staff_photo.csv')\n",
+        "\n",
+        "st.title('Face Recognition App')\n",
+        "\n",
+        "def get_embedding(img_path):\n",
+        "    img = cv2.imread(img_path)\n",
+        "    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n",
+        "    embedding = DeepFace.represent(img_path=img_path, enforce_detection=False)\n",
+        "    return np.array(embedding)\n",
+        "\n",
+        "# Создание списка эмбеддингов и имен\n",
+        "embeddings = []\n",
+        "names = []\n",
+        "\n",
+        "for filename in natsorted(os.listdir('hse_faces_miem')):\n",
+        "    if not filename.endswith(\"jpeg\"):\n",
+        "        continue\n",
+        "    img_path = f\"/content/hse_faces_miem/{filename}\"\n",
+        "    embedding = get_embedding(img_path)\n",
+        "    embeddings.append(embedding)\n",
+        "    names.append(filename)\n",
+        "\n",
+        "for i, embedding in enumerate(embeddings):\n",
+        "    embeddings[i] = embedding[0]['embedding']\n",
+        "embeddings = np.array(embeddings)\n",
+        "\n",
+        "# Создание индекса FAISS\n",
+        "dimension = embeddings.shape[1]\n",
+        "index = faiss.IndexFlatL2(dimension)\n",
+        "index.add(embeddings)\n",
+        "\n",
+        "# Загрузка изображения\n",
+        "uploaded_file = st.file_uploader(\"Загрузите изображение\", type=[\"jpg\", \"jpeg\", \"png\"])\n",
+        "\n",
+        "if uploaded_file is not None:\n",
+        "    # Сохранение загруженного изображения\n",
+        "    img_path = f\"uploaded_image.jpeg\"\n",
+        "    with open(img_path, \"wb\") as f:\n",
+        "        f.write(uploaded_file.getbuffer())\n",
+        "\n",
+        "    # Отображение загруженного изображения\n",
+        "    st.image(img_path, caption='Загруженное изображение', use_container_width=True)\n",
+        "\n",
+        "    # Извлечение эмбеддинга\n",
+        "    query_embedding = get_embedding(img_path)[0]['embedding']\n",
+        "\n",
+        "    # Поиск ближайшего соседа в FAISS\n",
+        "    D, I = index.search(np.array([query_embedding]), 2)\n",
+        "    distance = D[0]\n",
+        "    closest_name = [names[i] for i in I[0]]\n",
+        "\n",
+        "    st.write(f\"Наиболее похожие 2 человека: {', '.join(metadata.name[metadata.filename.isin(closest_name)].values)}\")\n",
+        "    st.write(f\"Расстояние до текущего лица: {', '.join(str(i) for i in distance)}\")"
+      ]
+    },
+    {
+      "cell_type": "code",
+      "execution_count": null,
+      "metadata": {
+        "id": "oKlz5GVQExev"
+      },
+      "outputs": [],
+      "source": [
+        "!streamlit run app.py & npx localtunnel --port 8501 & curl ipv4.icanhazip.com"
+      ]
+    },
+    {
+      "cell_type": "markdown",
+      "metadata": {
+        "id": "BdzH5yGw--rt"
+      },
+      "source": [
+        "# 3 Анализ устойчивости распознавания лиц (4 балла)\n",
+        "\n"
+      ]
+    },
+    {
+      "cell_type": "markdown",
+      "metadata": {
+        "id": "G_RL2wOgfAUe"
+      },
+      "source": [
+        "## Часть 1. Оценить точность работы методов из DeepFace на тестовом видео (2 балла)."
+      ]
+    },
+    {
+      "cell_type": "markdown",
+      "metadata": {
+        "id": "5UsJb8VkPzd0"
+      },
+      "source": [
+        "В данном задании предлагается рассмотреть задачу распознавания лиц как задачу классификации для оценки точности.\n",
+        "Вам даны [видео](https://drive.google.com/drive/folders/1z_YCdJF2Rf9WzlNSY3BpNFiakNisq5VB?usp=sharing), для которых представлена разметка в виде тайм-кодов и база фотографий людей с этих видео.\n",
+        "Необходимо взять каждый 50-й кадр видео (способ разбиения на кадры с учётом разметки - на ваше усмотрение) и для полученного набора изображений оценить метрику Recall на данном наборе изображений для всех лиц, присутствующих на видео и в разметке.\n",
+        "\n"
+      ]
+    },
+    {
+      "cell_type": "code",
+      "source": [
+        "!pip install deepface -q"
+      ],
+      "metadata": {
+        "id": "ngu9fSxziZC1"
+      },
+      "execution_count": null,
+      "outputs": []
+    },
+    {
+      "cell_type": "code",
+      "source": [
+        "from google.colab import drive\n",
+        "drive.mount('/content/gdrive/')"
+      ],
+      "metadata": {
+        "colab": {
+          "base_uri": "https://localhost:8080/"
+        },
+        "id": "dgwHIvCE8uqg",
+        "outputId": "8dcca84f-1bc5-42d1-d5db-936c03c2f825"
+      },
+      "execution_count": null,
+      "outputs": [
+        {
+          "output_type": "stream",
+          "name": "stdout",
+          "text": [
+            "Mounted at /content/gdrive/\n"
+          ]
+        }
+      ]
+    },
+    {
+      "cell_type": "code",
+      "source": [
+        "from deepface import DeepFace\n",
+        "import cv2\n",
+        "import numpy as np\n",
+        "import pandas as pd\n",
+        "import os\n",
+        "\n",
+        "photos_dir = 'gdrive/MyDrive/aimm-face-videos/photos'\n",
+        "labels_dir = 'gdrive/MyDrive/aimm-face-videos/labels'\n",
+        "videos_dir = 'gdrive/MyDrive/aimm-face-videos/videos'"
+      ],
+      "metadata": {
+        "id": "yEw4Kvel-_In"
+      },
+      "execution_count": null,
+      "outputs": []
+    },
+    {
+      "cell_type": "code",
+      "source": [
+        "def time_to_frame(time_str, fps):\n",
+        "    time_parts = time_str.split(':')\n",
+        "    if len(time_parts) == 3:\n",
+        "        hours, minutes, seconds = map(float, time_parts)\n",
+        "    else:\n",
+        "        raise ValueError(\"Time format should be 'HH:MM:SS'\")\n",
+        "\n",
+        "    total_seconds = hours * 3600 + minutes * 60 + seconds\n",
+        "    frame_number = int(total_seconds * fps)\n",
+        "    return frame_number\n",
+        "\n",
+        "def get_labeled_persons_for_frame(labels, frame_number):\n",
+        "    for interval in labels:\n",
+        "        if interval[0] <= frame_number <= interval[1]:\n",
+        "            return interval[2]\n",
+        "    return []"
+      ],
+      "metadata": {
+        "id": "IC_cGfKDcW79"
+      },
+      "execution_count": null,
+      "outputs": []
+    },
+    {
+      "cell_type": "code",
+      "execution_count": null,
+      "metadata": {
+        "id": "8BStv1QrUyQL",
+        "colab": {
+          "base_uri": "https://localhost:8080/"
+        },
+        "outputId": "bc745680-4ec4-4b2c-a1c1-affe7623f497"
+      },
+      "outputs": [
+        {
+          "output_type": "stream",
+          "name": "stdout",
+          "text": [
+            "0 2 1\n",
+            "50 4 2\n",
+            "100 6 3\n",
+            "150 8 4\n",
+            "200 10 5\n",
+            "250 11 7\n",
+            "300 13 8\n",
+            "350 14 10\n",
+            "400 15 12\n",
+            "450 17 13\n",
+            "500 19 14\n",
+            "550 21 15\n",
+            "600 23 16\n",
+            "650 25 17\n",
+            "700 27 18\n"
+          ]
+        }
+      ],
+      "source": [
+        "recalls = []\n",
+        "\n",
+        "for video in os.listdir(videos_dir):\n",
+        "    video_path = os.path.join(videos_dir, video)\n",
+        "    labels_path = os.path.join(labels_dir, os.path.splitext(video)[0] + \".csv\")\n",
+        "\n",
+        "    cap = cv2.VideoCapture(video_path)\n",
+        "    df = pd.read_csv(labels_path)\n",
+        "    fps = cap.get(cv2.CAP_PROP_FPS)\n",
+        "\n",
+        "    tp, fn = 0, 0\n",
+        "    labels = []\n",
+        "\n",
+        "    for index, row in df.iterrows():\n",
+        "        ts_from = time_to_frame(row['from'], fps)\n",
+        "        ts_to = time_to_frame(row['to'], fps)\n",
+        "        ts_persons = row['persons'].replace(',',' ').split()\n",
+        "        labels.append([ts_from, ts_to, ts_persons])\n",
+        "\n",
+        "    count = 0\n",
+        "    while count < int(cap.get(cv2.CAP_PROP_FRAME_COUNT)):\n",
+        "        ret, frame = cap.read()\n",
+        "        if not ret:\n",
+        "            break\n",
+        "\n",
+        "        persons = get_labeled_persons_for_frame(labels, count)\n",
+        "        for person in persons:\n",
+        "            verified = False\n",
+        "            person_path = os.path.join(photos_dir, person)\n",
+        "            for filename in os.listdir(person_path):\n",
+        "                ref = os.path.join(person_path, filename)\n",
+        "                try:\n",
+        "                    result = DeepFace.verify(ref, frame)\n",
+        "                    verified = result['verified']\n",
+        "                except ValueError:\n",
+        "                    pass\n",
+        "                if verified:\n",
+        "                    break\n",
+        "            if verified:\n",
+        "                tp += 1\n",
+        "            else:\n",
+        "                fn += 1\n",
+        "\n",
+        "        print(count, tp, fn)\n",
+        "        count += 50\n",
+        "        cap.set(cv2.CAP_PROP_POS_FRAMES, count)\n",
+        "    recalls.append(tp / (tp + fn))"
+      ]
+    },
+    {
+      "cell_type": "markdown",
+      "metadata": {
+        "id": "2iRrc6SRg4-d"
+      },
+      "source": [
+        "## Часть 2. Оценить точность (полноту) работы методов из DeepFace на аугментированных данных (2 балла)."
+      ]
+    },
+    {
+      "cell_type": "markdown",
+      "metadata": {
+        "id": "aFA9PwlwjeDV"
+      },
+      "source": [
+        "Необходимо собрать собственный набор данных из **различных** изображений Вашего лица с разных ракурсов, желательно настоящие фотографии из личного архива (20 штук)\\\n",
+        "Возьмите эталонное изображение (как в паспорте) и при помощи библиотеки [DeepFace](https://github.com/serengil/deepface) проверьте его на соответствие всему датасету. Посчитайте метрику Recall. \\\n",
+        "\\\n",
+        "Примените каждую из перечисленных ниже аугментаций (**по отдельности**) ко всему датасету и измерьте метрику Recall для измененного датасета:\n",
+        "*   Поворот изображения на 45° и 90°.\n",
+        "*   Добавление шума (Gaussian Noise).\n",
+        "*   Изменение яркости (увеличение и уменьшение на 50%).\n",
+        "*   Размытие с различными параметрами.\n",
+        "\\\n",
+        "Результаты соберите в таблицу вида:\n",
+        "\n",
+        "Метод | Исходный датасет | Поворот на 45° | Поворот на 90° | Изображение с шумом | Увеличение яркости | Уменьшение яркости | Размытие | Примечание\n",
+        "--- | --- | --- | --- | --- | --- | --- | --- | --- |\n",
+        "VGG-Face | 0.7 | 0.0 | 0.0 | 0.0 | 0.8 | 0.65 | 0.7 | самая большая доля правильных ответов |\n",
+        "Facenet | 0.4 | 0.0 | 0.0 | 0.0 | 0.65 | 0.05 | 0.2 |  |\n",
+        "Facenet512 | 0.5 | 0.0 | 0.0 | 0.0 | 0.5 | 0.2 | 0.15 |  |\n",
+        "OpenFace | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 |  |\n",
+        "DeepFace | --- | --- | --- | --- | --- | --- | --- | не удалось запустить (tf > 2.12) |\n",
+        "DeepID | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 |  |\n",
+        "ArcFace | 0.7 | 0.0 | 0.0 | 0.0 | 0.8 | 0.7 | 0.6 |  |\n",
+        "Dlib | --- | --- | --- | --- | --- | --- | --- | не удалось запустить (проблемы с CUDA) |\n",
+        "SFace | 0.65 | 0.0 | 0.0 | 0.0 | 0.65 | 0.65 | 0.45 | высокая точность для своего размера |\n",
+        "GhostFaceNet | 0.65 | 0.0 | 0.0 | 0.0 | 0.75 | 0.6 | 0.6 | сравнима с VGG-Face, но легче |\n",
+        "\n",
+        "Для обрезанных фото:\n",
+        "\n",
+        "Метод | Исходный датасет | Поворот на 45° | Поворот на 90° | Изображение с шумом | Увеличение яркости | Уменьшение яркости | Размытие | Примечание\n",
+        "--- | --- | --- | --- | --- | --- | --- | --- | --- |\n",
+        "VGG-Face | 0.8 | 0.05 | 0.0 | 0.05 | 0.85 | 0.75 | 0.7 | ^^=^^^^ |\n",
+        "ArcFace | 0.8 | 0.0 | 0.0 | 0.0 | 0.8 | 0.7 | 0.65 | ^=====^ |\n",
+        "SFace | 0.65 | 0.0 | 0.0 | 0.0 | 0.55 | 0.5 | 0.6 | ====vv^ |\n",
+        "GhostFaceNet | 0.7 | 0.0 | 0.0 | 0.0 | 0.7 | 0.65 | 0.6 | ^===v^= |"
+      ]
+    },
+    {
+      "cell_type": "code",
+      "execution_count": null,
+      "metadata": {
+        "id": "PalUh0KGFUrs"
+      },
+      "outputs": [],
+      "source": [
+        "!pip install deepface -q"
+      ]
+    },
+    {
+      "cell_type": "code",
+      "execution_count": null,
+      "metadata": {
+        "id": "qx8pKK01Wtol"
+      },
+      "outputs": [],
+      "source": [
+        "dir = '/content/archive_crop/'\n",
+        "model = 'VGG-Face'\n",
+        "recalls = []"
+      ]
+    },
+    {
+      "cell_type": "code",
+      "execution_count": null,
+      "metadata": {
+        "id": "mJ74_c06Ez21"
+      },
+      "outputs": [],
+      "source": [
+        "import os\n",
+        "from natsort import natsorted\n",
+        "import cv2\n",
+        "import numpy as np\n",
+        "from google.colab.patches import cv2_imshow"
+      ]
+    },
+    {
+      "cell_type": "code",
+      "execution_count": null,
+      "metadata": {
+        "id": "D_8NgR_TE2s1"
+      },
+      "outputs": [],
+      "source": [
+        "files = natsorted(os.listdir('archive_crop'))"
+      ]
+    },
+    {
+      "cell_type": "code",
+      "execution_count": null,
+      "metadata": {
+        "colab": {
+          "base_uri": "https://localhost:8080/"
+        },
+        "id": "PtPM9uLyGtVa",
+        "outputId": "36bd5d13-263b-40e3-b124-a004490ffd33"
+      },
+      "outputs": [
+        {
+          "output_type": "execute_result",
+          "data": {
+            "text/plain": [
+              "[0.8, 0.05, 0.0, 0.05, 0.85, 0.75, 0.7]"
+            ]
+          },
+          "metadata": {},
+          "execution_count": 124
+        }
+      ],
+      "source": [
+        "recalls"
+      ]
+    },
+    {
+      "cell_type": "markdown",
+      "metadata": {
+        "id": "3PBavAqvffxI"
+      },
+      "source": [
+        "### Original"
+      ]
+    },
+    {
+      "cell_type": "code",
+      "execution_count": null,
+      "metadata": {
+        "id": "NqSWfowJUzrs"
+      },
+      "outputs": [],
+      "source": [
+        "from deepface import DeepFace\n",
+        "\n",
+        "tp = 0\n",
+        "for fn in files:\n",
+        "    result = DeepFace.verify(\n",
+        "            'photo.jpg',\n",
+        "            dir + fn,\n",
+        "            model_name=model,\n",
+        "            enforce_detection=False\n",
+        "    )\n",
+        "    if result['verified']:\n",
+        "        tp += 1\n",
+        "recalls.append(tp / 20)"
+      ]
+    },
+    {
+      "cell_type": "markdown",
+      "metadata": {
+        "id": "Z7r9oITFfiIi"
+      },
+      "source": [
+        "### Rotated 45 deg."
+      ]
+    },
+    {
+      "cell_type": "code",
+      "execution_count": null,
+      "metadata": {
+        "id": "FCztHzhtWZvF"
+      },
+      "outputs": [],
+      "source": [
+        "import imutils\n",
+        "\n",
+        "tp = 0\n",
+        "for fn in files:\n",
+        "    image = cv2.imread(dir + fn)\n",
+        "    image = imutils.rotate(image, angle=45)\n",
+        "    result = DeepFace.verify(\n",
+        "            'photo.jpg',\n",
+        "            image,\n",
+        "            model_name=model,\n",
+        "            enforce_detection=False\n",
+        "    )\n",
+        "    if result['verified']:\n",
+        "        tp += 1\n",
+        "recalls.append(tp / 20)"
+      ]
+    },
+    {
+      "cell_type": "markdown",
+      "metadata": {
+        "id": "_UHcCw7kfmF4"
+      },
+      "source": [
+        "### Rotated 90 deg."
+      ]
+    },
+    {
+      "cell_type": "code",
+      "execution_count": null,
+      "metadata": {
+        "id": "KF4YXRXMW7d2"
+      },
+      "outputs": [],
+      "source": [
+        "tp = 0\n",
+        "for fn in files:\n",
+        "    image = cv2.imread(dir + fn)\n",
+        "    image = cv2.rotate(image, cv2.ROTATE_90_CLOCKWISE)\n",
+        "    result = DeepFace.verify(\n",
+        "            'photo.jpg',\n",
+        "            image,\n",
+        "            model_name=model,\n",
+        "            enforce_detection=False\n",
+        "    )\n",
+        "    if result['verified']:\n",
+        "        tp += 1\n",
+        "recalls.append(tp / 20)"
+      ]
+    },
+    {
+      "cell_type": "markdown",
+      "metadata": {
+        "id": "GHwS6peNfo1j"
+      },
+      "source": [
+        "### With Gaussian noise"
+      ]
+    },
+    {
+      "cell_type": "code",
+      "execution_count": null,
+      "metadata": {
+        "id": "hwxGvzzhXIw2"
+      },
+      "outputs": [],
+      "source": [
+        "def add_gaussian_noise(image, mean=0, std=25):\n",
+        "    noise = np.random.normal(mean, std, image.shape).astype(np.uint8)\n",
+        "    noisy_image = cv2.add(image, noise)\n",
+        "    return noisy_image\n",
+        "\n",
+        "tp = 0\n",
+        "for fn in files:\n",
+        "    image = cv2.imread(dir + fn)\n",
+        "    image = add_gaussian_noise(image)\n",
+        "    result = DeepFace.verify(\n",
+        "            'photo.jpg',\n",
+        "            image,\n",
+        "            model_name=model,\n",
+        "            enforce_detection=False\n",
+        "    )\n",
+        "    if result['verified']:\n",
+        "        tp += 1\n",
+        "recalls.append(tp / 20)"
+      ]
+    },
+    {
+      "cell_type": "markdown",
+      "metadata": {
+        "id": "g1CYHwRAfr67"
+      },
+      "source": [
+        "### With changed brightness"
+      ]
+    },
+    {
+      "cell_type": "code",
+      "execution_count": null,
+      "metadata": {
+        "id": "QBFLFA2_Y3mF"
+      },
+      "outputs": [],
+      "source": [
+        "import PIL\n",
+        "\n",
+        "tp = 0\n",
+        "for fn in files:\n",
+        "    image = PIL.Image.open(dir + fn)\n",
+        "    image = PIL.ImageEnhance.Brightness(image).enhance(1.5)\n",
+        "    result = DeepFace.verify(\n",
+        "            'photo.jpg',\n",
+        "            np.array(image),\n",
+        "            model_name=model,\n",
+        "            enforce_detection=False\n",
+        "    )\n",
+        "    if result['verified']:\n",
+        "        tp += 1\n",
+        "recalls.append(tp / 20)\n",
+        "\n",
+        "tp = 0\n",
+        "for fn in files:\n",
+        "    image = PIL.Image.open(dir + fn)\n",
+        "    image = PIL.ImageEnhance.Brightness(image).enhance(0.5)\n",
+        "    result = DeepFace.verify(\n",
+        "            'photo.jpg',\n",
+        "            np.array(image),\n",
+        "            model_name=model,\n",
+        "            enforce_detection=False\n",
+        "    )\n",
+        "    if result['verified']:\n",
+        "        tp += 1\n",
+        "recalls.append(tp / 20)"
+      ]
+    },
+    {
+      "cell_type": "markdown",
+      "metadata": {
+        "id": "yRkDOEZsfx3q"
+      },
+      "source": [
+        "### Blurred"
+      ]
+    },
+    {
+      "cell_type": "code",
+      "execution_count": null,
+      "metadata": {
+        "id": "Oz5lkzymbGbR"
+      },
+      "outputs": [],
+      "source": [
+        "tp = 0\n",
+        "for fn in files:\n",
+        "    image = cv2.imread(dir + fn)\n",
+        "    image = cv2.GaussianBlur(image, (5, 5), cv2.BORDER_DEFAULT)\n",
+        "    result = DeepFace.verify(\n",
+        "            'photo.jpg',\n",
+        "            image,\n",
+        "            model_name=model,\n",
+        "            enforce_detection=False\n",
+        "    )\n",
+        "    if result['verified']:\n",
+        "        tp += 1\n",
+        "recalls.append(tp / 20)"
+      ]
+    }
+  ],
+  "metadata": {
+    "colab": {
+      "provenance": []
+    },
+    "kernelspec": {
+      "display_name": "Python 3",
+      "name": "python3"
+    },
+    "language_info": {
+      "name": "python"
+    }
+  },
+  "nbformat": 4,
+  "nbformat_minor": 0
+}
\ No newline at end of file
diff --git a/README.md b/README.md
index 4a03cc28b0b6d7b28a9d4b82ba58643c28205de2..6c287d0b8367d93df03c12fb7fd37d81bd61f37d 100644
--- a/README.md
+++ b/README.md
@@ -17,3 +17,8 @@
 
 **Ссылки:**
 - [Папка с видео](https://drive.google.com/drive/folders/1rt59nrTpbuNTT8p6vlGXkBFbFPjs3-p4?usp=sharing)
+
+## ПР4
+
+**Ссылки:**
+- Ссылок нет, все в репозитории