diff --git a/host/config.json b/host/config.json
index 3e45bc13058d8f89a68d2abbd0dfc93524c15013..6e04ee90119b804abb85dabaf5fc6200832d4615 100644
--- a/host/config.json
+++ b/host/config.json
@@ -1,5 +1,5 @@
 {
-  "use_machine": false,
+  "use_machine": true,
   "rotate": false,
   "fullscreen": false,
   "overlays": {
diff --git a/host/filtering.py b/host/filtering.py
index 97d6a56c962a07eeb24e190f27daa1f14338c029..df2cbc58e3168b47c1b66b554af8668e96b3046f 100644
--- a/host/filtering.py
+++ b/host/filtering.py
@@ -14,27 +14,49 @@ def bf(img):
 
     gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
     gray = cv2.equalizeHist(gray)
-    filtered = cv2.bilateralFilter(gray, d=3, sigmaColor=10, sigmaSpace=10)
+    filtered = cv2.bilateralFilter(gray, d=2, sigmaColor=10, sigmaSpace=10)
     # filtered = cv2.Sobel(filtered, ddepth=cv2.CV_8U, dx=1, dy=1, ksize=)
     # gray = cv2.cvtColor(filtered, cv2.COLOR_BGR2GRAY)
     edges = cv2.adaptiveThreshold(
-        filtered, 250, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, blockSize=11, C=4)
+        filtered, 250, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, blockSize=13, C=4)
     # post_filtered = cv2.bilateralFilter(edges, d=9, sigmaColor=75, sigmaSpace=75)
     edges = ~edges
 
     edges = cv2.dilate(edges, cv2.getStructuringElement(
-        cv2.MORPH_ELLIPSE, (5, 5)))
+        cv2.MORPH_ELLIPSE, (3, 3)))
 
     # return post_filtered
     return edges
 
 
+def increase_bbox_size(bbox, pct=0.1):
+    """
+    Increase the bounding box by a percentage of its size.
+
+    Parameters:
+    - bbox: The bounding box in the format (x, y, width, height)
+    - pct: fraction of width/height added on each side (e.g. 0.1 = 10%)
+
+    Returns:
+    - A new (x, y, w, h) grown by pct per side (not clamped; x/y may go negative)
+    """
+    x, y, w, h = bbox
+    x_new = int(x - pct * w)
+    y_new = int(y - pct * h)
+    w_new = int(w * (1 + 2 * pct))
+    h_new = int(h * (1 + 2 * pct))
+    return (x_new, y_new, w_new, h_new)
+
+
 def get_polylines(img, min_length=5.0, n_max=200):
 
     face_cascade = cv2.CascadeClassifier(
         cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')
     faces = face_cascade.detectMultiScale(
         img, scaleFactor=1.8, minNeighbors=5, minSize=(50, 50))
+
+    faces = [increase_bbox_size(face, 0.1) for face in faces]
+
     # Create a mask where the faces are 0 and the rest is 1
     height, width, _ = img.shape
     mask = np.zeros((height, width), dtype=bool)  # White mask