diff --git a/01_Code/physical_computing_interface/demos/app1.js b/01_Code/physical_computing_interface/demos/app1.js
new file mode 100644
index 0000000000000000000000000000000000000000..94f09b4bf31175a79dabab558209d1fc53146a68
--- /dev/null
+++ b/01_Code/physical_computing_interface/demos/app1.js
@@ -0,0 +1,126 @@
+// Amira Abdel-Rahman
+// (c) Massachusetts Institute of Technology 2020
+
+/////////////////function calls/////////////////
+//todo when calling say which gridsize and grid type
+var utils= new utilities();
+var GLOBALS=new globals(utils);
+
+// 
+var three=new threejs(GLOBALS,utils,'webgl','threejs1');
+three.init();
+
+
+initGraph();// todo enclose into class
+initEditor();// todo enclose into class
+
+var assembler= new Assembler(three,GLOBALS,1,50,[new THREE.Vector3(0,0,0)],[new THREE.Vector3(GLOBALS.gridSize/2.0*GLOBALS.voxelSpacing,0,0)]);
+assembler.run();
+
+var info={
+    name:"MNIST",
+    imageSize:"(28,28)",
+    numDatasets:65000,
+    numTraining:55000,
+    numTest:65000-55000,
+};
+GLOBALS.addNode(0,8,0 ,false,info);
+info={
+    name:"conv2d_Conv2D1",
+    inputShape:"(batch,28,28,1)",
+    kernelSize:5,
+    filters:8,
+    strides:1,
+    activation: 'relu',
+    kernelInitializer: 'varianceScaling',
+    outputShape:"(batch,24,24,8)",
+    numParams:208,
+    Trainable:true
+
+};
+GLOBALS.addNode(0,8,1 ,false,info);
+
+info={
+    name:"testImage",
+    imageSize:"(1,28,28)"
+};
+GLOBALS.addNode(0,8,2 ,false,info);
+
+
+info={
+    name:"max_pooling2d_MaxPooling2D1",
+    inputShape:"(batch,24,24,8)",
+    poolSize:"[2,2]",
+    strides:"[2,2]",
+    outputShape:"(batch,12,12,8)",
+    numParams:0,
+    Trainable:true
+
+};
+GLOBALS.addNode(0,9,0 ,false,info);
+
+
+info={
+    name:"conv2d_Conv2D2",
+    inputShape:"(batch,12,12,8)",
+    kernelSize:5,
+    filters:16,
+    strides:1,
+    activation: 'relu',
+    kernelInitializer: 'varianceScaling',
+    outputShape:"(batch,8,8,16)",
+    numParams:3216,
+    Trainable:true
+
+};
+GLOBALS.addNode(0,9,1 ,false,info);
+
+info={
+    name:"max_pooling2d_MaxPooling2D2",
+    inputShape:"(batch,8,8,16)",
+    poolSize:"[2,2]",
+    strides:"[2,2]",
+    outputShape:"(batch,4,4,16)",
+    numParams:0,
+    Trainable:true
+
+};
+GLOBALS.addNode(0,10,0,false,info);
+
+info={
+    name:"flatten_Flatten1",
+    inputShape:"(batch,4,4,16)",
+    outputShape:"(batch,256)",
+    numParams:0,
+    Trainable:true
+
+};
+GLOBALS.addNode(0,10,1,false,info);
+
+info={
+    name:"dense_Dense1",
+    inputShape:"(batch,256)",
+    kernelInitializer: 'varianceScaling',
+    activation: 'softmax',
+    outputShape:"(batch,10)",
+    numParams:2570,
+    Trainable:true
+
+};
+GLOBALS.addNode(0,11,0,false,info);
+
+
+info={
+    name:"loss_categoricalCrossentropy",
+    metrics: ['accuracy'],
+
+};
+GLOBALS.addNode(0,11,1,false,info);
+
+info={
+    name:"prediction"
+};
+GLOBALS.addNode(0,12,0,false,info);
+
+
+
diff --git a/01_Code/physical_computing_interface/demos/indexDNN.html b/01_Code/physical_computing_interface/demos/indexDNN.html
new file mode 100644
index 0000000000000000000000000000000000000000..edfaa104c9efd64614eebe81e867a0d970881c67
--- /dev/null
+++ b/01_Code/physical_computing_interface/demos/indexDNN.html
@@ -0,0 +1,212 @@
+<html>
+
+<head>
+    <title>Physical Computing Interface</title>
+    <link rel="stylesheet" type="text/css" href="style.css" media="screen"/>
+    <link rel="stylesheet" type="text/css" href="../lib/jsoneditor/jsoneditor.css" >
+    <!-- <link href="https://unpkg.com/font-awesome@5.8.0/css/font-awesome.min.css" rel="stylesheet" type="text/css" /> -->
+    <!-- <link href="//netdna.bootstrapcdn.com/font-awesome/3.2.1/css/font-awesome.css" rel="stylesheet"> -->
+    <script src="https://kit.fontawesome.com/99c302ff33.js" crossorigin="anonymous"></script>
+
+</head>
+        
+<body>
+
+    <div id="threejs">
+        <div id="threejs1">
+            <div class="header1">
+                    <i> Assembly</i>
+                    <!-- Three.js -->
+            </div>
+            
+            <div id="webgl"></div>
+        </div>
+        <div class="slidecontainer">
+                <input type="range" min="0" max="0" value="1" class="slider" id="time">
+        </div>
+            
+    </div>
+
+    <div id="simulation">
+        <div id="threejs1">
+            <div class="header1">
+                    <i> Deep Neural Networks</i>
+            </div>
+            <div class="dragbar2"></div>
+            <!-- <div id="webgl1"></div> -->
+            <!-- todo put this somewhere else -->
+            <div id=cyy>
+                
+            </div>
+            
+        </div>
+            
+    </div>
+    
+
+
+    <div id="graph">
+            <div class="header2">
+                    <div class="dragbar"></div> 
+                <i> Graph</i>
+            </div>
+            <div id=jsondiveditor>
+                
+                <div id="cy"></div>
+            </div>
+            <div class="dragbar"></div> 
+            
+            
+    </div>
+
+    <div id="json">
+            
+
+            <div class="header2">
+                    <div class="dragbar"></div> 
+                <i> Node</i>
+            </div>
+            
+            
+            <div id=jsondiveditor>
+                    
+                <br>
+                    <!-- <p>
+                        <button class="button" id="setJSON">Get Info</button>
+                        <button class="button" id="getJSON">Set Info</button>
+                    </p> -->
+                
+                    <div id="jsoneditor"></div>
+            </div>
+            <div class="dragbar"></div> 
+            <div class="dragbar1"></div>
+            
+            
+            
+    </div>
+
+    <div class="footer1">
+        <strong>left-click</strong>: place voxel/orbit, <strong>right-click</strong>: radial menu
+    </div>
+
+    <div class="footer2">
+        <!-- update change to more instructions/feedback -->
+        <i>instructions</i>
+    </div>
+
+</body>
+
+<!-- TODO: 
+            Clean structure to modules?
+            Add another footer
+             
+-->
+
+
+<!-- libraries -->
+<script src="https://ajax.googleapis.com/ajax/libs/jquery/3.4.1/jquery.min.js"></script>
+
+<!-- Import TensorFlow.js -->
+<script src="https://cdn.jsdelivr.net/npm/@tensorflow/tfjs@1.0.0/dist/tf.min.js"></script>
+<!-- Import tfjs-vis -->
+<script src="https://cdn.jsdelivr.net/npm/@tensorflow/tfjs-vis@1.0.2/dist/tfjs-vis.umd.min.js"></script>
+
+
+
+<script src="../lib/cytoscape.min.js"></script>
+<script src="../lib/cytoscape-cxtmenu.js"></script>
+<script src="https://unpkg.com/layout-base/layout-base.js"></script>
+<script src="https://unpkg.com/cose-base/cose-base.js"></script>
+<script src="https://unpkg.com/cytoscape-cose-bilkent/cytoscape-cose-bilkent.js"></script>
+<script src="../lib/cytoscape-expand-collapse.js"></script>
+
+
+<script src="../lib/jsoneditor/jsoneditor.js"></script>
+
+
+<script src="../lib/three.min.js"></script>
+<script src="../lib/OrbitControls.js"></script>
+<script src="../lib/dat.gui.min.js"></script>
+<script src="../lib/TransformControls.js"></script>
+
+<script src="../assembly/InverseKinematic.js"></script><!-- TODO LOCATION -->
+<script src="../assembly/voxel.js"></script><!-- TODO CHANGE TO DICE PIECES -->
+
+
+<!-- code -->
+<script src="../globals.js"></script> <!-- event handling and GLOBALS,UTILS -->
+<script src="../threejs/grid.js"></script><!-- threejs visualization -->
+<script src="../assembly/assembly.js"></script><!-- robot assembly -->
+<script src="../assembly/replay.js"></script><!-- assembly and timestep handling -->
+
+<script src="../graph/graph.js"></script><!--graph flow visualization-->
+<script src="../dnn/data.js" type="module"></script><!-- MNIST data loading -->
+<script src="../dnn/graph.js"></script><!-- graph flow visualization-->
+
+<script src="../dnn/script.js" type="module"></script><!-- graph flow visualization-->
+
+
+
+
+<script src="../json/json.js"></script><!-- json -->
+
+
+<script src="./app1.js"></script><!-- threejs visualization -->
+
+
+
+
+
+<!-- windows control -->
+<script>
+    var i = 0;
+    $('.dragbar').mousedown(function(e){
+       
+       e.preventDefault();
+       $(document).mousemove(function(e){
+       //   $('#position').html(e.pageX +', '+ e.pageY);
+         $('#threejs').css("width",e.pageX+2);
+
+         $('#simulation').css("width",e.pageX+2);
+
+         $('#graph').css("left",e.pageX+2);
+         $('#json').css("left",e.pageX+2);
+         $('.footer1').css("width",e.pageX+2);
+         $('.footer2').css("left",e.pageX+2);
+         
+      })
+   //    onWindowResize();//todo change location
+   });
+
+   $('.dragbar1').mousedown(function(e){
+      e.preventDefault();
+      // $('#mousestatus').html("mousedown" + i++);
+      $(document).mousemove(function(e){
+       $('#graph').css("height",e.pageY+2);
+       $('#json').css("top",e.pageY+2);
+        
+     })
+  });
+
+  $('.dragbar2').mousedown(function(e){
+      e.preventDefault();
+      // $('#mousestatus').html("mousedown" + i++);
+      $(document).mousemove(function(e){
+       $('#threejs').css("height",e.pageY+2);
+       $('#simulation').css("top",e.pageY+2);
+        
+     })
+   //   onWindowResize();//todo change location
+  });
+
+  $(document).mouseup(function(e){
+      $(document).unbind('mousemove');
+   });
+
+
+</script>
+
+
+
+
+</html>
diff --git a/01_Code/physical_computing_interface/demos/style.css b/01_Code/physical_computing_interface/demos/style.css
new file mode 100644
index 0000000000000000000000000000000000000000..339f62f2b814ab24d199e7c4e93c7006ad3b48f6
--- /dev/null
+++ b/01_Code/physical_computing_interface/demos/style.css
@@ -0,0 +1,317 @@
+body,html{width:100%;height:100%;padding:0;margin:0;}
+
+:root {
+    --color1: #ffffff; /*white*/
+    --color11: #ffffff8c; /*shafaf*/
+    --color2: #020227;  /*kohly*/
+    --color3: #1c5c61; /*teal*/
+    --top: 20px;  
+    --dragwidth: 10px; 
+    --bottom: 20px;
+    --font: "Times New Roman", Times, serif;
+    /* --font: "Andale Mono", AndaleMono, monospace; */
+    /* --font: "Courier New", Courier, monospace; */
+}
+
+.header1{
+    background-color: rgba(0, 0, 0, 0);/*transparent*/
+
+    height: var(--top);
+    width: 50%;
+    float: left;
+    position: absolute;
+
+    bottom: var(--bottom);
+    overflow-y: hidden;
+    top:var(--dragwidth);
+    /* left:var(--dragwidth); */
+
+    font-family: var(--font);
+    color: var(--color2);
+    top:var(--dragwidth);
+}
+
+.header2{
+    background-color:  rgba(0, 0, 0, 0);/*transparent*/
+
+    height: var(--top);
+    float: left;
+    position: absolute;
+
+    bottom: var(--bottom);
+    top:var(--dragwidth);
+    /* left:var(--dragwidth); */
+
+    
+    font-family: var(--font);
+    color: var(--color1);
+    
+}
+
+#threejs1{
+    background-color: var(--color11);
+    /* background-image: linear-gradient(+90deg, #03023d 90%,#ffffff ); */
+    width: 100%;
+    height: 100%;
+    float: top;
+    /* z-index: -1; */
+}
+
+#threejs{
+   background-color: var(--color11);
+   /* background-image: linear-gradient(+90deg, #03023d 90%,#ffffff ); */
+   width: 50%;
+   float: left;
+   position: absolute;
+   top:0px;
+   bottom: 50%;
+   /* bottom: var(--bottom); */
+   overflow-y: hidden;
+   font-family: var(--font);
+   color: var(--color2);
+   /* z-index: -1; */
+}
+
+#simulation{
+    background-color: var(--color11);
+    /* background-image: linear-gradient(+90deg, #03023d 90%,#ffffff ); */
+    width: 50%;
+    float: left;
+    position: absolute;
+    top:50%;
+    bottom: var(--bottom);
+    /* overflow-y: hidden; */
+    overflow: scroll;
+    font-family: var(--font);
+    color: var(--color2);
+    /* z-index: -1; */
+}
+
+#webgl{
+    top:var(--top);
+    bottom: var(--bottom);
+    overflow-y: hidden;
+    font-family: var(--font);
+    color: var(--color2);
+}
+
+#webgl1{
+    top:var(--top);
+    bottom: var(--bottom);
+    overflow-y: hidden;
+    font-family: var(--font);
+    color: var(--color1);
+}
+
+#graph{
+    background-color: var(--color2);
+    /* background-image: linear-gradient(+90deg, #ffffff, #ffffff); */
+    float: right;
+    float: top;
+    position: absolute;
+    top: 0px;
+    bottom: 50%;
+    /* bottom: var(--bottom); */
+    right: 0;
+    left:50%;
+    font-family: var(--font);
+    color: var(--color1);
+}
+
+#json{
+    background-color: var(--color2);
+    /* background-image: linear-gradient(+90deg, #ffffff, #ffffff); */
+    float: right;
+    float: bottom;
+    position: absolute;
+    /* top:var(--top); */
+    bottom: var(--bottom);
+    right: 0;
+    left:50%;
+    top:50%;
+    font-family: var(--font);
+    color: var(--color1);
+}
+
+.footer1{
+    background-color: var(--color1);
+    width: 50%;
+    float: left;
+    position: absolute;
+    overflow-y: hidden;
+    font-family: var(--font);
+    color: var(--color2);
+    height: var(--bottom);
+    bottom:0;
+}
+ 
+.footer2{
+    background-color: var(--color1);
+    float: right;
+    position: absolute;
+    right: 0;
+    left:50%;
+    font-family: var(--font);
+    color: var(--color2);
+    height: var(--bottom);
+    bottom:0;
+}
+
+
+.dragbar{
+    /* background-color: var(--color3); */
+   background-image: linear-gradient(+90deg, var(--color1) ,var(--color11) ,var(--color2));
+   height:100%;
+   float: left;
+   width: var(--dragwidth);
+   cursor: col-resize;
+}
+
+.dragbar1{
+    /* background-color: var(--color3); */
+    background-image: linear-gradient(+0deg, var(--color2)50% ,var(--color1)60% ,var(--color2));
+    width:100%;
+    left:var(--dragwidth);
+    float: bottom;
+    /* position: absolute; */
+    height: var(--dragwidth);
+    cursor: row-resize;
+}
+
+.dragbar2{
+    /* background-color: var(--color3); */
+    background-image: linear-gradient(+0deg, var(--color1)50% ,var(--color2)60% ,var(--color1));
+    width:100%;
+    left:var(--dragwidth);
+    float: bottom;
+    /* position: absolute; */
+    height: var(--dragwidth);
+    cursor: row-resize;
+}
+
+#cy {
+    height: 100%;
+    width: 100%;
+    /* position: absolute; */
+    /* float: right; */
+    float: top;
+    font-family: var(--font);
+    /* left: var(--dragwidth);
+    top:var(--top); */
+}
+
+#cyy {
+    height: 100%;
+    width: 100%;
+    /* position: absolute; */
+    /* float: right; */
+    float: top;
+    font-family: var(--font);
+    /* left: var(--dragwidth);
+    top:var(--top); */
+}
+
+#jsoneditor {
+    width: 100%;
+    height: 100%;
+    float: bottom;
+}
+
+#jsondiveditor {
+    width: 95%;
+    height: 90%;
+    left: var(--top);
+    top:var(--top);
+    float: right;
+    float: top;
+    position: absolute;
+    color: var(--color1);
+}
+
+.button {
+    background-color: transparent;
+    border: 0;
+    color: var(--color1);
+    border: 1px solid var(--color3);
+    padding: 4px 12px;
+    
+    -o-transition: background-color .2s ease-in; 
+	-moz-transition: background-color .2s ease-in;
+	-webkit-transition: background-color .2s ease-in; 
+	transition: background-color .2s ease-in; 
+}
+
+.button:hover {
+    background-color: #e5e5e5;    
+}
+
+.button:active {
+    background-color: #ccc;
+}
+
+#time{
+    z-index: 9999999;
+
+}
+
+.slidecontainer {
+    width: 100%;
+    /* background-color: var(--color1); */
+    /* opacity: 0.0; */
+
+    height: var(--top);
+    /* width: 100%; */
+    float: left;
+    float: bottom;
+    position: absolute;
+    border-width: 10px;
+
+    bottom: var(--bottom);
+    overflow-y: hidden;
+    /* top:var(--dragwidth); */
+    /* left:var(--dragwidth); */
+
+    font-family: var(--font);
+    color: var(--color2);
+    z-index: 1000000;
+    /* top:var(--dragwidth); */
+}
+  
+
+.slider {
+  -webkit-appearance: none;
+  width: 80%;
+  margin-left: 10%;
+  /* left: 50%; */
+  height: calc(var(--dragwidth) / 3);
+  /* border-width: 10px; */
+  border-radius: 5px;
+  background: #d3d3d3;
+  outline: none;
+  opacity: 0.7;
+  -webkit-transition: .2s;
+  transition: opacity .2s;
+}
+
+.slider:hover {
+  opacity: 1;
+}
+
+.slider::-webkit-slider-thumb {
+  -webkit-appearance: none;
+  appearance: none;
+  width: 10px;
+  height: 10px;
+  border-radius: 50%;
+  background: var(--color3);
+  cursor: pointer;
+}
+
+.slider::-moz-range-thumb {
+  width: 10px;
+  height: 10px;
+  border-radius: 50%;
+  background: var(--color3);
+  cursor: pointer;
+}
+
diff --git a/01_Code/physical_computing_interface/dnn/data.js b/01_Code/physical_computing_interface/dnn/data.js
new file mode 100644
index 0000000000000000000000000000000000000000..407a5f4d2511cbad799f7fd96c176d3ee3954ac7
--- /dev/null
+++ b/01_Code/physical_computing_interface/dnn/data.js
@@ -0,0 +1,142 @@
+/**
+ * @license
+ * Copyright 2018 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+
+const IMAGE_SIZE = 784;
+const NUM_CLASSES = 10;
+const NUM_DATASET_ELEMENTS = 65000;
+
+const NUM_TRAIN_ELEMENTS = 55000;
+const NUM_TEST_ELEMENTS = NUM_DATASET_ELEMENTS - NUM_TRAIN_ELEMENTS;
+
+const MNIST_IMAGES_SPRITE_PATH =
+    'https://storage.googleapis.com/learnjs-data/model-builder/mnist_images.png';
+const MNIST_LABELS_PATH =
+    'https://storage.googleapis.com/learnjs-data/model-builder/mnist_labels_uint8';
+
+/**
+ * A class that fetches the sprited MNIST dataset and returns shuffled batches.
+ *
+ * NOTE: This will get much easier. For now, we do data fetching and
+ * manipulation manually.
+ */
+export class MnistData {
+  constructor() {
+    this.shuffledTrainIndex = 0;
+    this.shuffledTestIndex = 0;
+  }
+
+  async load() {
+    // Make a request for the MNIST sprited image.
+    const img = new Image();
+    const canvas = document.createElement('canvas');
+    const ctx = canvas.getContext('2d');
+    const imgRequest = new Promise((resolve, reject) => {
+      img.crossOrigin = '';
+      img.onload = () => {
+        img.width = img.naturalWidth;
+        img.height = img.naturalHeight;
+
+        const datasetBytesBuffer =
+            new ArrayBuffer(NUM_DATASET_ELEMENTS * IMAGE_SIZE * 4);
+
+        const chunkSize = 5000;
+        canvas.width = img.width;
+        canvas.height = chunkSize;
+
+        for (let i = 0; i < NUM_DATASET_ELEMENTS / chunkSize; i++) {
+          const datasetBytesView = new Float32Array(
+              datasetBytesBuffer, i * IMAGE_SIZE * chunkSize * 4,
+              IMAGE_SIZE * chunkSize);
+          ctx.drawImage(
+              img, 0, i * chunkSize, img.width, chunkSize, 0, 0, img.width,
+              chunkSize);
+
+          const imageData = ctx.getImageData(0, 0, canvas.width, canvas.height);
+
+          for (let j = 0; j < imageData.data.length / 4; j++) {
+            // All channels hold an equal value since the image is grayscale, so
+            // just read the red channel.
+            datasetBytesView[j] = imageData.data[j * 4] / 255;
+          }
+        }
+        this.datasetImages = new Float32Array(datasetBytesBuffer);
+
+        resolve();
+      };
+      img.src = MNIST_IMAGES_SPRITE_PATH;
+    });
+
+    const labelsRequest = fetch(MNIST_LABELS_PATH);
+    const [imgResponse, labelsResponse] =
+        await Promise.all([imgRequest, labelsRequest]);
+
+    this.datasetLabels = new Uint8Array(await labelsResponse.arrayBuffer());
+
+    // Create shuffled indices into the train/test set for when we select a
+    // random dataset element for training / validation.
+    this.trainIndices = tf.util.createShuffledIndices(NUM_TRAIN_ELEMENTS);
+    this.testIndices = tf.util.createShuffledIndices(NUM_TEST_ELEMENTS);
+
+    // Slice the the images and labels into train and test sets.
+    this.trainImages =
+        this.datasetImages.slice(0, IMAGE_SIZE * NUM_TRAIN_ELEMENTS);
+    this.testImages = this.datasetImages.slice(IMAGE_SIZE * NUM_TRAIN_ELEMENTS);
+    this.trainLabels =
+        this.datasetLabels.slice(0, NUM_CLASSES * NUM_TRAIN_ELEMENTS);
+    this.testLabels =
+        this.datasetLabels.slice(NUM_CLASSES * NUM_TRAIN_ELEMENTS);
+  }
+
+  nextTrainBatch(batchSize) {
+    return this.nextBatch(
+        batchSize, [this.trainImages, this.trainLabels], () => {
+          this.shuffledTrainIndex =
+              (this.shuffledTrainIndex + 1) % this.trainIndices.length;
+          return this.trainIndices[this.shuffledTrainIndex];
+        });
+  }
+
+  nextTestBatch(batchSize) {
+    return this.nextBatch(batchSize, [this.testImages, this.testLabels], () => {
+      this.shuffledTestIndex =
+          (this.shuffledTestIndex + 1) % this.testIndices.length;
+      return this.testIndices[this.shuffledTestIndex];
+    });
+  }
+
+  nextBatch(batchSize, data, index) {
+    const batchImagesArray = new Float32Array(batchSize * IMAGE_SIZE);
+    const batchLabelsArray = new Uint8Array(batchSize * NUM_CLASSES);
+
+    for (let i = 0; i < batchSize; i++) {
+      const idx = index();
+
+      const image =
+          data[0].slice(idx * IMAGE_SIZE, idx * IMAGE_SIZE + IMAGE_SIZE);
+      batchImagesArray.set(image, i * IMAGE_SIZE);
+
+      const label =
+          data[1].slice(idx * NUM_CLASSES, idx * NUM_CLASSES + NUM_CLASSES);
+      batchLabelsArray.set(label, i * NUM_CLASSES);
+    }
+
+    const xs = tf.tensor2d(batchImagesArray, [batchSize, IMAGE_SIZE]);
+    const labels = tf.tensor2d(batchLabelsArray, [batchSize, NUM_CLASSES]);
+
+    return {xs, labels};
+  }
+}
\ No newline at end of file
diff --git a/01_Code/physical_computing_interface/dnn/graph.js b/01_Code/physical_computing_interface/dnn/graph.js
new file mode 100644
index 0000000000000000000000000000000000000000..1acf35f5e494b807bd5aefbebeea66c893f7f379
--- /dev/null
+++ b/01_Code/physical_computing_interface/dnn/graph.js
@@ -0,0 +1,297 @@
+var color1= "#ffffff"; //white
+var color11= "#ffffff8c"; //white transparent
+var color2= "#020227";  //dark blue
+var color3= "#1c5c61"; //teal
+var color33= "#1c5c618c"; //teal transparent
+var color4= "#fa6e70"; //red/orange
+var color44= "#fa6e708c"; //red/orange
+var color5="#380152"; //purple
+var color6="#696767"; //grey
+var font= "consolas";//'font-family'
+
+var cyy = cytoscape({
+  container: document.getElementById('cyy'),
+  ready: function(){
+      var api = this.expandCollapse({
+          layoutBy: {
+              name: "cose-bilkent",
+              animate: "end",
+              randomize: false,
+              fit: true
+          },
+          fisheye: true,
+          animate: false,
+          undoable: false
+      });
+      // api.collapseAll();
+  },
+  style: cytoscape.stylesheet()
+    .selector('node')
+      .css({
+        'content': 'data(name)',
+        'text-valign': 'center',
+        'color': 'white',
+        'font-family': "consolas",
+        // 'font-family':"Times New Roman",
+        'width': 80,
+        'height': 80
+      })
+    .selector('edge')
+      .css({
+        'content': 'data(name)',
+        'width': 8,
+        'line-color': '#888',
+        'target-arrow-color': '#888',
+        'source-arrow-color': '#888',
+        'target-arrow-shape': 'triangle'
+      })
+    .selector(':selected')
+    .selector('$node > node')
+      .css({
+        'shape': 'roundrectangle',
+        'text-valign': 'top',
+        'background-color': '#ccc',
+        'background-opacity': 0.1,
+        'color': '#888',
+        'text-outline-width':
+        0,
+        'font-size': 25
+      })
+    .selector('#core, #app')
+      .css({
+        'width': 120,
+        'height': 120,
+        'font-size': 25
+      })
+    .selector('#api')
+      .css({
+        'padding-top': 20,
+        'padding-left': 20,
+        'padding-bottom': 20,
+        'padding-right': 20
+      })
+    .selector('#ext, .ext')
+      .css({
+        'background-color': color6,
+        // 'text-outline-color': '#93CDDD',
+        'line-color': color6,
+        'target-arrow-color': color6,
+        'color': color1,
+        'font-family':font,
+        "text-wrap": "wrap",
+      })
+    .selector('#input, .input')
+      .css({
+        'background-color': color3,
+        // 'text-outline-color': '#93CDDD',
+        'line-color': color3,
+        'target-arrow-color': color3,
+        'color': color1,
+        'font-family':font,
+        "text-wrap": "wrap",
+      })
+    .selector('#output, .output')
+      .css({
+        'background-color': color4,
+        // 'text-outline-color': '#93CDDD',
+        'line-color': color4,
+        'target-arrow-color': color4,
+        'color': color1,
+        'font-family':font,
+        "text-wrap": "wrap",
+      })
+    .selector('#layers, .layers')
+      .css({
+        'background-color': color5,
+        // 'text-outline-color': '#93CDDD',
+        'line-color': color5,
+        'target-arrow-color': color5,
+        'color': color1,
+        'font-family':font,
+        "text-wrap": "wrap",
+      })
+      .selector('#exte, .exte')
+      .css({
+        'background-color': color3,
+        // 'text-outline-color': '#93CDDD',
+        'line-color': color3,
+        'target-arrow-color': color3,
+        'color': color1,
+        'font-family':font,
+        'curve-style': 'bezier',
+        'width': 2,
+        "text-wrap": "wrap",
+      })
+    .selector('#app, .app')
+      .css({
+        'background-color': '#F79646',
+        'text-outline-color': '#F79646',
+        'line-color': '#F79646',
+        'target-arrow-color': '#F79646',
+        
+        'color': '#fff'
+      })
+    .selector('#viz, .viz')
+      .css({
+        'background-fit': 'cover',
+        'background-image': 'https://live.staticflickr.com/7272/7633179468_3e19e45a0c_b.jpg'
+      })
+    .selector('#cy')
+      .css({
+        'background-opacity': 0,
+        'border-width': 1,
+        'border-color': '#aaa',
+        'border-opacity': 0.3,
+        'font-size': 50,
+        'padding-top': 40,
+        'padding-left': 40,
+        'padding-bottom': 40,
+        'padding-right': 40
+      }),
+
+  elements: {
+    nodes: [
+      
+      ///legend
+      {
+        data: { id: 'legend', name: 'Legend' ,},
+      },
+      {
+        data: { id: 'inputs', name: 'inputs',parent:'legend' },
+        position: { x: 0, y: 0 },classes: 'input',
+      },
+      {
+        data: { id: 'outputs', name: 'outputs',parent:'legend'},
+        position: { x: 0, y: 0 },classes: 'output',
+      },
+      {
+        data: { id: 'layers', name: 'Layers',parent:'legend'},
+        position: { x: 0, y: 0 },classes: 'layers',
+      },
+      {
+        data: { id: 'losses', name: 'Losses',parent:'legend'},
+        position: { x: 0, y: 0 },classes: 'loss',
+      },
+      {
+        data: { id: 'vizs', name: 'Viz',parent:'legend'},
+        position: { x: 0, y: 0 },classes: 'viz',
+      },
+      ////
+
+      {
+        data: { id: 'CNN', name: 'CNN' },
+      },
+
+      ///////////hardware
+      
+      
+      
+      {
+        data: { id: 'pred', name: 'prediction', parent: 'CNN' },
+        position: { x: 0, y: 0 },classes: 'output',
+      },
+      {
+        data: { id: '7', name: 'categoricalCrossentropy', parent: 'CNN' },
+        position: { x: 0, y: 0 },classes: 'loss',
+      },
+      {
+        data: { id: '6', name: 'dense', parent: 'CNN' },
+        position: { x: 0, y: 0 },classes: 'layers',
+      },
+      {
+        data: { id: '5', name: 'flatten', parent: 'CNN' },
+        position: { x: 0, y: 0 },classes: 'layers',
+      },
+      {
+        data: { id: '4', name: 'maxPooling2d', parent: 'CNN' },
+        position: { x: 0, y: 0 },classes: 'layers',
+      },
+      {
+        data: { id: '3', name: 'conv2d', parent: 'CNN' },
+        position: { x: 0, y: 0 },classes: 'layers',
+      },
+      {
+        data: { id: '2', name: 'maxPooling2d', parent: 'CNN' },
+        position: { x: 0, y: 0 },classes: 'layers',
+      },
+      {
+        data: { id: '1', name: 'conv2d', parent: 'CNN' },
+        position: { x: 0, y: 0 },classes: 'layers',
+      },
+      {
+        data: { id: '0', name: 'MNIST',parent:'CNN' },
+        position: { x: 0, y: 0 },classes: 'input',
+      },
+      {
+        data: { id: 'ex', name: 'Test',parent:'CNN' },
+        position: { x: 0, y: 0 },classes: 'viz',
+      },
+
+
+    ],
+    edges: [
+
+      { data: { source: '0', target: '1' , name: ''},classes: 'exte', },
+      { data: { source: '1', target: '2' , name: ''},classes: 'exte', },
+      { data: { source: '2', target: '3' , name: ''},classes: 'exte', },
+      { data: { source: '3', target: '4' , name: ''},classes: 'exte', },
+      { data: { source: '4', target: '5' , name: ''},classes: 'exte', },
+      { data: { source: '5', target: '6' , name: ''},classes: 'exte', },
+      { data: { source: '6', target: '7' , name: ''},classes: 'exte', },
+      { data: { source: '7', target: 'pred' , name: ''},classes: 'exte', },
+      { data: { source: 'ex', target: '1' , name: ''},classes: 'exte', },
+
+
+
+    ]
+  },
+
+  layout: {
+    name: 'preset'
+  }
+});
+
+var api = cyy.expandCollapse('get');
+
+api.expandAll();
+
+// cy.$('#vizs').style('background-image', 'https://farm6.staticflickr.com/5109/5817854163_eaccd688f5_b.jpg');
+// cy.$('#vizs').style('background-image', dataUri);
+
+console.log(cyy.$id("vizs"))
+
+
+function createImage(width,height){
+    var buffer = new Uint8ClampedArray(width * height * 4); // have enough bytes
+
+    for(var y = 0; y < height; y++) {
+        for(var x = 0; x < width; x++) {
+            var pos = (y * width + x) * 4; // position in buffer based on x and y
+            buffer[pos  ] = 255;           // some R value [0, 255]
+            buffer[pos+1] = 0;           // some G value
+            buffer[pos+2] = 0;           // some B value
+            buffer[pos+3] = 255;           // set alpha channel
+        }
+    }
+    // create off-screen canvas element
+    var canvas = document.createElement('canvas'),
+        ctx = canvas.getContext('2d');
+
+    canvas.width = width;
+    canvas.height = height;
+
+    // create imageData object
+    var idata = ctx.createImageData(width, height);
+
+    // set our buffer as source
+    idata.data.set(buffer);
+
+    // update canvas with new data
+    ctx.putImageData(idata, 0, 0);
+
+    var dataUri = canvas.toDataURL(); // produces a PNG file
+    // console.log(dataUri)
+    return dataUri;
+    
+}
+
diff --git a/01_Code/physical_computing_interface/dnn/index.html b/01_Code/physical_computing_interface/dnn/index.html
new file mode 100644
index 0000000000000000000000000000000000000000..5f6080822e17b9d40389c12a50c97a04196b3b95
--- /dev/null
+++ b/01_Code/physical_computing_interface/dnn/index.html
@@ -0,0 +1,81 @@
+<!DOCTYPE html>
+<html>
+<head>
+  <meta charset="utf-8">
+  <meta http-equiv="X-UA-Compatible" content="IE=edge">
+  <meta name="viewport" content="width=device-width, initial-scale=1.0">
+  <title>CNN</title>
+
+  <!-- Import TensorFlow.js -->
+  <script src="https://cdn.jsdelivr.net/npm/@tensorflow/tfjs@1.0.0/dist/tf.min.js"></script>
+  <!-- Import tfjs-vis -->
+  <script src="https://cdn.jsdelivr.net/npm/@tensorflow/tfjs-vis@1.0.2/dist/tfjs-vis.umd.min.js"></script>
+  
+  <script src="../lib/cytoscape.min.js"></script>
+  <script src="../lib/cytoscape-cxtmenu.js"></script>
+  <script src="https://unpkg.com/layout-base/layout-base.js"></script>
+  <script src="https://unpkg.com/cose-base/cose-base.js"></script>
+  <script src="https://unpkg.com/cytoscape-cose-bilkent/cytoscape-cose-bilkent.js"></script>
+  <script src="../lib/cytoscape-expand-collapse.js"></script>
+    
+  
+  
+  <!-- Import the data file -->
+  <script src="data.js" type="module"></script>
+
+  <!-- Import the main script file -->
+  <script src="script.js" type="module"></script>
+
+  <script src="graph.js" type="module"></script>
+
+</head>
+<style>
+    :root {
+        --color1: #ffffff; /*white*/
+        --color11: #ffffff8c; /*shafaf*/
+        --color2: #020227;  /*kohly*/
+        --color3: #1c5c61; /*teal*/
+        --top: 20px;  
+        --dragwidth: 0.4%; 
+        --bottom: 20px;
+        --font: "Times New Roman", Times, serif;
+    }
+    body { 
+        font: 8px helvetica neue, helvetica, arial, sans-serif;
+    }
+    #cy {
+        background-color: var(--color2);
+        height: 100%;
+        width: 100%;
+        /* position: absolute; */
+        float: bottom;
+        left: 0;
+        top: var(--dragwidth);
+    }
+    #container {
+        background-color: var(--color2);
+        height: 100%;
+        width: 100%;
+        position: absolute;
+        float: bottom;
+        left: 0;
+        top: 0;
+    }
+
+    /* you can set the disabled style how you like on the text/icon */
+    .cxtmenu-disabled {
+        opacity: 0.1;
+    }
+    /* you can set the disabled style how you like on the text/icon */
+    .cxtmenu {
+        opacity: 0.1;
+    }
+    
+</style>
+
+<body>
+    <div id="container">
+        <div id="cy"></div>
+    </div>
+</body>
+</html>
\ No newline at end of file
diff --git a/01_Code/physical_computing_interface/dnn/script.js b/01_Code/physical_computing_interface/dnn/script.js
new file mode 100644
index 0000000000000000000000000000000000000000..8418b4077cde2c33c7b8270eff93936dd318bd30
--- /dev/null
+++ b/01_Code/physical_computing_interface/dnn/script.js
@@ -0,0 +1,282 @@
+console.log('Hello TensorFlow');
+
+import {MnistData} from './data.js';
+
+async function showExamples(data) {
+  // Create a container in the visor
+  const surface =
+    tfvis.visor().surface({ name: 'Input Data Examples', tab: 'Input Data'});  
+
+  // Get the examples
+  const examples = data.nextTestBatch(20);
+  const numExamples = examples.xs.shape[0];
+  
+  // Create a canvas element to render each example
+  for (let i = 0; i < numExamples; i++) {
+    const imageTensor = tf.tidy(() => {
+      // Reshape the image to 28x28 px
+      return examples.xs
+        .slice([i, 0], [1, examples.xs.shape[1]])
+        .reshape([28, 28, 1]);
+    });
+    
+    const canvas = document.createElement('canvas');
+    canvas.width = 28;
+    canvas.height = 28;
+    canvas.style = 'margin: 4px;';
+    
+    await tf.browser.toPixels(imageTensor, canvas);
+    console.log(imageTensor)
+    surface.drawArea.appendChild(canvas);
+
+    imageTensor.dispose();
+  }
+}
+
+async function vizExample(data){
+    const examples = data.nextTestBatch(1);
+    const imageTensor = tf.tidy(() => {
+        // Reshape the image to 28x28 px
+        return examples.xs
+            .slice([0, 0], [1, examples.xs.shape[1]])
+            .reshape([28, 28, 1]);
+    });
+    const imageArray=imageTensor.arraySync();
+    var dataUri= createImage(imageArray);
+    cyy.$('#ex').style('background-image', dataUri);
+    updateVariable('pred', "????");
+
+    
+}
+
+function updateVariable(name, value){
+    cyy.$id(name).data('name', 'Prediction\n'+value);
+}
+
+function createImage(imageArray){
+    const width=imageArray.length;
+    const height=imageArray[0].length;
+
+    var buffer = new Uint8ClampedArray(width * height * 4); // have enough bytes
+
+    for(var y = 0; y < height; y++) {
+        for(var x = 0; x < width; x++) {
+            var pos = (y * width + x) * 4; // position in buffer based on x and y
+            buffer[pos  ] = imageArray[y][x]*255;           // some R value [0, 255]
+            buffer[pos+1] = imageArray[y][x]*255;           // some G value
+            buffer[pos+2] = imageArray[y][x]*255;           // some B value
+            buffer[pos+3] = 255;           // set alpha channel
+        }
+    }
+    // create off-screen canvas element
+    var canvas = document.createElement('canvas'),
+        ctx = canvas.getContext('2d');
+
+    canvas.width = width;
+    canvas.height = height;
+
+    // create imageData object
+    var idata = ctx.createImageData(width, height);
+
+    // set our buffer as source
+    idata.data.set(buffer);
+
+    // update canvas with new data
+    ctx.putImageData(idata, 0, 0);
+
+    var dataUri = canvas.toDataURL(); // produces a PNG file
+    // console.log(dataUri)
+    return dataUri;
+    
+}
+
+var data,model;
+async function load(){
+    data = new MnistData();
+    await data.load();
+    await vizExample(data);
+    console.log(data)
+    model = getModel();
+}
+
+async function run() {
+    // const data = new MnistData();
+    // await data.load();
+    // await vizExample(data)
+    // // await showExamples(data);
+    // console.log(data)
+   
+    tfvis.show.modelSummary({name: 'Model Architecture'}, model);
+    await train(model, data);
+    await doPrediction1(model, data,  1)
+    await showAccuracy(model, data);
+    await showConfusion(model, data);
+
+
+}
+
+function getModel() {
+    const model = tf.sequential();
+
+    const IMAGE_WIDTH = 28;
+    const IMAGE_HEIGHT = 28;
+    const IMAGE_CHANNELS = 1;  
+
+    // In the first layer of our convolutional neural network we have 
+    // to specify the input shape. Then we specify some parameters for 
+    // the convolution operation that takes place in this layer.
+    model.add(tf.layers.conv2d({
+        inputShape: [IMAGE_WIDTH, IMAGE_HEIGHT, IMAGE_CHANNELS],
+        kernelSize: 5,
+        filters: 8,
+        strides: 1,
+        activation: 'relu',
+        kernelInitializer: 'varianceScaling'
+    }));
+
+    // The MaxPooling layer acts as a sort of downsampling using max values
+    // in a region instead of averaging.  
+    model.add(tf.layers.maxPooling2d({poolSize: [2, 2], strides: [2, 2]}));
+
+    // Repeat another conv2d + maxPooling stack. 
+    // Note that we have more filters in the convolution.
+    model.add(tf.layers.conv2d({
+        kernelSize: 5,
+        filters: 16,
+        strides: 1,
+        activation: 'relu',
+        kernelInitializer: 'varianceScaling'
+    }));
+    model.add(tf.layers.maxPooling2d({poolSize: [2, 2], strides: [2, 2]}));
+
+    // Now we flatten the output from the 2D filters into a 1D vector to prepare
+    // it for input into our last layer. This is common practice when feeding
+    // higher dimensional data to a final classification output layer.
+    model.add(tf.layers.flatten());
+
+    // Our last layer is a dense layer which has 10 output units, one for each
+    // output class (i.e. 0, 1, 2, 3, 4, 5, 6, 7, 8, 9).
+    const NUM_OUTPUT_CLASSES = 10;
+    model.add(tf.layers.dense({
+        units: NUM_OUTPUT_CLASSES,
+        kernelInitializer: 'varianceScaling',
+        activation: 'softmax'
+    }));
+
+
+    // Choose an optimizer, loss function and accuracy metric,
+    // then compile and return the model
+    const optimizer = tf.train.adam();
+        model.compile({
+        optimizer: optimizer,
+        loss: 'categoricalCrossentropy',
+        metrics: ['accuracy'],
+    });
+
+    return model;
+}
+
+async function train(model, data) {
+    const metrics = ['loss', 'val_loss', 'acc', 'val_acc'];
+    const container = {
+        name: 'Model Training', styles: { height: '1000px' }
+    };
+    const fitCallbacks = tfvis.show.fitCallbacks(container, metrics);
+
+    const BATCH_SIZE = 512;
+    const TRAIN_DATA_SIZE = 5500;
+    const TEST_DATA_SIZE = 1000;
+
+    const [trainXs, trainYs] = tf.tidy(() => {
+        const d = data.nextTrainBatch(TRAIN_DATA_SIZE);
+        return [
+        d.xs.reshape([TRAIN_DATA_SIZE, 28, 28, 1]),
+        d.labels
+        ];
+    });
+
+    const [testXs, testYs] = tf.tidy(() => {
+        const d = data.nextTestBatch(TEST_DATA_SIZE);
+        return [
+        d.xs.reshape([TEST_DATA_SIZE, 28, 28, 1]),
+        d.labels
+        ];
+    });
+
+    return model.fit(trainXs, trainYs, {
+        batchSize: BATCH_SIZE,
+        validationData: [testXs, testYs],
+        epochs: 10,
+        shuffle: true,
+        callbacks: fitCallbacks
+    });
+}
+
+const classNames = ['Zero', 'One', 'Two', 'Three', 'Four', 'Five', 'Six', 'Seven', 'Eight', 'Nine'];
+
+function doPrediction1(model, data, testDataSize = 1) {
+    const IMAGE_WIDTH = 28;
+    const IMAGE_HEIGHT = 28;
+    const testData = data.nextTestBatch(1);
+
+
+    const imageTensor = tf.tidy(() => {
+        // Reshape the image to 28x28 px
+        return testData.xs
+            .slice([0, 0], [1, testData.xs.shape[1]])
+            .reshape([28, 28, 1]);
+    });
+    const imageArray=imageTensor.arraySync();
+    var dataUri= createImage(imageArray);
+    cyy.$('#ex').style('background-image', dataUri);
+    
+
+
+    const testxs = testData.xs.reshape([testDataSize, IMAGE_WIDTH, IMAGE_HEIGHT, 1]);
+
+    const labels = testData.labels.argMax([-1]);
+    const preds = model.predict(testxs).argMax([-1]);
+  
+    testxs.dispose();
+    console.log(preds.arraySync());
+
+    updateVariable('pred', ""+preds.arraySync());
+    return [preds, labels];
+}
+
+
+function doPrediction(model, data, testDataSize = 500) {
+  const IMAGE_WIDTH = 28;
+  const IMAGE_HEIGHT = 28;
+  const testData = data.nextTestBatch(testDataSize);
+  const testxs = testData.xs.reshape([testDataSize, IMAGE_WIDTH, IMAGE_HEIGHT, 1]);
+  const labels = testData.labels.argMax([-1]);
+  const preds = model.predict(testxs).argMax([-1]);
+
+  testxs.dispose();
+  return [preds, labels];
+}
+
+
+async function showAccuracy(model, data) {
+  const [preds, labels] = doPrediction(model, data);
+  const classAccuracy = await tfvis.metrics.perClassAccuracy(labels, preds);
+  const container = {name: 'Accuracy', tab: 'Evaluation'};
+  tfvis.show.perClassAccuracy(container, classAccuracy, classNames);
+
+  labels.dispose();
+}
+
+async function showConfusion(model, data) {
+  const [preds, labels] = doPrediction(model, data);
+  const confusionMatrix = await tfvis.metrics.confusionMatrix(labels, preds);
+  const container = {name: 'Confusion Matrix', tab: 'Evaluation'};
+  tfvis.render.confusionMatrix(
+      container, {values: confusionMatrix}, classNames);
+
+  labels.dispose();
+}
+
+
+document.addEventListener('DOMContentLoaded', load);
+document.addEventListener('runNode', run);
diff --git a/01_Code/physical_computing_interface/globals.js b/01_Code/physical_computing_interface/globals.js
index b290bf80f5660a740cac170fe679d268b76840e5..cba7f0e88ff374e13221cbda63310cf9bd88b766 100644
--- a/01_Code/physical_computing_interface/globals.js
+++ b/01_Code/physical_computing_interface/globals.js
@@ -284,7 +284,7 @@ function globals(utils){
 }
 
 //////////////////////events//////////////////
-globals.prototype.addNode=function (x,y,z,replay=false){
+globals.prototype.addNode=function (x,y,z,replay=false,data={}){
     x=parseInt(x);
     y=parseInt(y);
     z=parseInt(z);
@@ -300,7 +300,8 @@ globals.prototype.addNode=function (x,y,z,replay=false){
             posY:p_y,
             posZ:p_z,
             rotY:r_y,
-            replay:replay
+            replay:replay,
+            data:data
         }
     });
     document.dispatchEvent(addNodeEvent);
diff --git a/01_Code/physical_computing_interface/graph/graph.js b/01_Code/physical_computing_interface/graph/graph.js
index 390b87469310ae9eff2c11727c64bb5b5e4accb5..f28995447389f1463028487c26639c3785d11aad 100644
--- a/01_Code/physical_computing_interface/graph/graph.js
+++ b/01_Code/physical_computing_interface/graph/graph.js
@@ -475,7 +475,8 @@ function initGraph(){
                     inValues:[],
                     outValues:[],
                     numRuns:0,
-                    maxRuns:10
+                    maxRuns:10,
+                    dnn:e.detail.data
                 }
             },
             position: {
diff --git a/02_Presentation/200508_CNN.png b/02_Presentation/200508_CNN.png
new file mode 100644
index 0000000000000000000000000000000000000000..95b998bdb6e013efb5e76a2f4601b26ec06699b3
Binary files /dev/null and b/02_Presentation/200508_CNN.png differ
diff --git a/README.md b/README.md
index b2b95594e4feecb69d0d0587fb1febd387a1e865..e6cf3800a9f33a259c82268338db1a6af8c60145 100644
--- a/README.md
+++ b/README.md
@@ -20,7 +20,10 @@ Topics include:
 ----
 ## Demo Links
 
-- **"Physical Computing Interface"** demo lives [here.](https://amiraa.pages.cba.mit.edu/physical-computing-design-tools/01_Code/physical_computing_interface/index.html)
+- **"Physical Computing Interface"** 
+  - [Assembler Control Demo](https://amiraa.pages.cba.mit.edu/physical-computing-design-tools/01_Code/physical_computing_interface/index.html)
+  - [Voxel Simulation Demo](https://amiraa.pages.cba.mit.edu/physical-computing-design-tools/01_Code/physical_computing_interface/index.html) <!-- TODO(review): this URL is identical to the Assembler Control Demo link above — verify the intended simulation-demo link -->
+  - [Convolutional Neural Network (CNN) Demo](https://amiraa.pages.cba.mit.edu/physical-computing-design-tools/01_Code/physical_computing_interface/demos/indexDNN.html)
 - **"Performance Calculation Graph"** demo lives [here.](https://amiraa.pages.cba.mit.edu/physical-computing-design-tools/01_Code/physical_computing_interface/probabilisticProjections/index.html)
 - [Distributed Deep Neural Networks](https://gitlab.cba.mit.edu/amiraa/ddnn)
 - UR10 voxel Assembly [demo.](https://amiraa.pages.cba.mit.edu/physical-computing-design-tools/01_Code/physical_computing_interface/assembly/standAloneAssembly.html)
@@ -28,9 +31,50 @@ Topics include:
 ----
 ## Progress
 
+### Demo Videos
+
 ![](02_Presentation/191202_demo.mp4)
 ![](02_Presentation/assemblerFirstTrial.mp4)
 
+---
+
+### Applications
+- The most efficient applications for dice are those that need reconfiguration while running, examples:
+  - probabilistic programming (Gamalon)
+  - more computing power while assembling voxels
+  - Neural Networks?
+  
+#### Voxel Simulation
+
+![](02_Presentation/200114_simulation.PNG)
+
+#### Convolutional Neural Networks
+
+![](02_Presentation/200508_CNN.png)
+
+----
+
+### Capabilities and Updates
+-  Design
+    -  Parametric grid definition (cubic, hexagonal, dice)
+-  Assembly/timeline
+    -  Timeline for future morphing computing
+    -  UR10 
+-  Graph
+    -  Hierarchy
+    -  Automatic neighborhood
+    -  highlights
+-  Computing
+    -  Asynchronous code propagation (distributed max example)
+    -  Simple test case find max value distributed
+    -  Min-cut/max-flow concept study for neighborhoods
+-  Improved UI and integration
+    -  Radial menu
+    -  Json propagation
+    -  Highlight selected node
+  
+---
+
 ### Hardware Architecture Inference
 
 #### Probabilistic Programming
@@ -107,39 +151,8 @@ expectation(dist)
 ```
 Until now I have a dummy computation model. Next steps would be to get a complex computational graph and for each part infer the best hardware architecture for it. Moreover, if the computation graph changes through time one can also alter the hardware architecture to respond to these changes.
 
-### Applications
-- The idea is to find an application that needs reconfiguration white running, examples:
-  - Gamalon probabilistic programming
-  - more computing power while assembling voxels
-  -  Neural Networks?
-  
-#### Voxel Simulation
-
-![](02_Presentation/200114_simulation.PNG)
-
-
-
-
-### Capabilities and Updates
--  Design
-    -  Parametric grid definition (cubic, hexagonal, dice)
--  Assembly/timeline
-    -  Timeline for future morphing computing
-    -  UR10 
--  Graph
-    -  Hierarchy
-    -  Automatic neighborhood
-    -  highlights
--  Computing
-    -  Asynchronous code propagation (distributed max example)
-    -  Simple test case find max value distributed
-    -  Min cut max flow concept study neighborhood 
--  Improved UI and integration
-    -  Radial menu
-    -  Json propagation
-    -  Highlight selected node
-  
 ---
+
 ## Desired Milestones
 
 - [ ] Assembly