|
46 | 46 | "numinlets" : 1, |
47 | 47 | "numoutlets" : 0, |
48 | 48 | "patching_rect" : [ 714.0, 373.0, 323.0, 87.0 ], |
49 | | - "text" : "This space represents the values of each neuron in the central hidden layer of the neural network.\n\nIt has, to some extent, learned a lower-dimensional representation of the input-norm data space by figuring out how it can reconstruct it with only 2 dimensions.", |
50 | | - "textcolor" : [ 0.501960784313725, 0.501960784313725, 0.501960784313725, 1.0 ] |
| 49 | + "text" : "This space represents the values of each neuron in the central hidden layer of the neural network.\n\nIt has, to some extent, learned a lower-dimensional representation of the input-norm data space by figuring out how it can reconstruct it with only 2 dimensions." |
51 | 50 | } |
52 | 51 |
|
53 | 52 | } |
|
59 | 58 | "numinlets" : 1, |
60 | 59 | "numoutlets" : 0, |
61 | 60 | "patching_rect" : [ 151.0, 399.762878000000001, 328.5, 33.0 ], |
62 | | - "text" : "Keep pressing stage (4) until the fit number seems to stop moving. A good value is around 0.20 in this example", |
63 | | - "textcolor" : [ 0.501960784313725, 0.501960784313725, 0.501960784313725, 1.0 ] |
| 61 | + "text" : "Keep pressing stage (4) until the fit number seems to stop moving. A good value is around 0.20 in this example" |
64 | 62 | } |
65 | 63 |
|
66 | 64 | } |
|
212 | 210 | "numinlets" : 1, |
213 | 211 | "numoutlets" : 0, |
214 | 212 | "patching_rect" : [ 297.5, 20.0, 699.0, 60.0 ], |
215 |     |  - | "text" : "Without getting too deep into the weeds, a neural network learns the relationship between input and output data. If we ask it to learn the relationship between the same data as the input and output, it essentially learns how to 'reconstruct' the data using a smaller number of dimensions (the neurons in the hidden layers). This means we can use the fluid.mlpregressor~ as a form of dimension reduction!", |
216 |     |  - | "textcolor" : [ 0.501960784313725, 0.501960784313725, 0.501960784313725, 1.0 ] |
    | 213 |  + | "text" : "Without getting too deep into the weeds, a neural network learns the relationship between input and output data. If we ask it to learn the relationship between the same data as the input and output, it essentially learns how to 'reconstruct' the data using a smaller number of dimensions (the neurons in the hidden layers). This means we can use the fluid.mlpregressor~ as a form of dimension reduction!" |
217 | 214 | } |
218 | 215 |
|
219 | 216 | } |
|
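A minimal sketch of the same idea outside Max, using Python and scikit-learn purely as a stand-in for fluid.mlpregressor~ (the descriptor data, the layer sizes and the encode() helper below are assumptions made for illustration, not values from this patch): train the network with the same normalised data as input and output, keep fitting until the reported error stops falling, then read the activations of the 2-neuron middle layer as the reduced 2-D representation.

import numpy as np
from sklearn.neural_network import MLPRegressor
from sklearn.preprocessing import MinMaxScaler

# Stand-in descriptor data: 200 slices, 13 values each (assumed for this sketch).
rng = np.random.default_rng(0)
X = MinMaxScaler().fit_transform(rng.random((200, 13)))

# Same data as input and output, with a 2-neuron bottleneck in the middle layer.
mlp = MLPRegressor(hidden_layer_sizes=(8, 2, 8), activation="tanh",
                   max_iter=2000, random_state=0)
mlp.fit(X, X)  # keep training until the reported error stops improving

def encode(data, model, upto=2):
    # Run the input forward only as far as the 2-neuron middle layer.
    a = data
    for W, b in zip(model.coefs_[:upto], model.intercepts_[:upto]):
        a = np.tanh(a @ W + b)  # matches activation="tanh" above
    return a

embedding = encode(X, mlp)  # shape (200, 2): one 2-D point per slice
print(embedding.shape)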
224 | 221 | "numinlets" : 1, |
225 | 222 | "numoutlets" : 0, |
226 | 223 | "patching_rect" : [ 13.0, 48.0, 269.0, 20.0 ], |
227 | | - "text" : "Neural networks learning how to reconstruct data", |
228 | | - "textcolor" : [ 0.501960784313725, 0.501960784313725, 0.501960784313725, 1.0 ] |
| 224 | + "text" : "Neural networks learning how to reconstruct data" |
229 | 225 | } |
230 | 226 |
|
231 | 227 | } |
|
612 | 608 | "numinlets" : 1, |
613 | 609 | "numoutlets" : 0, |
614 | 610 | "patching_rect" : [ 70.5, 205.5, 286.0, 33.0 ], |
615 | | - "text" : "this is the hop size of the analysis, to convert the position of the lookup to a position in the audio file", |
616 | | - "textcolor" : [ 0.501960784313725, 0.501960784313725, 0.501960784313725, 1.0 ] |
| 611 | + "text" : "this is the hop size of the analysis, to convert the position of the lookup to a position in the audio file" |
617 | 612 | } |
618 | 613 |
|
619 | 614 | } |
|
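As a quick worked example of that conversion (the hop size, sample rate and frame index below are assumed values for illustration, not taken from this patch): the looked-up frame index times the hop size gives a position in samples, which divided by the sample rate gives a position in time.

# Assumed analysis settings for the example: 512-sample hop, 44.1 kHz audio.
hop_size = 512
sample_rate = 44100.0

frame_index = 87                                # index returned by the lookup
position_samples = frame_index * hop_size       # 44544 samples into the file
position_ms = 1000.0 * position_samples / sample_rate
print(position_samples, round(position_ms, 2))  # 44544 1010.07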
870 | 865 | "numinlets" : 1, |
871 | 866 | "numoutlets" : 0, |
872 | 867 | "patching_rect" : [ 834.0, 651.0, 190.0, 20.0 ], |
873 | | - "text" : "predicting from the neural network", |
874 | | - "textcolor" : [ 1.0, 1.0, 1.0, 1.0 ] |
| 868 | + "text" : "predicting from the neural network" |
875 | 869 | } |
876 | 870 |
|
877 | 871 | } |
|
882 | 876 | "numinlets" : 1, |
883 | 877 | "numoutlets" : 0, |
884 | 878 | "patching_rect" : [ 540.0, 651.0, 55.0, 20.0 ], |
885 | | - "text" : "raw data", |
886 | | - "textcolor" : [ 1.0, 1.0, 1.0, 1.0 ] |
| 879 | + "text" : "raw data" |
887 | 880 | } |
888 | 881 |
|
889 | 882 | } |
|