"""asciiartist module: Converts images to ascii art.

Brief Usage Description:

This module's main export is `asciiartist`, which generates
ascii art from an image. `asciiartist` also exposes some
configuration parameters the user might want to tune.

`asciiartist` returns the ascii art and a numpy array
representing the detected edges. `display_edges` displays
those edges, which helps with debugging and with finetuning
the parameters given to `asciiartist`.
"""

from typing import Tuple, Union

from skimage import feature
from PIL import Image
import numpy as np


def _model_predict(X: "np.ndarray[np.bool]", _model=[]
                   ) -> "np.ndarray[np.uint8]":
    """Performs model inference.

    Parameters
    ----------
    X: np.ndarray[np.bool]
        Input array, batched, representing edges for each
        ascii char. This function will deal with batching.

    _model:
        Caches the loaded model, onehot mapping and in-out
        info.

    Returns
    -------
    np.ndarray[np.uint8]
        Char array of ascii characters.
    """

    batch_size = 32

    # Lazily load the TFLite model on first call and cache it (together with
    # the onehot mapping and tensor details) in the `_model` default argument.
    if len(_model) == 0:
        import os
        dir = os.path.dirname(__file__)

        import tflite_runtime.interpreter as tflite

        interpreter = tflite.Interpreter(
            model_path=os.path.join(dir, "gen/ascii-model.tflite"))
        m_out = interpreter.get_output_details()[0]
        m_in = interpreter.get_input_details()[0]

        interpreter.resize_tensor_input(
            m_in['index'], (batch_size, *m_in["shape"][1:]))
        interpreter.resize_tensor_input(
            m_out['index'], (batch_size, *m_out["shape"][1:]))
        interpreter.allocate_tensors()
        _model.append((
            interpreter,
            [*open(os.path.join(dir, "gen/_onehot.txt"), "rb").read()],
            (m_out, m_in)
        ))

    model, onehot, (m_out, m_in) = _model[0]

    asc = []
    for i in range(X.shape[0] // batch_size + 1):
        if (i + 1) * batch_size > X.shape[0]:
            # Pad the final (partial) batch to `batch_size` samples
            pad = batch_size - X.shape[0] % batch_size
            model.set_tensor(
                m_in['index'],
                np.pad(
                    X[i * batch_size: (i + 1) * batch_size],
                    [(0, pad), (0, 0), (0, 0), (0, 0)],
                    mode="constant"
                ))

            model.invoke()
            y = model.get_tensor(m_out['index'])[:-pad]
            asc.append(np.argmax(y, axis=1))
            continue

        model.set_tensor(
            m_in['index'], X[i * batch_size: (i + 1) * batch_size])

        model.invoke()
        y = model.get_tensor(m_out['index'])
        asc.append(np.argmax(y, axis=1))

    # Map onehot indices back to ascii character codes
    return np.array([onehot[i] for i in np.concatenate(asc)], dtype=np.uint8)


def _get_edges(
        img: 'Image',
        height: int,
        sigma: float,
        weight: float,
        ratio: float
) -> "np.ndarray[np.bool]":
    """Detects edges and segments the image.

    Parameters
    ----------
    img: 'Image'
        Image object from Pillow (PIL.Image)

    height: int
        Number of lines of output art

    sigma: float
        Noise reduction param for canny edge detection

    weight: float
        Line weight. A higher value thickens the edges

    ratio: float
        y/x ratio of each char of text

    Returns
    -------
    np.ndarray[np.bool]
        5D array representing the edges detected per segment.
        Shape: (y, x, ph, pw, 1)
        y, x ==> Height and width of the image in ascii chars
        ph, pw ==> Height and width of each ascii char in
        image pixels.
    """

    img = np.array(img.convert("L"))
    y, x = img.shape[:2]
    width = int(x / y * height * ratio + .5)

    # Pixel dimensions of each ascii char segment
    ph, pw = 88, 35

    # Resize image. Downscaling by `weight` here (and resizing the detected
    # edges back up below) makes the edges proportionally thicker.
    ny = ph * height
    nx = pw * width
    img = np.array(
        Image.fromarray((img.astype(float) * 255).astype(np.uint8))
        .resize((int(nx // weight + .5), int(ny // weight + .5)), Image.NEAREST),
        dtype=np.uint8)

    edges = feature.canny(img, sigma=sigma)
    edges = np.array(
        Image.fromarray(edges)
        .resize((nx, ny), Image.NEAREST)
    ).astype(np.float32)
    edges = edges.reshape(*edges.shape, 1)

    # Split the edge map into one (ph, pw, 1) segment per ascii char
    segs = np.array([
        [edges[ph * i:ph * (i + 1), pw * j:pw * (j + 1)] for j in range(width)]
        for i in range(height)
    ])

    return segs


def _segments_to_ascii(edges: "np.ndarray[np.bool]") -> str:
    """Formats the input and output of the model.

    Parameters
    ----------
    edges: np.ndarray[np.bool]
        Output of `_get_edges`. See docs of `_get_edges`.

    Returns
    -------
    str
        ascii art string
    """

    y, x = edges.shape[:2]
    inp = edges.reshape(-1, *edges.shape[-3:])
    asc = _model_predict(inp)
    return "\n".join("".join(map(chr, i)) for i in asc.reshape(y, x))


def display_edges(
        edges: "np.ndarray[np.bool]"
) -> None:
    """Displays edges (2nd output of `asciiartist`).

    This can be used to manually finetune the `asciiartist`
    parameters.

    The edges are displayed with a grid overlay that shows how
    the image is segmented, i.e. where each ascii char will be
    placed.

    Parameters
    ----------
    edges: np.ndarray[np.bool]
        2nd output of `asciiartist`. See `asciiartist` docs
    """

    import matplotlib.pyplot as plt

    y, x, h, w, _ = edges.shape
    # Stitch the per-char segments back into one image
    eimg = np.hstack(np.hstack(edges))
    eimg = eimg.reshape(eimg.shape[:-1])

    plt.figure(figsize=(10, 10))
    plt.imshow(eimg.astype(float))
    # Draw the char grid
    for i in range(y):
        plt.plot([0, x * w], [i * h, i * h], color="white", linewidth=0.1)
    for i in range(x):
        plt.plot([i * w, i * w], [0, y * h], color="white", linewidth=0.1)

    plt.ylim((y * h, 0))
    plt.xlim((0, x * w))
    plt.show()
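
# A typical manual finetuning loop might look like the sketch below (the
# parameter values are assumptions, chosen only for illustration):
#
#   _, edges = asciiartist(img, n_lines=30, _generate_ascii=False)
#   display_edges(edges)  # inspect the edge map and the char grid
#   # ... adjust noise_reduction / line_weight / text_ratio and repeat,
#   # then rerun with _generate_ascii=True to get the final art.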


def asciiartist(
        img: 'Image',
        n_lines: int,
        noise_reduction: float = 3,
        line_weight: float = 1,
        text_ratio: float = 2.5,
        _generate_ascii: bool = True
) -> "Tuple[Union[str, None], np.ndarray[np.bool]]":
    """Converts images into ascii art.

    Returns two items (ascii_art, edges). `edges` can be
    displayed with `display_edges` to aid in manually
    finetuning the parameters.

    Parameters
    ----------
    img: 'Image'
        Image object from Pillow (PIL.Image)

    n_lines: int
        Number of lines of output art

    noise_reduction: float, optional
        Noise reduction param for canny edge detection
        (Default 3.)

    line_weight: float, optional
        Line weight. A higher value thickens the edges
        (Default 1.)

    text_ratio: float, optional
        y/x ratio of each char of text. (Default 2.5)

    _generate_ascii: bool, optional
        If disabled, no ascii art is generated, but the
        function still returns the edges. This option can be
        used if the user only wants the `edges` output to pass
        into `display_edges` for manual finetuning of the
        parameters. (Default True)

    Returns
    -------
    (str | None, np.ndarray[np.bool])

        Let the output be (art, edges).

        `art` is the ascii art string. This value is None
        if `_generate_ascii` is false.

        `edges` is a numpy array representing the edges
        detected. This can be fed into `display_edges` to aid
        in manually finetuning the parameters.
    """

    edges = _get_edges(
        img, n_lines,
        sigma=noise_reduction,
        weight=line_weight,
        ratio=text_ratio
    )

    asc = None
    if _generate_ascii:
        asc = _segments_to_ascii(edges)

    return asc, edges
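

# Minimal command-line sketch (illustrative only, not part of the public API;
# assumes an image path is passed as the first argument and that 40 lines of
# output is a reasonable default):
if __name__ == "__main__":
    import sys

    with Image.open(sys.argv[1]) as _img:
        _art, _edges = asciiartist(_img, n_lines=40)
    print(_art)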