@@ -168,12 +168,13 @@ def extract_metadata(mp_args, path) -> Optional[Dict[str, int]]:
     }
 
     ext = path.rsplit(".", 1)[-1].lower()
+    is_scan_all_files = getattr(mp_args, "scan_all_files", False)
 
-    if DBType.audio in mp_args.profiles and (
-        ext in (consts.AUDIO_ONLY_EXTENSIONS | consts.VIDEO_EXTENSIONS) or mp_args.scan_all_files
+    if objects.is_profile(mp_args, DBType.audio) and (
+        ext in (consts.AUDIO_ONLY_EXTENSIONS | consts.VIDEO_EXTENSIONS) or is_scan_all_files
     ):
         media |= av.munge_av_tags(mp_args, path)
-    elif DBType.video in mp_args.profiles and (ext in consts.VIDEO_EXTENSIONS or mp_args.scan_all_files):
+    elif objects.is_profile(mp_args, DBType.video) and (ext in consts.VIDEO_EXTENSIONS or is_scan_all_files):
         media |= av.munge_av_tags(mp_args, path)
 
     if not Path(path).exists():
@@ -184,7 +185,7 @@ def extract_metadata(mp_args, path) -> Optional[Dict[str, int]]:
         text_exts |= consts.OCR_EXTENSIONS
     if mp_args.speech_recognition:
         text_exts |= consts.SPEECH_RECOGNITION_EXTENSIONS
-    if DBType.text in mp_args.profiles and (ext in text_exts or mp_args.scan_all_files):
+    if objects.is_profile(mp_args, DBType.text) and (ext in text_exts or is_scan_all_files):
         try:
             start = timer()
             if any([mp_args.ocr, mp_args.speech_recognition]):
@@ -201,7 +202,7 @@ def extract_metadata(mp_args, path) -> Optional[Dict[str, int]]:
             media["hash"] = sample_hash.sample_hash_file(path)
 
     if getattr(mp_args, "process", False):
-        if DBType.audio in mp_args.profiles and Path(path).suffix not in [".opus", ".mka"]:
+        if objects.is_profile(mp_args, DBType.audio) and Path(path).suffix not in [".opus", ".mka"]:
             path = media["path"] = process_audio.process_path(
                 path, split_longer_than=2160 if "audiobook" in path.lower() else None
             )
@@ -223,7 +224,7 @@ def clean_up_temp_dirs():
 
 
 def extract_chunk(args, media) -> None:
-    if DBType.image in args.profiles:
+    if objects.is_profile(args, DBType.image):
         media = books.extract_image_metadata_chunk(media)
 
     if args.scan_subtitles:
@@ -246,7 +247,7 @@ def extract_chunk(args, media) -> None:
 
             captions.append(caption)
 
-    media = [{"playlist_id": args.playlist_id, **d} for d in media]
+    media = [{"playlists_id": args.playlists_id, **d} for d in media]
     media = iterables.list_dict_filter_bool(media)
     args.db["media"].insert_all(media, pk="id", alter=True, replace=True)
 
@@ -393,7 +394,7 @@ def scan_path(args, path_str: str) -> int:
         ),
         "time_deleted": 0,
     }
-    args.playlist_id = db_playlists.add(args, str(path), info, check_subpath=True)
+    args.playlists_id = db_playlists.add(args, str(path), info, check_subpath=True)
 
     print(f"[{path}] Building file list...")
     new_files = find_new_files(args, path)
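Note: objects.is_profile is defined elsewhere in the repository and is not shown in this diff. Judging from the call sites above, which replace inline checks such as DBType.audio in mp_args.profiles, it presumably reduces to something like the sketch below; the body and the default handling here are assumptions inferred from those call sites, not the actual implementation.

# Hypothetical sketch of objects.is_profile, inferred from the call sites in this diff.
def is_profile(args, profile_type) -> bool:
    # Tolerate an args namespace that has no .profiles attribute, mirroring the
    # defensive getattr(mp_args, "scan_all_files", False) added in this commit.
    profiles = getattr(args, "profiles", None) or []
    return profile_type in profiles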