2 changes: 1 addition & 1 deletion .tests/ids_unique.py
@@ -25,7 +25,7 @@ def check_ids_unique(data_root, verbose=False):
     if paths_by_id:
         print('Duplicate IDs found:')
         for id_, paths in paths_by_id.items():
-            print('ID {}'.format(id_))
+            print(f'ID {id_}')
             for path in paths:
                 print('\t', path)
         sys.exit(1)
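Most hunks in this PR apply the same mechanical rewrite: a str.format() call becomes an equivalent f-string (available since Python 3.6). A minimal sketch of the equivalence, using a hypothetical id_ value:

    id_ = 42
    assert 'ID {}'.format(id_) == f'ID {id_}' == 'ID 42'

The interpolation happens at the call site, which reads more directly than positional format() arguments.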
2 changes: 1 addition & 1 deletion .tests/languages.py
@@ -45,7 +45,7 @@ def check_ids_unique(data_root, verbose=False):
     if bad_lang_by_path:
         print('Incorrect languages found:')
         for path, lang in bad_lang_by_path.items():
-            print('{} {}'.format(lang, path))
+            print(f'{lang} {path}')
         sys.exit(1)
 
 
1 change: 0 additions & 1 deletion .tests/requirements.txt
@@ -3,5 +3,4 @@ jinja2
 jsonschema
 pathschema
 pygments
-six
 unidecode
2 changes: 1 addition & 1 deletion .tests/schemas.py
@@ -28,7 +28,7 @@ def check_schemas(data_root, schemas_dir, verbose=False):
         try:
             blob = json.load(fp)
         except json.decoder.JSONDecodeError as e:
-            print('\nError JSON-decoding {}'.format(file_path),
+            print(f'\nError JSON-decoding {file_path}',
                   flush=True)
             if verbose:
                 print(e, flush=True)
2 changes: 1 addition & 1 deletion .tests/shape.py
@@ -25,7 +25,7 @@ def check_render_rest(data_root, verbose=False):
 
     if error_by_path:
         for path, blobs in error_by_path.items():
-            print('Incorrect serialization order in {}'.format(path), flush=True)
+            print(f'Incorrect serialization order in {path}', flush=True)
             blobs = tuple(blob.splitlines(keepends=True) for blob in blobs)
             if verbose:
                 print(''.join(difflib.ndiff(*blobs)), end="")
2 changes: 1 addition & 1 deletion .tests/slugs_unique.py
@@ -40,7 +40,7 @@ def check_slugs_unique(data_root, verbose=False):
     if paths_by_combo:
         print('Duplicate slug combinations found:')
         for combo, paths in paths_by_combo.items():
-            print('Combination {}'.format(combo))
+            print(f'Combination {combo}')
             for path in paths:
                 print('\t', path)
         sys.exit(1)
10 changes: 4 additions & 6 deletions tools/csv2rst.py
@@ -1,5 +1,4 @@
 #!/usr/bin/env python3
-# coding: utf-8
 """
 Prints a table of contents for a lightning talks description field.
 
@@ -42,10 +41,9 @@ def body_line(time, speaker, title, first_line=False):
         hiperlink_underscore = ''
     else:
         hiperlink_underscore = '_'
-    return (" * - {}{}\n"
-            " - {}\n"
-            " - {}\n").format(time, hiperlink_underscore,
-                              speaker.strip(), title.strip())
+    return (f" * - {time}{hiperlink_underscore}\n"
+            f" - {speaker.strip()}\n"
+            f" - {title.strip()}\n")
 
 head = (".. list-table:: Lightning Talks\n"
         " :widths: 10 30 60\n"
@@ -117,7 +115,7 @@ def main():
     if args.new_csv:
         if os.path.exists(csv_path):
             raise Exception(
-                'Error creating new file. File exists: {}'.format(csv_path))
+                f'Error creating new file. File exists: {csv_path}')
         else:
             create_sample_csv(csv_path)
     description = csv_description_to_rst(csv_path, video_url)
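The body_line() change relies on adjacent string literals concatenating at compile time; each piece needs its own f prefix to interpolate. A small sketch with hypothetical row values:

    time_, speaker, title = '10:00', ' Jane Doe ', ' My Talk '
    row = (f" * - {time_}_\n"
           f" - {speaker.strip()}\n"
           f" - {title.strip()}\n")
    # row == ' * - 10:00_\n - Jane Doe\n - My Talk\n'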
9 changes: 4 additions & 5 deletions tools/fill_id_field.py
@@ -1,5 +1,4 @@
 #!/usr/bin/env python3
-# coding: utf-8
 """Fill id field in json video files
 If a video file is found without id then it gets a id = max(id) + 1"""
 
@@ -21,10 +20,10 @@ def get_json_data(file_name):
         try:
             data = json.load(f_stream)
         except ValueError:
-            print('Json syntax error in file {}'.format(file_name))
+            print(f'Json syntax error in file {file_name}')
             raise
     if 'file_name' in data:
-        print('"File_name" is not a proper field in {}'.format(file_name))
+        print(f'"File_name" is not a proper field in {file_name}')
         raise ValueError
     data['file_name'] = file_name
     return data
@@ -61,9 +60,9 @@ def main():
                                  if 'id' in video.keys())
     most_common, times_duplicate = all_id.most_common(1)[0]
     if times_duplicate > 1:
-        raise ValueError('Duplicate id: {}'.format(most_common))
+        raise ValueError(f'Duplicate id: {most_common}')
     max_id = max(all_id)
-    logging.debug('Max id: {}'.format(max_id))
+    logging.debug(f'Max id: {max_id}')
 
     # Update files
     video_without_id = [video for video in tb_video
2 changes: 1 addition & 1 deletion tools/parse_for_detail.py
@@ -29,7 +29,7 @@ def parse(path):
         try:
             data = json.load(fp)
         except ValueError:
-            logging.error('Json syntax error in file {}'.format(path))
+            logging.error(f'Json syntax error in file {path}')
             raise
 
     source = data.get(SOURCE_KEY, '')
2 changes: 1 addition & 1 deletion tools/pull_related_urls.py
@@ -21,7 +21,7 @@ def pull_links_from_file(file_):
         try:
             data = json.load(fp)
         except ValueError:
-            logging.error('Json syntax error in file {}'.format(file_))
+            logging.error(f'Json syntax error in file {file_}')
             raise
 
     description = data.get('description') or ''
2 changes: 1 addition & 1 deletion tools/reserialize.py
@@ -15,7 +15,7 @@ def reserialize(file_):
         try:
             data = json.load(fp)
         except ValueError:
-            logging.error('Json syntax error in file {}'.format(file_))
+            logging.error(f'Json syntax error in file {file_}')
             raise
 
     with open(file_, 'w') as fp:
8 changes: 1 addition & 7 deletions tools/utils.py
@@ -2,7 +2,6 @@
 import re
 
 from jinja2.utils import markupsafe
-import six
 
 
 def get_json_files(root, exclude=None):
@@ -31,15 +30,10 @@ def slugify(value, substitutions=()):
     # value must be unicode per se
     import unicodedata
     from unidecode import unidecode
-    # unidecode returns str in Py2 and 3, so in Py2 we have to make
-    # it unicode again
     value = unidecode(value)
-    if isinstance(value, six.binary_type):
-        value = value.decode('ascii')
-    # still unicode
     value = unicodedata.normalize('NFKD', value).lower()
 
-    # backward compatible covert from 2-tuples to 3-tuples
+    # backward compatible convert from 2-tuples to 3-tuples
     new_subs = []
     for tpl in substitutions:
         try:
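The deleted branch in slugify() was Python 2 compatibility: under Python 3, unidecode() always returns str, so the six.binary_type check could never be true and six becomes an unused dependency (hence its removal from .tests/requirements.txt above). A minimal sketch of the surviving path, assuming unidecode is installed:

    import unicodedata
    from unidecode import unidecode

    value = unidecode('Café Talks')   # transliterates to 'Cafe Talks', always a str on Python 3
    value = unicodedata.normalize('NFKD', value).lower()
    assert value == 'cafe talks'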
5 changes: 2 additions & 3 deletions tools/video_statistics.py
@@ -1,5 +1,4 @@
 #!/usr/bin/env python3
-# coding: utf-8
 """Read only program that prints statistics (in markdown) of all json files
 in the repository:
 - Fields
@@ -31,7 +30,7 @@ def get_types(file_name):
         try:
             data = json.load(f_stream)
         except ValueError:
-            print('Json syntax error in file {}'.format(file_name))
+            print(f'Json syntax error in file {file_name}')
             raise
     return {(key, type(data[key]).__name__, is_void(data[key]))
             for key in data}
@@ -46,7 +45,7 @@ def markdown_statistics(file_names):
     for field, class_, void in sorted(total, key=str):
         result.append("|{}|{}|{}|{}|".format(field, class_, void, total[(
             field, class_, void)]))
-    logging.debug('result: {}'.format(result))
+    logging.debug(f'result: {result}')
     return '\n'.join(result)
 
 
6 changes: 3 additions & 3 deletions tools/youtube.py
@@ -129,7 +129,7 @@ def fetch_list(api_key, play_list_id):
     next_page_token = response_dict.get('nextPageToken')
 
     total_results = response_dict.get('pageInfo', {}).get('totalResults')
-    print('Found {} results. Gathering them now .'.format(total_results), end='')
+    print(f'Found {total_results} results. Gathering them now .', end='')
 
     items = response_dict.get('items', [])
     while next_page_token:
@@ -166,7 +166,7 @@ def fetch_list(api_key, play_list_id):
     print('Done parsing results. Writing files to disk ', end='')
 
     # make category dir
-    category = 'category-{}'.format(time.time())
+    category = f'category-{time.time()}'
     os.makedirs(os.path.join(category, 'videos'))
 
     with open(os.path.join(category, 'category.json') , 'w') as fp:
@@ -179,7 +179,7 @@ def fetch_list(api_key, play_list_id):
         file_name = os.path.join(category, 'videos', file_name)
         # add some randomness to the name if a file already exists with that name
         if os.path.exists(file_name + '.json'):
-            file_name += '-{}.json'.format(str(uuid.uuid1())[:6])
+            file_name += f'-{str(uuid.uuid1())[:6]}.json'
         else:
             file_name += '.json'
 
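f-strings accept arbitrary expressions inside the braces, which is why the calls in these last two hunks (time.time(), the uuid slice) can be inlined directly rather than passed as format() arguments. A quick sketch with illustrative output:

    import time
    import uuid

    category = f'category-{time.time()}'          # e.g. 'category-1700000000.123456'
    suffix = f'-{str(uuid.uuid1())[:6]}.json'     # e.g. '-3f2a1b.json'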