Skip to content
Merged
18 changes: 13 additions & 5 deletions cps/db.py
Original file line number Diff line number Diff line change
Expand Up @@ -944,16 +944,13 @@ def order_authors(self, entries, list_return=False, combined=False):
def get_typeahead(self, database, query, replace=('', ''), tag_filter=true()):
    """Return a JSON array of name suggestions for typeahead widgets.

    Queries *database* (a mapped Calibre table such as Tags or Series) for rows
    whose ``name`` contains *query* case-insensitively, after applying
    *tag_filter* (an additional SQL filter clause, defaults to no restriction).

    :param database: ORM class to query; must expose a ``name`` column.
    :param query: substring to match; ``None``/empty matches everything.
    :param replace: 2-tuple passed to ``str.replace`` on each result name.
    :param tag_filter: extra filter clause applied to the query.
    :return: JSON string of ``[{"name": ...}, ...]`` objects.
    """
    query = query or ''
    # Register the custom SQLite collation/functions (e.g. lower()) on this
    # connection before issuing a case-insensitive query.
    self.create_functions()
    # NOTE(review): *query* is interpolated into an ILIKE pattern without
    # escaping '%'/'_' wildcards — confirm callers only pass user search text
    # where broad matching is acceptable.
    entries = self.session.query(database).filter(tag_filter). \
        filter(func.lower(database.name).ilike("%" + query + "%")).all()
    json_dumps = json.dumps([dict(name=r.name.replace(*replace)) for r in entries])
    return json_dumps

def check_exists_book(self, authr, title):
self.create_functions()
# self.session.connection().connection.connection.create_function("lower", 1, lcase)
q = list()
author_terms = re.split(r'\s*&\s*', authr)
for author_term in author_terms:
Expand Down Expand Up @@ -1111,7 +1108,7 @@ def speaking_language(self, languages=None, return_all_languages=False, with_cou
.group_by(text('books_languages_link.lang_code')).all()
tags = list()
for lang in languages:
tag = Category(isoLanguages.get_language_name(get_locale(), lang[0].lang_code), lang[0].lang_code)
tag = Category(isoLanguages.get_language_name(get_locale(), None, lang[0].lang_code), lang[0].lang_code)
tags.append([tag, lang[1]])
# Append all books without language to list
if not return_all_languages:
Expand All @@ -1121,7 +1118,7 @@ def speaking_language(self, languages=None, return_all_languages=False, with_cou
.filter(self.common_filters())
.count())
if no_lang_count:
tags.append([Category(_("None"), "none"), no_lang_count])
tags.append([Category(_("None"), None, "none"), no_lang_count])
return sorted(tags, key=lambda x: x[0].name.lower(), reverse=reverse_order)
else:
if not languages:
Expand Down Expand Up @@ -1175,14 +1172,25 @@ def lcase(s):
return s.lower()


def title_sort(title, config):
    """Build a calibre-style sort key for *title*.

    A leading article matched by ``config.config_title_regex`` (e.g. "The",
    "A") is moved to the end after a comma: "The Hobbit" -> "Hobbit, The".
    Surrounding whitespace is stripped from the result.
    """
    pattern = re.compile(config.config_title_regex, re.IGNORECASE)
    found = pattern.search(title)
    if found is not None:
        article = found.group(1)
        title = "{}, {}".format(title[len(article):], article)
    return strip_whitespaces(title)

class Category:
    """Lightweight value object describing one sidebar/category entry."""

    # Class-level defaults; __init__ always overwrites these per instance.
    name = None
    sort = None
    id = None
    count = None
    rating = None

    def __init__(self, name, cat_id, rating=None):
        # The sort key simply mirrors the display name.
        self.name = self.sort = name
        self.id = cat_id
        self.count = 1  # every freshly built category represents one entry
        self.rating = rating
4 changes: 2 additions & 2 deletions cps/editbooks.py
Original file line number Diff line number Diff line change
Expand Up @@ -1611,7 +1611,7 @@ def add_objects(db_book_object, db_object, db_session, db_type, add_elements):
if db_type == 'author':
new_element = db_object(add_element, helper.get_sorted_author(add_element.replace('|', ',')))
elif db_type == 'series':
new_element = db_object(add_element, add_element)
new_element = db_object(add_element, db.title_sort(add_element, config))
elif db_type == 'custom':
new_element = db_object(value=add_element)
elif db_type == 'publisher':
Expand Down Expand Up @@ -1642,7 +1642,7 @@ def create_objects_for_addition(db_element, add_element, db_type):
elif db_type == 'series':
if db_element.name != add_element:
db_element.name = add_element
db_element.sort = add_element
db_element.sort = db.title_sort(add_element, config)
elif db_type == 'author':
if db_element.name != add_element:
db_element.name = add_element
Expand Down
7 changes: 6 additions & 1 deletion cps/helper.py
Original file line number Diff line number Diff line change
Expand Up @@ -1091,8 +1091,13 @@ def get_download_link(book_id, book_format, client):
file_name = book.title
if len(book.authors) > 0:
file_name = file_name + ' - ' + book.authors[0].name
original_name = file_name
file_name = get_valid_filename(file_name, replace_whitespace=False, force_unidecode=True)
quoted_file_name = file_name if client == "kindle" else quote(file_name)
if client == "kindle":
quoted_file_name = file_name
else:
native_name = get_valid_filename(original_name, replace_whitespace=False, force_unidecode=False)
quoted_file_name = quote(native_name)
headers = Headers()
headers["Content-Type"] = mimetypes.types_map.get('.' + book_format, "application/octet-stream")
headers["Content-Disposition"] = ('attachment; filename="{}.{}"; filename*=UTF-8\'\'{}.{}').format(
Expand Down
2 changes: 2 additions & 0 deletions cps/kobo.py
Original file line number Diff line number Diff line change
Expand Up @@ -811,6 +811,8 @@ def HandleStateRequest(book_uuid):

ub.session.merge(kobo_reading_state)
ub.session_commit()
update_results_response["LastModified"] = convert_to_kobo_timestamp_string(kobo_reading_state.last_modified)
update_results_response["PriorityTimestamp"] = convert_to_kobo_timestamp_string(kobo_reading_state.priority_timestamp)
return jsonify({
"RequestResult": "Success",
"UpdateResults": [update_results_response],
Expand Down
1 change: 0 additions & 1 deletion cps/search.py
Original file line number Diff line number Diff line change
Expand Up @@ -258,7 +258,6 @@ def render_adv_search_results(term, offset=None, order=None, limit=None):

cc = calibre_db.get_cc_columns(config, filter_config_custom_read=True)
calibre_db.create_functions()
# calibre_db.session.connection().connection.connection.create_function("lower", 1, db.lcase)
query = calibre_db.generate_linked_query(config.config_read_column, db.Books)
q = query.outerjoin(db.books_series_link, db.Books.id == db.books_series_link.c.book)\
.outerjoin(db.Series)\
Expand Down
2 changes: 1 addition & 1 deletion cps/search_metadata.py
Original file line number Diff line number Diff line change
Expand Up @@ -135,5 +135,5 @@ def metadata_search():
if active.get(c.__id__, True)
}
for future in concurrent.futures.as_completed(meta):
data.extend([asdict(x) for x in future.result() if x])
data.extend([asdict(x) for x in (future.result() or []) if x])
return make_response(jsonify(data))
Binary file modified cps/translations/ar/LC_MESSAGES/messages.mo
Binary file not shown.
Loading
Loading