diff --git a/cps/__init__.py b/cps/__init__.py
index d557649c..03945b57 100644
--- a/cps/__init__.py
+++ b/cps/__init__.py
@@ -73,7 +73,6 @@ ub.init_db(cli.settingspath)
 # pylint: disable=no-member
 config = config_sql.load_configuration(ub.session)
 
-searched_ids = {}
 web_server = WebServer()
 
 babel = Babel()
@@ -83,6 +82,8 @@ log = logger.create()
 
 from . import services
 
+db.CalibreDB.setup_db(config, cli.settingspath)
+
 calibre_db = db.CalibreDB()
 
 def create_app():
@@ -91,7 +92,7 @@ def create_app():
     if sys.version_info < (3, 0):
         app.static_folder = app.static_folder.decode('utf-8')
         app.root_path = app.root_path.decode('utf-8')
-        app.instance_path = app.instance_path .decode('utf-8')
+        app.instance_path = app.instance_path.decode('utf-8')
 
     cache_buster.init_cache_busting(app)
 
@@ -101,8 +102,6 @@ def create_app():
     app.secret_key = os.getenv('SECRET_KEY', config_sql.get_flask_session_key(ub.session))
 
     web_server.init_app(app, config)
-    calibre_db.setup_db(config, cli.settingspath)
-    calibre_db.start()
 
     babel.init_app(app)
     _BABEL_TRANSLATIONS.update(str(item) for item in babel.list_translations())
diff --git a/cps/config_sql.py b/cps/config_sql.py
index 3573abe7..d5c7b213 100644
--- a/cps/config_sql.py
+++ b/cps/config_sql.py
@@ -287,7 +287,7 @@ class _ConfigSQL(object):
         db_file = os.path.join(self.config_calibre_dir, 'metadata.db')
         have_metadata_db = os.path.isfile(db_file)
         self.db_configured = have_metadata_db
-        constants.EXTENSIONS_UPLOAD = [x.lstrip().rstrip() for x in self.config_upload_formats.split(',')]
+        constants.EXTENSIONS_UPLOAD = [x.lstrip().rstrip().lower() for x in self.config_upload_formats.split(',')]
         logfile = logger.setup(self.config_logfile, self.config_log_level)
         if logfile != self.config_logfile:
             log.warning("Log path %s not valid, falling back to default", self.config_logfile)
diff --git a/cps/constants.py b/cps/constants.py
index 27e9e1c8..89810e90 100644
--- a/cps/constants.py
+++ b/cps/constants.py
@@ -81,10 +81,11 @@ SIDEBAR_PUBLISHER = 1 << 12
 SIDEBAR_RATING = 1 << 13
 SIDEBAR_FORMAT = 1 << 14
 SIDEBAR_ARCHIVED = 1 << 15
-# SIDEBAR_LIST = 1 << 16
+SIDEBAR_DOWNLOAD = 1 << 16
+SIDEBAR_LIST = 1 << 17
 
 ADMIN_USER_ROLES = sum(r for r in ALL_ROLES.values()) & ~ROLE_ANONYMOUS
-ADMIN_USER_SIDEBAR = (SIDEBAR_ARCHIVED << 1) - 1
+ADMIN_USER_SIDEBAR = (SIDEBAR_LIST << 1) - 1
 
 UPDATE_STABLE = 0 << 0
 AUTO_UPDATE_STABLE = 1 << 0
diff --git a/cps/db.py b/cps/db.py
index 64ed5274..b2ab257a 100644
--- a/cps/db.py
+++ b/cps/db.py
@@ -24,14 +24,13 @@ import re
 import ast
 import json
 from datetime import datetime
-import threading
 
 from sqlalchemy import create_engine
 from sqlalchemy import Table, Column, ForeignKey, CheckConstraint
 from sqlalchemy import String, Integer, Boolean, TIMESTAMP, Float
 from sqlalchemy.orm import relationship, sessionmaker, scoped_session
-from sqlalchemy.ext.declarative import declarative_base
-from sqlalchemy.exc import OperationalError
+from sqlalchemy.orm.collections import InstrumentedList
+from sqlalchemy.ext.declarative import declarative_base, DeclarativeMeta
 from sqlalchemy.pool import StaticPool
 from flask_login import current_user
 from sqlalchemy.sql.expression import and_, true, false, text, func, or_
@@ -43,47 +42,48 @@ from flask_babel import gettext as _
 
 from .
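
Note on the constants.py hunk above: the admin mask works because (flag << 1) - 1 produces a value with every bit up to and including that flag set, so raising the highest sidebar flag from SIDEBAR_ARCHIVED to SIDEBAR_LIST automatically grants admin users the two new sidebar entries as well. A standalone sketch of the arithmetic (values copied from the hunk; the check() helper is illustrative only):

    SIDEBAR_ARCHIVED = 1 << 15
    SIDEBAR_DOWNLOAD = 1 << 16
    SIDEBAR_LIST = 1 << 17

    # (highest_flag << 1) - 1 sets bits 0..17, covering every sidebar flag
    ADMIN_USER_SIDEBAR = (SIDEBAR_LIST << 1) - 1

    def check(mask, flag):
        return bool(mask & flag)

    assert check(ADMIN_USER_SIDEBAR, SIDEBAR_DOWNLOAD)
    assert check(ADMIN_USER_SIDEBAR, SIDEBAR_LIST)
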
import logger, ub, isoLanguages from .pagination import Pagination +from weakref import WeakSet + try: import unidecode use_unidecode = True except ImportError: use_unidecode = False - cc_exceptions = ['datetime', 'comments', 'composite', 'series'] cc_classes = {} Base = declarative_base() books_authors_link = Table('books_authors_link', Base.metadata, - Column('book', Integer, ForeignKey('books.id'), primary_key=True), - Column('author', Integer, ForeignKey('authors.id'), primary_key=True) - ) + Column('book', Integer, ForeignKey('books.id'), primary_key=True), + Column('author', Integer, ForeignKey('authors.id'), primary_key=True) + ) books_tags_link = Table('books_tags_link', Base.metadata, - Column('book', Integer, ForeignKey('books.id'), primary_key=True), - Column('tag', Integer, ForeignKey('tags.id'), primary_key=True) - ) + Column('book', Integer, ForeignKey('books.id'), primary_key=True), + Column('tag', Integer, ForeignKey('tags.id'), primary_key=True) + ) books_series_link = Table('books_series_link', Base.metadata, - Column('book', Integer, ForeignKey('books.id'), primary_key=True), - Column('series', Integer, ForeignKey('series.id'), primary_key=True) - ) + Column('book', Integer, ForeignKey('books.id'), primary_key=True), + Column('series', Integer, ForeignKey('series.id'), primary_key=True) + ) books_ratings_link = Table('books_ratings_link', Base.metadata, - Column('book', Integer, ForeignKey('books.id'), primary_key=True), - Column('rating', Integer, ForeignKey('ratings.id'), primary_key=True) - ) + Column('book', Integer, ForeignKey('books.id'), primary_key=True), + Column('rating', Integer, ForeignKey('ratings.id'), primary_key=True) + ) books_languages_link = Table('books_languages_link', Base.metadata, - Column('book', Integer, ForeignKey('books.id'), primary_key=True), - Column('lang_code', Integer, ForeignKey('languages.id'), primary_key=True) - ) + Column('book', Integer, ForeignKey('books.id'), primary_key=True), + Column('lang_code', Integer, ForeignKey('languages.id'), primary_key=True) + ) books_publishers_link = Table('books_publishers_link', Base.metadata, - Column('book', Integer, ForeignKey('books.id'), primary_key=True), - Column('publisher', Integer, ForeignKey('publishers.id'), primary_key=True) - ) + Column('book', Integer, ForeignKey('books.id'), primary_key=True), + Column('publisher', Integer, ForeignKey('publishers.id'), primary_key=True) + ) class Identifiers(Base): @@ -171,6 +171,9 @@ class Comments(Base): self.text = text self.book = book + def get(self): + return self.text + def __repr__(self): return u"".format(self.text) @@ -184,6 +187,9 @@ class Tags(Base): def __init__(self, name): self.name = name + def get(self): + return self.name + def __repr__(self): return u"".format(self.name) @@ -201,6 +207,9 @@ class Authors(Base): self.sort = sort self.link = link + def get(self): + return self.name + def __repr__(self): return u"".format(self.name, self.sort, self.link) @@ -216,6 +225,9 @@ class Series(Base): self.name = name self.sort = sort + def get(self): + return self.name + def __repr__(self): return u"".format(self.name, self.sort) @@ -229,6 +241,9 @@ class Ratings(Base): def __init__(self, rating): self.rating = rating + def get(self): + return self.rating + def __repr__(self): return u"".format(self.rating) @@ -242,6 +257,12 @@ class Languages(Base): def __init__(self, lang_code): self.lang_code = lang_code + def get(self): + if self.language_name: + return self.language_name + else: + return self.lang_code + def __repr__(self): return 
u"".format(self.lang_code) @@ -257,13 +278,16 @@ class Publishers(Base): self.name = name self.sort = sort + def get(self): + return self.name + def __repr__(self): return u"".format(self.name, self.sort) class Data(Base): __tablename__ = 'data' - __table_args__ = {'schema':'calibre'} + __table_args__ = {'schema': 'calibre'} id = Column(Integer, primary_key=True) book = Column(Integer, ForeignKey('books.id'), nullable=False) @@ -277,6 +301,10 @@ class Data(Base): self.uncompressed_size = uncompressed_size self.name = name + # ToDo: Check + def get(self): + return self.name + def __repr__(self): return u"".format(self.book, self.format, self.uncompressed_size, self.name) @@ -284,14 +312,14 @@ class Data(Base): class Books(Base): __tablename__ = 'books' - DEFAULT_PUBDATE = "0101-01-01 00:00:00+00:00" + DEFAULT_PUBDATE = datetime(101, 1, 1, 0, 0, 0, 0) # ("0101-01-01 00:00:00+00:00") id = Column(Integer, primary_key=True, autoincrement=True) title = Column(String(collation='NOCASE'), nullable=False, default='Unknown') sort = Column(String(collation='NOCASE')) author_sort = Column(String(collation='NOCASE')) timestamp = Column(TIMESTAMP, default=datetime.utcnow) - pubdate = Column(String) # , default=datetime.utcnow) + pubdate = Column(TIMESTAMP, default=DEFAULT_PUBDATE) series_index = Column(String, nullable=False, default="1.0") last_modified = Column(TIMESTAMP, default=datetime.utcnow) path = Column(String, default="", nullable=False) @@ -321,7 +349,8 @@ class Books(Base): self.series_index = series_index self.last_modified = last_modified self.path = path - self.has_cover = has_cover + self.has_cover = (has_cover != None) + def __repr__(self): return u"".format(self.title, self.sort, self.author_sort, @@ -332,6 +361,7 @@ class Books(Base): def atom_timestamp(self): return (self.timestamp.strftime('%Y-%m-%dT%H:%M:%S+00:00') or '') + class Custom_Columns(Base): __tablename__ = 'custom_columns' @@ -352,46 +382,67 @@ class Custom_Columns(Base): return display_dict -class CalibreDB(threading.Thread): +class AlchemyEncoder(json.JSONEncoder): + + def default(self, obj): + if isinstance(obj.__class__, DeclarativeMeta): + # an SQLAlchemy class + fields = {} + for field in [x for x in dir(obj) if not x.startswith('_') and x != 'metadata']: + if field == 'books': + continue + data = obj.__getattribute__(field) + try: + if isinstance(data, str): + data = data.replace("'", "\'") + elif isinstance(data, InstrumentedList): + el = list() + for ele in data: + if ele.get: + el.append(ele.get()) + else: + el.append(json.dumps(ele, cls=AlchemyEncoder)) + data = ",".join(el) + if data == '[]': + data = "" + else: + json.dumps(data) + fields[field] = data + except: + fields[field] = "" + # a json-encodable dict + return fields + + return json.JSONEncoder.default(self, obj) + + +class CalibreDB(): + _init = False + engine = None + config = None + session_factory = None + # This is a WeakSet so that references here don't keep other CalibreDB + # instances alive once they reach the end of their respective scopes + instances = WeakSet() def __init__(self): - threading.Thread.__init__(self) - self.engine = None + """ Initialize a new CalibreDB session + """ self.session = None - self.queue = None - self.log = None - self.config = None + if self._init: + self.initSession() - def add_queue(self,queue): - self.queue = queue - self.log = logger.create() - - def run(self): - while True: - i = self.queue.get() - if i == 'dummy': - self.queue.task_done() - break - if i['task'] == 'add_format': - cur_book = 
self.session.query(Books).filter(Books.id == i['id']).first() - cur_book.data.append(i['format']) - try: - # db.session.merge(cur_book) - self.session.commit() - except OperationalError as e: - self.session.rollback() - self.log.error("Database error: %s", e) - # self._handleError(_(u"Database error: %(error)s.", error=e)) - # return - self.queue.task_done() + self.instances.add(self) - def stop(self): - self.queue.put('dummy') + def initSession(self): + self.session = self.session_factory() + self.update_title_sort(self.config) - def setup_db(self, config, app_db_path): - self.config = config - self.dispose() + @classmethod + def setup_db(cls, config, app_db_path): + cls.config = config + cls.dispose() if not config.config_calibre_dir: config.invalidate() @@ -403,22 +454,21 @@ class CalibreDB(threading.Thread): return False try: - self.engine = create_engine('sqlite://', - echo=False, - isolation_level="SERIALIZABLE", - connect_args={'check_same_thread': False}, - poolclass=StaticPool) - self.engine.execute("attach database '{}' as calibre;".format(dbpath)) - self.engine.execute("attach database '{}' as app_settings;".format(app_db_path)) + cls.engine = create_engine('sqlite://', + echo=False, + isolation_level="SERIALIZABLE", + connect_args={'check_same_thread': False}, + poolclass=StaticPool) + cls.engine.execute("attach database '{}' as calibre;".format(dbpath)) + cls.engine.execute("attach database '{}' as app_settings;".format(app_db_path)) - conn = self.engine.connect() + conn = cls.engine.connect() # conn.text_factory = lambda b: b.decode(errors = 'ignore') possible fix for #1302 except Exception as e: config.invalidate(e) return False config.db_configured = True - self.update_title_sort(config, conn.connection) if not cc_classes: cc = conn.execute("SELECT id, datatype FROM custom_columns") @@ -433,12 +483,12 @@ class CalibreDB(threading.Thread): 'book': Column(Integer, ForeignKey('books.id'), primary_key=True), 'map_value': Column('value', Integer, - ForeignKey('custom_column_' + - str(row.id) + '.id'), - primary_key=True), + ForeignKey('custom_column_' + + str(row.id) + '.id'), + primary_key=True), 'extra': Column(Float), - 'asoc' : relationship('custom_column_' + str(row.id), uselist=False), - 'value' : association_proxy('asoc', 'value') + 'asoc': relationship('custom_column_' + str(row.id), uselist=False), + 'value': association_proxy('asoc', 'value') } books_custom_column_links[row.id] = type(str('books_custom_column_' + str(row.id) + '_link'), (Base,), dicttable) @@ -474,7 +524,7 @@ class CalibreDB(threading.Thread): 'custom_column_' + str(cc_id[0]), relationship(cc_classes[cc_id[0]], primaryjoin=( - Books.id == cc_classes[cc_id[0]].book), + Books.id == cc_classes[cc_id[0]].book), backref='books')) elif (cc_id[1] == 'series'): setattr(Books, @@ -488,17 +538,20 @@ class CalibreDB(threading.Thread): secondary=books_custom_column_links[cc_id[0]], backref='books')) - Session = scoped_session(sessionmaker(autocommit=False, - autoflush=False, - bind=self.engine)) - self.session = Session() + cls.session_factory = scoped_session(sessionmaker(autocommit=False, + autoflush=True, + bind=cls.engine)) + for inst in cls.instances: + inst.initSession() + + cls._init = True return True def get_book(self, book_id): return self.session.query(Books).filter(Books.id == book_id).first() def get_filtered_book(self, book_id, allow_show_archived=False): - return self.session.query(Books).filter(Books.id == book_id).\ + return self.session.query(Books).filter(Books.id == book_id). 
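
The restructured CalibreDB above swaps the worker-thread design for class-level state: one engine and one session factory shared by every instance, plus a WeakSet so setup_db() can hand a fresh session to each instance that is still alive without keeping dead ones pinned in memory. The pattern in isolation (Registry and rebind are invented names; the real class additionally guards with the _init flag):

    from weakref import WeakSet

    class Registry(object):
        factory = None
        instances = WeakSet()   # weak refs: entries vanish when instances die

        def __init__(self):
            self.session = None
            if Registry.factory is not None:
                self.session = Registry.factory()
            Registry.instances.add(self)

        @classmethod
        def rebind(cls, factory):
            cls.factory = factory
            for inst in cls.instances:   # refresh every live instance
                inst.session = factory()

    Registry.rebind(dict)  # every live Registry now carries a fresh dict as its "session"
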
\ filter(self.common_filters(allow_show_archived)).first() def get_book_by_uuid(self, book_uuid): @@ -545,10 +598,12 @@ class CalibreDB(threading.Thread): pos_content_cc_filter, ~neg_content_cc_filter, archived_filter) # Fill indexpage with all requested data from database - def fill_indexpage(self, page, database, db_filter, order, *join): - return self.fill_indexpage_with_archived_books(page, database, db_filter, order, False, *join) + def fill_indexpage(self, page, pagesize, database, db_filter, order, *join): + return self.fill_indexpage_with_archived_books(page, pagesize, database, db_filter, order, False, *join) - def fill_indexpage_with_archived_books(self, page, database, db_filter, order, allow_show_archived, *join): + def fill_indexpage_with_archived_books(self, page, pagesize, database, db_filter, order, allow_show_archived, + *join): + pagesize = pagesize or self.config.config_books_per_page if current_user.show_detail_random(): randm = self.session.query(Books) \ .filter(self.common_filters(allow_show_archived)) \ @@ -556,14 +611,14 @@ class CalibreDB(threading.Thread): .limit(self.config.config_random_books) else: randm = false() - off = int(int(self.config.config_books_per_page) * (page - 1)) + off = int(int(pagesize) * (page - 1)) query = self.session.query(database) \ .join(*join, isouter=True) \ .filter(db_filter) \ .filter(self.common_filters(allow_show_archived)) - pagination = Pagination(page, self.config.config_books_per_page, + pagination = Pagination(page, pagesize, len(query.all())) - entries = query.order_by(*order).offset(off).limit(self.config.config_books_per_page).all() + entries = query.order_by(*order).offset(off).limit(pagesize).all() for book in entries: book = self.order_authors(book) return entries, randm, pagination @@ -573,13 +628,16 @@ class CalibreDB(threading.Thread): sort_authors = entry.author_sort.split('&') authors_ordered = list() error = False + ids = [a.id for a in entry.authors] for auth in sort_authors: + results = self.session.query(Authors).filter(Authors.sort == auth.lstrip().strip()).all() # ToDo: How to handle not found authorname - result = self.session.query(Authors).filter(Authors.sort == auth.lstrip().strip()).first() - if not result: + if not len(results): error = True break - authors_ordered.append(result) + for r in results: + if r.id in ids: + authors_ordered.append(r) if not error: entry.authors = authors_ordered return entry @@ -599,24 +657,39 @@ class CalibreDB(threading.Thread): for authorterm in authorterms: q.append(Books.authors.any(func.lower(Authors.name).ilike("%" + authorterm + "%"))) - return self.session.query(Books)\ + return self.session.query(Books) \ .filter(and_(Books.authors.any(and_(*q)), func.lower(Books.title).ilike("%" + title + "%"))).first() # read search results from calibre-database and return it (function is used for feed and simple search - def get_search_results(self, term): + def get_search_results(self, term, offset=None, order=None, limit=None): + order = order or [Books.sort] + pagination = None term.strip().lower() self.session.connection().connection.connection.create_function("lower", 1, lcase) q = list() authorterms = re.split("[, ]+", term) for authorterm in authorterms: q.append(Books.authors.any(func.lower(Authors.name).ilike("%" + authorterm + "%"))) - return self.session.query(Books).filter(self.common_filters(True)).filter( + result = self.session.query(Books).filter(self.common_filters(True)).filter( or_(Books.tags.any(func.lower(Tags.name).ilike("%" + term + "%")), 
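
The pagination arithmetic threaded through fill_indexpage() and get_search_results() reduces to two conversions, sketched here standalone. Note the hunks above use true division ("/"), which yields a float page number under Python 3; floor division is shown below for a clean integer:

    def offset_for_page(page, pagesize):
        return int(pagesize) * (page - 1)   # as in: off = int(int(pagesize) * (page - 1))

    def page_for_offset(offset, pagesize):
        return int(offset) // int(pagesize) + 1

    assert offset_for_page(2, 25) == 25
    assert page_for_offset(25, 25) == 2
    assert page_for_offset(30, 25) == 2     # mid-page offsets round down
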
Books.series.any(func.lower(Series.name).ilike("%" + term + "%")), Books.authors.any(and_(*q)), Books.publishers.any(func.lower(Publishers.name).ilike("%" + term + "%")), func.lower(Books.title).ilike("%" + term + "%") - )).order_by(Books.sort).all() + )).order_by(*order).all() + result_count = len(result) + if offset != None and limit != None: + offset = int(offset) + limit_all = offset + int(limit) + pagination = Pagination((offset / (int(limit)) + 1), limit, result_count) + else: + offset = 0 + limit_all = result_count + + ub.store_ids(result) + + + return result[offset:limit_all], result_count, pagination, # Creates for all stored languages a translated speaking name in the array for the UI def speaking_language(self, languages=None): @@ -650,17 +723,23 @@ class CalibreDB(threading.Thread): conn = conn or self.session.connection().connection.connection conn.create_function("title_sort", 1, _title_sort) - def dispose(self): + @classmethod + def dispose(cls): # global session - old_session = self.session - self.session = None - if old_session: - try: old_session.close() - except: pass - if old_session.bind: - try: old_session.bind.dispose() - except Exception: pass + for inst in cls.instances: + old_session = inst.session + inst.session = None + if old_session: + try: + old_session.close() + except: + pass + if old_session.bind: + try: + old_session.bind.dispose() + except Exception: + pass for attr in list(Books.__dict__.keys()): if attr.startswith("custom_column_"): @@ -677,10 +756,11 @@ class CalibreDB(threading.Thread): Base.metadata.remove(table) def reconnect_db(self, config, app_db_path): - self.session.close() + self.dispose() self.engine.dispose() self.setup_db(config, app_db_path) + def lcase(s): try: return unidecode.unidecode(s.lower()) diff --git a/cps/editbooks.py b/cps/editbooks.py index 3127d4a9..7f7764e5 100644 --- a/cps/editbooks.py +++ b/cps/editbooks.py @@ -27,14 +27,17 @@ import json from shutil import copyfile from uuid import uuid4 +from babel import Locale as LC from flask import Blueprint, request, flash, redirect, url_for, abort, Markup, Response from flask_babel import gettext as _ from flask_login import current_user, login_required from sqlalchemy.exc import OperationalError from . import constants, logger, isoLanguages, gdriveutils, uploader, helper -from . import config, get_locale, ub, worker, db +from . import config, get_locale, ub, db from . 
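
get_search_results() now returns a tuple of (window of results, total count, optional Pagination) instead of a bare list, so callers such as feed_search() in cps/opds.py further down have to adapt their unpacking. The slicing contract, modeled standalone (window() is an invented name):

    def window(result, offset=None, limit=None):
        count = len(result)
        if offset is not None and limit is not None:
            return result[int(offset):int(offset) + int(limit)], count
        return result, count

    assert window(list(range(10)), 3, 4) == ([3, 4, 5, 6], 10)
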
import calibre_db
+from .services.worker import WorkerThread
+from .tasks.upload import TaskUpload
 from .web import login_required_if_no_ano, render_title_template, edit_required, upload_required
@@ -172,21 +175,42 @@ def modify_identifiers(input_identifiers, db_identifiers, db_session):
             changed = True
     return changed, error
 
-
-@editbook.route("/delete/<int:book_id>/", defaults={'book_format': ""})
-@editbook.route("/delete/<int:book_id>/<string:book_format>/")
+@editbook.route("/ajax/delete/<int:book_id>")
 @login_required
-def delete_book(book_id, book_format):
+def delete_book_from_details(book_id):
+    return Response(delete_book(book_id,"", True), mimetype='application/json')
+
+
+@editbook.route("/delete/<int:book_id>", defaults={'book_format': ""})
+@editbook.route("/delete/<int:book_id>/<string:book_format>")
+@login_required
+def delete_book_ajax(book_id, book_format):
+    return delete_book(book_id,book_format, False)
+
+def delete_book(book_id, book_format, jsonResponse):
+    warning = {}
     if current_user.role_delete_books():
         book = calibre_db.get_book(book_id)
         if book:
             try:
                 result, error = helper.delete_book(book, config.config_calibre_dir, book_format=book_format.upper())
                 if not result:
-                    flash(error, category="error")
-                    return redirect(url_for('editbook.edit_book', book_id=book_id))
+                    if jsonResponse:
+                        return json.dumps({"location": url_for("editbook.edit_book"),
+                                           "type": "alert",
+                                           "format": "",
+                                           "error": error}),
+                    else:
+                        flash(error, category="error")
+                        return redirect(url_for('editbook.edit_book', book_id=book_id))
                 if error:
-                    flash(error, category="warning")
+                    if jsonResponse:
+                        warning = {"location": url_for("editbook.edit_book"),
+                                   "type": "warning",
+                                   "format": "",
+                                   "error": error}
+                    else:
+                        flash(error, category="warning")
                 if not book_format:
                     # delete book from Shelfs, Downloads, Read list
                     ub.session.query(ub.BookShelf).filter(ub.BookShelf.book_id == book_id).delete()
@@ -236,17 +260,29 @@
                         filter(db.Data.format == book_format).delete()
                     calibre_db.session.commit()
             except Exception as e:
-                log.debug(e)
+                log.exception(e)
                 calibre_db.session.rollback()
         else:
             # book not found
             log.error('Book with id "%s" could not be deleted: not found', book_id)
     if book_format:
-        flash(_('Book Format Successfully Deleted'), category="success")
-        return redirect(url_for('editbook.edit_book', book_id=book_id))
+        if jsonResponse:
+            return json.dumps([warning, {"location": url_for("editbook.edit_book", book_id=book_id),
+                                         "type": "success",
+                                         "format": book_format,
+                                         "message": _('Book Format Successfully Deleted')}])
+        else:
+            flash(_('Book Format Successfully Deleted'), category="success")
+            return redirect(url_for('editbook.edit_book', book_id=book_id))
     else:
-        flash(_('Book Successfully Deleted'), category="success")
-        return redirect(url_for('web.index'))
+        if jsonResponse:
+            return json.dumps([warning, {"location": url_for('web.index'),
+                                         "type": "success",
+                                         "format": book_format,
+                                         "message": _('Book Successfully Deleted')}])
+        else:
+            flash(_('Book Successfully Deleted'), category="success")
+            return redirect(url_for('web.index'))
 
 
 def render_edit_book(book_id):
@@ -466,64 +502,64 @@ def edit_cc_data(book_id, book, to_save):
 def upload_single_file(request, book, book_id):
     # Check and handle Uploaded file
     if 'btn-upload-format' in request.files:
-        requested_file = request.files['btn-upload-format']
-        # check for empty request
-        if requested_file.filename != '':
-            if not current_user.role_upload():
-                abort(403)
-            if '.'
in requested_file.filename: - file_ext = requested_file.filename.rsplit('.', 1)[-1].lower() - if file_ext not in constants.EXTENSIONS_UPLOAD and '' not in constants.EXTENSIONS_UPLOAD: - flash(_("File extension '%(ext)s' is not allowed to be uploaded to this server", ext=file_ext), - category="error") - return redirect(url_for('web.show_book', book_id=book.id)) - else: - flash(_('File to be uploaded must have an extension'), category="error") + requested_file = request.files['btn-upload-format'] + # check for empty request + if requested_file.filename != '': + if not current_user.role_upload(): + abort(403) + if '.' in requested_file.filename: + file_ext = requested_file.filename.rsplit('.', 1)[-1].lower() + if file_ext not in constants.EXTENSIONS_UPLOAD and '' not in constants.EXTENSIONS_UPLOAD: + flash(_("File extension '%(ext)s' is not allowed to be uploaded to this server", ext=file_ext), + category="error") return redirect(url_for('web.show_book', book_id=book.id)) + else: + flash(_('File to be uploaded must have an extension'), category="error") + return redirect(url_for('web.show_book', book_id=book.id)) - file_name = book.path.rsplit('/', 1)[-1] - filepath = os.path.normpath(os.path.join(config.config_calibre_dir, book.path)) - saved_filename = os.path.join(filepath, file_name + '.' + file_ext) + file_name = book.path.rsplit('/', 1)[-1] + filepath = os.path.normpath(os.path.join(config.config_calibre_dir, book.path)) + saved_filename = os.path.join(filepath, file_name + '.' + file_ext) - # check if file path exists, otherwise create it, copy file to calibre path and delete temp file - if not os.path.exists(filepath): - try: - os.makedirs(filepath) - except OSError: - flash(_(u"Failed to create path %(path)s (Permission denied).", path=filepath), category="error") - return redirect(url_for('web.show_book', book_id=book.id)) + # check if file path exists, otherwise create it, copy file to calibre path and delete temp file + if not os.path.exists(filepath): try: - requested_file.save(saved_filename) + os.makedirs(filepath) except OSError: - flash(_(u"Failed to store file %(file)s.", file=saved_filename), category="error") + flash(_(u"Failed to create path %(path)s (Permission denied).", path=filepath), category="error") + return redirect(url_for('web.show_book', book_id=book.id)) + try: + requested_file.save(saved_filename) + except OSError: + flash(_(u"Failed to store file %(file)s.", file=saved_filename), category="error") + return redirect(url_for('web.show_book', book_id=book.id)) + + file_size = os.path.getsize(saved_filename) + is_format = calibre_db.get_book_format(book_id, file_ext.upper()) + + # Format entry already exists, no need to update the database + if is_format: + log.warning('Book format %s already existing', file_ext.upper()) + else: + try: + db_format = db.Data(book_id, file_ext.upper(), file_size, file_name) + calibre_db.session.add(db_format) + calibre_db.session.commit() + calibre_db.update_title_sort(config) + except OperationalError as e: + calibre_db.session.rollback() + log.error('Database error: %s', e) + flash(_(u"Database error: %(error)s.", error=e), category="error") return redirect(url_for('web.show_book', book_id=book.id)) - file_size = os.path.getsize(saved_filename) - is_format = calibre_db.get_book_format(book_id, file_ext.upper()) + # Queue uploader info + uploadText=_(u"File format %(ext)s added to %(book)s", ext=file_ext.upper(), book=book.title) + WorkerThread.add(current_user.nickname, TaskUpload( + "" + uploadText + "")) - # Format entry 
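
upload_single_file() keys its allowlist check off EXTENSIONS_UPLOAD, which the config_sql.py hunk at the top of this diff now lowercases; together they make the comparison case-insensitive. A standalone sketch (the sample list is illustrative; the real one comes from config_upload_formats):

    EXTENSIONS_UPLOAD = ['epub', 'mobi', 'pdf']

    def allowed(filename):
        if '.' not in filename:
            return False                      # uploads must carry an extension
        ext = filename.rsplit('.', 1)[-1].lower()
        return ext in EXTENSIONS_UPLOAD or '' in EXTENSIONS_UPLOAD

    assert allowed('book.EPUB')
    assert not allowed('book.exe')
    assert not allowed('README')
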
already exists, no need to update the database - if is_format: - log.warning('Book format %s already existing', file_ext.upper()) - else: - try: - db_format = db.Data(book_id, file_ext.upper(), file_size, file_name) - calibre_db.session.add(db_format) - calibre_db.session.commit() - calibre_db.update_title_sort(config) - except OperationalError as e: - calibre_db.session.rollback() - log.error('Database error: %s', e) - flash(_(u"Database error: %(error)s.", error=e), category="error") - return redirect(url_for('web.show_book', book_id=book.id)) - - # Queue uploader info - uploadText=_(u"File format %(ext)s added to %(book)s", ext=file_ext.upper(), book=book.title) - worker.add_upload(current_user.nickname, - "" + uploadText + "") - - return uploader.process( - saved_filename, *os.path.splitext(requested_file.filename), - rarExecutable=config.config_rarfile_location) + return uploader.process( + saved_filename, *os.path.splitext(requested_file.filename), + rarExecutable=config.config_rarfile_location) def upload_cover(request, book): @@ -569,6 +605,7 @@ def edit_book(book_id): merge_metadata(to_save, meta) # Update book edited_books_id = None + #handle book title if book.title != to_save["book_title"].rstrip().strip(): if to_save["book_title"] == '': @@ -779,42 +816,17 @@ def upload(): if not db_author: db_author = stored_author sort_author = stored_author.sort - sort_authors_list.append(sort_author) # helper.get_sorted_author(sort_author)) + sort_authors_list.append(sort_author) sort_authors = ' & '.join(sort_authors_list) title_dir = helper.get_valid_filename(title) author_dir = helper.get_valid_filename(db_author.name) - filepath = os.path.join(config.config_calibre_dir, author_dir, title_dir) - saved_filename = os.path.join(filepath, title_dir + meta.extension.lower()) - - # check if file path exists, otherwise create it, copy file to calibre path and delete temp file - if not os.path.exists(filepath): - try: - os.makedirs(filepath) - except OSError: - log.error("Failed to create path %s (Permission denied)", filepath) - flash(_(u"Failed to create path %(path)s (Permission denied).", path=filepath), category="error") - return Response(json.dumps({"location": url_for("web.index")}), mimetype='application/json') - try: - copyfile(meta.file_path, saved_filename) - os.unlink(meta.file_path) - except OSError as e: - log.error("Failed to move file %s: %s", saved_filename, e) - flash(_(u"Failed to Move File %(file)s: %(error)s", file=saved_filename, error=e), category="error") - return Response(json.dumps({"location": url_for("web.index")}), mimetype='application/json') - - if meta.cover is None: - has_cover = 0 - copyfile(os.path.join(constants.STATIC_DIR, 'generic_cover.jpg'), - os.path.join(filepath, "cover.jpg")) - else: - has_cover = 1 # combine path and normalize path from windows systems path = os.path.join(author_dir, title_dir).replace('\\', '/') # Calibre adds books with utc as timezone db_book = db.Books(title, "", sort_authors, datetime.utcnow(), datetime(101, 1, 1), - '1', datetime.utcnow(), path, has_cover, db_author, [], "") + '1', datetime.utcnow(), path, meta.cover, db_author, [], "") modif_date |= modify_database_object(input_authors, db_book.authors, db.Authors, calibre_db.session, 'author') @@ -832,7 +844,7 @@ def upload(): modif_date |= edit_book_series(meta.series, db_book) # Add file to book - file_size = os.path.getsize(saved_filename) + file_size = os.path.getsize(meta.file_path) db_data = db.Data(db_book, meta.extension.upper()[1:], file_size, title_dir) 
db_book.data.append(db_data) calibre_db.session.add(db_book) @@ -840,39 +852,44 @@ def upload(): # flush content, get db_book.id available calibre_db.session.flush() - # Comments needs book id therfore only possiblw after flush + # Comments needs book id therfore only possible after flush modif_date |= edit_book_comments(Markup(meta.description).unescape(), db_book) book_id = db_book.id title = db_book.title - error = helper.update_dir_stucture(book_id, config.config_calibre_dir, input_authors[0]) + error = helper.update_dir_structure_file(book_id, + config.config_calibre_dir, + input_authors[0], + meta.file_path, + title_dir + meta.extension) # move cover to final directory, including book id - if has_cover: - new_coverpath = os.path.join(config.config_calibre_dir, db_book.path, "cover.jpg") - try: - copyfile(meta.cover, new_coverpath) + if meta.cover: + coverfile = meta.cover + else: + coverfile = os.path.join(constants.STATIC_DIR, 'generic_cover.jpg') + new_coverpath = os.path.join(config.config_calibre_dir, db_book.path, "cover.jpg") + try: + copyfile(coverfile, new_coverpath) + if meta.cover: os.unlink(meta.cover) - except OSError as e: - log.error("Failed to move cover file %s: %s", new_coverpath, e) - flash(_(u"Failed to Move Cover File %(file)s: %(error)s", file=new_coverpath, - error=e), - category="error") + except OSError as e: + log.error("Failed to move cover file %s: %s", new_coverpath, e) + flash(_(u"Failed to Move Cover File %(file)s: %(error)s", file=new_coverpath, + error=e), + category="error") # save data to database, reread data calibre_db.session.commit() - #calibre_db.setup_db(config, ub.app_DB_path) - # Reread book. It's important not to filter the result, as it could have language which hide it from - # current users view (tags are not stored/extracted from metadata and could also be limited) - #book = calibre_db.get_book(book_id) + if config.config_use_google_drive: gdriveutils.updateGdriveCalibreFromLocal() if error: flash(error, category="error") uploadText=_(u"File %(file)s uploaded", file=title) - worker.add_upload(current_user.nickname, - "" + uploadText + "") + WorkerThread.add(current_user.nickname, TaskUpload( + "" + uploadText + "")) if len(request.files.getlist("btn-upload")) < 2: if current_user.role_edit() or current_user.role_admin(): @@ -910,3 +927,112 @@ def convert_bookformat(book_id): else: flash(_(u"There was an error converting this book: %(res)s", res=rtn), category="error") return redirect(url_for('editbook.edit_book', book_id=book_id)) + +@editbook.route("/ajax/editbooks/", methods=['POST']) +@login_required_if_no_ano +def edit_list_book(param): + vals = request.form.to_dict() + # calibre_db.update_title_sort(config) + #calibre_db.session.connection().connection.connection.create_function('uuid4', 0, lambda: str(uuid4())) + book = calibre_db.get_book(vals['pk']) + if param =='series_index': + edit_book_series_index(vals['value'], book) + elif param =='tags': + edit_book_tags(vals['value'], book) + elif param =='series': + edit_book_series(vals['value'], book) + elif param =='publishers': + vals['publisher'] = vals['value'] + edit_book_publisher(vals, book) + elif param =='languages': + edit_book_languages(vals['value'], book) + elif param =='author_sort': + book.author_sort = vals['value'] + elif param =='title': + book.title = vals['value'] + helper.update_dir_stucture(book.id, config.config_calibre_dir) + elif param =='sort': + book.sort = vals['value'] + # ToDo: edit books + elif param =='authors': + input_authors = 
vals['value'].split('&') + input_authors = list(map(lambda it: it.strip().replace(',', '|'), input_authors)) + modify_database_object(input_authors, book.authors, db.Authors, calibre_db.session, 'author') + sort_authors_list = list() + for inp in input_authors: + stored_author = calibre_db.session.query(db.Authors).filter(db.Authors.name == inp).first() + if not stored_author: + stored_author = helper.get_sorted_author(inp) + else: + stored_author = stored_author.sort + sort_authors_list.append(helper.get_sorted_author(stored_author)) + sort_authors = ' & '.join(sort_authors_list) + if book.author_sort != sort_authors: + book.author_sort = sort_authors + helper.update_dir_stucture(book.id, config.config_calibre_dir, input_authors[0]) + book.last_modified = datetime.utcnow() + calibre_db.session.commit() + return "" + +@editbook.route("/ajax/sort_value//") +@login_required +def get_sorted_entry(field, bookid): + if field == 'title' or field == 'authors': + book = calibre_db.get_filtered_book(bookid) + if book: + if field == 'title': + return json.dumps({'sort': book.sort}) + elif field == 'authors': + return json.dumps({'author_sort': book.author_sort}) + return "" + + +@editbook.route("/ajax/simulatemerge", methods=['POST']) +@login_required +def simulate_merge_list_book(): + vals = request.get_json().get('Merge_books') + if vals: + to_book = calibre_db.get_book(vals[0]).title + vals.pop(0) + if to_book: + for book_id in vals: + from_book = [] + from_book.append(calibre_db.get_book(book_id).title) + return json.dumps({'to': to_book, 'from': from_book}) + return "" + + +@editbook.route("/ajax/mergebooks", methods=['POST']) +@login_required +def merge_list_book(): + vals = request.get_json().get('Merge_books') + to_file = list() + if vals: + # load all formats from target book + to_book = calibre_db.get_book(vals[0]) + vals.pop(0) + if to_book: + for file in to_book.data: + to_file.append(file.format) + to_name = helper.get_valid_filename(to_book.title) + ' - ' + \ + helper.get_valid_filename(to_book.authors[0].name) + for book_id in vals: + from_book = calibre_db.get_book(book_id) + if from_book: + for element in from_book.data: + if element.format not in to_file: + # create new data entry with: book_id, book_format, uncompressed_size, name + filepath_new = os.path.normpath(os.path.join(config.config_calibre_dir, + to_book.path, + to_name + "." + element.format.lower())) + filepath_old = os.path.normpath(os.path.join(config.config_calibre_dir, + from_book.path, + element.name + "." 
+ element.format.lower())) + copyfile(filepath_old, filepath_new) + to_book.data.append(db.Data(to_book.id, + element.format, + element.uncompressed_size, + to_name)) + delete_book(from_book.id,"", True) # json_resp = + return json.dumps({'success': True}) + return "" diff --git a/cps/epub.py b/cps/epub.py index a1f2b1f0..779c1ca2 100644 --- a/cps/epub.py +++ b/cps/epub.py @@ -26,6 +26,7 @@ from .helper import split_authors from .constants import BookMeta + def extractCover(zipFile, coverFile, coverpath, tmp_file_name): if coverFile is None: return None diff --git a/cps/helper.py b/cps/helper.py index d40128d7..f946a039 100644 --- a/cps/helper.py +++ b/cps/helper.py @@ -32,13 +32,14 @@ from tempfile import gettempdir import requests from babel.dates import format_datetime from babel.units import format_unit -from flask import send_from_directory, make_response, redirect, abort +from flask import send_from_directory, make_response, redirect, abort, url_for from flask_babel import gettext as _ from flask_login import current_user from sqlalchemy.sql.expression import true, false, and_, text, func from werkzeug.datastructures import Headers from werkzeug.security import generate_password_hash from . import calibre_db +from .tasks.convert import TaskConvert try: from urllib.parse import quote @@ -58,12 +59,12 @@ try: except ImportError: use_PIL = False -from . import logger, config, get_locale, db, ub, worker +from . import logger, config, get_locale, db, ub from . import gdriveutils as gd from .constants import STATIC_DIR as _STATIC_DIR from .subproc_wrapper import process_wait -from .worker import STAT_WAITING, STAT_FAIL, STAT_STARTED, STAT_FINISH_SUCCESS -from .worker import TASK_EMAIL, TASK_CONVERT, TASK_UPLOAD, TASK_CONVERT_ANY +from .services.worker import WorkerThread, STAT_WAITING, STAT_FAIL, STAT_STARTED, STAT_FINISH_SUCCESS +from .tasks.mail import TaskEmail log = logger.create() @@ -73,46 +74,42 @@ log = logger.create() def convert_book_format(book_id, calibrepath, old_book_format, new_book_format, user_id, kindle_mail=None): book = calibre_db.get_book(book_id) data = calibre_db.get_book_format(book.id, old_book_format) + file_path = os.path.join(calibrepath, book.path, data.name) if not data: error_message = _(u"%(format)s format not found for book id: %(book)d", format=old_book_format, book=book_id) log.error("convert_book_format: %s", error_message) return error_message if config.config_use_google_drive: - df = gd.getFileFromEbooksFolder(book.path, data.name + "." + old_book_format.lower()) - if df: - datafile = os.path.join(calibrepath, book.path, data.name + u"." + old_book_format.lower()) - if not os.path.exists(os.path.join(calibrepath, book.path)): - os.makedirs(os.path.join(calibrepath, book.path)) - df.GetContentFile(datafile) - else: + if not gd.getFileFromEbooksFolder(book.path, data.name + "." + old_book_format.lower()): error_message = _(u"%(format)s not found on Google Drive: %(fn)s", format=old_book_format, fn=data.name + "." + old_book_format.lower()) return error_message - file_path = os.path.join(calibrepath, book.path, data.name) - if os.path.exists(file_path + "." 
+ old_book_format.lower()): - # read settings and append converter task to queue - if kindle_mail: - settings = config.get_mail_settings() - settings['subject'] = _('Send to Kindle') # pretranslate Subject for e-mail - settings['body'] = _(u'This e-mail has been sent via Calibre-Web.') - # text = _(u"%(format)s: %(book)s", format=new_book_format, book=book.title) - else: - settings = dict() - txt = (u"%s -> %s: %s" % (old_book_format, new_book_format, book.title)) - settings['old_book_format'] = old_book_format - settings['new_book_format'] = new_book_format - worker.add_convert(file_path, book.id, user_id, txt, settings, kindle_mail) - return None else: - error_message = _(u"%(format)s not found: %(fn)s", - format=old_book_format, fn=data.name + "." + old_book_format.lower()) - return error_message + if not os.path.exists(file_path + "." + old_book_format.lower()): + error_message = _(u"%(format)s not found: %(fn)s", + format=old_book_format, fn=data.name + "." + old_book_format.lower()) + return error_message + # read settings and append converter task to queue + if kindle_mail: + settings = config.get_mail_settings() + settings['subject'] = _('Send to Kindle') # pretranslate Subject for e-mail + settings['body'] = _(u'This e-mail has been sent via Calibre-Web.') + else: + settings = dict() + txt = (u"%s -> %s: %s" % ( + old_book_format, + new_book_format, + "" + book.title + "")) + settings['old_book_format'] = old_book_format + settings['new_book_format'] = new_book_format + WorkerThread.add(user_id, TaskConvert(file_path, book.id, txt, settings, kindle_mail, user_id)) + return None def send_test_mail(kindle_mail, user_name): - worker.add_email(_(u'Calibre-Web test e-mail'), None, None, - config.get_mail_settings(), kindle_mail, user_name, - _(u"Test e-mail"), _(u'This e-mail has been sent via Calibre-Web.')) + WorkerThread.add(user_name, TaskEmail(_(u'Calibre-Web test e-mail'), None, None, + config.get_mail_settings(), kindle_mail, _(u"Test e-mail"), + _(u'This e-mail has been sent via Calibre-Web.'))) return @@ -127,9 +124,16 @@ def send_registration_mail(e_mail, user_name, default_password, resend=False): text += "Don't forget to change your password after first login.\r\n" text += "Sincerely\r\n\r\n" text += "Your Calibre-Web team" - worker.add_email(_(u'Get Started with Calibre-Web'), None, None, - config.get_mail_settings(), e_mail, None, - _(u"Registration e-mail for user: %(name)s", name=user_name), text) + WorkerThread.add(None, TaskEmail( + subject=_(u'Get Started with Calibre-Web'), + filepath=None, + attachment=None, + settings=config.get_mail_settings(), + recipient=e_mail, + taskMessage=_(u"Registration e-mail for user: %(name)s", name=user_name), + text=text + )) + return @@ -221,9 +225,9 @@ def send_mail(book_id, book_format, convert, kindle_mail, calibrepath, user_id): for entry in iter(book.data): if entry.format.upper() == book_format.upper(): converted_file_name = entry.name + '.' + book_format.lower() - worker.add_email(_(u"Send to Kindle"), book.path, converted_file_name, - config.get_mail_settings(), kindle_mail, user_id, - _(u"E-mail: %(book)s", book=book.title), _(u'This e-mail has been sent via Calibre-Web.')) + WorkerThread.add(user_id, TaskEmail(_(u"Send to Kindle"), book.path, converted_file_name, + config.get_mail_settings(), kindle_mail, + _(u"E-mail: %(book)s", book=book.title), _(u'This e-mail has been sent via Calibre-Web.'))) return return _(u"The requested file could not be read. 
Maybe wrong permissions?") @@ -343,66 +347,69 @@ def delete_book_file(book, calibrepath, book_format=None): path=book.path) -def update_dir_structure_file(book_id, calibrepath, first_author): +# Moves files in file storage during author/title rename, or from temp dir to file storage +def update_dir_structure_file(book_id, calibrepath, first_author, orignal_filepath, db_filename): + # get book database entry from id, if original path overwrite source with original_filepath localbook = calibre_db.get_book(book_id) - path = os.path.join(calibrepath, localbook.path) + if orignal_filepath: + path = orignal_filepath + else: + path = os.path.join(calibrepath, localbook.path) + # Create (current) authordir and titledir from database authordir = localbook.path.split('/')[0] + titledir = localbook.path.split('/')[1] + + # Create new_authordir from parameter or from database + # Create new titledir from database and add id if first_author: new_authordir = get_valid_filename(first_author) else: new_authordir = get_valid_filename(localbook.authors[0].name) - - titledir = localbook.path.split('/')[1] new_titledir = get_valid_filename(localbook.title) + " (" + str(book_id) + ")" - if titledir != new_titledir: - new_title_path = os.path.join(os.path.dirname(path), new_titledir) + if titledir != new_titledir or authordir != new_authordir or orignal_filepath: + new_path = os.path.join(calibrepath, new_authordir, new_titledir) + new_name = get_valid_filename(localbook.title) + ' - ' + get_valid_filename(new_authordir) try: - if not os.path.exists(new_title_path): - os.renames(os.path.normcase(path), os.path.normcase(new_title_path)) - else: - log.info("Copying title: %s into existing: %s", path, new_title_path) + if orignal_filepath: + os.renames(os.path.normcase(path), + os.path.normcase(os.path.join(new_path, db_filename))) + log.debug("Moving title: %s to %s/%s", path, new_path, new_name) + # Check new path is not valid path + elif not os.path.exists(new_path): + # move original path to new path + os.renames(os.path.normcase(path), os.path.normcase(new_path)) + log.debug("Moving title: %s to %s", path, new_path) + else: # path is valid copy only files to new location (merge) + log.info("Moving title: %s into existing: %s", path, new_path) + # Take all files and subfolder from old path (strange command) for dir_name, __, file_list in os.walk(path): for file in file_list: os.renames(os.path.normcase(os.path.join(dir_name, file)), - os.path.normcase(os.path.join(new_title_path + dir_name[len(path):], file))) - path = new_title_path - localbook.path = localbook.path.split('/')[0] + '/' + new_titledir + os.path.normcase(os.path.join(new_path + dir_name[len(path):], file))) + # change location in database to new author/title path + localbook.path = os.path.join(new_authordir, new_titledir) except OSError as ex: - log.error("Rename title from: %s to %s: %s", path, new_title_path, ex) + log.error("Rename title from: %s to %s: %s", path, new_path, ex) log.debug(ex, exc_info=True) return _("Rename title from: '%(src)s' to '%(dest)s' failed with error: %(error)s", - src=path, dest=new_title_path, error=str(ex)) - if authordir != new_authordir: - new_author_path = os.path.join(calibrepath, new_authordir, os.path.basename(path)) + src=path, dest=new_path, error=str(ex)) + + # Rename all files from old names to new names try: - os.renames(os.path.normcase(path), os.path.normcase(new_author_path)) - localbook.path = new_authordir + '/' + localbook.path.split('/')[1] - except OSError as ex: - log.error("Rename author 
from: %s to %s: %s", path, new_author_path, ex) - log.debug(ex, exc_info=True) - return _("Rename author from: '%(src)s' to '%(dest)s' failed with error: %(error)s", - src=path, dest=new_author_path, error=str(ex)) - # Rename all files from old names to new names - if authordir != new_authordir or titledir != new_titledir: - new_name = "" - try: - new_name = get_valid_filename(localbook.title) + ' - ' + get_valid_filename(new_authordir) - path_name = os.path.join(calibrepath, new_authordir, os.path.basename(path)) for file_format in localbook.data: os.renames(os.path.normcase( - os.path.join(path_name, file_format.name + '.' + file_format.format.lower())), - os.path.normcase(os.path.join(path_name, new_name + '.' + file_format.format.lower()))) + os.path.join(new_path, file_format.name + '.' + file_format.format.lower())), + os.path.normcase(os.path.join(new_path, new_name + '.' + file_format.format.lower()))) file_format.name = new_name except OSError as ex: - log.error("Rename file in path %s to %s: %s", path, new_name, ex) + log.error("Rename file in path %s to %s: %s", new_path, new_name, ex) log.debug(ex, exc_info=True) return _("Rename file in path '%(src)s' to '%(dest)s' failed with error: %(error)s", - src=path, dest=new_name, error=str(ex)) + src=new_path, dest=new_name, error=str(ex)) return False - def update_dir_structure_gdrive(book_id, first_author): error = False book = calibre_db.get_book(book_id) @@ -505,11 +512,11 @@ def uniq(inpt): # ################################# External interface ################################# -def update_dir_stucture(book_id, calibrepath, first_author=None): +def update_dir_stucture(book_id, calibrepath, first_author=None, orignal_filepath=None, db_filename=None): if config.config_use_google_drive: return update_dir_structure_gdrive(book_id, first_author) else: - return update_dir_structure_file(book_id, calibrepath, first_author) + return update_dir_structure_file(book_id, calibrepath, first_author, orignal_filepath, db_filename) def delete_book(book, calibrepath, book_format): @@ -722,47 +729,30 @@ def format_runtime(runtime): # helper function to apply localize status information in tasklist entries def render_task_status(tasklist): renderedtasklist = list() - for task in tasklist: - if task['user'] == current_user.nickname or current_user.role_admin(): - if task['formStarttime']: - task['starttime'] = format_datetime(task['formStarttime'], format='short', locale=get_locale()) - # task2['formStarttime'] = "" - else: - if 'starttime' not in task: - task['starttime'] = "" - - if 'formRuntime' not in task: - task['runtime'] = "" - else: - task['runtime'] = format_runtime(task['formRuntime']) + for num, user, added, task in tasklist: + if user == current_user.nickname or current_user.role_admin(): + ret = {} + if task.start_time: + ret['starttime'] = format_datetime(task.start_time, format='short', locale=get_locale()) + ret['runtime'] = format_runtime(task.runtime) # localize the task status - if isinstance(task['stat'], int): - if task['stat'] == STAT_WAITING: - task['status'] = _(u'Waiting') - elif task['stat'] == STAT_FAIL: - task['status'] = _(u'Failed') - elif task['stat'] == STAT_STARTED: - task['status'] = _(u'Started') - elif task['stat'] == STAT_FINISH_SUCCESS: - task['status'] = _(u'Finished') + if isinstance(task.stat, int): + if task.stat == STAT_WAITING: + ret['status'] = _(u'Waiting') + elif task.stat == STAT_FAIL: + ret['status'] = _(u'Failed') + elif task.stat == STAT_STARTED: + ret['status'] = _(u'Started') + elif task.stat == 
STAT_FINISH_SUCCESS: + ret['status'] = _(u'Finished') else: - task['status'] = _(u'Unknown Status') + ret['status'] = _(u'Unknown Status') - # localize the task type - if isinstance(task['taskType'], int): - if task['taskType'] == TASK_EMAIL: - task['taskMessage'] = _(u'E-mail: ') + task['taskMess'] - elif task['taskType'] == TASK_CONVERT: - task['taskMessage'] = _(u'Convert: ') + task['taskMess'] - elif task['taskType'] == TASK_UPLOAD: - task['taskMessage'] = _(u'Upload: ') + task['taskMess'] - elif task['taskType'] == TASK_CONVERT_ANY: - task['taskMessage'] = _(u'Convert: ') + task['taskMess'] - else: - task['taskMessage'] = _(u'Unknown Task: ') + task['taskMess'] - - renderedtasklist.append(task) + ret['taskMessage'] = "{}: {}".format(_(task.name), task.message) + ret['progress'] = "{} %".format(int(task.progress * 100)) + ret['user'] = user + renderedtasklist.append(ret) return renderedtasklist diff --git a/cps/jinjia.py b/cps/jinjia.py index c91534eb..87ba4159 100644 --- a/cps/jinjia.py +++ b/cps/jinjia.py @@ -44,6 +44,8 @@ log = logger.create() def url_for_other_page(page): args = request.view_args.copy() args['page'] = page + for get, val in request.args.items(): + args[get] = val return url_for(request.endpoint, **args) @@ -76,22 +78,18 @@ def mimetype_filter(val): @jinjia.app_template_filter('formatdate') def formatdate_filter(val): try: - conformed_timestamp = re.sub(r"[:]|([-](?!((\d{2}[:]\d{2})|(\d{4}))$))", '', val) - formatdate = datetime.datetime.strptime(conformed_timestamp[:15], "%Y%m%d %H%M%S") - return format_date(formatdate, format='medium', locale=get_locale()) + return format_date(val, format='medium', locale=get_locale()) except AttributeError as e: log.error('Babel error: %s, Current user locale: %s, Current User: %s', e, current_user.locale, current_user.nickname ) - return formatdate + return val @jinjia.app_template_filter('formatdateinput') def format_date_input(val): - conformed_timestamp = re.sub(r"[:]|([-](?!((\d{2}[:]\d{2})|(\d{4}))$))", '', val) - date_obj = datetime.datetime.strptime(conformed_timestamp[:15], "%Y%m%d %H%M%S") - input_date = date_obj.isoformat().split('T', 1)[0] # Hack to support dates <1900 + input_date = val.isoformat().split('T', 1)[0] # Hack to support dates <1900 return '' if input_date == "0101-01-01" else input_date diff --git a/cps/opds.py b/cps/opds.py index 78d5d8ed..ac0e103b 100644 --- a/cps/opds.py +++ b/cps/opds.py @@ -100,7 +100,7 @@ def feed_normal_search(): @requires_basic_auth_if_no_ano def feed_new(): off = request.args.get("offset") or 0 - entries, __, pagination = calibre_db.fill_indexpage((int(off) / (int(config.config_books_per_page)) + 1), + entries, __, pagination = calibre_db.fill_indexpage((int(off) / (int(config.config_books_per_page)) + 1), 0, db.Books, True, [db.Books.timestamp.desc()]) return render_xml_template('feed.xml', entries=entries, pagination=pagination) @@ -118,7 +118,7 @@ def feed_discover(): @requires_basic_auth_if_no_ano def feed_best_rated(): off = request.args.get("offset") or 0 - entries, __, pagination = calibre_db.fill_indexpage((int(off) / (int(config.config_books_per_page)) + 1), + entries, __, pagination = calibre_db.fill_indexpage((int(off) / (int(config.config_books_per_page)) + 1), 0, db.Books, db.Books.ratings.any(db.Ratings.rating > 9), [db.Books.timestamp.desc()]) return render_xml_template('feed.xml', entries=entries, pagination=pagination) @@ -164,7 +164,7 @@ def feed_authorindex(): @requires_basic_auth_if_no_ano def feed_author(book_id): off = request.args.get("offset") or 0 - 
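
The url_for_other_page() change in the jinjia.py hunk above makes pagination links carry the current query string along, so sort and filter parameters survive a page flip. A functionally equivalent sketch (args.update() replaces the explicit loop):

    from flask import request, url_for

    def url_for_other_page(page):
        args = request.view_args.copy()
        args['page'] = page
        args.update(request.args.to_dict())   # keep e.g. ?sort=new across pages
        return url_for(request.endpoint, **args)
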
entries, __, pagination = calibre_db.fill_indexpage((int(off) / (int(config.config_books_per_page)) + 1), + entries, __, pagination = calibre_db.fill_indexpage((int(off) / (int(config.config_books_per_page)) + 1), 0, db.Books, db.Books.authors.any(db.Authors.id == book_id), [db.Books.timestamp.desc()]) @@ -190,7 +190,7 @@ def feed_publisherindex(): @requires_basic_auth_if_no_ano def feed_publisher(book_id): off = request.args.get("offset") or 0 - entries, __, pagination = calibre_db.fill_indexpage((int(off) / (int(config.config_books_per_page)) + 1), + entries, __, pagination = calibre_db.fill_indexpage((int(off) / (int(config.config_books_per_page)) + 1), 0, db.Books, db.Books.publishers.any(db.Publishers.id == book_id), [db.Books.timestamp.desc()]) @@ -218,7 +218,7 @@ def feed_categoryindex(): @requires_basic_auth_if_no_ano def feed_category(book_id): off = request.args.get("offset") or 0 - entries, __, pagination = calibre_db.fill_indexpage((int(off) / (int(config.config_books_per_page)) + 1), + entries, __, pagination = calibre_db.fill_indexpage((int(off) / (int(config.config_books_per_page)) + 1), 0, db.Books, db.Books.tags.any(db.Tags.id == book_id), [db.Books.timestamp.desc()]) @@ -245,7 +245,7 @@ def feed_seriesindex(): @requires_basic_auth_if_no_ano def feed_series(book_id): off = request.args.get("offset") or 0 - entries, __, pagination = calibre_db.fill_indexpage((int(off) / (int(config.config_books_per_page)) + 1), + entries, __, pagination = calibre_db.fill_indexpage((int(off) / (int(config.config_books_per_page)) + 1), 0, db.Books, db.Books.series.any(db.Series.id == book_id), [db.Books.series_index]) @@ -276,7 +276,7 @@ def feed_ratingindex(): @requires_basic_auth_if_no_ano def feed_ratings(book_id): off = request.args.get("offset") or 0 - entries, __, pagination = calibre_db.fill_indexpage((int(off) / (int(config.config_books_per_page)) + 1), + entries, __, pagination = calibre_db.fill_indexpage((int(off) / (int(config.config_books_per_page)) + 1), 0, db.Books, db.Books.ratings.any(db.Ratings.id == book_id), [db.Books.timestamp.desc()]) @@ -304,7 +304,7 @@ def feed_formatindex(): @requires_basic_auth_if_no_ano def feed_format(book_id): off = request.args.get("offset") or 0 - entries, __, pagination = calibre_db.fill_indexpage((int(off) / (int(config.config_books_per_page)) + 1), + entries, __, pagination = calibre_db.fill_indexpage((int(off) / (int(config.config_books_per_page)) + 1), 0, db.Books, db.Books.data.any(db.Data.format == book_id.upper()), [db.Books.timestamp.desc()]) @@ -338,7 +338,7 @@ def feed_languagesindex(): @requires_basic_auth_if_no_ano def feed_languages(book_id): off = request.args.get("offset") or 0 - entries, __, pagination = calibre_db.fill_indexpage((int(off) / (int(config.config_books_per_page)) + 1), + entries, __, pagination = calibre_db.fill_indexpage((int(off) / (int(config.config_books_per_page)) + 1), 0, db.Books, db.Books.languages.any(db.Languages.id == book_id), [db.Books.timestamp.desc()]) @@ -408,7 +408,7 @@ def get_metadata_calibre_companion(uuid, library): def feed_search(term): if term: - entries = calibre_db.get_search_results(term) + entries, __ = calibre_db.get_search_results(term) entriescount = len(entries) if len(entries) > 0 else 1 pagination = Pagination(1, entriescount, entriescount) return render_xml_template('feed.xml', searchterm=term, entries=entries, pagination=pagination) diff --git a/cps/server.py b/cps/server.py index d5e88587..de98fc6e 100644 --- a/cps/server.py +++ b/cps/server.py @@ -212,9 +212,6 @@ class 
WebServer(object): def stop(self, restart=False): from . import updater_thread updater_thread.stop() - from . import calibre_db - calibre_db.stop() - log.info("webserver stop (restart=%s)", restart) self.restart = restart diff --git a/cps/services/worker.py b/cps/services/worker.py new file mode 100644 index 00000000..c2ea594c --- /dev/null +++ b/cps/services/worker.py @@ -0,0 +1,220 @@ + +from __future__ import division, print_function, unicode_literals +import threading +import abc +import uuid +import time + +try: + import queue +except ImportError: + import Queue as queue +from datetime import datetime +from collections import namedtuple + +from cps import logger + +log = logger.create() + +# task 'status' consts +STAT_WAITING = 0 +STAT_FAIL = 1 +STAT_STARTED = 2 +STAT_FINISH_SUCCESS = 3 + +# Only retain this many tasks in dequeued list +TASK_CLEANUP_TRIGGER = 20 + +QueuedTask = namedtuple('QueuedTask', 'num, user, added, task') + + +def _get_main_thread(): + for t in threading.enumerate(): + if t.__class__.__name__ == '_MainThread': + return t + raise Exception("main thread not found?!") + + + +class ImprovedQueue(queue.Queue): + def to_list(self): + """ + Returns a copy of all items in the queue without removing them. + """ + + with self.mutex: + return list(self.queue) + +#Class for all worker tasks in the background +class WorkerThread(threading.Thread): + _instance = None + + @classmethod + def getInstance(cls): + if cls._instance is None: + cls._instance = WorkerThread() + return cls._instance + + def __init__(self): + threading.Thread.__init__(self) + + self.dequeued = list() + + self.doLock = threading.Lock() + self.queue = ImprovedQueue() + self.num = 0 + self.start() + + @classmethod + def add(cls, user, task): + ins = cls.getInstance() + ins.num += 1 + ins.queue.put(QueuedTask( + num=ins.num, + user=user, + added=datetime.now(), + task=task, + )) + + @property + def tasks(self): + with self.doLock: + tasks = self.queue.to_list() + self.dequeued + return sorted(tasks, key=lambda x: x.num) + + def cleanup_tasks(self): + with self.doLock: + dead = [] + alive = [] + for x in self.dequeued: + (dead if x.task.dead else alive).append(x) + + # if the ones that we need to keep are within the trigger, do nothing else + delta = len(self.dequeued) - len(dead) + if delta > TASK_CLEANUP_TRIGGER: + ret = alive + else: + # otherwise, lop off the oldest dead tasks until we hit the target trigger + ret = sorted(dead, key=lambda x: x.task.end_time)[-TASK_CLEANUP_TRIGGER:] + alive + + self.dequeued = sorted(ret, key=lambda x: x.num) + + # Main thread loop starting the different tasks + def run(self): + main_thread = _get_main_thread() + while main_thread.is_alive(): + try: + # this blocks until something is available. This can cause issues when the main thread dies - this + # thread will remain alive. We implement a timeout to unblock every second which allows us to check if + # the main thread is still alive. 
+ # We don't use a daemon here because we don't want the tasks to just be abruptly halted, leading to + # possible file / database corruption + item = self.queue.get(timeout=1) + except queue.Empty: + time.sleep(1) + continue + + with self.doLock: + # add to list so that in-progress tasks show up + self.dequeued.append(item) + + # once we hit our trigger, start cleaning up dead tasks + if len(self.dequeued) > TASK_CLEANUP_TRIGGER: + self.cleanup_tasks() + + # sometimes tasks (like Upload) don't actually have work to do and are created as already finished + if item.task.stat is STAT_WAITING: + # CalibreTask.start() should wrap all exceptions in its own error handling + item.task.start(self) + + self.queue.task_done() + + +class CalibreTask: + __metaclass__ = abc.ABCMeta + + def __init__(self, message): + self._progress = 0 + self.stat = STAT_WAITING + self.error = None + self.start_time = None + self.end_time = None + self.message = message + self.id = uuid.uuid4() + + @abc.abstractmethod + def run(self, worker_thread): + """The main entry point of the task; concrete tasks implement their actual work here""" + raise NotImplementedError + + @abc.abstractmethod + def name(self): + """Provides the caller some human-readable name for this class""" + raise NotImplementedError + + def start(self, *args): + self.start_time = datetime.now() + self.stat = STAT_STARTED + + # catch any unhandled exceptions in a task and automatically fail it + try: + self.run(*args) + except Exception as e: + self._handleError(str(e)) + log.exception(e) + + self.end_time = datetime.now() + + @property + def stat(self): + return self._stat + + @stat.setter + def stat(self, x): + self._stat = x + + @property + def progress(self): + return self._progress + + @progress.setter + def progress(self, x): + if not 0 <= x <= 1: + raise ValueError("Task progress should be within the [0, 1] range") + self._progress = x + + @property + def error(self): + return self._error + + @error.setter + def error(self, x): + self._error = x + + @property + def runtime(self): + return (self.end_time or datetime.now()) - self.start_time + + @property + def dead(self): + """Determines whether or not this task can be garbage collected + + We have a separate property dictating this because certain tasks may want to override it + """ + # By default, we're good to clean a task if it's "Done" + return self.stat in (STAT_FINISH_SUCCESS, STAT_FAIL) + + def _handleError(self, error_message): + log.exception(error_message) + self.stat = STAT_FAIL + self.progress = 1 + self.error = error_message + + def _handleSuccess(self): + self.stat = STAT_FINISH_SUCCESS + self.progress = 1
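For reference, the intended use of the new worker API is roughly the following sketch; DummyTask, its message string, and the 'admin' user name are illustrative only, not part of this change. A concrete task subclasses CalibreTask, implements run() plus a human-readable name, reports progress through the validating setter, and is enqueued on the WorkerThread singleton:

    from cps.services.worker import CalibreTask, WorkerThread

    class DummyTask(CalibreTask):
        """Illustrative task that 'works' in ten steps."""

        def run(self, worker_thread):
            for step in range(10):
                self.progress = (step + 1) / 10.0  # setter enforces the [0, 1] range
            self._handleSuccess()

        @property
        def name(self):
            return "Dummy"

    # The singleton thread dequeues and starts the task;
    # WorkerThread.getInstance().tasks lists queued plus dequeued tasks for the status page.
    WorkerThread.add('admin', DummyTask("ten-step demo"))

diff --git a/cps/shelf.py b/cps/shelf.py index 19a350d5..ea7f1eeb 100644 --- a/cps/shelf.py +++ b/cps/shelf.py @@ -29,7 +29,7 @@ from flask_login import login_required, current_user from sqlalchemy.sql.expression import func from sqlalchemy.exc import OperationalError, InvalidRequestError -from . import logger, ub, searched_ids, calibre_db +from . 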
import logger, ub, calibre_db from .web import login_required_if_no_ano, render_title_template @@ -124,18 +124,18 @@ def search_to_shelf(shelf_id): flash(_(u"You are not allowed to add a book to the the shelf: %(name)s", name=shelf.name), category="error") return redirect(url_for('web.index')) - if current_user.id in searched_ids and searched_ids[current_user.id]: + if current_user.id in ub.searched_ids and ub.searched_ids[current_user.id]: books_for_shelf = list() books_in_shelf = ub.session.query(ub.BookShelf).filter(ub.BookShelf.shelf == shelf_id).all() if books_in_shelf: book_ids = list() for book_id in books_in_shelf: book_ids.append(book_id.book_id) - for searchid in searched_ids[current_user.id]: + for searchid in ub.searched_ids[current_user.id]: if searchid not in book_ids: books_for_shelf.append(searchid) else: - books_for_shelf = searched_ids[current_user.id] + books_for_shelf = ub.searched_ids[current_user.id] if not books_for_shelf: log.error("Books are already part of %s", shelf) diff --git a/cps/static/css/caliBlur.min.css b/cps/static/css/caliBlur.min.css index 6b056f95..9447cd18 100644 --- a/cps/static/css/caliBlur.min.css +++ b/cps/static/css/caliBlur.min.css @@ -585,7 +585,7 @@ div.btn-group[role=group][aria-label="Download, send to Kindle, reading"] > .dow border-left: 2px solid rgba(0, 0, 0, .15) } -div[aria-label="Edit/Delete book"] > .btn-warning { +div[aria-label="Edit/Delete book"] > .btn { width: 50px; height: 60px; margin: 0; @@ -600,7 +600,7 @@ div[aria-label="Edit/Delete book"] > .btn-warning { color: transparent } -div[aria-label="Edit/Delete book"] > .btn-warning > span { +div[aria-label="Edit/Delete book"] > .btn > span { visibility: visible; position: relative; display: inline-block; @@ -616,7 +616,7 @@ div[aria-label="Edit/Delete book"] > .btn-warning > span { margin: auto } -div[aria-label="Edit/Delete book"] > .btn-warning > span:before { +div[aria-label="Edit/Delete book"] > .btn > span:before { content: "\EA5d"; font-family: plex-icons; font-size: 20px; @@ -625,7 +625,7 @@ div[aria-label="Edit/Delete book"] > .btn-warning > span:before { height: 60px } -div[aria-label="Edit/Delete book"] > .btn-warning > span:hover { +div[aria-label="Edit/Delete book"] > .btn > span:hover { color: #fff } @@ -1939,7 +1939,9 @@ body > div.container-fluid > div > div.col-sm-10 > div.discover > form > .btn.bt z-index: 99999 } -.pagination:after, body > div.container-fluid > div > div.col-sm-10 > div.pagination > a.next, body > div.container-fluid > div > div.col-sm-10 > div.pagination > a.previous { +body > div.container-fluid > div > div.col-sm-10 > div.pagination .page-next > a, +body > div.container-fluid > div > div.col-sm-10 > div.pagination .page-previous > a +{ top: 0; font-family: plex-icons-new; font-weight: 100; @@ -1947,7 +1949,8 @@ body > div.container-fluid > div > div.col-sm-10 > div.discover > form > .btn.bt line-height: 60px; height: 60px; font-style: normal; - -moz-osx-font-smoothing: grayscale + -moz-osx-font-smoothing: grayscale; + overflow: hidden; } .pagination > a { @@ -1967,68 +1970,46 @@ body > div.container-fluid > div > div.col-sm-10 > div.discover > form > .btn.bt color: #fff !important } -body > div.container-fluid > div > div.col-sm-10 > div.pagination > a, body > div.container-fluid > div > div.col-sm-10 > div.pagination > a.previous + a, body > div.container-fluid > div > div.col-sm-10 > div.pagination > a[href*=page] { +body > div.container-fluid > div > div.col-sm-10 > div.pagination > .page-item:not(.page-next):not(.page-previous) +{ 
display: none } -body > div.container-fluid > div > div.col-sm-10 > div.pagination > a.next, body > div.container-fluid > div > div.col-sm-10 > div.pagination > a.previous { +body > div.container-fluid > div > div.col-sm-10 > div.pagination > .page-next > a, +body > div.container-fluid > div > div.col-sm-10 > div.pagination > .page-previous > a { color: transparent; + background-color:transparent; margin-left: 0; width: 65px; padding: 0; font-size: 15px; - position: absolute; - display: block !important + display: block !important; + border: none; } -body > div.container-fluid > div > div.col-sm-10 > div.pagination > a.next { - right: 0 -} - -body > div.container-fluid > div > div.col-sm-10 > div.pagination > a.previous { - right: 65px -} - -body > div.container-fluid > div > div.col-sm-10 > div.pagination > a.next:before { - content: "\EA32"; +body > div.container-fluid > div > div.col-sm-10 > div.pagination > .page-next > a:before, +body > div.container-fluid > div > div.col-sm-10 > div.pagination > .page-previous > a:before { visibility: visible; color: hsla(0, 0%, 100%, .35); height: 60px; line-height: 60px; border-left: 2px solid transparent; font-size: 20px; - padding: 20px 0 20px 20px; - margin-right: -27px + padding: 20px 25px; + margin-right: -27px; } -body > div.container-fluid > div > div.col-sm-10 > div.pagination > a.previous:before { - content: "\EA33"; - visibility: visible; - color: hsla(0, 0%, 100%, .65); - height: 60px; - line-height: 60px; - font-size: 20px; - padding: 20px 25px -} - -body > div.container-fluid > div > div.col-sm-10 > div.pagination > a.next:hover:before, body > div.container-fluid > div > div.col-sm-10 > div.pagination > a.previous:hover:before { - color: #fff -} - -.pagination > strong { - display: none -} - -.pagination:after { +body > div.container-fluid > div > div.col-sm-10 > div.pagination > .page-next > a:before { content: "\EA32"; - position: relative; - right: 0; - display: inline-block; - color: hsla(0, 0%, 100%, .55); - font-size: 20px; - padding: 0 23px; - margin-left: 20px; - z-index: -1 +} + +body > div.container-fluid > div > div.col-sm-10 > div.pagination > .page-previous > a:before { + content: "\EA33"; +} + +body > div.container-fluid > div > div.col-sm-10 > div.pagination > .page-next > a:hover:before, +body > div.container-fluid > div > div.col-sm-10 > div.pagination > .page-previous > a:hover:before { + color: #fff } .pagination > .ellipsis, .pagination > a:nth-last-of-type(2) { diff --git a/cps/static/css/caliBlur_override.css b/cps/static/css/caliBlur_override.css index 05a7c0d8..7f940212 100644 --- a/cps/static/css/caliBlur_override.css +++ b/cps/static/css/caliBlur_override.css @@ -5,7 +5,7 @@ body.serieslist.grid-view div.container-fluid>div>div.col-sm-10:before{ .cover .badge{ position: absolute; top: 0; - right: 0; + left: 0; background-color: #cc7b19; border-radius: 0; padding: 0 8px; diff --git a/cps/static/css/style.css b/cps/static/css/style.css index 4d8b4805..e2cd4ec7 100644 --- a/cps/static/css/style.css +++ b/cps/static/css/style.css @@ -51,7 +51,22 @@ body h2 { color:#444; } -a { color: #45b29d; } +a, .danger,.book-remove, .editable-empty, .editable-empty:hover { color: #45b29d; } + +.book-remove:hover { color: #23527c; } + +.btn-default a { color: #444; } + +.btn-default a:hover { + color: #45b29d; + text-decoration: None; +} + +.btn-default:hover { + color: #45b29d; +} + +.editable-click, a.editable-click, a.editable-click:hover { border-bottom: None; } .navigation .nav-head { text-transform: uppercase; @@ -63,6 
+78,7 @@ a { color: #45b29d; } border-top: 1px solid #ccc; padding-top: 20px; } + .navigation li a { color: #444; text-decoration: none; diff --git a/cps/static/js/archive/archive.js b/cps/static/js/archive/archive.js index cb76321f..06c05624 100644 --- a/cps/static/js/archive/archive.js +++ b/cps/static/js/archive/archive.js @@ -411,6 +411,19 @@ bitjs.archive = bitjs.archive || {}; return "unrar.js"; }; + /** + * Unrarrer5 + * @extends {bitjs.archive.Unarchiver} + * @constructor + */ + bitjs.archive.Unrarrer5 = function(arrayBuffer, optPathToBitJS) { + bitjs.base(this, arrayBuffer, optPathToBitJS); + }; + bitjs.inherits(bitjs.archive.Unrarrer5, bitjs.archive.Unarchiver); + bitjs.archive.Unrarrer5.prototype.getScriptFileName = function() { + return "unrar5.js"; + }; + /** * Untarrer * @extends {bitjs.archive.Unarchiver} * @constructor diff --git a/cps/static/js/archive/unrar.js b/cps/static/js/archive/unrar.js index fadb791e..3e2a45af 100644 --- a/cps/static/js/archive/unrar.js +++ b/cps/static/js/archive/unrar.js @@ -14,10 +14,10 @@ /* global VM_FIXEDGLOBALSIZE, VM_GLOBALMEMSIZE, MAXWINMASK, VM_GLOBALMEMADDR, MAXWINSIZE */ // This file expects to be invoked as a Worker (see onmessage below). -importScripts("../io/bitstream.js"); +/*importScripts("../io/bitstream.js"); importScripts("../io/bytebuffer.js"); importScripts("archive.js"); -importScripts("rarvm.js"); +importScripts("rarvm.js");*/ // Progress variables. var currentFilename = ""; @@ -29,19 +29,21 @@ var totalFilesInArchive = 0; // Helper functions. var info = function(str) { - postMessage(new bitjs.archive.UnarchiveInfoEvent(str)); + console.log(str); + // postMessage(new bitjs.archive.UnarchiveInfoEvent(str)); }; var err = function(str) { - postMessage(new bitjs.archive.UnarchiveErrorEvent(str)); + console.log(str); + // postMessage(new bitjs.archive.UnarchiveErrorEvent(str)); }; var postProgress = function() { - postMessage(new bitjs.archive.UnarchiveProgressEvent( + /*postMessage(new bitjs.archive.UnarchiveProgressEvent( currentFilename, currentFileNumber, currentBytesUnarchivedInFile, currentBytesUnarchived, totalUncompressedBytesInArchive, - totalFilesInArchive)); + totalFilesInArchive));*/ }; // shows a byte value as its hex representation @@ -1298,7 +1300,7 @@ var unrar = function(arrayBuffer) { totalUncompressedBytesInArchive = 0; totalFilesInArchive = 0; - postMessage(new bitjs.archive.UnarchiveStartEvent()); + //postMessage(new bitjs.archive.UnarchiveStartEvent()); var bstream = new bitjs.io.BitStream(arrayBuffer, false /* rtl */); var header = new RarVolumeHeader(bstream); @@ -1348,7 +1350,7 @@ var unrar = function(arrayBuffer) { localfile.unrar(); if (localfile.isValid) { - postMessage(new bitjs.archive.UnarchiveExtractEvent(localfile)); + // postMessage(new bitjs.archive.UnarchiveExtractEvent(localfile)); postProgress(); } } @@ -1358,7 +1360,7 @@ var unrar = function(arrayBuffer) { } else { err("Invalid RAR file"); } - postMessage(new bitjs.archive.UnarchiveFinishEvent()); + // postMessage(new bitjs.archive.UnarchiveFinishEvent()); }; // event.data.file has the ArrayBuffer. diff --git a/cps/static/js/archive/unrar5.js b/cps/static/js/archive/unrar5.js new file mode 100644 index 00000000..452989c0 --- /dev/null +++ b/cps/static/js/archive/unrar5.js @@ -0,0 +1,1371 @@ +/** + * unrar5.js + * + * Licensed under the MIT License + * + * Copyright(c) 2011 Google Inc. 
+ * Copyright(c) 2011 antimatter15 + * + * Reference Documentation: + * + * http://kthoom.googlecode.com/hg/docs/unrar.html + */ +/* global bitjs, importScripts, RarVM, Uint8Array, UnpackFilter */ +/* global VM_FIXEDGLOBALSIZE, VM_GLOBALMEMSIZE, MAXWINMASK, VM_GLOBALMEMADDR, MAXWINSIZE */ + +// This file expects to be invoked as a Worker (see onmessage below). +/*importScripts("../io/bitstream.js"); +importScripts("../io/bytebuffer.js"); +importScripts("archive.js"); +importScripts("rarvm.js");*/ + +// Progress variables. +var currentFilename = ""; +var currentFileNumber = 0; +var currentBytesUnarchivedInFile = 0; +var currentBytesUnarchived = 0; +var totalUncompressedBytesInArchive = 0; +var totalFilesInArchive = 0; + +// Helper functions. +var info = function(str) { + console.log(str); + //postMessage(new bitjs.archive.UnarchiveInfoEvent(str)); +}; +var err = function(str) { + console.log(str); + //postMessage(new bitjs.archive.UnarchiveErrorEvent(str)); +}; +var postProgress = function() { + /*postMessage(new bitjs.archive.UnarchiveProgressEvent( + currentFilename, + currentFileNumber, + currentBytesUnarchivedInFile, + currentBytesUnarchived, + totalUncompressedBytesInArchive, + totalFilesInArchive));*/ +}; + +// shows a byte value as its hex representation +var nibble = "0123456789ABCDEF"; +var byteValueToHexString = function(num) { + return nibble[num >> 4] + nibble[num & 0xF]; +}; +var twoByteValueToHexString = function(num) { + return nibble[(num >> 12) & 0xF] + nibble[(num >> 8) & 0xF] + nibble[(num >> 4) & 0xF] + nibble[num & 0xF]; +}; + + +// Volume Types +var MAIN_HEAD = 0x01, + ENCRYPT_HEAD = 0x04, + FILE_HEAD = 0x02, + SERVICE_HEAD = 0x03, + // COMM_HEAD = 0x75, + // AV_HEAD = 0x76, + // SUB_HEAD = 0x77, + // PROTECT_HEAD = 0x78, + // SIGN_HEAD = 0x79, + // NEWSUB_HEAD = 0x7a, + ENDARC_HEAD = 0x05; + +// ============================================================================================== // + +var RarMainVolumeHeader = function(bstream) { + var headPos = bstream.bytePtr; + // byte 1,2 + info("Rar Volume Header @" + bstream.bytePtr); + + this.crc = bstream.readBits(16); + info(" crc=" + this.crc); + + // byte 3 + this.headType = bstream.readBits(8); + info(" headType=" + this.headType); + + // Get flags + // bytes 4,5 + this.flags = {}; + this.flags.value = bstream.readBits(16); + + // byte 6 + this.headSize = bstream.readBits(8); + // byte 7 + if (bstream.readBits(8) === 1) { + info(" RarVersion=5"); + } + // byte 8 + bstream.readBits(8); +}; + +var vint = function(bstream) { + var size = 0; + var result = 0; + var loop = 0; + do { + size = bstream.readBits(8); + result |= (size & 0x7F) << (loop * 7); + loop++; + } while (size & 0x80); + return result; +}; + +/** + * @param {bitjs.io.BitStream} bstream + * @constructor + */ +var RarVolumeHeader = function(bstream) { + var headPos = bstream.bytePtr; + // byte 1,2 + info("Rar Volume Header @" + bstream.bytePtr); + + this.crc = bstream.readBits(32); + info(" crc=" + this.crc); + + // byte 3 + x Header size + this.headSize = vint(bstream); + info(" Header Size=" + this.headSize); + + // byte 4 + this.headType = bstream.readBits(8); + info(" headType=" + this.headType); + + // Get Header flags + this.headFlags = {}; + this.headFlags.value = bstream.peekBits(8); + + info(" Header flags=" + byteValueToHexString(this.headFlags.value)); + this.headFlags.EXTRA_AREA = !!bstream.readBits(1); + this.headFlags.DATA_AREA = !!bstream.readBits(1); + this.headFlags.UNKNOWN = !!bstream.readBits(1); + this.headFlags.CONTINUE_FROM = !!bstream.readBits(1);
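The vint() helper above implements RAR5's variable-length integers: each byte contributes seven payload bits, least-significant group first, and the high bit marks continuation. For reviewers, a standalone sketch of the same decoding in Python (read_vint and the sample bytes are illustrative, not part of this change):

    def read_vint(data, pos=0):
        # 7 payload bits per byte, least-significant group first;
        # bit 7 set means another byte follows.
        result = 0
        shift = 0
        while True:
            byte = data[pos]
            pos += 1
            result |= (byte & 0x7F) << shift
            shift += 7
            if not byte & 0x80:
                return result, pos

    # 0xA2 0x2C -> 0x22 | (0x2C << 7) == 0x1622, two bytes consumed
    assert read_vint(bytearray([0xA2, 0x2C])) == (0x1622, 2)
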
+ this.headFlags.CONTNUE_TO = !!bstream.readBits(1); + this.headFlags.DEPENDS = !!bstream.readBits(1); + this.headFlags.CHILDBLOCK = !!bstream.readBits(1); + bstream.readBits(1); // unused + + // Get extra AreaSize + if (this.headFlags.EXTRA_AREA) { + this.extraSize = vint(bstream); + } else { + this.extraSize = 0; + } + if (this.headFlags.DATA_AREA && (this.headType == FILE_HEAD || this.headType == SERVICE_HEAD)) { + this.packSize = vint(bstream); + // this.packSize = bstream.readBits(32); + } + this.flags = {}; + this.flags.value = bstream.peekBits(8); + + switch (this.headType) { + case MAIN_HEAD: + // this.flags = {}; + // this.flags.value = bstream.peekBits(16); + this.flags.MHD_VOLUME = !!bstream.readBits(1); + this.flags.MHD_VOLUMNE_NO = !!bstream.readBits(1); + this.flags.MHD_SOLID = !!bstream.readBits(1); + this.flags.MHD_RECOVERY = !!bstream.readBits(1); + this.flags.MHD_LOCKED = !!bstream.readBits(1); + bstream.readBits(3); // unused + if (this.flags.MHD_VOLUMNE_NO) { + this.volumeNumber = vint(bstream); + } + bstream.readBytes(this.extraSize); + // break; + return; // Main Header finally parsed + // ------------ + case FILE_HEAD: + case SERVICE_HEAD: + this.flags.DIRECTORY = !!bstream.readBits(1); + this.flags.TIME = !!bstream.readBits(1); + this.flags.CRC = !!bstream.readBits(1); + this.flags.UNPACK_UNKNOWN = !!bstream.readBits(1); + bstream.readBits(4); + + if (this.flags.UNPACK_UNKNOWN) { + vint(bstream); + } else { + this.unpackedSize = vint(bstream); + } + this.fileAttr = vint(bstream); + if (this.flags.TIME) { + this.fileTime = bstream.readBits(32); + } + if (this.flags.CRC) { + this.fileCRC = bstream.readBits(32); + } + // var compInfo = vint(bstream); + this.unpVer = bstream.readBits(6); + this.solid = bstream.readBits(1); + bstream.readBits(1); + this.method = bstream.readBits(3); + this.dictSize = bstream.readBits(4); + bstream.readBits(1); + this.hostOS = vint(bstream); + this.nameSize = vint(bstream); + + this.filename = bstream.readBytes(this.nameSize); + var _s = ""; + for (var _i = 0; _i < this.filename.length; _i++) { + _s += String.fromCharCode(this.filename[_i]); + } + + this.filename = _s; + bstream.readBytes(this.extraSize); + break; + + default: + info("Found a header of type 0x" + byteValueToHexString(this.headType)); + // skip the rest of the header bytes (for now) + bstream.readBytes(this.headSize - 7); + break; + } +}; + +//var BLOCK_LZ = 0; + +var rLDecode = [0, 1, 2, 3, 4, 5, 6, 7, 8, 10, 12, 14, 16, 20, 24, 28, 32, 40, 48, 56, 64, 80, 96, 112, 128, 160, 192, 224], + rLBits = [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5], + rDBitLengthCounts = [4, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 14, 0, 12], + rSDDecode = [0, 4, 8, 16, 32, 64, 128, 192], + rSDBits = [2, 2, 3, 4, 5, 6, 6, 6]; + +var rDDecode = [0, 1, 2, 3, 4, 6, 8, 12, 16, 24, 32, + 48, 64, 96, 128, 192, 256, 384, 512, 768, 1024, 1536, 2048, 3072, + 4096, 6144, 8192, 12288, 16384, 24576, 32768, 49152, 65536, 98304, + 131072, 196608, 262144, 327680, 393216, 458752, 524288, 589824, + 655360, 720896, 786432, 851968, 917504, 983040 +]; + +var rDBits = [0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, + 5, 6, 6, 7, 7, 8, 8, 9, 9, 10, 10, 11, 11, 12, 12, 13, 13, 14, 14, + 15, 15, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16 +]; + +var rLowDistRepCount = 16; + +var rNC = 299, + rDC = 60, + rLDC = 17, + rRC = 28, + rBC = 20, + rHuffTableSize = (rNC + rDC + rRC + rLDC); + +//var UnpBlockType = BLOCK_LZ; +var UnpOldTable = new 
Array(rHuffTableSize); + +var BD = { //bitdecode + DecodeLen: new Array(16), + DecodePos: new Array(16), + DecodeNum: new Array(rBC) +}; +var LD = { //litdecode + DecodeLen: new Array(16), + DecodePos: new Array(16), + DecodeNum: new Array(rNC) +}; +var DD = { //distdecode + DecodeLen: new Array(16), + DecodePos: new Array(16), + DecodeNum: new Array(rDC) +}; +var LDD = { //low dist decode + DecodeLen: new Array(16), + DecodePos: new Array(16), + DecodeNum: new Array(rLDC) +}; +var RD = { //rep decode + DecodeLen: new Array(16), + DecodePos: new Array(16), + DecodeNum: new Array(rRC) +}; + +/** + * @type {Array} + */ +var rOldBuffers = []; + +/** + * The current buffer we are unpacking to. + * @type {bitjs.io.ByteBuffer} + */ +var rBuffer; + +/** + * The buffer of the final bytes after filtering (only used in Unpack29). + * @type {bitjs.io.ByteBuffer} + */ +var wBuffer; + +var lowDistRepCount = 0; +var prevLowDist = 0; + +var rOldDist = [0, 0, 0, 0]; +var lastDist; +var lastLength; + +/** + * In unpack.cpp, UnpPtr keeps track of what bytes have been unpacked + * into the Window buffer and WrPtr keeps track of what bytes have been + * actually written to disk after the unpacking and optional filtering + * has been done. + * + * In our case, rBuffer is the buffer for the unpacked bytes and wBuffer is + * the final output bytes. + */ + + +/** + * Read in Huffman tables for RAR + * @param {bitjs.io.BitStream} bstream + */ +function rarReadTables(bstream) { + var BitLength = new Array(rBC); + var Table = new Array(rHuffTableSize); + var i; + // before we start anything we need to get byte-aligned + bstream.readBits((8 - bstream.bitPtr) & 0x7); + + if (bstream.readBits(1)) { + info("Error! PPM not implemented yet"); + return; + } + + if (!bstream.readBits(1)) { //discard old table + for (i = UnpOldTable.length; i--;) { + UnpOldTable[i] = 0; + } + } + + // read in bit lengths + for (var I = 0; I < rBC; ++I) { + var Length = bstream.readBits(4); + if (Length === 15) { + var ZeroCount = bstream.readBits(4); + if (ZeroCount === 0) { + BitLength[I] = 15; + } else { + ZeroCount += 2; + while (ZeroCount-- > 0 && I < rBC) { + BitLength[I++] = 0; + } + --I; + } + } else { + BitLength[I] = Length; + } + } + + // now all 20 bit lengths are obtained, we construct the Huffman Table: + + rarMakeDecodeTables(BitLength, 0, BD, rBC); + + var TableSize = rHuffTableSize; + //console.log(DecodeLen, DecodePos, DecodeNum); + for (i = 0; i < TableSize;) { + var N; + var num = rarDecodeNumber(bstream, BD); + if (num < 16) { + Table[i] = (num + UnpOldTable[i]) & 0xf; + i++; + } else if (num < 18) { + N = (num === 16) ? (bstream.readBits(3) + 3) : (bstream.readBits(7) + 11); + + while (N-- > 0 && i < TableSize) { + Table[i] = Table[i - 1]; + i++; + } + } else { + N = (num === 18) ? (bstream.readBits(3) + 3) : (bstream.readBits(7) + 11); + + while (N-- > 0 && i < TableSize) { + Table[i++] = 0; + } + } + } + + rarMakeDecodeTables(Table, 0, LD, rNC); + rarMakeDecodeTables(Table, rNC, DD, rDC); + rarMakeDecodeTables(Table, rNC + rDC, LDD, rLDC); + rarMakeDecodeTables(Table, rNC + rDC + rLDC, RD, rRC); + + for (i = UnpOldTable.length; i--;) { + UnpOldTable[i] = Table[i]; + } + return true; +} + + +function rarDecodeNumber(bstream, dec) { + var DecodeLen = dec.DecodeLen, + DecodePos = dec.DecodePos, + DecodeNum = dec.DecodeNum; + var bitField = bstream.getBits() & 0xfffe; + //some sort of rolled out binary search + var bits = ((bitField < DecodeLen[8]) ? + ((bitField < DecodeLen[4]) ? + ((bitField < DecodeLen[2]) ? 
+ ((bitField < DecodeLen[1]) ? 1 : 2) : + ((bitField < DecodeLen[3]) ? 3 : 4)) : + (bitField < DecodeLen[6]) ? + ((bitField < DecodeLen[5]) ? 5 : 6) : + ((bitField < DecodeLen[7]) ? 7 : 8)) : + ((bitField < DecodeLen[12]) ? + ((bitField < DecodeLen[10]) ? + ((bitField < DecodeLen[9]) ? 9 : 10) : + ((bitField < DecodeLen[11]) ? 11 : 12)) : + (bitField < DecodeLen[14]) ? + ((bitField < DecodeLen[13]) ? 13 : 14) : + 15)); + bstream.readBits(bits); + var N = DecodePos[bits] + ((bitField - DecodeLen[bits - 1]) >>> (16 - bits)); + + return DecodeNum[N]; +} + + +function rarMakeDecodeTables(BitLength, offset, dec, size) { + var DecodeLen = dec.DecodeLen; + var DecodePos = dec.DecodePos; + var DecodeNum = dec.DecodeNum; + var LenCount = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]; + var TmpPos = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]; + var N = 0; + var M = 0; + var i; + for (i = DecodeNum.length; i--;) { + DecodeNum[i] = 0; + } + for (i = 0; i < size; i++) { + LenCount[BitLength[i + offset] & 0xF]++; + } + LenCount[0] = 0; + TmpPos[0] = 0; + DecodePos[0] = 0; + DecodeLen[0] = 0; + + var I; + for (I = 1; I < 16; ++I) { + N = 2 * (N + LenCount[I]); + M = (N << (15 - I)); + if (M > 0xFFFF) { + M = 0xFFFF; + } + DecodeLen[I] = M; + DecodePos[I] = DecodePos[I - 1] + LenCount[I - 1]; + TmpPos[I] = DecodePos[I]; + } + for (I = 0; I < size; ++I) { + if (BitLength[I + offset] !== 0) { + DecodeNum[TmpPos[BitLength[offset + I] & 0xF]++] = I; + } + } + +} + +// TODO: implement +/** + * @param {bitjs.io.BitStream} bstream + * @param {boolean} Solid + */ +function unpack15() { //bstream, Solid) { + info("ERROR! RAR 1.5 compression not supported"); +} + +/** + * Unpacks the bit stream into rBuffer using the Unpack20 algorithm. + * @param {bitjs.io.BitStream} bstream + * @param {boolean} Solid + */ +function unpack20(bstream) { //, Solid) { + var destUnpSize = rBuffer.data.length; + var oldDistPtr = 0; + var Length; + var Distance; + rarReadTables20(bstream); + while (destUnpSize > rBuffer.ptr) { + var num = rarDecodeNumber(bstream, LD); + var Bits; + if (num < 256) { + rBuffer.insertByte(num); + continue; + } + if (num > 269) { + Length = rLDecode[num -= 270] + 3; + if ((Bits = rLBits[num]) > 0) { + Length += bstream.readBits(Bits); + } + var DistNumber = rarDecodeNumber(bstream, DD); + Distance = rDDecode[DistNumber] + 1; + if ((Bits = rDBits[DistNumber]) > 0) { + Distance += bstream.readBits(Bits); + } + if (Distance >= 0x2000) { + Length++; + if (Distance >= 0x40000) { + Length++; + } + } + lastLength = Length; + lastDist = rOldDist[oldDistPtr++ & 3] = Distance; + rarCopyString(Length, Distance); + continue; + } + if (num === 269) { + rarReadTables20(bstream); + rarUpdateProgress(); + continue; + } + if (num === 256) { + lastDist = rOldDist[oldDistPtr++ & 3] = lastDist; + rarCopyString(lastLength, lastDist); + continue; + } + if (num < 261) { + Distance = rOldDist[(oldDistPtr - (num - 256)) & 3]; + var LengthNumber = rarDecodeNumber(bstream, RD); + Length = rLDecode[LengthNumber] + 2; + if ((Bits = rLBits[LengthNumber]) > 0) { + Length += bstream.readBits(Bits); + } + if (Distance >= 0x101) { + Length++; + if (Distance >= 0x2000) { + Length++; + if (Distance >= 0x40000) { + Length++; + } + } + } + lastLength = Length; + lastDist = rOldDist[oldDistPtr++ & 3] = Distance; + rarCopyString(Length, Distance); + continue; + } + if (num < 270) { + Distance = rSDDecode[num -= 261] + 1; + if ((Bits = rSDBits[num]) > 0) { + Distance += bstream.readBits(Bits); + } + lastLength = 2; + lastDist = 
rOldDist[oldDistPtr++ & 3] = Distance; + rarCopyString(2, Distance); + continue; + } + } + rarUpdateProgress(); +} + +function rarUpdateProgress() { + var change = rBuffer.ptr - currentBytesUnarchivedInFile; + currentBytesUnarchivedInFile = rBuffer.ptr; + currentBytesUnarchived += change; + postProgress(); +} + +var rNC20 = 298, + rDC20 = 48, + rRC20 = 28, + rBC20 = 19, + rMC20 = 257; + +var UnpOldTable20 = new Array(rMC20 * 4); + +function rarReadTables20(bstream) { + var BitLength = new Array(rBC20); + var Table = new Array(rMC20 * 4); + var TableSize, N, I; + var i; + bstream.readBits(1); + if (!bstream.readBits(1)) { + for (i = UnpOldTable20.length; i--;) { + UnpOldTable20[i] = 0; + } + } + TableSize = rNC20 + rDC20 + rRC20; + for (I = 0; I < rBC20; I++) { + BitLength[I] = bstream.readBits(4); + } + rarMakeDecodeTables(BitLength, 0, BD, rBC20); + I = 0; + while (I < TableSize) { + var num = rarDecodeNumber(bstream, BD); + if (num < 16) { + Table[I] = num + UnpOldTable20[I] & 0xf; + I++; + } else if (num === 16) { + N = bstream.readBits(2) + 3; + while (N-- > 0 && I < TableSize) { + Table[I] = Table[I - 1]; + I++; + } + } else { + if (num === 17) { + N = bstream.readBits(3) + 3; + } else { + N = bstream.readBits(7) + 11; + } + while (N-- > 0 && I < TableSize) { + Table[I++] = 0; + } + } + } + rarMakeDecodeTables(Table, 0, LD, rNC20); + rarMakeDecodeTables(Table, rNC20, DD, rDC20); + rarMakeDecodeTables(Table, rNC20 + rDC20, RD, rRC20); + for (i = UnpOldTable20.length; i--;) { + UnpOldTable20[i] = Table[i]; + } +} + +// ============================================================================================== // + +// Unpack code specific to RarVM +var VM = new RarVM(); + +/** + * Filters code, one entry per filter. + * @type {Array} + */ +var Filters = []; + +/** + * Filters stack, several entrances of same filter are possible. + * @type {Array} + */ +var PrgStack = []; + +/** + * Lengths of preceding blocks, one length per filter. Used to reduce + * size required to write block length if lengths are repeating. + * @type {Array} + */ +var OldFilterLengths = []; + +var LastFilter = 0; + +function initFilters() { + OldFilterLengths = []; + LastFilter = 0; + Filters = []; + PrgStack = []; +} + + +/** + * @param {number} firstByte The first byte (flags). + * @param {Uint8Array} vmCode An array of bytes. 
+ */ +function rarAddVMCode(firstByte, vmCode) { + VM.init(); + var i; + var bstream = new bitjs.io.BitStream(vmCode.buffer, true /* rtl */ ); + + var filtPos; + if (firstByte & 0x80) { + filtPos = RarVM.readData(bstream); + if (filtPos === 0) { + initFilters(); + } else { + filtPos--; + } + } else { + filtPos = LastFilter; + } + + if (filtPos > Filters.length || filtPos > OldFilterLengths.length) { + return false; + } + + LastFilter = filtPos; + var newFilter = (filtPos === Filters.length); + + // new filter for PrgStack + var stackFilter = new UnpackFilter(); + var filter = null; + // new filter code, never used before since VM reset + if (newFilter) { + // too many different filters, corrupt archive + if (filtPos > 1024) { + return false; + } + + filter = new UnpackFilter(); + Filters.push(filter); + stackFilter.ParentFilter = (Filters.length - 1); + OldFilterLengths.push(0); // OldFilterLengths.Add(1) + filter.ExecCount = 0; + } else { // filter was used in the past + filter = Filters[filtPos]; + stackFilter.ParentFilter = filtPos; + filter.ExecCount++; + } + + var emptyCount = 0; + for (i = 0; i < PrgStack.length; ++i) { + PrgStack[i - emptyCount] = PrgStack[i]; + + if (PrgStack[i] === null) { + emptyCount++; + } + if (emptyCount > 0) { + PrgStack[i] = null; + } + } + + if (emptyCount === 0) { + PrgStack.push(null); //PrgStack.Add(1); + emptyCount = 1; + } + + var stackPos = PrgStack.length - emptyCount; + PrgStack[stackPos] = stackFilter; + stackFilter.ExecCount = filter.ExecCount; + + var blockStart = RarVM.readData(bstream); + if (firstByte & 0x40) { + blockStart += 258; + } + stackFilter.BlockStart = (blockStart + rBuffer.ptr) & MAXWINMASK; + + if (firstByte & 0x20) { + stackFilter.BlockLength = RarVM.readData(bstream); + } else { + stackFilter.BlockLength = filtPos < OldFilterLengths.length ? 
+ OldFilterLengths[filtPos] : + 0; + } + stackFilter.NextWindow = (wBuffer.ptr !== rBuffer.ptr) && + (((wBuffer.ptr - rBuffer.ptr) & MAXWINMASK) <= blockStart); + + OldFilterLengths[filtPos] = stackFilter.BlockLength; + + for (i = 0; i < 7; ++i) { + stackFilter.Prg.InitR[i] = 0; + } + stackFilter.Prg.InitR[3] = VM_GLOBALMEMADDR; + stackFilter.Prg.InitR[4] = stackFilter.BlockLength; + stackFilter.Prg.InitR[5] = stackFilter.ExecCount; + + // set registers to optional parameters if any + if (firstByte & 0x10) { + var initMask = bstream.readBits(7); + for (i = 0; i < 7; ++i) { + if (initMask & (1 << i)) { + stackFilter.Prg.InitR[i] = RarVM.readData(bstream); + } + } + } + + if (newFilter) { + var vmCodeSize = RarVM.readData(bstream); + if (vmCodeSize >= 0x10000 || vmCodeSize === 0) { + return false; + } + vmCode = new Uint8Array(vmCodeSize); + for (i = 0; i < vmCodeSize; ++i) { + //if (Inp.Overflow(3)) + // return(false); + vmCode[i] = bstream.readBits(8); + } + VM.prepare(vmCode, filter.Prg); + } + stackFilter.Prg.Cmd = filter.Prg.Cmd; + stackFilter.Prg.AltCmd = filter.Prg.Cmd; + + var staticDataSize = filter.Prg.StaticData.length; + if (staticDataSize > 0 && staticDataSize < VM_GLOBALMEMSIZE) { + // read statically defined data contained in DB commands + for (i = 0; i < staticDataSize; ++i) { + stackFilter.Prg.StaticData[i] = filter.Prg.StaticData[i]; + } + } + + if (stackFilter.Prg.GlobalData.length < VM_FIXEDGLOBALSIZE) { + stackFilter.Prg.GlobalData = new Uint8Array(VM_FIXEDGLOBALSIZE); + } + + var globalData = stackFilter.Prg.GlobalData; + for (i = 0; i < 7; ++i) { + VM.setLowEndianValue(globalData, stackFilter.Prg.InitR[i], i * 4); + } + + VM.setLowEndianValue(globalData, stackFilter.BlockLength, 0x1c); + VM.setLowEndianValue(globalData, 0, 0x20); + VM.setLowEndianValue(globalData, stackFilter.ExecCount, 0x2c); + for (i = 0; i < 16; ++i) { + globalData[0x30 + i] = 0; + } + + // put data block passed as parameter if any + if (firstByte & 8) { + //if (Inp.Overflow(3)) + // return(false); + var dataSize = RarVM.readData(bstream); + if (dataSize > (VM_GLOBALMEMSIZE - VM_FIXEDGLOBALSIZE)) { + return (false); + } + + var curSize = stackFilter.Prg.GlobalData.length; + if (curSize < dataSize + VM_FIXEDGLOBALSIZE) { + // Resize global data and update the stackFilter and local variable. + var numBytesToAdd = dataSize + VM_FIXEDGLOBALSIZE - curSize; + var newGlobalData = new Uint8Array(globalData.length + numBytesToAdd); + newGlobalData.set(globalData); + + stackFilter.Prg.GlobalData = newGlobalData; + globalData = newGlobalData; + } + //byte *GlobalData=&StackFilter->Prg.GlobalData[VM_FIXEDGLOBALSIZE]; + for (i = 0; i < dataSize; ++i) { + //if (Inp.Overflow(3)) + // return(false); + globalData[VM_FIXEDGLOBALSIZE + i] = bstream.readBits(8); + } + } + + return true; +} + + +/** + * @param {!bitjs.io.BitStream} bstream + */ +function rarReadVMCode(bstream) { + var firstByte = bstream.readBits(8); + var length = (firstByte & 7) + 1; + if (length === 7) { + length = bstream.readBits(8) + 7; + } else if (length === 8) { + length = bstream.readBits(16); + } + + // Read all bytes of VM code into an array. + var vmCode = new Uint8Array(length); + for (var i = 0; i < length; i++) { + // Do something here with checking readbuf. + vmCode[i] = bstream.readBits(8); + } + return rarAddVMCode(firstByte, vmCode); +} + +/** + * Unpacks the bit stream into rBuffer using the Unpack29 algorithm. 
+ * @param {bitjs.io.BitStream} bstream + * @param {boolean} Solid + */ +function unpack29(bstream) { + // lazy initialize rDDecode and rDBits + + var DDecode = new Array(rDC); + var DBits = new Array(rDC); + var Distance = 0; + var Length = 0; + var Dist = 0, BitLength = 0, Slot = 0; + var I; + for (I = 0; I < rDBitLengthCounts.length; I++, BitLength++) { + for (var J = 0; J < rDBitLengthCounts[I]; J++, Slot++, Dist += (1 << BitLength)) { + DDecode[Slot] = Dist; + DBits[Slot] = BitLength; + } + } + + var Bits; + //tablesRead = false; + + rOldDist = [0, 0, 0, 0]; + + lastDist = 0; + lastLength = 0; + var i; + for (i = UnpOldTable.length; i--;) { + UnpOldTable[i] = 0; + } + + // read in Huffman tables + rarReadTables(bstream); + + while (true) { + var num = rarDecodeNumber(bstream, LD); + + if (num < 256) { + rBuffer.insertByte(num); + continue; + } + if (num >= 271) { + Length = rLDecode[num -= 271] + 3; + if ((Bits = rLBits[num]) > 0) { + Length += bstream.readBits(Bits); + } + var DistNumber = rarDecodeNumber(bstream, DD); + Distance = DDecode[DistNumber] + 1; + if ((Bits = DBits[DistNumber]) > 0) { + if (DistNumber > 9) { + if (Bits > 4) { + Distance += ((bstream.getBits() >>> (20 - Bits)) << 4); + bstream.readBits(Bits - 4); + //todo: check this + } + if (lowDistRepCount > 0) { + lowDistRepCount--; + Distance += prevLowDist; + } else { + var LowDist = rarDecodeNumber(bstream, LDD); + if (LowDist === 16) { + lowDistRepCount = rLowDistRepCount - 1; + Distance += prevLowDist; + } else { + Distance += LowDist; + prevLowDist = LowDist; + } + } + } else { + Distance += bstream.readBits(Bits); + } + } + if (Distance >= 0x2000) { + Length++; + if (Distance >= 0x40000) { + Length++; + } + } + rarInsertOldDist(Distance); + rarInsertLastMatch(Length, Distance); + rarCopyString(Length, Distance); + continue; + } + if (num === 256) { + if (!rarReadEndOfBlock(bstream)) { + break; + } + continue; + } + if (num === 257) { + if (!rarReadVMCode(bstream)) { + break; + } + continue; + } + if (num === 258) { + if (lastLength !== 0) { + rarCopyString(lastLength, lastDist); + } + continue; + } + if (num < 263) { + var DistNum = num - 259; + Distance = rOldDist[DistNum]; + + for (var I2 = DistNum; I2 > 0; I2--) { + rOldDist[I2] = rOldDist[I2 - 1]; + } + rOldDist[0] = Distance; + + var LengthNumber = rarDecodeNumber(bstream, RD); + Length = rLDecode[LengthNumber] + 2; + if ((Bits = rLBits[LengthNumber]) > 0) { + Length += bstream.readBits(Bits); + } + rarInsertLastMatch(Length, Distance); + rarCopyString(Length, Distance); + continue; + } + if (num < 272) { + Distance = rSDDecode[num -= 263] + 1; + if ((Bits = rSDBits[num]) > 0) { + Distance += bstream.readBits(Bits); + } + rarInsertOldDist(Distance); + rarInsertLastMatch(2, Distance); + rarCopyString(2, Distance); + continue; + } + } // while (true) + rarUpdateProgress(); + rarWriteBuf(); +} + +/** + * Does stuff to the current byte buffer (rBuffer) based on + * the filters loaded into the RarVM and writes out to wBuffer. 
+ */ +function rarWriteBuf() { + var writeSize = (rBuffer.ptr & MAXWINMASK); + var j; + var flt; + for (var i = 0; i < PrgStack.length; ++i) { + flt = PrgStack[i]; + if (flt === null) { + continue; + } + + if (flt.NextWindow) { + flt.NextWindow = false; + continue; + } + + var blockStart = flt.BlockStart; + var blockLength = flt.BlockLength; + var parentPrg; + + // WrittenBorder = wBuffer.ptr + if (((blockStart - wBuffer.ptr) & MAXWINMASK) < writeSize) { + if (wBuffer.ptr !== blockStart) { + // Copy blockStart bytes from rBuffer into wBuffer. + rarWriteArea(wBuffer.ptr, blockStart); + writeSize = (rBuffer.ptr - wBuffer.ptr) & MAXWINMASK; + } + if (blockLength <= writeSize) { + var blockEnd = (blockStart + blockLength) & MAXWINMASK; + if (blockStart < blockEnd || blockEnd === 0) { + VM.setMemory(0, rBuffer.data.subarray(blockStart, blockStart + blockLength), blockLength); + } else { + var firstPartLength = MAXWINSIZE - blockStart; + VM.setMemory(0, rBuffer.data.subarray(blockStart, blockStart + firstPartLength), firstPartLength); + VM.setMemory(firstPartLength, rBuffer.data, blockEnd); + } + + parentPrg = Filters[flt.ParentFilter].Prg; + var prg = flt.Prg; + + if (parentPrg.GlobalData.length > VM_FIXEDGLOBALSIZE) { + // Copy global data from previous script execution if any. + prg.GlobalData = new Uint8Array(parentPrg.GlobalData); + } + + rarExecuteCode(prg); + var globalDataLen; + + if (prg.GlobalData.length > VM_FIXEDGLOBALSIZE) { + // Save global data for next script execution. + globalDataLen = prg.GlobalData.length; + if (parentPrg.GlobalData.length < globalDataLen) { + parentPrg.GlobalData = new Uint8Array(globalDataLen); + } + parentPrg.GlobalData.set( + this.mem_.subarray(VM_FIXEDGLOBALSIZE, VM_FIXEDGLOBALSIZE + globalDataLen), + VM_FIXEDGLOBALSIZE); + } else { + parentPrg.GlobalData = new Uint8Array(0); + } + + var filteredData = prg.FilteredData; + + PrgStack[i] = null; + while (i + 1 < PrgStack.length) { + var nextFilter = PrgStack[i + 1]; + if (nextFilter === null || nextFilter.BlockStart !== blockStart || + nextFilter.BlockLength !== filteredData.length || nextFilter.NextWindow) { + break; + } + + // Apply several filters to same data block. + + VM.setMemory(0, filteredData, filteredData.length); + + parentPrg = Filters[nextFilter.ParentFilter].Prg; + var nextPrg = nextFilter.Prg; + + globalDataLen = parentPrg.GlobalData.length; + if (globalDataLen > VM_FIXEDGLOBALSIZE) { + // Copy global data from previous script execution if any. + nextPrg.GlobalData = new Uint8Array(globalDataLen); + nextPrg.GlobalData.set(parentPrg.GlobalData.subarray(VM_FIXEDGLOBALSIZE, VM_FIXEDGLOBALSIZE + globalDataLen), VM_FIXEDGLOBALSIZE); + } + + rarExecuteCode(nextPrg); + + if (nextPrg.GlobalData.length > VM_GLOBALMEMSIZE) { + // Save global data for next script execution. 
+ globalDataLen = nextPrg.GlobalData.length; + if (parentPrg.GlobalData.length < globalDataLen) { + parentPrg.GlobalData = new Uint8Array(globalDataLen); + } + parentPrg.GlobalData.set( + this.mem_.subarray(VM_FIXEDGLOBALSIZE, VM_FIXEDGLOBALSIZE + globalDataLen), + VM_FIXEDGLOBALSIZE); + } else { + parentPrg.GlobalData = new Uint8Array(0); + } + + filteredData = nextPrg.FilteredData; + i++; + PrgStack[i] = null; + } // while (i + 1 < PrgStack.length) + + for (j = 0; j < filteredData.length; ++j) { + wBuffer.insertByte(filteredData[j]); + } + writeSize = (rBuffer.ptr - wBuffer.ptr) & MAXWINMASK; + } else { // if (blockLength <= writeSize) + for (j = i; j < PrgStack.length; ++j) { + flt = PrgStack[j]; + if (flt !== null && flt.NextWindow) { + flt.NextWindow = false; + } + } + //WrPtr=WrittenBorder; + return; + } + } // if (((blockStart - wBuffer.ptr) & MAXWINMASK) < writeSize) + } // for (var i = 0; i < PrgStack.length; ++i) + + // Write any remaining bytes from rBuffer to wBuffer; + rarWriteArea(wBuffer.ptr, rBuffer.ptr); + + // Now that the filtered buffer has been written, swap it back to rBuffer. + rBuffer = wBuffer; +} + +/** + * Copy bytes from rBuffer to wBuffer. + * @param {number} startPtr The starting point to copy from rBuffer. + * @param {number} endPtr The ending point to copy from rBuffer. + */ +function rarWriteArea(startPtr, endPtr) { + if (endPtr < startPtr) { + console.error("endPtr < startPtr, endPtr=" + endPtr + ", startPtr=" + startPtr); + // rarWriteData(startPtr, -(int)StartPtr & MAXWINMASK); + // RarWriteData(0, endPtr); + return; + } else if (startPtr < endPtr) { + rarWriteData(startPtr, endPtr - startPtr); + } +} + +/** + * Writes bytes into wBuffer from rBuffer. + * @param {number} offset The starting point to copy bytes from rBuffer. + * @param {number} numBytes The number of bytes to copy. + */ +function rarWriteData(offset, numBytes) { + if (wBuffer.ptr >= rBuffer.data.length) { + return; + } + var leftToWrite = rBuffer.data.length - wBuffer.ptr; + if (numBytes > leftToWrite) { + numBytes = leftToWrite; + } + for (var i = 0; i < numBytes; ++i) { + wBuffer.insertByte(rBuffer.data[offset + i]); + } +} + +/** + * @param {VM_PreparedProgram} prg + */ +function rarExecuteCode(prg) { + if (prg.GlobalData.length > 0) { + var writtenFileSize = wBuffer.ptr; + prg.InitR[6] = writtenFileSize; + VM.setLowEndianValue(prg.GlobalData, writtenFileSize, 0x24); + VM.setLowEndianValue(prg.GlobalData, (writtenFileSize >>> 32) >> 0, 0x28); + VM.execute(prg); + } +} + +function rarReadEndOfBlock(bstream) { + rarUpdateProgress(); + + var NewTable = false, + NewFile = false; + if (bstream.readBits(1)) { + NewTable = true; + } else { + NewFile = true; + NewTable = !!bstream.readBits(1); + } + //tablesRead = !NewTable; + return !(NewFile || (NewTable && !rarReadTables(bstream))); +} + +function rarInsertLastMatch(length, distance) { + lastDist = distance; + lastLength = length; +} + +function rarInsertOldDist(distance) { + rOldDist.splice(3, 1); + rOldDist.splice(0, 0, distance); +} + +/** + * Copies len bytes from distance bytes ago in the buffer to the end of the + * current byte buffer. + * @param {number} length How many bytes to copy. + * @param {number} distance How far back in the buffer from the current write + * pointer to start copying from. 
+ */ +function rarCopyString(length, distance) { + var srcPtr = rBuffer.ptr - distance; + if (srcPtr < 0) { + var l = rOldBuffers.length; + while (srcPtr < 0) { + srcPtr = rOldBuffers[--l].data.length + srcPtr; + } + // TODO: lets hope that it never needs to read beyond file boundaries + while (length--) { + rBuffer.insertByte(rOldBuffers[l].data[srcPtr++]); + } + } + if (length > distance) { + while (length--) { + rBuffer.insertByte(rBuffer.data[srcPtr++]); + } + } else { + rBuffer.insertBytes(rBuffer.data.subarray(srcPtr, srcPtr + length)); + } +} + +/** + * @param {RarLocalFile} v + */ +function unpack(v) { + // TODO: implement what happens when unpVer is < 15 + var Ver = v.header.unpVer <= 15 ? 15 : v.header.unpVer; + // var Solid = v.header.LHD_SOLID; + var bstream = new bitjs.io.BitStream(v.fileData.buffer, true /* rtl */, v.fileData.byteOffset, v.fileData.byteLength); + + rBuffer = new bitjs.io.ByteBuffer(v.header.unpackedSize); + + info("Unpacking " + v.filename + " RAR v" + Ver); + + switch (Ver) { + case 15: // rar 1.5 compression + unpack15(); //(bstream, Solid); + break; + case 20: // rar 2.x compression + case 26: // files larger than 2GB + unpack20(bstream); //, Solid); + break; + case 29: // rar 3.x compression + case 36: // alternative hash + wBuffer = new bitjs.io.ByteBuffer(rBuffer.data.length); + unpack29(bstream); + break; + } // switch(method) + + rOldBuffers.push(rBuffer); + // TODO: clear these old buffers when there's over 4MB of history + return rBuffer.data; +} + +// bstream is a bit stream +var RarLocalFile = function(bstream) { + this.header = new RarVolumeHeader(bstream); + this.filename = this.header.filename; + + if (this.header.headType !== FILE_HEAD && this.header.headType !== ENDARC_HEAD && this.header.headType !== SERVICE_HEAD) { + this.isValid = false; + info("Error! RAR Volume did not include a FILE_HEAD header "); + } else { + // read in the compressed data + this.fileData = null; + if (this.header.packSize > 0) { + this.fileData = bstream.readBytes(this.header.packSize); + if (this.header.headType === FILE_HEAD) { + this.isValid = true; + } + } + } +}; + +RarLocalFile.prototype.unrar5 = function() { + //if (!this.header.flags.LHD_SPLIT_BEFORE) { + // unstore file + // No compression + if (this.header.method === 0x00) { + info("Unstore " + this.filename); + this.isValid = true; + + currentBytesUnarchivedInFile += this.fileData.length; + currentBytesUnarchived += this.fileData.length; + + // Create a new buffer and copy it over. + var len = this.header.packSize; + var newBuffer = new bitjs.io.ByteBuffer(len); + newBuffer.insertBytes(this.fileData); + this.fileData = newBuffer.data; + } else { + this.isValid = true; + this.fileData = unpack(this); + } + //} +}; + +var unrar5 = function(arrayBuffer) { + currentFilename = ""; + currentFileNumber = 0; + currentBytesUnarchivedInFile = 0; + currentBytesUnarchived = 0; + totalUncompressedBytesInArchive = 0; + totalFilesInArchive = 0; + + // postMessage(new bitjs.archive.UnarchiveStartEvent()); + var bstream = new bitjs.io.BitStream(arrayBuffer, false /* rtl */); + + var header = new RarMainVolumeHeader(bstream); + if (header.crc === 0x6152 && + header.headType === 0x72 && + header.flags.value === 0x1A21 && + header.headSize === 7) { + info("Found RAR signature"); + + var mhead = new RarVolumeHeader(bstream); + if (mhead.headType !== MAIN_HEAD) { + info("Error! 
RAR did not include a MAIN_HEAD header"); + } else { + var localFiles = []; + var localFile = null; + do { + try { + localFile = new RarLocalFile(bstream); + info("RAR localFile isValid=" + localFile.isValid + ", volume packSize=" + localFile.header.packSize); + if (localFile && localFile.isValid && localFile.header.packSize > 0) { + totalUncompressedBytesInArchive += localFile.header.unpackedSize; + localFiles.push(localFile); + } else if (localFile.header.packSize === 0 && localFile.header.unpackedSize === 0) { + localFile.isValid = true; + } + } catch (err) { + break; + } + //info("bstream" + bstream.bytePtr+"/"+bstream.bytes.length); + } while (localFile.isValid); + totalFilesInArchive = localFiles.length; + + // now we have all information but things are unpacked + localFiles.sort(alphanumCase); + + info(localFiles.map(function(a) { + return a.filename; + }).join(", ")); + for (var i = 0; i < localFiles.length; ++i) { + var localfile = localFiles[i]; + + // update progress + currentFilename = localfile.header.filename; + currentBytesUnarchivedInFile = 0; + + // actually do the unzipping + localfile.unrar5(); + + if (localfile.isValid) { + postMessage(new bitjs.archive.UnarchiveExtractEvent(localfile)); + postProgress(); + } + } + + postProgress(); + } + } else { + err("Invalid RAR file"); + } + // postMessage(new bitjs.archive.UnarchiveFinishEvent()); +}; + +// event.data.file has the ArrayBuffer. +onmessage = function(event) { + var ab = event.data.file; + unrar5(ab, true); +}; diff --git a/cps/static/js/filter_grid.js b/cps/static/js/filter_grid.js index 457b9055..362c6bfa 100644 --- a/cps/static/js/filter_grid.js +++ b/cps/static/js/filter_grid.js @@ -24,6 +24,14 @@ var $list = $("#list").isotope({ }); $("#desc").click(function() { + var page = $(this).data("id"); + $.ajax({ + method:"post", + contentType: "application/json; charset=utf-8", + dataType: "json", + url: window.location.pathname + "/../../ajax/view", + data: "{\"" + page + "\": {\"dir\": \"desc\"}}", + }); $list.isotope({ sortBy: "name", sortAscending: true @@ -32,6 +40,14 @@ $("#desc").click(function() { }); $("#asc").click(function() { + var page = $(this).data("id"); + $.ajax({ + method:"post", + contentType: "application/json; charset=utf-8", + dataType: "json", + url: window.location.pathname + "/../../ajax/view", + data: "{\"" + page + "\": {\"dir\": \"asc\"}}", + }); $list.isotope({ sortBy: "name", sortAscending: false diff --git a/cps/static/js/filter_list.js b/cps/static/js/filter_list.js index 8291f0ac..676ff47b 100644 --- a/cps/static/js/filter_list.js +++ b/cps/static/js/filter_list.js @@ -19,6 +19,17 @@ var direction = 0; // Descending order var sort = 0; // Show sorted entries $("#sort_name").click(function() { + var class_name = $("h1").attr('Class') + "_sort_name"; + var obj = {}; + obj[class_name] = sort; + /*$.ajax({ + method:"post", + contentType: "application/json; charset=utf-8", + dataType: "json", + url: window.location.pathname + "/../../ajax/view", + data: JSON.stringify({obj}), + });*/ + var count = 0; var index = 0; var store; @@ -40,9 +51,7 @@ $("#sort_name").click(function() { count++; } }); - /*listItems.sort(function(a,b){ - return $(a).children()[1].innerText.localeCompare($(b).children()[1].innerText) - });*/ + // Find count of middle element if (count > 20) { var middle = parseInt(count / 2, 10) + (count % 2); @@ -66,6 +75,14 @@ $("#desc").click(function() { if (direction === 0) { return; } + var page = $(this).data("id"); + $.ajax({ + method:"post", + contentType: "application/json; 
charset=utf-8", + dataType: "json", + url: window.location.pathname + "/../../ajax/view", + data: "{\"" + page + "\": {\"dir\": \"desc\"}}", + }); var index = 0; var list = $("#list"); var second = $("#second"); @@ -102,9 +119,18 @@ $("#desc").click(function() { $("#asc").click(function() { + if (direction === 1) { return; } + var page = $(this).data("id"); + $.ajax({ + method:"post", + contentType: "application/json; charset=utf-8", + dataType: "json", + url: window.location.pathname + "/../../ajax/view", + data: "{\"" + page + "\": {\"dir\": \"asc\"}}", + }); var index = 0; var list = $("#list"); var second = $("#second"); @@ -131,7 +157,6 @@ $("#asc").click(function() { }); // middle = parseInt(elementLength / 2) + (elementLength % 2); - list.append(reversed.slice(0, index)); second.append(reversed.slice(index, elementLength)); } else { diff --git a/cps/static/js/kthoom.js b/cps/static/js/kthoom.js index 33a2ac0e..bbb3fead 100644 --- a/cps/static/js/kthoom.js +++ b/cps/static/js/kthoom.js @@ -162,10 +162,15 @@ function initProgressClick() { function loadFromArrayBuffer(ab) { var start = (new Date).getTime(); var h = new Uint8Array(ab, 0, 10); + unrar5(ab); var pathToBitJS = "../../static/js/archive/"; var lastCompletion = 0; - if (h[0] === 0x52 && h[1] === 0x61 && h[2] === 0x72 && h[3] === 0x21) { //Rar! - unarchiver = new bitjs.archive.Unrarrer(ab, pathToBitJS); + /*if (h[0] === 0x52 && h[1] === 0x61 && h[2] === 0x72 && h[3] === 0x21) { //Rar! + if (h[7] === 0x01) { + unarchiver = new bitjs.archive.Unrarrer(ab, pathToBitJS); + } else { + unarchiver = new bitjs.archive.Unrarrer5(ab, pathToBitJS); + } } else if (h[0] === 80 && h[1] === 75) { //PK (Zip) unarchiver = new bitjs.archive.Unzipper(ab, pathToBitJS); } else if (h[0] === 255 && h[1] === 216) { // JPEG @@ -229,7 +234,7 @@ function loadFromArrayBuffer(ab) { unarchiver.start(); } else { alert("Some error"); - } + }*/ } function scrollTocToActive() { diff --git a/cps/static/js/main.js b/cps/static/js/main.js index 6338be0b..7312f08d 100644 --- a/cps/static/js/main.js +++ b/cps/static/js/main.js @@ -58,6 +58,60 @@ $(document).on("change", "select[data-controlall]", function() { } }); +$("#delete_confirm").click(function() { + //get data-id attribute of the clicked element + var pathname = document.getElementsByTagName("script"), src = pathname[pathname.length - 1].src; + var path = src.substring(0, src.lastIndexOf("/")); + var deleteId = $(this).data("delete-id"); + var bookFormat = $(this).data("delete-format"); + if (bookFormat) { + window.location.href = path + "/../../delete/" + deleteId + "/" + bookFormat; + } else { + if ($(this).data("delete-format")) { + path = path + "/../../ajax/delete/" + deleteId; + $.ajax({ + method:"get", + url: path, + timeout: 900, + success:function(data) { + data.forEach(function(item) { + if (!jQuery.isEmptyObject(item)) { + if (item.format != "") { + $("button[data-delete-format='"+item.format+"']").addClass('hidden'); + } + $( ".navbar" ).after( '
<div class="row-fluid text-center" style="margin-top: -20px;">' +
+                            '<div id="flash_danger" class="alert alert-danger">'+item.message+'</div>' +
+                            '</div>
'); + + } + }); + } + }); + } else { + window.location.href = path + "/../../delete/" + deleteId; + + } + } + +}); + +//triggered when modal is about to be shown +$("#deleteModal").on("show.bs.modal", function(e) { + //get data-id attribute of the clicked element and store in button + var bookId = $(e.relatedTarget).data("delete-id"); + var bookfomat = $(e.relatedTarget).data("delete-format"); + if (bookfomat) { + $("#book_format").removeClass('hidden'); + $("#book_complete").addClass('hidden'); + } else { + $("#book_complete").removeClass('hidden'); + $("#book_format").addClass('hidden'); + } + $(e.currentTarget).find("#delete_confirm").data("delete-id", bookId); + $(e.currentTarget).find("#delete_confirm").data("delete-format", bookfomat); +}); + + $(function() { var updateTimerID; @@ -324,16 +378,19 @@ $(function() { }); $(".update-view").click(function(e) { - var target = $(this).data("target"); var view = $(this).data("view"); e.preventDefault(); e.stopPropagation(); - var data = {}; - data[target] = view; - console.debug("Updating view data: ", data); - $.post( "/ajax/view", data).done(function( ) { - location.reload(); + $.ajax({ + method:"post", + contentType: "application/json; charset=utf-8", + dataType: "json", + url: window.location.pathname + "/../../ajax/view", + data: "{\"series\": {\"series_view\": \""+ view +"\"}}", + success: function success() { + location.reload(); + } }); }); }); diff --git a/cps/static/js/table.js b/cps/static/js/table.js index 77045db3..62f7e220 100644 --- a/cps/static/js/table.js +++ b/cps/static/js/table.js @@ -1,5 +1,5 @@ /* This file is part of the Calibre-Web (https://github.com/janeczku/calibre-web) - * Copyright (C) 2018 OzzieIsaacs + * Copyright (C) 2020 OzzieIsaacs * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -15,10 +15,158 @@ * along with this program. If not, see . */ -/* exported TableActions, RestrictionActions*/ +/* exported TableActions, RestrictionActions, EbookActions, responseHandler */ + +var selections = []; $(function() { + $("#books-table").on("check.bs.table check-all.bs.table uncheck.bs.table uncheck-all.bs.table", + function (e, rowsAfter, rowsBefore) { + var rows = rowsAfter; + + if (e.type === "uncheck-all") { + rows = rowsBefore; + } + + var ids = $.map(!$.isArray(rows) ? [rows] : rows, function (row) { + return row.id; + }); + + var func = $.inArray(e.type, ["check", "check-all"]) > -1 ? 
"union" : "difference"; + selections = window._[func](selections, ids); + if (selections.length >= 2) { + $("#merge_books").removeClass("disabled"); + $("#merge_books").attr("aria-disabled", false); + } else { + $("#merge_books").addClass("disabled"); + $("#merge_books").attr("aria-disabled", true); + } + if (selections.length < 1) { + $("#delete_selection").addClass("disabled"); + $("#delete_selection").attr("aria-disabled", true); + } + else{ + $("#delete_selection").removeClass("disabled"); + $("#delete_selection").attr("aria-disabled", false); + } + }); + $("#delete_selection").click(function() { + $("#books-table").bootstrapTable('uncheckAll'); + }); + + $("#merge_confirm").click(function() { + $.ajax({ + method:"post", + contentType: "application/json; charset=utf-8", + dataType: "json", + url: window.location.pathname + "/../../ajax/mergebooks", + data: JSON.stringify({"Merge_books":selections}), + success: function success() { + $('#books-table').bootstrapTable('refresh'); + $("#books-table").bootstrapTable('uncheckAll'); + } + }); + }); + + $("#merge_books").click(function() { + $.ajax({ + method:"post", + contentType: "application/json; charset=utf-8", + dataType: "json", + url: window.location.pathname + "/../../ajax/simulatemerge", + data: JSON.stringify({"Merge_books":selections}), + success: function success(book_titles) { + $.each(book_titles.from, function(i, item) { + $("- " + item + "").appendTo("#merge_from"); + }); + $('#merge_to').text("- " + book_titles.to); + + } + }); + }); + + var column = []; + $("#books-table > thead > tr > th").each(function() { + var element = {}; + if ($(this).attr("data-edit")) { + element = { + editable: { + mode: "inline", + emptytext: "", + } + }; + } + var validateText = $(this).attr("data-edit-validate"); + if (validateText) { + element.editable.validate = function (value) { + if ($.trim(value) === "") return validateText; + }; + } + column.push(element); + }); + + $("#books-table").bootstrapTable({ + sidePagination: "server", + pagination: true, + paginationLoop: false, + paginationDetailHAlign: " hidden", + paginationHAlign: "left", + idField: "id", + uniqueId: "id", + search: true, + showColumns: true, + searchAlign: "left", + showSearchButton : false, + searchOnEnterKey: true, + checkboxHeader: false, + maintainMetaData: true, + responseHandler: responseHandler, + columns: column, + formatNoMatches: function () { + return ""; + }, + onEditableSave: function (field, row, oldvalue, $el) { + if (field === 'title' || field === 'authors') { + $.ajax({ + method:"get", + dataType: "json", + url: window.location.pathname + "/../../ajax/sort_value/" + field + '/' + row.id, + success: function success(data) { + var key = Object.keys(data)[0] + $("#books-table").bootstrapTable('updateCellByUniqueId', { + id: row.id, + field: key, + value: data[key] + }); + console.log(data); + } + }); + } + }, + onColumnSwitch: function (field, checked) { + var visible = $("#books-table").bootstrapTable('getVisibleColumns'); + var hidden = $("#books-table").bootstrapTable('getHiddenColumns'); + var visibility =[] + var st = "" + visible.forEach(function(item) { + st += "\""+ item.field + "\":\"" +"true"+ "\"," + }); + hidden.forEach(function(item) { + st += "\""+ item.field + "\":\"" +"false"+ "\"," + }); + st = st.slice(0, -1); + $.ajax({ + method:"post", + contentType: "application/json; charset=utf-8", + dataType: "json", + url: window.location.pathname + "/../../ajax/table_settings", + data: "{" + st + "}", + }); + }, + }); + + 
$("#domain_allow_submit").click(function(event) { event.preventDefault(); $("#domain_add_allow").ajaxForm(); @@ -33,6 +181,7 @@ $(function() { } }); }); + $("#domain-allow-table").bootstrapTable({ formatNoMatches: function () { return ""; @@ -205,6 +354,7 @@ function TableActions (value, row) { ].join(""); } + /* Function for deleting domain restrictions */ function RestrictionActions (value, row) { return [ @@ -213,3 +363,20 @@ function RestrictionActions (value, row) { "" ].join(""); } + +/* Function for deleting books */ +function EbookActions (value, row) { + return [ + "
", + "", + "
" + ].join(""); +} + +/* Function for keeping checked rows */ +function responseHandler(res) { + $.each(res.rows, function (i, row) { + row.state = $.inArray(row.id, selections) !== -1; + }); + return res; +} diff --git a/cps/tasks/__init__.py b/cps/tasks/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/cps/tasks/convert.py b/cps/tasks/convert.py new file mode 100644 index 00000000..b60d4976 --- /dev/null +++ b/cps/tasks/convert.py @@ -0,0 +1,217 @@ +from __future__ import division, print_function, unicode_literals +import sys +import os +import re + +from glob import glob +from shutil import copyfile + +from sqlalchemy.exc import SQLAlchemyError + +from cps.services.worker import CalibreTask, STAT_FINISH_SUCCESS +from cps import calibre_db, db +from cps import logger, config +from cps.subproc_wrapper import process_open +from flask_babel import gettext as _ + +from cps.tasks.mail import TaskEmail +from cps import gdriveutils +log = logger.create() + + +class TaskConvert(CalibreTask): + def __init__(self, file_path, bookid, taskMessage, settings, kindle_mail, user=None): + super(TaskConvert, self).__init__(taskMessage) + self.file_path = file_path + self.bookid = bookid + self.settings = settings + self.kindle_mail = kindle_mail + self.user = user + + self.results = dict() + + def run(self, worker_thread): + self.worker_thread = worker_thread + if config.config_use_google_drive: + cur_book = calibre_db.get_book(self.bookid) + data = calibre_db.get_book_format(self.bookid, self.settings['old_book_format']) + df = gdriveutils.getFileFromEbooksFolder(cur_book.path, + data.name + "." + self.settings['old_book_format'].lower()) + if df: + datafile = os.path.join(config.config_calibre_dir, + cur_book.path, + data.name + u"." + self.settings['old_book_format'].lower()) + if not os.path.exists(os.path.join(config.config_calibre_dir, cur_book.path)): + os.makedirs(os.path.join(config.config_calibre_dir, cur_book.path)) + df.GetContentFile(datafile) + else: + error_message = _(u"%(format)s not found on Google Drive: %(fn)s", + format=self.settings['old_book_format'], + fn=data.name + "." + self.settings['old_book_format'].lower()) + return error_message + + filename = self._convert_ebook_format() + if config.config_use_google_drive: + os.remove(self.file_path + u'.' + self.settings['old_book_format'].lower()) + + if filename: + if config.config_use_google_drive: + # Upload files to gdrive + gdriveutils.updateGdriveCalibreFromLocal() + self._handleSuccess() + if self.kindle_mail: + # if we're sending to kindle after converting, create a one-off task and run it immediately + # todo: figure out how to incorporate this into the progress + try: + worker_thread.add(self.user, TaskEmail(self.settings['subject'], self.results["path"], + filename, self.settings, self.kindle_mail, + self.settings['subject'], self.settings['body'], internal=True)) + except Exception as e: + return self._handleError(str(e)) + + def _convert_ebook_format(self): + error_message = None + local_session = db.CalibreDB().session + file_path = self.file_path + book_id = self.bookid + format_old_ext = u'.' + self.settings['old_book_format'].lower() + format_new_ext = u'.' 
+ self.settings['new_book_format'].lower() + + # check to see if destination format already exists - + # if it does - mark the conversion task as complete and return a success + # this will allow send to kindle workflow to continue to work + if os.path.isfile(file_path + format_new_ext): + log.info("Book id %d already converted to %s", book_id, format_new_ext) + cur_book = calibre_db.get_book(book_id) + self.results['path'] = file_path + self.results['title'] = cur_book.title + self._handleSuccess() + return os.path.basename(file_path + format_new_ext) + else: + log.info("Book id %d - target format of %s does not exist. Moving forward with convert.", + book_id, + format_new_ext) + + if config.config_kepubifypath and format_old_ext == '.epub' and format_new_ext == '.kepub': + check, error_message = self._convert_kepubify(file_path, + format_old_ext, + format_new_ext) + else: + # check if calibre converter-executable is existing + if not os.path.exists(config.config_converterpath): + # ToDo Text is not translated + self._handleError(_(u"Calibre ebook-convert %(tool)s not found", tool=config.config_converterpath)) + return + check, error_message = self._convert_calibre(file_path, format_old_ext, format_new_ext) + + if check == 0: + cur_book = calibre_db.get_book(book_id) + if os.path.isfile(file_path + format_new_ext): + # self.db_queue.join() + new_format = db.Data(name=cur_book.data[0].name, + book_format=self.settings['new_book_format'].upper(), + book=book_id, uncompressed_size=os.path.getsize(file_path + format_new_ext)) + try: + local_session.merge(new_format) + local_session.commit() + except SQLAlchemyError as e: + local_session.rollback() + log.error("Database error: %s", e) + return + self.results['path'] = cur_book.path + self.results['title'] = cur_book.title + if not config.config_use_google_drive: + self._handleSuccess() + return os.path.basename(file_path + format_new_ext) + else: + error_message = _('%(format)s format not found on disk', format=format_new_ext.upper()) + log.info("ebook converter failed with error while converting book") + if not error_message: + error_message = _('Ebook converter failed with unknown error') + self._handleError(error_message) + return + + def _convert_kepubify(self, file_path, format_old_ext, format_new_ext): + quotes = [1, 3] + command = [config.config_kepubifypath, (file_path + format_old_ext), '-o', os.path.dirname(file_path)] + try: + p = process_open(command, quotes) + except OSError as e: + return 1, _(u"Kepubify-converter failed: %(error)s", error=e) + self.progress = 0.01 + while True: + nextline = p.stdout.readlines() + nextline = [x.strip('\n') for x in nextline if x != '\n'] + if sys.version_info < (3, 0): + nextline = [x.decode('utf-8') for x in nextline] + for line in nextline: + log.debug(line) + if p.poll() is not None: + break + + # ToD Handle + # process returncode + check = p.returncode + + # move file + if check == 0: + converted_file = glob(os.path.join(os.path.dirname(file_path), "*.kepub.epub")) + if len(converted_file) == 1: + copyfile(converted_file[0], (file_path + format_new_ext)) + os.unlink(converted_file[0]) + else: + return 1, _(u"Converted file not found or more than one file in folder %(folder)s", + folder=os.path.dirname(file_path)) + return check, None + + def _convert_calibre(self, file_path, format_old_ext, format_new_ext): + try: + # Linux py2.7 encode as list without quotes no empty element for parameters + # linux py3.x no encode and as list without quotes no empty element for parameters + # windows py2.7 
encode as string with quotes empty element for parameters is okay + # windows py 3.x no encode and as string with quotes empty element for parameters is okay + # separate handling for windows and linux + quotes = [1, 2] + command = [config.config_converterpath, (file_path + format_old_ext), + (file_path + format_new_ext)] + quotes_index = 3 + if config.config_calibre: + parameters = config.config_calibre.split(" ") + for param in parameters: + command.append(param) + quotes.append(quotes_index) + quotes_index += 1 + + p = process_open(command, quotes) + except OSError as e: + return 1, _(u"Ebook-converter failed: %(error)s", error=e) + + while p.poll() is None: + nextline = p.stdout.readline() + if os.name == 'nt' and sys.version_info < (3, 0): + nextline = nextline.decode('windows-1252') + elif os.name == 'posix' and sys.version_info < (3, 0): + nextline = nextline.decode('utf-8') + log.debug(nextline.strip('\r\n')) + # parse progress string from calibre-converter + progress = re.search(r"(\d+)%\s.*", nextline) + if progress: + self.progress = int(progress.group(1)) / 100 + if config.config_use_google_drive: + self.progress *= 0.9 + + # process returncode + check = p.returncode + calibre_traceback = p.stderr.readlines() + error_message = "" + for ele in calibre_traceback: + if sys.version_info < (3, 0): + ele = ele.decode('utf-8') + log.debug(ele.strip('\n')) + if not ele.startswith('Traceback') and not ele.startswith(' File'): + error_message = _("Calibre failed with error: %(error)s", error=ele.strip('\n')) + return check, error_message + + @property + def name(self): + return "Convert" diff --git a/cps/tasks/mail.py b/cps/tasks/mail.py new file mode 100644 index 00000000..ac3ec424 --- /dev/null +++ b/cps/tasks/mail.py @@ -0,0 +1,241 @@ +from __future__ import division, print_function, unicode_literals +import sys +import os +import smtplib +import threading +import socket + +try: + from StringIO import StringIO + from email.MIMEBase import MIMEBase + from email.MIMEMultipart import MIMEMultipart + from email.MIMEText import MIMEText +except ImportError: + from io import StringIO + from email.mime.base import MIMEBase + from email.mime.multipart import MIMEMultipart + from email.mime.text import MIMEText + +from email import encoders +from email.utils import formatdate, make_msgid +from email.generator import Generator + +from cps.services.worker import CalibreTask +from cps import logger, config + +from cps import gdriveutils + +log = logger.create() + +CHUNKSIZE = 8192 + + +# Class for sending email with ability to get current progress +class EmailBase: + + transferSize = 0 + progress = 0 + + def data(self, msg): + self.transferSize = len(msg) + (code, resp) = smtplib.SMTP.data(self, msg) + self.progress = 0 + return (code, resp) + + def send(self, strg): + """Send `strg' to the server.""" + log.debug('send: %r', strg[:300]) + if hasattr(self, 'sock') and self.sock: + try: + if self.transferSize: + lock=threading.Lock() + lock.acquire() + self.transferSize = len(strg) + lock.release() + for i in range(0, self.transferSize, CHUNKSIZE): + if isinstance(strg, bytes): + self.sock.send((strg[i:i + CHUNKSIZE])) + else: + self.sock.send((strg[i:i + CHUNKSIZE]).encode('utf-8')) + lock.acquire() + self.progress = i + lock.release() + else: + self.sock.sendall(strg.encode('utf-8')) + except socket.error: + self.close() + raise smtplib.SMTPServerDisconnected('Server not connected') + else: + raise smtplib.SMTPServerDisconnected('please run connect() first') + + @classmethod + def 
_print_debug(cls, *args): + log.debug(args) + + def getTransferStatus(self): + if self.transferSize: + lock2 = threading.Lock() + lock2.acquire() + value = int((float(self.progress) / float(self.transferSize))*100) + lock2.release() + return value / 100 + else: + return 1 + + +# Class for sending email with ability to get current progress, derived from emailbase class +class Email(EmailBase, smtplib.SMTP): + + def __init__(self, *args, **kwargs): + smtplib.SMTP.__init__(self, *args, **kwargs) + + +# Class for sending ssl encrypted email with ability to get current progress, , derived from emailbase class +class EmailSSL(EmailBase, smtplib.SMTP_SSL): + + def __init__(self, *args, **kwargs): + smtplib.SMTP_SSL.__init__(self, *args, **kwargs) + + +class TaskEmail(CalibreTask): + def __init__(self, subject, filepath, attachment, settings, recipient, taskMessage, text, internal=False): + super(TaskEmail, self).__init__(taskMessage) + self.subject = subject + self.attachment = attachment + self.settings = settings + self.filepath = filepath + self.recipent = recipient + self.text = text + self.asyncSMTP = None + + self.results = dict() + + def run(self, worker_thread): + # create MIME message + msg = MIMEMultipart() + msg['Subject'] = self.subject + msg['Message-Id'] = make_msgid('calibre-web') + msg['Date'] = formatdate(localtime=True) + text = self.text + msg.attach(MIMEText(text.encode('UTF-8'), 'plain', 'UTF-8')) + if self.attachment: + result = self._get_attachment(self.filepath, self.attachment) + if result: + msg.attach(result) + else: + self._handleError(u"Attachment not found") + return + + msg['From'] = self.settings["mail_from"] + msg['To'] = self.recipent + + use_ssl = int(self.settings.get('mail_use_ssl', 0)) + try: + # convert MIME message to string + fp = StringIO() + gen = Generator(fp, mangle_from_=False) + gen.flatten(msg) + msg = fp.getvalue() + + # send email + timeout = 600 # set timeout to 5mins + + # redirect output to logfile on python2 pn python3 debugoutput is caught with overwritten + # _print_debug function + if sys.version_info < (3, 0): + org_smtpstderr = smtplib.stderr + smtplib.stderr = logger.StderrLogger('worker.smtp') + + if use_ssl == 2: + self.asyncSMTP = EmailSSL(self.settings["mail_server"], self.settings["mail_port"], + timeout=timeout) + else: + self.asyncSMTP = Email(self.settings["mail_server"], self.settings["mail_port"], timeout=timeout) + + # link to logginglevel + if logger.is_debug_enabled(): + self.asyncSMTP.set_debuglevel(1) + if use_ssl == 1: + self.asyncSMTP.starttls() + if self.settings["mail_password"]: + self.asyncSMTP.login(str(self.settings["mail_login"]), str(self.settings["mail_password"])) + self.asyncSMTP.sendmail(self.settings["mail_from"], self.recipent, msg) + self.asyncSMTP.quit() + self._handleSuccess() + + if sys.version_info < (3, 0): + smtplib.stderr = org_smtpstderr + + except (MemoryError) as e: + log.exception(e) + self._handleError(u'MemoryError sending email: ' + str(e)) + # return None + except (smtplib.SMTPException, smtplib.SMTPAuthenticationError) as e: + if hasattr(e, "smtp_error"): + text = e.smtp_error.decode('utf-8').replace("\n", '. 
') + elif hasattr(e, "message"): + text = e.message + elif hasattr(e, "args"): + text = '\n'.join(e.args) + else: + log.exception(e) + text = '' + self._handleError(u'Smtplib Error sending email: ' + text) + # return None + except (socket.error) as e: + self._handleError(u'Socket Error sending email: ' + e.strerror) + # return None + + + @property + def progress(self): + if self.asyncSMTP is not None: + return self.asyncSMTP.getTransferStatus() + else: + return self._progress + + @progress.setter + def progress(self, x): + """This gets explicitly set when handle(Success|Error) are called. In this case, remove the SMTP connection""" + if x == 1: + self.asyncSMTP = None + self._progress = x + + + @classmethod + def _get_attachment(cls, bookpath, filename): + """Get file as MIMEBase message""" + calibrepath = config.config_calibre_dir + if config.config_use_google_drive: + df = gdriveutils.getFileFromEbooksFolder(bookpath, filename) + if df: + datafile = os.path.join(calibrepath, bookpath, filename) + if not os.path.exists(os.path.join(calibrepath, bookpath)): + os.makedirs(os.path.join(calibrepath, bookpath)) + df.GetContentFile(datafile) + else: + return None + file_ = open(datafile, 'rb') + data = file_.read() + file_.close() + os.remove(datafile) + else: + try: + file_ = open(os.path.join(calibrepath, bookpath, filename), 'rb') + data = file_.read() + file_.close() + except IOError as e: + log.exception(e) + log.error(u'The requested file could not be read. Maybe wrong permissions?') + return None + + attachment = MIMEBase('application', 'octet-stream') + attachment.set_payload(data) + encoders.encode_base64(attachment) + attachment.add_header('Content-Disposition', 'attachment', + filename=filename) + return attachment + + @property + def name(self): + return "Email" diff --git a/cps/tasks/upload.py b/cps/tasks/upload.py new file mode 100644 index 00000000..ce2cb07b --- /dev/null +++ b/cps/tasks/upload.py @@ -0,0 +1,19 @@ +from __future__ import division, print_function, unicode_literals + +from datetime import datetime +from cps.services.worker import CalibreTask, STAT_FINISH_SUCCESS + +class TaskUpload(CalibreTask): + def __init__(self, taskMessage): + super(TaskUpload, self).__init__(taskMessage) + self.start_time = self.end_time = datetime.now() + self.stat = STAT_FINISH_SUCCESS + self.progress = 1 + + def run(self, worker_thread): + """Upload task doesn't have anything to do, it's simply a way to add information to the task list""" + pass + + @property + def name(self): + return "Upload" diff --git a/cps/templates/admin.html b/cps/templates/admin.html index d8cf88db..b19dc235 100644 --- a/cps/templates/admin.html +++ b/cps/templates/admin.html @@ -161,8 +161,8 @@ -
{{_('Check for Update')}}
- +
{{_('Check for Update')}}
+ diff --git a/cps/templates/author.html b/cps/templates/author.html index 18742003..41a9aebb 100644 --- a/cps/templates/author.html +++ b/cps/templates/author.html @@ -23,14 +23,14 @@

{{_("In Library")}}

{% endif %} @@ -53,7 +53,7 @@ {% if not loop.first %} & {% endif %} - {{author.name.replace('|',',')|shortentitle(30)}} + {{author.name.replace('|',',')|shortentitle(30)}} {% if loop.last %} (...) {% endif %} @@ -61,7 +61,7 @@ {% if not loop.first %} & {% endif %} - {{author.name.replace('|',',')|shortentitle(30)}} + {{author.name.replace('|',',')|shortentitle(30)}} {% endif %} {% endfor %} {% for format in entry.data %} diff --git a/cps/templates/book_edit.html b/cps/templates/book_edit.html index ef483c5c..16c02fae 100644 --- a/cps/templates/book_edit.html +++ b/cps/templates/book_edit.html @@ -7,13 +7,13 @@ {% if g.user.role_delete_books() %}
-            <button type="button" class="btn btn-danger" id="delete" data-toggle="modal" data-target="#deleteModal">{{_("Delete Book")}}</button>
+            <button type="button" class="btn btn-danger" data-toggle="modal" data-target="#deleteModal" data-delete-id="{{ book.id }}">{{_("Delete Book")}}</button>
             {% if book.data|length > 1 %}
               <div class="text-center more-stuff">
                 <h4>{{_('Delete formats:')}}</h4>
                 {% for file in book.data %}
-                  <a href="{{ url_for('editbook.delete_book', book_id=book.id, book_format=file.format) }}" class="btn btn-danger" type="button">{{_('Delete')}} - {{file.format}}</a>
+                  <button type="button" class="btn btn-danger" data-toggle="modal" data-target="#deleteModal" data-delete-id="{{ book.id }}" data-delete-format="{{ file.format }}">{{_('Delete')}} - {{file.format}}</button>
                 {% endfor %}
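These buttons feed the `#deleteModal` handler added to main.js above, whose confirm step calls `/ajax/delete/<book_id>` and iterates over a JSON array of `{format, message}` objects — an empty `format` means the whole book was removed, otherwise the matching per-format button is hidden. That endpoint is defined elsewhere in the PR; purely to illustrate the response contract the client relies on, a minimal responder could look like this (blueprint name and helper are assumptions, not the real code):

```python
from flask import Blueprint, jsonify

editbook = Blueprint("editbook", __name__)  # blueprint name assumed


def delete_book_from_db(book_id):
    """Stand-in for the real deletion logic elsewhere in the PR."""
    return [{"format": "", "message": "Book successfully deleted"}]


@editbook.route("/ajax/delete/<int:book_id>")
def delete_book_ajax(book_id):
    # top-level JSON array, one entry per outcome: 'format' tells main.js
    # which per-format button to hide, 'message' is flashed below the navbar
    return jsonify(delete_book_from_db(book_id))
```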
@@ -197,34 +197,7 @@ {% endblock %} {% block modal %} -{% if g.user.role_delete_books() %} - -{% endif %} +{{ delete_book(book.id) }}
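The three new modules under cps/tasks/ all subclass `CalibreTask` from cps/services/worker.py, which this excerpt does not include. Reconstructed purely from how `TaskConvert`, `TaskEmail`, and `TaskUpload` use it — `run(worker_thread)`, a `progress` property backed by `_progress`, `_handleSuccess()`/`_handleError()`, and per-task `stat`/`start_time`/`end_time` — the contract looks roughly like the sketch below. This is an inference from usage, not the actual worker implementation:

```python
from datetime import datetime

# status constants; values are assumptions, only STAT_FINISH_SUCCESS is
# referenced by name in this diff
STAT_WAITING, STAT_FAIL, STAT_STARTED, STAT_FINISH_SUCCESS = range(4)


class CalibreTask:
    def __init__(self, message):
        self.stat = STAT_WAITING
        self.error = None
        self.message = message
        self.start_time = None
        self.end_time = None
        self._progress = 0

    def run(self, worker_thread):
        """Overridden by each task; performs the actual work."""
        raise NotImplementedError

    @property
    def progress(self):
        return self._progress

    @progress.setter
    def progress(self, x):
        # TaskEmail overrides this pair to report live SMTP transfer progress
        self._progress = x

    def _handleError(self, error_message):
        self.stat = STAT_FAIL
        self.progress = 1
        self.error = error_message
        self.end_time = datetime.now()

    def _handleSuccess(self):
        self.stat = STAT_FINISH_SUCCESS
        self.progress = 1
        self.end_time = datetime.now()
```

This also explains why `TaskUpload` can set `stat`, `progress`, and the timestamps in its constructor and leave `run()` empty: it exists only to surface an entry in the task list.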