mirror of https://github.com/janeczku/calibre-web
synced 2024-10-31 23:26:20 +00:00

Merge branch 'master' into develop

# Conflicts:
#   cps/db.py
#   cps/ub.py
#   cps/web.py
#   readme.md
#   requirements.txt

This commit is contained in:
commit cf1ca21bbc
cps.py (4 changes)
@@ -6,7 +6,9 @@ import sys
 base_path = os.path.dirname(os.path.abspath(__file__))
 # Insert local directories into path
-sys.path.insert(0, os.path.join(base_path, 'vendor'))
+sys.path.append(base_path)
+sys.path.append(os.path.join(base_path, 'cps'))
+sys.path.append(os.path.join(base_path, 'vendor'))

 from cps import web
 from tornado.wsgi import WSGIContainer
cps/uploader.py

@@ -14,28 +14,28 @@ try:
     from wand.image import Image
     from wand import version as ImageVersion
     use_generic_pdf_cover = False
-except ImportError, e:
+except ImportError as e:
     logger.warning('cannot import Image, generating pdf covers for pdf uploads will not work: %s', e)
     use_generic_pdf_cover = True
 try:
     from PyPDF2 import PdfFileReader
     from PyPDF2 import __version__ as PyPdfVersion
     use_pdf_meta = True
-except ImportError, e:
+except ImportError as e:
     logger.warning('cannot import PyPDF2, extracting pdf metadata will not work: %s', e)
     use_pdf_meta = False

 try:
     import epub
     use_epub_meta = True
-except ImportError, e:
+except ImportError as e:
     logger.warning('cannot import epub, extracting epub metadata will not work: %s', e)
     use_epub_meta = False

 try:
     import fb2
     use_fb2_meta = True
-except ImportError, e:
+except ImportError as e:
     logger.warning('cannot import fb2, extracting fb2 metadata will not work: %s', e)
     use_fb2_meta = False

@@ -48,7 +48,7 @@ def process(tmp_file_path, original_file_name, original_file_extension):
             return epub.get_epub_info(tmp_file_path, original_file_name, original_file_extension)
         if ".FB2" == original_file_extension.upper() and use_fb2_meta is True:
             return fb2.get_fb2_info(tmp_file_path, original_file_extension)
-    except Exception, e:
+    except Exception as e:
         logger.warning('cannot parse metadata, using default: %s', e)
     return default_meta(tmp_file_path, original_file_name, original_file_extension)

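The recurring change in this commit deserves a note: Python 2's 'except ImportError, e:' form is a SyntaxError on Python 3, while 'except ImportError as e:' is accepted by Python 2.6+ and every Python 3 release, so it is the only spelling that works for a codebase straddling both interpreters. A minimal sketch of the optional-import pattern used above (module names as in the diff):

    import logging

    logger = logging.getLogger(__name__)

    try:
        from wand.image import Image  # optional dependency for PDF covers
        use_generic_pdf_cover = False
    except ImportError as e:  # py2.6+/py3-compatible exception syntax
        logger.warning('cannot import Image, generating pdf covers for pdf uploads will not work: %s', e)
        use_generic_pdf_cover = True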
cps/db.py (48 changes)
@@ -11,10 +11,8 @@ from ub import config
 import ub

 session = None
-cc_exceptions = None
-cc_classes = {}
-cc_ids = []
-books_custom_column_links = {}
+cc_exceptions = ['datetime', 'int', 'comments', 'float', 'composite', 'series']
+cc_classes = None
 engine = None

@@ -283,24 +281,19 @@ class Custom_Columns(Base):


 def setup_db():
-    global session
-    global cc_exceptions
-    global cc_classes
-    global cc_ids
-    global books_custom_column_links
     global engine
+    global session
+    global cc_classes

     if config.config_calibre_dir is None or config.config_calibre_dir == u'':
         return False

     dbpath = os.path.join(config.config_calibre_dir, "metadata.db")
-    if not os.path.exists(dbpath):
-        return False
-    engine = create_engine('sqlite:///{0}'.format(dbpath.encode('utf-8')), echo=False)
+    #engine = create_engine('sqlite:///{0}'.format(dbpath.encode('utf-8')), echo=False, isolation_level="SERIALIZABLE")
+    engine = create_engine('sqlite:///'+ dbpath, echo=False, isolation_level="SERIALIZABLE")
     try:
         conn = engine.connect()
-    except:
+    except Exception as e:
         content = ub.session.query(ub.Settings).first()
         content.config_calibre_dir = None
         content.db_configured = False
@@ -313,19 +306,21 @@ def setup_db():
         config.loadSettings()
     conn.connection.create_function('title_sort', 1, title_sort)

-    cc = conn.execute("SELECT id, datatype FROM custom_columns")
-
-    cc_exceptions = ['datetime', 'int', 'comments', 'float', 'composite', 'series']
-    for row in cc:
-        if row.datatype not in cc_exceptions:
-            if row.id not in books_custom_column_links:
+    if not cc_classes:
+        cc = conn.execute("SELECT id, datatype FROM custom_columns")
+
+        cc_ids = []
+        books_custom_column_links = {}
+        cc_classes = {}
+        for row in cc:
+            if row.datatype not in cc_exceptions:
                 books_custom_column_links[row.id] = Table('books_custom_column_' + str(row.id) + '_link', Base.metadata,
                                                           Column('book', Integer, ForeignKey('books.id'),
                                                                  primary_key=True),
                                                           Column('value', Integer,
                                                                  ForeignKey('custom_column_' + str(row.id) + '.id'),
                                                                  primary_key=True)
                                                           )
                 cc_ids.append([row.id, row.datatype])
                 if row.datatype == 'bool':
                     ccdict = {'__tablename__': 'custom_column_' + str(row.id),
@@ -338,8 +333,7 @@ def setup_db():
                               'value': Column(String)}
                 cc_classes[row.id] = type('Custom_Column_' + str(row.id), (Base,), ccdict)

     for id in cc_ids:
-        if not hasattr(Books, 'custom_column_' + str(id[0])):
         if id[1] == 'bool':
             setattr(Books, 'custom_column_' + str(id[0]), relationship(cc_classes[id[0]],
                                                                        primaryjoin=(
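A note on the setup_db() restructuring above: the custom-column machinery now sits behind an 'if not cc_classes:' guard, so that re-running setup_db() (for instance via the new database-reconnect action added elsewhere in this commit) does not redeclare the dynamically generated SQLAlchemy classes, which may only be created once per process. A minimal sketch of the guard, with a plain dict standing in for the real declarative Base registry:

    cc_classes = None  # module-level cache of dynamically built classes

    def setup_db():
        global cc_classes
        if not cc_classes:  # build the type objects on the first call only
            cc_classes = {}
            for col_id in (1, 2):  # stand-in for rows read from custom_columns
                cc_classes[col_id] = type('Custom_Column_%d' % col_id, (object,), {})
        return cc_classes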
cps/fb2.py (12 changes)
@@ -4,8 +4,10 @@
 from lxml import etree
 import os
 import uploader
-import StringIO
+try:
+    from io import StringIO
+except ImportError as e:
+    import StringIO

 def get_fb2_info(tmp_file_path, original_file_extension):

@@ -37,16 +39,16 @@ def get_fb2_info(tmp_file_path, original_file_extension):
             first_name = u''
         return first_name + ' ' + middle_name + ' ' + last_name

-    author = unicode(", ".join(map(get_author, authors)))
+    author = str(", ".join(map(get_author, authors)))

     title = tree.xpath('/fb:FictionBook/fb:description/fb:title-info/fb:book-title/text()', namespaces=ns)
     if len(title):
-        title = unicode(title[0])
+        title = str(title[0])
     else:
         title = u''
     description = tree.xpath('/fb:FictionBook/fb:description/fb:publish-info/fb:book-name/text()', namespaces=ns)
     if len(description):
-        description = unicode(description[0])
+        description = str(description[0])
     else:
         description = u''

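The try/except import is the standard way to target both interpreter generations: Python 3 exposes StringIO as a class in the io module, Python 2 as a module of the same name. One wrinkle worth noting: 'from io import StringIO' binds a class while the fallback 'import StringIO' binds a module, so call sites would have to spell the two differently. The variant below keeps the bound name identical on both versions:

    try:
        from io import StringIO          # Python 3 (also available on py2.6+)
    except ImportError:
        from StringIO import StringIO    # Python 2 fallback, same bound name

    buf = StringIO()
    buf.write(u'fb2 metadata goes here')
    print(buf.getvalue())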
cps/helper.py

@@ -13,11 +13,18 @@ import os
 import traceback
 import re
 import unicodedata
-from StringIO import StringIO
+try:
+    from StringIO import StringIO
+    from email.MIMEBase import MIMEBase
+    from email.MIMEMultipart import MIMEMultipart
+    from email.MIMEText import MIMEText
+except ImportError as e:
+    from io import StringIO
+    from email.mime.base import MIMEBase
+    from email.mime.multipart import MIMEMultipart
+    from email.mime.text import MIMEText

 from email import encoders
-from email.MIMEBase import MIMEBase
-from email.MIMEMultipart import MIMEMultipart
-from email.MIMEText import MIMEText
 from email.generator import Generator
 from email.utils import formatdate
 from email.utils import make_msgid
@@ -34,7 +41,7 @@ import web
 try:
     import unidecode
     use_unidecode=True
-except:
+except Exception as e:
    use_unidecode=False

 # Global variables
@@ -149,7 +156,7 @@ def send_raw_email(kindle_mail, msg):

         smtplib.stderr = org_stderr

-    except (socket.error, smtplib.SMTPRecipientsRefused, smtplib.SMTPException), e:
+    except (socket.error, smtplib.SMTPRecipientsRefused, smtplib.SMTPException) as e:
         app.logger.error(traceback.print_exc())
         return _("Failed to send mail: %s" % str(e))

@@ -240,8 +247,11 @@ def get_valid_filename(value, replace_whitespace=True):
     value=value.replace(u'§',u'SS')
     value=value.replace(u'ß',u'ss')
     value = unicodedata.normalize('NFKD', value)
-    re_slugify = re.compile('[^\w\s-]', re.UNICODE)
-    value = unicode(re_slugify.sub('', value).strip())
+    re_slugify = re.compile('[\W\s-]', re.UNICODE)
+    if type(value) is str: #Python3 str, Python2 unicode
+        value = re_slugify.sub('', value).strip()
+    else:
+        value = unicode(re_slugify.sub('', value).strip())
     if replace_whitespace:
         #*+:\"/<>? werden durch _ ersetzt
         value = re.sub('[\*\+:\\\"/<>\?]+', u'_', value, flags=re.U)
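Two things change here at once: the character class flips from [^\w\s-] (strip everything except word characters, whitespace, and hyphens) to [\W\s-] (strip non-word characters and the whitespace/hyphens as well), and the unicode() call is now skipped when the value is already a Python 3 str. A version-explicit sketch of the same branch (the helper name is ours, not the project's):

    import re
    import sys
    import unicodedata

    def slugify(value):
        # Decompose accents first so the character class sees plain letters.
        value = unicodedata.normalize('NFKD', value)
        re_slugify = re.compile(r'[\W\s-]', re.UNICODE)
        if sys.version_info[0] >= 3:  # py3 str is already unicode
            return re_slugify.sub('', value).strip()
        return unicode(re_slugify.sub('', value).strip())  # py2-only branch

    print(slugify(u'Ein Buch: Kapitel 1'))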
@@ -405,7 +415,7 @@ class Updater(threading.Thread):
         try:
             os.chown(dst_file, permission.st_uid, permission.st_uid)
             # print('Permissions: User '+str(new_permissions.st_uid)+' Group '+str(new_permissions.st_uid))
-        except:
+        except Exception as e:
             e = sys.exc_info()
             logging.getLogger('cps.web').debug('Fail '+str(dst_file)+' error: '+str(e))
             return
@@ -447,7 +457,7 @@ class Updater(threading.Thread):
                 logging.getLogger('cps.web').debug("Delete file " + item_path)
                 log_from_thread("Delete file " + item_path)
                 os.remove(item_path)
-            except:
+            except Exception as e:
                 logging.getLogger('cps.web').debug("Could not remove:" + item_path)
                 shutil.rmtree(source, ignore_errors=True)

cps/static/js/get_meta.js (new file, 180 lines)
@@ -0,0 +1,180 @@
+/*
+ * Get Metadata from Douban Books api and Google Books api
+ * Created by idalin<dalin.lin@gmail.com>
+ * Google Books api document: https://developers.google.com/books/docs/v1/using
+ * Douban Books api document: https://developers.douban.com/wiki/?title=book_v2 (Chinese Only)
+ */
+
+$(document).ready(function () {
+    var msg = i18n_msg;
+    var douban = 'https://api.douban.com';
+    var db_search = '/v2/book/search';
+    var db_get_info = '/v2/book/';
+    var db_get_info_by_isbn = '/v2/book/isbn/ ';
+    var db_done = false;
+
+    var google = 'https://www.googleapis.com/';
+    var gg_search = '/books/v1/volumes';
+    var gg_get_info = '/books/v1/volumes/';
+    var gg_done = false;
+
+    var db_results = [];
+    var gg_results = [];
+    var show_flag = 0;
+    String.prototype.replaceAll = function (s1, s2) {
+        return this.replace(new RegExp(s1, "gm"), s2);
+    }
+
+    gg_search_book = function (title) {
+        title = title.replaceAll(/\s+/, '+');
+        var url = google + gg_search + '?q=' + title;
+        $.ajax({
+            url: url,
+            type: "GET",
+            dataType: "jsonp",
+            jsonp: 'callback',
+            success: function (data) {
+                gg_results = data.items;
+            },
+            complete: function () {
+                gg_done = true;
+                show_result();
+            }
+        });
+    }
+
+    get_meta = function (source, id) {
+        var meta;
+        if (source == 'google') {;
+            meta = gg_results[id];
+            $('#description').val(meta.volumeInfo.description);
+            $('#bookAuthor').val(meta.volumeInfo.authors.join(' & '));
+            $('#book_title').val(meta.volumeInfo.title);
+            if (meta.volumeInfo.categories) {
+                var tags = meta.volumeInfo.categories.join(',');
+                $('#tags').val(tags);
+            }
+            if (meta.volumeInfo.averageRating) {
+                $('#rating').val(Math.round(meta.volumeInfo.averageRating));
+            }
+            return;
+        }
+        if (source == 'douban') {
+            meta = db_results[id];
+            $('#description').val(meta.summary);
+            $('#bookAuthor').val(meta.author.join(' & '));
+            $('#book_title').val(meta.title);
+            var tags = '';
+            for (var i = 0; i < meta.tags.length; i++) {
+                tags = tags + meta.tags[i].title + ',';
+            }
+            $('#tags').val(tags);
+            $('#rating').val(Math.round(meta.rating.average / 2));
+            return;
+        }
+    }
+    do_search = function (keyword) {
+        show_flag = 0;
+        $('#meta-info').text(msg.loading);
+        var keyword = $('#keyword').val();
+        if (keyword) {
+            db_search_book(keyword);
+            gg_search_book(keyword);
+        }
+    }
+
+    db_search_book = function (title) {
+        var url = douban + db_search + '?q=' + title + '&fields=all&count=10';
+        $.ajax({
+            url: url,
+            type: "GET",
+            dataType: "jsonp",
+            jsonp: 'callback',
+            success: function (data) {
+                db_results = data.books;
+            },
+            error: function () {
+                $('#meta-info').html('<p class="text-danger">'+ msg.search_error+'!</p>');
+            },
+            complete: function () {
+                db_done = true;
+                show_result();
+            }
+        });
+    }
+
+    show_result = function () {
+        show_flag++;
+        if (show_flag == 1) {
+            $('#meta-info').html('<ul id="book-list" class="media-list"></ul>');
+        }
+        if (gg_done && db_done) {
+            if (!gg_results && !db_results) {
+                $('#meta-info').html('<p class="text-danger">'+ msg.no_result +'</p>');
+                return;
+            }
+        }
+        if (gg_done && gg_results.length > 0) {
+            for (var i = 0; i < gg_results.length; i++) {
+                var book = gg_results[i];
+                var book_cover;
+                if (book.volumeInfo.imageLinks) {
+                    book_cover = book.volumeInfo.imageLinks.thumbnail;
+                } else {
+                    book_cover = '/static/generic_cover.jpg';
+                }
+                var book_html = '<li class="media">' +
+                    '<img class="pull-left img-responsive" data-toggle="modal" data-target="#metaModal" src="' +
+                    book_cover + '" alt="Cover" style="width:100px;height:150px" onclick=\'javascript:get_meta("google",' +
+                    i + ')\'>' +
+                    '<div class="media-body">' +
+                    '<h4 class="media-heading"><a href="https://books.google.com/books?id=' +
+                    book.id + '" target="_blank">' + book.volumeInfo.title + '</a></h4>' +
+                    '<p>'+ msg.author +':' + book.volumeInfo.authors + '</p>' +
+                    '<p>'+ msg.publisher + ':' + book.volumeInfo.publisher + '</p>' +
+                    '<p>'+ msg.description + ':' + book.volumeInfo.description + '</p>' +
+                    '<p>'+ msg.source + ':<a href="https://books.google.com" target="_blank">Google Books</a></p>' +
+                    '</div>' +
+                    '</li>';
+                $("#book-list").append(book_html);
+            }
+            gg_done = false;
+        }
+        if (db_done && db_results.length > 0) {
+            for (var i = 0; i < db_results.length; i++) {
+                var book = db_results[i];
+                var book_html = '<li class="media">' +
+                    '<img class="pull-left img-responsive" data-toggle="modal" data-target="#metaModal" src="' +
+                    book.image + '" alt="Cover" style="width:100px;height: 150px" onclick=\'javascript:get_meta("douban",' +
+                    i + ')\'>' +
+                    '<div class="media-body">' +
+                    '<h4 class="media-heading"><a href="https://book.douban.com/subject/' +
+                    book.id + '" target="_blank">' + book.title + '</a></h4>' +
+                    '<p>' + msg.author + ':' + book.author + '</p>' +
+                    '<p>' + msg.publisher + ':' + book.publisher + '</p>' +
+                    '<p>' + msg.description + ':' + book.summary + '</p>' +
+                    '<p>' + msg.source + ':<a href="https://book.douban.com" target="_blank">Douban Books</a></p>' +
+                    '</div>' +
+                    '</li>';
+                $("#book-list").append(book_html);
+            }
+            db_done = false;
+        }
+    }
+
+    $('#do-search').click(function () {
+        var keyword = $('#keyword').val();
+        if (keyword) {
+            do_search(keyword);
+        }
+    });
+
+    $('#get_meta').click(function () {
+        var book_title = $('#book_title').val();
+        if (book_title) {
+            $('#keyword').val(book_title);
+            do_search(book_title);
+        }
+    });
+
+});
cps/static/js/libs/bootstrap-rating-input.min.js (vendored, new file, 1 line)
@@ -0,0 +1 @@
+!function(a){"use strict";function b(a){return"[data-value"+(a?"="+a:"")+"]"}function c(a,b,c){var d=c.activeIcon,e=c.inactiveIcon;a.removeClass(b?e:d).addClass(b?d:e)}function d(b,c){var d=a.extend({},i,b.data(),c);return d.inline=""===d.inline||d.inline,d.readonly=""===d.readonly||d.readonly,d.clearable===!1?d.clearableLabel="":d.clearableLabel=d.clearable,d.clearable=""===d.clearable||d.clearable,d}function e(b,c){if(c.inline)var d=a('<span class="rating-input"></span>');else var d=a('<div class="rating-input"></div>');d.addClass(b.attr("class")),d.removeClass("rating");for(var e=c.min;e<=c.max;e++)d.append('<i class="'+c.iconLib+'" data-value="'+e+'"></i>');return c.clearable&&!c.readonly&&d.append(" ").append('<a class="'+f+'"><i class="'+c.iconLib+" "+c.clearableIcon+'"/>'+c.clearableLabel+"</a>"),d}var f="rating-clear",g="."+f,h="hidden",i={min:1,max:5,"empty-value":0,iconLib:"glyphicon",activeIcon:"glyphicon-star",inactiveIcon:"glyphicon-star-empty",clearable:!1,clearableIcon:"glyphicon-remove",clearableRemain:!1,inline:!1,readonly:!1},j=function(a,b){var c=this.$input=a;this.options=d(c,b);var f=this.$el=e(c,this.options);c.addClass(h).before(f),c.attr("type","hidden"),this.highlight(c.val())};j.VERSION="0.4.0",j.DEFAULTS=i,j.prototype={clear:function(){this.setValue(this.options["empty-value"])},setValue:function(a){this.highlight(a),this.updateInput(a)},highlight:function(a,d){var e=this.options,f=this.$el;if(a>=this.options.min&&a<=this.options.max){var i=f.find(b(a));c(i.prevAll("i").andSelf(),!0,e),c(i.nextAll("i"),!1,e)}else c(f.find(b()),!1,e);d||(this.options.clearableRemain?f.find(g).removeClass(h):a&&a!=this.options["empty-value"]?f.find(g).removeClass(h):f.find(g).addClass(h))},updateInput:function(a){var b=this.$input;b.val()!=a&&b.val(a).change()}};var k=a.fn.rating=function(c){return this.filter("input[type=number]").each(function(){var d=a(this),e="object"==typeof c&&c||{},f=new j(d,e);f.options.readonly||f.$el.on("mouseenter",b(),function(){f.highlight(a(this).data("value"),!0)}).on("mouseleave",b(),function(){f.highlight(d.val(),!0)}).on("click",b(),function(){f.setValue(a(this).data("value"))}).on("click",g,function(){f.clear()})})};k.Constructor=j,a(function(){a("input.rating[type=number]").each(function(){a(this).rating()})})}(jQuery);
cps/static/js/main.js

@@ -65,6 +65,13 @@ $(function() {
         }
       });
   });
+  $("#restart_database").click(function() {
+    $.ajax({
+      dataType: 'json',
+      url: window.location.pathname+"/../../shutdown",
+      data: {"parameter":2}
+    });
+  });
   $("#perform_update").click(function() {
     $('#spinner2').show();
     $.ajax({
cps/templates/admin.html

@@ -80,6 +80,7 @@
 <div>{{_('Current commit timestamp')}}: <span>{{commit}} </span></div>
 <div class="hidden" id="update_info">{{_('Newest commit timestamp')}}: <span></span></div>
 <p></p>
+<div class="btn btn-default" id="restart_database">{{_('Reconnect to Calibre DB')}}</div>
 <div class="btn btn-default" data-toggle="modal" data-target="#RestartDialog">{{_('Restart Calibre-web')}}</div>
 <div class="btn btn-default" data-toggle="modal" data-target="#ShutdownDialog">{{_('Stop Calibre-web')}}</div>
 <div class="btn btn-default" id="check_for_update">{{_('Check for update')}}</div>
cps/templates/book_edit.html

@@ -39,7 +39,7 @@
 </div>
 <div class="form-group">
   <label for="rating">{{_('Rating')}}</label>
-  <input type="number" min="0" max="5" step="1" class="form-control" name="rating" id="rating" value="{% if book.ratings %}{{book.ratings[0].rating / 2}}{% endif %}">
+  <input type="number" name="rating" id="rating" class="rating input-lg" data-clearable="" value="{% if book.ratings %}{{(book.ratings[0].rating / 2)|int}}{% endif %}">
 </div>
 <div class="form-group">
   <label for="cover_url">{{_('Cover URL (jpg)')}}</label>
@@ -104,16 +104,56 @@
     <input name="detail_view" type="checkbox" checked> {{_('view book after edit')}}
   </label>
 </div>
+<a href="#" id="get_meta" class="btn btn-default" data-toggle="modal" data-target="#metaModal">{{_('Get Metadata')}}</a>
 <button type="submit" class="btn btn-default">{{_('Submit')}}</button>
 <a href="{{ url_for('show_book',id=book.id) }}" class="btn btn-default">{{_('Back')}}</a>
 </form>
 </div>
 {% endif %}
+<div class="modal fade" id="metaModal" tabindex="-1" role="dialog" aria-labelledby="metaModalLabel">
+  <div class="modal-dialog" role="document">
+    <div class="modal-content">
+      <div class="modal-header">
+        <button type="button" class="close" data-dismiss="modal" aria-label="Close"><span aria-hidden="true">×</span></button>
+        <h4 class="modal-title" id="metaModalLabel">{{_('Get metadata')}}</h4>
+        <form class="form-inline">
+          <div class="form-group">
+            <label class="sr-only" for="keyword">{{_('Keyword')}}</label>
+            <input type="text" class="form-control" id="keyword" placeholder="{{_(" Search keyword ")}}">
+          </div>
+          <button type="button" class="btn btn-default" id="do-search">{{_("Go!")}}</button>
+          <span>{{_('Click the cover to load metadata to the form')}}</span>
+        </form>
+      </div>
+      <div class="modal-body" id="meta-info">
+        {{_("Loading...")}}
+      </div>
+      <div class="modal-footer">
+        <button type="button" class="btn btn-default" data-dismiss="modal">{{_('Close')}}</button>
+      </div>
+    </div>
+  </div>
+</div>
 {% endblock %}

 {% block js %}
+<script>
+  var i18n_msg = {
+    'loading': {{_('Loading...')|safe|tojson}},
+    'search_error': {{_('Search error!')|safe|tojson}},
+    'no_result': {{_('No Result! Please try anonther keyword.')|safe|tojson}},
+    'author': {{_('Author')|safe|tojson}},
+    'publisher': {{_('Publisher')|safe|tojson}},
+    'description': {{_('Description')|safe|tojson}},
+    'source': {{_('Source')|safe|tojson}},
+  };
+</script>
 <script src="{{ url_for('static', filename='js/libs/typeahead.bundle.js') }}"></script>
 <script src="{{ url_for('static', filename='js/edit_books.js') }}"></script>
+<<<<<<< HEAD
+<script src="{{ url_for('static', filename='js/libs/bootstrap-rating-input.min.js') }}"></script>
+=======
+<script src="{{ url_for('static', filename='js/get_meta.js') }}"></script>
 {% endblock %}
 {% block header %}
 <link href="{{ url_for('static', filename='css/libs/typeahead.css') }}" rel="stylesheet" media="screen">
cps/templates/config_edit.html

@@ -62,7 +62,10 @@
   <label for="config_random_books">{{_('No. of random books to show')}}</label>
   <input type="number" min="1" max="30" class="form-control" name="config_random_books" id="config_random_books" value="{% if content.config_random_books != None %}{{ content.config_random_books }}{% endif %}" autocomplete="off">
 </div>
+<div class="form-group">
+  <label for="config_columns_to_ignore">{{_('Regular expression for ignoring columns')}}</label>
+  <input type="text" class="form-control" name="config_columns_to_ignore" id="config_columns_to_ignore" value="{% if content.config_columns_to_ignore != None %}{{ content.config_columns_to_ignore }}{% endif %}" autocomplete="off">
+</div>
 <div class="form-group">
   <label for="config_title_regex">{{_('Regular expression for title sorting')}}</label>
   <input type="text" class="form-control" name="config_title_regex" id="config_title_regex" value="{% if content.config_title_regex != None %}{{ content.config_title_regex }}{% endif %}" autocomplete="off">
cps/templates/feed.xml

@@ -35,6 +35,7 @@
   <uri>https://github.com/janeczku/calibre-web</uri>
 </author>

+{% if entries[0] %}
 {% for entry in entries %}
 <entry>
   <title>{{entry.title}}</title>
@@ -60,6 +61,7 @@
   {% endfor %}
 </entry>
 {% endfor %}
+{% endif %}
 {% for entry in listelements %}
 <entry>
   <title>{{entry.name}}</title>
@@ -40,6 +40,7 @@
 <div class="discover load-more">
   <h2>{{title}}</h2>
   <div class="row">
+    {% if entries[0] %}
     {% for entry in entries %}
     <div id="books" class="col-sm-3 col-lg-2 col-xs-6 book">
       <div class="cover">
@@ -76,6 +77,7 @@
       </div>
     </div>
     {% endfor %}
+    {% endif %}
   </div>
 </div>
 {% endblock %}
Binary file not shown.
@@ -325,7 +325,7 @@ msgstr "发送测试邮件时发生错误: %(res)s"

 #: cps/web.py:1816
 msgid "E-Mail settings updated"
-msgstr ""
+msgstr "E-Mail 设置已更新"

 #: cps/web.py:1817
 msgid "Edit mail settings"
@@ -357,11 +357,11 @@ msgstr "编辑元数据"
 #: cps/web.py:2162
 #, python-format
 msgid "File extension \"%s\" is not allowed to be uploaded to this server"
-msgstr ""
+msgstr "不能上传后缀为 \"%s\" 的文件到此服务器"

 #: cps/web.py:2168
 msgid "File to be uploaded must have an extension"
-msgstr ""
+msgstr "要上传的文件必须有一个后缀"

 #: cps/web.py:2185
 #, python-format
cps/ub.py (15 changes)
@@ -11,6 +11,7 @@ import logging
 from werkzeug.security import generate_password_hash
 from flask_babel import gettext as _
 import json
+#from builtins import str

 dbpath = os.path.join(os.path.normpath(os.path.dirname(os.path.realpath(__file__)) + os.sep + ".." + os.sep), "app.db")
 engine = create_engine('sqlite:///{0}'.format(dbpath), echo=False)
@@ -92,7 +93,7 @@ class UserBase:
         return False

     def get_id(self):
-        return unicode(self.id)
+        return str(self.id)

     def filter_language(self):
         return self.default_language
@@ -269,6 +270,7 @@ class Settings(Base):
     config_anonbrowse = Column(SmallInteger, default=0)
     config_public_reg = Column(SmallInteger, default=0)
     config_default_role = Column(SmallInteger, default=0)
+    config_columns_to_ignore = Column(String)
     config_use_google_drive = Column(Boolean)
     config_google_drive_client_id = Column(String)
     config_google_drive_client_secret = Column(String)
@@ -301,6 +303,7 @@ class Config:
         self.config_anonbrowse = data.config_anonbrowse
         self.config_public_reg = data.config_public_reg
         self.config_default_role = data.config_default_role
+        self.config_columns_to_ignore = data.config_columns_to_ignore
         self.config_use_google_drive = data.config_use_google_drive
         self.config_google_drive_client_id = data.config_google_drive_client_id
         self.config_google_drive_client_secret = data.config_google_drive_client_secret
@@ -406,6 +409,12 @@ def migrate_Database():
         conn.execute("ALTER TABLE Settings ADD column `config_google_drive_calibre_url_base` INTEGER DEFAULT 0")
         conn.execute("ALTER TABLE Settings ADD column `config_google_drive_folder` String DEFAULT ''")
         conn.execute("ALTER TABLE Settings ADD column `config_google_drive_watch_changes_response` String DEFAULT ''")
+    try:
+        session.query(exists().where(Settings.config_columns_to_ignore)).scalar()
+    except exc.OperationalError:
+        conn = engine.connect()
+        conn.execute("ALTER TABLE Settings ADD column `config_columns_to_ignore` String DEFAULT ''")
+        session.commit()
     try:
         session.query(exists().where(Settings.config_default_role)).scalar()
         session.commit()
@@ -484,7 +493,7 @@ def create_anonymous_user():
     session.add(user)
     try:
         session.commit()
-    except:
+    except Exception as e:
         session.rollback()
         pass

@@ -503,7 +512,7 @@ def create_admin_user():
     session.add(user)
     try:
         session.commit()
-    except:
+    except Exception as e:
         session.rollback()
         pass

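The migrate_Database() additions follow the module's probe-then-ALTER pattern: query the new column, and if SQLite raises OperationalError the column is missing and gets added on the fly. A minimal self-contained sketch of the same idea (table and column declaration here are illustrative, using the standard-library sqlite3 instead of SQLAlchemy):

    import sqlite3

    def ensure_column(conn, table, column, decl):
        # Probe for the column; sqlite3 raises OperationalError if it is missing.
        try:
            conn.execute('SELECT %s FROM %s LIMIT 1' % (column, table))
        except sqlite3.OperationalError:
            conn.execute('ALTER TABLE %s ADD COLUMN %s %s' % (table, column, decl))

    conn = sqlite3.connect(':memory:')
    conn.execute('CREATE TABLE settings (id INTEGER PRIMARY KEY)')
    ensure_column(conn, 'settings', 'config_columns_to_ignore', "TEXT DEFAULT ''")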
cps/web.py (116 changes)
@@ -21,9 +21,8 @@ from sqlalchemy.exc import IntegrityError
 from sqlalchemy import __version__ as sqlalchemyVersion
 from math import ceil
 from flask_login import LoginManager, login_user, logout_user, login_required, current_user
-from flask_login import __version__ as flask_loginVersion
 from flask_principal import Principal, Identity, AnonymousIdentity, identity_changed
-from flask_login import __version__ as flask_principalVersion
+from flask_principal import __version__ as flask_principalVersion
 from flask_babel import Babel
 from flask_babel import gettext as _
 import requests
@@ -59,6 +58,18 @@ import threading

 from tornado import version as tornadoVersion

+try:
+    from urllib.parse import quote
+    from imp import reload
+    from past.builtins import xrange
+except ImportError as e:
+    from urllib import quote
+
+try:
+    from flask_login import __version__ as flask_loginVersion
+except ImportError, e:
+    from flask_login.__about__ import __version__ as flask_loginVersion
+
 import time

 current_milli_time = lambda: int(round(time.time() * 1000))
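Two more compatibility shims arrive here: urllib.parse.quote moved in Python 3, and newer flask-login releases stopped exporting __version__ from the package root, hence the fallback to flask_login.__about__. Note that the second added block still uses the Python-2-only 'except ImportError, e:' spelling, which Python 3 rejects outright. The standard-library half of the pattern, in the portable spelling:

    try:
        from urllib.parse import quote   # Python 3 location
    except ImportError:
        from urllib import quote         # Python 2 fallback

    print(quote('metadata path with spaces'))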
@@ -67,7 +78,7 @@ try:
     from wand.image import Image

     use_generic_pdf_cover = False
-except ImportError, e:
+except ImportError as e:
     use_generic_pdf_cover = True
 from cgi import escape

@@ -209,6 +220,15 @@ lm.anonymous_user = ub.Anonymous
 app.secret_key = 'A0Zr98j/3yX R~XHH!jmN]LWX/,?RT'
 db.setup_db()

+if config.config_log_level == logging.DEBUG :
+    logging.getLogger("sqlalchemy.engine").addHandler(file_handler)
+    logging.getLogger("sqlalchemy.engine").setLevel(logging.INFO)
+    logging.getLogger("sqlalchemy.pool").addHandler(file_handler)
+    logging.getLogger("sqlalchemy.pool").setLevel(config.config_log_level)
+    logging.getLogger("sqlalchemy.orm").addHandler(file_handler)
+    logging.getLogger("sqlalchemy.orm").setLevel(config.config_log_level)
+

 def is_gdrive_ready():
     return os.path.exists('settings.yaml') and os.path.exists('gdrive_credentials')

@@ -361,7 +381,7 @@ def shortentitle_filter(s):
 def mimetype_filter(val):
     try:
         s = mimetypes.types_map['.' + val]
-    except:
+    except Exception as e:
         s = 'application/octet-stream'
     return s

@@ -643,7 +663,13 @@ def feed_hot():
     hot_books = all_books.offset(off).limit(config.config_books_per_page)
     entries = list()
     for book in hot_books:
-        entries.append(db.session.query(db.Books).filter(filter).filter(db.Books.id == book.Downloads.book_id).first())
+        downloadBook = db.session.query(db.Books).filter(db.Books.id == book.Downloads.book_id).first()
+        if downloadBook:
+            entries.append(
+                db.session.query(db.Books).filter(filter).filter(db.Books.id == book.Downloads.book_id).first())
+        else:
+            ub.session.query(ub.Downloads).filter(book.Downloads.book_id == ub.Downloads.book_id).delete()
+            ub.session.commit()
     numBooks = entries.__len__()
     pagination = Pagination((int(off) / (int(config.config_books_per_page)) + 1), config.config_books_per_page, numBooks)
     xml = render_title_template('feed.xml', entries=entries, pagination=pagination)
@@ -881,7 +907,7 @@ def get_updater_status():
     elif request.method == "GET":
         try:
             status['status']=helper.updater_thread.get_update_status()
-        except:
+        except Exception as e:
             status['status'] = 7
     return json.dumps(status)

@@ -896,7 +922,7 @@ def get_languages_json():
         try:
             cur_l = LC.parse(lang.lang_code)
             lang.name = cur_l.get_language_name(get_locale())
-        except:
+        except Exception as e:
             lang.name = _(isoLanguages.get(part3=lang.lang_code).name)
     entries = [s for s in languages if query in s.name.lower()]
     json_dumps = json.dumps([dict(name=r.name) for r in entries])
@@ -966,9 +992,13 @@ def hot_books(page):
     hot_books = all_books.offset(off).limit(config.config_books_per_page)
     entries = list()
     for book in hot_books:
-        entry=db.session.query(db.Books).filter(filter).filter(db.Books.id == book.Downloads.book_id).first()
-        if entry:
-            entries.append(entry)
+        downloadBook = db.session.query(db.Books).filter(db.Books.id == book.Downloads.book_id).first()
+        if downloadBook:
+            entries.append(
+                db.session.query(db.Books).filter(filter).filter(db.Books.id == book.Downloads.book_id).first())
+        else:
+            ub.session.query(ub.Downloads).filter(book.Downloads.book_id == ub.Downloads.book_id).delete()
+            ub.session.commit()
     numBooks = entries.__len__()
     pagination = Pagination(page, config.config_books_per_page, numBooks)
     return render_title_template('index.html', random=random, entries=entries, pagination=pagination,
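Both hot-book views (the OPDS feed above and this HTML page) now apply the same rule: look the book up without the visibility filter first, and if it no longer exists in the Calibre library, delete the stale Downloads rows pointing at it instead of rendering a hole. A condensed stand-in for that pattern (plain data structures instead of the real sessions and models):

    def collect_hot_books(download_rows, books, visible):
        # download_rows: iterable of book ids from the downloads table;
        # books: {id: book} lookup; visible: predicate mirroring the view filter.
        entries, orphans = [], []
        for book_id in download_rows:
            book = books.get(book_id)    # lookup without the visibility filter
            if book is None:
                orphans.append(book_id)  # stale download row: schedule cleanup
            elif visible(book):
                entries.append(book)
        return entries, orphans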
@@ -1058,13 +1088,13 @@ def language_overview():
             try:
                 cur_l = LC.parse(lang.lang_code)
                 lang.name = cur_l.get_language_name(get_locale())
-            except:
+            except Exception as e:
                 lang.name = _(isoLanguages.get(part3=lang.lang_code).name)
     else:
         try:
             langfound = 1
             cur_l = LC.parse(current_user.filter_language())
-        except:
+        except Exception as e:
             langfound = 0
         languages = db.session.query(db.Languages).filter(
             db.Languages.lang_code == current_user.filter_language()).all()
@@ -1088,7 +1118,7 @@ def language(name, page):
     try:
         cur_l = LC.parse(name)
         name = cur_l.get_language_name(get_locale())
-    except:
+    except Exception as e:
         name = _(isoLanguages.get(part3=name).name)
     return render_title_template('index.html', random=random, entries=entries, pagination=pagination,
                                  title=_(u"Language: %(name)s", name=name))
@@ -1149,10 +1179,19 @@ def show_book(id):
         try:
             entries.languages[index].language_name = LC.parse(entries.languages[index].lang_code).get_language_name(
                 get_locale())
-        except:
+        except Exception as e:
             entries.languages[index].language_name = _(
                 isoLanguages.get(part3=entries.languages[index].lang_code).name)
-    cc = db.session.query(db.Custom_Columns).filter(db.Custom_Columns.datatype.notin_(db.cc_exceptions)).all()
+    tmpcc = db.session.query(db.Custom_Columns).filter(db.Custom_Columns.datatype.notin_(db.cc_exceptions)).all()
+
+    if config.config_columns_to_ignore:
+        cc=[]
+        for col in tmpcc:
+            r= re.compile(config.config_columns_to_ignore)
+            if r.match(col.label):
+                cc.append(col)
+    else:
+        cc=tmpcc
     book_in_shelfs = []
     shelfs = ub.session.query(ub.BookShelf).filter(ub.BookShelf.book_id == id).all()
     for entry in shelfs:
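Two small observations on the new column filter: it recompiles the same regular expression on every loop iteration, and despite the setting's label ("ignoring columns") the code keeps the columns whose labels match. A sketch of the equivalent filter with the pattern compiled once (whether a match should include or exclude a column is whatever the setting is meant to do; the helper name is ours):

    import re

    def filter_columns(columns, pattern):
        # columns: iterable of objects with a .label attribute.
        if not pattern:
            return list(columns)
        r = re.compile(pattern)  # compile once, not once per column
        return [col for col in columns if r.match(col.label)]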
@@ -1199,6 +1238,8 @@ def stats():
                          stdin=subprocess.PIPE)
     p.wait()
     for lines in p.stdout.readlines():
+        if type(lines) is bytes:
+            lines = lines.decode('utf-8')
         if re.search('Amazon kindlegen\(', lines):
             versions['KindlegenVersion'] = lines
     versions['PythonVersion'] = sys.version
@@ -1334,6 +1375,11 @@ def shutdown():
         showtext['text'] = _(u'Performing shutdown of server, please close window')
         return json.dumps(showtext)
     else:
+        if task == 2:
+            db.session.close()
+            db.engine.dispose()
+            db.setup_db()
+            return json.dumps({})
         abort(404)

 @app.route("/update")
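This is the server side of the new "Reconnect to Calibre DB" button: the AJAX handler in main.js posts {"parameter": 2} to /shutdown, and task 2 closes the session, disposes the engine's connection pool, and re-runs setup_db() against the possibly replaced metadata.db. A minimal sketch of the dispose-and-reopen idea in plain SQLAlchemy (the names engine, Session, and reconnect are ours):

    from sqlalchemy import create_engine
    from sqlalchemy.orm import sessionmaker

    engine = create_engine('sqlite://')
    Session = sessionmaker(bind=engine)
    session = Session()

    def reconnect(db_url):
        # Drop every pooled connection, then bind fresh ones to the database.
        global engine, session
        session.close()
        engine.dispose()
        engine = create_engine(db_url)
        session = sessionmaker(bind=engine)()

    reconnect('sqlite://')  # e.g. after metadata.db was swapped on disk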
@@ -1396,7 +1442,7 @@ def advanced_search():
             try:
                 cur_l = LC.parse(lang.lang_code)
                 lang.name = cur_l.get_language_name(get_locale())
-            except:
+            except Exception as e:
                 lang.name = _(isoLanguages.get(part3=lang.lang_code).name)
             searchterm.extend(language.name for language in language_names)
         searchterm = " + ".join(filter(None, searchterm))
@@ -1428,7 +1474,7 @@ def advanced_search():
             try:
                 cur_l = LC.parse(lang.lang_code)
                 lang.name = cur_l.get_language_name(get_locale())
-            except:
+            except Exception as e:
                 lang.name = _(isoLanguages.get(part3=lang.lang_code).name)
     else:
         languages = None
@@ -1553,22 +1599,22 @@ def read_book(book_id, format):
         zfile.close()
         return render_title_template('read.html', bookid=book_id, title=_(u"Read a Book"))
     elif format.lower() == "pdf":
-        all_name = str(book_id) + "/" + urllib.quote(book.data[0].name) + ".pdf"
-        tmp_file = os.path.join(book_dir, urllib.quote(book.data[0].name)) + ".pdf"
+        all_name = str(book_id) + "/" + book.data[0].name + ".pdf"
+        tmp_file = os.path.join(book_dir, book.data[0].name) + ".pdf"
         if not os.path.exists(tmp_file):
             pdf_file = os.path.join(config.config_calibre_dir, book.path, book.data[0].name) + ".pdf"
             copyfile(pdf_file, tmp_file)
         return render_title_template('readpdf.html', pdffile=all_name, title=_(u"Read a Book"))
     elif format.lower() == "txt":
-        all_name = str(book_id) + "/" + urllib.quote(book.data[0].name) + ".txt"
-        tmp_file = os.path.join(book_dir, urllib.quote(book.data[0].name)) + ".txt"
+        all_name = str(book_id) + "/" + book.data[0].name + ".txt"
+        tmp_file = os.path.join(book_dir, book.data[0].name) + ".txt"
         if not os.path.exists(all_name):
             txt_file = os.path.join(config.config_calibre_dir, book.path, book.data[0].name) + ".txt"
             copyfile(txt_file, tmp_file)
         return render_title_template('readtxt.html', txtfile=all_name, title=_(u"Read a Book"))
     elif format.lower() == "cbr":
-        all_name = str(book_id) + "/" + urllib.quote(book.data[0].name) + ".cbr"
-        tmp_file = os.path.join(book_dir, urllib.quote(book.data[0].name)) + ".cbr"
+        all_name = str(book_id) + "/" + book.data[0].name + ".cbr"
+        tmp_file = os.path.join(book_dir, book.data[0].name) + ".cbr"
         if not os.path.exists(all_name):
             cbr_file = os.path.join(config.config_calibre_dir, book.path, book.data[0].name) + ".cbr"
             copyfile(cbr_file, tmp_file)
@@ -1635,7 +1681,7 @@ def register():
         try:
             ub.session.add(content)
             ub.session.commit()
-        except:
+        except Exception as e:
             ub.session.rollback()
             flash(_(u"An unknown error occured. Please try again later."), category="error")
             return render_title_template('register.html', title=_(u"register"))
@@ -1760,7 +1806,7 @@ def create_shelf():
             ub.session.add(shelf)
             ub.session.commit()
             flash(_(u"Shelf %(title)s created", title=to_save["title"]), category="success")
-        except:
+        except Exception as e:
             flash(_(u"There was an error"), category="error")
         return render_title_template('shelf_edit.html', shelf=shelf, title=_(u"create a shelf"))
     else:
@@ -1788,7 +1834,7 @@ def edit_shelf(shelf_id):
         try:
             ub.session.commit()
             flash(_(u"Shelf %(title)s changed", title=to_save["title"]), category="success")
-        except:
+        except Exception as e:
             flash(_(u"There was an error"), category="error")
         return render_title_template('shelf_edit.html', shelf=shelf, title=_(u"Edit a shelf"))
     else:
@@ -1876,7 +1922,7 @@ def profile():
         try:
             cur_l = LC.parse(lang.lang_code)
             lang.name = cur_l.get_language_name(get_locale())
-        except:
+        except Exception as e:
             lang.name = _(isoLanguages.get(part3=lang.lang_code).name)
     translations = babel.list_translations() + [LC('en')]
     for book in content.downloads:
@@ -2007,6 +2053,8 @@ def configuration_helper(origin):
         reboot_required = True
     if "config_calibre_web_title" in to_save:
         content.config_calibre_web_title = to_save["config_calibre_web_title"]
+    if "config_columns_to_ignore" in to_save:
+        content.config_columns_to_ignore = to_save["config_columns_to_ignore"]
     if "config_title_regex" in to_save:
        if content.config_title_regex != to_save["config_title_regex"]:
            content.config_title_regex = to_save["config_title_regex"]
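
The two added lines follow the pattern configuration_helper() uses for every setting: copy a posted form field onto the config row only if it was present in the request. A minimal sketch of that pattern in isolation (the Settings class and the to_save dict here are illustrative stand-ins for the real config table row and the request form):

# Guarded copy pattern from configuration_helper(); names are illustrative.
class Settings(object):
    config_calibre_web_title = "Calibre Web"
    config_columns_to_ignore = ""

content = Settings()
to_save = {"config_calibre_web_title": "My Library",
           "config_columns_to_ignore": "custom_column_[0-9]+"}
for key in ("config_calibre_web_title", "config_columns_to_ignore"):
    if key in to_save:  # only touch fields that were actually posted
        setattr(content, key, to_save[key])
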
@@ -2086,7 +2134,7 @@ def new_user():
     try:
         cur_l = LC.parse(lang.lang_code)
         lang.name = cur_l.get_language_name(get_locale())
-    except:
+    except Exception as e:
         lang.name = _(isoLanguages.get(part3=lang.lang_code).name)
 translations = [LC('en')] + babel.list_translations()
 if request.method == "POST":
@@ -2186,7 +2234,7 @@ def edit_user(user_id):
     try:
         cur_l = LC.parse(lang.lang_code)
         lang.name = cur_l.get_language_name(get_locale())
-    except:
+    except Exception as e:
         lang.name = _(isoLanguages.get(part3=lang.lang_code).name)
 translations = babel.list_translations() + [LC('en')]
 for book in content.downloads:
@@ -2313,7 +2361,7 @@ def edit_book(book_id):
     try:
         book.languages[index].language_name = LC.parse(book.languages[index].lang_code).get_language_name(
             get_locale())
-    except:
+    except Exception as e:
         book.languages[index].language_name = _(isoLanguages.get(part3=book.languages[index].lang_code).name)
 for author in book.authors:
     author_names.append(author.name)
@@ -2363,7 +2411,7 @@ def edit_book(book_id):
 for lang in languages:
     try:
         lang.name = LC.parse(lang.lang_code).get_language_name(get_locale()).lower()
-    except:
+    except Exception as e:
         lang.name = _(isoLanguages.get(part3=lang.lang_code).name).lower()
 for inp_lang in input_languages:
     if inp_lang == lang.name:
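
All of the hunks above make the same change: a bare except: becomes except Exception as e:. A bare except also swallows SystemExit and KeyboardInterrupt, while except Exception limits the handler to ordinary errors; the as form additionally works on both Python 2 and 3, unlike the comma form (except OSError, e:) that the upload() hunks below remove. A small illustration:

# Python 2-only: "except OSError, e:" is a SyntaxError on Python 3.
# The portable, explicit form the diff moves to:
try:
    1 / 0
except Exception as e:
    print("calculation failed: %s" % e)
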
@@ -2554,12 +2602,12 @@ def upload():
         return redirect(url_for('index'))
     try:
         copyfile(meta.file_path, saved_filename)
-    except OSError, e:
+    except OSError as e:
         flash(_(u"Failed to store file %s (Permission denied)." % saved_filename), category="error")
         return redirect(url_for('index'))
     try:
         os.unlink(meta.file_path)
-    except OSError, e:
+    except OSError as e:
         flash(_(u"Failed to delete file %s (Permission denied)." % meta.file_path), category="warning")
 
     file_size = os.path.getsize(saved_filename)
@@ -2591,7 +2639,7 @@ def upload():
         db.session.add(db_language)
     # combine path and normalize path from windows systems
     path = os.path.join(author_dir, title_dir).replace('\\','/')
-    db_book = db.Books(title, "", db_author.sort, datetime.datetime.now(), datetime.datetime(101, 01, 01), 1,
+    db_book = db.Books(title, "", db_author.sort, datetime.datetime.now(), datetime.datetime(101, 1, 1), 1,
                        datetime.datetime.now(), path, has_cover, db_author, [], db_language)
     db_book.authors.append(db_author)
     if db_language is not None:
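
The change from datetime.datetime(101, 01, 01) to datetime.datetime(101, 1, 1) is more than cosmetic: in Python 2 a leading zero makes an integer literal octal, and Python 3 rejects the bare 01 spelling as a SyntaxError, so plain decimal (or the explicit 0o prefix) is the only portable form:

import datetime

print(datetime.datetime(101, 1, 1))  # 0101-01-01 00:00:00
print(0o10)                          # 8 -- explicit octal, valid on Python 2 and 3
# print(010)                         # octal 8 on Python 2, SyntaxError on Python 3
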
 getVendor.sh (new, executable) | 3 +++
@@ -0,0 +1,3 @@
+#!/bin/bash -e
+
+pip install --target ./vendor -r requirements.txt
 readme.md | 13 +++++++------
@@ -37,7 +37,6 @@ If your calibre web is using https (this can be done for free using cloudflare),
 
 9. Click enable watch of metadata.db
 9. Note that this expires after a week, so will need to be manually refresh (TODO: Add an authenticated call to do this via crontab or something similar or maybe in before_request method)
-
 ##About
 
 Calibre Web is a web app providing a clean interface for browsing, reading and downloading eBooks using an existing [Calibre](https://calibre-ebook.com) database.
@@ -46,7 +45,8 @@ Calibre Web is a web app providing a clean interface for browsing, reading and d
 
 ![screenshot](https://raw.githubusercontent.com/janeczku/docker-calibre-web/master/screenshot.png)
 
-##Features
+## Features
+
 - Bootstrap 3 HTML5 interface
 - full graphical setup
 - User management
@@ -68,10 +68,11 @@ Calibre Web is a web app providing a clean interface for browsing, reading and d
 
 ## Quick start
 
-1. Execute the command: `python cps.py`
-2. Point your browser to `http://localhost:8083` or `http://localhost:8083/opds` for the OPDS catalog
-3. Set `Location of Calibre database` to the path of the folder where your Calibre library (metadata.db) lives, push "submit" button
-4. Go to Login page
+1. Install required dependencies by executing `pip install -r requirements.txt`
+2. Execute the command: `python cps.py` (or `nohup python cps.py` - recommended if you want to exit the terminal window)
+3. Point your browser to `http://localhost:8083` or `http://localhost:8083/opds` for the OPDS catalog
+4. Set `Location of Calibre database` to the path of the folder where your Calibre library (metadata.db) lives, push "submit" button
+5. Go to Login page
 
 **Default admin login:**
 *Username:* admin
 requirements.txt | 13 +++++++++++++
@@ -9,3 +9,16 @@ PyYAML==3.12
 rsa==3.4.2
 six==1.10.0
 uritemplate==3.0.0
+Babel>=1.3
+Flask>=0.11
+Flask-Babel==0.11.1
+Flask-Login>=0.3.2
+Flask-Principal>=0.3.2
+iso-639>=0.4.5
+PyPDF2==1.26.0
+pytz>=2016.10
+requests>=2.11.1
+SQLAlchemy>=0.8.4
+tornado>=4.1
+Wand>=0.4.4
+#future
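
These additions pin or floor the packages that were previously committed under vendor/ (the deletions below); getVendor.sh above installs them into ./vendor instead. A hedged sanity check, assuming setuptools' pkg_resources is importable, that the interpreter can actually see the listed versions:

import pkg_resources

for req in ("PyPDF2==1.26.0", "Flask>=0.11", "SQLAlchemy>=0.8.4", "tornado>=4.1"):
    try:
        pkg_resources.require(req)  # raises DistributionNotFound / VersionConflict
        print("%s: OK" % req)
    except Exception as e:
        print("%s: %s" % (req, e))
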
 vendor/LICENSE_flask_login (vendored) | 22 ----------------------
@@ -1,22 +0,0 @@
-Copyright (c) 2011 Matthew Frazier
-
-Permission is hereby granted, free of charge, to any person
-obtaining a copy of this software and associated documentation
-files (the "Software"), to deal in the Software without
-restriction, including without limitation the rights to use,
-copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the
-Software is furnished to do so, subject to the following
-conditions:
-
-The above copyright notice and this permission notice shall be
-included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
-OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
-HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
-WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
-OTHER DEALINGS IN THE SOFTWARE.
 vendor/LICENSE_flask_principal (vendored) | 22 ----------------------
@@ -1,22 +0,0 @@
-Copyright (c) 2012 Ali Afshar
-
-Permission is hereby granted, free of charge, to any person
-obtaining a copy of this software and associated documentation
-files (the "Software"), to deal in the Software without
-restriction, including without limitation the rights to use,
-copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the
-Software is furnished to do so, subject to the following
-conditions:
-
-The above copyright notice and this permission notice shall be
-included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
-OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
-HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
-WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
-OTHER DEALINGS IN THE SOFTWARE.
 vendor/LICENSE_itsdangerous (vendored) | 31 -------------------------------
@@ -1,31 +0,0 @@
-Copyright (c) 2011 by Armin Ronacher and the Django Software Foundation.
-
-Some rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are
-met:
-
-* Redistributions of source code must retain the above copyright
-  notice, this list of conditions and the following disclaimer.
-
-* Redistributions in binary form must reproduce the above
-  copyright notice, this list of conditions and the following
-  disclaimer in the documentation and/or other materials provided
-  with the distribution.
-
-* The names of the contributors may not be used to endorse or
-  promote products derived from this software without specific
-  prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 vendor/PyPDF2/__init__.py (vendored) | 5 -----
@@ -1,5 +0,0 @@
-from .pdf import PdfFileReader, PdfFileWriter
-from .merger import PdfFileMerger
-from .pagerange import PageRange, parse_filename_page_ranges
-from ._version import __version__
-__all__ = ["pdf", "PdfFileMerger"]
 vendor/PyPDF2/_version.py (vendored) | 1 -
@@ -1 +0,0 @@
-__version__ = '1.26.0'
 vendor/PyPDF2/filters.py (vendored) | 362 --------------------
@@ -1,362 +0,0 @@
-# vim: sw=4:expandtab:foldmethod=marker
-#
-# Copyright (c) 2006, Mathieu Fenniak
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright notice,
-# this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright notice,
-# this list of conditions and the following disclaimer in the documentation
-# and/or other materials provided with the distribution.
-# * The name of the author may not be used to endorse or promote products
-# derived from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
-# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-# POSSIBILITY OF SUCH DAMAGE.
-
-
-"""
-Implementation of stream filters for PDF.
-"""
-__author__ = "Mathieu Fenniak"
-__author_email__ = "biziqe@mathieu.fenniak.net"
-
-from .utils import PdfReadError, ord_, chr_
-from sys import version_info
-if version_info < ( 3, 0 ):
-    from cStringIO import StringIO
-else:
-    from io import StringIO
-import struct
-
-try:
-    import zlib
-
-    def decompress(data):
-        return zlib.decompress(data)
-
-    def compress(data):
-        return zlib.compress(data)
-
-except ImportError:
-    # Unable to import zlib. Attempt to use the System.IO.Compression
-    # library from the .NET framework. (IronPython only)
-    import System
-    from System import IO, Collections, Array
-
-    def _string_to_bytearr(buf):
-        retval = Array.CreateInstance(System.Byte, len(buf))
-        for i in range(len(buf)):
-            retval[i] = ord(buf[i])
-        return retval
-
-    def _bytearr_to_string(bytes):
-        retval = ""
-        for i in range(bytes.Length):
-            retval += chr(bytes[i])
-        return retval
-
-    def _read_bytes(stream):
-        ms = IO.MemoryStream()
-        buf = Array.CreateInstance(System.Byte, 2048)
-        while True:
-            bytes = stream.Read(buf, 0, buf.Length)
-            if bytes == 0:
-                break
-            else:
-                ms.Write(buf, 0, bytes)
-        retval = ms.ToArray()
-        ms.Close()
-        return retval
-
-    def decompress(data):
-        bytes = _string_to_bytearr(data)
-        ms = IO.MemoryStream()
-        ms.Write(bytes, 0, bytes.Length)
-        ms.Position = 0  # fseek 0
-        gz = IO.Compression.DeflateStream(ms, IO.Compression.CompressionMode.Decompress)
-        bytes = _read_bytes(gz)
-        retval = _bytearr_to_string(bytes)
-        gz.Close()
-        return retval
-
-    def compress(data):
-        bytes = _string_to_bytearr(data)
-        ms = IO.MemoryStream()
-        gz = IO.Compression.DeflateStream(ms, IO.Compression.CompressionMode.Compress, True)
-        gz.Write(bytes, 0, bytes.Length)
-        gz.Close()
-        ms.Position = 0  # fseek 0
-        bytes = ms.ToArray()
-        retval = _bytearr_to_string(bytes)
-        ms.Close()
-        return retval
-
-
-class FlateDecode(object):
-    def decode(data, decodeParms):
-        data = decompress(data)
-        predictor = 1
-        if decodeParms:
-            try:
-                predictor = decodeParms.get("/Predictor", 1)
-            except AttributeError:
-                pass  # usually an array with a null object was read
-
-        # predictor 1 == no predictor
-        if predictor != 1:
-            columns = decodeParms["/Columns"]
-            # PNG prediction:
-            if predictor >= 10 and predictor <= 15:
-                output = StringIO()
-                # PNG prediction can vary from row to row
-                rowlength = columns + 1
-                assert len(data) % rowlength == 0
-                prev_rowdata = (0,) * rowlength
-                for row in range(len(data) // rowlength):
-                    rowdata = [ord_(x) for x in data[(row*rowlength):((row+1)*rowlength)]]
-                    filterByte = rowdata[0]
-                    if filterByte == 0:
-                        pass
-                    elif filterByte == 1:
-                        for i in range(2, rowlength):
-                            rowdata[i] = (rowdata[i] + rowdata[i-1]) % 256
-                    elif filterByte == 2:
-                        for i in range(1, rowlength):
-                            rowdata[i] = (rowdata[i] + prev_rowdata[i]) % 256
-                    else:
-                        # unsupported PNG filter
-                        raise PdfReadError("Unsupported PNG filter %r" % filterByte)
-                    prev_rowdata = rowdata
-                    output.write(''.join([chr(x) for x in rowdata[1:]]))
-                data = output.getvalue()
-            else:
-                # unsupported predictor
-                raise PdfReadError("Unsupported flatedecode predictor %r" % predictor)
-        return data
-    decode = staticmethod(decode)
-
-    def encode(data):
-        return compress(data)
-    encode = staticmethod(encode)
-
-
-class ASCIIHexDecode(object):
-    def decode(data, decodeParms=None):
-        retval = ""
-        char = ""
-        x = 0
-        while True:
-            c = data[x]
-            if c == ">":
-                break
-            elif c.isspace():
-                x += 1
-                continue
-            char += c
-            if len(char) == 2:
-                retval += chr(int(char, base=16))
-                char = ""
-            x += 1
-        assert char == ""
-        return retval
-    decode = staticmethod(decode)
-
-
-class LZWDecode(object):
-    """Taken from:
-    http://www.java2s.com/Open-Source/Java-Document/PDF/PDF-Renderer/com/sun/pdfview/decode/LZWDecode.java.htm
-    """
-    class decoder(object):
-        def __init__(self, data):
-            self.STOP=257
-            self.CLEARDICT=256
-            self.data=data
-            self.bytepos=0
-            self.bitpos=0
-            self.dict=[""]*4096
-            for i in range(256):
-                self.dict[i]=chr(i)
-            self.resetDict()
-
-        def resetDict(self):
-            self.dictlen=258
-            self.bitspercode=9
-
-        def nextCode(self):
-            fillbits=self.bitspercode
-            value=0
-            while fillbits>0 :
-                if self.bytepos >= len(self.data):
-                    return -1
-                nextbits=ord(self.data[self.bytepos])
-                bitsfromhere=8-self.bitpos
-                if bitsfromhere>fillbits:
-                    bitsfromhere=fillbits
-                value |= (((nextbits >> (8-self.bitpos-bitsfromhere)) &
-                           (0xff >> (8-bitsfromhere))) <<
-                          (fillbits-bitsfromhere))
-                fillbits -= bitsfromhere
-                self.bitpos += bitsfromhere
-                if self.bitpos >=8:
-                    self.bitpos=0
-                    self.bytepos = self.bytepos+1
-            return value
-
-        def decode(self):
-            """ algorithm derived from:
-            http://www.rasip.fer.hr/research/compress/algorithms/fund/lz/lzw.html
-            and the PDFReference
-            """
-            cW = self.CLEARDICT;
-            baos=""
-            while True:
-                pW = cW;
-                cW = self.nextCode();
-                if cW == -1:
-                    raise PdfReadError("Missed the stop code in LZWDecode!")
-                if cW == self.STOP:
-                    break;
-                elif cW == self.CLEARDICT:
-                    self.resetDict();
-                elif pW == self.CLEARDICT:
-                    baos+=self.dict[cW]
-                else:
-                    if cW < self.dictlen:
-                        baos += self.dict[cW]
-                        p=self.dict[pW]+self.dict[cW][0]
-                        self.dict[self.dictlen]=p
-                        self.dictlen+=1
-                    else:
-                        p=self.dict[pW]+self.dict[pW][0]
-                        baos+=p
-                        self.dict[self.dictlen] = p;
-                        self.dictlen+=1
-                    if (self.dictlen >= (1 << self.bitspercode) - 1 and
-                            self.bitspercode < 12):
-                        self.bitspercode+=1
-            return baos
-
-    @staticmethod
-    def decode(data,decodeParams=None):
-        return LZWDecode.decoder(data).decode()
-
-
-class ASCII85Decode(object):
-    def decode(data, decodeParms=None):
-        if version_info < ( 3, 0 ):
-            retval = ""
-            group = []
-            x = 0
-            hitEod = False
-            # remove all whitespace from data
-            data = [y for y in data if not (y in ' \n\r\t')]
-            while not hitEod:
-                c = data[x]
-                if len(retval) == 0 and c == "<" and data[x+1] == "~":
-                    x += 2
-                    continue
-                #elif c.isspace():
-                #    x += 1
-                #    continue
-                elif c == 'z':
-                    assert len(group) == 0
-                    retval += '\x00\x00\x00\x00'
-                    x += 1
-                    continue
-                elif c == "~" and data[x+1] == ">":
-                    if len(group) != 0:
-                        # cannot have a final group of just 1 char
-                        assert len(group) > 1
-                        cnt = len(group) - 1
-                        group += [ 85, 85, 85 ]
-                        hitEod = cnt
-                    else:
-                        break
-                else:
-                    c = ord(c) - 33
-                    assert c >= 0 and c < 85
-                    group += [ c ]
-                if len(group) >= 5:
-                    b = group[0] * (85**4) + \
-                        group[1] * (85**3) + \
-                        group[2] * (85**2) + \
-                        group[3] * 85 + \
-                        group[4]
-                    assert b < (2**32 - 1)
-                    c4 = chr((b >> 0) % 256)
-                    c3 = chr((b >> 8) % 256)
-                    c2 = chr((b >> 16) % 256)
-                    c1 = chr(b >> 24)
-                    retval += (c1 + c2 + c3 + c4)
-                    if hitEod:
-                        retval = retval[:-4+hitEod]
-                    group = []
-                x += 1
-            return retval
-        else:
-            if isinstance(data, str):
-                data = data.encode('ascii')
-            n = b = 0
-            out = bytearray()
-            for c in data:
-                if ord('!') <= c and c <= ord('u'):
-                    n += 1
-                    b = b*85+(c-33)
-                    if n == 5:
-                        out += struct.pack(b'>L',b)
-                        n = b = 0
-                elif c == ord('z'):
-                    assert n == 0
-                    out += b'\0\0\0\0'
-                elif c == ord('~'):
-                    if n:
-                        for _ in range(5-n):
-                            b = b*85+84
-                        out += struct.pack(b'>L',b)[:n-1]
-                    break
-            return bytes(out)
    decode = staticmethod(decode)
-
-
-def decodeStreamData(stream):
-    from .generic import NameObject
-    filters = stream.get("/Filter", ())
-    if len(filters) and not isinstance(filters[0], NameObject):
-        # we have a single filter instance
-        filters = (filters,)
-    data = stream._data
-    # If there is not data to decode we should not try to decode the data.
-    if data:
-        for filterType in filters:
-            if filterType == "/FlateDecode" or filterType == "/Fl":
-                data = FlateDecode.decode(data, stream.get("/DecodeParms"))
-            elif filterType == "/ASCIIHexDecode" or filterType == "/AHx":
-                data = ASCIIHexDecode.decode(data)
-            elif filterType == "/LZWDecode" or filterType == "/LZW":
-                data = LZWDecode.decode(data, stream.get("/DecodeParms"))
-            elif filterType == "/ASCII85Decode" or filterType == "/A85":
-                data = ASCII85Decode.decode(data)
-            elif filterType == "/Crypt":
-                decodeParams = stream.get("/DecodeParams", {})
-                if "/Name" not in decodeParams and "/Type" not in decodeParams:
-                    pass
-                else:
-                    raise NotImplementedError("/Crypt filter with /Name or /Type not supported yet")
-            else:
-                # unsupported filter
-                raise NotImplementedError("unsupported filter %s" % filterType)
-    return data
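
For reference, the PNG predictor branch of FlateDecode.decode() above handles filter bytes 0 (None), 1 (Sub) and 2 (Up) per row. A standalone sketch of that arithmetic, independent of PyPDF2:

def unfilter_row(rowdata, prev_rowdata):
    # rowdata[0] is the PNG filter byte; the rest are the row's column bytes.
    if rowdata[0] == 1:                # Sub: add the already-decoded byte to the left
        for i in range(2, len(rowdata)):
            rowdata[i] = (rowdata[i] + rowdata[i - 1]) % 256
    elif rowdata[0] == 2:              # Up: add the decoded byte from the row above
        for i in range(1, len(rowdata)):
            rowdata[i] = (rowdata[i] + prev_rowdata[i]) % 256
    return rowdata

print(unfilter_row([1, 10, 5, 5], [0, 0, 0, 0])[1:])  # [10, 15, 20]
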
 vendor/PyPDF2/generic.py (vendored) | 1226 ------------------
 (file deleted; diff suppressed because it is too large)
 vendor/PyPDF2/merger.py (vendored) | 553 --------------------
@@ -1,553 +0,0 @@
-# vim: sw=4:expandtab:foldmethod=marker
-#
-# Copyright (c) 2006, Mathieu Fenniak
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright notice,
-# this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright notice,
-# this list of conditions and the following disclaimer in the documentation
-# and/or other materials provided with the distribution.
-# * The name of the author may not be used to endorse or promote products
-# derived from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
-# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-# POSSIBILITY OF SUCH DAMAGE.
-
-from .generic import *
-from .utils import isString, str_
-from .pdf import PdfFileReader, PdfFileWriter
-from .pagerange import PageRange
-from sys import version_info
-if version_info < ( 3, 0 ):
-    from cStringIO import StringIO
-    StreamIO = StringIO
-else:
-    from io import BytesIO
-    from io import FileIO as file
-    StreamIO = BytesIO
-
-
-class _MergedPage(object):
-    """
-    _MergedPage is used internally by PdfFileMerger to collect necessary
-    information on each page that is being merged.
-    """
-    def __init__(self, pagedata, src, id):
-        self.src = src
-        self.pagedata = pagedata
-        self.out_pagedata = None
-        self.id = id
-
-
-class PdfFileMerger(object):
-    """
-    Initializes a PdfFileMerger object. PdfFileMerger merges multiple PDFs
-    into a single PDF. It can concatenate, slice, insert, or any combination
-    of the above.
-
-    See the functions :meth:`merge()<merge>` (or :meth:`append()<append>`)
-    and :meth:`write()<write>` for usage information.
-
-    :param bool strict: Determines whether user should be warned of all
-        problems and also causes some correctable problems to be fatal.
-        Defaults to ``True``.
-    """
-
-    def __init__(self, strict=True):
-        self.inputs = []
-        self.pages = []
-        self.output = PdfFileWriter()
-        self.bookmarks = []
-        self.named_dests = []
-        self.id_count = 0
-        self.strict = strict
-
-    def merge(self, position, fileobj, bookmark=None, pages=None, import_bookmarks=True):
-        """
-        Merges the pages from the given file into the output file at the
-        specified page number.
-
-        :param int position: The *page number* to insert this file. File will
-            be inserted after the given number.
-
-        :param fileobj: A File Object or an object that supports the standard read
-            and seek methods similar to a File Object. Could also be a
-            string representing a path to a PDF file.
-
-        :param str bookmark: Optionally, you may specify a bookmark to be applied at
-            the beginning of the included file by supplying the text of the bookmark.
-
-        :param pages: can be a :ref:`Page Range <page-range>` or a ``(start, stop[, step])`` tuple
-            to merge only the specified range of pages from the source
-            document into the output document.
-
-        :param bool import_bookmarks: You may prevent the source document's bookmarks
-            from being imported by specifying this as ``False``.
-        """
-
-        # This parameter is passed to self.inputs.append and means
-        # that the stream used was created in this method.
-        my_file = False
-
-        # If the fileobj parameter is a string, assume it is a path
-        # and create a file object at that location. If it is a file,
-        # copy the file's contents into a BytesIO (or StreamIO) stream object; if
-        # it is a PdfFileReader, copy that reader's stream into a
-        # BytesIO (or StreamIO) stream.
-        # If fileobj is none of the above types, it is not modified
-        decryption_key = None
-        if isString(fileobj):
-            fileobj = file(fileobj, 'rb')
-            my_file = True
-        elif isinstance(fileobj, file):
-            fileobj.seek(0)
-            filecontent = fileobj.read()
-            fileobj = StreamIO(filecontent)
-            my_file = True
-        elif isinstance(fileobj, PdfFileReader):
-            orig_tell = fileobj.stream.tell()
-            fileobj.stream.seek(0)
-            filecontent = StreamIO(fileobj.stream.read())
-            fileobj.stream.seek(orig_tell)  # reset the stream to its original location
-            fileobj = filecontent
-            if hasattr(fileobj, '_decryption_key'):
-                decryption_key = fileobj._decryption_key
-            my_file = True
-
-        # Create a new PdfFileReader instance using the stream
-        # (either file or BytesIO or StringIO) created above
-        pdfr = PdfFileReader(fileobj, strict=self.strict)
-        if decryption_key is not None:
-            pdfr._decryption_key = decryption_key
-
-        # Find the range of pages to merge.
-        if pages == None:
-            pages = (0, pdfr.getNumPages())
-        elif isinstance(pages, PageRange):
-            pages = pages.indices(pdfr.getNumPages())
-        elif not isinstance(pages, tuple):
-            raise TypeError('"pages" must be a tuple of (start, stop[, step])')
-
-        srcpages = []
-        if bookmark:
-            bookmark = Bookmark(TextStringObject(bookmark), NumberObject(self.id_count), NameObject('/Fit'))
-
-        outline = []
-        if import_bookmarks:
-            outline = pdfr.getOutlines()
-            outline = self._trim_outline(pdfr, outline, pages)
-
-        if bookmark:
-            self.bookmarks += [bookmark, outline]
-        else:
-            self.bookmarks += outline
-
-        dests = pdfr.namedDestinations
-        dests = self._trim_dests(pdfr, dests, pages)
-        self.named_dests += dests
-
-        # Gather all the pages that are going to be merged
-        for i in range(*pages):
-            pg = pdfr.getPage(i)
-
-            id = self.id_count
-            self.id_count += 1
-
-            mp = _MergedPage(pg, pdfr, id)
-
-            srcpages.append(mp)
-
-        self._associate_dests_to_pages(srcpages)
-        self._associate_bookmarks_to_pages(srcpages)
-
-        # Slice to insert the pages at the specified position
-        self.pages[position:position] = srcpages
-
-        # Keep track of our input files so we can close them later
-        self.inputs.append((fileobj, pdfr, my_file))
-
-    def append(self, fileobj, bookmark=None, pages=None, import_bookmarks=True):
-        """
-        Identical to the :meth:`merge()<merge>` method, but assumes you want to concatenate
-        all pages onto the end of the file instead of specifying a position.
-
-        :param fileobj: A File Object or an object that supports the standard read
-            and seek methods similar to a File Object. Could also be a
-            string representing a path to a PDF file.
-
-        :param str bookmark: Optionally, you may specify a bookmark to be applied at
-            the beginning of the included file by supplying the text of the bookmark.
-
-        :param pages: can be a :ref:`Page Range <page-range>` or a ``(start, stop[, step])`` tuple
-            to merge only the specified range of pages from the source
-            document into the output document.
-
-        :param bool import_bookmarks: You may prevent the source document's bookmarks
-            from being imported by specifying this as ``False``.
-        """
-
-        self.merge(len(self.pages), fileobj, bookmark, pages, import_bookmarks)
-
-    def write(self, fileobj):
-        """
-        Writes all data that has been merged to the given output file.
-
-        :param fileobj: Output file. Can be a filename or any kind of
-            file-like object.
-        """
-        my_file = False
-        if isString(fileobj):
-            fileobj = file(fileobj, 'wb')
-            my_file = True
-
-        # Add pages to the PdfFileWriter
-        # The commented out line below was replaced with the two lines below it to allow PdfFileMerger to work with PyPdf 1.13
-        for page in self.pages:
-            self.output.addPage(page.pagedata)
-            page.out_pagedata = self.output.getReference(self.output._pages.getObject()["/Kids"][-1].getObject())
-            #idnum = self.output._objects.index(self.output._pages.getObject()["/Kids"][-1].getObject()) + 1
-            #page.out_pagedata = IndirectObject(idnum, 0, self.output)
-
-        # Once all pages are added, create bookmarks to point at those pages
-        self._write_dests()
-        self._write_bookmarks()
-
-        # Write the output to the file
-        self.output.write(fileobj)
-
-        if my_file:
-            fileobj.close()
-
-    def close(self):
-        """
-        Shuts all file descriptors (input and output) and clears all memory
-        usage.
-        """
-        self.pages = []
-        for fo, pdfr, mine in self.inputs:
-            if mine:
-                fo.close()
-
-        self.inputs = []
-        self.output = None
-
-    def addMetadata(self, infos):
-        """
-        Add custom metadata to the output.
-
-        :param dict infos: a Python dictionary where each key is a field
-            and each value is your new metadata.
-            Example: ``{u'/Title': u'My title'}``
-        """
-        self.output.addMetadata(infos)
-
-    def setPageLayout(self, layout):
-        """
-        Set the page layout
-
-        :param str layout: The page layout to be used
-
-        Valid layouts are:
-             /NoLayout        Layout explicitly not specified
-             /SinglePage      Show one page at a time
-             /OneColumn       Show one column at a time
-             /TwoColumnLeft   Show pages in two columns, odd-numbered pages on the left
-             /TwoColumnRight  Show pages in two columns, odd-numbered pages on the right
-             /TwoPageLeft     Show two pages at a time, odd-numbered pages on the left
-             /TwoPageRight    Show two pages at a time, odd-numbered pages on the right
-        """
-        self.output.setPageLayout(layout)
-
-    def setPageMode(self, mode):
-        """
-        Set the page mode.
-
-        :param str mode: The page mode to use.
-
-        Valid modes are:
-            /UseNone         Do not show outlines or thumbnails panels
-            /UseOutlines     Show outlines (aka bookmarks) panel
-            /UseThumbs       Show page thumbnails panel
-            /FullScreen      Fullscreen view
-            /UseOC           Show Optional Content Group (OCG) panel
-            /UseAttachments  Show attachments panel
-        """
-        self.output.setPageMode(mode)
-
-    def _trim_dests(self, pdf, dests, pages):
-        """
-        Removes any named destinations that are not a part of the specified
-        page set.
-        """
-        new_dests = []
-        prev_header_added = True
-        for k, o in list(dests.items()):
-            for j in range(*pages):
-                if pdf.getPage(j).getObject() == o['/Page'].getObject():
-                    o[NameObject('/Page')] = o['/Page'].getObject()
-                    assert str_(k) == str_(o['/Title'])
-                    new_dests.append(o)
-                    break
-        return new_dests
-
-    def _trim_outline(self, pdf, outline, pages):
-        """
-        Removes any outline/bookmark entries that are not a part of the
-        specified page set.
-        """
-        new_outline = []
-        prev_header_added = True
-        for i, o in enumerate(outline):
-            if isinstance(o, list):
-                sub = self._trim_outline(pdf, o, pages)
-                if sub:
-                    if not prev_header_added:
-                        new_outline.append(outline[i-1])
-                    new_outline.append(sub)
-            else:
-                prev_header_added = False
-                for j in range(*pages):
-                    if pdf.getPage(j).getObject() == o['/Page'].getObject():
-                        o[NameObject('/Page')] = o['/Page'].getObject()
-                        new_outline.append(o)
-                        prev_header_added = True
-                        break
-        return new_outline
-
-    def _write_dests(self):
-        dests = self.named_dests
-
-        for v in dests:
-            pageno = None
-            pdf = None
-            if '/Page' in v:
-                for i, p in enumerate(self.pages):
-                    if p.id == v['/Page']:
-                        v[NameObject('/Page')] = p.out_pagedata
-                        pageno = i
-                        pdf = p.src
-                        break
-            if pageno != None:
-                self.output.addNamedDestinationObject(v)
-
-    def _write_bookmarks(self, bookmarks=None, parent=None):
-
-        if bookmarks == None:
-            bookmarks = self.bookmarks
-
-        last_added = None
-        for b in bookmarks:
-            if isinstance(b, list):
-                self._write_bookmarks(b, last_added)
-                continue
-
-            pageno = None
-            pdf = None
-            if '/Page' in b:
-                for i, p in enumerate(self.pages):
-                    if p.id == b['/Page']:
-                        #b[NameObject('/Page')] = p.out_pagedata
-                        args = [NumberObject(p.id), NameObject(b['/Type'])]
-                        #nothing more to add
-                        #if b['/Type'] == '/Fit' or b['/Type'] == '/FitB'
-                        if b['/Type'] == '/FitH' or b['/Type'] == '/FitBH':
-                            if '/Top' in b and not isinstance(b['/Top'], NullObject):
-                                args.append(FloatObject(b['/Top']))
-                            else:
-                                args.append(FloatObject(0))
-                            del b['/Top']
-                        elif b['/Type'] == '/FitV' or b['/Type'] == '/FitBV':
-                            if '/Left' in b and not isinstance(b['/Left'], NullObject):
-                                args.append(FloatObject(b['/Left']))
-                            else:
-                                args.append(FloatObject(0))
-                            del b['/Left']
-                        elif b['/Type'] == '/XYZ':
-                            if '/Left' in b and not isinstance(b['/Left'], NullObject):
-                                args.append(FloatObject(b['/Left']))
-                            else:
-                                args.append(FloatObject(0))
-                            if '/Top' in b and not isinstance(b['/Top'], NullObject):
-                                args.append(FloatObject(b['/Top']))
-                            else:
-                                args.append(FloatObject(0))
-                            if '/Zoom' in b and not isinstance(b['/Zoom'], NullObject):
-                                args.append(FloatObject(b['/Zoom']))
-                            else:
-                                args.append(FloatObject(0))
-                            del b['/Top'], b['/Zoom'], b['/Left']
-                        elif b['/Type'] == '/FitR':
-                            if '/Left' in b and not isinstance(b['/Left'], NullObject):
-                                args.append(FloatObject(b['/Left']))
-                            else:
-                                args.append(FloatObject(0))
-                            if '/Bottom' in b and not isinstance(b['/Bottom'], NullObject):
-                                args.append(FloatObject(b['/Bottom']))
-                            else:
-                                args.append(FloatObject(0))
-                            if '/Right' in b and not isinstance(b['/Right'], NullObject):
-                                args.append(FloatObject(b['/Right']))
-                            else:
-                                args.append(FloatObject(0))
-                            if '/Top' in b and not isinstance(b['/Top'], NullObject):
-                                args.append(FloatObject(b['/Top']))
-                            else:
-                                args.append(FloatObject(0))
-                            del b['/Left'], b['/Right'], b['/Bottom'], b['/Top']
-
-                        b[NameObject('/A')] = DictionaryObject({NameObject('/S'): NameObject('/GoTo'), NameObject('/D'): ArrayObject(args)})
-
-                        pageno = i
-                        pdf = p.src
-                        break
-            if pageno != None:
-                del b['/Page'], b['/Type']
-                last_added = self.output.addBookmarkDict(b, parent)
-
-    def _associate_dests_to_pages(self, pages):
-        for nd in self.named_dests:
-            pageno = None
-            np = nd['/Page']
-
-            if isinstance(np, NumberObject):
-                continue
-
-            for p in pages:
-                if np.getObject() == p.pagedata.getObject():
-                    pageno = p.id
-
-            if pageno != None:
-                nd[NameObject('/Page')] = NumberObject(pageno)
-            else:
-                raise ValueError("Unresolved named destination '%s'" % (nd['/Title'],))
-
-    def _associate_bookmarks_to_pages(self, pages, bookmarks=None):
-        if bookmarks == None:
-            bookmarks = self.bookmarks
-
-        for b in bookmarks:
-            if isinstance(b, list):
-                self._associate_bookmarks_to_pages(pages, b)
-                continue
-
-            pageno = None
-            bp = b['/Page']
-
-            if isinstance(bp, NumberObject):
-                continue
-
-            for p in pages:
-                if bp.getObject() == p.pagedata.getObject():
-                    pageno = p.id
-
-            if pageno != None:
-                b[NameObject('/Page')] = NumberObject(pageno)
-            else:
-                raise ValueError("Unresolved bookmark '%s'" % (b['/Title'],))
-
-    def findBookmark(self, bookmark, root=None):
-        if root == None:
-            root = self.bookmarks
-
-        for i, b in enumerate(root):
-            if isinstance(b, list):
-                res = self.findBookmark(bookmark, b)
-                if res:
-                    return [i] + res
-            elif b == bookmark or b['/Title'] == bookmark:
-                return [i]
-
-        return None
-
-    def addBookmark(self, title, pagenum, parent=None):
-        """
-        Add a bookmark to this PDF file.
-
-        :param str title: Title to use for this bookmark.
-        :param int pagenum: Page number this bookmark will point to.
-        :param parent: A reference to a parent bookmark to create nested
-            bookmarks.
-        """
-        if parent == None:
-            iloc = [len(self.bookmarks)-1]
-        elif isinstance(parent, list):
-            iloc = parent
-        else:
-            iloc = self.findBookmark(parent)
-
-        dest = Bookmark(TextStringObject(title), NumberObject(pagenum), NameObject('/FitH'), NumberObject(826))
-
-        if parent == None:
-            self.bookmarks.append(dest)
-        else:
-            bmparent = self.bookmarks
-            for i in iloc[:-1]:
-                bmparent = bmparent[i]
-            npos = iloc[-1]+1
-            if npos < len(bmparent) and isinstance(bmparent[npos], list):
-                bmparent[npos].append(dest)
-            else:
-                bmparent.insert(npos, [dest])
-        return dest
-
-    def addNamedDestination(self, title, pagenum):
-        """
-        Add a destination to the output.
-
-        :param str title: Title to use
-        :param int pagenum: Page number this destination points at.
-        """
-
-        dest = Destination(TextStringObject(title), NumberObject(pagenum), NameObject('/FitH'), NumberObject(826))
-        self.named_dests.append(dest)
-
-
-class OutlinesObject(list):
-    def __init__(self, pdf, tree, parent=None):
-        list.__init__(self)
-        self.tree = tree
-        self.pdf = pdf
-        self.parent = parent
-
-    def remove(self, index):
-        obj = self[index]
-        del self[index]
-        self.tree.removeChild(obj)
-
-    def add(self, title, pagenum):
-        pageRef = self.pdf.getObject(self.pdf._pages)['/Kids'][pagenum]
-        action = DictionaryObject()
-        action.update({
-            NameObject('/D') : ArrayObject([pageRef, NameObject('/FitH'), NumberObject(826)]),
-            NameObject('/S') : NameObject('/GoTo')
-        })
-        actionRef = self.pdf._addObject(action)
-        bookmark = TreeObject()
-
-        bookmark.update({
-            NameObject('/A'): actionRef,
-            NameObject('/Title'): createStringObject(title),
-        })
-
-        self.pdf._addObject(bookmark)
-
-        self.tree.addChild(bookmark)
-
-    def removeAll(self):
-        for child in [x for x in self.tree.children()]:
-            self.tree.removeChild(child)
-            self.pop()
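
The deleted module above is the stock PyPDF2 1.26.0 merger, now supplied by the PyPDF2==1.26.0 pin in requirements.txt rather than by the vendor/ tree. For reference, typical use of the API documented in the docstrings above (file names are illustrative):

from PyPDF2 import PdfFileMerger

merger = PdfFileMerger()
merger.append("part1.pdf", bookmark="Part 1")  # concatenate a whole file
merger.append("part2.pdf", pages=(0, 3))       # only pages 0..2
merger.merge(0, "cover.pdf")                   # insert at the front
merger.write("combined.pdf")
merger.close()                                 # close the input streams
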
 vendor/PyPDF2/pagerange.py (vendored) | 152 ------------
@@ -1,152 +0,0 @@
-#!/usr/bin/env python
-"""
-Representation and utils for ranges of PDF file pages.
-
-Copyright (c) 2014, Steve Witham <switham_github@mac-guyver.com>.
-All rights reserved. This software is available under a BSD license;
-see https://github.com/mstamy2/PyPDF2/blob/master/LICENSE
-"""
-
-import re
-from .utils import isString
-
-_INT_RE = r"(0|-?[1-9]\d*)"  # A decimal int, don't allow "-0".
-PAGE_RANGE_RE = "^({int}|({int}?(:{int}?(:{int}?)?)))$".format(int=_INT_RE)
-# groups:    12     34     5 6     7 8
-
-
-class ParseError(Exception):
-    pass
-
-
-PAGE_RANGE_HELP = """Remember, page indices start with zero.
-        Page range expression examples:
-            :     all pages.                  -1    last page.
-            22    just the 23rd page.         :-1   all but the last page.
-            0:3   the first three pages.      -2    second-to-last page.
-            :3    the first three pages.      -2:   last two pages.
-            5:    from the sixth page onward. -3:-1 third & second to last.
-        The third, "stride" or "step" number is also recognized.
-            ::2       0 2 4 ... to the end.   3:0:-1    3 2 1 but not 0.
-            1:10:2    1 3 5 7 9               2::-1     2 1 0.
-            ::-1      all pages in reverse order.
-"""
-
-
-class PageRange(object):
-    """
-    A slice-like representation of a range of page indices,
-    i.e. page numbers, only starting at zero.
-    The syntax is like what you would put between brackets [ ].
-    The slice is one of the few Python types that can't be subclassed,
-    but this class converts to and from slices, and allows similar use.
-      o  PageRange(str) parses a string representing a page range.
-      o  PageRange(slice) directly "imports" a slice.
-      o  to_slice() gives the equivalent slice.
-      o  str() and repr() allow printing.
-      o  indices(n) is like slice.indices(n).
-    """
-
-    def __init__(self, arg):
-        """
-        Initialize with either a slice -- giving the equivalent page range,
-        or a PageRange object -- making a copy,
-        or a string like
-            "int", "[int]:[int]" or "[int]:[int]:[int]",
-            where the brackets indicate optional ints.
-        {page_range_help}
-        Note the difference between this notation and arguments to slice():
-            slice(3) means the first three pages;
-            PageRange("3") means the range of only the fourth page.
-            However PageRange(slice(3)) means the first three pages.
-        """
-        if isinstance(arg, slice):
-            self._slice = arg
-            return
-
-        if isinstance(arg, PageRange):
-            self._slice = arg.to_slice()
-            return
-
-        m = isString(arg) and re.match(PAGE_RANGE_RE, arg)
-        if not m:
-            raise ParseError(arg)
-        elif m.group(2):
-            # Special case: just an int means a range of one page.
-            start = int(m.group(2))
-            stop = start + 1 if start != -1 else None
-            self._slice = slice(start, stop)
-        else:
-            self._slice = slice(*[int(g) if g else None
-                                  for g in m.group(4, 6, 8)])
-
-    # Just formatting this when there is __doc__ for __init__
-    if __init__.__doc__:
-        __init__.__doc__ = __init__.__doc__.format(page_range_help=PAGE_RANGE_HELP)
-
-    @staticmethod
-    def valid(input):
-        """ True if input is a valid initializer for a PageRange. """
-        return isinstance(input, slice) or \
-               isinstance(input, PageRange) or \
-               (isString(input)
-                and bool(re.match(PAGE_RANGE_RE, input)))
-
-    def to_slice(self):
-        """ Return the slice equivalent of this page range. """
-        return self._slice
-
-    def __str__(self):
-        """ A string like "1:2:3". """
-        s = self._slice
-        if s.step == None:
-            if s.start != None and s.stop == s.start + 1:
-                return str(s.start)
-
-            indices = s.start, s.stop
-        else:
-            indices = s.start, s.stop, s.step
-        return ':'.join("" if i == None else str(i) for i in indices)
-
-    def __repr__(self):
-        """ A string like "PageRange('1:2:3')". """
-        return "PageRange(" + repr(str(self)) + ")"
-
-    def indices(self, n):
-        """
-        n is the length of the list of pages to choose from.
-        Returns arguments for range(). See help(slice.indices).
-        """
-        return self._slice.indices(n)
-
-
-PAGE_RANGE_ALL = PageRange(":")  # The range of all pages.
-
-
-def parse_filename_page_ranges(args):
-    """
-    Given a list of filenames and page ranges, return a list of
-    (filename, page_range) pairs.
-    First arg must be a filename; other ags are filenames, page-range
-    expressions, slice objects, or PageRange objects.
-    A filename not followed by a page range indicates all pages of the file.
-    """
-    pairs = []
-    pdf_filename = None
-    did_page_range = False
-    for arg in args + [None]:
-        if PageRange.valid(arg):
-            if not pdf_filename:
-                raise ValueError("The first argument must be a filename, " \
-                                 "not a page range.")
-
-            pairs.append( (pdf_filename, PageRange(arg)) )
-            did_page_range = True
-        else:
-            # New filename or end of list--do all of the previous file?
-            if pdf_filename and not did_page_range:
-                pairs.append( (pdf_filename, PAGE_RANGE_ALL) )
-
-            pdf_filename = arg
-            did_page_range = False
-    return pairs
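
PageRange likewise keeps working through the pip-installed package. Its slice-like string syntax, per the docstring and help text above:

from PyPDF2.pagerange import PageRange

pr = PageRange("0:10:2")        # pages 0, 2, 4, 6, 8
print(pr.to_slice())            # slice(0, 10, 2)
print(pr.indices(6))            # (0, 6, 2) -- clamped like slice.indices
print(PageRange("3"))           # 3 -- a single page, the fourth
print(PageRange.valid("::-1"))  # True -- all pages in reverse
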
 vendor/PyPDF2/pdf.py (vendored) | 3004 ------------------
 (file deleted; diff suppressed because it is too large)
295
vendor/PyPDF2/utils.py
vendored
295
vendor/PyPDF2/utils.py
vendored
@ -1,295 +0,0 @@
# Copyright (c) 2006, Mathieu Fenniak
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright notice,
#   this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
#   this list of conditions and the following disclaimer in the documentation
#   and/or other materials provided with the distribution.
# * The name of the author may not be used to endorse or promote products
#   derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.

"""
Utility functions for PDF library.
"""
__author__ = "Mathieu Fenniak"
__author_email__ = "biziqe@mathieu.fenniak.net"


import sys

try:
    import __builtin__ as builtins
except ImportError:  # Py3
    import builtins


xrange_fn = getattr(builtins, "xrange", range)
_basestring = getattr(builtins, "basestring", str)

bytes_type = type(bytes())  # Works the same in Python 2.X and 3.X
string_type = getattr(builtins, "unicode", str)
int_types = (int, long) if sys.version_info[0] < 3 else (int,)


# Make basic type tests more consistent
def isString(s):
    """Test if arg is a string. Compatible with Python 2 and 3."""
    return isinstance(s, _basestring)


def isInt(n):
    """Test if arg is an int. Compatible with Python 2 and 3."""
    return isinstance(n, int_types)


def isBytes(b):
    """Test if arg is a bytes instance. Compatible with Python 2 and 3."""
    return isinstance(b, bytes_type)

# Custom implementation of warnings.formatwarning
def formatWarning(message, category, filename, lineno, line=None):
    file = filename.replace("/", "\\").rsplit("\\", 1)[1]  # find the file name
    return "%s: %s [%s:%s]\n" % (category.__name__, message, file, lineno)


def readUntilWhitespace(stream, maxchars=None):
    """
    Reads non-whitespace characters and returns them.
    Stops upon encountering whitespace or when maxchars is reached.
    """
    txt = b_("")
    while True:
        tok = stream.read(1)
        if tok.isspace() or not tok:
            break
        txt += tok
        if len(txt) == maxchars:
            break
    return txt

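
# Editor's note (illustrative, not from the original file): these readers
# operate on binary file-like objects, e.g. io.BytesIO.
#
#     import io
#     stream = io.BytesIO(b"obj 12 0")
#     readUntilWhitespace(stream)              # b'obj' (stops at the space)
#     readUntilWhitespace(stream, maxchars=1)  # b'1' (stops after one byte)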
def readNonWhitespace(stream):
    """
    Finds and reads the next non-whitespace character (ignores whitespace).
    """
    tok = WHITESPACES[0]
    while tok in WHITESPACES:
        tok = stream.read(1)
    return tok


def skipOverWhitespace(stream):
    """
    Similar to readNonWhitespace, but returns a Boolean if more than
    one whitespace character was read.
    """
    tok = WHITESPACES[0]
    cnt = 0
    while tok in WHITESPACES:
        tok = stream.read(1)
        cnt += 1
    return (cnt > 1)


def skipOverComment(stream):
    tok = stream.read(1)
    stream.seek(-1, 1)
    if tok == b_('%'):
        while tok not in (b_('\n'), b_('\r')):
            tok = stream.read(1)


def readUntilRegex(stream, regex, ignore_eof=False):
    """
    Reads until the regular expression pattern matched (ignore the match).
    Raise PdfStreamError on premature end-of-file.

    :param bool ignore_eof: If true, ignore end-of-file and return immediately.
    """
    name = b_('')
    while True:
        tok = stream.read(16)
        if not tok:
            # stream has truncated prematurely
            if ignore_eof:
                return name
            else:
                raise PdfStreamError("Stream has ended unexpectedly")
        m = regex.search(tok)
        if m is not None:
            name += tok[:m.start()]
            stream.seek(m.start() - len(tok), 1)
            break
        name += tok
    return name

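
# Editor's note (illustrative use of readUntilRegex; the delimiter pattern
# here is a stand-in for what the PDF parser would supply): it consumes
# bytes up to the first match and repositions the stream on the match.
#
#     import io
#     import re
#     delimiter = re.compile(b"[ /]")
#     stream = io.BytesIO(b"Name/Next")
#     readUntilRegex(stream, delimiter)  # b'Name'
#     stream.read(1)                     # b'/' -- rewound onto the match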
class ConvertFunctionsToVirtualList(object):
    def __init__(self, lengthFunction, getFunction):
        self.lengthFunction = lengthFunction
        self.getFunction = getFunction

    def __len__(self):
        return self.lengthFunction()

    def __getitem__(self, index):
        if isinstance(index, slice):
            indices = xrange_fn(*index.indices(len(self)))
            cls = type(self)
            return cls(indices.__len__, lambda idx: self[indices[idx]])
        if not isInt(index):
            raise TypeError("sequence indices must be integers")
        len_self = len(self)
        if index < 0:
            # support negative indexes
            index = len_self + index
        if index < 0 or index >= len_self:
            raise IndexError("sequence index out of range")
        return self.getFunction(index)

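
# Editor's note (a minimal sketch, assumed standalone use): two callables
# become a read-only, sliceable sequence without materialising the data.
#
#     data = [10, 20, 30, 40]
#     virtual = ConvertFunctionsToVirtualList(lambda: len(data),
#                                             lambda i: data[i])
#     len(virtual)        # 4
#     virtual[-1]         # 40 (negative indexes are supported)
#     list(virtual[1:3])  # [20, 30] (slicing returns another virtual list)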
def RC4_encrypt(key, plaintext):
    S = [i for i in range(256)]
    j = 0
    for i in range(256):
        j = (j + S[i] + ord_(key[i % len(key)])) % 256
        S[i], S[j] = S[j], S[i]
    i, j = 0, 0
    retval = b_("")
    for x in range(len(plaintext)):
        i = (i + 1) % 256
        j = (j + S[i]) % 256
        S[i], S[j] = S[j], S[i]
        t = S[(S[i] + S[j]) % 256]
        retval += b_(chr(ord_(plaintext[x]) ^ t))
    return retval

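
# Editor's note: RC4 is its own inverse, which is all the PDF standard
# security handler needs. A sanity check (illustrative only; RC4 is not a
# recommended cipher today):
#
#     key = b"secret"
#     ciphertext = RC4_encrypt(key, b"hello")
#     assert RC4_encrypt(key, ciphertext) == b"hello"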
def matrixMultiply(a, b):
    return [[sum([float(i) * float(j)
                  for i, j in zip(row, col)]
                 ) for col in zip(*b)]
            for row in a]

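
# Editor's note: matrixMultiply works on row-major nested lists, e.g. for
# composing PDF transformation matrices. A small worked example:
#
#     a = [[1, 0], [0, 1]]   # identity
#     b = [[2, 0], [0, 2]]   # uniform scale by 2
#     matrixMultiply(a, b)   # [[2.0, 0.0], [0.0, 2.0]]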
def markLocation(stream):
    """Creates a text file showing the current location in context."""
    # Mainly for debugging
    RADIUS = 5000
    stream.seek(-RADIUS, 1)
    outputDoc = open('PyPDF2_pdfLocation.txt', 'w')
    outputDoc.write(stream.read(RADIUS))
    outputDoc.write('HERE')
    outputDoc.write(stream.read(RADIUS))
    outputDoc.close()
    stream.seek(-RADIUS, 1)


class PyPdfError(Exception):
    pass


class PdfReadError(PyPdfError):
    pass


class PageSizeNotDefinedError(PyPdfError):
    pass


class PdfReadWarning(UserWarning):
    pass


class PdfStreamError(PdfReadError):
    pass

if sys.version_info[0] < 3:
    def b_(s):
        return s
else:
    B_CACHE = {}

    def b_(s):
        bc = B_CACHE
        if s in bc:
            return bc[s]
        if type(s) == bytes:
            return s
        else:
            r = s.encode('latin-1')
            if len(s) < 2:
                bc[s] = r
            return r


def u_(s):
    if sys.version_info[0] < 3:
        return unicode(s, 'unicode_escape')
    else:
        return s


def str_(b):
    if sys.version_info[0] < 3:
        return b
    else:
        if type(b) == bytes:
            return b.decode('latin-1')
        else:
            return b


def ord_(b):
    if sys.version_info[0] < 3 or type(b) == str:
        return ord(b)
    else:
        return b


def chr_(c):
    if sys.version_info[0] < 3:
        return c
    else:
        return chr(c)


def barray(b):
    if sys.version_info[0] < 3:
        return b
    else:
        return bytearray(b)


def hexencode(b):
    if sys.version_info[0] < 3:
        return b.encode('hex')
    else:
        import codecs
        coder = codecs.getencoder('hex_codec')
        return coder(b)[0]


def hexStr(num):
    return hex(num).replace('L', '')


WHITESPACES = [b_(x) for x in [' ', '\n', '\r', '\t', '\x00']]
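
# Editor's note: the b_/str_ pair bridges the bytes/text split. A round-trip
# sketch (assumed standalone use):
#
#     raw = b_("/Type")            # b'/Type' on Python 3, '/Type' on Python 2
#     assert str_(raw) == "/Type"  # latin-1 decode on 3, pass-through on 2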
 358  vendor/PyPDF2/xmp.py (vendored)
@@ -1,358 +0,0 @@
import re
import datetime
import decimal
from .generic import PdfObject
from xml.dom import getDOMImplementation
from xml.dom.minidom import parseString
from .utils import u_

RDF_NAMESPACE = "http://www.w3.org/1999/02/22-rdf-syntax-ns#"
DC_NAMESPACE = "http://purl.org/dc/elements/1.1/"
XMP_NAMESPACE = "http://ns.adobe.com/xap/1.0/"
PDF_NAMESPACE = "http://ns.adobe.com/pdf/1.3/"
XMPMM_NAMESPACE = "http://ns.adobe.com/xap/1.0/mm/"

# What is the PDFX namespace, you might ask? I might ask that too. It's
# a completely undocumented namespace used to place "custom metadata"
# properties, which are arbitrary metadata properties with no semantic or
# documented meaning. Elements in the namespace are key/value-style storage,
# where the element name is the key and the content is the value. The keys
# are transformed into valid XML identifiers by substituting an invalid
# identifier character with \u2182 followed by the unicode hex ID of the
# original character. A key like "my car" is therefore "my\u21820020car".
#
# \u2182, in case you're wondering, is the unicode character
# \u{ROMAN NUMERAL TEN THOUSAND}, a straightforward and obvious choice for
# escaping characters.
#
# Intentional users of the pdfx namespace should be shot on sight. A
# custom data schema and sensical XML elements could be used instead, as is
# suggested by Adobe's own documentation on XMP (under "Extensibility of
# Schemas").
#
# Information presented here on the /pdfx/ schema is a result of limited
# reverse engineering, and does not constitute a full specification.
PDFX_NAMESPACE = "http://ns.adobe.com/pdfx/1.3/"

iso8601 = re.compile("""
        (?P<year>[0-9]{4})
        (-
            (?P<month>[0-9]{2})
            (-
                (?P<day>[0-9]+)
                (T
                    (?P<hour>[0-9]{2}):
                    (?P<minute>[0-9]{2})
                    (:(?P<second>[0-9]{2}(.[0-9]+)?))?
                    (?P<tzd>Z|[-+][0-9]{2}:[0-9]{2})
                )?
            )?
        )?
        """, re.VERBOSE)

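
# Editor's note: the escaping scheme described above can be sketched as the
# following hypothetical helper (not part of PyPDF2; treating every
# non-alphanumeric character as "invalid" is a simplification -- the real
# inverse is the decode loop in custom_properties further down):
#
#     def escape_pdfx_key(key):
#         # Replace each invalid character with \u2182 plus its 4-digit
#         # hex code point.
#         return "".join(c if c.isalnum() else u"\u2182%04x" % ord(c)
#                        for c in key)
#
#     assert escape_pdfx_key("my car") == u"my\u21820020car"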
class XmpInformation(PdfObject):
    """
    An object that represents Adobe XMP metadata.
    Usually accessed by :meth:`getXmpMetadata()<PyPDF2.PdfFileReader.getXmpMetadata>`.
    """

    def __init__(self, stream):
        self.stream = stream
        docRoot = parseString(self.stream.getData())
        self.rdfRoot = docRoot.getElementsByTagNameNS(RDF_NAMESPACE, "RDF")[0]
        self.cache = {}

    def writeToStream(self, stream, encryption_key):
        self.stream.writeToStream(stream, encryption_key)

    def getElement(self, aboutUri, namespace, name):
        for desc in self.rdfRoot.getElementsByTagNameNS(RDF_NAMESPACE, "Description"):
            if desc.getAttributeNS(RDF_NAMESPACE, "about") == aboutUri:
                attr = desc.getAttributeNodeNS(namespace, name)
                if attr is not None:
                    yield attr
                for element in desc.getElementsByTagNameNS(namespace, name):
                    yield element

    def getNodesInNamespace(self, aboutUri, namespace):
        for desc in self.rdfRoot.getElementsByTagNameNS(RDF_NAMESPACE, "Description"):
            if desc.getAttributeNS(RDF_NAMESPACE, "about") == aboutUri:
                for i in range(desc.attributes.length):
                    attr = desc.attributes.item(i)
                    if attr.namespaceURI == namespace:
                        yield attr
                for child in desc.childNodes:
                    if child.namespaceURI == namespace:
                        yield child

    def _getText(self, element):
        text = ""
        for child in element.childNodes:
            if child.nodeType == child.TEXT_NODE:
                text += child.data
        return text

    def _converter_string(value):
        return value

    def _converter_date(value):
        m = iso8601.match(value)
        year = int(m.group("year"))
        month = int(m.group("month") or "1")
        day = int(m.group("day") or "1")
        hour = int(m.group("hour") or "0")
        minute = int(m.group("minute") or "0")
        second = decimal.Decimal(m.group("second") or "0")
        seconds = second.to_integral(decimal.ROUND_FLOOR)
        milliseconds = (second - seconds) * 1000000  # note: this is the microseconds argument
        tzd = m.group("tzd") or "Z"
        dt = datetime.datetime(year, month, day, hour, minute, seconds, milliseconds)
        if tzd != "Z":
            tzd_hours, tzd_minutes = [int(x) for x in tzd.split(":")]
            tzd_hours *= -1
            if tzd_hours < 0:
                tzd_minutes *= -1
            dt = dt + datetime.timedelta(hours=tzd_hours, minutes=tzd_minutes)
        return dt
    _test_converter_date = staticmethod(_converter_date)

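    # Editor's note: the iso8601 pattern deliberately accepts progressively
    # truncated forms, which is why _converter_date falls back to "1"/"0"
    # defaults. A quick check of the regex itself (illustrative):
    #
    #     m = iso8601.match("2017-03")
    #     m.group("year"), m.group("month"), m.group("day")
    #     # ('2017', '03', None)
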
    def _getter_bag(namespace, name, converter):
        def get(self):
            cached = self.cache.get(namespace, {}).get(name)
            if cached:
                return cached
            retval = []
            for element in self.getElement("", namespace, name):
                bags = element.getElementsByTagNameNS(RDF_NAMESPACE, "Bag")
                if len(bags):
                    for bag in bags:
                        for item in bag.getElementsByTagNameNS(RDF_NAMESPACE, "li"):
                            value = self._getText(item)
                            value = converter(value)
                            retval.append(value)
            ns_cache = self.cache.setdefault(namespace, {})
            ns_cache[name] = retval
            return retval
        return get

    def _getter_seq(namespace, name, converter):
        def get(self):
            cached = self.cache.get(namespace, {}).get(name)
            if cached:
                return cached
            retval = []
            for element in self.getElement("", namespace, name):
                seqs = element.getElementsByTagNameNS(RDF_NAMESPACE, "Seq")
                if len(seqs):
                    for seq in seqs:
                        for item in seq.getElementsByTagNameNS(RDF_NAMESPACE, "li"):
                            value = self._getText(item)
                            value = converter(value)
                            retval.append(value)
                else:
                    value = converter(self._getText(element))
                    retval.append(value)
            ns_cache = self.cache.setdefault(namespace, {})
            ns_cache[name] = retval
            return retval
        return get

    def _getter_langalt(namespace, name, converter):
        def get(self):
            cached = self.cache.get(namespace, {}).get(name)
            if cached:
                return cached
            retval = {}
            for element in self.getElement("", namespace, name):
                alts = element.getElementsByTagNameNS(RDF_NAMESPACE, "Alt")
                if len(alts):
                    for alt in alts:
                        for item in alt.getElementsByTagNameNS(RDF_NAMESPACE, "li"):
                            value = self._getText(item)
                            value = converter(value)
                            retval[item.getAttribute("xml:lang")] = value
                else:
                    retval["x-default"] = converter(self._getText(element))
            ns_cache = self.cache.setdefault(namespace, {})
            ns_cache[name] = retval
            return retval
        return get

    def _getter_single(namespace, name, converter):
        def get(self):
            cached = self.cache.get(namespace, {}).get(name)
            if cached:
                return cached
            value = None
            for element in self.getElement("", namespace, name):
                if element.nodeType == element.ATTRIBUTE_NODE:
                    value = element.nodeValue
                else:
                    value = self._getText(element)
                break
            if value is not None:
                value = converter(value)
            ns_cache = self.cache.setdefault(namespace, {})
            ns_cache[name] = value
            return value
        return get

    dc_contributor = property(_getter_bag(DC_NAMESPACE, "contributor", _converter_string))
    """
    Contributors to the resource (other than the authors). An unsorted
    array of names.
    """

    dc_coverage = property(_getter_single(DC_NAMESPACE, "coverage", _converter_string))
    """
    Text describing the extent or scope of the resource.
    """

    dc_creator = property(_getter_seq(DC_NAMESPACE, "creator", _converter_string))
    """
    A sorted array of names of the authors of the resource, listed in order
    of precedence.
    """

    dc_date = property(_getter_seq(DC_NAMESPACE, "date", _converter_date))
    """
    A sorted array of dates (datetime.datetime instances) of significance to
    the resource. The dates and times are in UTC.
    """

    dc_description = property(_getter_langalt(DC_NAMESPACE, "description", _converter_string))
    """
    A language-keyed dictionary of textual descriptions of the content of the
    resource.
    """

    dc_format = property(_getter_single(DC_NAMESPACE, "format", _converter_string))
    """
    The mime-type of the resource.
    """

    dc_identifier = property(_getter_single(DC_NAMESPACE, "identifier", _converter_string))
    """
    Unique identifier of the resource.
    """

    dc_language = property(_getter_bag(DC_NAMESPACE, "language", _converter_string))
    """
    An unordered array specifying the languages used in the resource.
    """

    dc_publisher = property(_getter_bag(DC_NAMESPACE, "publisher", _converter_string))
    """
    An unordered array of publisher names.
    """

    dc_relation = property(_getter_bag(DC_NAMESPACE, "relation", _converter_string))
    """
    An unordered array of text descriptions of relationships to other
    documents.
    """

    dc_rights = property(_getter_langalt(DC_NAMESPACE, "rights", _converter_string))
    """
    A language-keyed dictionary of textual descriptions of the rights the
    user has to this resource.
    """

    dc_source = property(_getter_single(DC_NAMESPACE, "source", _converter_string))
    """
    Unique identifier of the work from which this resource was derived.
    """

    dc_subject = property(_getter_bag(DC_NAMESPACE, "subject", _converter_string))
    """
    An unordered array of descriptive phrases or keywords that specify the
    topic of the content of the resource.
    """

    dc_title = property(_getter_langalt(DC_NAMESPACE, "title", _converter_string))
    """
    A language-keyed dictionary of the title of the resource.
    """

    dc_type = property(_getter_bag(DC_NAMESPACE, "type", _converter_string))
    """
    An unordered array of textual descriptions of the document type.
    """

    pdf_keywords = property(_getter_single(PDF_NAMESPACE, "Keywords", _converter_string))
    """
    An unformatted text string representing document keywords.
    """

    pdf_pdfversion = property(_getter_single(PDF_NAMESPACE, "PDFVersion", _converter_string))
    """
    The PDF file version, for example 1.0, 1.3.
    """

    pdf_producer = property(_getter_single(PDF_NAMESPACE, "Producer", _converter_string))
    """
    The name of the tool that created the PDF document.
    """

    xmp_createDate = property(_getter_single(XMP_NAMESPACE, "CreateDate", _converter_date))
    """
    The date and time the resource was originally created. The date and
    time are returned as a UTC datetime.datetime object.
    """

    xmp_modifyDate = property(_getter_single(XMP_NAMESPACE, "ModifyDate", _converter_date))
    """
    The date and time the resource was last modified. The date and time
    are returned as a UTC datetime.datetime object.
    """

    xmp_metadataDate = property(_getter_single(XMP_NAMESPACE, "MetadataDate", _converter_date))
    """
    The date and time that any metadata for this resource was last
    changed. The date and time are returned as a UTC datetime.datetime
    object.
    """

    xmp_creatorTool = property(_getter_single(XMP_NAMESPACE, "CreatorTool", _converter_string))
    """
    The name of the first known tool used to create the resource.
    """

    xmpmm_documentId = property(_getter_single(XMPMM_NAMESPACE, "DocumentID", _converter_string))
    """
    The common identifier for all versions and renditions of this resource.
    """

    xmpmm_instanceId = property(_getter_single(XMPMM_NAMESPACE, "InstanceID", _converter_string))
    """
    An identifier for a specific incarnation of a document, updated each
    time a file is saved.
    """

    def custom_properties(self):
        if not hasattr(self, "_custom_properties"):
            self._custom_properties = {}
            for node in self.getNodesInNamespace("", PDFX_NAMESPACE):
                key = node.localName
                while True:
                    # see documentation about PDFX_NAMESPACE earlier in file
                    idx = key.find(u_("\u2182"))
                    if idx == -1:
                        break
                    key = key[:idx] + chr(int(key[idx+1:idx+5], base=16)) + key[idx+5:]
                if node.nodeType == node.ATTRIBUTE_NODE:
                    value = node.nodeValue
                else:
                    value = self._getText(node)
                self._custom_properties[key] = value
        return self._custom_properties

    custom_properties = property(custom_properties)
    """
    Retrieves custom metadata properties defined in the undocumented pdfx
    metadata schema.

    :return: a dictionary of key/value items for custom metadata properties.
    :rtype: dict
    """
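
# Editor's note: how this class is typically reached (sketch; "book.pdf" is a
# placeholder, and the example assumes a PDF carrying an XMP packet, read via
# the PdfFileReader method named in the class docstring above):
#
#     from PyPDF2 import PdfFileReader
#     xmp = PdfFileReader(open("book.pdf", "rb")).getXmpMetadata()
#     if xmp is not None:
#         print(xmp.dc_title)           # e.g. {u'x-default': u'A Title'}
#         print(xmp.custom_properties)  # pdfx key/value pairs, if any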
 1  vendor/_version.py (vendored)
@@ -1 +0,0 @@
__version__ = '5.0.6'
 28  vendor/babel/AUTHORS (vendored)
@@ -1,28 +0,0 @@
Babel is written and maintained by the Babel team and various contributors:

Maintainer and Current Project Lead:

- Armin Ronacher <armin.ronacher@active-4.com>

Contributors:

- Christopher Lenz <cmlenz@gmail.com>
- Alex Morega <alex@grep.ro>
- Felix Schwarz <felix.schwarz@oss.schwarz.eu>
- Pedro Algarvio <pedro@algarvio.me>
- Jeroen Ruigrok van der Werven <asmodai@in-nomine.org>
- Philip Jenvey <pjenvey@underboss.org>
- Tobias Bieniek <Tobias.Bieniek@gmx.de>
- Jonas Borgström <jonas@edgewall.org>
- Daniel Neuhäuser <dasdasich@gmail.com>
- Nick Retallack <nick@bitcasa.com>
- Thomas Waldmann <tw@waldmann-edv.de>
- Lennart Regebro <regebro@gmail.com>

Babel was previously developed under the Copyright of Edgewall Software. The
following copyright notice holds true for releases before 2013: "Copyright (c)
2007 - 2011 by Edgewall Software"

In addition to the regular contributions Babel includes a fork of Lennart
Regebro's tzlocal that originally was licensed under the CC0 license. The
original copyright of that project is "Copyright 2013 by Lennart Regebro".
 29  vendor/babel/LICENSE (vendored)
@@ -1,29 +0,0 @@
Copyright (C) 2013 by the Babel Team, see AUTHORS for more information.

All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:

 1. Redistributions of source code must retain the above copyright
    notice, this list of conditions and the following disclaimer.
 2. Redistributions in binary form must reproduce the above copyright
    notice, this list of conditions and the following disclaimer in
    the documentation and/or other materials provided with the
    distribution.
 3. The name of the author may not be used to endorse or promote
    products derived from this software without specific prior
    written permission.

THIS SOFTWARE IS PROVIDED BY THE AUTHOR "AS IS" AND ANY EXPRESS
OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 24  vendor/babel/__init__.py (vendored)
@@ -1,24 +0,0 @@
# -*- coding: utf-8 -*-
"""
    babel
    ~~~~~

    Integrated collection of utilities that assist in internationalizing and
    localizing applications.

    This package is basically composed of two major parts:

     * tools to build and work with ``gettext`` message catalogs
     * a Python interface to the CLDR (Common Locale Data Repository),
       providing access to various locale display names, localized number
       and date formatting, etc.

    :copyright: (c) 2013 by the Babel Team.
    :license: BSD, see LICENSE for more details.
"""

from babel.core import UnknownLocaleError, Locale, default_locale, \
     negotiate_locale, parse_locale, get_locale_identifier


__version__ = '1.3'
 51  vendor/babel/_compat.py (vendored)
@@ -1,51 +0,0 @@
import sys

PY2 = sys.version_info[0] == 2

_identity = lambda x: x


if not PY2:
    text_type = str
    string_types = (str,)
    integer_types = (int, )
    unichr = chr

    text_to_native = lambda s, enc: s

    iterkeys = lambda d: iter(d.keys())
    itervalues = lambda d: iter(d.values())
    iteritems = lambda d: iter(d.items())

    from io import StringIO, BytesIO
    import pickle

    izip = zip
    imap = map
    range_type = range

    cmp = lambda a, b: (a > b) - (a < b)

else:
    text_type = unicode
    string_types = (str, unicode)
    integer_types = (int, long)

    text_to_native = lambda s, enc: s.encode(enc)
    unichr = unichr

    iterkeys = lambda d: d.iterkeys()
    itervalues = lambda d: d.itervalues()
    iteritems = lambda d: d.iteritems()

    from cStringIO import StringIO as BytesIO
    from StringIO import StringIO
    import cPickle as pickle

    from itertools import izip, imap
    range_type = xrange

    cmp = cmp


number_types = integer_types + (float,)
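
# Editor's note: the point of these shims is a single spelling for idioms
# that differ between Python 2 and 3. A short demonstration (assumes the
# vendored package is importable as babel):
#
#     from babel._compat import iteritems, izip, text_type
#     d = {"a": 1, "b": 2}
#     for key, value in iteritems(d):  # d.iteritems() on 2, iter(d.items()) on 3
#         print(text_type(key), value)
#     for pair in izip("ab", [1, 2]):  # itertools.izip on 2, builtin zip on 3
#         print(pair)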
 941  vendor/babel/core.py (vendored)
@@ -1,941 +0,0 @@
# -*- coding: utf-8 -*-
"""
    babel.core
    ~~~~~~~~~~

    Core locale representation and locale data access.

    :copyright: (c) 2013 by the Babel Team.
    :license: BSD, see LICENSE for more details.
"""

import os

from babel import localedata
from babel._compat import pickle, string_types

__all__ = ['UnknownLocaleError', 'Locale', 'default_locale', 'negotiate_locale',
           'parse_locale']


_global_data = None


def _raise_no_data_error():
    raise RuntimeError('The babel data files are not available. '
                       'This usually happens because you are using '
                       'a source checkout from Babel and you did '
                       'not build the data files.  Just make sure '
                       'to run "python setup.py import_cldr" before '
                       'installing the library.')


def get_global(key):
    """Return the dictionary for the given key in the global data.

    The global data is stored in the ``babel/global.dat`` file and contains
    information independent of individual locales.

    >>> get_global('zone_aliases')['UTC']
    u'Etc/GMT'
    >>> get_global('zone_territories')['Europe/Berlin']
    u'DE'

    .. versionadded:: 0.9

    :param key: the data key
    """
    global _global_data
    if _global_data is None:
        dirname = os.path.join(os.path.dirname(__file__))
        filename = os.path.join(dirname, 'global.dat')
        if not os.path.isfile(filename):
            _raise_no_data_error()
        fileobj = open(filename, 'rb')
        try:
            _global_data = pickle.load(fileobj)
        finally:
            fileobj.close()
    return _global_data.get(key, {})

LOCALE_ALIASES = {
    'ar': 'ar_SY', 'bg': 'bg_BG', 'bs': 'bs_BA', 'ca': 'ca_ES', 'cs': 'cs_CZ',
    'da': 'da_DK', 'de': 'de_DE', 'el': 'el_GR', 'en': 'en_US', 'es': 'es_ES',
    'et': 'et_EE', 'fa': 'fa_IR', 'fi': 'fi_FI', 'fr': 'fr_FR', 'gl': 'gl_ES',
    'he': 'he_IL', 'hu': 'hu_HU', 'id': 'id_ID', 'is': 'is_IS', 'it': 'it_IT',
    'ja': 'ja_JP', 'km': 'km_KH', 'ko': 'ko_KR', 'lt': 'lt_LT', 'lv': 'lv_LV',
    'mk': 'mk_MK', 'nl': 'nl_NL', 'nn': 'nn_NO', 'no': 'nb_NO', 'pl': 'pl_PL',
    'pt': 'pt_PT', 'ro': 'ro_RO', 'ru': 'ru_RU', 'sk': 'sk_SK', 'sl': 'sl_SI',
    'sv': 'sv_SE', 'th': 'th_TH', 'tr': 'tr_TR', 'uk': 'uk_UA'
}


class UnknownLocaleError(Exception):
    """Exception thrown when a locale is requested for which no locale data
    is available.
    """

    def __init__(self, identifier):
        """Create the exception.

        :param identifier: the identifier string of the unsupported locale
        """
        Exception.__init__(self, 'unknown locale %r' % identifier)

        #: The identifier of the locale that could not be found.
        self.identifier = identifier

class Locale(object):
    """Representation of a specific locale.

    >>> locale = Locale('en', 'US')
    >>> repr(locale)
    "Locale('en', territory='US')"
    >>> locale.display_name
    u'English (United States)'

    A `Locale` object can also be instantiated from a raw locale string:

    >>> locale = Locale.parse('en-US', sep='-')
    >>> repr(locale)
    "Locale('en', territory='US')"

    `Locale` objects provide access to a collection of locale data, such as
    territory and language names, number and date format patterns, and more:

    >>> locale.number_symbols['decimal']
    u'.'

    If a locale is requested for which no locale data is available, an
    `UnknownLocaleError` is raised:

    >>> Locale.parse('en_DE')
    Traceback (most recent call last):
        ...
    UnknownLocaleError: unknown locale 'en_DE'

    For more information see :rfc:`3066`.
    """

    def __init__(self, language, territory=None, script=None, variant=None):
        """Initialize the locale object from the given identifier components.

        >>> locale = Locale('en', 'US')
        >>> locale.language
        'en'
        >>> locale.territory
        'US'

        :param language: the language code
        :param territory: the territory (country or region) code
        :param script: the script code
        :param variant: the variant code
        :raise `UnknownLocaleError`: if no locale data is available for the
                                     requested locale
        """
        #: the language code
        self.language = language
        #: the territory (country or region) code
        self.territory = territory
        #: the script code
        self.script = script
        #: the variant code
        self.variant = variant
        self.__data = None

        identifier = str(self)
        if not localedata.exists(identifier):
            raise UnknownLocaleError(identifier)

    @classmethod
    def default(cls, category=None, aliases=LOCALE_ALIASES):
        """Return the system default locale for the specified category.

        >>> for name in ['LANGUAGE', 'LC_ALL', 'LC_CTYPE', 'LC_MESSAGES']:
        ...     os.environ[name] = ''
        >>> os.environ['LANG'] = 'fr_FR.UTF-8'
        >>> Locale.default('LC_MESSAGES')
        Locale('fr', territory='FR')

        The following fallback environment variables are always considered:

        - ``LANGUAGE``
        - ``LC_ALL``
        - ``LC_CTYPE``
        - ``LANG``

        :param category: one of the ``LC_XXX`` environment variable names
        :param aliases: a dictionary of aliases for locale identifiers
        """
        # XXX: use likely subtag expansion here instead of the
        # aliases dictionary.
        locale_string = default_locale(category, aliases=aliases)
        return cls.parse(locale_string)

    @classmethod
    def negotiate(cls, preferred, available, sep='_', aliases=LOCALE_ALIASES):
        """Find the best match between available and requested locale strings.

        >>> Locale.negotiate(['de_DE', 'en_US'], ['de_DE', 'de_AT'])
        Locale('de', territory='DE')
        >>> Locale.negotiate(['de_DE', 'en_US'], ['en', 'de'])
        Locale('de')
        >>> Locale.negotiate(['de_DE', 'de'], ['en_US'])

        You can specify the character used in the locale identifiers to
        separate the different components. This separator is applied to both
        lists. Also, case is ignored in the comparison:

        >>> Locale.negotiate(['de-DE', 'de'], ['en-us', 'de-de'], sep='-')
        Locale('de', territory='DE')

        :param preferred: the list of locale identifiers preferred by the user
        :param available: the list of locale identifiers available
        :param aliases: a dictionary of aliases for locale identifiers
        """
        identifier = negotiate_locale(preferred, available, sep=sep,
                                      aliases=aliases)
        if identifier:
            return Locale.parse(identifier, sep=sep)

    @classmethod
    def parse(cls, identifier, sep='_', resolve_likely_subtags=True):
        """Create a `Locale` instance for the given locale identifier.

        >>> l = Locale.parse('de-DE', sep='-')
        >>> l.display_name
        u'Deutsch (Deutschland)'

        If the `identifier` parameter is not a string, but actually a `Locale`
        object, that object is returned:

        >>> Locale.parse(l)
        Locale('de', territory='DE')

        This also can perform resolving of likely subtags, which it does by
        default.  This is, for instance, useful to figure out the most likely
        locale for a territory; you can use ``'und'`` as the language tag:

        >>> Locale.parse('und_AT')
        Locale('de', territory='AT')

        :param identifier: the locale identifier string
        :param sep: optional component separator
        :param resolve_likely_subtags: if this is specified then a locale will
                                       have its likely subtag resolved if the
                                       locale otherwise does not exist.  For
                                       instance ``zh_TW`` by itself is not a
                                       locale that exists but Babel can
                                       automatically expand it to the full
                                       form of ``zh_hant_TW``.  Note that this
                                       expansion is only taking place if no
                                       locale exists otherwise.  For instance
                                       there is a locale ``en`` that can exist
                                       by itself.
        :raise `ValueError`: if the string does not appear to be a valid locale
                             identifier
        :raise `UnknownLocaleError`: if no locale data is available for the
                                     requested locale
        """
        if identifier is None:
            return None
        elif isinstance(identifier, Locale):
            return identifier
        elif not isinstance(identifier, string_types):
            raise TypeError('Unexpected value for identifier: %r' % (identifier,))

        parts = parse_locale(identifier, sep=sep)
        input_id = get_locale_identifier(parts)

        def _try_load(parts):
            try:
                return cls(*parts)
            except UnknownLocaleError:
                return None

        def _try_load_reducing(parts):
            # Success on first hit, return it.
            locale = _try_load(parts)
            if locale is not None:
                return locale

            # Now try without script and variant
            locale = _try_load(parts[:2])
            if locale is not None:
                return locale

        locale = _try_load(parts)
        if locale is not None:
            return locale
        if not resolve_likely_subtags:
            raise UnknownLocaleError(input_id)

        # From here onwards is some very bad likely subtag resolving.  This
        # whole logic is not entirely correct but good enough (tm) for the
        # time being.  This has been added so that zh_TW does not cause
        # errors for people when they upgrade.  Later we should properly
        # implement ICU like fuzzy locale objects and provide a way to
        # maximize and minimize locale tags.

        language, territory, script, variant = parts
        language = get_global('language_aliases').get(language, language)
        territory = get_global('territory_aliases').get(territory, territory)
        script = get_global('script_aliases').get(script, script)
        variant = get_global('variant_aliases').get(variant, variant)

        if territory == 'ZZ':
            territory = None
        if script == 'Zzzz':
            script = None

        parts = language, territory, script, variant

        # First match: try the whole identifier
        new_id = get_locale_identifier(parts)
        likely_subtag = get_global('likely_subtags').get(new_id)
        if likely_subtag is not None:
            locale = _try_load_reducing(parse_locale(likely_subtag))
            if locale is not None:
                return locale

        # If we did not find anything so far, try again with a
        # simplified identifier that is just the language
        likely_subtag = get_global('likely_subtags').get(language)
        if likely_subtag is not None:
            language2, _, script2, variant2 = parse_locale(likely_subtag)
            locale = _try_load_reducing((language2, territory, script2, variant2))
            if locale is not None:
                return locale

        raise UnknownLocaleError(input_id)

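    # Editor's note: a behaviour sketch of the likely-subtag path above,
    # mirroring the doctest. With resolution disabled the lookup fails fast;
    # with it enabled, 'und_AT' expands via the CLDR likely-subtags data.
    #
    #     from babel import Locale, UnknownLocaleError
    #     try:
    #         Locale.parse('und_AT', resolve_likely_subtags=False)
    #     except UnknownLocaleError as exc:
    #         print(exc)                       # unknown locale 'und_AT'
    #     print(repr(Locale.parse('und_AT')))  # Locale('de', territory='AT')
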
    def __eq__(self, other):
        for key in ('language', 'territory', 'script', 'variant'):
            if not hasattr(other, key):
                return False
        return (self.language == other.language) and \
               (self.territory == other.territory) and \
               (self.script == other.script) and \
               (self.variant == other.variant)

    def __ne__(self, other):
        return not self.__eq__(other)

    def __repr__(self):
        parameters = ['']
        for key in ('territory', 'script', 'variant'):
            value = getattr(self, key)
            if value is not None:
                parameters.append('%s=%r' % (key, value))
        parameter_string = '%r' % self.language + ', '.join(parameters)
        return 'Locale(%s)' % parameter_string

    def __str__(self):
        return get_locale_identifier((self.language, self.territory,
                                      self.script, self.variant))

    @property
    def _data(self):
        if self.__data is None:
            self.__data = localedata.LocaleDataDict(localedata.load(str(self)))
        return self.__data

    def get_display_name(self, locale=None):
        """Return the display name of the locale using the given locale.

        The display name will include the language, territory, script, and
        variant, if those are specified.

        >>> Locale('zh', 'CN', script='Hans').get_display_name('en')
        u'Chinese (Simplified, China)'

        :param locale: the locale to use
        """
        if locale is None:
            locale = self
        locale = Locale.parse(locale)
        retval = locale.languages.get(self.language)
        if self.territory or self.script or self.variant:
            details = []
            if self.script:
                details.append(locale.scripts.get(self.script))
            if self.territory:
                details.append(locale.territories.get(self.territory))
            if self.variant:
                details.append(locale.variants.get(self.variant))
            details = filter(None, details)
            if details:
                retval += ' (%s)' % u', '.join(details)
        return retval

    display_name = property(get_display_name, doc="""\
        The localized display name of the locale.

        >>> Locale('en').display_name
        u'English'
        >>> Locale('en', 'US').display_name
        u'English (United States)'
        >>> Locale('sv').display_name
        u'svenska'

        :type: `unicode`
        """)

    def get_language_name(self, locale=None):
        """Return the language of this locale in the given locale.

        >>> Locale('zh', 'CN', script='Hans').get_language_name('de')
        u'Chinesisch'

        .. versionadded:: 1.0

        :param locale: the locale to use
        """
        if locale is None:
            locale = self
        locale = Locale.parse(locale)
        return locale.languages.get(self.language)

    language_name = property(get_language_name, doc="""\
        The localized language name of the locale.

        >>> Locale('en', 'US').language_name
        u'English'
        """)

    def get_territory_name(self, locale=None):
        """Return the territory name in the given locale."""
        if locale is None:
            locale = self
        locale = Locale.parse(locale)
        return locale.territories.get(self.territory)

    territory_name = property(get_territory_name, doc="""\
        The localized territory name of the locale if available.

        >>> Locale('de', 'DE').territory_name
        u'Deutschland'
        """)

    def get_script_name(self, locale=None):
        """Return the script name in the given locale."""
        if locale is None:
            locale = self
        locale = Locale.parse(locale)
        return locale.scripts.get(self.script)

    script_name = property(get_script_name, doc="""\
        The localized script name of the locale if available.

        >>> Locale('ms', 'SG', script='Latn').script_name
        u'Latin'
        """)

    @property
    def english_name(self):
        """The english display name of the locale.

        >>> Locale('de').english_name
        u'German'
        >>> Locale('de', 'DE').english_name
        u'German (Germany)'

        :type: `unicode`"""
        return self.get_display_name(Locale('en'))

    #{ General Locale Display Names

    @property
    def languages(self):
        """Mapping of language codes to translated language names.

        >>> Locale('de', 'DE').languages['ja']
        u'Japanisch'

        See `ISO 639 <http://www.loc.gov/standards/iso639-2/>`_ for
        more information.
        """
        return self._data['languages']

    @property
    def scripts(self):
        """Mapping of script codes to translated script names.

        >>> Locale('en', 'US').scripts['Hira']
        u'Hiragana'

        See `ISO 15924 <http://www.evertype.com/standards/iso15924/>`_
        for more information.
        """
        return self._data['scripts']

    @property
    def territories(self):
        """Mapping of territory codes to translated territory names.

        >>> Locale('es', 'CO').territories['DE']
        u'Alemania'

        See `ISO 3166 <http://www.iso.org/iso/en/prods-services/iso3166ma/>`_
        for more information.
        """
        return self._data['territories']

    @property
    def variants(self):
        """Mapping of variant codes to translated variant names.

        >>> Locale('de', 'DE').variants['1901']
        u'Alte deutsche Rechtschreibung'
        """
        return self._data['variants']

    #{ Number Formatting

    @property
    def currencies(self):
        """Mapping of currency codes to translated currency names.  This
        only returns the generic form of the currency name, not the count
        specific one.  If an actual number is requested use the
        :func:`babel.numbers.get_currency_name` function.

        >>> Locale('en').currencies['COP']
        u'Colombian Peso'
        >>> Locale('de', 'DE').currencies['COP']
        u'Kolumbianischer Peso'
        """
        return self._data['currency_names']

    @property
    def currency_symbols(self):
        """Mapping of currency codes to symbols.

        >>> Locale('en', 'US').currency_symbols['USD']
        u'$'
        >>> Locale('es', 'CO').currency_symbols['USD']
        u'US$'
        """
        return self._data['currency_symbols']

    @property
    def number_symbols(self):
        """Symbols used in number formatting.

        >>> Locale('fr', 'FR').number_symbols['decimal']
        u','
        """
        return self._data['number_symbols']

    @property
    def decimal_formats(self):
        """Locale patterns for decimal number formatting.

        >>> Locale('en', 'US').decimal_formats[None]
        <NumberPattern u'#,##0.###'>
        """
        return self._data['decimal_formats']

    @property
    def currency_formats(self):
        """Locale patterns for currency number formatting.

        >>> print Locale('en', 'US').currency_formats[None]
        <NumberPattern u'\\xa4#,##0.00'>
        """
        return self._data['currency_formats']

    @property
    def percent_formats(self):
        """Locale patterns for percent number formatting.

        >>> Locale('en', 'US').percent_formats[None]
        <NumberPattern u'#,##0%'>
        """
        return self._data['percent_formats']

    @property
    def scientific_formats(self):
        """Locale patterns for scientific number formatting.

        >>> Locale('en', 'US').scientific_formats[None]
        <NumberPattern u'#E0'>
        """
        return self._data['scientific_formats']

    #{ Calendar Information and Date Formatting

    @property
    def periods(self):
        """Locale display names for day periods (AM/PM).

        >>> Locale('en', 'US').periods['am']
        u'AM'
        """
        return self._data['periods']

    @property
    def days(self):
        """Locale display names for weekdays.

        >>> Locale('de', 'DE').days['format']['wide'][3]
        u'Donnerstag'
        """
        return self._data['days']

    @property
    def months(self):
        """Locale display names for months.

        >>> Locale('de', 'DE').months['format']['wide'][10]
        u'Oktober'
        """
        return self._data['months']

    @property
    def quarters(self):
        """Locale display names for quarters.

        >>> Locale('de', 'DE').quarters['format']['wide'][1]
        u'1. Quartal'
        """
        return self._data['quarters']

    @property
    def eras(self):
        """Locale display names for eras.

        >>> Locale('en', 'US').eras['wide'][1]
        u'Anno Domini'
        >>> Locale('en', 'US').eras['abbreviated'][0]
        u'BC'
        """
        return self._data['eras']

    @property
    def time_zones(self):
        """Locale display names for time zones.

        >>> Locale('en', 'US').time_zones['Europe/London']['long']['daylight']
        u'British Summer Time'
        >>> Locale('en', 'US').time_zones['America/St_Johns']['city']
        u'St. John\u2019s'
        """
        return self._data['time_zones']

    @property
    def meta_zones(self):
        """Locale display names for meta time zones.

        Meta time zones are basically groups of different Olson time zones
        that have the same GMT offset and daylight saving time.

        >>> Locale('en', 'US').meta_zones['Europe_Central']['long']['daylight']
        u'Central European Summer Time'

        .. versionadded:: 0.9
        """
        return self._data['meta_zones']

    @property
    def zone_formats(self):
        """Patterns related to the formatting of time zones.

        >>> Locale('en', 'US').zone_formats['fallback']
        u'%(1)s (%(0)s)'
        >>> Locale('pt', 'BR').zone_formats['region']
        u'Hor\\xe1rio %s'

        .. versionadded:: 0.9
        """
        return self._data['zone_formats']

    @property
    def first_week_day(self):
        """The first day of a week, with 0 being Monday.

        >>> Locale('de', 'DE').first_week_day
        0
        >>> Locale('en', 'US').first_week_day
        6
        """
        return self._data['week_data']['first_day']

    @property
    def weekend_start(self):
        """The day the weekend starts, with 0 being Monday.

        >>> Locale('de', 'DE').weekend_start
        5
        """
        return self._data['week_data']['weekend_start']

    @property
    def weekend_end(self):
        """The day the weekend ends, with 0 being Monday.

        >>> Locale('de', 'DE').weekend_end
        6
        """
        return self._data['week_data']['weekend_end']

    @property
    def min_week_days(self):
        """The minimum number of days in a week so that the week is counted
        as the first week of a year or month.

        >>> Locale('de', 'DE').min_week_days
        4
        """
        return self._data['week_data']['min_days']

    @property
    def date_formats(self):
        """Locale patterns for date formatting.

        >>> Locale('en', 'US').date_formats['short']
        <DateTimePattern u'M/d/yy'>
        >>> Locale('fr', 'FR').date_formats['long']
        <DateTimePattern u'd MMMM y'>
        """
        return self._data['date_formats']

    @property
    def time_formats(self):
        """Locale patterns for time formatting.

        >>> Locale('en', 'US').time_formats['short']
        <DateTimePattern u'h:mm a'>
        >>> Locale('fr', 'FR').time_formats['long']
        <DateTimePattern u'HH:mm:ss z'>
        """
        return self._data['time_formats']

    @property
    def datetime_formats(self):
        """Locale patterns for datetime formatting.

        >>> Locale('en').datetime_formats['full']
        u"{1} 'at' {0}"
        >>> Locale('th').datetime_formats['medium']
        u'{1}, {0}'
        """
        return self._data['datetime_formats']

    @property
    def plural_form(self):
        """Plural rules for the locale.

        >>> Locale('en').plural_form(1)
        'one'
        >>> Locale('en').plural_form(0)
        'other'
        >>> Locale('fr').plural_form(0)
        'one'
        >>> Locale('ru').plural_form(100)
        'many'
        """
        return self._data['plural_form']


def default_locale(category=None, aliases=LOCALE_ALIASES):
    """Returns the system default locale for a given category, based on
    environment variables.

    >>> for name in ['LANGUAGE', 'LC_ALL', 'LC_CTYPE']:
    ...     os.environ[name] = ''
    >>> os.environ['LANG'] = 'fr_FR.UTF-8'
    >>> default_locale('LC_MESSAGES')
    'fr_FR'

    The "C" or "POSIX" pseudo-locales are treated as aliases for the
    "en_US_POSIX" locale:

    >>> os.environ['LC_MESSAGES'] = 'POSIX'
    >>> default_locale('LC_MESSAGES')
    'en_US_POSIX'

    The following environment variables are always checked as fallbacks, in
    this order:

    - ``LANGUAGE``
    - ``LC_ALL``
    - ``LC_CTYPE``
    - ``LANG``

    :param category: one of the ``LC_XXX`` environment variable names
    :param aliases: a dictionary of aliases for locale identifiers
    """
    varnames = (category, 'LANGUAGE', 'LC_ALL', 'LC_CTYPE', 'LANG')
    for name in filter(None, varnames):
        locale = os.getenv(name)
        if locale:
            if name == 'LANGUAGE' and ':' in locale:
                # the LANGUAGE variable may contain a colon-separated list of
                # language codes; we just pick the first language on the list
                locale = locale.split(':')[0]
            if locale in ('C', 'POSIX'):
                locale = 'en_US_POSIX'
            elif aliases and locale in aliases:
                locale = aliases[locale]
            try:
                return get_locale_identifier(parse_locale(locale))
            except ValueError:
                pass


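A quick illustration of the precedence implemented above: the requested
category is consulted first, then LANGUAGE outranks LANG when both are set
(a sketch, assuming a POSIX-style environment):

>>> os.environ['LC_MESSAGES'] = ''
>>> os.environ['LANGUAGE'] = 'de_DE:en_US'    # colon-separated list
>>> os.environ['LANG'] = 'fr_FR.UTF-8'
>>> default_locale('LC_MESSAGES')
'de_DE'
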
def negotiate_locale(preferred, available, sep='_', aliases=LOCALE_ALIASES):
    """Find the best match between available and requested locale strings.

    >>> negotiate_locale(['de_DE', 'en_US'], ['de_DE', 'de_AT'])
    'de_DE'
    >>> negotiate_locale(['de_DE', 'en_US'], ['en', 'de'])
    'de'

    Case is ignored by the algorithm; the result uses the case of the
    preferred locale identifier:

    >>> negotiate_locale(['de_DE', 'en_US'], ['de_de', 'de_at'])
    'de_DE'

    By default, some web browsers unfortunately do not include the territory
    in the locale identifier for many locales, and some don't even allow the
    user to easily add the territory. So while you may prefer using qualified
    locale identifiers in your web-application, they would not normally match
    the language-only locale sent by such browsers. To work around that, this
    function uses a default mapping of commonly used language-only locale
    identifiers to identifiers including the territory:

    >>> negotiate_locale(['ja', 'en_US'], ['ja_JP', 'en_US'])
    'ja_JP'

    Some browsers even use an incorrect or outdated language code, such as
    "no" for Norwegian, where the correct locale identifier would actually be
    "nb_NO" (Bokmål) or "nn_NO" (Nynorsk). The aliases are intended to take
    care of such cases, too:

    >>> negotiate_locale(['no', 'sv'], ['nb_NO', 'sv_SE'])
    'nb_NO'

    You can override this default mapping by passing a different `aliases`
    dictionary to this function, or you can bypass the behavior altogether by
    setting the `aliases` parameter to `None`.

    :param preferred: the list of locale strings preferred by the user
    :param available: the list of locale strings available
    :param sep: character that separates the different parts of the locale
                strings
    :param aliases: a dictionary of aliases for locale identifiers
    """
    available = [a.lower() for a in available if a]
    for locale in preferred:
        ll = locale.lower()
        if ll in available:
            return locale
        if aliases:
            alias = aliases.get(ll)
            if alias:
                alias = alias.replace('_', sep)
                if alias.lower() in available:
                    return alias
        parts = locale.split(sep)
        if len(parts) > 1 and parts[0].lower() in available:
            return parts[0]
    return None


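In practice the preferred list usually comes from an HTTP Accept-Language
header. A minimal sketch of that wiring (hypothetical helper; the header
parsing here ignores q-values, which a real application should honor):

def negotiate_from_header(accept_language, supported):
    # 'de-DE,de;q=0.9,en;q=0.8' -> ['de_DE', 'de', 'en'] (q-values dropped)
    preferred = [part.split(';')[0].strip().replace('-', '_')
                 for part in accept_language.split(',')]
    return negotiate_locale(preferred, supported)

>>> negotiate_from_header('de-DE,de;q=0.9,en;q=0.8', ['en_US', 'de_DE'])
'de_DE'
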
def parse_locale(identifier, sep='_'):
    """Parse a locale identifier into a tuple of the form ``(language,
    territory, script, variant)``.

    >>> parse_locale('zh_CN')
    ('zh', 'CN', None, None)
    >>> parse_locale('zh_Hans_CN')
    ('zh', 'CN', 'Hans', None)

    The default component separator is "_", but a different separator can be
    specified using the `sep` parameter:

    >>> parse_locale('zh-CN', sep='-')
    ('zh', 'CN', None, None)

    If the identifier cannot be parsed into a locale, a `ValueError` exception
    is raised:

    >>> parse_locale('not_a_LOCALE_String')
    Traceback (most recent call last):
      ...
    ValueError: 'not_a_LOCALE_String' is not a valid locale identifier

    Encoding information and locale modifiers are removed from the identifier:

    >>> parse_locale('it_IT@euro')
    ('it', 'IT', None, None)
    >>> parse_locale('en_US.UTF-8')
    ('en', 'US', None, None)
    >>> parse_locale('de_DE.iso885915@euro')
    ('de', 'DE', None, None)

    See :rfc:`4646` for more information.

    :param identifier: the locale identifier string
    :param sep: character that separates the different components of the
                locale identifier
    :raise `ValueError`: if the string does not appear to be a valid locale
                         identifier
    """
    if '.' in identifier:
        # this is probably the charset/encoding, which we don't care about
        identifier = identifier.split('.', 1)[0]
    if '@' in identifier:
        # this is a locale modifier such as @euro, which we don't care about
        # either
        identifier = identifier.split('@', 1)[0]

    parts = identifier.split(sep)
    lang = parts.pop(0).lower()
    if not lang.isalpha():
        raise ValueError('expected only letters, got %r' % lang)

    script = territory = variant = None
    if parts:
        if len(parts[0]) == 4 and parts[0].isalpha():
            script = parts.pop(0).title()

    if parts:
        if len(parts[0]) == 2 and parts[0].isalpha():
            territory = parts.pop(0).upper()
        elif len(parts[0]) == 3 and parts[0].isdigit():
            territory = parts.pop(0)

    if parts:
        if len(parts[0]) == 4 and parts[0][0].isdigit() or \
                len(parts[0]) >= 5 and parts[0][0].isalpha():
            variant = parts.pop()

    if parts:
        raise ValueError('%r is not a valid locale identifier' % identifier)

    return lang, territory, script, variant


def get_locale_identifier(tup, sep='_'):
    """The reverse of :func:`parse_locale`. It creates a locale identifier
    out of a ``(language, territory, script, variant)`` tuple. Items can be
    set to ``None`` and trailing ``None``\s can also be left out of the
    tuple.

    >>> get_locale_identifier(('de', 'DE', None, '1999'))
    'de_DE_1999'

    .. versionadded:: 1.0

    :param tup: the tuple as returned by :func:`parse_locale`.
    :param sep: the separator for the identifier.
    """
    tup = tuple(tup[:4])
    lang, territory, script, variant = tup + (None,) * (4 - len(tup))
    return sep.join(filter(None, (lang, script, territory, variant)))

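Taken together, parse_locale and get_locale_identifier make a convenient
normalizer for user-supplied locale strings (a sketch):

>>> get_locale_identifier(parse_locale('DE_de.utf-8@euro'))
'de_DE'
>>> get_locale_identifier(parse_locale('zh-hans-cn', sep='-'))
'zh_Hans_CN'
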
1181  vendor/babel/dates.py  vendored  (File diff suppressed because it is too large.)
BIN   vendor/babel/global.dat  vendored  (Binary file not shown.)
209   vendor/babel/localedata.py  vendored
@@ -1,209 +0,0 @@
# -*- coding: utf-8 -*-
"""
    babel.localedata
    ~~~~~~~~~~~~~~~~

    Low-level locale data access.

    :note: The `Locale` class, which uses this module under the hood, provides
           a more convenient interface for accessing the locale data.

    :copyright: (c) 2013 by the Babel Team.
    :license: BSD, see LICENSE for more details.
"""

import os
import threading
from collections import MutableMapping

from babel._compat import pickle


_cache = {}
_cache_lock = threading.RLock()
_dirname = os.path.join(os.path.dirname(__file__), 'localedata')


def exists(name):
    """Check whether locale data is available for the given locale. The
    return value is `True` if it exists, `False` otherwise.

    :param name: the locale identifier string
    """
    if name in _cache:
        return True
    return os.path.exists(os.path.join(_dirname, '%s.dat' % name))


def locale_identifiers():
    """Return a list of all locale identifiers for which locale data is
    available.

    .. versionadded:: 0.8.1

    :return: a list of locale identifiers (strings)
    """
    return [stem for stem, extension in [
        os.path.splitext(filename) for filename in os.listdir(_dirname)
    ] if extension == '.dat' and stem != 'root']


def load(name, merge_inherited=True):
    """Load the locale data for the given locale.

    The locale data is a dictionary that contains much of the data defined by
    the Common Locale Data Repository (CLDR). This data is stored as a
    collection of pickle files inside the ``babel`` package.

    >>> d = load('en_US')
    >>> d['languages']['sv']
    u'Swedish'

    Note that the results are cached, and subsequent requests for the same
    locale return the same dictionary:

    >>> d1 = load('en_US')
    >>> d2 = load('en_US')
    >>> d1 is d2
    True

    :param name: the locale identifier string (or "root")
    :param merge_inherited: whether the inherited data should be merged into
                            the data of the requested locale
    :raise `IOError`: if no locale data file is found for the given locale
                      identifier, or one of the locales it inherits from
    """
    _cache_lock.acquire()
    try:
        data = _cache.get(name)
        if not data:
            # Load inherited data
            if name == 'root' or not merge_inherited:
                data = {}
            else:
                parts = name.split('_')
                if len(parts) == 1:
                    parent = 'root'
                else:
                    parent = '_'.join(parts[:-1])
                data = load(parent).copy()
            filename = os.path.join(_dirname, '%s.dat' % name)
            fileobj = open(filename, 'rb')
            try:
                if name != 'root' and merge_inherited:
                    merge(data, pickle.load(fileobj))
                else:
                    data = pickle.load(fileobj)
                _cache[name] = data
            finally:
                fileobj.close()
        return data
    finally:
        _cache_lock.release()


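The inheritance handling above means a territory-specific locale is built up
from its parents: loading 'de_DE' loads 'root', merges 'de' into it, and
finally merges the thin 'de_DE' overlay on top. A sketch (assuming the
vendored .dat files are present and carry the usual CLDR content):

>>> d = load('de_DE')       # effectively root <- de <- de_DE
>>> d['languages']['sv']    # inherited from the 'de' parent
u'Schwedisch'
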
def merge(dict1, dict2):
    """Merge the data from `dict2` into the `dict1` dictionary, making copies
    of nested dictionaries.

    >>> d = {1: 'foo', 3: 'baz'}
    >>> merge(d, {1: 'Foo', 2: 'Bar'})
    >>> items = d.items(); items.sort(); items
    [(1, 'Foo'), (2, 'Bar'), (3, 'baz')]

    :param dict1: the dictionary to merge into
    :param dict2: the dictionary containing the data that should be merged
    """
    for key, val2 in dict2.items():
        if val2 is not None:
            val1 = dict1.get(key)
            if isinstance(val2, dict):
                if val1 is None:
                    val1 = {}
                if isinstance(val1, Alias):
                    val1 = (val1, val2)
                elif isinstance(val1, tuple):
                    alias, others = val1
                    others = others.copy()
                    merge(others, val2)
                    val1 = (alias, others)
                else:
                    val1 = val1.copy()
                    merge(val1, val2)
            else:
                val1 = val2
            dict1[key] = val1


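Unlike dict.update, nested dictionaries are merged recursively instead of
being replaced wholesale, and the nested values are copied before mutation.
A small sketch:

>>> d = {'week_data': {'first_day': 0, 'min_days': 4}}
>>> merge(d, {'week_data': {'first_day': 6}})
>>> d['week_data'] == {'first_day': 6, 'min_days': 4}
True
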
class Alias(object):
    """Representation of an alias in the locale data.

    An alias is a value that refers to some other part of the locale data,
    as specified by the `keys`.
    """

    def __init__(self, keys):
        self.keys = tuple(keys)

    def __repr__(self):
        return '<%s %r>' % (type(self).__name__, self.keys)

    def resolve(self, data):
        """Resolve the alias based on the given data.

        This is done recursively, so if one alias resolves to a second alias,
        that second alias will also be resolved.

        :param data: the locale data
        :type data: `dict`
        """
        base = data
        for key in self.keys:
            data = data[key]
        if isinstance(data, Alias):
            data = data.resolve(base)
        elif isinstance(data, tuple):
            alias, others = data
            data = alias.resolve(base)
        return data


class LocaleDataDict(MutableMapping):
    """Dictionary wrapper that automatically resolves aliases to the actual
    values.
    """

    def __init__(self, data, base=None):
        self._data = data
        if base is None:
            base = data
        self.base = base

    def __len__(self):
        return len(self._data)

    def __iter__(self):
        return iter(self._data)

    def __getitem__(self, key):
        orig = val = self._data[key]
        if isinstance(val, Alias):  # resolve an alias
            val = val.resolve(self.base)
        if isinstance(val, tuple):  # Merge a partial dict with an alias
            alias, others = val
            val = alias.resolve(self.base).copy()
            merge(val, others)
        if type(val) is dict:  # Return a nested alias-resolving dict
            val = LocaleDataDict(val, base=self.base)
        if val is not orig:
            self._data[key] = val
        return val

    def __setitem__(self, key, value):
        self._data[key] = value

    def __delitem__(self, key):
        del self._data[key]

    def copy(self):
        return LocaleDataDict(self._data.copy(), base=self.base)

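How the pieces fit together: CLDR stores an Alias wherever one section of
locale data simply points at another, and LocaleDataDict chases that pointer
on first access and caches the resolved value in place. A sketch with toy
data (not real CLDR structure):

>>> raw = {'short': Alias(('formats', 'medium')),
...        'formats': {'medium': u'd.M.y'}}
>>> wrapped = LocaleDataDict(raw)
>>> wrapped['short']
u'd.M.y'
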
BIN  vendor/babel/localedata/aa.dat  vendored  (Binary file not shown.)
BIN  vendor/babel/localedata/aa_DJ.dat  vendored  (Binary file not shown.)
BIN  vendor/babel/localedata/aa_ER.dat  vendored  (Binary file not shown.)
4    vendor/babel/localedata/aa_ET.dat  vendored  @@ -1,4 +0,0 @@ (deleted pickled locale data, not human-readable)
BIN  vendor/babel/localedata/af.dat  vendored  (Binary file not shown.)
BIN  vendor/babel/localedata/af_NA.dat  vendored  (Binary file not shown.)
4    vendor/babel/localedata/af_ZA.dat  vendored  @@ -1,4 +0,0 @@ (deleted pickled locale data, not human-readable)
BIN  vendor/babel/localedata/agq.dat  vendored  (Binary file not shown.)
BIN  vendor/babel/localedata/agq_CM.dat  vendored  (Binary file not shown.)
BIN  vendor/babel/localedata/ak.dat  vendored  (Binary file not shown.)
BIN  vendor/babel/localedata/ak_GH.dat  vendored  (Binary file not shown.)
BIN  vendor/babel/localedata/am.dat  vendored  (Binary file not shown.)
4    vendor/babel/localedata/am_ET.dat  vendored  @@ -1,4 +0,0 @@ (deleted pickled locale data, not human-readable)
BIN  vendor/babel/localedata/ar.dat  vendored  (Binary file not shown.)
BIN  vendor/babel/localedata/ar_001.dat  vendored  (Binary file not shown.)
4    vendor/babel/localedata/ar_AE.dat  vendored  @@ -1,4 +0,0 @@ (deleted pickled locale data, not human-readable)
4    vendor/babel/localedata/ar_BH.dat  vendored  @@ -1,4 +0,0 @@ (deleted pickled locale data, not human-readable)
BIN  vendor/babel/localedata/ar_DJ.dat  vendored  (Binary file not shown.)
BIN  vendor/babel/localedata/ar_DZ.dat  vendored  (Binary file not shown.)
4    vendor/babel/localedata/ar_EG.dat  vendored  @@ -1,4 +0,0 @@ (deleted pickled locale data, not human-readable)
BIN  vendor/babel/localedata/ar_EH.dat  vendored  (Binary file not shown.)
BIN  vendor/babel/localedata/ar_ER.dat  vendored  (Binary file not shown.)
4    vendor/babel/localedata/ar_IL.dat  vendored  @@ -1,4 +0,0 @@ (deleted pickled locale data, not human-readable)
BIN  vendor/babel/localedata/ar_IQ.dat  vendored  (Binary file not shown.)
BIN  vendor/babel/localedata/ar_JO.dat  vendored  (Binary file not shown.)
BIN  vendor/babel/localedata/ar_KM.dat  vendored  (Binary file not shown.)
4    vendor/babel/localedata/ar_KW.dat  vendored  @@ -1,4 +0,0 @@ (deleted pickled locale data, not human-readable)
BIN  vendor/babel/localedata/ar_LB.dat  vendored  (Binary file not shown.)
BIN  vendor/babel/localedata/ar_LY.dat  vendored  (Binary file not shown.)
BIN  vendor/babel/localedata/ar_MA.dat  vendored  (Binary file not shown.)
BIN  vendor/babel/localedata/ar_MR.dat  vendored  (Binary file not shown.)
4    vendor/babel/localedata/ar_OM.dat  vendored  @@ -1,4 +0,0 @@ (deleted pickled locale data, not human-readable)
BIN  vendor/babel/localedata/ar_PS.dat  vendored  (Binary file not shown.)
BIN  vendor/babel/localedata/ar_QA.dat  vendored  (Binary file not shown.)
BIN  vendor/babel/localedata/ar_SA.dat  vendored  (Binary file not shown.)
4    vendor/babel/localedata/ar_SD.dat  vendored  @@ -1,4 +0,0 @@ (deleted pickled locale data, not human-readable)
BIN  vendor/babel/localedata/ar_SO.dat  vendored  (Binary file not shown.)
BIN  vendor/babel/localedata/ar_SY.dat  vendored  (Binary file not shown.)
4    vendor/babel/localedata/ar_TD.dat  vendored  @@ -1,4 +0,0 @@ (deleted pickled locale data, not human-readable)
BIN  vendor/babel/localedata/ar_TN.dat  vendored  (Binary file not shown.)
BIN  vendor/babel/localedata/ar_YE.dat  vendored  (Binary file not shown.)
BIN  vendor/babel/localedata/as.dat  vendored  (Binary file not shown.)
4    vendor/babel/localedata/as_IN.dat  vendored  @@ -1,4 +0,0 @@ (deleted pickled locale data, not human-readable)
BIN  vendor/babel/localedata/asa.dat  vendored  (Binary file not shown.)
BIN  vendor/babel/localedata/asa_TZ.dat  vendored  (Binary file not shown.)
BIN  vendor/babel/localedata/ast.dat  vendored  (Binary file not shown.)
BIN  vendor/babel/localedata/ast_ES.dat  vendored  (Binary file not shown.)
BIN  vendor/babel/localedata/az.dat  vendored  (Binary file not shown.)
BIN  vendor/babel/localedata/az_Cyrl.dat  vendored  (Binary file not shown.)
BIN  vendor/babel/localedata/az_Cyrl_AZ.dat  vendored  (Binary file not shown.)
BIN  vendor/babel/localedata/az_Latn.dat  vendored  (Binary file not shown.)
BIN  vendor/babel/localedata/az_Latn_AZ.dat  vendored  (Binary file not shown.)
BIN  vendor/babel/localedata/bas.dat  vendored  (Binary file not shown.)
BIN  vendor/babel/localedata/bas_CM.dat  vendored  (Binary file not shown.)
BIN  vendor/babel/localedata/be.dat  vendored  (Binary file not shown.)
4    vendor/babel/localedata/be_BY.dat  vendored  @@ -1,4 +0,0 @@ (deleted pickled locale data, not human-readable)
BIN  vendor/babel/localedata/bem.dat  vendored  (Binary file not shown.)
BIN  vendor/babel/localedata/bem_ZM.dat  vendored  (Binary file not shown.)
Some files were not shown because too many files have changed in this diff.