Merge pull request #867 from Gluejar/stream-api

Stream api
Eric Hellman 2020-04-06 15:14:21 -04:00 committed by GitHub
commit 194950e2a5
5 changed files with 34 additions and 20 deletions


@@ -17,8 +17,8 @@ feed_header = """<?xml version="1.0" encoding="UTF-8"?>
 <ONIXMessage release="3.0" xmlns="http://ns.editeur.org/onix/3.0/reference" >
 """
 feed_xml = feed_header + '</ONIXMessage>'
+soup = None
 bisac = Bisac()
-soup = BeautifulSoup(feed_xml, 'xml')
 def text_node(tag, text, attrib=None):
     node = soup.new_tag(tag)
@@ -36,6 +36,10 @@ def sub_element(node, tag, attrib=None):
 def onix_feed(facet, max=None, page_number=None):
+    global soup
+    if not soup:
+        soup = BeautifulSoup('', 'lxml')
     yield feed_header + str(header(facet))
     works = facet.works[0:max] if max else facet.works
@@ -56,13 +60,17 @@ def onix_feed(facet, max=None, page_number=None):
     yield '</ONIXMessage>'
 def onix_feed_for_work(work):
-    soup = BeautifulSoup(feed_xml, 'xml')
-    soup.ONIXMessage.append(header(work))
+    global soup
+    if not soup:
+        soup = BeautifulSoup('', 'lxml')
+    feed = BeautifulSoup(feed_xml, 'xml')
+    feed.ONIXMessage.append(header(work))
     for edition in models.Edition.objects.filter(work=work, ebooks__isnull=False).distinct():
         edition_prod = product(edition)
         if edition_prod is not None:
-            soup.ONIXMessage.append(product(edition))
-    return str(soup)
+            feed.ONIXMessage.append(product(edition))
+    return str(feed)
 def header(facet=None):
     header_node = soup.new_tag("Header")
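The ONIX changes above share one pattern: the module-level soup is no longer built at import time but created lazily inside each entry point, and the faceted feed is emitted as a generator of string chunks, one per record, so it can back a streaming HTTP response. A minimal sketch of that pattern (the Product records here are hypothetical stand-ins for the real header()/product() helpers):

    from bs4 import BeautifulSoup

    soup = None  # lazy module-level tag factory, as in the diff

    FEED_HEADER = ('<?xml version="1.0" encoding="UTF-8"?>\n'
                   '<ONIXMessage release="3.0">\n')

    def text_node(tag, text):
        # the shared soup is used only to mint new tags
        node = soup.new_tag(tag)
        node.string = text
        return node

    def onix_feed(items):
        global soup
        if not soup:
            soup = BeautifulSoup('', 'lxml')  # built on first use, not at import
        yield FEED_HEADER
        for item in items:
            yield str(text_node('Product', item))  # one chunk per record
        yield '</ONIXMessage>'

    # consumers can stream the chunks or join them:
    # body = ''.join(onix_feed(['9780000000001', '9780000000002']))

Nothing heavyweight runs at import, and each record is serialized as soon as it is built, so memory use stays flat however large the facet is.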


@@ -16,8 +16,8 @@ import regluit.core.cc as cc
 licenses = cc.LICENSE_LIST
 logger = logging.getLogger(__name__)
-soup = BeautifulSoup('', 'xml')
+soup = None
 FORMAT_TO_MIMETYPE = {'pdf':"application/pdf",
     'epub':"application/epub+zip",
     'mobi':"application/x-mobipocket-ebook",
@@ -47,7 +47,8 @@ def get_facet_class(name):
 def text_node(tag, text):
     node = soup.new_tag(tag)
-    node.string = text
+    if text:
+        node.string = text
     return node
 def html_node(tag, html):
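The new guard in text_node keeps falsy values (None or an empty string) out of node.string, so a missing piece of metadata serializes as an empty element rather than a stray string. A quick illustration of the guarded helper on its own:

    from bs4 import BeautifulSoup

    soup = BeautifulSoup('', 'xml')

    def text_node(tag, text):
        node = soup.new_tag(tag)
        if text:  # skip the assignment for None / ''
            node.string = text
        return node

    print(text_node('title', None))         # <title/>
    print(text_node('title', 'Moby-Dick'))  # <title>Moby-Dick</title>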
@@ -124,16 +125,16 @@ def work_node(work, facet=None):
     cover_node = soup.new_tag("link")
     cover_node.attrs.update({
-        "href":work.cover_image_small(),
-        "type":"image/"+work.cover_filetype(),
-        "rel":"http://opds-spec.org/image/thumbnail"
+        "href": work.cover_image_small(),
+        "type": "image/" + work.cover_filetype(),
+        "rel": "http://opds-spec.org/image/thumbnail"
     })
     node.append(cover_node)
     cover_node = soup.new_tag("link")
     cover_node.attrs.update({
-        "href":work.cover_image_thumbnail(),
-        "type":"image/"+work.cover_filetype(),
-        "rel":"http://opds-spec.org/image"
+        "href": work.cover_image_thumbnail(),
+        "type": "image/" + work.cover_filetype(),
+        "rel": "http://opds-spec.org/image"
     })
     node.append(cover_node)
@@ -272,10 +273,14 @@ def opds_feed_for_work(work_id):
     return opds_feed_for_works(single_work_facet(work_id))
 def opds_feed_for_works(the_facet, page=None, order_by='newest'):
+    global soup
+    if not soup:
+        soup = BeautifulSoup('', 'lxml')
     works = the_facet.works
     feed_path = the_facet.feed_path
     title = the_facet.title
-    feed_header = """<feed xmlns:dcterms="http://purl.org/dc/terms/"
+    feed_header = """<?xml version="1.0" encoding="UTF-8"?>
+<feed xmlns:dcterms="http://purl.org/dc/terms/"
     xmlns:opds="http://opds-spec.org/"
     xmlns="http://www.w3.org/2005/Atom"
     xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
@@ -357,7 +362,6 @@ def opds_feed_for_works(the_facet, page=None, order_by='newest'):
     if page > 0:
         yield navlink('previous', feed_path, page-1, order_by, title="Previous 10").prettify()
     for work in works:
         yield work_node(work, facet=the_facet.facet_object).prettify()
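The OPDS changes mirror the ONIX ones: soup becomes a lazily initialized global, feed_header gains an explicit XML declaration, and the feed is yielded one fragment at a time, nav links first, then one entry per work. A sketch of that chunked, paginated shape, with trivial stand-ins for the real navlink and work_node helpers:

    def navlink(rel, path, page):
        # stand-in for the real helper
        return '<link rel="%s" href="%s?page=%d"/>\n' % (rel, path, page)

    def work_node(work):
        # stand-in for the real helper
        return '<entry><title>%s</title></entry>\n' % work

    def opds_feed_for_works(works, feed_path, page=0, page_size=10):
        yield ('<?xml version="1.0" encoding="UTF-8"?>\n'
               '<feed xmlns="http://www.w3.org/2005/Atom">\n')
        if page > 0:
            yield navlink('previous', feed_path, page - 1)
        start = page * page_size
        for work in works[start:start + page_size]:
            yield work_node(work)
        yield '</feed>'

Each yield is a self-contained fragment, which is what makes the generator safe to hand to a streaming response.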


@@ -225,7 +225,7 @@ class OnixView(View):
                 work = models.safe_get_work(work)
             except models.Work.DoesNotExist:
                 raise Http404
-            return StreamingHttpResponse(onix.onix_feed_for_work(work), content_type="text/xml")
+            return HttpResponse(onix.onix_feed_for_work(work), content_type="text/xml")
         facet = kwargs.get('facet', 'all')
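The view change is the flip side of the feed change: onix_feed_for_work() now returns a finished string, and a plain string handed to StreamingHttpResponse would be iterated character by character, one chunk per character, so HttpResponse is the right wrapper on the single-work path while the generator-based faceted feed keeps streaming. Roughly (the view function names here are hypothetical):

    from django.http import HttpResponse, StreamingHttpResponse

    def onix_work_view(request, work):
        # onix_feed_for_work returns a str -> buffered response
        return HttpResponse(onix_feed_for_work(work), content_type="text/xml")

    def onix_facet_view(request, facet):
        # onix_feed is a generator of chunks -> streaming response
        return StreamingHttpResponse(onix_feed(facet), content_type="text/xml")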


@@ -891,7 +891,7 @@ class Edition(models.Model):
                 im = get_thumbnail(self.cover_image, 'x550', crop='noop', quality=95)
                 if im.exists():
                     return im.url
-            except IOError:
+            except (IOError, OSError):
                 pass
         elif self.googlebooks_id:
             url = "https://encrypted.google.com/books?id=%s&printsec=frontcover&img=1&zoom=0" % self.googlebooks_id
@@ -902,7 +902,7 @@ class Edition(models.Model):
                 im = get_thumbnail(url, 'x550', crop='noop', quality=95)
                 if im.exists():
                     return im.url
-            except IOError:
+            except (IOError, OSError):
                 pass
         return ''
@@ -913,7 +913,7 @@ class Edition(models.Model):
                 im = get_thumbnail(self.cover_image, 'x80', crop='noop', quality=95)
                 if im.exists():
                     return im.url
-            except IOError:
+            except (IOError, OSError):
                 pass
         if self.googlebooks_id:
             return "https://encrypted.google.com/books?id=%s&printsec=frontcover&img=1&zoom=5" % self.googlebooks_id
@@ -926,7 +926,7 @@ class Edition(models.Model):
                 im = get_thumbnail(self.cover_image, '128', crop='noop', quality=95)
                 if im.exists():
                     return im.url
-            except IOError:
+            except (IOError, OSError):
                 pass
         if self.googlebooks_id:
             return "https://encrypted.google.com/books?id=%s&printsec=frontcover&img=1&zoom=1" % self.googlebooks_id


@@ -433,6 +433,8 @@ NOTIFICATION_QUEUE_ALL = True
 # amazon or paypal for now.
 PAYMENT_PROCESSOR = 'stripelib'
+# allow application code to catch thumbnailing errors
+THUMBNAIL_DEBUG = True
 # we should suppress Google Analytics outside of production
 SHOW_GOOGLE_ANALYTICS = False
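THUMBNAIL_DEBUG is sorl-thumbnail's error-handling switch: with it set to True, get_thumbnail() propagates failures (a corrupt source image, say) instead of logging them and returning quietly, which is what the new comment means by letting application code catch thumbnailing errors. That is what allows the (IOError, OSError) handlers in Edition to do their own fallback:

    from sorl.thumbnail import get_thumbnail

    # in Django settings:
    THUMBNAIL_DEBUG = True  # propagate thumbnail errors to the caller

    # in the caller, as in the Edition methods above:
    def small_cover_url(cover_image):
        try:
            im = get_thumbnail(cover_image, 'x80', crop='noop', quality=95)
            return im.url if im.exists() else ''
        except (IOError, OSError):
            return ''  # fall back, e.g. to a Google Books cover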