import csv
import HTMLParser
import httplib
import logging
import re
from datetime import datetime

import mechanize
import requests

from regluit.core import models

logger = logging.getLogger(__name__)


class LibraryThingException(Exception):
    pass


class LibraryThing(object):
    """
    This class retrieves and parses the CSV representation of a LibraryThing user's library.
    """
    url = "https://www.librarything.com"
    csv_file_url = "https://www.librarything.com/export-csv"

    def __init__(self, username=None, password=None):
        self.username = username
        self.password = password
        self.csv_handle = None

    def retrieve_csv(self):
        br = mechanize.Browser()
        br.open(LibraryThing.url)
        # select the login form -- the second form on the page, hence nr=1 (zero-indexed)
        br.select_form(nr=1)
        br["formusername"] = self.username
        br["formpassword"] = self.password
        br.submit()
        self.csv_handle = br.open(LibraryThing.csv_file_url)
        return self.csv_handle

    def parse_csv(self):
        h = HTMLParser.HTMLParser()
        reader = csv.DictReader(self.csv_handle)
        # There are more fields to be parsed out; note that there is a second author column to handle.
        for (i, row) in enumerate(reader):
            # ISBNs are written like '[123456789x]' in the CSV, suggesting the possibility of a list
            m = re.match(r'^\[(.*)\]$', row["'ISBNs'"])
            if m:
                isbn = m.group(1).split()
            else:
                isbn = []
            yield {'title': h.unescape(row["'TITLE'"]),
                   'author': h.unescape(row["'AUTHOR (first, last)'"]),
                   'isbn': isbn,
                   'comment': row["'COMMENT'"],
                   'tags': row["'TAGS'"],
                   'collections': row["'COLLECTIONS'"],
                   'reviews': h.unescape(row["'REVIEWS'"])}
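
    # Illustrative shape of a record yielded by parse_csv -- the values here are
    # invented for documentation, not taken from a real export:
    #   {'title': 'Example Title', 'author': 'Jane Doe', 'isbn': ['123456789X'],
    #    'comment': '', 'tags': 'fiction', 'collections': 'Your library', 'reviews': ''}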

    def viewstyle_1(self, rows):
        for (i, row) in enumerate(rows):
            book_data = {}
            cols = row.xpath('td')

            # cover
            book_data["cover"] = {"cover_id": cols[0].attrib["id"],
                                  "image": {"width": cols[0].xpath('.//img')[0].attrib['width'],
                                            "src": cols[0].xpath('.//img')[0].attrib['src']}
                                  }

            # title
            book_data["title"] = {"href": cols[1].xpath('.//a')[0].attrib['href'],
                                  "title": cols[1].xpath('.//a')[0].text}

            # extract work_id and book_id from the href
            try:
                (book_data["work_id"], book_data["book_id"]) = \
                    re.match("^/work/(.*)/book/(.*)$", book_data["title"]["href"]).groups()
            except AttributeError:  # href didn't match the expected pattern
                (book_data["work_id"], book_data["book_id"]) = (None, None)

            # author -- what if there is more than one? or none?
            try:
                book_data["author"] = {"display_name": cols[2].xpath('.//a')[0].text,
                                       "href": cols[2].xpath('.//a')[0].attrib['href'],
                                       "name": cols[2].xpath('div')[0].text}
            except IndexError:  # no author link in this column
                book_data["author"] = None

            # date
            book_data["date"] = cols[3].xpath('span')[0].text

            # tags: keep only tag links whose text is not None
            tag_links = cols[4].xpath('.//a')
            book_data["tags"] = filter(lambda x: x is not None, [a.text for a in tag_links])

            # rating -- count the number of star images
            book_data["rating"] = len(cols[5].xpath('.//img[@alt="*"]'))

            # entry date
            book_data["entry_date"] = datetime.date(
                datetime.strptime(cols[6].xpath('span')[0].text, "%b %d, %Y"))

            yield book_data
    def viewstyle_5(self, rows):
        # implement this view to get at the ISBNs
        for (i, row) in enumerate(rows):
            book_data = {}
            cols = row.xpath('td')

            # title
            book_data["title"] = {"href": cols[0].xpath('.//a')[0].attrib['href'],
                                  "title": cols[0].xpath('.//a')[0].text}

            # extract work_id and book_id from the href
            try:
                (book_data["work_id"], book_data["book_id"]) = \
                    re.match("^/work/(.*)/book/(.*)$", book_data["title"]["href"]).groups()
            except AttributeError:  # href didn't match the expected pattern
                (book_data["work_id"], book_data["book_id"]) = (None, None)

            # tags
            tag_links = cols[1].xpath('.//a')
            book_data["tags"] = filter(lambda x: x is not None, [a.text for a in tag_links])

            # lc classification
            try:
                book_data["lc_call_number"] = cols[2].xpath('.//span')[0].text
            except Exception as e:
                logger.info("no lc call number for: %s %s", book_data["title"], e)
                book_data["lc_call_number"] = None

            # subject
            subjects = cols[3].xpath('.//div[@class="subjectLine"]')
            book_data["subjects"] = [{'href': s.xpath('a')[0].attrib['href'],
                                      'text': s.xpath('a')[0].text} for s in subjects]

            # isbn
            try:
                book_data["isbn"] = cols[4].xpath('.//span')[0].text
                # check for &nbsp; -- a non-breaking space here means no ISBN
                if book_data["isbn"] == u'\xA0':
                    book_data["isbn"] = None
            except Exception:
                book_data["isbn"] = None

            yield book_data

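    # Illustrative shape of a record yielded by viewstyle_5 -- the values are
    # invented for documentation, not scraped from a real catalog:
    #   {'title': {'href': '/work/123/book/456', 'title': 'Example Title'},
    #    'work_id': '123', 'book_id': '456', 'tags': ['fiction'],
    #    'lc_call_number': 'PS3545 .E37', 'subjects': [], 'isbn': '123456789X'}
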
    def parse_user_catalog(self, view_style=1):
        from lxml import html

        # we can vary viewstyle to get different info
        IMPLEMENTED_STYLES = [1, 5]
        COLLECTION = 2  # set to get All Collections

        if view_style not in IMPLEMENTED_STYLES:
            raise NotImplementedError()
        style_parser = getattr(self, "viewstyle_%s" % view_style)

        next_page = True
        offset = 0
        cookies = None

        # go to the front page of LibraryThing first to pick up relevant session-like cookies
        r = requests.get("https://www.librarything.com/")
        cookies = r.cookies

        while next_page:
            url = ("https://www.librarything.com/catalog_bottom.php?"
                   "view=%s&viewstyle=%d&collection=%d&offset=%d" %
                   (self.username, view_style, COLLECTION, offset))
            logger.info("url: %s", url)
            if cookies is None:
                r = requests.get(url)
            else:
                r = requests.get(url, cookies=cookies)

            if r.status_code != httplib.OK:
                logger.info("Error accessing %s: %s", url, r.status_code)
                raise LibraryThingException("Error accessing %s: %s" % (url, r.status_code))

            etree = html.fromstring(r.content)
            #logger.info("r.content %s", r.content)
            cookies = r.cookies  # retain the cookies

            # look for a page bar and try to grab the total number of books
            # from text like "1 - 50 of 82"
            try:
                count_text = etree.xpath('//td[@class="pbGroup"]')[0].text
                total = int(re.search(r'(\d+)$', count_text).group(1))
                logger.info('total: %d', total)
            except Exception as e:  # assume for now that if we can't grab this text, there is no page bar and no books
                logger.info('Exception %s', e)
                total = 0

            # to do paging we can either look for a next link or just increase the
            # offset by the number of rows. Let's try the latter.
            # possible_next_link = etree.xpath('//a[@class="pageShuttleButton"]')[0]

            rows_xpath = '//table[@id="lt_catalog_list"]/tbody/tr'

            # deal with page 1 first, then page through the rest of the collection
            rows = etree.xpath(rows_xpath)

            i = -1  # have to account for the possibility of style_parser(rows) yielding nothing
            for (i, row) in enumerate(style_parser(rows)):
                yield row

            # page size = 50; first page offset = 0, second page offset = 50 --
            # so if total = 50 there is no need to fetch another page
            offset += i + 1
            if offset >= total:
                next_page = False
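

# Illustrative usage of the scraper (the username below is hypothetical):
#
#   lt = LibraryThing("some_lt_username")
#   for book in lt.parse_user_catalog(view_style=5):
#       print book["title"]["title"], book["isbn"]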


def load_librarything_into_wishlist(user, lt_username, max_books=None):
    """
    Load a specified LibraryThing shelf into the user's wishlist (by default,
    all the books from the LibraryThing account named by lt_username).
    """
    from itertools import islice

    from regluit.core import bookloader
    from regluit.core import tasks

    logger.info("Entering into load_librarything_into_wishlist")
    lt = LibraryThing(lt_username)

    for (i, book) in enumerate(islice(lt.parse_user_catalog(view_style=5), max_books)):
        isbn = book["isbn"]  # a single ISBN string, or None (see viewstyle_5)
        logger.info("%d %s %s", i, book["title"]["title"], isbn)
        try:
            if not isbn:
                continue
            edition = bookloader.add_by_isbn(isbn)
            if not edition:
                continue
            # add the LibraryThing ids to the db since we know them now
            models.Identifier.get_or_add(type='thng', value=book['book_id'],
                                         edition=edition, work=edition.work)
            models.Identifier.get_or_add(type='ltwk', value=book['work_id'],
                                         work=edition.work)
            if book['lc_call_number']:
                models.Identifier.get_or_add(type='lccn', value=book['lc_call_number'],
                                             edition=edition, work=edition.work)
            user.wishlist.add_work(edition.work, 'librarything', notify=True)
            if edition.new:
                tasks.populate_edition.delay(edition.isbn_13)
            logger.info("Work with isbn %s added to wishlist.", isbn)
        except Exception as e:
            logger.info("error adding ISBN %s: %s", isbn, e)
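
# Minimal sketch of driving the loader from a Django shell (the user and
# LibraryThing account names below are made up):
#
#   from django.contrib.auth.models import User
#   user = User.objects.get(username='some_site_user')
#   load_librarything_into_wishlist(user, 'some_lt_username', max_books=10)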