always close CloudStorage sessions

1. propagate proper PDF filesize handling to the GDrive and MSDrive classes
2. use the session as a context manager to ensure it closes even if an exception goes unhandled
fix_pdf
eric 2019-06-19 15:03:16 -04:00
parent ee826a5148
commit f197d5227f
3 changed files with 39 additions and 37 deletions
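For reference, a minimal sketch of the two patterns this commit adopts (the URL is illustrative, not from this repo): a requests.Session used as a context manager calls close() on exit even when an exception propagates, and len(response.content) measures the downloaded bytes directly instead of trusting a Content-Length header the origin server may omit or get wrong for PDFs.

    import requests

    with requests.Session() as s:               # __exit__ calls s.close(), exception or not
        response = s.get('https://example.com/ebook.pdf')
        response.raise_for_status()
        filesize = len(response.content)        # actual byte count, not the advertised header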


@@ -57,9 +57,11 @@ class Dropbox (CloudStorage.CloudStorage):
             'Content-Type' : 'application/octet-stream',
             'Dropbox-API-Arg' : json.dumps (parameters)
         }
-        with closing (session.post (self.upload_endpoint,
-                                    data = response.content,
-                                    headers = headers)) as r:
-            if 'error_summary' in r.text:
-                CloudStorage.error_log (r.text)
-            r.raise_for_status ()
+        data = response.content
+        with session as s:
+            with closing (s.post (self.upload_endpoint,
+                                  data = data,
+                                  headers = headers)) as r:
+                if 'error_summary' in r.text:
+                    CloudStorage.error_log (r.text)
+                r.raise_for_status ()


@@ -54,19 +54,20 @@ class GDrive (CloudStorage.CloudStorage):
         }
         headers = {
             'X-Upload-Content-Type': request.headers['Content-Type'],
-            'X-Upload-Content-Length': request.headers['Content-Length'],
+            'X-Upload-Content-Length': str(len(request.content)),
             'Content-Type': 'application/json; charset=UTF-8',
         }
-
-        with closing (session.post (self.upload_endpoint,
-                                    data = json.dumps (file_metadata),
-                                    headers = headers)) as r2:
-            r2.raise_for_status ()
-            session_uri = r2.headers['Location']
-            headers = {
-                'Content-Type': request.headers['Content-Type'],
-            }
-            with closing (session.put (session_uri,
-                                       data = request.iter_content (1024 * 1024),
-                                       headers = headers)) as r3:
-                r3.raise_for_status ()
+        with session as s:
+            with closing (s.post (self.upload_endpoint,
+                                  data = json.dumps (file_metadata),
+                                  headers = headers)) as r2:
+                r2.raise_for_status ()
+                session_uri = r2.headers['Location']
+                headers = {
+                    'Content-Type': request.headers['Content-Type'],
+                }
+                with closing (s.put (session_uri,
+                                     data = request.iter_content (1024 * 1024),
+                                     headers = headers)) as r3:
+                    r3.raise_for_status ()
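For context, the two-step dance above is Google Drive's resumable-upload protocol: a metadata POST opens an upload session whose URI comes back in the Location header, then the file bytes are PUT to that URI. A hedged sketch under assumed endpoint and token handling (neither is code from this repo):

    import json
    from contextlib import closing
    import requests

    ENDPOINT = 'https://www.googleapis.com/upload/drive/v3/files?uploadType=resumable'
    body = open('ebook.pdf', 'rb').read()

    with requests.Session() as s:
        s.headers['Authorization'] = 'Bearer <access-token>'    # assumed OAuth2 token
        # step 1: announce the upload; Drive answers with the session URI in Location
        with closing(s.post(ENDPOINT,
                            data=json.dumps({'name': 'ebook.pdf'}),
                            headers={'X-Upload-Content-Type': 'application/pdf',
                                     'X-Upload-Content-Length': str(len(body)),
                                     'Content-Type': 'application/json; charset=UTF-8'})) as r:
            r.raise_for_status()
            session_uri = r.headers['Location']
        # step 2: send the bytes to the session URI
        with closing(s.put(session_uri, data=body,
                           headers={'Content-Type': 'application/pdf'})) as r:
            r.raise_for_status()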


@@ -50,7 +50,7 @@ class MSDrive(CloudStorage.CloudStorage):
             'description': 'A Project Gutenberg Ebook',
             "@microsoft.graph.conflictBehavior": "rename",
         }
-        filesize = int(response.headers['Content-Length'])
+        filesize = len(response.content)
         url = self.upload_endpoint.format(filename=filename)
         chunk_size = 327680  # weird onedrive thing related to FAT tables
         upload_data = session.post(url, json={'item': item_data}).json()
@@ -60,21 +60,20 @@ class MSDrive(CloudStorage.CloudStorage):
                'Content-Length': str(end - start + 1),
                'Content-Range': 'bytes {}-{}/{}'.format(start, end, filesize)
            }
-
-        if 'uploadUrl' in upload_data:
-            session_uri = upload_data['uploadUrl']
-            start = 0
-            end = min(chunk_size - 1, filesize - 1)
-
-            for chunk in response.iter_content(chunk_size):
-                r = session.put(
-                    session_uri,
-                    data=chunk,
-                    headers=headers(start, end, filesize),
-                )
-                start = start + chunk_size
-                end = min(end + chunk_size, filesize - 1)
-                r.raise_for_status()
-        else:
-            CloudStorage.log('no uploadUrl in %s' % upload_data)
-        session.close()
+        with session as s:
+            if 'uploadUrl' in upload_data:
+                session_uri = upload_data['uploadUrl']
+                start = 0
+                end = min(chunk_size - 1, filesize - 1)
+
+                for chunk in response.iter_content(chunk_size):
+                    r = s.put(
+                        session_uri,
+                        data=chunk,
+                        headers=headers(start, end, filesize),
+                    )
+                    start = start + chunk_size
+                    end = min(end + chunk_size, filesize - 1)
+                    r.raise_for_status()
+            else:
+                CloudStorage.log('no uploadUrl in %s' % upload_data)
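OneDrive upload sessions expect every chunk PUT to carry an explicit Content-Range, and chunk sizes must be a multiple of 320 KiB, which is where the 327680 comes from. A sketch of the range arithmetic the loop above performs, using a hypothetical range_headers() in place of the local headers() callable:

    def range_headers(start, end, filesize):
        # hypothetical helper mirroring headers() in the diff above
        return {
            'Content-Length': str(end - start + 1),
            'Content-Range': 'bytes {}-{}/{}'.format(start, end, filesize),
        }

    chunk_size = 327680                         # 320 KiB, OneDrive's required granularity
    filesize = 1000000                          # example file size
    start, end = 0, min(chunk_size - 1, filesize - 1)
    while start < filesize:
        print(range_headers(start, end, filesize)['Content-Range'])
        start += chunk_size
        end = min(end + chunk_size, filesize - 1)
    # prints bytes 0-327679/1000000 ... bytes 983040-999999/1000000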