Further bugfixes for python3
parent ed449e17e2
commit 3c88752cf9
@@ -31,6 +31,9 @@ from libbmc.config import Config
 config = Config()
 
 default_socket = socket.socket
+stdout_encoding = sys.stdout.encoding
+if stdout_encoding is None:
+    stdout_encoding = 'UTF-8'
 
 
 def download(url):
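Note on the hunk above: sys.stdout.encoding is None whenever stdout is not attached to a terminal (for instance when the output is piped), so the module falls back to UTF-8 and reuses this encoding later to decode bytes read from subprocess pipes. A minimal standalone sketch of the same idiom, not taken from this file (the shorter `sys.stdout.encoding or 'UTF-8'` is equivalent):

    import sys

    # Fall back to UTF-8 when stdout is redirected and reports no encoding.
    stdout_encoding = sys.stdout.encoding
    if stdout_encoding is None:
        stdout_encoding = 'UTF-8'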
@@ -64,8 +67,11 @@ def download(url):
         socket.socket = socks.socksocket
         try:
             r = urlopen(url)
-            size = int(r.headers.getheader('content-length').strip())
-            dl = ""
+            try:
+                size = int(dict(r.info())['Content-Length'].strip())
+            except KeyError:
+                size = 1
+            dl = b""
             dl_size = 0
             while True:
                 buf = r.read(1024)
@@ -79,10 +85,13 @@ def download(url):
                 else:
                     break
             contenttype = False
-            if 'pdf' in r.headers.getheader('content-type'):
-                contenttype = 'pdf'
-            elif 'djvu' in r.headers.getheader('content-type'):
-                contenttype = 'djvu'
+            try:
+                if 'pdf' in dict(r.info())['Content-Type']:
+                    contenttype = 'pdf'
+                elif 'djvu' in dict(r.info())['Content-Type']:
+                    contenttype = 'djvu'
+            except KeyError:
+                pass
 
             if r.getcode() != 200 or contenttype is False:
                 continue
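The two hunks above replace r.headers.getheader(...), which is not available on the response objects returned by python3's urllib, with dict(r.info())[...]; the added KeyError guards cover responses that omit the header, and dl is now accumulated as bytes (b"") because read() returns bytes under python3. A rough standalone sketch of the same header access, with a hypothetical URL:

    from urllib.request import urlopen

    r = urlopen('https://example.com/paper.pdf')  # hypothetical URL
    headers = dict(r.info())                      # response headers as a plain dict
    size = int(headers.get('Content-Length', 1))  # .get() plays the role of the KeyError guard
    is_pdf = 'pdf' in headers.get('Content-Type', '')
    dl = b''                                      # bytes accumulator, since read() yields bytes
    dl += r.read(1024)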
@@ -120,7 +129,7 @@ def findISBN(src):
         return False
 
     while totext.poll() is None:
-        extractfull = ' '.join([i.strip() for i in totext.stdout.readlines()])
+        extractfull = ' '.join([i.decode(stdout_encoding).strip() for i in totext.stdout.readlines()])
         extractISBN = isbn_re.search(extractfull.lower().replace('Œ',
                                                                  '-'))
         if extractISBN:
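The decode() added above is needed because, under python3, reading from a subprocess pipe yields bytes rather than str, so each line has to be decoded before lowercasing and regex matching. A standalone sketch of the pattern, with a pdftotext invocation shown purely for illustration:

    import subprocess
    import sys

    stdout_encoding = sys.stdout.encoding or 'UTF-8'

    # Hypothetical command; any process writing text to stdout behaves the same.
    totext = subprocess.Popen(['pdftotext', 'paper.pdf', '-'],
                              stdout=subprocess.PIPE)
    # readlines() returns a list of bytes objects on python3,
    # so each line is decoded before str methods are applied.
    extractfull = ' '.join(i.decode(stdout_encoding).strip()
                           for i in totext.stdout.readlines())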
@@ -178,7 +187,7 @@ def findDOI(src):
 
     extractfull = ''
     while totext.poll() is None:
-        extractfull += ' '.join([i.strip() for i in totext.stdout.readlines()])
+        extractfull += ' '.join([i.decode(stdout_encoding).strip() for i in totext.stdout.readlines()])
         extractDOI = doi_re.search(extractfull.lower().replace('Œ', '-'))
         if not extractDOI:
             # PNAS fix
@@ -234,10 +243,13 @@ def doi2Bib(doi):
     try:
         r = urlopen(req)
 
-        if r.headers.getheader('content-type') == 'application/x-bibtex':
-            return r.read()
-        else:
-            return ''
+        try:
+            if dict(r.info())['Content-Type'] == 'application/x-bibtex':
+                return r.read()
+            else:
+                return ''
+        except KeyError:
+            return ''
     except URLError:
         tools.warning('Unable to contact remote server to get the bibtex ' +
                       'entry for doi '+doi)
@@ -266,7 +278,7 @@ def findArXivId(src):
 
     extractfull = ''
     while totext.poll() is None:
-        extractfull += ' '.join([i.strip() for i in totext.stdout.readlines()])
+        extractfull += ' '.join([i.decode(stdout_encoding).strip() for i in totext.stdout.readlines()])
         extractID = arXiv_re.search(extractfull)
         if extractID:
             totext.terminate()
@@ -295,7 +307,7 @@ def arXiv2Bib(arxiv):
     else:
         fetched_bibtex = BibTexParser(bib.bibtex())
         fetched_bibtex = fetched_bibtex.get_entry_dict()
-        fetched_bibtex = fetched_bibtex[fetched_bibtex.keys()[0]]
+        fetched_bibtex = fetched_bibtex[list(fetched_bibtex.keys())[0]]
         try:
             del(fetched_bibtex['file'])
         except KeyError:
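The list(...) wrapper above is needed because python3's dict.keys() returns a view object that cannot be indexed. A two-line illustration with a hypothetical entry dict:

    entries = {'doe2015': {'title': 'Some title'}}  # hypothetical BibTeX entry dict
    first = entries[list(entries.keys())[0]]        # entries.keys()[0] raises TypeError on python3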
@@ -325,7 +337,7 @@ def findHALId(src):
         return False
 
     while totext.poll() is None:
-        extractfull = ' '.join([i.strip() for i in totext.stdout.readlines()])
+        extractfull = ' '.join([i.decode(stdout_encoding).strip() for i in totext.stdout.readlines()])
         extractID = HAL_re.search(extractfull)
         if extractID:
             totext.terminate()