2014-04-25 14:13:37 +02:00
|
|
|
#!/usr/bin/env python2
|
|
|
|
# -*- coding: utf8 -*-
|
2014-04-25 16:53:08 +02:00
|
|
|
|
2014-04-24 21:19:27 +02:00
|
|
|
from __future__ import print_function
|
2014-04-24 00:18:49 +02:00
|
|
|
|
|
|
|
import sys
|
|
|
|
import shutil
|
|
|
|
import requests
|
2014-04-24 16:18:56 +02:00
|
|
|
import subprocess
|
|
|
|
import re
|
2014-04-24 21:19:27 +02:00
|
|
|
import os
|
2014-04-24 22:39:27 +02:00
|
|
|
from isbntools import meta
|
2014-04-25 16:53:08 +02:00
|
|
|
from isbntools.dev.fmt import fmtbib
|
2014-04-24 16:18:56 +02:00
|
|
|
try:
|
|
|
|
from cStringIO import StringIO
|
|
|
|
except:
|
|
|
|
from StringIO import StringIO
|
2014-04-24 00:18:49 +02:00
|
|
|
from bibtexparser.bparser import BibTexParser
|
2014-04-25 14:13:37 +02:00
|
|
|
from bibtexparser.customization import homogeneize_latex_encoding
|
2014-04-25 15:36:54 +02:00
|
|
|
from termios import tcflush, TCIOFLUSH
|
2014-04-24 00:18:49 +02:00
|
|
|
import params
|
|
|
|
|
|
|
|
|
2014-04-25 01:13:19 +02:00
|
|
|
def rawInput(string):
    """
    Prompt the user with ``string`` and return the line they type.

    The terminal input buffer is flushed first (TCIOFLUSH discards both
    pending input and output) so that keystrokes typed before the prompt
    appeared are not accidentally consumed as the answer.
    """
    tcflush(sys.stdin, TCIOFLUSH)
    return raw_input(string)
|
|
|
|
|
|
|
|
|
2014-04-24 21:19:27 +02:00
|
|
|
def warning(*objs):
    """
    Write to stderr

    All positional arguments are printed on sys.stderr, prefixed with
    "WARNING: " (print() inserts a space between each argument).
    """
    print("WARNING: ", *objs, file=sys.stderr)
|
|
|
|
|
|
|
|
|
2014-04-25 14:13:37 +02:00
|
|
|
def parsed2Bibtex(parsed):
    """
    Convert a single bibtex entry dict to bibtex string

    The entry type and id form the header line; every other field is
    emitted alphabetically as "\tkey={value},". Returns the full string,
    terminated by "}\n".
    """
    pieces = ['@' + parsed['type'] + '{' + parsed['id'] + ",\n"]
    for key in sorted(parsed):
        if key in ('type', 'id'):
            continue
        pieces.append("\t" + key + "={" + parsed[key] + "},\n")
    pieces.append("}\n")
    return ''.join(pieces)
|
|
|
|
|
|
|
|
|
2014-04-24 16:18:56 +02:00
|
|
|
def bibtexAppend(data):
    """
    Append data to the main bibtex file

    data is a dict for one entry in bibtex, as the one from bibtexparser output
    """
    index_path = params.folder + 'index.bib'
    with open(index_path, 'a') as index:
        index.write(parsed2Bibtex(data) + "\n")
|
2014-04-24 00:18:49 +02:00
|
|
|
|
2014-04-25 16:53:08 +02:00
|
|
|
|
2014-04-25 15:36:54 +02:00
|
|
|
def bibtexRewrite(data):
    """
    Rewrite the bibtex index file.

    data is a dict of bibtex entry dict.
    """
    # Serialize every entry, one blank line between them, then
    # overwrite the index in a single write.
    content = "".join(parsed2Bibtex(entry) + "\n" for entry in data.values())
    with open(params.folder + 'index.bib', 'w') as index:
        index.write(content)
|
|
|
|
|
2014-04-24 00:18:49 +02:00
|
|
|
|
2014-04-24 16:18:56 +02:00
|
|
|
def replaceAll(text, dic):
    """
    Replace every occurrence of the dict's keys in text by their values.

    Returns the resulting string. Replacement order follows the dict's
    iteration order.
    """
    # .items() (not the Python-2-only .iteritems()) keeps this working
    # on both Python 2 and Python 3 with identical behavior.
    for old, new in dic.items():
        text = text.replace(old, new)
    return text
|
|
|
|
|
|
|
|
|
2014-04-24 22:39:27 +02:00
|
|
|
def findISBN(src):
    """
    Try to extract an ISBN from the text of a .pdf or .djvu file.

    Runs pdftotext / djvutxt on src, searches the (lowercased) output for
    an "isbn ..." pattern and returns the ISBN with separators stripped,
    or False when extraction fails or no ISBN is found.
    """
    if src.endswith(".pdf"):
        totext = subprocess.Popen(["pdftotext", src, "-"],
                                  stdout=subprocess.PIPE,
                                  stderr=subprocess.PIPE)
    elif src.endswith(".djvu"):
        totext = subprocess.Popen(["djvutxt", src],
                                  stdout=subprocess.PIPE,
                                  stderr=subprocess.PIPE)
    else:
        # Unsupported file type: `totext` would be unbound below
        # (NameError in the original code), so bail out explicitly.
        return False

    extractfull = totext.communicate()
    # Any stderr output from the extractor is treated as failure.
    # (The original used `is not ""`, an identity test that only worked
    # by CPython string-interning accident; use a real comparison.)
    if extractfull[1] != "":
        return False

    extractfull = extractfull[0]
    extractISBN = re.search(r"isbn (([0-9]{3}[ -])?[0-9][ -][0-9]{2}[ -][0-9]{6}[ -][0-9])",
                            extractfull.lower().replace('Œ', '-'))

    cleanISBN = False
    if extractISBN:
        # Strip hyphens/spaces to get the bare digit string
        cleanISBN = extractISBN.group(1).replace('-', '').replace(' ', '')

    return cleanISBN
|
|
|
|
|
|
|
|
|
|
|
|
def isbn2Bib(isbn):
    """
    Return a bibtex string of metadata for the given ISBN, as fetched by
    isbntools' default metadata service.
    """
    metadata = meta(isbn, 'default')
    return fmtbib('bibtex', metadata)
|
|
|
|
|
|
|
|
|
2014-04-24 19:38:52 +02:00
|
|
|
def findDOI(src):
    """
    Try to extract a DOI from the text of a .pdf or .djvu file.

    Runs pdftotext / djvutxt on src and searches the (lowercased) output
    for a "doi..." pattern, with publisher-specific fallbacks and cleanup
    (PNAS, JCB, FASEB J). Returns the cleaned DOI string, or False when
    extraction fails or no DOI is found.
    """
    if src.endswith(".pdf"):
        totext = subprocess.Popen(["pdftotext", src, "-"],
                                  stdout=subprocess.PIPE,
                                  stderr=subprocess.PIPE)
    elif src.endswith(".djvu"):
        totext = subprocess.Popen(["djvutxt", src],
                                  stdout=subprocess.PIPE,
                                  stderr=subprocess.PIPE)
    else:
        # Unsupported file type: `totext` would be unbound below
        # (NameError in the original code), so bail out explicitly.
        return False

    extractfull = totext.communicate()
    # Any stderr output from the extractor is treated as failure.
    # (The original used `is not ""`, an identity test that only worked
    # by CPython string-interning accident; use a real comparison.)
    if extractfull[1] != "":
        return False

    extractfull = extractfull[0]
    extractDOI = re.search(r'(?<=doi)/?:?\s?[0-9\.]{7}/\S*[0-9]',
                           extractfull.lower().replace('Œ', '-'))
    if not extractDOI:
        # PNAS fix
        extractDOI = re.search(r'(?<=doi).?10.1073/pnas\.\d+',
                               extractfull.lower().replace('pnas', '/pnas'))
        if not extractDOI:
            # JSB fix
            extractDOI = re.search(r'10\.1083/jcb\.\d{9}', extractfull.lower())

    cleanDOI = False
    if extractDOI:
        cleanDOI = extractDOI.group(0).replace(':', '').replace(' ', '')
        if re.search('^/', cleanDOI):
            cleanDOI = cleanDOI[1:]

        # FABSE J fix
        if re.search('^10.1096', cleanDOI):
            cleanDOI = cleanDOI[:20]

        # Second JCB fix
        if re.search('^10.1083', cleanDOI):
            cleanDOI = cleanDOI[:21]

        # Heuristic truncation for overlong matches: mask digit.digit
        # runs and '.'/'-' chars, then cut at the first letter that
        # follows a digit in the suffix.
        if len(cleanDOI) > 40:
            cleanDOItemp = re.sub(r'\d\.\d', '000', cleanDOI)
            reps = {'.': 'A', '-': '0'}
            cleanDOItemp = replaceAll(cleanDOItemp[8:], reps)
            digitStart = 0
            for i in range(len(cleanDOItemp)):
                if cleanDOItemp[i].isdigit():
                    digitStart = 1
                if cleanDOItemp[i].isalpha() and digitStart:
                    break
            cleanDOI = cleanDOI[0:(8+i)]

    return cleanDOI
|
|
|
|
|
|
|
|
|
|
|
|
def doi2Bib(doi):
    """
    Return a bibTeX string of metadata for a given DOI.

    Resolves the DOI through dx.doi.org with content negotiation
    (Accept: application/x-bibtex).

    From : https://gist.github.com/jrsmith3/5513926
    """
    response = requests.get("http://dx.doi.org/" + doi,
                            headers={"accept": "application/x-bibtex"})
    return response.text
|
|
|
|
|
|
|
|
|
2014-04-24 21:19:27 +02:00
|
|
|
# Matches every char that is not a word char, whitespace or hyphen
# (these get stripped by _slugify).
_slugify_strip_re = re.compile(r'[^\w\s-]')
# Matches runs of whitespace (collapsed to a single '_' by _slugify).
_slugify_hyphenate_re = re.compile(r'[\s]+')
|
2014-04-25 16:53:08 +02:00
|
|
|
|
|
|
|
|
2014-04-24 21:19:27 +02:00
|
|
|
def _slugify(value):
    """
    Normalize a string to a safe filename slug: coerce to unicode,
    decompose to NFKD and drop non-ASCII, strip characters that are not
    word chars / whitespace / hyphens, then collapse whitespace runs
    into single underscores.

    From Django's "django/template/defaultfilters.py".
    """
    import unicodedata
    if not isinstance(value, unicode):
        value = unicode(value)
    ascii_value = unicodedata.normalize('NFKD', value).encode('ascii', 'ignore')
    stripped = unicode(_slugify_strip_re.sub('', ascii_value).strip())
    return _slugify_hyphenate_re.sub('_', stripped)
|
|
|
|
|
|
|
|
|
|
|
|
def getExtension(filename):
    """
    Get the extension of the filename, including the leading dot
    (e.g. ".pdf").

    Returns the empty string when filename contains no dot (the original
    slice `filename[-1:]` wrongly returned the last character in that
    case, because str.rfind returns -1 on no match).
    """
    dot = filename.rfind('.')
    return filename[dot:] if dot != -1 else ''
|
|
|
|
|
|
|
|
|
2014-04-25 01:13:19 +02:00
|
|
|
def checkBibtex(filename, bibtex):
    """
    Interactively validate the bibtex entry guessed for filename.

    bibtex is a raw bibtex string; it is shown to the user, parsed, and —
    as long as the user answers 'n' — every field is prompted for
    correction and new fields can be added. Returns the accepted entry
    as a dict (bibtexparser-style, with 'type' and 'id' keys).
    """
    print("The bibtex entry found for "+filename+" is :")
    print(bibtex)
    check = rawInput("Is it correct ? [Y/n] ")

    # Parse the raw string into bibtexparser's entry dict
    bibtex = StringIO(bibtex)
    bibtex = BibTexParser(bibtex, customization=homogeneize_latex_encoding)
    bibtex = bibtex.get_entry_dict()
    # NOTE(review): assumes exactly one entry in the string;
    # `.keys()[0]` is Python-2-only (dict views are not indexable on 3).
    bibtex_name = bibtex.keys()[0]
    bibtex = bibtex[bibtex_name]

    while check.lower() == 'n':
        # Prompt 'type' and 'id' first, then the rest alphabetically
        fields = [u'type', u'id'] + [i for i in sorted(bibtex)
                                     if i not in ['id', 'type']]

        # Empty answer keeps the current value
        for field in fields:
            new_value = rawInput(field.capitalize()+" ? ["+bibtex[field]+"] ")
            if new_value != '':
                bibtex[field] = new_value

        # Let the user add arbitrary extra fields until an empty name
        while True:
            new_field = rawInput("Add a new field (leave empty to skip) ? ")
            if new_field == '':
                break
            new_value = rawInput("Value for field "+new_field+" ? ")
            bibtex[new_field] = new_value

        # Show the edited entry and ask again
        print("\nThe bibtex entry for "+filename+" is :")
        print(parsed2Bibtex(bibtex))
        check = rawInput("Is it correct ? [Y/n] ")
    return bibtex
|
|
|
|
|
|
|
|
|
2014-04-24 22:39:27 +02:00
|
|
|
def addFile(src, filetype):
    """
    Add a file to the library

    Tries to identify src (a .pdf or .djvu file) by DOI or ISBN depending
    on filetype ('article', 'book' or None for auto-detect), falls back to
    manual entry, lets the user validate the bibtex, renames/copies the
    file into params.folder and appends the entry to the bibtex index.
    """
    # Initialize both identifiers: depending on filetype only one of the
    # lookups below runs, and the original code raised NameError when
    # reading the other one (e.g. `isbn` for filetype == 'article').
    doi = False
    isbn = False

    if filetype == 'article' or filetype is None:
        doi = findDOI(src)

    if filetype == 'book' or (filetype is None and doi is False):
        isbn = findISBN(src)

    if doi is False and isbn is False:
        if filetype is None:
            # Fixed missing space between the two concatenated sentences
            warning("Could not determine the DOI or the ISBN for "+src+"." +
                    " Switching to manual entry.")
            doi_isbn = ''
            while doi_isbn not in ['doi', 'isbn']:
                doi_isbn = rawInput("DOI / ISBN ? ").lower()
            if doi_isbn == 'doi':
                doi = rawInput('DOI ? ')
            else:
                isbn = rawInput('ISBN ? ')
        elif filetype == 'article':
            warning("Could not determine the DOI for "+src +
                    ", switching to manual entry.")
            doi = rawInput('DOI ? ')
        elif filetype == 'book':
            warning("Could not determine the ISBN for "+src +
                    ", switching to manual entry.")
            isbn = rawInput('ISBN ? ')
    elif doi is not False:
        print("DOI for "+src+" is "+doi+".")
    elif isbn is not False:
        print("ISBN for "+src+" is "+isbn+".")

    if doi is not False:
        # Add extra \n for bibtexparser
        bibtex = doi2Bib(doi).strip().replace(',', ",\n")+"\n"
    else:
        # Idem
        bibtex = isbn2Bib(isbn).strip()+"\n"
    bibtex = checkBibtex(src, bibtex)

    authors = re.split(' and ', bibtex['author'])

    # Build the destination filename from the configured format string
    if doi is not False:
        new_name = params.format_articles
        new_name = new_name.replace("%j", bibtex['journal'])
    else:
        new_name = params.format_books

    new_name = new_name.replace("%t", bibtex['title'])
    new_name = new_name.replace("%Y", bibtex['year'])
    new_name = new_name.replace("%f", authors[0].split(',')[0].strip())
    new_name = new_name.replace("%l", authors[-1].split(',')[0].strip())
    new_name = new_name.replace("%a", ', '.join([i.split(',')[0].strip()
                                                 for i in authors]))

    new_name = params.folder+_slugify(new_name)+getExtension(src)

    # Never overwrite an existing file: offer a " (2)" suffix or a
    # user-chosen name until the path is free.
    while os.path.exists(new_name):
        warning("file "+new_name+" already exists.")
        default_rename = new_name.replace(getExtension(new_name),
                                          " (2)"+getExtension(new_name))
        rename = rawInput("New name ["+default_rename+"] ? ")
        if rename == '':
            new_name = default_rename
        else:
            new_name = rename
    bibtex['file'] = new_name

    try:
        shutil.copy2(src, new_name)
    except IOError:
        sys.exit("Unable to move file to library dir " + params.folder+".")

    bibtexAppend(bibtex)
    print("File " + src + " successfully imported.")
|
|
|
|
|
|
|
|
|
2014-04-25 15:36:54 +02:00
|
|
|
def delete_id(ident):
    """
    Delete a file based on its id in the bibtex file

    Removes the associated file from disk (best effort), drops the entry
    from the index and rewrites it. Returns True on success, False when
    ident is not in the index.
    """
    with open(params.folder+'index.bib', 'r') as fh:
        bibtex = BibTexParser(fh, customization=homogeneize_latex_encoding)
        bibtex = bibtex.get_entry_dict()

    if ident not in bibtex.keys():
        return False

    try:
        os.remove(bibtex[ident]['file'])
    except (KeyError, OSError):
        # Narrowed from a bare `except:`: either the entry has no 'file'
        # field (KeyError) or the removal failed (OSError). Use .get in
        # the message so the KeyError cannot re-fire inside the handler.
        warning("Unable to delete file associated to id "+ident+" : " +
                bibtex[ident].get('file', '<no file>'))
    del(bibtex[ident])
    bibtexRewrite(bibtex)
    return True
|
|
|
|
|
|
|
|
|
|
|
|
def delete_file(filename):
    """
    Delete a file based on its filename

    Removes every index entry whose 'file' field equals filename, deleting
    the file from disk (best effort), and rewrites the index if anything
    matched. Returns True when at least one entry was removed.
    """
    with open(params.folder+'index.bib', 'r') as fh:
        bibtex = BibTexParser(fh, customization=homogeneize_latex_encoding)
        bibtex = bibtex.get_entry_dict()

    found = False
    # Snapshot the keys: entries are deleted while looping. dict.keys()
    # happens to be a list on Python 2, but list() makes it safe on 3 too.
    for key in list(bibtex.keys()):
        # .get avoids a KeyError on entries without a 'file' field
        if bibtex[key].get('file') == filename:
            found = True
            try:
                os.remove(bibtex[key]['file'])
            except OSError:
                # Narrowed from a bare `except:`; the file may already
                # be gone — still drop the entry from the index.
                warning("Unable to delete file associated to id "+key+" : " +
                        bibtex[key]['file'])
            del(bibtex[key])
    if found:
        bibtexRewrite(bibtex)
    return found
|
|
|
|
|
|
|
|
|
2014-04-24 00:18:49 +02:00
|
|
|
if __name__ == '__main__':
    # Command dispatch: download / import / delete / list / search / rebuild.
    # Everything is wrapped so Ctrl-C exits quietly.
    try:
        if len(sys.argv) < 2:
            sys.exit("Usage : TODO")

        # Not implemented yet
        if sys.argv[1] == 'download':
            raise Exception('TODO')

        if sys.argv[1] == 'import':
            if len(sys.argv) < 3:
                sys.exit("Usage : " + sys.argv[0] +
                         " import FILE [article|book]")

            # Optional third argument forces the file type;
            # otherwise addFile auto-detects via DOI/ISBN.
            filetype = None
            if len(sys.argv) > 3 and sys.argv[3] in ["article", "book"]:
                filetype = sys.argv[3].lower()

            addFile(sys.argv[2], filetype)
            sys.exit()

        elif sys.argv[1] == 'delete':
            if len(sys.argv) < 3:
                sys.exit("Usage : " + sys.argv[0] + " delete FILE|ID")

            # Interpret the argument as a bibtex id first, then as a filename
            if not delete_id(sys.argv[2]):
                if not delete_file(sys.argv[2]):
                    warning("Unable to delete "+sys.argv[2])
                    sys.exit(1)

            print(sys.argv[2]+" successfully deleted.")
            sys.exit()

        # Not implemented yet
        elif sys.argv[1] == 'list':
            raise Exception('TODO')

        elif sys.argv[1] == 'search':
            raise Exception('TODO')

        elif sys.argv[1] == 'rebuild':
            raise Exception('TODO')
    except KeyboardInterrupt:
        sys.exit()
|