kindle manager

gavin
2020-06-06 08:43:34 +08:00
parent c0bfa52fc3
commit 722757dadd
8 changed files with 498 additions and 358 deletions

BIN
.DS_Store vendored

Binary file not shown.


@@ -75,6 +75,7 @@ b['1']['2'] = {'3':1} # OK
# feature plan
## 20200528
- abstract the Kindle device / local clippings directory handling for different OSes
- abstract the Kindle device / local clippings directory handling for different OSes **done**
- add GUI using Qt **done**
- new thread to check the Kindle connection status (see the sketch below)
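
A minimal sketch of the planned status-check thread (illustrative only; `kMan` and `get_kindle_path` are names from kman.py in this commit, the 1-second polling interval is an assumption):

```python
import threading
import time

from kman import kMan


def watch_kindle(km, interval=1):
    # poll the kindle mount point; the GUI would update its statusbar here instead of printing
    while True:
        print('connected' if km.get_kindle_path() else 'disconnected')
        time.sleep(interval)


# daemon thread so it never blocks application exit
threading.Thread(target=watch_kindle, args=(kMan(),), daemon=True).start()
```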

2
cui

@@ -1,2 +1,2 @@
pyuic mainwindow.ui -o mainwindow.py
pyuic mainwindow.ui -o mainwindow.py --no-protection
pyside2-rcc -binary kmanapp.qrc -o kmanapp_rc.py

690
kman.py

@@ -8,7 +8,9 @@
#########################################################
import re
import os
import json
import time
import logging
import platform
from collections import defaultdict
@@ -107,347 +109,409 @@ r'''
(\d{1,2}:\d{1,2}:\d{1,2}) #group6 - time
''', flags=re.X )
def parse_section(s,i):
"""parse section
Args:
s: section line list
i: section index
class kMan:
def __init__(self, parent=None):
self.hlnum = 0
self.ntnum = 0
self.refleshtime = '2020/10/10 10:00:00'
self.status = self.status_info()
def status_info(self):
s1 = u'Highlight: {} Note: {} RefreshTime: {}'. \
format(self.hlnum,self.ntnum,self.refleshtime)
kp = self.get_kindle_path()
if not kp:
s2 = u'Disconnected'
else:
with open(kp+'/system/version.txt' , 'r', encoding='utf8', errors='ignore') as f:
s2 = u'Connected ({}) version {}'.format(kp,f.read().strip())
return [s1,s2]
def parse_section(self,s,i):
"""parse section
Args:
s: section line list
i: section index
Returns:
dict like this:
d = { 'bookname':bookname,
bookname: {
'author':author
'0':{
'type':'HL',
'position':'123',
'day':'2020年5月26日',
'week':'星期二',
'meridiem':'PM',
'time':'10:26:31'
'content':content }}}
"""
# skip short sections:
# 1. a highlight over a picture has an empty content line (#3), so only two lines
# 2. a bookmark section is also only two lines
# 3. anything with fewer lines is not a correctly formatted section
if len(s)<=2:
return False
# parse #2 line
section = defaultdict(dict)
"""
authinfo = sec[0]
dateinfo = sec[1]
content = sec[2] if len(sec)==3 else None
"""
(authinfo, dateinfo, content) = \
(s[0], s[1], s[2] if len(s)==3 else None)
das = da.search(dateinfo)
# type of section
'''
STAT :
START - start line of section
BM - section is a bookmark
HL - section is a highlight
NT - section is a note
'''
tpy = ('HL' if das.group(2)=='标注' else \
('NT' if das.group(2)=='笔记' else 'BM'))
"""
pos = das.group(1)
day = das.group(3)
week = das.group(4)
pmam = das.group(5)
time = das.group(6)
"""
(pos, x, day, week, pmam, time) = das.groups()[0:6]
# parse #1 line
aus = au.search(authinfo)
bookname = aus.group(1)
author = aus.group(2)
section[bookname]['author'] = author
section['bookname'] = bookname
section[bookname][str(i)] = {
'type':tpy,
'position':pos,
'day':day,
'week':week,
'meridiem':pmam,
'time':time,
'content':content }
return section
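# usage sketch (illustrative, not part of this commit): a hypothetical 3-line
# section as split out of My Clippings.txt; field values are taken from this
# commit's own test data, the exact kindle punctuation may differ, and the call
# assumes the module-level au/da regexes (defined above this hunk) match it.
# >>> km = kMan()
# >>> sec = ['薛兆丰经济学讲义 (薛兆丰)',
# ...        '- 您在位置 #1408-1410的标注 | 添加于 2020年1月13日星期一 上午8:11:05',
# ...        '么到底什么叫边际?边际就是“新增”带来的“新增”。']
# >>> d = km.parse_section(sec, 1)
# -> d['bookname'] is the book title, and d[bookname]['1']['type'] is 'HL' (a 标注/highlight)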
def format_time(self,ds):
""" format date
Args:
ds: 2020年1月13日 星期一 上午 8:11:05
Return:
2020/1/13 20:11:05
"""
d = ds.split(' ')
res = re.search(r'(\d{4}).(\d{1,2}).(\d{1,2})',d[0])
ymd = '/'.join(res.groups())
res = re.search(r'(\d{1,2})(:\d{1,2}:\d{1,2})',d[3])
tm = ' '+str(int(res.group(1)) + (0 if d[2]=='上午' else 12))+res.group(2)
return ymd+tm
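# worked example (values from searchtitle.md in this commit; assumes km = kMan()):
# >>> km.format_time('2020年1月30日 星期四 上午 10:23:58')
# '2020/1/30 10:23:58'
# 下午/PM times get 12 added to the hour by the code above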
def format_data(self,bks, ft='MD'):
""" format data for MD & CSV
Args:
bks: books dict
f: can be 'MD'/'CSV'
Return:
list [header, sections]
header and sections are lists
"""
hd =[] # header
secs =[] # content
DELIMITER = '|' if ft=='MD' else ','
hd.append(DELIMITER.join(['TYPE','BOOKNAME','AUTHOR','MARKTIME','CONTENT']))
if ft=='MD':
hd.append(DELIMITER.join(['--' for i in range(5)]))
for kb,vb in bks.items():
author = vb['author']
for ks, vs in vb.items():
if ks in ['author', 'lines']: continue
secs.append(DELIMITER.join([vs['type'],kb,author, \
self.format_time(' '.join([vs['day'],vs['week'],\
vs['meridiem'],vs['time']])),vs['content']]))
return hd+secs
def format_out(self,bks, fnpref, ft='MD'):
"""format output and write to file
markdown format:
TYPE | bookname | author | marktime | content
--|--|--|--|--
xx|xx|xx|xx|xx
CSV format:
TYPE,bookname,author,marktime,content
xx,xx,xx,xx,xx
marktime: 20200403 PM 3:0:3 星期五
Args:
bks: books dict
f: can be 'MD'/'JSON'/'CSV'
Returns: special format of 'bks' dict
"""
suff = {'MD':'.md','CSV':'.csv','JSON':'.json'}
op = fnpref+suff[ft]
with open(op, 'w', encoding='utf8', errors='ignore') as fw:
if ft=='JSON':
fw.write(json.dumps(bks, indent=4, sort_keys=True, ensure_ascii=False))
elif ft in ['MD','CSV']:
for s in self.format_data(bks, ft):
fw.write(s)
fw.write('\n')
else:
fw.write(json.dumps(bks)) # only for load back
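# usage sketch (illustrative): with `books` returned by import_clips() and OUTPREF
# being the module-level output prefix used in __main__ below, these write the
# table formats documented in the docstring above:
# >>> km.format_out(books, OUTPREF, ft='MD')   # writes OUTPREF + '.md'
# >>> km.format_out(books, OUTPREF, ft='CSV')  # writes OUTPREF + '.csv'
# >>> km.format_out(books, OUTPREF, ft='JSON') # pretty-printed json dump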
def drop_duplicate(self,bks):
""" drop duplicated section
If the same place is highlighted a second time, kindle creates two notes,
so the duplicated record needs to be removed
Args:
bks: books dict
Return:
books remove duplicate sections
"""
[preks,prevs] = ['',{'content':'!#$%^&$%','type':'xx'}]
for kb,vb in bks.items():
bks[kb]['lines'] = 0
# iterate over a copy(), otherwise: RuntimeError: dictionary changed size during iteration
# reference - http://www.cocoachina.com/articles/89748
for ks, vs in vb.copy().items():
if ks in ['author', 'lines']: continue
bks[kb]['lines'] += 1
if (vs['content'] in prevs['content'] or \
prevs['content'] in vs['content']) and \
prevs['type'] == vs['type']:
bks[kb].pop(preks)
#if vs['content'] != prevs['content']:
# print('prevs',prevs['type'],prevs['content'])
# print(' vs', vs['type'], vs['content'])
preks = ks
prevs = vs
return bks
def add_note_to_highlight(self,bks):
""" append note content to corresponding highlight
and remove NT sections
Args:
bks: books dict
Return:
changed books
"""
[preks,prevs] = ['',{'content':'!#$%^&$%','type':'xx'}]
for kb,vb in bks.items():
for ks,vs in vb.copy().items():
if ks in ['author', 'lines']: continue
if [prevs['type'], vs['type']] == ['HL','NT']:
bks[kb][preks]['content'] += str(NTPREF+vs['content'])
bks[kb].pop(ks)
preks = ks
prevs = vs
return bks
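# effect sketch, mirroring the unit test added in this commit: a NT section that
# immediately follows a HL section is appended to that highlight's content with
# NTPREF in between, and the NT entry itself is popped.
# before: books[bn]['1']['content'] == '边际就是“新增”带来的“新增”。'
#         books[bn]['2']            == {'type': 'NT', 'content': '山寨 假货 问题', ...}  # key '2' assumed
# after km.add_note_to_highlight(books):
#         books[bn]['1']['content'] == '边际就是“新增”带来的“新增”。' + NTPREF + '山寨 假货 问题'
#         and books[bn] no longer has key '2'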
def search_clip(self,bks, s, t='ALL', p='ALL'):
"""search clip, searching scope may be title/author/content
Args:
input: bks: books dict
s: key word
t: 'ALL'
'HL'
'BM'
'NT'
p: 'ALL'
'TITLE'
'AUTHOR'
'CONTENT'
Return: search clipping content
"""
nbks = defaultdict(dict)
nu = 0
for kb,vb in bks.items():
nbks[kb]['lines'] = 0
for ks,vs in vb.copy().items():
if ks in ['author', 'lines']:
nbks[kb][ks] = vs
continue
if t in ['ALL', vs['type']]:
scopestr = {'ALL':''.join([kb,vb['author'],vs['content']]), \
'TITLE':kb, 'AUTHOR':vb['author'], 'CONTENT':vs['content']}
found = re.search(s, scopestr[p])
if found:
nbks[kb][ks] = vs
nbks[kb]['lines'] += 1
nu += 1
if nbks[kb]['lines']==0:
nbks.pop(kb)
return [nu,nbks]
# to be implemented
def statistic(self,bks):
pass
def dict2json(self,d):
"""convert dict to json
Args: d is the dict
Return: json string
"""
jstr = json.dumps(d)
return jstr
def json2dict(self,jf):
"""convert dict to json
Args: jf is the file saved json string
Return: dict
"""
d = {}
with open(jf, 'r', encoding='utf8', errors='ignore') as f:
d=json.load(f)
return d
def get_kindle_path(self):
"""check and return kindle device path
Args:
Return:
if kindle connected, return path string of kindle device
else return false
"""
cmd = "wmic logicaldisk get name,volumename" if os.name=='nt'\
else ("ls /Volumes/Kindle" if os.name=='posix' else '')
# not tested on windows & linux
with os.popen(cmd) as s:
r = s.read()
if os.name == 'nt': # windows
for d in r.split('\n'):
if 'Kindle' in d: return d.split()[0] # drive letter is the first whitespace-separated field
elif os.name == 'posix': # mac os
if r: return('/Volumes/Kindle')
else:
pass
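# usage sketch (illustrative; like the code above, untested on windows/linux):
# >>> km = kMan()
# >>> km.get_kindle_path()
# '/Volumes/Kindle'   # macOS with the device mounted
# 'E:'                # windows: first field of the wmic line containing 'Kindle' (assumed)
# falls through and returns None when no kindle is found (callers treat it as falsy)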
def import_clips(self, tp='local'):
"""import clips from local file or kindle
each section in the clippings file is 4 lines, separated by '======='
so read the 4 lines before each '======='
Args: tp: 'local' local clipping file
'kindle' kindle clipping file
Return: 0 - kindle import was requested but no kindle is connected
otherwise the books dict
"""
if tp=='kindle':
kp = self.get_kindle_path()
if not kp: return 0
else: path = kp
else:
fw.write(json.dumps(bks)) # only for load back
path = CLIPPATH
# loop to fill books dict
with open(path, 'r', encoding='utf8', errors='ignore') as f:
bks = defaultdict(dict)
secd = defaultdict(dict)
sidx = 0
idx = 0
sec = []
for line in f.readlines():
line = line.strip()
if re.match(r'^\s*$',line): continue
idx += 1
if not re.search(LASTLINE,line):
# content more than 1 line
if idx>3:
sec[2] += str(' '+line)
#logger.debug('idx {} {}'.format(idx, sec[2]))
else:
sec.append(line)
#logger.debug('idx {} {}'.format(idx, sec[idx-1]))
else:
idx = 0
sidx += 1
# parsing section & fill data structure
secd = self.parse_section(sec,sidx)
if secd:
bn = secd['bookname']
tpy = secd[bn][str(sidx)]['type']
bks[bn]['author'] = secd[bn]['author']
bks[bn][str(sidx)] = secd[bn][str(sidx)]
# do not append note content to the highlight here,
# because NT may be duplicated; remove duplicated records first
"""
if tpy=='NT' and bks[bn][str(sidx-1)]['type']=='HL':
bks[bn][str(sidx-1)]['content'] += str(NTPREF+sec[2])
"""
if tpy=='HL': self.hlnum += 1
elif tpy=='NT': self.ntnum += 1
else: # BM or not correct format section
sidx -= 1
# initial section for next section loop
sec = []
self.refleshtime = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
return bks
if __name__=='__main__':
#books = defaultdict(dict)
km = kMan()
books = km.import_clips('local')
# remove duplication
km.drop_duplicate(books)
# test search note function
searchnote = km.search_clip(books, '三大都市圈', 'ALL', 'CONTENT')
if searchnote[0] > 0: km.format_out(searchnote[1], 'searchcontent', ft='MD')
searchnote = km.search_clip(books, '经济', 'ALL', 'TITLE')
if searchnote[0] > 0: km.format_out(searchnote[1], 'searchtitle', ft='MD')
searchnote = km.search_clip(books, '巴曙松', 'ALL', 'AUTHOR')
if searchnote[0] > 0: km.format_out(searchnote[1], 'searchauthor', ft='MD')
# add note content to the highlight, then delete the note
km.add_note_to_highlight(books)
# test dict json convert
with open('./xx', 'w', encoding='utf8', errors='ignore') as fw:
fw.write(km.dict2json(books))
if km.json2dict('./xx')==books: print( 'test OK')
km.format_out(books, OUTPREF, ft='MD')
# print data with json format
logger.debug(json.dumps(books, indent=4, sort_keys=True, ensure_ascii=False))


@@ -1,5 +1,10 @@
import sys
from time import sleep
from threading import Thread
import _thread
import threading
from PySide2.QtWidgets import QApplication
from PySide2.QtWidgets import QMainWindow
@@ -16,6 +21,8 @@ from kman import *
# import binary resource file(kmanapp_rc.py)
import kmanapp_rc
ONLY_TEST = 0
class kmanWindow(QMainWindow):
"""
def __init__(self, *args, **kwargs):
@@ -26,13 +33,26 @@ class kmanWindow(QMainWindow):
self.stat_str = 'status information'
self.search_str = ''
self.local_fn = CLIPPATH
# create ui and initial it
ui = Ui_MainWindow()
ui.setupUi(self)
self.ui = ui
self.km = kMan()
self.books = self.km.import_clips('local')
# loop to check whether the kindle is connected
# to be implemented
"""
try:
#_thread.start_new_thread(self.check_kindle_status)
t1 = threading.Thread(target=check_kindle_status)
t1.start()
except:
print ("Error: can not start thread")
"""
# connect action/toolbutton to slot functions
ui.actionimportkindle.triggered.connect(lambda: self.import_kindle(self.books))
@@ -54,49 +74,48 @@ class kmanWindow(QMainWindow):
def add_ui_component(self):
self.ui.searchComboBox.addItems(['ALL','bookname','content','author'])
#insert test data xxxxxxxx
if not ONLY_TEST: # XXXXXXXXXXXXX
model = QStandardItemModel()
rootItem = model.invisibleRootItem()
idx = 0
for i in range(4):
idx += 1
item = QStandardItem('item {}'.format(idx))
rootItem.appendRow(item)
icon = QIcon()
icon.addFile(u":/icons/emblem_library.png", QSize(), QIcon.Normal, QIcon.Off)
icon.addFile(u":/icons/book_open_bookmark.png", QSize(), QIcon.Normal, QIcon.Off)
item.setIcon(icon)
parentItem.appendRows([QStandardItem('append rows {}'.format(i+idx)) for i in range(5)])
if i==1:
parentItem = item
for i in range(5):
idx += 1
item = QStandardItem('type item {}'.format(i+idx))
#item.setEnabled(False)
item.setEditable(False)
if i==0:
parentItem = item
icon = QIcon()
icon.addFile(u":/icons/register.png", QSize(), QIcon.Normal, QIcon.Off)
icon.addFile(u":/icons/emblem_library.png", QSize(), QIcon.Normal, QIcon.Off)
item.setIcon(icon)
parentItem.appendRow(item)
if i==3:
parentItem = item
for i in range(5):
idx += 1
item = QStandardItem('another item {}'.format(i+idx))
#item.setEnabled(False)
item.setEditable(False)
icon = QIcon()
icon.addFile(u":/icons/book_open.png", QSize(), QIcon.Normal, QIcon.Off)
item.setIcon(icon)
parentItem.appendRow(item)
parentItem.appendRows([QStandardItem('append rows {}'.format(i+idx)) for i in range(5)])
if i==1:
parentItem = item
for i in range(5):
idx += 1
item = QStandardItem('type item {}'.format(i+idx))
#item.setEnabled(False)
item.setEditable(False)
icon = QIcon()
icon.addFile(u":/icons/register.png", QSize(), QIcon.Normal, QIcon.Off)
item.setIcon(icon)
parentItem.appendRow(item)
if i==3:
parentItem = item
for i in range(5):
idx += 1
item = QStandardItem('another item {}'.format(i+idx))
#item.setEnabled(False)
item.setEditable(False)
icon = QIcon()
icon.addFile(u":/icons/book_open.png", QSize(), QIcon.Normal, QIcon.Off)
item.setIcon(icon)
parentItem.appendRow(item)
self.ui.treeView.setModel(model)
def clicked_items(self):
print( 'call clicked_items()' )
@@ -110,15 +129,46 @@ class kmanWindow(QMainWindow):
#print(search_clip(self.books,s,'ALL',p[t]))
print('call search_scope_change()')
def check_kindle_status(self):
while True:
self.show_status_info()
sleep(1)
def show_status_info(self):
""" show status information on statusbar
Args:
conn: 1 if kindle is connected else 0
Return:
conn
"""
status = self.km.status_info()
self.ui.statusbar.showMessage(status[0],0)
clabel = QLabel(status[1])
if 'Disconnected' in status[1]: # show the label in red when no kindle is connected
pe = QPalette()
pe.setColor(QPalette.WindowText,Qt.red)
#clabel.setAutoFillBackground(True)
clabel.setPalette(pe)
self.ui.statusbar.addPermanentWidget(clabel, stretch=0)
# define slot functions
def import_kindle(self,bks):
print("call slot importkindle()")
status = self.km.status_info()
self.show_status_info()
print(bks)
pass
def import_local(self):
print("call slot importlocal()")
fn, ft = QFileDialog.getOpenFileName(self,
"choose file to import",
'./', # starting directory
"All Files (*);;Text Files (*.txt)") # extension filters, separated by double semicolons
self.fn = fn
#print('filename ', fn, 'filetype ', ft)
if fn == "": return False
def config(self):
print("call slot config()")
@@ -137,6 +187,12 @@ class kmanWindow(QMainWindow):
pass
def about(self):
self.messagebox('\n'+ \
' kindle management tool \n\n' + \
' v1.0.4\n\n' + \
' Author: chengan\n\n' + \
' douboer@gmail.com')
print("call slot about()")
pass
@@ -144,8 +200,15 @@ class kmanWindow(QMainWindow):
print("call slot flush()")
pass
def messageBox(self, showInfo):
box = QMessageBox.about(self, 'Kindle Management', showInfo)
# unify messagebox
def messagebox(self, showinfo):
msgBox = QMessageBox()
msgBox.setText(showinfo)
msgBox.setInformativeText("")
msgBox.setIcon(QMessageBox.Information)
msgBox.setStandardButtons(QMessageBox.Cancel | QMessageBox.Ok)
msgBox.setBaseSize(QSize(600, 300))
r = msgBox.exec()
if __name__ == "__main__":
import sys
@@ -157,3 +220,4 @@ if __name__ == "__main__":
kmw.show()
app.exec_()

6
searchtitle.md Normal file

@@ -0,0 +1,6 @@
TYPE|BOOKNAME|AUTHOR|MARKTIME|CONTENT
--|--|--|--|--
HL|薛兆丰经济学讲义 |薛兆丰|2020/1/13 8:11:05|么到底什么叫边际?边际就是“新增”带来的“新增”。 例如,边际成本就是每新增一个单位产品所需要付出的新增成本;边际收入是每多卖一个产品能够带来的新增收入;边际产量是每新增一份投入所带来的新增产量;边际效用是每消耗一个单位的商品所能带来的新增享受。
HL|薛兆丰经济学讲义 |薛兆丰|2020/1/30 10:23:58|一个国家很大,贫富有差距,并非每个学校和家长都能负担得起这样标准的校车。标准太高,就会逼着很多学校,尤其是农村的学校放弃提供校车,家长们就只能使用安全性能更低的交通工具,比如自己骑自行车或雇用黑车等,结果是孩子们享受到的安全保障反而降低了。
NT|薛兆丰经济学讲义 |薛兆丰|2020/1/30 10:26:31|山寨 假货 问题
HL|薛兆丰经济学讲义 |薛兆丰|2020/1/30 10:29:41|为了克服信息不对称,建立互信,人类社会构想出了各种各样有趣的解决方案,从重复交易到第三方背书,从质保、延保,再到收益共享。此外,还有三种非常接近的建立信任的办法:付出沉没成本、给出人质或者给出抵押。


@@ -116,7 +116,6 @@ class TestKman(unittest.TestCase):
self.assertEqual(t_ds, '2020/1/13 20:11:05')
# test function format_data
def test_format_data(self):
def test_format_data(self):
t_books = self.cre_tbooks()
t_out = format_data(t_books, ft='MD')
@@ -125,6 +124,11 @@ class TestKman(unittest.TestCase):
self.assertEqual(t_out[2], 'HL|薛兆丰经济学讲义 |薛兆丰|2020/1/13 8:11:05|边际就是“新增”带来的“新增”。\n')
t_out.clear()
def test_add_note_to_highlight(self):
t_books = self.cre_tbooks()
t_books_remove_nt = add_note_to_highlight(t_books)
for k in t_books_remove_nt.keys():
bn = k
self.assertEqual((t_books_remove_nt[bn]['1']['content']).replace('\n',''),\
'边际就是“新增”带来的“新增”。'+NTPREF+ '山寨 假货 问题')

1
xx Normal file

@@ -0,0 +1 @@
{"\u859b\u5146\u4e30\u7ecf\u6d4e\u5b66\u8bb2\u4e49 ": {"author": "\u859b\u5146\u4e30", "1": {"type": "HL", "position": "1408-1410", "day": "2020\u5e741\u670813\u65e5", "week": "\u661f\u671f\u4e00", "meridiem": "\u4e0a\u5348", "time": "8:11:05", "content": "\u4e48\u5230\u5e95\u4ec0\u4e48\u53eb\u8fb9\u9645\uff1f\u8fb9\u9645\u5c31\u662f\u201c\u65b0\u589e\u201d\u5e26\u6765\u7684\u201c\u65b0\u589e\u201d\u3002 \u4f8b\u5982\uff0c\u8fb9\u9645\u6210\u672c\u5c31\u662f\u6bcf\u65b0\u589e\u4e00\u4e2a\u5355\u4f4d\u4ea7\u54c1\u6240\u9700\u8981\u4ed8\u51fa\u7684\u65b0\u589e\u6210\u672c\uff1b\u8fb9\u9645\u6536\u5165\u662f\u6bcf\u591a\u5356\u4e00\u4e2a\u4ea7\u54c1\u80fd\u591f\u5e26\u6765\u7684\u65b0\u589e\u6536\u5165\uff1b\u8fb9\u9645\u4ea7\u91cf\u662f\u6bcf\u65b0\u589e\u4e00\u4efd\u6295\u5165\u6240\u5e26\u6765\u7684\u65b0\u589e\u4ea7\u91cf\uff1b\u8fb9\u9645\u6548\u7528\u662f\u6bcf\u6d88\u8017\u4e00\u4e2a\u5355\u4f4d\u7684\u5546\u54c1\u6240\u80fd\u5e26\u6765\u7684\u65b0\u589e\u4eab\u53d7\u3002"}, "2": {"type": "HL", "position": "4284-4286", "day": "2020\u5e741\u670830\u65e5", "week": "\u661f\u671f\u56db", "meridiem": "\u4e0a\u5348", "time": "10:23:58", "content": "\u4e00\u4e2a\u56fd\u5bb6\u5f88\u5927\uff0c\u8d2b\u5bcc\u6709\u5dee\u8ddd\uff0c\u5e76\u975e\u6bcf\u4e2a\u5b66\u6821\u548c\u5bb6\u957f\u90fd\u80fd\u8d1f\u62c5\u5f97\u8d77\u8fd9\u6837\u6807\u51c6\u7684\u6821\u8f66\u3002\u6807\u51c6\u592a\u9ad8\uff0c\u5c31\u4f1a\u903c\u7740\u5f88\u591a\u5b66\u6821\uff0c\u5c24\u5176\u662f\u519c\u6751\u7684\u5b66\u6821\u653e\u5f03\u63d0\u4f9b\u6821\u8f66\uff0c\u5bb6\u957f\u4eec\u5c31\u53ea\u80fd\u4f7f\u7528\u5b89\u5168\u6027\u80fd\u66f4\u4f4e\u7684\u4ea4\u901a\u5de5\u5177\uff0c\u6bd4\u5982\u81ea\u5df1\u9a91\u81ea\u884c\u8f66\u6216\u96c7\u7528\u9ed1\u8f66\u7b49\uff0c\u7ed3\u679c\u662f\u5b69\u5b50\u4eec\u4eab\u53d7\u5230\u7684\u5b89\u5168\u4fdd\u969c\u53cd\u800c\u964d\u4f4e\u4e86\u3002--CG\u6ce8:\u5c71\u5be8 \u5047\u8d27 \u95ee\u9898"}, "4": {"type": "HL", "position": "4382-4384", "day": "2020\u5e741\u670830\u65e5", "week": "\u661f\u671f\u56db", "meridiem": "\u4e0a\u5348", "time": "10:29:41", "content": "\u4e3a\u4e86\u514b\u670d\u4fe1\u606f\u4e0d\u5bf9\u79f0\uff0c\u5efa\u7acb\u4e92\u4fe1\uff0c\u4eba\u7c7b\u793e\u4f1a\u6784\u60f3\u51fa\u4e86\u5404\u79cd\u5404\u6837\u6709\u8da3\u7684\u89e3\u51b3\u65b9\u6848\uff0c\u4ece\u91cd\u590d\u4ea4\u6613\u5230\u7b2c\u4e09\u65b9\u80cc\u4e66\uff0c\u4ece\u8d28\u4fdd\u3001\u5ef6\u4fdd\uff0c\u518d\u5230\u6536\u76ca\u5171\u4eab\u3002\u6b64\u5916\uff0c\u8fd8\u6709\u4e09\u79cd\u975e\u5e38\u63a5\u8fd1\u7684\u5efa\u7acb\u4fe1\u4efb\u7684\u529e\u6cd5\uff1a\u4ed8\u51fa\u6c89\u6ca1\u6210\u672c\u3001\u7ed9\u51fa\u4eba\u8d28\u6216\u8005\u7ed9\u51fa\u62b5\u62bc\u3002"}, "lines": 4}}