diff --git a/src/wxsy.net/extract.py b/src/wxsy.net/extract.py
new file mode 100644
index 0000000..44fdd8e
--- /dev/null
+++ b/src/wxsy.net/extract.py
@@ -0,0 +1,48 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+
+"""
+Extract data from raw html content.
+
+  USAGE: python3 extract.py [CATALOG] [HTML_DIR] [OUTPUT_DIR]
+"""
+
+import os
+import re
+import sys
+import json
+from logger import logger
+from bs4 import BeautifulSoup
+
+
+def splitHtml(rawHtml: str) -> dict:
+    """Extract chapter data from one chapter's raw html page.
+
+    Returns a dict with the chapter title, previous/next/current chapter
+    ids and the list of paragraph strings; logs an error when the page
+    title disagrees with the chapter name embedded in the inline script.
+    """
+    html = BeautifulSoup(rawHtml, 'lxml')
+    script = html.select('script')[9].text  # js code with chapter info (fixed position on this site)
+    info = {
+        'title': html.select('div[class="pt-read-title"]')[0].contents[1].contents[0].attrs['title'],
+        'preId': re.search(r'window\.__PREVPAGE = "(\d*)"', script)[1],
+        'nextId': re.search(r'window\.__NEXTPAGE = "(\d*)"', script)[1],
+        'myId': re.search(r'window\.chapterNum = (\d+)', script)[1],
+        'content': [x.text.strip() for x in html.select('p[class="content_detail"]')],
+    }
+    if info['title'] != re.search(r'window\.chapterName = \'(.+)\'', script)[1]:  # chapter title check
+        logger.error('Title error -> %s' % info['title'])
+    return info
+
+
+with open(sys.argv[1], encoding='utf-8') as catalogFile:  # load catalog (close handle promptly)
+    catalog = json.loads(catalogFile.read())
+
+for _, chapterId in catalog.items():  # traverse all chapters
+    logger.info('Analyse chapter `%s`' % chapterId)
+    htmlFile = os.path.join(sys.argv[2], '%s.html' % chapterId)
+    with open(htmlFile, encoding='utf-8') as htmlObj:  # read page before creating output file
+        rawHtml = htmlObj.read()
+    with open(os.path.join(sys.argv[3], '%s.json' % chapterId), 'w') as fileObj:
+        fileObj.write(json.dumps(splitHtml(rawHtml)))