from HTMLParser import HTMLParser
import inspect
import multiprocessing
import re
import Queue
from urllib import quote, quote_plus
from urlparse import urlparse

from debug import logger
from shared import parserInputQueue, parserOutputQueue, parserProcess, parserLock

def regexpSubprocess(parserInputQueue, parserOutputQueue):
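    """
    Worker run in a child process: read message text from
    parserInputQueue, linkify URIs and e-mail addresses with the
    class-level regexps, and put the result on parserOutputQueue.
    Runs until it receives the None sentinel.
    """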
    for data in iter(parserInputQueue.get, None):
        # iter() already stops on the None sentinel; this is a
        # defensive double-check
        if data is None:
            break
        try:
            result = SafeHTMLParser.uriregex1.sub(
                r'<a href="\1">\1</a>',
                data)
            # multi_replace() escaped "&" to "&amp;" before the text
            # reached this process; restore a plain "&" inside the href
            # attributes generated above
            result = SafeHTMLParser.uriregex2.sub(r'<a href="\1&', result)
            result = SafeHTMLParser.emailregex.sub(r'<a href="mailto:\1">\1</a>', result)
            parserOutputQueue.put(result)
        except SystemExit:
            break
        except:
            # never let a regexp failure hang the caller: exit and let
            # the parent restart the worker after its timeout
            break
|
2015-12-14 19:43:39 +01:00
|
|
|
|
|
|
|
class SafeHTMLParser(HTMLParser):
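    """
    An HTMLParser which re-emits only a whitelisted subset of HTML and
    escapes everything else, so untrusted message bodies can be
    displayed without executing scripts or fetching remote content.
    """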
    # from html5lib.sanitiser
    acceptable_elements = ['a', 'abbr', 'acronym', 'address', 'area',
        'article', 'aside', 'audio', 'b', 'big', 'blockquote', 'br', 'button',
        'canvas', 'caption', 'center', 'cite', 'code', 'col', 'colgroup',
        'command', 'datagrid', 'datalist', 'dd', 'del', 'details', 'dfn',
        'dialog', 'dir', 'div', 'dl', 'dt', 'em', 'event-source', 'fieldset',
        'figcaption', 'figure', 'footer', 'font', 'header', 'h1',
        'h2', 'h3', 'h4', 'h5', 'h6', 'hr', 'i', 'img', 'ins',
        'keygen', 'kbd', 'label', 'legend', 'li', 'm', 'map', 'menu', 'meter',
        'multicol', 'nav', 'nextid', 'ol', 'output', 'optgroup', 'option',
        'p', 'pre', 'progress', 'q', 's', 'samp', 'section', 'select',
        'small', 'sound', 'source', 'spacer', 'span', 'strike', 'strong',
        'sub', 'sup', 'table', 'tbody', 'td', 'textarea', 'time', 'tfoot',
        'th', 'thead', 'tr', 'tt', 'u', 'ul', 'var', 'video']
    # order matters: escape the HTML metacharacters first, then rewrite
    # whitespace into entities that survive HTML rendering
    replaces = [["&", "&amp;"], ["\"", "&quot;"], ["<", "&lt;"],
        [">", "&gt;"], ["\n", "<br/>"], ["\t", "&nbsp;&nbsp;&nbsp;&nbsp;"],
        ["  ", "&nbsp; "], ["  ", "&nbsp; "], ["<br/> ", "<br/>&nbsp;"]]
    # schemes considered safe for src attributes even when external
    # content is disallowed
    src_schemes = ["data"]
    # matches bare URIs (http/https/ftp/bitcoin, www.* and domain/path forms)
    uriregex1 = re.compile(r'(?i)\b((?:(https?|ftp|bitcoin):(?:/{1,3}|[a-z0-9%])|www\d{0,3}[.]|[a-z0-9.\-]+[.][a-z]{2,4}/)(?:[^\s()<>]+|\(([^\s()<>]+|(\([^\s()<>]+\)))*\))+(?:\(([^\s()<>]+|(\([^\s()<>]+\)))*\)|[^\s`!()\[\]{};:\'".,<>?]))')
    # matches a generated href whose URL still contains an escaped ampersand
    uriregex2 = re.compile(r'<a href="([^"]+)&amp;')
    emailregex = re.compile(r'\b([A-Za-z0-9._%+-]+@[A-Za-z0-9.-]+\.[A-Za-z]{2,})\b')

    @staticmethod
    def multi_replace(text):
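        """
        Escape HTML metacharacters in text and replace whitespace with
        non-breaking equivalents so the plain-text layout survives
        rendering.
        """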
        for a in SafeHTMLParser.replaces:
            text = text.replace(a[0], a[1])
        # a leading plain space would be collapsed by the HTML renderer
        if len(text) > 1 and text[0] == " ":
            text = "&nbsp;" + text[1:]
        return text

    def __init__(self, *args, **kwargs):
        HTMLParser.__init__(self, *args, **kwargs)
        self.reset_safe()

    def reset_safe(self):
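        """Reset the sanitiser state which HTMLParser.reset() does not cover."""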
        self.elements = set()
        self.raw = u""
        self.sanitised = u""
        self.has_html = False
        self.allow_picture = False
        self.allow_external_src = False

    def add_if_acceptable(self, tag, attrs=None):
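        """
        Append tag (with attrs, if given) to self.sanitised, provided
        the tag is whitelisted; src attributes pointing at external
        content are blanked unless explicitly allowed.
        """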
        if tag not in SafeHTMLParser.acceptable_elements:
            return
        self.sanitised += "<"
        # this method serves start, end and self-closing tags alike; the
        # caller's name tells us which variant to emit
        if inspect.stack()[1][3] == "handle_endtag":
            self.sanitised += "/"
        self.sanitised += tag
        if attrs is not None:
            for attr, val in attrs:
                if tag == "img" and attr == "src" and not self.allow_picture:
                    val = ""
                elif attr == "src" and not self.allow_external_src:
                    url = urlparse(val)
                    if url.scheme not in SafeHTMLParser.src_schemes:
                        val = ""
                self.sanitised += " " + quote_plus(attr)
                if val is not None:
                    self.sanitised += "=\"" + (val if isinstance(val, unicode) else unicode(val, 'utf-8', 'replace')) + "\""
        if inspect.stack()[1][3] == "handle_startendtag":
            self.sanitised += "/"
        self.sanitised += ">"

    def handle_starttag(self, tag, attrs):
        if tag in SafeHTMLParser.acceptable_elements:
            self.has_html = True
        self.add_if_acceptable(tag, attrs)

    def handle_endtag(self, tag):
        self.add_if_acceptable(tag)

    def handle_startendtag(self, tag, attrs):
        if tag in SafeHTMLParser.acceptable_elements:
            self.has_html = True
        self.add_if_acceptable(tag, attrs)

    def handle_data(self, data):
        # HTMLParser may hand us either str or unicode chunks
        self.sanitised += (data if isinstance(data, unicode) else unicode(data, 'utf-8', 'replace'))

    def handle_charref(self, name):
        self.sanitised += "&#" + name + ";"

    def handle_entityref(self, name):
        self.sanitised += "&" + name + ";"

    def feed(self, data):
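        """
        Feed data to the underlying HTMLParser and, in parallel, hand an
        escaped copy to the regexp worker process for URI/e-mail
        linkification. If the worker does not answer within one second
        it is assumed to be stuck and is restarted, and the text is kept
        without links.
        """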
        global parserLock, parserProcess, parserInputQueue, parserOutputQueue
        HTMLParser.feed(self, data)
        tmp = SafeHTMLParser.multi_replace(data)
        tmp = unicode(tmp, 'utf-8', 'replace')

        # spawn the regexp worker lazily, on first use
        parserLock.acquire()
        if parserProcess is None:
            parserProcess = multiprocessing.Process(
                target=regexpSubprocess, name="RegExParser",
                args=(parserInputQueue, parserOutputQueue))
            parserProcess.start()
        parserLock.release()
        # drain any stale results left over from a previous, timed-out
        # run so they are not mistaken for the answer to this request
        try:
            while True:
                parserOutputQueue.get(False)
        except Queue.Empty:
            logger.debug("Parser queue flushed")

        parserInputQueue.put(tmp)
        try:
            tmp = parserOutputQueue.get(True, 1)
        except Queue.Empty:
            # the worker is probably stuck on catastrophic regexp
            # backtracking; keep the escaped but unlinkified text and
            # restart the worker for the next message
            logger.error("Regular expression parsing timed out, not displaying links")
            parserLock.acquire()
            parserProcess.terminate()
            parserProcess = multiprocessing.Process(
                target=regexpSubprocess, name="RegExParser",
                args=(parserInputQueue, parserOutputQueue))
            parserProcess.start()
            parserLock.release()

        self.raw += tmp

    def is_html(self, text=None, allow_picture=False):
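        """
        Feed text (if given) and report whether any whitelisted HTML was
        seen; pass allow_picture=True to keep img src attributes.
        """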
        if text:
            self.reset()
            self.reset_safe()
            self.allow_picture = allow_picture
            self.feed(text)
            self.close()
        return self.has_html
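

# Minimal usage sketch (assumes the parserInputQueue, parserOutputQueue,
# parserProcess and parserLock globals are initialised in the `shared`
# module, as in PyBitmessage):
#
#     parser = SafeHTMLParser()
#     if parser.is_html('See <b>this</b>: https://example.com/?a=1&b=2'):
#         print parser.sanitised  # whitelisted tags kept, URI linkified
#     else:
#         print parser.raw        # escaped plain text with links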