2011-02-04 16:17:27 -05:00
|
|
|
import re
|
2011-04-14 03:07:05 -04:00
|
|
|
import random
|
2011-08-11 04:59:51 -04:00
|
|
|
import ostools
|
2016-11-18 03:37:22 -05:00
|
|
|
import collections
|
2011-02-11 04:07:07 -05:00
|
|
|
from copy import copy
|
2011-02-05 12:17:33 -05:00
|
|
|
from datetime import timedelta
|
2021-03-23 17:36:43 -04:00
|
|
|
from PyQt5 import QtCore, QtGui, QtWidgets
|
2011-02-04 16:17:27 -05:00
|
|
|
|
2011-02-13 20:32:02 -05:00
|
|
|
from generic import mysteryTime
|
2012-11-12 23:52:26 -05:00
|
|
|
from quirks import ScriptQuirks
|
2011-06-07 11:48:35 -04:00
|
|
|
from pyquirks import PythonQuirks
|
2012-11-12 23:52:26 -05:00
|
|
|
from luaquirks import LuaQuirks
|
2016-12-07 17:40:37 -05:00
|
|
|
import dataobjs
|
2011-02-13 20:32:02 -05:00
|
|
|
|
2016-11-18 03:37:22 -05:00
|
|
|
# karxi: My own contribution to this - a proper lexer.
|
|
|
|
import pnc.lexercon as lexercon
|
|
|
|
|
|
|
|
# I'll clean up the things that are no longer needed once the transition is
|
|
|
|
# actually finished.
|
2021-03-23 17:36:43 -04:00
|
|
|
# Compatibility alias for code written against PyQt4's QString type.
try:
    # Python 2: text type is the unicode builtin.
    QString = unicode
except NameError:
    # Python 3: all strings are unicode.
    QString = str
|
|
|
|
2011-02-10 13:00:06 -05:00
|
|
|
# Regexes for the message lexer. All tag matching is case-insensitive.
# Opening color tag, e.g. "<c=0,0,0>" or "<c=red>"; group 1 is the color spec.
_ctag_begin = re.compile(r'(?i)<c=(.*?)>')
# Gradient tags "<ga>".."<gf>" (no capture group).
_gtag_begin = re.compile(r'(?i)<g[a-f]>')
# Closing color tag "</c>".
_ctag_end = re.compile(r'(?i)</c>')
# An "r,g,b" numeric color spec (used to distinguish from named colors).
_ctag_rgb = re.compile(r'\d+,\d+,\d+')
# Full URLs with an explicit scheme (http/https/ftp/magnet).
_urlre = re.compile(r"(?i)(?:^|(?<=\s))(?:(?:https?|ftp)://|magnet:)[^\s]+")
# Scheme-less "www." URLs; the (?<!//) keeps them from double-matching _urlre.
_url2re = re.compile(r"(?i)(?<!//)\bwww\.[^\s]+?\.")
# Memo references ("#channel"); group 1 is leading whitespace, group 2 the memo.
_memore = re.compile(r"(\s|^)(#[A-Za-z0-9_]+)")
# Chumhandle references ("@handle"); same group layout as _memore.
_handlere = re.compile(r"(\s|^)(@[A-Za-z0-9_]+)")
# Inline image tags; group 1 is the image source path.
_imgre = re.compile(r"""(?i)<img src=['"](\S+)['"]\s*/>""")
# "/me" action commands at the start of a message; group 2 is any suffix.
_mecmdre = re.compile(r"^(/me|PESTERCHUM:ME)(\S*)")
# Out-of-character markers: a doubled brace pair, e.g. "(( ... ))".
_oocre = re.compile(r"([\[(\{])\1.*([\])\}])\2")
# Formatting tags <i>/<b>/<u> (currently disabled in lexMessage).
_format_begin = re.compile(r'(?i)<([ibu])>')
_format_end = re.compile(r'(?i)</([ibu])>')
# The word "honk", which gets a special smiley.
_honk = re.compile(r"(?i)\bhonk\b")
|
2011-02-04 16:17:27 -05:00
|
|
|
|
2012-11-12 23:52:26 -05:00
|
|
|
# Set up the quirk system: register each scripting backend, then load every
# quirk script found on disk.
quirkloader = ScriptQuirks()
quirkloader.add(PythonQuirks())
quirkloader.add(LuaQuirks())
quirkloader.loadAll()
# NOTE(review): leftover debug output at import time — consider removing.
print(quirkloader.funcre())
# Regex matching any registered quirk function call, e.g. "rainbow(...)".
_functionre = re.compile(r"%s" % quirkloader.funcre())
# Backreference-style group placeholders ("\1") inside quirk replacements.
_groupre = re.compile(r"\\([0-9]+)")
|
|
|
|
|
2011-06-07 11:48:35 -04:00
|
|
|
def reloadQuirkFunctions():
    """Reload every quirk script from disk and rebuild the matching regex."""
    global _functionre
    quirkloader.loadAll()
    # The function-name pattern may have changed, so recompile it.
    _functionre = re.compile(quirkloader.funcre())
|
|
|
|
|
2011-02-11 04:07:07 -05:00
|
|
|
def lexer(string, objlist):
    """objlist is a list: [(objecttype, re),...] list is in order of preference.

    Walks the input once per (type, regex) pair, replacing each regex match
    with an instance of the paired type (constructed as
    ``oType(match_text, *groups)``) and keeping the unmatched text between
    matches as plain strings. Returns the resulting mixed list.
    """
    stringlist = [string]
    for (oType, regexp) in objlist:
        newstringlist = []
        for s in stringlist:
            # Objects produced by an earlier pass pass through untouched;
            # only plain strings are scanned for further matches.
            if not isinstance(s, str):
                newstringlist.append(s)
                continue
            lasti = 0
            for m in regexp.finditer(s):
                start = m.start()
                end = m.end()
                tag = oType(m.group(0), *m.groups())
                if lasti != start:
                    newstringlist.append(s[lasti:start])
                newstringlist.append(tag)
                lasti = end
            # Bug fix: compare against the *current segment*, not the whole
            # original string. The old `lasti < len(string)` check appended
            # spurious empty strings once the input had been split.
            if lasti < len(s):
                newstringlist.append(s[lasti:])
        # The working list becomes the input for the next pattern pass.
        stringlist = newstringlist
    return stringlist
|
|
|
|
|
2016-11-18 03:37:22 -05:00
|
|
|
# karxi: All of these were derived from object before. I changed them to
|
|
|
|
# lexercon.Chunk so that I'd have an easier way to match against them until
|
|
|
|
# they're redone/removed.
|
|
|
|
class colorBegin(lexercon.Chunk):
    """An opening color tag (<c=...>), carrying its color specification."""
    def __init__(self, string, color):
        # string: the raw matched tag text.
        # color: the tag's argument — either "r,g,b" digits or a
        # named/hex color QColor can parse.
        self.string = string
        self.color = color
    def convert(self, format):
        """Render the opening tag for the given output format
        ("text", "ctag", "html", or "bbcode")."""
        color = self.color
        if format == "text":
            # Plain text drops color entirely.
            return ""
        if _ctag_rgb.match(color) is not None:
            if format=='ctag':
                # Already in r,g,b form; emit as-is.
                return "<c=%s>" % (color)
            try:
                qc = QtGui.QColor(*[int(c) for c in color.split(",")])
            except ValueError:
                # Malformed component values fall back to black.
                qc = QtGui.QColor("black")
        else:
            # Named or hex color; let Qt parse it.
            qc = QtGui.QColor(color)
        if not qc.isValid():
            qc = QtGui.QColor("black")
        if format == "html":
            return '<span style="color:%s">' % (qc.name())
        elif format == "bbcode":
            return '[color=%s]' % (qc.name())
        elif format == "ctag":
            # Alpha is discarded; ctags only carry r,g,b.
            (r,g,b,a) = qc.getRgb()
            return '<c=%s,%s,%s>' % (r,g,b)
|
2016-11-18 03:37:22 -05:00
|
|
|
|
|
|
|
class colorEnd(lexercon.Chunk):
    """A closing color tag (</c>)."""

    # Fixed renderings per output format; anything else echoes the raw text.
    _RENDER = {"html": "</span>", "bbcode": "[/color]", "text": ""}

    def __init__(self, string):
        self.string = string

    def convert(self, format):
        """Render the closing tag for the given output format."""
        return self._RENDER.get(format, self.string)
|
2016-11-18 03:37:22 -05:00
|
|
|
|
|
|
|
class formatBegin(lexercon.Chunk):
    """An opening text-format tag (<i>, <b>, or <u>)."""

    def __init__(self, string, ftype):
        self.string = string
        self.ftype = ftype

    def convert(self, format):
        """Render the opening format tag for the given output format."""
        if format == "html":
            return "<{}>".format(self.ftype)
        if format == "bbcode":
            return "[{}]".format(self.ftype)
        if format == "text":
            # Plain text drops formatting.
            return ""
        return self.string
|
2016-11-18 03:37:22 -05:00
|
|
|
|
|
|
|
class formatEnd(lexercon.Chunk):
    """A closing text-format tag (</i>, </b>, or </u>)."""

    def __init__(self, string, ftype):
        self.string = string
        self.ftype = ftype

    def convert(self, format):
        """Render the closing format tag for the given output format."""
        if format == "html":
            return "</{}>".format(self.ftype)
        if format == "bbcode":
            return "[/{}]".format(self.ftype)
        if format == "text":
            # Plain text drops formatting.
            return ""
        return self.string
|
2016-11-18 03:37:22 -05:00
|
|
|
|
|
|
|
class hyperlink(lexercon.Chunk):
    """A URL found in a message; linkified for HTML, wrapped for BBCode."""

    def __init__(self, string):
        self.string = string

    def convert(self, format):
        """Render the link for the given output format."""
        if format == "html":
            return "<a href='{0}'>{0}</a>".format(self.string)
        if format == "bbcode":
            return "[url]{}[/url]".format(self.string)
        return self.string
|
2016-11-18 03:37:22 -05:00
|
|
|
|
2012-06-04 12:43:56 -04:00
|
|
|
class hyperlink_lazy(hyperlink):
    """A scheme-less URL ("www.foo.com"); an http:// prefix is assumed."""

    def __init__(self, string):
        self.string = "http://{}".format(string)
|
2016-11-18 03:37:22 -05:00
|
|
|
|
|
|
|
class imagelink(lexercon.Chunk):
    """An inline <img/> tag; kept as-is for HTML, converted for BBCode."""

    def __init__(self, string, img):
        self.string = string
        self.img = img

    def convert(self, format):
        """Render the image for the given output format. Non-HTTP images
        (local smilies) and plain text render as nothing."""
        if format == "html":
            return self.string
        if format == "bbcode" and self.img.startswith("http://"):
            return "[img]{}[/img]".format(self.img)
        return ""
|
2016-11-18 03:37:22 -05:00
|
|
|
|
|
|
|
class memolex(lexercon.Chunk):
    """A memo reference ("#channel") found in a message."""

    def __init__(self, string, space, channel):
        self.string = string
        self.space = space   # whitespace matched before the reference
        self.channel = channel

    def convert(self, format):
        """Linkify the memo reference for HTML; otherwise echo the text."""
        if format != "html":
            return self.string
        link = "<a href='{0}'>{0}</a>".format(self.channel)
        return self.space + link
|
2016-11-18 03:37:22 -05:00
|
|
|
|
|
|
|
class chumhandlelex(lexercon.Chunk):
    """A chumhandle reference ("@handle") found in a message."""

    def __init__(self, string, space, handle):
        self.string = string
        self.space = space   # whitespace matched before the reference
        self.handle = handle

    def convert(self, format):
        """Linkify the handle for HTML; otherwise echo the text."""
        if format != "html":
            return self.string
        link = "<a href='{0}'>{0}</a>".format(self.handle)
        return self.space + link
|
2016-11-18 03:37:22 -05:00
|
|
|
|
|
|
|
class smiley(lexercon.Chunk):
    """A smiley keyword; rendered as its image from the smilies folder."""

    def __init__(self, string):
        self.string = string

    def convert(self, format):
        """Render the smiley as an <img> for HTML; otherwise echo it.

        Looks the image filename up in the module-level smiledict (defined
        elsewhere in this file)."""
        if format != "html":
            return self.string
        icon = smiledict[self.string]
        return "<img src='smilies/{0}' alt='{1}' title='{1}' />".format(
            icon, self.string)
|
2016-11-18 03:37:22 -05:00
|
|
|
|
|
|
|
class honker(lexercon.Chunk):
    """The word "honk"; rendered as the honk smiley in HTML."""
    def __init__(self, string):
        self.string = string

    def convert(self, format):
        """Render the honk smiley for HTML; otherwise echo the word."""
        if format == "html":
            # Bug fix: the original emitted "alt'honk'" (missing '='),
            # which is invalid HTML attribute syntax.
            return "<img src='smilies/honk.png' alt='honk' title='honk' />"
        else:
            return self.string
|
2016-11-18 03:37:22 -05:00
|
|
|
|
|
|
|
class mecmd(lexercon.Chunk):
    """A "/me" (or "PESTERCHUM:ME") action command at the start of a line."""
    def __init__(self, string, mecmd, suffix):
        # Note: the matched command itself (mecmd) is not stored; only the
        # raw text and any trailing suffix are kept.
        self.string = string
        self.suffix = suffix
    def convert(self, format):
        # Action commands render identically in every output format.
        return self.string
|
2011-02-04 16:17:27 -05:00
|
|
|
|
2016-11-18 03:37:22 -05:00
|
|
|
# Module-level Pesterchum lexer instance used by the kx* helpers below.
kxpclexer = lexercon.Pesterchum()
|
|
|
|
|
|
|
|
def kxlexMsg(string):
    """Sanitize a raw message and lex it with the module's Pesterchum lexer.

    Linebreaks are flattened to spaces and tabs expanded to four spaces
    before lexing."""
    # TODO: Let people paste line-by-line normally. Maybe have a mass-paste
    # right-click option?
    sanitized = str(string)
    for bad, good in (('\n', ' '), ('\r', ' '), ('\t', ' ' * 4)):
        sanitized = sanitized.replace(bad, good)
    # Hand the cleaned-up text to the proper lexer.
    return kxpclexer.lex(sanitized)
|
|
|
|
|
2011-02-13 04:27:12 -05:00
|
|
|
def lexMessage(string):
    """Lex a raw message into a mixed list of plain strings and tag objects,
    then balance the color tags so every opener has a matching closer."""
    # Order matters: earlier entries take precedence in the lexer.
    lexlist = [(mecmd, _mecmdre),
               (colorBegin, _ctag_begin), (colorBegin, _gtag_begin),
               (colorEnd, _ctag_end),
               # karxi: Disabled this for now. No common versions of Pesterchum
               # actually use it, save for Chumdroid...which shouldn't.
               # When I change out parsers, I might add it back in.
               ##(formatBegin, _format_begin), (formatEnd, _format_end),
               (imagelink, _imgre),
               (hyperlink, _urlre), (hyperlink_lazy, _url2re),
               (memolex, _memore),
               (chumhandlelex, _handlere),
               (smiley, _smilere),
               (honker, _honk)]

    string = str(string)
    # Messages are single-line; flatten any linebreaks.
    string = string.replace("\n", " ").replace("\r", " ")
    lexed = lexer(str(string), lexlist)

    # Balance the color tags: drop closers that have no matching opener,
    # and append closers for any openers left unclosed at the end.
    balanced = []
    beginc = 0  # count of <c> openers seen
    endc = 0    # count of </c> closers kept
    for o in lexed:
        if type(o) is colorBegin:
            beginc += 1
            balanced.append(o)
        elif type(o) is colorEnd:
            if beginc >= endc:
                endc += 1
                balanced.append(o)
            else:
                # Surplus closer; demote it to plain text.
                balanced.append(o.string)
        else:
            balanced.append(o)
    if beginc > endc:
        # Close any tags the sender left open.
        for i in range(0, beginc-endc):
            balanced.append(colorEnd("</c>"))
    if len(balanced) == 0:
        balanced.append("")
    # Ensure the message always ends with a plain string.
    if type(balanced[len(balanced)-1]) not in [str, str]:
        balanced.append("")
    return balanced
|
|
|
|
|
|
|
|
def convertTags(lexed, format="html"):
    """Convert a lexed message to a single string in the given format.

    ``lexed`` may be a raw string (it is lexed first) or a list of plain
    strings and tag objects from lexMessage(). ``format`` must be one of
    "html", "bbcode", "ctag", or "text".

    Raises ValueError for an unrecognized format.
    """
    if format not in ("html", "bbcode", "ctag", "text"):
        raise ValueError("Color format not recognized")

    # Accept raw strings for convenience.
    if isinstance(lexed, str):
        lexed = lexMessage(lexed)
    escaped = ""
    for o in lexed:
        if isinstance(o, str):
            if format == "html":
                # Escape HTML special characters ('&' first, so the
                # entities we insert aren't themselves escaped).
                escaped += o.replace("&", "&amp;").replace(">", "&gt;").replace("<","&lt;")
            else:
                escaped += o
        else:
            # Tag objects know how to render themselves.
            escaped += o.convert(format)

    return escaped
|
|
|
|
|
2016-11-13 19:54:41 -05:00
|
|
|
def _max_msg_len(mask=None, target=None):
|
|
|
|
# karxi: Copied from another file of mine, and modified to work with
|
|
|
|
# Pesterchum.
|
|
|
|
# Note that this effectively assumes the worst when not provided the
|
|
|
|
# information it needs to get an accurate read, so later on, it'll need to
|
2016-11-18 03:37:22 -05:00
|
|
|
# be given a nick or the user's hostmask, as well as where the message is
|
2016-11-13 19:54:41 -05:00
|
|
|
# being sent.
|
|
|
|
# It effectively has to construct the message that'll be sent in advance.
|
|
|
|
limit = 512
|
|
|
|
|
|
|
|
# Start subtracting
|
|
|
|
# ':', " PRIVMSG ", ' ', ':', \r\n
|
|
|
|
limit -= 14
|
|
|
|
|
|
|
|
if mask is not None:
|
|
|
|
# Since this will be included in what we send
|
|
|
|
limit -= len(str(mask))
|
|
|
|
else:
|
|
|
|
# Since we should always be able to fetch this
|
|
|
|
# karxi: ... Which we can't, right now, unlike in the old script.
|
|
|
|
# TODO: Resolve this issue, give it the necessary information.
|
|
|
|
nick = None
|
|
|
|
# If we CAN'T, stick with a length of 30, since that seems to be
|
|
|
|
# the average maximum nowadays
|
|
|
|
limit -= len(nick) if nick is not None else 30
|
|
|
|
# '!', '@'
|
|
|
|
limit -= 2
|
|
|
|
# Maximum ident length
|
|
|
|
limit -= 10
|
|
|
|
# Maximum (?) host length
|
|
|
|
limit -= 63 # RFC 2812
|
|
|
|
# The target is the place this is getting sent to - a channel or a nick
|
|
|
|
if target is not None:
|
|
|
|
limit -= len(target)
|
|
|
|
else:
|
|
|
|
# Normally I'd assume about 60...just to be safe.
|
|
|
|
# However, the current (2016-11-13) Pesterchum limit for memo name
|
|
|
|
# length is 32, so I'll bump it to 40 for some built-in leeway.
|
|
|
|
limit -= 40
|
|
|
|
|
|
|
|
return limit
|
|
|
|
|
2016-11-18 03:37:22 -05:00
|
|
|
def kxsplitMsg(lexed, fmt="pchum", maxlen=None, debug=False):
    """Split messages so that they don't go over the length limit.
    Returns a list of the messages, neatly split.

    Keep in mind that there's a little bit of magic involved in this at the
    moment; some unsafe assumptions are made."""

    # NOTE: lexercon CTag objects convert to "r,g,b" format, so they're
    # usually fairly long. Making that configurable won't play nice with
    # Chumdroid, so it'd have to be an actual parser config option.

    # Procedure: Lex. Convert for lengths as we go, keep starting tag
    # length as we go too. Split whenever we hit the limit, add the tags to
    # the start of the next line, and continue.
    # N.B.: Keep the end tag length too. (+4 for each.)
    # TODO: There's presently an issue where certain combinations of color
    # codes end up being added as a separate, empty line.
    # TODO: This may not work properly with unicode; IRC doesn't formally
    # use it, so lengths should probably use decomposed characters...ugh.
    # Copy the list so we can't break anything.
    lexed = list(lexed)
    working = []      # lexemes accumulated for the current output line
    output = []       # finished lines
    open_ctags = []   # color tags opened but not yet closed
    # Number of characters we've used on the current line.
    curlen = 0
    # Maximum number of characters *to* use.
    if not maxlen:
        maxlen = _max_msg_len()
    elif maxlen < 0:
        # Subtract the (negative) length, giving us less leeway in this
        # function.
        maxlen = _max_msg_len() + maxlen

    # Defined here, but modified in the loop.
    msglen = 0

    def efflenleft():
        """Get the remaining space we have to work with, accounting for
        closing tags that will be needed."""
        return maxlen - curlen - (len(open_ctags) * 4)

    safekeeping = lexed[:]
    lexed = collections.deque(lexed)
    rounds = 0

    # NOTE: This entire mess is due for a rewrite. I'll start splitting it
    # into sub-functions for the eventualities that arise during parsing.
    # (E.g. the text block splitter NEEDS to be a different function....)
    while len(lexed) > 0:
        rounds += 1
        if debug:
            print("[Starting round {}...]".format(rounds))
        msg = lexed.popleft()
        msglen = 0
        is_text = False

        try:
            msglen = len(msg.convert(fmt))
        except AttributeError:
            # It's probably not a lexer tag. Assume a string.
            # The input to this is supposed to be sanitary, after all.
            msglen = len(msg)
            # We allow this to error out if it fails for some reason.
            # Remind us that it's a string, and thus can be split.
            is_text = True

        # Test if we have room.
        if msglen > efflenleft():
            # We do NOT have room. If we have text, we can split it,
            # keeping color codes in mind; the remainder goes on the
            # next line (after the still-open color codes).
            if is_text and efflenleft() > 30:
                # 30 is a general 'guess' - if there's less space than
                # that, it's probably not worth trying to cram text in.
                # This also saves us from infinitely trying to reduce the
                # size of the input.
                stack = []
                # We have text to split. Changes aren't applied until the
                # end; the rest is shoved onto the stack to be dealt with
                # immediately after.
                lenl = efflenleft()
                subround = 0
                while len(msg) > lenl:
                    # NOTE: This may be cutting it a little close. Maybe
                    # use >= instead?
                    subround += 1
                    if debug:
                        print("[Splitting round {}-{}...]".format(
                            rounds, subround
                            ))
                    # Prefer breaking on a space within the budget.
                    point = msg.rfind(' ', 0, lenl)
                    if point < 0:
                        # No spaces to break on...ugh. Break at the last
                        # character we can instead.
                        point = lenl ## - 1
                        # NOTE: The - 1 is for safety (but probably isn't
                        # actually necessary.)
                    # Split and push what we have.
                    stack.append(msg[:point])
                    # Remove what we just added.
                    msg = msg[point:]
                    if debug:
                        print("msg = {!r}".format(msg))
                else:
                    # (while-else) Catch the remainder.
                    stack.append(msg)
                    if debug:
                        print("msg caught; stack = {!r}".format(stack))
                # Done processing. Pluck out the first portion so we can
                # continue processing, clean it up a bit, then add the rest
                # to our waiting list.
                msg = stack.pop(0).rstrip()
                msglen = len(msg)
                # A little bit of touching up for the head of our next line.
                stack[0] = stack[0].lstrip()
                # extendleft() inserts elements *backwards*, so reverse
                # first to preserve order.
                stack.reverse()
                lexed.extendleft(stack)
                # Get 'msg' on the working list and finalize it for output.
                working.append(msg)
                curlen += msglen
                # NOTE: Sentinel marking that we've already worked on this.
                msg = None

            # Clear the slate: close the still-open ctags, flush working to
            # output, then reset statistics. Open ctags get re-added to the
            # next line's start, since they're still open!
            # Add proper CTagEnd objects ourselves, with ref set to our
            # format ('pchum' matches on both sides). Rendering isn't
            # destructive, so sharing one object is fine; closing color
            # tags can't be compressed, so settings don't matter here.
            cte = lexercon.CTagEnd("</c>", fmt, None)
            working.extend([cte] * len(open_ctags))
            if debug:
                print("\tRound {0} linebreak: Added {1} closing ctags".format(
                    rounds, len(open_ctags)
                    ))

            # Run it through the lexer again to render it.
            working = ''.join(kxpclexer.list_convert(working))
            if debug:
                print("\tRound {0} add: len == {1} (of {2})".format(
                    rounds, len(working), maxlen
                    ))
            # Now that it's done the work for us, append and resume.
            output.append(working)

            if msg is not None:
                # We didn't catch it earlier for preprocessing. Toss it on
                # the stack so it goes through the loop again; hopefully
                # it'll fit on the next line, or split.
                lexed.appendleft(msg)
            # Fall through to the next case.
            if lexed:
                # We have more to go.
                # Reset working, starting it with the unclosed ctags.
                if debug:
                    print("\tRound {0}: More to lex".format(rounds))
                working = open_ctags[:]
                # Calculate the length of the starting tags, add it before
                # anything else.
                curlen = sum(len(tag.convert(fmt)) for tag in working)
            else:
                # Nothing left in lexed - and if msg wasn't None, we ADDED
                # it to lexed, so getting here means we already flushed the
                # last of what we had.
                if debug or True:
                    # This probably shouldn't happen, and if it does, I
                    # want to know if it *works* properly.
                    # NOTE(review): 'or True' forces this print on - looks
                    # like leftover instrumentation.
                    print("\tRound {0}: No more to lex".format(rounds))
                # Clean up, just in case.
                working = []
                open_ctags = []
                curlen = 0
                # TODO: What does this mean for the ctags that'd be
                # applied? Will this break parsing? It shouldn't, but....
                continue
            # We got here because we have more to process, so head back to
            # resume.
            continue

        # Normal tag processing. Considerably less intensive than the text
        # processing above.
        if isinstance(msg, lexercon.CTagEnd):
            # Check for Ends first (subclassing issue).
            if len(open_ctags) > 0:
                # We just closed a ctag.
                open_ctags.pop()
            else:
                # Ignore it.
                # NOTE: I realize this is going to screw up something I do,
                # but it also stops us from screwing up Chumdroid, so...
                continue
        elif isinstance(msg, lexercon.CTag):
            # It's an opening color tag!
            open_ctags.append(msg)
            # TODO: Check whether there's enough room for the lexemes
            # *after* this one; if not, shunt it back into lexed and flush
            # working into output.

        # Add it to the working message.
        working.append(msg)

        # Record the additional length (end-tag room is handled by
        # efflenleft()).
        curlen += msglen
    else:
        # (while-else) Out of things to add; flush working one last time.
        working = kxpclexer.list_convert(working)
        if len(working) > 0:
            if debug:
                print("Adding end trails: {!r}".format(working))
            working = ''.join(working)
            output.append(working)

    # We're...done?
    return output
|
|
|
|
|
2011-04-10 05:22:06 -04:00
|
|
|
def splitMessage(msg, format="ctag"):
    """Splits message if it is too long.
    This is the older version of this function, kept for compatibility.
    It will eventually be phased out."""
    # Split long text lines: cut any string segment over 200 characters
    # into chunks of 200. (Tag objects pass through untouched.)
    buf = []
    for o in msg:
        if type(o) in [str, str] and len(o) > 200:
            for i in range(0, len(o), 200):
                buf.append(o[i:i+200])
        else:
            # Add non-text tags or 'short' segments without processing.
            buf.append(o)
    # Copy the iterative variable.
    msg = list(buf)
    # This is the working segment.
    working = []
    # Keep a stack of open color tags.
    cbegintags = []
    # This is the final result.
    output = []
    # NOTE(review): leftover debug output - consider removing.
    print(repr(msg))
    for o in msg:
        oldctag = None
        # Tentatively add to the working segment.
        working.append(o)
        if type(o) is colorBegin:
            # Track the open tag.
            cbegintags.append(o)
        elif type(o) is colorEnd:
            try:
                # Remove the last open tag, since we've closed it.
                oldctag = cbegintags.pop()
            except IndexError:
                pass
        # THIS part is the part I don't get. It doesn't seem to catch
        # ending or beginning ctags properly; it's pretty much broken,
        # likely due to the line below.
        # yeah normally i'd do binary search but im lazy
        # Length of the converted line plus the closers ("</c>", 4 chars
        # each) that the still-open tags would require.
        msglen = len(convertTags(working, format)) + 4*(len(cbegintags))
        # Previously this used 400.
        if msglen > _max_msg_len():
            # Line is too long: back the element out and close the line.
            working.pop()
            if type(o) is colorBegin:
                cbegintags.pop()
            elif type(o) is colorEnd and oldctag is not None:
                cbegintags.append(oldctag)
            if len(working) == 0:
                # A single element already exceeds the limit.
                output.append([o])
            else:
                tmp = []
                # Close the open tags on this line; reopen them (tmp)
                # at the start of the next.
                for color in cbegintags:
                    working.append(colorEnd("</c>"))
                    tmp.append(color)
                output.append(working)
                if type(o) is colorBegin:
                    cbegintags.append(o)
                elif type(o) is colorEnd:
                    try:
                        cbegintags.pop()
                    except IndexError:
                        pass
                # The overflowing element starts the next line.
                tmp.append(o)
                working = tmp

    if len(working) > 0:
        # Add any stragglers.
        output.append(working)
    return output
|
2011-05-10 02:33:59 -04:00
|
|
|
|
2016-11-19 13:49:40 -05:00
|
|
|
def _is_ooc(msg, strict=True):
    """Check if a line is OOC. Note that Pesterchum *is* kind enough to strip
    trailing spaces for us, even in the older versions, but we don't do that in
    this function. (It's handled by the calling one.)"""
    found = _oocre.match(msg)
    if not found:
        return False
    if not strict:
        # The regex matched and we're supposed to be lazy; good enough.
        return True
    # Strict mode: the doubled braces must be a proper matching pair.
    opener, closer = found.group(1, 2)
    return (opener, closer) in (('(', ')'), ('[', ']'), ('{', '}'))
|
|
|
|
|
2016-11-18 03:37:22 -05:00
|
|
|
def kxhandleInput(ctx, text=None, flavor=None):
    """The function that user input that should be sent to the server is routed
    through. Handles lexing, splitting, and quirk application, as well as
    sending.

    ctx is the calling window object ('self' at the call site). text is the
    raw input; if None it is fetched from ctx.textInput. flavor selects the
    handling logic and must be one of 'convo', 'menus', or 'memos'."""
    # TODO: This needs a 'dryrun' option, and ways to specify alternative
    # outputs and such, if it's to handle all of these.
    # Flavor is important for logic, ctx is 'self'.
    # Flavors are 'convo', 'menus', and 'memos' - so named after the source
    # files for the original sentMessage variants.

    if flavor is None:
        raise ValueError("A flavor is needed to determine suitable logic!")

    if text is None:
        # Fetch the raw text from the input box.
        text = ctx.textInput.text()
        # NOTE(review): this re-fetches from the input box instead of
        # converting the value fetched above - presumably a Python 3 str()
        # port artifact. Harmless while text is always None here, but
        # confirm before relying on the `text` parameter.
        text = str(ctx.textInput.text())

    # Preprocessing stuff.
    msg = text.strip()
    if msg == "" or msg.startswith("PESTERCHUM:"):
        # We don't allow users to send system messages. There's also no
        # point if they haven't entered anything.
        return

    # Add the *raw* text to our history.
    ctx.history.add(text)

    # Detect out-of-character markers, e.g. "(( ... ))".
    oocDetected = _is_ooc(msg, strict=True)

    if flavor != "menus":
        # Determine if we should be OOC.
        is_ooc = ctx.ooc or oocDetected
        # Determine if the line actually *is* OOC.
        if is_ooc and not oocDetected:
            # If we're supposed to be OOC, apply it artificially.
            msg = "(( {} ))".format(msg)
        # Also, quirk stuff.
        should_quirk = ctx.applyquirks
    else:
        # 'menus' means a quirk tester window, which doesn't have an OOC
        # variable, so we assume it's not OOC.
        # It also invariably has quirks enabled, so there's no setting for
        # that.
        is_ooc = False
        should_quirk = True

    # I'm pretty sure that putting a space before a /me *should* break the
    # /me, but in practice, that's not the case.
    is_action = msg.startswith("/me")

    # Begin message processing.
    # We use 'text' despite its lack of processing because it's simpler.
    # Quirks are skipped entirely for actions and OOC lines.
    if should_quirk and not (is_action or is_ooc):
        if flavor != "menus":
            # Fetch the quirks we'll have to apply.
            quirks = ctx.mainwindow.userprofile.quirks
        else:
            # The quirk testing window uses a different set.
            quirks = dataobjs.pesterQuirks(ctx.parent().testquirks())

        try:
            # Do quirk things. (Ugly, but it'll have to do for now.)
            # TODO: Look into the quirk system, modify/redo it.
            # Gotta encapsulate or we might parse the wrong way.
            msg = quirks.apply([msg])
        except Exception as err:
            # Tell the user we couldn't do quirk things.
            # TODO: Include the actual error...and the quirk it came from?
            msgbox = QtWidgets.QMessageBox()
            msgbox.setText("Whoa there! There seems to be a problem.")
            err_info = "A quirk seems to be having a problem. (Error: {!s})"
            err_info = err_info.format(err)
            msgbox.setInformativeText(err_info)
            msgbox.exec_()
            return

    # Debug output.
    try:
        # Turns out that Windows consoles can't handle unicode, heh...who'da
        # thunk. We have to repr() this, as such.
        print(repr(msg))
    except Exception as err:
        print("(Couldn't print processed message: {!s})".format(err))

    # karxi: We have a list...but I'm not sure if we ever get anything else, so
    # best to play it safe. I may remove this during later refactoring.
    if isinstance(msg, list):
        for i, m in enumerate(msg):
            if isinstance(m, lexercon.Chunk):
                # NOTE: KLUDGE. Filters out old PChum objects.
                # karxi: This only works because I went in and subtyped them to
                # an object type I provided - just so I could pluck them out
                # later.
                msg[i] = m.convert(format="ctag")
        msg = ''.join(msg)

    # Quirks have been applied. Lex the messages (finally).
    msg = kxlexMsg(msg)

    # Debug output.
    try:
        print(repr(msg))
    except Exception as err:
        print("(Couldn't print lexed message: {!s})".format(err))

    # Remove coloring if this is a /me!
    if is_action:
        # Filter out formatting specifiers (just ctags, at the moment).
        msg = [m for m in msg if not isinstance(m,
            (lexercon.CTag, lexercon.CTagEnd)
            )]
        # We'll also add /me to the beginning of any new messages, later.

    # Put what's necessary in before splitting.
    # Fetch our time if we're producing this for a memo.
    if flavor == "memos":
        if ctx.time.getTime() == None:
            ctx.sendtime()
        grammar = ctx.time.getGrammar()
        # Oh, great...there's a parsing error to work around. Times are added
        # automatically when received, but not when added directly?... I'll
        # have to unify that.
        # TODO: Fix parsing disparity.
        initials = ctx.mainwindow.profile().initials()
        colorcmd = ctx.mainwindow.profile().colorcmd()
        # We'll use those later.

    # Split the messages so we don't go over the buffer and lose text.
    maxlen = _max_msg_len()
    # Since we have to do some post-processing, we need to adjust the maximum
    # length we can use.
    if flavor == "convo":
        # The old Pesterchum setup used 200 for this.
        maxlen = 300
    elif flavor == "memos":
        # Use the max, with some room added so we can make additions.
        # The additions are theoretically 23 characters long, max.
        maxlen -= 25

    # Split the message. (Finally.)
    # This is also set up to parse it into strings.
    lexmsgs = kxsplitMsg(msg, "pchum", maxlen=maxlen)
    # Strip off the excess.
    for i, m in enumerate(lexmsgs):
        lexmsgs[i] = m.strip()

    # Pester message handling.
    if flavor == "convo":
        # if ceased, rebegin
        if hasattr(ctx, 'chumopen') and not ctx.chumopen:
            ctx.mainwindow.newConvoStarted.emit(
                QString(ctx.title()), True
            )
            ctx.setChumOpen(True)

    # Post-process and send the messages.
    for i, lm in enumerate(lexmsgs):
        # If we're working with an action and we split, it should have /mes.
        if is_action and i > 0:
            # Add them post-split.
            lm = "/me " + lm
            # NOTE: No reason to reassign for now, but...why not?
            lexmsgs[i] = lm

        # Copy the lexed result.
        # Note that memos have to separate processing here. The adds and sends
        # should be kept to the end because of that, near the emission.
        clientMsg = copy(lm)
        serverMsg = copy(lm)

        # Memo-specific processing.
        if flavor == "memos" and not is_action:
            # Quirks were already applied, so get the prefix/postfix stuff
            # ready.
            # We fetched the information outside of the loop, so just
            # construct the messages.
            clientMsg = "<c={1}>{2}{3}{4}: {0}</c>".format(
                clientMsg, colorcmd, grammar.pcf, initials, grammar.number
            )
            # Not sure if this needs a space at the end...?
            serverMsg = "<c={1}>{2}: {0}</c>".format(
                serverMsg, colorcmd, initials)

        ctx.addMessage(clientMsg, True)
        if flavor != "menus":
            # If we're not doing quirk testing, actually send.
            ctx.messageSent.emit(serverMsg, ctx.title())

    # Clear the input.
    ctx.textInput.setText("")
|
2011-05-10 02:33:59 -04:00
|
|
|
|
2011-02-05 12:17:33 -05:00
|
|
|
|
|
|
|
def addTimeInitial(string, grammar):
    """Splice a memo time-grammar prefix and number around the initials of a
    "<c=...>XX: message" line. Returns the string unchanged if it doesn't
    look like such a line."""
    colon_at = string.find(":")
    tag_end = string.find(">")
    # support Doc Scratch mode
    looks_valid = 0 <= tag_end <= 16 and 0 <= colon_at <= 17
    if not looks_valid:
        return string
    return (string[:tag_end + 1]
            + grammar.pcf
            + string[tag_end + 1:colon_at]
            + grammar.number
            + string[colon_at:])
|
|
|
|
|
|
|
|
def timeProtocol(cmd):
    """Parse a memo time command into a timedelta offset.

    The leading character gives the direction: "F" = future (positive),
    "P" = past (negative), "?" = unknown (returns mysteryTime(0)). The rest
    is an H:MM offset; malformed values fall back to zero.
    """
    direction = cmd[0]
    if direction == "?":
        return mysteryTime(0)
    # Strip everything except digits and the H:MM separator.
    digits = re.sub("[^0-9:]", "", cmd[1:])
    try:
        parts = [int(x) for x in digits.split(":")]
    except ValueError:
        # Garbage in -> zero offset out.
        parts = [0, 0]
    if len(parts) < 2:
        # BUG FIX: a bare hour value like "F5" used to raise IndexError on
        # the minutes lookup; treat missing minutes as zero.
        parts.append(0)
    timed = timedelta(0, parts[0] * 3600 + parts[1] * 60)
    if direction == "P":
        timed = timed * -1
    return timed
|
2011-02-05 13:56:25 -05:00
|
|
|
|
|
|
|
def timeDifference(td):
    """Render a timedelta as memo-style text, e.g. "5 MINUTES AGO" or
    "2:30 HOURS FROM NOW". A mysteryTime renders as "??:?? FROM ????"."""
    if type(td) is mysteryTime:
        return "??:?? FROM ????"
    when = "AGO" if td < timedelta(0) else "FROM NOW"
    magnitude = abs(td)
    minutes = (magnitude.days * 86400 + magnitude.seconds) // 60
    hours, leftoverminutes = divmod(minutes, 60)
    if magnitude == timedelta(0):
        return "RIGHT NOW"
    if magnitude < timedelta(0, 3600):
        # Under an hour: report whole minutes.
        unit = "MINUTE" if minutes == 1 else "MINUTES"
        return "%d %s %s" % (minutes, unit, when)
    if magnitude < timedelta(0, 3600 * 100):
        # Under 100 hours: report H:MM.
        singular = hours == 1 and leftoverminutes == 0
        unit = "HOUR" if singular else "HOURS"
        return "%d:%02d %s %s" % (hours, leftoverminutes, unit, when)
    # 100+ hours: whole hours only.
    return "%d HOURS %s" % (hours, when)
|
2011-02-08 17:47:07 -05:00
|
|
|
|
2011-04-14 03:07:05 -04:00
|
|
|
def nonerep(text):
    """Identity replacement: return *text* unchanged. Serves as the default
    leaf function in quirk regexp-function parsing."""
    return text
|
|
|
|
|
|
|
|
class parseLeaf(object):
    """A node in the parsed quirk-function tree.

    Holds an ordered list of child nodes (plain strings, backreference
    markers, or nested parseLeaf instances) plus a function that is applied
    to the concatenated expansion of those children."""

    def __init__(self, function, parent):
        # Children, in order of appearance in the pattern.
        self.nodes = []
        self.function = function
        self.parent = parent

    def append(self, node):
        """Add a child node (string, backreference, or parseLeaf)."""
        self.nodes.append(node)

    def expand(self, mo):
        """Expand every child against the regex match *mo*, join the pieces,
        and run the result through this leaf's function."""
        pieces = []
        for node in self.nodes:
            if type(node) == parseLeaf:
                # Nested function call: expand recursively.
                pieces.append(node.expand(mo))
            elif type(node) == backreference:
                # Substitute the referenced regex group.
                pieces.append(mo.group(int(node.number)))
            else:
                # Literal text.
                pieces.append(node)
        return self.function("".join(pieces))
|
2016-11-18 03:37:22 -05:00
|
|
|
|
2011-04-14 03:07:05 -04:00
|
|
|
class backreference(object):
    """Marker for a regex group backreference inside a parsed quirk
    function. ``number`` holds the group number (kept as a string by the
    parser)."""

    def __init__(self, number):
        self.number = number

    def __str__(self):
        # Returns the stored value as-is; the parser always stores a string.
        return self.number
|
|
|
|
|
|
|
|
def parseRegexpFunctions(to):
    """Parse a quirk "to" pattern into a tree of parseLeaf nodes.

    Function-call tokens (from the quirkloader registry) open nested leaves,
    ")" closes the current one, and group backreferences become
    backreference markers; everything else is kept as literal text."""
    root = parseLeaf(nonerep, None)
    current = root
    pos = 0
    functiondict = quirkloader.quirks
    while pos < len(to):
        remainder = to[pos:]
        match = _functionre.search(remainder)
        if match is None:
            # No more function-ish tokens; the rest is literal text.
            current.append(to[pos:])
            pos = len(to)
            continue
        if match.start() > 0:
            # Keep the literal text that precedes the token.
            current.append(to[pos:pos + match.start()])
        group = _groupre.search(match.group())
        if group is not None:
            # A backreference like \1.
            current.append(backreference(group.group(1)))
        elif match.group()[:-1] in list(functiondict.keys()):
            # A known quirk function: descend into a new leaf.
            leaf = parseLeaf(functiondict[match.group()[:-1]], current)
            current.append(leaf)
            current = leaf
        elif match.group() == ")":
            if current.parent is not None:
                # Close the current function call.
                current = current.parent
            else:
                # Unmatched close paren: treat it as literal text.
                current.append(")")
        pos = match.end() + pos
    return root
|
2011-05-10 02:33:59 -04:00
|
|
|
|
2011-04-14 03:07:05 -04:00
|
|
|
|
2011-02-23 06:06:00 -05:00
|
|
|
def img2smiley(string):
    """Convert smiley <img> tags back into their text codes (e.g. ":tab:")
    using the reverse_smiley lookup table."""
    text = str(string)
    return re.sub(
        r'<img src="smilies/(\S+)" />',
        lambda match: reverse_smiley[match.group(1)],
        text,
    )
|
2011-02-08 17:47:07 -05:00
|
|
|
|
|
|
|
# Mapping of smiley text codes to their image filenames (under smilies/).
# NOTE: insertion order matters - _smilere is built by "|"-joining these
# keys, so earlier entries take precedence in regex alternation.
smiledict = {
    ":rancorous:": "pc_rancorous.png",
    ":apple:": "apple.png",
    ":bathearst:": "bathearst.png",
    ":cathearst:": "cathearst.png",
    ":woeful:": "pc_bemused.png",
    ":sorrow:": "blacktear.png",
    ":pleasant:": "pc_pleasant.png",
    ":blueghost:": "blueslimer.gif",
    ":slimer:": "slimer.gif",
    ":candycorn:": "candycorn.png",
    ":cheer:": "cheer.gif",
    ":duhjohn:": "confusedjohn.gif",
    ":datrump:": "datrump.png",
    ":facepalm:": "facepalm.png",
    ":bonk:": "headbonk.gif",
    ":mspa:": "mspa_face.png",
    ":gun:": "mspa_reader.gif",
    ":cal:": "lilcal.png",
    ":amazedfirman:": "pc_amazedfirman.png",
    ":amazed:": "pc_amazed.png",
    ":chummy:": "pc_chummy.png",
    ":cool:": "pccool.png",
    ":smooth:": "pccool.png",
    # NOTE(review): ":distraughtfirman" is missing its trailing colon; this
    # matches long-standing upstream behavior - confirm before "fixing", as
    # other clients may expect the typo'd form.
    ":distraughtfirman": "pc_distraughtfirman.png",
    ":distraught:": "pc_distraught.png",
    ":insolent:": "pc_insolent.png",
    ":bemused:": "pc_bemused.png",
    ":3:": "pckitty.png",
    ":mystified:": "pc_mystified.png",
    ":pranky:": "pc_pranky.png",
    ":tense:": "pc_tense.png",
    ":record:": "record.gif",
    ":squiddle:": "squiddle.gif",
    ":tab:": "tab.gif",
    ":beetip:": "theprofessor.png",
    ":flipout:": "weasel.gif",
    ":befuddled:": "what.png",
    ":pumpkin:": "whatpumpkin.png",
    ":trollcool:": "trollcool.png",
    ":jadecry:": "jadespritehead.gif",
    ":ecstatic:": "ecstatic.png",
    ":relaxed:": "relaxed.png",
    ":discontent:": "discontent.png",
    ":devious:": "devious.png",
    ":sleek:": "sleek.png",
    ":detestful:": "detestful.png",
    ":mirthful:": "mirthful.png",
    ":manipulative:": "manipulative.png",
    ":vigorous:": "vigorous.png",
    ":perky:": "perky.png",
    ":acceptant:": "acceptant.png",
    ":olliesouty:": "olliesouty.gif",
    ":billiards:": "poolballS.gif",
    ":billiardslarge:": "poolballL.gif",
    ":whatdidyoudo:": "whatdidyoudo.gif",
    ":brocool:": "pcstrider.png",
    ":trollbro:": "trollbro.png",
    ":playagame:": "saw.gif",
    ":trollc00l:": "trollc00l.gif",
    ":suckers:": "Suckers.gif",
    ":scorpio:": "scorpio.gif",
    ":shades:": "shades.png",
    }
|
2011-02-11 18:37:31 -05:00
|
|
|
|
2011-08-11 04:59:51 -04:00
|
|
|
if ostools.isOSXBundle():
    # The OS X bundle build can't animate gifs, so swap in the static .png
    # versions of each smiley.
    # BUG FIX: the original tested `graphic.find(".gif")`, whose return value
    # (-1 when absent, 0 at the start) is truthy for almost every string; use
    # a real substring test instead. Net behavior is unchanged because
    # replace() was a no-op for non-gif filenames.
    for emote in smiledict:
        graphic = smiledict[emote]
        if ".gif" in graphic:
            smiledict[emote] = graphic.replace(".gif", ".png")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
2021-03-23 17:36:43 -04:00
|
|
|
# Inverse lookup: image filename -> smiley text code.
reverse_smiley = {filename: code for code, filename in smiledict.items()}
# Regex matching any known smiley code (key order sets alternation priority).
_smilere = re.compile("|".join(smiledict.keys()))
|
2011-05-10 02:33:59 -04:00
|
|
|
|
|
|
|
class ThemeException(Exception):
    """Raised when a theme is missing a required entry.

    The offending value is kept on ``parameter`` and echoed by str()."""

    def __init__(self, value):
        self.parameter = value

    def __str__(self):
        return repr(self.parameter)
|
|
|
|
|
|
|
|
def themeChecker(theme):
    """Verify that *theme* provides every key Pesterchum requires.

    Raises ThemeException naming the first missing requirement; returns
    None when the theme is complete."""
    needs = [
        "main/size", "main/icon", "main/windowtitle", "main/style",
        "main/background-image", "main/menubar/style", "main/menu/menuitem",
        "main/menu/style", "main/menu/selected", "main/close/image",
        "main/close/loc", "main/minimize/image", "main/minimize/loc",
        "main/menu/loc", "main/menus/client/logviewer",
        "main/menus/client/addgroup", "main/menus/client/options",
        "main/menus/client/exit", "main/menus/client/userlist",
        "main/menus/client/memos", "main/menus/client/import",
        "main/menus/client/idle", "main/menus/client/reconnect",
        "main/menus/client/_name", "main/menus/profile/quirks",
        "main/menus/profile/block", "main/menus/profile/color",
        "main/menus/profile/switch", "main/menus/profile/_name",
        "main/menus/help/about", "main/menus/help/_name", "main/moodlabel/text",
        "main/moodlabel/loc", "main/moodlabel/style", "main/moods",
        "main/addchum/style", "main/addchum/text", "main/addchum/size",
        "main/addchum/loc", "main/pester/text", "main/pester/size",
        "main/pester/loc", "main/block/text", "main/block/size", "main/block/loc",
        "main/mychumhandle/label/text", "main/mychumhandle/label/loc",
        "main/mychumhandle/label/style", "main/mychumhandle/handle/loc",
        "main/mychumhandle/handle/size", "main/mychumhandle/handle/style",
        "main/mychumhandle/colorswatch/size", "main/mychumhandle/colorswatch/loc",
        "main/defaultmood", "main/chums/size", "main/chums/loc",
        "main/chums/style", "main/menus/rclickchumlist/pester",
        "main/menus/rclickchumlist/removechum",
        "main/menus/rclickchumlist/blockchum", "main/menus/rclickchumlist/viewlog",
        "main/menus/rclickchumlist/removegroup",
        "main/menus/rclickchumlist/renamegroup",
        "main/menus/rclickchumlist/movechum", "convo/size",
        "convo/tabwindow/style", "convo/tabs/tabstyle", "convo/tabs/style",
        "convo/tabs/selectedstyle", "convo/style", "convo/margins",
        "convo/chumlabel/text", "convo/chumlabel/style", "convo/chumlabel/align/h",
        "convo/chumlabel/align/v", "convo/chumlabel/maxheight",
        "convo/chumlabel/minheight", "main/menus/rclickchumlist/quirksoff",
        "main/menus/rclickchumlist/addchum", "main/menus/rclickchumlist/blockchum",
        "main/menus/rclickchumlist/unblockchum",
        "main/menus/rclickchumlist/viewlog", "main/trollslum/size",
        "main/trollslum/style", "main/trollslum/label/text",
        "main/trollslum/label/style", "main/menus/profile/block",
        "main/chums/moods/blocked/icon", "convo/systemMsgColor",
        "convo/textarea/style", "convo/text/beganpester", "convo/text/ceasepester",
        "convo/text/blocked", "convo/text/unblocked", "convo/text/blockedmsg",
        "convo/text/idle", "convo/input/style", "memos/memoicon",
        "memos/textarea/style", "memos/systemMsgColor", "convo/text/joinmemo",
        "memos/input/style", "main/menus/rclickchumlist/banuser",
        "main/menus/rclickchumlist/opuser", "main/menus/rclickchumlist/voiceuser",
        "memos/margins", "convo/text/openmemo", "memos/size", "memos/style",
        "memos/label/text", "memos/label/style", "memos/label/align/h",
        "memos/label/align/v", "memos/label/maxheight", "memos/label/minheight",
        "memos/userlist/style", "memos/userlist/width", "memos/time/text/width",
        "memos/time/text/style", "memos/time/arrows/left",
        "memos/time/arrows/style", "memos/time/buttons/style",
        "memos/time/arrows/right", "memos/op/icon", "memos/voice/icon",
        "convo/text/closememo", "convo/text/kickedmemo",
        "main/chums/userlistcolor", "main/defaultwindow/style",
        "main/chums/moods", "main/chums/moods/chummy/icon", "main/menus/help/help",
        "main/menus/help/calsprite", "main/menus/help/nickserv", "main/menus/help/chanserv",
        "main/menus/rclickchumlist/invitechum", "main/menus/client/randen",
        "main/menus/rclickchumlist/memosetting", "main/menus/rclickchumlist/memonoquirk",
        "main/menus/rclickchumlist/memohidden", "main/menus/rclickchumlist/memoinvite",
        "main/menus/rclickchumlist/memomute", "main/menus/rclickchumlist/notes",
    ]

    for requirement in needs:
        try:
            # Only the lookup matters; the value itself is ignored.
            theme[requirement]
        except KeyError:
            raise ThemeException("Missing theme requirement: %s" % (requirement))
|