Removed unused and unreachable/dead code

Signed-off-by: Venkatesh Pitta <venkateshpitta@gmail.com>

parent 634a49cd6d
commit c548ff96a1
@@ -7,7 +7,7 @@ import sys
import traceback

matches = []
for root, dirnames, filenames in os.walk('src'):
for root, _, filenames in os.walk('src'):
for filename in fnmatch.filter(filenames, '*.py'):
matches.append(os.path.join(root, filename))

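For readers skimming the diff: the surviving code above walks a source tree collecting *.py paths and discards the unused directory list with `_`. A self-contained sketch of that idiom (the 'src' root is just a placeholder):

import fnmatch
import os

def find_python_files(root_dir='src'):
    # Collect every *.py file under root_dir; '_' marks the unused dirnames value.
    matches = []
    for root, _, filenames in os.walk(root_dir):
        for filename in fnmatch.filter(filenames, '*.py'):
            matches.append(os.path.join(root, filename))
    return matches
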
@@ -169,11 +169,9 @@ def testCompiler():
)

dist = Distribution()
dist.ext_modules = [bitmsghash]
cmd = build_ext(dist)
cmd.initialize_options()
cmd.finalize_options()
cmd.force = True
try:
cmd.run()
except CompileError:

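testCompiler() above drives distutils directly to check whether a C extension can be built. A minimal standalone sketch of that technique, with a made-up probe module standing in for bitmsghash:

from distutils.core import Distribution, Extension
from distutils.command.build_ext import build_ext
from distutils.errors import CompileError

probe = Extension('compilerprobe', sources=['probe.c'])  # hypothetical one-file extension
dist = Distribution()
dist.ext_modules = [probe]
cmd = build_ext(dist)
cmd.initialize_options()
cmd.finalize_options()
cmd.force = True
try:
    cmd.run()
    print("C compiler works")
except CompileError:
    print("C compiler missing or broken")
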
@@ -1,5 +1,5 @@
from math import ceil
from os import stat, getenv, path
from os import getenv, path
from pybloom import BloomFilter as BloomFilter1
from pybloomfilter import BloomFilter as BloomFilter2
import sqlite3

@@ -9,7 +9,6 @@ from time import time
conn = sqlite3.connect(path.join(getenv("HOME"), '.config/PyBitmessage/messages.dat'))

conn.text_factory = str
cur = conn.cursor()
rawlen = 0
itemcount = 0

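This script benchmarks two Bloom filter libraries against the PyBitmessage message store. A rough sketch of the workflow; the pybloom constructor arguments and the inventory table/column name are assumptions on my part:

import sqlite3
from os import getenv, path
from pybloom import BloomFilter  # assumed API: BloomFilter(capacity, error_rate), .add(), `in`

conn = sqlite3.connect(path.join(getenv("HOME"), '.config/PyBitmessage/messages.dat'))
conn.text_factory = str
cur = conn.cursor()

bloom = BloomFilter(capacity=1000000, error_rate=0.001)
itemcount = 0
last = None
for (item_hash,) in cur.execute("SELECT hash FROM inventory"):  # table/column assumed
    bloom.add(item_hash)
    itemcount += 1
    last = item_hash

print("%d hashes inserted" % itemcount)
if last is not None:
    print("last hash found again:", last in bloom)
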
@@ -1,4 +1,3 @@
import importlib
from os import listdir, path
from pprint import pprint
import sys

@@ -44,6 +44,5 @@ signal.signal(signal.SIGTERM, signal_handler)
bso = ctypes.CDLL(os.path.join("bitmsghash", "bitmsghash.so"))

bmpow = bso.BitmessagePOW
bmpow.restype = ctypes.c_ulonglong

_doCPoW(2**44, "")

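The lines above bind the compiled proof-of-work library through ctypes and declare its return type. The same CDLL/restype technique, shown on libm so it runs on most Unix systems (a generic illustration, not the bitmsghash binding itself):

import ctypes
import ctypes.util

libm = ctypes.CDLL(ctypes.util.find_library("m"))  # load a shared library by name
libm.sqrt.restype = ctypes.c_double                # declare the C return type
libm.sqrt.argtypes = [ctypes.c_double]             # and the argument types
print(libm.sqrt(2.0))                              # 1.4142135623730951
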
@@ -45,8 +45,6 @@ def sslHandshake(sock, server=False):
context = ssl.SSLContext(sslProtocolVersion())
context.set_ciphers(sslProtocolCiphers())
context.set_ecdh_curve("secp256k1")
context.check_hostname = False
context.verify_mode = ssl.CERT_NONE
context.options = ssl.OP_ALL | ssl.OP_NO_SSLv2 | ssl.OP_NO_SSLv3 | ssl.OP_SINGLE_ECDH_USE | ssl.OP_CIPHER_SERVER_PREFERENCE
sslSock = context.wrap_socket(sock, server_side = server, do_handshake_on_connect=False)
else:

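For context, sslHandshake builds a deliberately anonymous, verification-free TLS context for peer links. A sketch of the same set-up with the protocol and cipher string hard-coded for illustration (PyBitmessage derives both from helper functions):

import ssl

context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)   # placeholder protocol choice
context.set_ciphers("AECDH-AES256-SHA")        # anonymous ECDH, no certificates involved
context.set_ecdh_curve("secp256k1")
context.check_hostname = False
context.verify_mode = ssl.CERT_NONE
context.options = (ssl.OP_ALL | ssl.OP_NO_SSLv2 | ssl.OP_NO_SSLv3 |
                   ssl.OP_SINGLE_ECDH_USE | ssl.OP_CIPHER_SERVER_PREFERENCE)
# sslSock = context.wrap_socket(sock, server_side=server, do_handshake_on_connect=False)
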
packages/collectd/pybitmessagestatus.py (2 changes; normal file → executable file)

@@ -40,12 +40,10 @@ def read_callback():
for i in ["networkConnections", "numberOfPubkeysProcessed", "numberOfMessagesProcessed", "numberOfBroadcastsProcessed"]:
metric = collectd.Values()
metric.plugin = "pybitmessagestatus"
if i[0:6] == "number":
metric.type = 'counter'
else:
metric.type = 'gauge'
metric.type_instance = i.lower()
try:
metric.values = [clientStatus[i]]
except:

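The read callback above feeds PyBitmessage statistics into collectd. A compact sketch of the same plugin shape, runnable only inside collectd's embedded Python interpreter; the statistic names and values here are invented:

import collectd  # provided by collectd's python plugin, not installable via pip

def read_callback():
    stats = {"networkconnections": 8}  # placeholder data
    for name, value in stats.items():
        metric = collectd.Values(plugin="pybitmessagestatus")
        metric.type = "gauge"
        metric.type_instance = name
        metric.dispatch(values=[value])

collectd.register_read(read_callback)
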
@@ -5,16 +5,6 @@ from binascii import hexlify, unhexlify
#from debug import logger

#There is another copy of this function in Bitmessagemain.py
def convertIntToString(n):
a = __builtins__.hex(n)
if a[-1:] == 'L':
a = a[:-1]
if (len(a) % 2) == 0:
return unhexlify(a[2:])
else:
return unhexlify('0'+a[2:])

ALPHABET = "123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz"

def encodeBase58(num, alphabet=ALPHABET):

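The hunk above drops the unused convertIntToString helper while keeping the Base58 machinery. For reference, Base58 encoding over that alphabet boils down to repeated division by 58 — a sketch of my own, not necessarily the project's exact implementation:

ALPHABET = "123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz"

def encode_base58(num, alphabet=ALPHABET):
    # Map successive remainders of division by 58 onto the alphabet.
    if num == 0:
        return alphabet[0]
    digits = []
    while num:
        num, rem = divmod(num, 58)
        digits.append(alphabet[rem])
    return ''.join(reversed(digits))

print(encode_base58(1234567890))  # 2t6V2H
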
src/api.py (173 changes)

@@ -56,7 +56,6 @@ class APIError(Exception):

class StoppableXMLRPCServer(SimpleXMLRPCServer):
allow_reuse_address = True

def serve_forever(self):
while state.shutdown == 0:

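StoppableXMLRPCServer overrides serve_forever so the API loop can watch PyBitmessage's shutdown flag. The same pattern in self-contained form, with a plain attribute standing in for state.shutdown:

from SimpleXMLRPCServer import SimpleXMLRPCServer  # xmlrpc.server in Python 3

class StoppableXMLRPCServer(SimpleXMLRPCServer):
    allow_reuse_address = True
    stop_requested = False  # stands in for state.shutdown

    def serve_forever(self):
        # Handle one request per iteration so the stop flag is re-checked regularly.
        while not self.stop_requested:
            self.handle_request()
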
@ -68,79 +67,79 @@ class StoppableXMLRPCServer(SimpleXMLRPCServer):
|
|||
# http://code.activestate.com/recipes/501148-xmlrpc-serverclient-which-does-cookie-handling-and/
|
||||
class MySimpleXMLRPCRequestHandler(SimpleXMLRPCRequestHandler):
|
||||
|
||||
def do_POST(self):
|
||||
# Handles the HTTP POST request.
|
||||
# Attempts to interpret all HTTP POST requests as XML-RPC calls,
|
||||
# which are forwarded to the server's _dispatch method for handling.
|
||||
# def do_POST(self):
|
||||
# # Handles the HTTP POST request.
|
||||
# # Attempts to interpret all HTTP POST requests as XML-RPC calls,
|
||||
# # which are forwarded to the server's _dispatch method for handling.
|
||||
|
||||
# Note: this method is the same as in SimpleXMLRPCRequestHandler,
|
||||
# just hacked to handle cookies
|
||||
# # Note: this method is the same as in SimpleXMLRPCRequestHandler,
|
||||
# # just hacked to handle cookies
|
||||
|
||||
# Check that the path is legal
|
||||
if not self.is_rpc_path_valid():
|
||||
self.report_404()
|
||||
return
|
||||
# # Check that the path is legal
|
||||
# if not self.is_rpc_path_valid():
|
||||
# self.report_404()
|
||||
# return
|
||||
|
||||
try:
|
||||
# Get arguments by reading body of request.
|
||||
# We read this in chunks to avoid straining
|
||||
# socket.read(); around the 10 or 15Mb mark, some platforms
|
||||
# begin to have problems (bug #792570).
|
||||
max_chunk_size = 10 * 1024 * 1024
|
||||
size_remaining = int(self.headers["content-length"])
|
||||
L = []
|
||||
while size_remaining:
|
||||
chunk_size = min(size_remaining, max_chunk_size)
|
||||
L.append(self.rfile.read(chunk_size))
|
||||
size_remaining -= len(L[-1])
|
||||
data = ''.join(L)
|
||||
# try:
|
||||
# # Get arguments by reading body of request.
|
||||
# # We read this in chunks to avoid straining
|
||||
# # socket.read(); around the 10 or 15Mb mark, some platforms
|
||||
# # begin to have problems (bug #792570).
|
||||
# max_chunk_size = 10 * 1024 * 1024
|
||||
# size_remaining = int(self.headers["content-length"])
|
||||
# L = []
|
||||
# while size_remaining:
|
||||
# chunk_size = min(size_remaining, max_chunk_size)
|
||||
# L.append(self.rfile.read(chunk_size))
|
||||
# size_remaining -= len(L[-1])
|
||||
# data = ''.join(L)
|
||||
|
||||
# In previous versions of SimpleXMLRPCServer, _dispatch
|
||||
# could be overridden in this class, instead of in
|
||||
# SimpleXMLRPCDispatcher. To maintain backwards compatibility,
|
||||
# check to see if a subclass implements _dispatch and dispatch
|
||||
# using that method if present.
|
||||
response = self.server._marshaled_dispatch(
|
||||
data, getattr(self, '_dispatch', None)
|
||||
)
|
||||
except: # This should only happen if the module is buggy
|
||||
# internal error, report as HTTP server error
|
||||
self.send_response(500)
|
||||
self.end_headers()
|
||||
else:
|
||||
# got a valid XML RPC response
|
||||
self.send_response(200)
|
||||
self.send_header("Content-type", "text/xml")
|
||||
self.send_header("Content-length", str(len(response)))
|
||||
# # In previous versions of SimpleXMLRPCServer, _dispatch
|
||||
# # could be overridden in this class, instead of in
|
||||
# # SimpleXMLRPCDispatcher. To maintain backwards compatibility,
|
||||
# # check to see if a subclass implements _dispatch and dispatch
|
||||
# # using that method if present.
|
||||
# response = self.server._marshaled_dispatch(
|
||||
# data, getattr(self, '_dispatch', None)
|
||||
# )
|
||||
# except: # This should only happen if the module is buggy
|
||||
# # internal error, report as HTTP server error
|
||||
# self.send_response(500)
|
||||
# self.end_headers()
|
||||
# else:
|
||||
# # got a valid XML RPC response
|
||||
# self.send_response(200)
|
||||
# self.send_header("Content-type", "text/xml")
|
||||
# self.send_header("Content-length", str(len(response)))
|
||||
|
||||
# HACK :start -> sends cookies here
|
||||
if self.cookies:
|
||||
for cookie in self.cookies:
|
||||
self.send_header('Set-Cookie', cookie.output(header=''))
|
||||
# HACK :end
|
||||
# # HACK :start -> sends cookies here
|
||||
# if self.cookies:
|
||||
# for cookie in self.cookies:
|
||||
# self.send_header('Set-Cookie', cookie.output(header=''))
|
||||
# # HACK :end
|
||||
|
||||
self.end_headers()
|
||||
self.wfile.write(response)
|
||||
# self.end_headers()
|
||||
# self.wfile.write(response)
|
||||
|
||||
# shut down the connection
|
||||
self.wfile.flush()
|
||||
self.connection.shutdown(1)
|
||||
# # shut down the connection
|
||||
# self.wfile.flush()
|
||||
# self.connection.shutdown(1)
|
||||
|
||||
def APIAuthenticateClient(self):
|
||||
if 'Authorization' in self.headers:
|
||||
# handle Basic authentication
|
||||
(enctype, encstr) = self.headers.get('Authorization').split()
|
||||
(emailid, password) = encstr.decode('base64').split(':')
|
||||
if emailid == BMConfigParser().get('bitmessagesettings', 'apiusername') and password == BMConfigParser().get('bitmessagesettings', 'apipassword'):
|
||||
return True
|
||||
else:
|
||||
return False
|
||||
else:
|
||||
logger.warn('Authentication failed because header lacks Authentication field')
|
||||
time.sleep(2)
|
||||
return False
|
||||
# def APIAuthenticateClient(self):
|
||||
# if 'Authorization' in self.headers:
|
||||
# # handle Basic authentication
|
||||
# (_, encstr) = self.headers.get('Authorization').split()
|
||||
# (emailid, password) = encstr.decode('base64').split(':')
|
||||
# if emailid == BMConfigParser().get('bitmessagesettings', 'apiusername') and password == BMConfigParser().get('bitmessagesettings', 'apipassword'):
|
||||
# return True
|
||||
# else:
|
||||
# return False
|
||||
# else:
|
||||
# logger.warn('Authentication failed because header lacks Authentication field')
|
||||
# time.sleep(2)
|
||||
# return False
|
||||
|
||||
return False
|
||||
# return False
|
||||
|
||||
def _decode(self, text, decode_type):
|
||||
try:
|
||||
|
@ -445,7 +444,7 @@ class MySimpleXMLRPCRequestHandler(SimpleXMLRPCRequestHandler):
|
|||
if len(addressGeneratorReturnValue) == 0:
|
||||
raise APIError(24, 'Chan address is already present.')
|
||||
#TODO: this variable is not used to anything
|
||||
createdAddress = addressGeneratorReturnValue[0] # in case we ever want it for anything.
|
||||
#createdAddress = addressGeneratorReturnValue[0] # in case we ever want it for anything, uncomment
|
||||
return "success"
|
||||
|
||||
def HandleLeaveChan(self, params):
|
||||
|
@ -1042,29 +1041,29 @@ class MySimpleXMLRPCRequestHandler(SimpleXMLRPCRequestHandler):
|
|||
handlers['deleteAndVacuum'] = HandleDeleteAndVacuum
|
||||
handlers['shutdown'] = HandleShutdown
|
||||
|
||||
def _handle_request(self, method, params):
|
||||
if (self.handlers.has_key(method)):
|
||||
return self.handlers[method](self, params)
|
||||
else:
|
||||
raise APIError(20, 'Invalid method: %s' % method)
|
||||
# def _handle_request(self, method, params):
|
||||
# if (self.handlers.has_key(method)):
|
||||
# return self.handlers[method](self, params)
|
||||
# else:
|
||||
# raise APIError(20, 'Invalid method: %s' % method)
|
||||
|
||||
def _dispatch(self, method, params):
|
||||
self.cookies = []
|
||||
# def _dispatch(self, method, params):
|
||||
# self.cookies = []
|
||||
|
||||
validuser = self.APIAuthenticateClient()
|
||||
if not validuser:
|
||||
time.sleep(2)
|
||||
return "RPC Username or password incorrect or HTTP header lacks authentication at all."
|
||||
# validuser = self.APIAuthenticateClient()
|
||||
# if not validuser:
|
||||
# time.sleep(2)
|
||||
# return "RPC Username or password incorrect or HTTP header lacks authentication at all."
|
||||
|
||||
try:
|
||||
return self._handle_request(method, params)
|
||||
except APIError as e:
|
||||
return str(e)
|
||||
except varintDecodeError as e:
|
||||
logger.error(e)
|
||||
return "API Error 0026: Data contains a malformed varint. Some details: %s" % e
|
||||
except Exception as e:
|
||||
logger.exception(e)
|
||||
return "API Error 0021: Unexpected API Failure - %s" % str(e)
|
||||
# try:
|
||||
# return self._handle_request(method, params)
|
||||
# except APIError as e:
|
||||
# return str(e)
|
||||
# except varintDecodeError as e:
|
||||
# logger.error(e)
|
||||
# return "API Error 0026: Data contains a malformed varint. Some details: %s" % e
|
||||
# except Exception as e:
|
||||
# logger.exception(e)
|
||||
# return "API Error 0021: Unexpected API Failure - %s" % str(e)
|
||||
|
||||
|
||||
|
|
|
@ -9,7 +9,6 @@
|
|||
|
||||
import os
|
||||
import sys
|
||||
import StringIO
|
||||
from textwrap import *
|
||||
|
||||
import time
|
||||
|
@ -35,7 +34,6 @@ import shutdown
|
|||
quit = False
|
||||
menutab = 1
|
||||
menu = ["Inbox", "Send", "Sent", "Your Identities", "Subscriptions", "Address Book", "Blacklist", "Network Status"]
|
||||
naptime = 100
|
||||
log = ""
|
||||
logpad = None
|
||||
inventorydata = 0
|
||||
|
@ -47,7 +45,6 @@ sentbox = []
|
|||
sentcur = 0
|
||||
addresses = []
|
||||
addrcur = 0
|
||||
addrcopy = 0
|
||||
subscriptions = []
|
||||
subcur = 0
|
||||
addrbook = []
|
||||
|
@ -71,7 +68,6 @@ class errLog:
|
|||
def flush(self):
|
||||
pass
|
||||
printlog = printLog()
|
||||
errlog = errLog()
|
||||
|
||||
|
||||
def cpair(a):
|
||||
|
|
|
@ -418,12 +418,12 @@ All parameters are optional.
|
|||
|
||||
|
||||
#TODO: nice function but no one is using this
|
||||
def getApiAddress(self):
|
||||
if not BMConfigParser().safeGetBoolean('bitmessagesettings', 'apienabled'):
|
||||
return None
|
||||
address = BMConfigParser().get('bitmessagesettings', 'apiinterface')
|
||||
port = BMConfigParser().getint('bitmessagesettings', 'apiport')
|
||||
return {'address':address,'port':port}
|
||||
# def getApiAddress(self):
|
||||
# if not BMConfigParser().safeGetBoolean('bitmessagesettings', 'apienabled'):
|
||||
# return None
|
||||
# address = BMConfigParser().get('bitmessagesettings', 'apiinterface')
|
||||
# port = BMConfigParser().getint('bitmessagesettings', 'apiport')
|
||||
# return {'address':address,'port':port}
|
||||
|
||||
|
||||
def main():
|
||||
|
|
|
@ -17,7 +17,7 @@ from bmconfigparser import BMConfigParser
|
|||
import defaults
|
||||
from namecoin import namecoinConnection
|
||||
from messageview import MessageView
|
||||
from migrationwizard import Ui_MigrationWizard
|
||||
# from migrationwizard import Ui_MigrationWizard
|
||||
from foldertree import (
|
||||
AccountMixin, Ui_FolderWidget, Ui_AddressWidget, Ui_SubscriptionWidget,
|
||||
MessageList_AddressWidget, MessageList_SubjectWidget,
|
||||
|
@ -280,10 +280,10 @@ class MyForm(settingsmixin.SMainWindow):
|
|||
_translate(
|
||||
"MainWindow", "Copy address to clipboard"),
|
||||
self.on_action_Clipboard)
|
||||
self.actionSpecialAddressBehavior = self.ui.addressContextMenuToolbar.addAction(
|
||||
_translate(
|
||||
"MainWindow", "Special address behavior..."),
|
||||
self.on_action_SpecialAddressBehaviorDialog)
|
||||
# self.actionSpecialAddressBehavior = self.ui.addressContextMenuToolbar.addAction(
|
||||
# _translate(
|
||||
# "MainWindow", "Special address behavior..."),
|
||||
# self.on_action_SpecialAddressBehaviorDialog)
|
||||
|
||||
self.ui.treeWidgetChans.setContextMenuPolicy(
|
||||
QtCore.Qt.CustomContextMenu)
|
||||
|
@ -851,12 +851,12 @@ class MyForm(settingsmixin.SMainWindow):
|
|||
self.appIndicatorShowOrHideWindow()
|
||||
|
||||
# unchecks the show item on the application indicator
|
||||
def appIndicatorHide(self):
|
||||
if self.actionShow is None:
|
||||
return
|
||||
if self.actionShow.isChecked():
|
||||
self.actionShow.setChecked(False)
|
||||
self.appIndicatorShowOrHideWindow()
|
||||
# def appIndicatorHide(self):
|
||||
# if self.actionShow is None:
|
||||
# return
|
||||
# if self.actionShow.isChecked():
|
||||
# self.actionShow.setChecked(False)
|
||||
# self.appIndicatorShowOrHideWindow()
|
||||
|
||||
def appIndicatorSwitchQuietMode(self):
|
||||
BMConfigParser().set(
|
||||
|
@ -1546,32 +1546,32 @@ class MyForm(settingsmixin.SMainWindow):
|
|||
elif dialog.radioButtonConfigureNetwork.isChecked():
|
||||
self.click_actionSettings()
|
||||
|
||||
def showMigrationWizard(self, level):
|
||||
self.migrationWizardInstance = Ui_MigrationWizard(["a"])
|
||||
if self.migrationWizardInstance.exec_():
|
||||
pass
|
||||
else:
|
||||
pass
|
||||
# def showMigrationWizard(self, level):
|
||||
# self.migrationWizardInstance = Ui_MigrationWizard(["a"])
|
||||
# if self.migrationWizardInstance.exec_():
|
||||
# pass
|
||||
# else:
|
||||
# pass
|
||||
|
||||
def changeEvent(self, event):
|
||||
if event.type() == QtCore.QEvent.LanguageChange:
|
||||
self.ui.retranslateUi(self)
|
||||
self.init_inbox_popup_menu(False)
|
||||
self.init_identities_popup_menu(False)
|
||||
self.init_chan_popup_menu(False)
|
||||
self.init_addressbook_popup_menu(False)
|
||||
self.init_subscriptions_popup_menu(False)
|
||||
self.init_sent_popup_menu(False)
|
||||
self.ui.blackwhitelist.init_blacklist_popup_menu(False)
|
||||
if event.type() == QtCore.QEvent.WindowStateChange:
|
||||
if self.windowState() & QtCore.Qt.WindowMinimized:
|
||||
if BMConfigParser().getboolean('bitmessagesettings', 'minimizetotray') and not 'darwin' in sys.platform:
|
||||
QtCore.QTimer.singleShot(0, self.appIndicatorHide)
|
||||
elif event.oldState() & QtCore.Qt.WindowMinimized:
|
||||
# The window state has just been changed to
|
||||
# Normal/Maximised/FullScreen
|
||||
pass
|
||||
# QtGui.QWidget.changeEvent(self, event)
|
||||
# def changeEvent(self, event):
|
||||
# if event.type() == QtCore.QEvent.LanguageChange:
|
||||
# self.ui.retranslateUi(self)
|
||||
# self.init_inbox_popup_menu(False)
|
||||
# self.init_identities_popup_menu(False)
|
||||
# self.init_chan_popup_menu(False)
|
||||
# self.init_addressbook_popup_menu(False)
|
||||
# self.init_subscriptions_popup_menu(False)
|
||||
# self.init_sent_popup_menu(False)
|
||||
# self.ui.blackwhitelist.init_blacklist_popup_menu(False)
|
||||
# if event.type() == QtCore.QEvent.WindowStateChange:
|
||||
# if self.windowState() & QtCore.Qt.WindowMinimized:
|
||||
# if BMConfigParser().getboolean('bitmessagesettings', 'minimizetotray') and not 'darwin' in sys.platform:
|
||||
# QtCore.QTimer.singleShot(0, self.appIndicatorHide)
|
||||
# elif event.oldState() & QtCore.Qt.WindowMinimized:
|
||||
# # The window state has just been changed to
|
||||
# # Normal/Maximised/FullScreen
|
||||
# pass
|
||||
# # QtGui.QWidget.changeEvent(self, event)
|
||||
|
||||
def __icon_activated(self, reason):
|
||||
if reason == QtGui.QSystemTrayIcon.Trigger:
|
||||
|
@ -2135,17 +2135,17 @@ class MyForm(settingsmixin.SMainWindow):
|
|||
self.updateStatusBar(_translate(
|
||||
"MainWindow", "Broadcast queued."))
|
||||
|
||||
def click_pushButtonLoadFromAddressBook(self):
|
||||
self.ui.tabWidget.setCurrentIndex(5)
|
||||
for i in range(4):
|
||||
time.sleep(0.1)
|
||||
self.statusbar.clearMessage()
|
||||
time.sleep(0.1)
|
||||
self.updateStatusBar(_translate(
|
||||
"MainWindow",
|
||||
"Right click one or more entries in your address book and"
|
||||
" select \'Send message to this address\'."
|
||||
))
|
||||
# def click_pushButtonLoadFromAddressBook(self):
|
||||
# self.ui.tabWidget.setCurrentIndex(5)
|
||||
# for i in range(4):
|
||||
# time.sleep(0.1)
|
||||
# self.statusbar.clearMessage()
|
||||
# time.sleep(0.1)
|
||||
# self.updateStatusBar(_translate(
|
||||
# "MainWindow",
|
||||
# "Right click one or more entries in your address book and"
|
||||
# " select \'Send message to this address\'."
|
||||
# ))
|
||||
|
||||
def click_pushButtonFetchNamecoinID(self):
|
||||
nc = namecoinConnection()
|
||||
|
@ -2846,25 +2846,25 @@ class MyForm(settingsmixin.SMainWindow):
|
|||
os._exit(0)
|
||||
|
||||
# window close event
|
||||
def closeEvent(self, event):
|
||||
self.appIndicatorHide()
|
||||
trayonclose = False
|
||||
# def closeEvent(self, event):
|
||||
# self.appIndicatorHide()
|
||||
# trayonclose = False
|
||||
|
||||
try:
|
||||
trayonclose = BMConfigParser().getboolean(
|
||||
'bitmessagesettings', 'trayonclose')
|
||||
except Exception:
|
||||
pass
|
||||
# try:
|
||||
# trayonclose = BMConfigParser().getboolean(
|
||||
# 'bitmessagesettings', 'trayonclose')
|
||||
# except Exception:
|
||||
# pass
|
||||
|
||||
# always ignore, it shuts down by itself
|
||||
if self.quitAccepted:
|
||||
event.accept()
|
||||
return
|
||||
# # always ignore, it shuts down by itself
|
||||
# if self.quitAccepted:
|
||||
# event.accept()
|
||||
# return
|
||||
|
||||
event.ignore()
|
||||
if not trayonclose:
|
||||
# quit the application
|
||||
self.quit()
|
||||
# event.ignore()
|
||||
# if not trayonclose:
|
||||
# # quit the application
|
||||
# self.quit()
|
||||
|
||||
def on_action_InboxMessageForceHtml(self):
|
||||
msgid = self.getCurrentMessageId()
|
||||
|
@ -3092,24 +3092,24 @@ class MyForm(settingsmixin.SMainWindow):
|
|||
"Error: You cannot add the same address to your blacklist"
|
||||
" twice. Try renaming the existing one if you want."))
|
||||
|
||||
def deleteRowFromMessagelist(self, row = None, inventoryHash = None, ackData = None, messageLists = None):
|
||||
if messageLists is None:
|
||||
messageLists = (self.ui.tableWidgetInbox, self.ui.tableWidgetInboxChans, self.ui.tableWidgetInboxSubscriptions)
|
||||
elif type(messageLists) not in (list, tuple):
|
||||
messageLists = (messageLists)
|
||||
for messageList in messageLists:
|
||||
if row is not None:
|
||||
inventoryHash = str(messageList.item(row, 3).data(
|
||||
QtCore.Qt.UserRole).toPyObject())
|
||||
messageList.removeRow(row)
|
||||
elif inventoryHash is not None:
|
||||
for i in range(messageList.rowCount() - 1, -1, -1):
|
||||
if messageList.item(i, 3).data(QtCore.Qt.UserRole).toPyObject() == inventoryHash:
|
||||
messageList.removeRow(i)
|
||||
elif ackData is not None:
|
||||
for i in range(messageList.rowCount() - 1, -1, -1):
|
||||
if messageList.item(i, 3).data(QtCore.Qt.UserRole).toPyObject() == ackData:
|
||||
messageList.removeRow(i)
|
||||
# def deleteRowFromMessagelist(self, row = None, inventoryHash = None, ackData = None, messageLists = None):
|
||||
# if messageLists is None:
|
||||
# messageLists = (self.ui.tableWidgetInbox, self.ui.tableWidgetInboxChans, self.ui.tableWidgetInboxSubscriptions)
|
||||
# elif type(messageLists) not in (list, tuple):
|
||||
# messageLists = (messageLists)
|
||||
# for messageList in messageLists:
|
||||
# if row is not None:
|
||||
# inventoryHash = str(messageList.item(row, 3).data(
|
||||
# QtCore.Qt.UserRole).toPyObject())
|
||||
# messageList.removeRow(row)
|
||||
# elif inventoryHash is not None:
|
||||
# for i in range(messageList.rowCount() - 1, -1, -1):
|
||||
# if messageList.item(i, 3).data(QtCore.Qt.UserRole).toPyObject() == inventoryHash:
|
||||
# messageList.removeRow(i)
|
||||
# elif ackData is not None:
|
||||
# for i in range(messageList.rowCount() - 1, -1, -1):
|
||||
# if messageList.item(i, 3).data(QtCore.Qt.UserRole).toPyObject() == ackData:
|
||||
# messageList.removeRow(i)
|
||||
|
||||
# Send item on the Inbox tab to trash
|
||||
def on_action_InboxTrash(self):
|
||||
|
@ -3457,16 +3457,16 @@ class MyForm(settingsmixin.SMainWindow):
|
|||
else:
|
||||
return False
|
||||
|
||||
def getAccountTreeWidget(self, account):
|
||||
try:
|
||||
if account.type == AccountMixin.CHAN:
|
||||
return self.ui.treeWidgetChans
|
||||
elif account.type == AccountMixin.SUBSCRIPTION:
|
||||
return self.ui.treeWidgetSubscriptions
|
||||
else:
|
||||
return self.ui.treeWidgetYourIdentities
|
||||
except:
|
||||
return self.ui.treeWidgetYourIdentities
|
||||
# def getAccountTreeWidget(self, account):
|
||||
# try:
|
||||
# if account.type == AccountMixin.CHAN:
|
||||
# return self.ui.treeWidgetChans
|
||||
# elif account.type == AccountMixin.SUBSCRIPTION:
|
||||
# return self.ui.treeWidgetSubscriptions
|
||||
# else:
|
||||
# return self.ui.treeWidgetYourIdentities
|
||||
# except:
|
||||
# return self.ui.treeWidgetYourIdentities
|
||||
|
||||
def getCurrentMessagelist(self):
|
||||
currentIndex = self.ui.tabWidget.currentIndex();
|
||||
|
|
|
@ -129,8 +129,8 @@ class AddressPassPhraseValidatorMixin():
|
|||
else:
|
||||
return (QtGui.QValidator.Intermediate, pos)
|
||||
|
||||
def checkData(self):
|
||||
return self.validate("", 0)
|
||||
# def checkData(self):
|
||||
# return self.validate("", 0)
|
||||
|
||||
class AddressValidator(QtGui.QValidator, AddressPassPhraseValidatorMixin):
|
||||
def __init__(self, parent=None, passPhraseObject=None, feedBackObject=None, buttonBox=None, addressMandatory=True):
|
||||
|
|
|
@ -1669,7 +1669,7 @@ qt_resource_struct = "\
|
|||
def qInitResources():
|
||||
QtCore.qRegisterResourceData(0x01, qt_resource_struct, qt_resource_name, qt_resource_data)
|
||||
|
||||
def qCleanupResources():
|
||||
QtCore.qUnregisterResourceData(0x01, qt_resource_struct, qt_resource_name, qt_resource_data)
|
||||
# def qCleanupResources():
|
||||
# QtCore.qUnregisterResourceData(0x01, qt_resource_struct, qt_resource_name, qt_resource_data)
|
||||
|
||||
qInitResources()
|
||||
|
|
|
@ -763,7 +763,7 @@ class Ui_MainWindow(object):
|
|||
self.actionDeleteAllTrashedMessages.setText(_translate("MainWindow", "Delete all trashed messages", None))
|
||||
self.actionJoinChan.setText(_translate("MainWindow", "Join / Create chan", None))
|
||||
|
||||
import bitmessage_icons_rc
|
||||
# import bitmessage_icons_rc
|
||||
|
||||
if __name__ == "__main__":
|
||||
import sys
|
||||
|
|
|
@ -108,9 +108,9 @@ class Blacklist(QtGui.QWidget, RetranslateMixin):
|
|||
# Popup menu for the Blacklist page
|
||||
self.blacklistContextMenuToolbar = QtGui.QToolBar()
|
||||
# Actions
|
||||
self.actionBlacklistNew = self.blacklistContextMenuToolbar.addAction(
|
||||
_translate(
|
||||
"MainWindow", "Add new entry"), self.on_action_BlacklistNew)
|
||||
# self.actionBlacklistNew = self.blacklistContextMenuToolbar.addAction(
|
||||
# _translate(
|
||||
# "MainWindow", "Add new entry"), self.on_action_BlacklistNew)
|
||||
self.actionBlacklistDelete = self.blacklistContextMenuToolbar.addAction(
|
||||
_translate(
|
||||
"MainWindow", "Delete"), self.on_action_BlacklistDelete)
|
||||
|
@ -174,8 +174,8 @@ class Blacklist(QtGui.QWidget, RetranslateMixin):
|
|||
self.tableWidgetBlacklist.setSortingEnabled(True)
|
||||
|
||||
# Group of functions for the Blacklist dialog box
|
||||
def on_action_BlacklistNew(self):
|
||||
self.click_pushButtonAddBlacklist()
|
||||
# def on_action_BlacklistNew(self):
|
||||
# self.click_pushButtonAddBlacklist()
|
||||
|
||||
def on_action_BlacklistDelete(self):
|
||||
currentRow = self.tableWidgetBlacklist.currentRow()
|
||||
|
|
|
@ -1,5 +1,5 @@
|
|||
from PyQt4 import QtCore, QtGui
|
||||
from string import find, rfind, rstrip, lstrip
|
||||
from string import find, rstrip, lstrip# , rfind
|
||||
|
||||
from tr import _translate
|
||||
from bmconfigparser import BMConfigParser
|
||||
|
@ -501,46 +501,46 @@ class AddressBookCompleter(QtGui.QCompleter):
|
|||
if oldPos != self.cursorPos:
|
||||
self.cursorPos = -1
|
||||
|
||||
def splitPath(self, path):
|
||||
stringList = []
|
||||
text = unicode(path.toUtf8(), encoding="UTF-8")
|
||||
splitIndex = rfind(text[0:self.widget().cursorPosition()], ";") + 1
|
||||
str = text[splitIndex:self.widget().cursorPosition()]
|
||||
str = rstrip(lstrip(str))
|
||||
stringList.append(str)
|
||||
return stringList
|
||||
# def splitPath(self, path):
|
||||
# stringList = []
|
||||
# text = unicode(path.toUtf8(), encoding="UTF-8")
|
||||
# splitIndex = rfind(text[0:self.widget().cursorPosition()], ";") + 1
|
||||
# str = text[splitIndex:self.widget().cursorPosition()]
|
||||
# str = rstrip(lstrip(str))
|
||||
# stringList.append(str)
|
||||
# return stringList
|
||||
|
||||
def pathFromIndex(self, index):
|
||||
autoString = unicode(index.data(QtCore.Qt.EditRole).toString().toUtf8(), encoding="UTF-8")
|
||||
text = unicode(self.widget().text().toUtf8(), encoding="UTF-8")
|
||||
# def pathFromIndex(self, index):
|
||||
# autoString = unicode(index.data(QtCore.Qt.EditRole).toString().toUtf8(), encoding="UTF-8")
|
||||
# text = unicode(self.widget().text().toUtf8(), encoding="UTF-8")
|
||||
|
||||
# If cursor position was saved, restore it, else save it
|
||||
if self.cursorPos != -1:
|
||||
self.widget().setCursorPosition(self.cursorPos)
|
||||
else:
|
||||
self.cursorPos = self.widget().cursorPosition()
|
||||
# # If cursor position was saved, restore it, else save it
|
||||
# if self.cursorPos != -1:
|
||||
# self.widget().setCursorPosition(self.cursorPos)
|
||||
# else:
|
||||
# self.cursorPos = self.widget().cursorPosition()
|
||||
|
||||
# Get current prosition
|
||||
curIndex = self.widget().cursorPosition()
|
||||
# # Get current prosition
|
||||
# curIndex = self.widget().cursorPosition()
|
||||
|
||||
# prev_delimiter_index should actually point at final white space AFTER the delimiter
|
||||
# Get index of last delimiter before current position
|
||||
prevDelimiterIndex = rfind(text[0:curIndex], ";")
|
||||
while text[prevDelimiterIndex + 1] == " ":
|
||||
prevDelimiterIndex += 1
|
||||
# # prev_delimiter_index should actually point at final white space AFTER the delimiter
|
||||
# # Get index of last delimiter before current position
|
||||
# prevDelimiterIndex = rfind(text[0:curIndex], ";")
|
||||
# while text[prevDelimiterIndex + 1] == " ":
|
||||
# prevDelimiterIndex += 1
|
||||
|
||||
# Get index of first delimiter after current position (or EOL if no delimiter after cursor)
|
||||
nextDelimiterIndex = find(text, ";", curIndex)
|
||||
if nextDelimiterIndex == -1:
|
||||
nextDelimiterIndex = len(text)
|
||||
# # Get index of first delimiter after current position (or EOL if no delimiter after cursor)
|
||||
# nextDelimiterIndex = find(text, ";", curIndex)
|
||||
# if nextDelimiterIndex == -1:
|
||||
# nextDelimiterIndex = len(text)
|
||||
|
||||
# Get part of string that occurs before cursor
|
||||
part1 = text[0:prevDelimiterIndex + 1]
|
||||
# # Get part of string that occurs before cursor
|
||||
# part1 = text[0:prevDelimiterIndex + 1]
|
||||
|
||||
# Get string value from before auto finished string is selected
|
||||
pre = text[prevDelimiterIndex + 1:curIndex - 1];
|
||||
# # Get string value from before auto finished string is selected
|
||||
# pre = text[prevDelimiterIndex + 1:curIndex - 1];
|
||||
|
||||
# Get part of string that occurs AFTER cursor
|
||||
part2 = text[nextDelimiterIndex:]
|
||||
# # Get part of string that occurs AFTER cursor
|
||||
# part2 = text[nextDelimiterIndex:]
|
||||
|
||||
return part1 + autoString + part2;
|
||||
# return part1 + autoString + part2;
|
||||
|
|
|
@ -12,7 +12,7 @@ class LanguageBox(QtGui.QComboBox):
|
|||
self.populate()
|
||||
|
||||
def populate(self):
|
||||
self.languages = []
|
||||
# self.languages = []
|
||||
self.clear()
|
||||
localesPath = os.path.join (paths.codePath(), 'translations')
|
||||
configuredLocale = "system"
|
||||
|
|
|
@ -1,6 +1,6 @@
|
|||
from PyQt4 import QtCore, QtGui
|
||||
|
||||
import multiprocessing
|
||||
# import multiprocessing
|
||||
import Queue
|
||||
from urlparse import urlparse
|
||||
from safehtmlparser import *
|
||||
|
@ -75,9 +75,9 @@ class MessageView(QtGui.QTextBrowser):
|
|||
if reply == QtGui.QMessageBox.Yes:
|
||||
QtGui.QDesktopServices.openUrl(link)
|
||||
|
||||
def loadResource (self, restype, name):
|
||||
if restype == QtGui.QTextDocument.ImageResource and name.scheme() == "bmmsg":
|
||||
pass
|
||||
# def loadResource (self, restype, name):
|
||||
# if restype == QtGui.QTextDocument.ImageResource and name.scheme() == "bmmsg":
|
||||
# pass
|
||||
# QImage correctImage;
|
||||
# lookup the correct QImage from a cache
|
||||
# return QVariant::fromValue(correctImage);
|
||||
|
@ -85,8 +85,8 @@ class MessageView(QtGui.QTextBrowser):
|
|||
# elif restype == QtGui.QTextDocument.ImageResource:
|
||||
# elif restype == QtGui.QTextDocument.StyleSheetResource:
|
||||
# elif restype == QtGui.QTextDocument.UserResource:
|
||||
else:
|
||||
pass
|
||||
# else:
|
||||
# pass
|
||||
# by default, this will interpret it as a local file
|
||||
# QtGui.QTextBrowser.loadResource(restype, name)
|
||||
|
||||
|
@ -132,7 +132,7 @@ class MessageView(QtGui.QTextBrowser):
|
|||
self.html = SafeHTMLParser()
|
||||
self.html.reset()
|
||||
self.html.reset_safe()
|
||||
self.html.allow_picture = True
|
||||
# self.html.allow_picture = True
|
||||
self.html.feed(data)
|
||||
self.html.close()
|
||||
self.showPlain()
|
||||
|
|
src/bitmessageqt/migrationwizard.py (14 changes; normal file → executable file)

@ -14,8 +14,8 @@ class MigrationWizardIntroPage(QtGui.QWizardPage):
|
|||
layout.addWidget(label)
|
||||
self.setLayout(layout)
|
||||
|
||||
def nextId(self):
|
||||
return 1
|
||||
# def nextId(self):
|
||||
# return 1
|
||||
|
||||
|
||||
class MigrationWizardAddressesPage(QtGui.QWizardPage):
|
||||
|
@ -30,8 +30,8 @@ class MigrationWizardAddressesPage(QtGui.QWizardPage):
|
|||
layout.addWidget(label)
|
||||
self.setLayout(layout)
|
||||
|
||||
def nextId(self):
|
||||
return 10
|
||||
# def nextId(self):
|
||||
# return 10
|
||||
|
||||
|
||||
class MigrationWizardGPUPage(QtGui.QWizardPage):
|
||||
|
@ -46,8 +46,8 @@ class MigrationWizardGPUPage(QtGui.QWizardPage):
|
|||
layout.addWidget(label)
|
||||
self.setLayout(layout)
|
||||
|
||||
def nextId(self):
|
||||
return 10
|
||||
# def nextId(self):
|
||||
# return 10
|
||||
|
||||
|
||||
class MigrationWizardConclusionPage(QtGui.QWizardPage):
|
||||
|
@ -67,7 +67,7 @@ class Ui_MigrationWizard(QtGui.QWizard):
|
|||
def __init__(self, addresses):
|
||||
super(QtGui.QWizard, self).__init__()
|
||||
|
||||
self.pages = {}
|
||||
# self.pages = {}
|
||||
|
||||
page = MigrationWizardIntroPage()
|
||||
self.setPage(0, page)
|
||||
|
|
src/bitmessageqt/newaddresswizard.py (100 changes; normal file → executable file)

@ -23,11 +23,11 @@ class NewAddressWizardIntroPage(QtGui.QWizardPage):
|
|||
layout.addWidget(self.onlyBM)
|
||||
self.setLayout(layout)
|
||||
|
||||
def nextId(self):
|
||||
if self.emailAsWell.isChecked():
|
||||
return 4
|
||||
else:
|
||||
return 1
|
||||
# def nextId(self):
|
||||
# if self.emailAsWell.isChecked():
|
||||
# return 4
|
||||
# else:
|
||||
# return 1
|
||||
|
||||
|
||||
class NewAddressWizardRngPassphrasePage(QtGui.QWizardPage):
|
||||
|
@ -58,11 +58,11 @@ class NewAddressWizardRngPassphrasePage(QtGui.QWizardPage):
|
|||
layout.addWidget(self.deterministicAddress)
|
||||
self.setLayout(layout)
|
||||
|
||||
def nextId(self):
|
||||
if self.randomAddress.isChecked():
|
||||
return 2
|
||||
else:
|
||||
return 3
|
||||
# def nextId(self):
|
||||
# if self.randomAddress.isChecked():
|
||||
# return 2
|
||||
# else:
|
||||
# return 3
|
||||
|
||||
class NewAddressWizardRandomPage(QtGui.QWizardPage):
|
||||
def __init__(self, addresses):
|
||||
|
@ -111,8 +111,8 @@ class NewAddressWizardRandomPage(QtGui.QWizardPage):
|
|||
# self.onlyBM = QtGui.QRadioButton("Bitmessage-only account (no email)")
|
||||
# self.emailAsWell.setChecked(True)
|
||||
|
||||
def nextId(self):
|
||||
return 6
|
||||
# def nextId(self):
|
||||
# return 6
|
||||
|
||||
|
||||
class NewAddressWizardPassphrasePage(QtGui.QWizardPage):
|
||||
|
@ -154,8 +154,8 @@ class NewAddressWizardPassphrasePage(QtGui.QWizardPage):
|
|||
layout.addWidget(label4, 7, 2, 1, 2)
|
||||
self.setLayout(layout)
|
||||
|
||||
def nextId(self):
|
||||
return 6
|
||||
# def nextId(self):
|
||||
# return 6
|
||||
|
||||
|
||||
class NewAddressWizardEmailProviderPage(QtGui.QWizardPage):
|
||||
|
@ -176,8 +176,8 @@ class NewAddressWizardEmailProviderPage(QtGui.QWizardPage):
|
|||
# layout.addWidget(self.mailchuck)
|
||||
self.setLayout(layout)
|
||||
|
||||
def nextId(self):
|
||||
return 5
|
||||
# def nextId(self):
|
||||
# return 5
|
||||
|
||||
|
||||
class NewAddressWizardEmailAddressPage(QtGui.QWizardPage):
|
||||
|
@ -202,8 +202,8 @@ class NewAddressWizardEmailAddressPage(QtGui.QWizardPage):
|
|||
layout.addWidget(self.randomEmail)
|
||||
self.setLayout(layout)
|
||||
|
||||
def nextId(self):
|
||||
return 6
|
||||
# def nextId(self):
|
||||
# return 6
|
||||
|
||||
|
||||
class NewAddressWizardWaitPage(QtGui.QWizardPage):
|
||||
|
@ -240,32 +240,32 @@ class NewAddressWizardWaitPage(QtGui.QWizardPage):
|
|||
if i == 50:
|
||||
self.emit(QtCore.SIGNAL('completeChanged()'))
|
||||
|
||||
def isComplete(self):
|
||||
# def isComplete(self):
|
||||
# print "val = " + str(self.progressBar.value())
|
||||
if self.progressBar.value() >= 50:
|
||||
return True
|
||||
else:
|
||||
return False
|
||||
# if self.progressBar.value() >= 50:
|
||||
# return True
|
||||
# else:
|
||||
# return False
|
||||
|
||||
def initializePage(self):
|
||||
if self.field("emailAsWell").toBool():
|
||||
val = "yes/"
|
||||
else:
|
||||
val = "no/"
|
||||
if self.field("onlyBM").toBool():
|
||||
val += "yes"
|
||||
else:
|
||||
val += "no"
|
||||
# def initializePage(self):
|
||||
# if self.field("emailAsWell").toBool():
|
||||
# val = "yes/"
|
||||
# else:
|
||||
# val = "no/"
|
||||
# if self.field("onlyBM").toBool():
|
||||
# val += "yes"
|
||||
# else:
|
||||
# val += "no"
|
||||
|
||||
self.label.setText("Wait! " + val)
|
||||
# self.label.setText("Wait! " + val)
|
||||
# self.wizard().button(QtGui.QWizard.NextButton).setEnabled(False)
|
||||
self.progressBar.setValue(0)
|
||||
self.thread = NewAddressThread()
|
||||
self.connect(self.thread, self.thread.signal, self.update)
|
||||
self.thread.start()
|
||||
# self.progressBar.setValue(0)
|
||||
# self.thread = NewAddressThread()
|
||||
# self.connect(self.thread, self.thread.signal, self.update)
|
||||
# self.thread.start()
|
||||
|
||||
def nextId(self):
|
||||
return 10
|
||||
# def nextId(self):
|
||||
# return 10
|
||||
|
||||
|
||||
class NewAddressWizardConclusionPage(QtGui.QWizardPage):
|
||||
|
@ -284,7 +284,7 @@ class Ui_NewAddressWizard(QtGui.QWizard):
|
|||
def __init__(self, addresses):
|
||||
super(QtGui.QWizard, self).__init__()
|
||||
|
||||
self.pages = {}
|
||||
# self.pages = {}
|
||||
|
||||
page = NewAddressWizardIntroPage()
|
||||
self.setPage(0, page)
|
||||
|
@ -316,20 +316,20 @@ class NewAddressThread(QtCore.QThread):
|
|||
def __del__(self):
|
||||
self.wait()
|
||||
|
||||
def createDeterministic(self):
|
||||
pass
|
||||
# def createDeterministic(self):
|
||||
# pass
|
||||
|
||||
def createPassphrase(self):
|
||||
pass
|
||||
# def createPassphrase(self):
|
||||
# pass
|
||||
|
||||
def broadcastAddress(self):
|
||||
pass
|
||||
# def broadcastAddress(self):
|
||||
# pass
|
||||
|
||||
def registerMailchuck(self):
|
||||
pass
|
||||
# def registerMailchuck(self):
|
||||
# pass
|
||||
|
||||
def waitRegistration(self):
|
||||
pass
|
||||
# def waitRegistration(self):
|
||||
# pass
|
||||
|
||||
def run(self):
|
||||
import time
|
||||
|
|
|
@ -1,27 +1,27 @@
|
|||
from HTMLParser import HTMLParser
|
||||
import inspect
|
||||
import re
|
||||
from urllib import quote, quote_plus
|
||||
# from urllib import quote_plus# , quote
|
||||
from urlparse import urlparse
|
||||
|
||||
class SafeHTMLParser(HTMLParser):
|
||||
# from html5lib.sanitiser
|
||||
acceptable_elements = ['a', 'abbr', 'acronym', 'address', 'area',
|
||||
'article', 'aside', 'audio', 'b', 'big', 'blockquote', 'br', 'button',
|
||||
'canvas', 'caption', 'center', 'cite', 'code', 'col', 'colgroup',
|
||||
'command', 'datagrid', 'datalist', 'dd', 'del', 'details', 'dfn',
|
||||
'dialog', 'dir', 'div', 'dl', 'dt', 'em', 'event-source', 'fieldset',
|
||||
'figcaption', 'figure', 'footer', 'font', 'header', 'h1',
|
||||
'h2', 'h3', 'h4', 'h5', 'h6', 'hr', 'i', 'img', 'ins',
|
||||
'keygen', 'kbd', 'label', 'legend', 'li', 'm', 'map', 'menu', 'meter',
|
||||
'multicol', 'nav', 'nextid', 'ol', 'output', 'optgroup', 'option',
|
||||
'p', 'pre', 'progress', 'q', 's', 'samp', 'section', 'select',
|
||||
'small', 'sound', 'source', 'spacer', 'span', 'strike', 'strong',
|
||||
'sub', 'sup', 'table', 'tbody', 'td', 'textarea', 'time', 'tfoot',
|
||||
'th', 'thead', 'tr', 'tt', 'u', 'ul', 'var', 'video']
|
||||
# acceptable_elements = ['a', 'abbr', 'acronym', 'address', 'area',
|
||||
# 'article', 'aside', 'audio', 'b', 'big', 'blockquote', 'br', 'button',
|
||||
# 'canvas', 'caption', 'center', 'cite', 'code', 'col', 'colgroup',
|
||||
# 'command', 'datagrid', 'datalist', 'dd', 'del', 'details', 'dfn',
|
||||
# 'dialog', 'dir', 'div', 'dl', 'dt', 'em', 'event-source', 'fieldset',
|
||||
# 'figcaption', 'figure', 'footer', 'font', 'header', 'h1',
|
||||
# 'h2', 'h3', 'h4', 'h5', 'h6', 'hr', 'i', 'img', 'ins',
|
||||
# 'keygen', 'kbd', 'label', 'legend', 'li', 'm', 'map', 'menu', 'meter',
|
||||
# 'multicol', 'nav', 'nextid', 'ol', 'output', 'optgroup', 'option',
|
||||
# 'p', 'pre', 'progress', 'q', 's', 'samp', 'section', 'select',
|
||||
# 'small', 'sound', 'source', 'spacer', 'span', 'strike', 'strong',
|
||||
# 'sub', 'sup', 'table', 'tbody', 'td', 'textarea', 'time', 'tfoot',
|
||||
# 'th', 'thead', 'tr', 'tt', 'u', 'ul', 'var', 'video']
|
||||
replaces_pre = [["&", "&"], ["\"", """], ["<", "<"], [">", ">"]]
|
||||
replaces_post = [["\n", "<br/>"], ["\t", " "], [" ", " "], [" ", " "], ["<br/> ", "<br/> "]]
|
||||
src_schemes = [ "data" ]
|
||||
# src_schemes = [ "data" ]
|
||||
#uriregex1 = re.compile(r'(?i)\b((?:(https?|ftp|bitcoin):(?:/{1,3}|[a-z0-9%])|www\d{0,3}[.]|[a-z0-9.\-]+[.][a-z]{2,4}/)(?:[^\s()<>]+|\(([^\s()<>]+|(\([^\s()<>]+\)))*\))+(?:\(([^\s()<>]+|(\([^\s()<>]+\)))*\)|[^\s`!()\[\]{};:\'".,<>?]))')
|
||||
uriregex1 = re.compile(r'((https?|ftp|bitcoin):(?:/{1,3}|[a-z0-9%])(?:[a-zA-Z]|[0-9]|[$-_@.&+#]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+)')
|
||||
uriregex2 = re.compile(r'<a href="([^"]+)&')
|
||||
|
@ -46,56 +46,56 @@ class SafeHTMLParser(HTMLParser):
|
|||
self.reset_safe()
|
||||
|
||||
def reset_safe(self):
|
||||
self.elements = set()
|
||||
# self.elements = set()
|
||||
self.raw = u""
|
||||
self.sanitised = u""
|
||||
self.has_html = False
|
||||
self.allow_picture = False
|
||||
self.allow_external_src = False
|
||||
# self.allow_picture = False
|
||||
# self.allow_external_src = False
|
||||
|
||||
def add_if_acceptable(self, tag, attrs = None):
|
||||
if tag not in SafeHTMLParser.acceptable_elements:
|
||||
return
|
||||
self.sanitised += "<"
|
||||
if inspect.stack()[1][3] == "handle_endtag":
|
||||
self.sanitised += "/"
|
||||
self.sanitised += tag
|
||||
if attrs is not None:
|
||||
for attr, val in attrs:
|
||||
if tag == "img" and attr == "src" and not self.allow_picture:
|
||||
val = ""
|
||||
elif attr == "src" and not self.allow_external_src:
|
||||
url = urlparse(val)
|
||||
if url.scheme not in SafeHTMLParser.src_schemes:
|
||||
val = ""
|
||||
self.sanitised += " " + quote_plus(attr)
|
||||
if not (val is None):
|
||||
self.sanitised += "=\"" + val + "\""
|
||||
if inspect.stack()[1][3] == "handle_startendtag":
|
||||
self.sanitised += "/"
|
||||
self.sanitised += ">"
|
||||
# def add_if_acceptable(self, tag, attrs = None):
|
||||
# if tag not in SafeHTMLParser.acceptable_elements:
|
||||
# return
|
||||
# self.sanitised += "<"
|
||||
# if inspect.stack()[1][3] == "handle_endtag":
|
||||
# self.sanitised += "/"
|
||||
# self.sanitised += tag
|
||||
# if attrs is not None:
|
||||
# for attr, val in attrs:
|
||||
# if tag == "img" and attr == "src" and not self.allow_picture:
|
||||
# val = ""
|
||||
# elif attr == "src" and not self.allow_external_src:
|
||||
# url = urlparse(val)
|
||||
# if url.scheme not in SafeHTMLParser.src_schemes:
|
||||
# val = ""
|
||||
# self.sanitised += " " + quote_plus(attr)
|
||||
# if not (val is None):
|
||||
# self.sanitised += "=\"" + val + "\""
|
||||
# if inspect.stack()[1][3] == "handle_startendtag":
|
||||
# self.sanitised += "/"
|
||||
# self.sanitised += ">"
|
||||
|
||||
def handle_starttag(self, tag, attrs):
|
||||
if tag in SafeHTMLParser.acceptable_elements:
|
||||
self.has_html = True
|
||||
self.add_if_acceptable(tag, attrs)
|
||||
# def handle_starttag(self, tag, attrs):
|
||||
# if tag in SafeHTMLParser.acceptable_elements:
|
||||
# self.has_html = True
|
||||
# self.add_if_acceptable(tag, attrs)
|
||||
|
||||
def handle_endtag(self, tag):
|
||||
self.add_if_acceptable(tag)
|
||||
# def handle_endtag(self, tag):
|
||||
# self.add_if_acceptable(tag)
|
||||
|
||||
def handle_startendtag(self, tag, attrs):
|
||||
if tag in SafeHTMLParser.acceptable_elements:
|
||||
self.has_html = True
|
||||
self.add_if_acceptable(tag, attrs)
|
||||
# def handle_startendtag(self, tag, attrs):
|
||||
# if tag in SafeHTMLParser.acceptable_elements:
|
||||
# self.has_html = True
|
||||
# self.add_if_acceptable(tag, attrs)
|
||||
|
||||
def handle_data(self, data):
|
||||
self.sanitised += data
|
||||
# def handle_data(self, data):
|
||||
# self.sanitised += data
|
||||
|
||||
def handle_charref(self, name):
|
||||
self.sanitised += "&#" + name + ";"
|
||||
# def handle_charref(self, name):
|
||||
# self.sanitised += "&#" + name + ";"
|
||||
|
||||
def handle_entityref(self, name):
|
||||
self.sanitised += "&" + name + ";"
|
||||
# def handle_entityref(self, name):
|
||||
# self.sanitised += "&" + name + ";"
|
||||
|
||||
def feed(self, data):
|
||||
try:
|
||||
|
@ -112,11 +112,11 @@ class SafeHTMLParser(HTMLParser):
|
|||
tmp = SafeHTMLParser.replace_post(tmp)
|
||||
self.raw += tmp
|
||||
|
||||
def is_html(self, text = None, allow_picture = False):
|
||||
if text:
|
||||
self.reset()
|
||||
self.reset_safe()
|
||||
self.allow_picture = allow_picture
|
||||
self.feed(text)
|
||||
self.close()
|
||||
return self.has_html
|
||||
# def is_html(self, text = None, allow_picture = False):
|
||||
# if text:
|
||||
# self.reset()
|
||||
# self.reset_safe()
|
||||
# self.allow_picture = allow_picture
|
||||
# self.feed(text)
|
||||
# self.close()
|
||||
# return self.has_html
|
||||
|
|
|
@ -513,4 +513,4 @@ class Ui_settingsDialog(object):
|
|||
self.label_23.setText(_translate("settingsDialog", "months.", None))
|
||||
self.tabWidgetSettings.setTabText(self.tabWidgetSettings.indexOf(self.tabResendsExpire), _translate("settingsDialog", "Resends Expire", None))
|
||||
|
||||
import bitmessage_icons_rc
|
||||
# import bitmessage_icons_rc
|
||||
|
|
|
@ -1,7 +1,7 @@
|
|||
# -*- coding: utf-8 -*-
|
||||
|
||||
# sound type constants
|
||||
SOUND_NONE = 0
|
||||
# SOUND_NONE = 0
|
||||
SOUND_KNOWN = 1
|
||||
SOUND_UNKNOWN = 2
|
||||
SOUND_CONNECTED = 3
|
||||
|
|
|
@ -119,11 +119,11 @@ class BMConfigParser(ConfigParser.SafeConfigParser):
|
|||
except AttributeError:
|
||||
return True
|
||||
|
||||
def validate_bitmessagesettings_maxoutboundconnections(self, value):
|
||||
try:
|
||||
value = int(value)
|
||||
except ValueError:
|
||||
return False
|
||||
if value < 0 or value > 8:
|
||||
return False
|
||||
return True
|
||||
# def validate_bitmessagesettings_maxoutboundconnections(self, value):
|
||||
# try:
|
||||
# value = int(value)
|
||||
# except ValueError:
|
||||
# return False
|
||||
# if value < 0 or value > 8:
|
||||
# return False
|
||||
# return True
|
||||
|
|
|
@ -47,8 +47,8 @@ class objectHashHolder(threading.Thread):
|
|||
def holdPeer(self,peerDetails):
|
||||
self.collectionOfPeerLists[random.randrange(0, objectHashHolder.size)].append(peerDetails)
|
||||
|
||||
def hashCount(self):
|
||||
return sum([len(x) for x in self.collectionOfHashLists if type(x) is list])
|
||||
# def hashCount(self):
|
||||
# return sum([len(x) for x in self.collectionOfHashLists if type(x) is list])
|
||||
|
||||
def close(self):
|
||||
self.shutdown = True
|
||||
|
|
|
@ -15,7 +15,7 @@ import highlevelcrypto
|
|||
from addresses import *
|
||||
from bmconfigparser import BMConfigParser
|
||||
import helper_generic
|
||||
from helper_generic import addDataPadding
|
||||
# from helper_generic import addDataPadding
|
||||
import helper_bitcoin
|
||||
import helper_inbox
|
||||
import helper_msgcoding
|
||||
|
@ -209,7 +209,7 @@ class objectProcessor(threading.Thread):
|
|||
if len(data) < 146: # sanity check. This is the minimum possible length.
|
||||
logger.debug('(within processpubkey) payloadLength less than 146. Sanity check failed.')
|
||||
return
|
||||
bitfieldBehaviors = data[readPosition:readPosition + 4]
|
||||
# bitfieldBehaviors = data[readPosition:readPosition + 4]
|
||||
readPosition += 4
|
||||
publicSigningKey = data[readPosition:readPosition + 64]
|
||||
# Is it possible for a public key to be invalid such that trying to
|
||||
|
@ -258,16 +258,16 @@ class objectProcessor(threading.Thread):
|
|||
if len(data) < 170: # sanity check.
|
||||
logger.warning('(within processpubkey) payloadLength less than 170. Sanity check failed.')
|
||||
return
|
||||
bitfieldBehaviors = data[readPosition:readPosition + 4]
|
||||
# bitfieldBehaviors = data[readPosition:readPosition + 4]
|
||||
readPosition += 4
|
||||
publicSigningKey = '\x04' + data[readPosition:readPosition + 64]
|
||||
readPosition += 64
|
||||
publicEncryptionKey = '\x04' + data[readPosition:readPosition + 64]
|
||||
readPosition += 64
|
||||
specifiedNonceTrialsPerByte, specifiedNonceTrialsPerByteLength = decodeVarint(
|
||||
_, specifiedNonceTrialsPerByteLength = decodeVarint(
|
||||
data[readPosition:readPosition + 10])
|
||||
readPosition += specifiedNonceTrialsPerByteLength
|
||||
specifiedPayloadLengthExtraBytes, specifiedPayloadLengthExtraBytesLength = decodeVarint(
|
||||
_, specifiedPayloadLengthExtraBytesLength = decodeVarint(
|
||||
data[readPosition:readPosition + 10])
|
||||
readPosition += specifiedPayloadLengthExtraBytesLength
|
||||
endOfSignedDataPosition = readPosition
|
||||
|
@ -874,18 +874,18 @@ class objectProcessor(threading.Thread):
|
|||
else:
|
||||
return '[' + mailingListName + '] ' + subject
|
||||
|
||||
def decodeType2Message(self, message):
|
||||
bodyPositionIndex = string.find(message, '\nBody:')
|
||||
if bodyPositionIndex > 1:
|
||||
subject = message[8:bodyPositionIndex]
|
||||
# Only save and show the first 500 characters of the subject.
|
||||
# Any more is probably an attack.
|
||||
subject = subject[:500]
|
||||
body = message[bodyPositionIndex + 6:]
|
||||
else:
|
||||
subject = ''
|
||||
body = message
|
||||
# Throw away any extra lines (headers) after the subject.
|
||||
if subject:
|
||||
subject = subject.splitlines()[0]
|
||||
return subject, body
|
||||
# def decodeType2Message(self, message):
|
||||
# bodyPositionIndex = string.find(message, '\nBody:')
|
||||
# if bodyPositionIndex > 1:
|
||||
# subject = message[8:bodyPositionIndex]
|
||||
# # Only save and show the first 500 characters of the subject.
|
||||
# # Any more is probably an attack.
|
||||
# subject = subject[:500]
|
||||
# body = message[bodyPositionIndex + 6:]
|
||||
# else:
|
||||
# subject = ''
|
||||
# body = message
|
||||
# # Throw away any extra lines (headers) after the subject.
|
||||
# if subject:
|
||||
# subject = subject.splitlines()[0]
|
||||
# return subject, body
|
||||
|
|
|
@ -26,7 +26,7 @@ from binascii import hexlify
|
|||
from addresses import *
|
||||
from bmconfigparser import BMConfigParser
|
||||
from class_objectHashHolder import objectHashHolder
|
||||
from helper_generic import addDataPadding, isHostInPrivateIPRange
|
||||
from helper_generic import isHostInPrivateIPRange# , addDataPadding
|
||||
from helper_sql import sqlQuery
|
||||
import knownnodes
|
||||
from debug import logger
|
||||
|
@ -278,8 +278,8 @@ class receiveDataThread(threading.Thread):
|
|||
context = ssl.SSLContext(protocol.sslProtocolVersion)
|
||||
context.set_ciphers(protocol.sslProtocolCiphers)
|
||||
context.set_ecdh_curve("secp256k1")
|
||||
context.check_hostname = False
|
||||
context.verify_mode = ssl.CERT_NONE
|
||||
# context.check_hostname = False
|
||||
# context.verify_mode = ssl.CERT_NONE
|
||||
# also exclude TLSv1 and TLSv1.1 in the future
|
||||
context.options = ssl.OP_ALL | ssl.OP_NO_SSLv2 | ssl.OP_NO_SSLv3 | ssl.OP_SINGLE_ECDH_USE | ssl.OP_CIPHER_SERVER_PREFERENCE
|
||||
self.sslSock = context.wrap_socket(self.sock, server_side = not self.initiatedConnection, do_handshake_on_connect=False)
|
||||
|
@ -791,7 +791,7 @@ class receiveDataThread(threading.Thread):
|
|||
timestamp, = unpack('>Q', data[12:20])
|
||||
self.timeOffset = timestamp - int(time.time())
|
||||
|
||||
self.myExternalIP = socket.inet_ntoa(data[40:44])
|
||||
# self.myExternalIP = socket.inet_ntoa(data[40:44])
|
||||
# print 'myExternalIP', self.myExternalIP
|
||||
self.remoteNodeIncomingPort, = unpack('>H', data[70:72])
|
||||
# print 'remoteNodeIncomingPort', self.remoteNodeIncomingPort
|
||||
|
|
|
@ -10,7 +10,7 @@ import socket
|
|||
from ssl import SSLError, SSL_ERROR_WANT_WRITE
|
||||
import sys
|
||||
|
||||
from helper_generic import addDataPadding
|
||||
# from helper_generic import addDataPadding
|
||||
from class_objectHashHolder import *
|
||||
from addresses import *
|
||||
from debug import logger
|
||||
|
@ -71,7 +71,7 @@ class sendDataThread(threading.Thread):
|
|||
# if not 'Bad file descriptor' in err:
|
||||
logger.error('sock.sendall error: %s\n' % err)
|
||||
|
||||
self.versionSent = 1
|
||||
# self.versionSent = 1
|
||||
|
||||
def sendBytes(self, data = ""):
|
||||
self.buffer += data
|
||||
|
|
|
@ -16,7 +16,7 @@ from debug import logger
|
|||
import defaults
|
||||
from helper_sql import *
|
||||
import helper_inbox
|
||||
from helper_generic import addDataPadding
|
||||
# from helper_generic import addDataPadding
|
||||
import helper_msgcoding
|
||||
from helper_threading import *
|
||||
from inventory import Inventory, PendingUpload
|
||||
|
@ -232,7 +232,7 @@ class singleWorker(threading.Thread, StoppableThread):
|
|||
|
||||
TTL = int(28 * 24 * 60 * 60 + random.randrange(-300, 300))# 28 days from now plus or minus five minutes
|
||||
embeddedTime = int(time.time() + TTL)
|
||||
signedTimeForProtocolV2 = embeddedTime - TTL
|
||||
# signedTimeForProtocolV2 = embeddedTime - TTL
|
||||
"""
|
||||
According to the protocol specification, the expiresTime along with the pubkey information is
|
||||
signed. But to be backwards compatible during the upgrade period, we shall sign not the
|
||||
|
@ -536,7 +536,7 @@ class singleWorker(threading.Thread, StoppableThread):
|
|||
toaddress, fromaddress, subject, message, ackdata, status, TTL, retryNumber, encoding = row
|
||||
toStatus, toAddressVersionNumber, toStreamNumber, toRipe = decodeAddress(
|
||||
toaddress)
|
||||
fromStatus, fromAddressVersionNumber, fromStreamNumber, fromRipe = decodeAddress(
|
||||
_, fromAddressVersionNumber, fromStreamNumber, _ = decodeAddress(
|
||||
fromaddress)
|
||||
|
||||
# We may or may not already have the pubkey for this toAddress. Let's check.
|
||||
|
|
|
@ -1,7 +1,7 @@
|
|||
import asyncore
|
||||
import base64
|
||||
import email
|
||||
from email.parser import Parser
|
||||
# from email.parser import Parser
|
||||
from email.header import decode_header
|
||||
import re
|
||||
import signal
|
||||
|
@ -24,30 +24,30 @@ SMTPDOMAIN = "bmaddr.lan"
|
|||
LISTENPORT = 8425
|
||||
|
||||
class smtpServerChannel(smtpd.SMTPChannel):
|
||||
def smtp_EHLO(self, arg):
|
||||
if not arg:
|
||||
self.push('501 Syntax: HELO hostname')
|
||||
return
|
||||
self.push('250-PyBitmessage %s' % softwareVersion)
|
||||
self.push('250 AUTH PLAIN')
|
||||
# def smtp_EHLO(self, arg):
|
||||
# if not arg:
|
||||
# self.push('501 Syntax: HELO hostname')
|
||||
# return
|
||||
# self.push('250-PyBitmessage %s' % softwareVersion)
|
||||
# self.push('250 AUTH PLAIN')
|
||||
|
||||
def smtp_AUTH(self, arg):
|
||||
if not arg or arg[0:5] not in ["PLAIN"]:
|
||||
self.push('501 Syntax: AUTH PLAIN')
|
||||
return
|
||||
authstring = arg[6:]
|
||||
try:
|
||||
decoded = base64.b64decode(authstring)
|
||||
correctauth = "\x00" + BMConfigParser().safeGet("bitmessagesettings", "smtpdusername", "") + \
|
||||
"\x00" + BMConfigParser().safeGet("bitmessagesettings", "smtpdpassword", "")
|
||||
logger.debug("authstring: %s / %s", correctauth, decoded)
|
||||
if correctauth == decoded:
|
||||
self.auth = True
|
||||
self.push('235 2.7.0 Authentication successful')
|
||||
else:
|
||||
raise Exception("Auth fail")
|
||||
except:
|
||||
self.push('501 Authentication fail')
|
||||
# def smtp_AUTH(self, arg):
|
||||
# if not arg or arg[0:5] not in ["PLAIN"]:
|
||||
# self.push('501 Syntax: AUTH PLAIN')
|
||||
# return
|
||||
# authstring = arg[6:]
|
||||
# try:
|
||||
# decoded = base64.b64decode(authstring)
|
||||
# correctauth = "\x00" + BMConfigParser().safeGet("bitmessagesettings", "smtpdusername", "") + \
|
||||
# "\x00" + BMConfigParser().safeGet("bitmessagesettings", "smtpdpassword", "")
|
||||
# logger.debug("authstring: %s / %s", correctauth, decoded)
|
||||
# if correctauth == decoded:
|
||||
# self.auth = True
|
||||
# self.push('235 2.7.0 Authentication successful')
|
||||
# else:
|
||||
# raise Exception("Auth fail")
|
||||
# except:
|
||||
# self.push('501 Authentication fail')
|
||||
|
||||
def smtp_DATA(self, arg):
|
||||
if not hasattr(self, "auth") or not self.auth:
|
||||
|
@ -62,7 +62,7 @@ class smtpServerPyBitmessage(smtpd.SMTPServer):
if pair is not None:
conn, addr = pair
# print >> DEBUGSTREAM, 'Incoming connection from %s' % repr(addr)
self.channel = smtpServerChannel(self, conn, addr)
# self.channel = smtpServerChannel(self, conn, addr)

def send(self, fromAddress, toAddress, subject, message):
status, addressVersionNumber, streamNumber, ripe = decodeAddress(toAddress)
@ -101,61 +101,61 @@ class smtpServerPyBitmessage(smtpd.SMTPServer):
|
|||
return ret
|
||||
|
||||
|
||||
def process_message(self, peer, mailfrom, rcpttos, data):
|
||||
# def process_message(self, peer, mailfrom, rcpttos, data):
|
||||
# print 'Receiving message from:', peer
|
||||
p = re.compile(".*<([^>]+)>")
|
||||
if not hasattr(self.channel, "auth") or not self.channel.auth:
|
||||
logger.error("Missing or invalid auth")
|
||||
return
|
||||
try:
|
||||
self.msg_headers = Parser().parsestr(data)
|
||||
except:
|
||||
logger.error("Invalid headers")
|
||||
return
|
||||
# p = re.compile(".*<([^>]+)>")
|
||||
# if not hasattr(self.channel, "auth") or not self.channel.auth:
|
||||
# logger.error("Missing or invalid auth")
|
||||
# return
|
||||
# try:
|
||||
# self.msg_headers = Parser().parsestr(data)
|
||||
# except:
|
||||
# logger.error("Invalid headers")
|
||||
# return
|
||||
|
||||
try:
|
||||
sender, domain = p.sub(r'\1', mailfrom).split("@")
|
||||
if domain != SMTPDOMAIN:
|
||||
raise Exception("Bad domain %s", domain)
|
||||
if sender not in BMConfigParser().addresses():
|
||||
raise Exception("Nonexisting user %s", sender)
|
||||
except Exception as err:
|
||||
logger.debug("Bad envelope from %s: %s", mailfrom, repr(err))
|
||||
msg_from = self.decode_header("from")
|
||||
try:
|
||||
msg_from = p.sub(r'\1', self.decode_header("from")[0])
|
||||
sender, domain = msg_from.split("@")
|
||||
if domain != SMTPDOMAIN:
|
||||
raise Exception("Bad domain %s", domain)
|
||||
if sender not in BMConfigParser().addresses():
|
||||
raise Exception("Nonexisting user %s", sender)
|
||||
except Exception as err:
|
||||
logger.error("Bad headers from %s: %s", msg_from, repr(err))
|
||||
return
|
||||
# try:
|
||||
# sender, domain = p.sub(r'\1', mailfrom).split("@")
|
||||
# if domain != SMTPDOMAIN:
|
||||
# raise Exception("Bad domain %s", domain)
|
||||
# if sender not in BMConfigParser().addresses():
|
||||
# raise Exception("Nonexisting user %s", sender)
|
||||
# except Exception as err:
|
||||
# logger.debug("Bad envelope from %s: %s", mailfrom, repr(err))
|
||||
# msg_from = self.decode_header("from")
|
||||
# try:
|
||||
# msg_from = p.sub(r'\1', self.decode_header("from")[0])
|
||||
# sender, domain = msg_from.split("@")
|
||||
# if domain != SMTPDOMAIN:
|
||||
# raise Exception("Bad domain %s", domain)
|
||||
# if sender not in BMConfigParser().addresses():
|
||||
# raise Exception("Nonexisting user %s", sender)
|
||||
# except Exception as err:
|
||||
# logger.error("Bad headers from %s: %s", msg_from, repr(err))
|
||||
# return
|
||||
|
||||
try:
|
||||
msg_subject = self.decode_header('subject')[0]
|
||||
except:
|
||||
msg_subject = "Subject missing..."
|
||||
# try:
|
||||
# msg_subject = self.decode_header('subject')[0]
|
||||
# except:
|
||||
# msg_subject = "Subject missing..."
|
||||
|
||||
msg_tmp = email.message_from_string(data)
|
||||
body = u''
|
||||
for part in msg_tmp.walk():
|
||||
if part and part.get_content_type() == "text/plain":
|
||||
body += part.get_payload(decode=1).decode(part.get_content_charset('utf-8'), errors='replace')
|
||||
# msg_tmp = email.message_from_string(data)
|
||||
# body = u''
|
||||
# for part in msg_tmp.walk():
|
||||
# if part and part.get_content_type() == "text/plain":
|
||||
# body += part.get_payload(decode=1).decode(part.get_content_charset('utf-8'), errors='replace')
|
||||
|
||||
for to in rcpttos:
|
||||
try:
|
||||
rcpt, domain = p.sub(r'\1', to).split("@")
|
||||
if domain != SMTPDOMAIN:
|
||||
raise Exception("Bad domain %s", domain)
|
||||
logger.debug("Sending %s to %s about %s", sender, rcpt, msg_subject)
|
||||
self.send(sender, rcpt, msg_subject, body)
|
||||
logger.info("Relayed %s to %s", sender, rcpt)
|
||||
except Exception as err:
|
||||
logger.error( "Bad to %s: %s", to, repr(err))
|
||||
continue
|
||||
return
|
||||
# for to in rcpttos:
|
||||
# try:
|
||||
# rcpt, domain = p.sub(r'\1', to).split("@")
|
||||
# if domain != SMTPDOMAIN:
|
||||
# raise Exception("Bad domain %s", domain)
|
||||
# logger.debug("Sending %s to %s about %s", sender, rcpt, msg_subject)
|
||||
# self.send(sender, rcpt, msg_subject, body)
|
||||
# logger.info("Relayed %s to %s", sender, rcpt)
|
||||
# except Exception as err:
|
||||
# logger.error( "Bad to %s: %s", to, repr(err))
|
||||
# continue
|
||||
# return

class smtpServer(threading.Thread, StoppableThread):
def __init__(self, parent=None):

@ -167,16 +167,16 @@ class smtpServer(threading.Thread, StoppableThread):
super(smtpServer, self).stopThread()
self.server.close()
return
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# for ip in ('127.0.0.1', BMConfigParser().get('bitmessagesettings', 'onionbindip')):
for ip in ('127.0.0.1'):
try:
s.connect((ip, LISTENPORT))
s.shutdown(socket.SHUT_RDWR)
s.close()
break
except:
pass
# for ip in ('127.0.0.1'):
# try:
# s.connect((ip, LISTENPORT))
# s.shutdown(socket.SHUT_RDWR)
# s.close()
# break
# except:
# pass

def run(self):
asyncore.loop(1)
@ -28,7 +28,7 @@ class sqlThread(threading.Thread):

def run(self):
self.conn = sqlite3.connect(state.appdata + 'messages.dat')
self.conn.text_factory = str
# self.conn.text_factory = str
self.cur = self.conn.cursor()

self.cur.execute('PRAGMA secure_delete = true')
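Note: the run() hunk above comments out self.conn.text_factory = str. Under Python 2 that assignment is what makes sqlite3 return byte strings rather than unicode objects for TEXT columns, so it is worth confirming no caller still depends on str results. A minimal sketch of the difference, assuming an in-memory database:

import sqlite3

conn = sqlite3.connect(':memory:')
conn.text_factory = str  # TEXT values come back as str; the default factory returns unicode
cur = conn.cursor()
cur.execute('CREATE TABLE t (v TEXT)')
cur.execute("INSERT INTO t VALUES ('hello')")
cur.execute('SELECT v FROM t')
print type(cur.fetchone()[0])  # <type 'str'> with the factory set, <type 'unicode'> otherwise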
@ -525,7 +525,7 @@ class sqlThread(threading.Thread):
shutil.move(
paths.lookupAppdataFolder() + 'messages.dat', paths.lookupExeFolder() + 'messages.dat')
self.conn = sqlite3.connect(paths.lookupExeFolder() + 'messages.dat')
self.conn.text_factory = str
# self.conn.text_factory = str
self.cur = self.conn.cursor()
elif item == 'movemessagstoappdata':
logger.debug('the sqlThread is moving the messages.dat file to the Appdata folder.')

@ -541,7 +541,7 @@ class sqlThread(threading.Thread):
shutil.move(
paths.lookupExeFolder() + 'messages.dat', paths.lookupAppdataFolder() + 'messages.dat')
self.conn = sqlite3.connect(paths.lookupAppdataFolder() + 'messages.dat')
self.conn.text_factory = str
# self.conn.text_factory = str
self.cur = self.conn.cursor()
elif item == 'deleteandvacuume':
self.cur.execute('''delete from inbox where folder='trash' ''')
@ -28,8 +28,8 @@ helper_startup.loadConfig()
# examples are here: https://bitmessage.org/forum/index.php/topic,4820.msg11163.html#msg11163
log_level = 'WARNING'

def log_uncaught_exceptions(ex_cls, ex, tb):
logging.critical('Unhandled exception', exc_info=(ex_cls, ex, tb))
# def log_uncaught_exceptions(ex_cls, ex, tb):
# logging.critical('Unhandled exception', exc_info=(ex_cls, ex, tb))

def configureLogging():
have_logging = False

@ -45,7 +45,7 @@ def configureLogging():
# no need to confuse the user if the logger config is missing entirely
print "Using default logger configuration"

sys.excepthook = log_uncaught_exceptions
# sys.excepthook = log_uncaught_exceptions

if have_logging:
return False
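Note: the debug.py hunks above comment out log_uncaught_exceptions and the sys.excepthook assignment as a pair; dropping only one of the two would leave either dead code or a NameError when the module is imported. For reference, the pattern as it worked, using only the standard logging module:

import logging
import sys

def log_uncaught_exceptions(ex_cls, ex, tb):
    # route any exception that would otherwise kill the process into the log
    logging.critical('Unhandled exception', exc_info=(ex_cls, ex, tb))

sys.excepthook = log_uncaught_exceptions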
@ -187,8 +187,8 @@ class DuplicateKeyException(UnpackException):

# Backwards compatibility
KeyNotPrimitiveException = UnhashableKeyException
KeyDuplicateException = DuplicateKeyException
# KeyNotPrimitiveException = UnhashableKeyException
# KeyDuplicateException = DuplicateKeyException

#############################################################################
# Exported Functions and Glob

@ -196,9 +196,9 @@ KeyDuplicateException = DuplicateKeyException

# Exported functions and variables, set up in __init()
pack = None
packb = None
# packb = None
unpack = None
unpackb = None
# unpackb = None
dump = None
dumps = None
load = None
@ -982,21 +982,21 @@ def __init():
# Map packb and unpackb to the appropriate version
if sys.version_info[0] == 3:
pack = _pack3
packb = _packb3
# packb = _packb3
dump = _pack3
dumps = _packb3
unpack = _unpack3
unpackb = _unpackb3
# unpackb = _unpackb3
load = _unpack3
loads = _unpackb3
xrange = range
else:
pack = _pack2
packb = _packb2
# packb = _packb2
dump = _pack2
dumps = _packb2
unpack = _unpack2
unpackb = _unpackb2
# unpackb = _unpackb2
load = _unpack2
loads = _unpackb2
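Note on the __init() hunk above: umsgpack picks its pack/unpack implementations once, at import time, based on the interpreter major version; packb/unpackb are the aliases being commented out. A runnable sketch of the dispatch pattern with placeholder internals (the _pack2/_pack3 bodies here are stand-ins, not umsgpack's real serializers):

import sys

def _pack2(obj):
    # stand-in for the Python 2 implementation
    return 'py2:' + repr(obj)

def _pack3(obj):
    # stand-in for the Python 3 implementation
    return 'py3:' + repr(obj)

# choose the alias once at import time, as __init() does above
pack = _pack3 if sys.version_info[0] == 3 else _pack2

print(pack([1, 2, 3]))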
@ -21,17 +21,17 @@ def powQueueSize():
pass
return curWorkerQueue

def convertIntToString(n):
a = __builtins__.hex(n)
if a[-1:] == 'L':
a = a[:-1]
if (len(a) % 2) == 0:
return unhexlify(a[2:])
else:
return unhexlify('0' + a[2:])
# def convertIntToString(n):
# a = __builtins__.hex(n)
# if a[-1:] == 'L':
# a = a[:-1]
# if (len(a) % 2) == 0:
# return unhexlify(a[2:])
# else:
# return unhexlify('0' + a[2:])

def convertStringToInt(s):
return int(hexlify(s), 16)
# def convertStringToInt(s):
# return int(hexlify(s), 16)

def allThreadTraceback(frame):
id2name = dict([(th.ident, th.name) for th in enumerate()])
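Note: the shared.py hunk above comments out the unused convertIntToString / convertStringToInt pair. A minimal round-trip sketch of what they did, written standalone and assuming Python 2, where hexlify/unhexlify operate on byte strings:

from binascii import hexlify, unhexlify

def convert_int_to_string(n):
    a = hex(n)
    if a.endswith('L'):  # Python 2 longs carry a trailing 'L'
        a = a[:-1]
    if len(a) % 2 == 0:
        return unhexlify(a[2:])
    return unhexlify('0' + a[2:])

def convert_string_to_int(s):
    return int(hexlify(s), 16)

assert convert_string_to_int(convert_int_to_string(0xdeadbeef)) == 0xdeadbeef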
@ -84,5 +84,5 @@ def isHostInPrivateIPRange(host):
return True
return False

def addDataPadding(data, desiredMsgLength = 12, paddingChar = '\x00'):
return data + paddingChar * (desiredMsgLength - len(data))
# def addDataPadding(data, desiredMsgLength = 12, paddingChar = '\x00'):
# return data + paddingChar * (desiredMsgLength - len(data))
2
src/helper_msgcoding.py
Normal file → Executable file
@ -15,7 +15,7 @@ from debug import logger
import messagetypes
from tr import _translate

BITMESSAGE_ENCODING_IGNORE = 0
# BITMESSAGE_ENCODING_IGNORE = 0
BITMESSAGE_ENCODING_TRIVIAL = 1
BITMESSAGE_ENCODING_SIMPLE = 2
BITMESSAGE_ENCODING_EXTENDED = 3
@ -28,8 +28,8 @@ def encrypt(msg,hexPubkey):
def decrypt(msg,hexPrivkey):
return makeCryptor(hexPrivkey).decrypt(msg)
# Decrypts message with an existing pyelliptic.ECC.ECC object
def decryptFast(msg,cryptor):
return cryptor.decrypt(msg)
# def decryptFast(msg,cryptor):
# return cryptor.decrypt(msg)
# Signs with hex private key
def sign(msg,hexPrivkey):
# pyelliptic is upgrading from SHA1 to SHA256 for signing. We must
@ -10,8 +10,8 @@ from helper_sql import *
|
|||
from singleton import Singleton
|
||||
|
||||
# TODO make this dynamic, and watch out for frozen, like with messagetypes
|
||||
import storage.sqlite
|
||||
import storage.filesystem
|
||||
# import storage.sqlite
|
||||
# import storage.filesystem
|
||||
|
||||
@Singleton
|
||||
class Inventory():
|
||||
|
@ -103,7 +103,7 @@ class PendingUpload(object):
|
|||
self.deadline = 0
|
||||
self.maxLen = 0
|
||||
# during shutdown, wait up to 20 seconds to finish uploading
|
||||
self.shutdownWait = 20
|
||||
# self.shutdownWait = 20
|
||||
# forget tracking objects after 60 seconds
|
||||
self.objectWait = 60
|
||||
# wait 10 seconds between clears
|
||||
|
|
|
@ -20,15 +20,15 @@ def saveKnownNodes(dirName = None):
|
|||
with open(os.path.join(dirName, 'knownnodes.dat'), 'wb') as output:
|
||||
pickle.dump(knownNodes, output)
|
||||
|
||||
def increaseRating(peer):
|
||||
increaseAmount = 0.1
|
||||
maxRating = 1
|
||||
with knownNodesLock:
|
||||
for stream in knownNodes.keys():
|
||||
try:
|
||||
knownNodes[stream][peer]["rating"] = min(knownNodes[stream][peer]["rating"] + increaseAmount, maxRating)
|
||||
except KeyError:
|
||||
pass
|
||||
# def increaseRating(peer):
|
||||
# increaseAmount = 0.1
|
||||
# maxRating = 1
|
||||
# with knownNodesLock:
|
||||
# for stream in knownNodes.keys():
|
||||
# try:
|
||||
# knownNodes[stream][peer]["rating"] = min(knownNodes[stream][peer]["rating"] + increaseAmount, maxRating)
|
||||
# except KeyError:
|
||||
# pass
|
||||
|
||||
def decreaseRating(peer):
|
||||
decreaseAmount = 0.1
|
||||
|
|
|
@ -13,7 +13,7 @@ from binascii import hexlify
|
|||
appdata = paths.lookupAppdataFolder()
|
||||
|
||||
conn = sqlite3.connect( appdata + 'messages.dat' )
|
||||
conn.text_factory = str
|
||||
# conn.text_factory = str
|
||||
cur = conn.cursor()
|
||||
|
||||
def readInbox():
|
||||
|
@ -25,78 +25,78 @@ def readInbox():
|
|||
for row in output:
|
||||
print row
|
||||
|
||||
def readSent():
|
||||
print 'Printing everything in Sent table:'
|
||||
item = '''select * from sent where folder !='trash' '''
|
||||
parameters = ''
|
||||
cur.execute(item, parameters)
|
||||
output = cur.fetchall()
|
||||
for row in output:
|
||||
msgid, toaddress, toripe, fromaddress, subject, message, ackdata, lastactiontime, sleeptill, status, retrynumber, folder, encodingtype, ttl = row
|
||||
print hexlify(msgid), toaddress, 'toripe:', hexlify(toripe), 'fromaddress:', fromaddress, 'ENCODING TYPE:', encodingtype, 'SUBJECT:', repr(subject), 'MESSAGE:', repr(message), 'ACKDATA:', hexlify(ackdata), lastactiontime, status, retrynumber, folder
|
||||
# def readSent():
|
||||
# print 'Printing everything in Sent table:'
|
||||
# item = '''select * from sent where folder !='trash' '''
|
||||
# parameters = ''
|
||||
# cur.execute(item, parameters)
|
||||
# output = cur.fetchall()
|
||||
# for row in output:
|
||||
# msgid, toaddress, toripe, fromaddress, subject, message, ackdata, lastactiontime, sleeptill, status, retrynumber, folder, encodingtype, ttl = row
|
||||
# print hexlify(msgid), toaddress, 'toripe:', hexlify(toripe), 'fromaddress:', fromaddress, 'ENCODING TYPE:', encodingtype, 'SUBJECT:', repr(subject), 'MESSAGE:', repr(message), 'ACKDATA:', hexlify(ackdata), lastactiontime, status, retrynumber, folder
|
||||
|
||||
def readSubscriptions():
|
||||
print 'Printing everything in subscriptions table:'
|
||||
item = '''select * from subscriptions'''
|
||||
parameters = ''
|
||||
cur.execute(item, parameters)
|
||||
output = cur.fetchall()
|
||||
for row in output:
|
||||
print row
|
||||
# def readSubscriptions():
|
||||
# print 'Printing everything in subscriptions table:'
|
||||
# item = '''select * from subscriptions'''
|
||||
# parameters = ''
|
||||
# cur.execute(item, parameters)
|
||||
# output = cur.fetchall()
|
||||
# for row in output:
|
||||
# print row
|
||||
|
||||
def readPubkeys():
|
||||
print 'Printing everything in pubkeys table:'
|
||||
item = '''select address, transmitdata, time, usedpersonally from pubkeys'''
|
||||
parameters = ''
|
||||
cur.execute(item, parameters)
|
||||
output = cur.fetchall()
|
||||
for row in output:
|
||||
address, transmitdata, time, usedpersonally = row
|
||||
print 'Address:', address, '\tTime first broadcast:', unicode(strftime('%a, %d %b %Y %I:%M %p',localtime(time)),'utf-8'), '\tUsed by me personally:', usedpersonally, '\tFull pubkey message:', hexlify(transmitdata)
|
||||
# def readPubkeys():
|
||||
# print 'Printing everything in pubkeys table:'
|
||||
# item = '''select address, transmitdata, time, usedpersonally from pubkeys'''
|
||||
# parameters = ''
|
||||
# cur.execute(item, parameters)
|
||||
# output = cur.fetchall()
|
||||
# for row in output:
|
||||
# address, transmitdata, time, usedpersonally = row
|
||||
# print 'Address:', address, '\tTime first broadcast:', unicode(strftime('%a, %d %b %Y %I:%M %p',localtime(time)),'utf-8'), '\tUsed by me personally:', usedpersonally, '\tFull pubkey message:', hexlify(transmitdata)
|
||||
|
||||
def readInventory():
|
||||
print 'Printing everything in inventory table:'
|
||||
item = '''select hash, objecttype, streamnumber, payload, expirestime from inventory'''
|
||||
parameters = ''
|
||||
cur.execute(item, parameters)
|
||||
output = cur.fetchall()
|
||||
for row in output:
|
||||
hash, objecttype, streamnumber, payload, expirestime = row
|
||||
print 'Hash:', hexlify(hash), objecttype, streamnumber, '\t', hexlify(payload), '\t', unicode(strftime('%a, %d %b %Y %I:%M %p',localtime(expirestime)),'utf-8')
|
||||
# def readInventory():
|
||||
# print 'Printing everything in inventory table:'
|
||||
# item = '''select hash, objecttype, streamnumber, payload, expirestime from inventory'''
|
||||
# parameters = ''
|
||||
# cur.execute(item, parameters)
|
||||
# output = cur.fetchall()
|
||||
# for row in output:
|
||||
# hash, objecttype, streamnumber, payload, expirestime = row
|
||||
# print 'Hash:', hexlify(hash), objecttype, streamnumber, '\t', hexlify(payload), '\t', unicode(strftime('%a, %d %b %Y %I:%M %p',localtime(expirestime)),'utf-8')
|
||||
|
||||
|
||||
def takeInboxMessagesOutOfTrash():
|
||||
item = '''update inbox set folder='inbox' where folder='trash' '''
|
||||
parameters = ''
|
||||
cur.execute(item, parameters)
|
||||
output = cur.fetchall()
|
||||
conn.commit()
|
||||
print 'done'
|
||||
# def takeInboxMessagesOutOfTrash():
|
||||
# item = '''update inbox set folder='inbox' where folder='trash' '''
|
||||
# parameters = ''
|
||||
# cur.execute(item, parameters)
|
||||
# output = cur.fetchall()
|
||||
# conn.commit()
|
||||
# print 'done'
|
||||
|
||||
def takeSentMessagesOutOfTrash():
|
||||
item = '''update sent set folder='sent' where folder='trash' '''
|
||||
parameters = ''
|
||||
cur.execute(item, parameters)
|
||||
output = cur.fetchall()
|
||||
conn.commit()
|
||||
print 'done'
|
||||
# def takeSentMessagesOutOfTrash():
|
||||
# item = '''update sent set folder='sent' where folder='trash' '''
|
||||
# parameters = ''
|
||||
# cur.execute(item, parameters)
|
||||
# output = cur.fetchall()
|
||||
# conn.commit()
|
||||
# print 'done'
|
||||
|
||||
def markAllInboxMessagesAsUnread():
|
||||
item = '''update inbox set read='0' '''
|
||||
parameters = ''
|
||||
cur.execute(item, parameters)
|
||||
output = cur.fetchall()
|
||||
conn.commit()
|
||||
queues.UISignalQueue.put(('changedInboxUnread', None))
|
||||
print 'done'
|
||||
# def markAllInboxMessagesAsUnread():
|
||||
# item = '''update inbox set read='0' '''
|
||||
# parameters = ''
|
||||
# cur.execute(item, parameters)
|
||||
# output = cur.fetchall()
|
||||
# conn.commit()
|
||||
# queues.UISignalQueue.put(('changedInboxUnread', None))
|
||||
# print 'done'
|
||||
|
||||
def vacuum():
|
||||
item = '''VACUUM'''
|
||||
parameters = ''
|
||||
cur.execute(item, parameters)
|
||||
output = cur.fetchall()
|
||||
conn.commit()
|
||||
print 'done'
|
||||
# def vacuum():
|
||||
# item = '''VACUUM'''
|
||||
# parameters = ''
|
||||
# cur.execute(item, parameters)
|
||||
# output = cur.fetchall()
|
||||
# conn.commit()
|
||||
# print 'done'
|
||||
|
||||
#takeInboxMessagesOutOfTrash()
|
||||
#takeSentMessagesOutOfTrash()
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
from collections import deque
|
||||
# from collections import deque
|
||||
import Queue
|
||||
import random
|
||||
|
||||
|
@ -12,23 +12,23 @@ class MultiQueue(Queue.Queue):
|
|||
Queue.Queue.__init__(self, maxsize)
|
||||
|
||||
# Initialize the queue representation
|
||||
def _init(self, maxsize):
|
||||
self.iter = 0
|
||||
self.queues = []
|
||||
for i in range(self.queueCount):
|
||||
self.queues.append(deque())
|
||||
# def _init(self, maxsize):
|
||||
# self.iter = 0
|
||||
# self.queues = []
|
||||
# for i in range(self.queueCount):
|
||||
# self.queues.append(deque())
|
||||
|
||||
def _qsize(self, len=len):
|
||||
return len(self.queues[self.iter])
|
||||
# def _qsize(self, len=len):
|
||||
# return len(self.queues[self.iter])
|
||||
|
||||
# Put a new item in the queue
|
||||
def _put(self, item):
|
||||
#self.queue.append(item)
|
||||
self.queues[random.randrange(self.queueCount)].append((item))
|
||||
# def _put(self, item):
|
||||
# #self.queue.append(item)
|
||||
# self.queues[random.randrange(self.queueCount)].append((item))
|
||||
|
||||
# Get an item from the queue
|
||||
def _get(self):
|
||||
return self.queues[self.iter].popleft()
|
||||
# def _get(self):
|
||||
# return self.queues[self.iter].popleft()
|
||||
|
||||
def iterate(self):
|
||||
self.iter = (self.iter + 1) % self.queueCount
|
||||
|
|
|
@ -119,8 +119,8 @@ class AdvancedDispatcher(asyncore.dispatcher):
|
|||
def handle_connect(self):
|
||||
self.lastTx = time.time()
|
||||
|
||||
def state_close(self):
|
||||
return False
|
||||
# def state_close(self):
|
||||
# return False
|
||||
|
||||
def handle_close(self):
|
||||
with self.readLock:
|
||||
|
|
|
@ -302,7 +302,7 @@ def poll_poller(timeout=0.0, map=None):
|
|||
|
||||
# Aliases for backward compatibility
|
||||
poll = select_poller
|
||||
poll2 = poll3 = poll_poller
|
||||
# poll2 = poll3 = poll_poller
|
||||
|
||||
def epoll_poller(timeout=0.0, map=None):
|
||||
"""A poller which uses epoll(), supported on Linux 2.5.44 and newer."""
|
||||
|
@ -468,7 +468,7 @@ class dispatcher:
|
|||
connected = False
|
||||
accepting = False
|
||||
connecting = False
|
||||
closing = False
|
||||
# closing = False
|
||||
addr = None
|
||||
ignore_log_types = frozenset(['warning'])
|
||||
poller_registered = False
|
||||
|
@ -563,7 +563,7 @@ class dispatcher:
|
|||
self.poller_registered = False
|
||||
|
||||
def create_socket(self, family=socket.AF_INET, socket_type=socket.SOCK_STREAM):
|
||||
self.family_and_type = family, socket_type
|
||||
# self.family_and_type = family, socket_type
|
||||
sock = socket.socket(family, socket_type)
|
||||
sock.setblocking(0)
|
||||
self.set_socket(sock)
|
||||
|
@ -762,7 +762,7 @@ class dispatcher:
|
|||
self.handle_expt()
|
||||
|
||||
def handle_error(self):
|
||||
nil, t, v, tbinfo = compact_traceback()
|
||||
_, t, v, tbinfo = compact_traceback()
|
||||
|
||||
# sometimes a user repr method will crash.
|
||||
try:
|
||||
|
@ -811,28 +811,28 @@ class dispatcher:
|
|||
# [for more sophisticated usage use asynchat.async_chat]
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
class dispatcher_with_send(dispatcher):
|
||||
# class dispatcher_with_send(dispatcher):
|
||||
|
||||
def __init__(self, sock=None, map=None):
|
||||
dispatcher.__init__(self, sock, map)
|
||||
self.out_buffer = b''
|
||||
# def __init__(self, sock=None, map=None):
|
||||
# dispatcher.__init__(self, sock, map)
|
||||
# self.out_buffer = b''
|
||||
|
||||
def initiate_send(self):
|
||||
num_sent = 0
|
||||
num_sent = dispatcher.send(self, self.out_buffer[:512])
|
||||
self.out_buffer = self.out_buffer[num_sent:]
|
||||
# def initiate_send(self):
|
||||
# num_sent = 0
|
||||
# num_sent = dispatcher.send(self, self.out_buffer[:512])
|
||||
# self.out_buffer = self.out_buffer[num_sent:]
|
||||
|
||||
def handle_write(self):
|
||||
self.initiate_send()
|
||||
# def handle_write(self):
|
||||
# self.initiate_send()
|
||||
|
||||
def writable(self):
|
||||
return (not self.connected) or len(self.out_buffer)
|
||||
# def writable(self):
|
||||
# return (not self.connected) or len(self.out_buffer)
|
||||
|
||||
def send(self, data):
|
||||
if self.debug:
|
||||
self.log_info('sending %s' % repr(data))
|
||||
self.out_buffer = self.out_buffer + data
|
||||
self.initiate_send()
|
||||
# def send(self, data):
|
||||
# if self.debug:
|
||||
# self.log_info('sending %s' % repr(data))
|
||||
# self.out_buffer = self.out_buffer + data
|
||||
# self.initiate_send()
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# used for debugging.
|
||||
|
@ -892,53 +892,53 @@ def close_all(map=None, ignore_all=False):
|
|||
if os.name == 'posix':
|
||||
import fcntl
|
||||
|
||||
class file_wrapper:
|
||||
# Here we override just enough to make a file
|
||||
# look like a socket for the purposes of asyncore.
|
||||
# The passed fd is automatically os.dup()'d
|
||||
# class file_wrapper:
|
||||
# # Here we override just enough to make a file
|
||||
# # look like a socket for the purposes of asyncore.
|
||||
# # The passed fd is automatically os.dup()'d
|
||||
|
||||
def __init__(self, fd):
|
||||
self.fd = os.dup(fd)
|
||||
# def __init__(self, fd):
|
||||
# self.fd = os.dup(fd)
|
||||
|
||||
def recv(self, *args):
|
||||
return os.read(self.fd, *args)
|
||||
# def recv(self, *args):
|
||||
# return os.read(self.fd, *args)
|
||||
|
||||
def send(self, *args):
|
||||
return os.write(self.fd, *args)
|
||||
# def send(self, *args):
|
||||
# return os.write(self.fd, *args)
|
||||
|
||||
def getsockopt(self, level, optname, buflen=None):
|
||||
if (level == socket.SOL_SOCKET and
|
||||
optname == socket.SO_ERROR and
|
||||
not buflen):
|
||||
return 0
|
||||
raise NotImplementedError("Only asyncore specific behaviour "
|
||||
"implemented.")
|
||||
# def getsockopt(self, level, optname, buflen=None):
|
||||
# if (level == socket.SOL_SOCKET and
|
||||
# optname == socket.SO_ERROR and
|
||||
# not buflen):
|
||||
# return 0
|
||||
# raise NotImplementedError("Only asyncore specific behaviour "
|
||||
# "implemented.")
|
||||
|
||||
read = recv
|
||||
write = send
|
||||
# read = recv
|
||||
# write = send
|
||||
|
||||
def close(self):
|
||||
os.close(self.fd)
|
||||
# def close(self):
|
||||
# os.close(self.fd)
|
||||
|
||||
def fileno(self):
|
||||
return self.fd
|
||||
# def fileno(self):
|
||||
# return self.fd
|
||||
|
||||
class file_dispatcher(dispatcher):
|
||||
# class file_dispatcher(dispatcher):
|
||||
|
||||
def __init__(self, fd, map=None):
|
||||
dispatcher.__init__(self, None, map)
|
||||
self.connected = True
|
||||
try:
|
||||
fd = fd.fileno()
|
||||
except AttributeError:
|
||||
pass
|
||||
self.set_file(fd)
|
||||
# set it to non-blocking mode
|
||||
flags = fcntl.fcntl(fd, fcntl.F_GETFL, 0)
|
||||
flags = flags | os.O_NONBLOCK
|
||||
fcntl.fcntl(fd, fcntl.F_SETFL, flags)
|
||||
# def __init__(self, fd, map=None):
|
||||
# dispatcher.__init__(self, None, map)
|
||||
# self.connected = True
|
||||
# try:
|
||||
# fd = fd.fileno()
|
||||
# except AttributeError:
|
||||
# pass
|
||||
# self.set_file(fd)
|
||||
# # set it to non-blocking mode
|
||||
# flags = fcntl.fcntl(fd, fcntl.F_GETFL, 0)
|
||||
# flags = flags | os.O_NONBLOCK
|
||||
# fcntl.fcntl(fd, fcntl.F_SETFL, flags)
|
||||
|
||||
def set_file(self, fd):
|
||||
self.socket = file_wrapper(fd)
|
||||
self._fileno = self.socket.fileno()
|
||||
self.add_channel()
|
||||
# def set_file(self, fd):
|
||||
# self.socket = file_wrapper(fd)
|
||||
# self._fileno = self.socket.fileno()
|
||||
# self.add_channel()
|
||||
|
|
|
@ -38,7 +38,7 @@ class BMProtoExcessiveDataError(BMProtoError):
|
|||
|
||||
class BMProto(AdvancedDispatcher, ObjectTracker):
|
||||
# ~1.6 MB which is the maximum possible size of an inv message.
|
||||
maxMessageSize = 1600100
|
||||
# maxMessageSize = 1600100
|
||||
# 2**18 = 256kB is the maximum size of an object payload
|
||||
maxObjectPayloadSize = 2**18
|
||||
# protocol specification says max 1000 addresses in one addr command
|
||||
|
@ -46,7 +46,7 @@ class BMProto(AdvancedDispatcher, ObjectTracker):
|
|||
# protocol specification says max 50000 objects in one inv command
|
||||
maxObjectCount = 50000
|
||||
# address is online if online less than this many seconds ago
|
||||
addressAlive = 10800
|
||||
# addressAlive = 10800
|
||||
# maximum time offset
|
||||
maxTimeOffset = 3600
|
||||
|
||||
|
@ -54,10 +54,10 @@ class BMProto(AdvancedDispatcher, ObjectTracker):
|
|||
AdvancedDispatcher.__init__(self, sock)
|
||||
self.isOutbound = False
|
||||
# packet/connection from a local IP
|
||||
self.local = False
|
||||
# self.local = False
|
||||
|
||||
def bm_proto_reset(self):
|
||||
self.magic = None
|
||||
# self.magic = None
|
||||
self.command = None
|
||||
self.payloadLength = 0
|
||||
self.checksum = None
|
||||
|
@ -67,22 +67,22 @@ class BMProto(AdvancedDispatcher, ObjectTracker):
|
|||
self.expectBytes = protocol.Header.size
|
||||
self.object = None
|
||||
|
||||
def state_bm_header(self):
|
||||
self.magic, self.command, self.payloadLength, self.checksum = protocol.Header.unpack(self.read_buf[:protocol.Header.size])
|
||||
self.command = self.command.rstrip('\x00')
|
||||
if self.magic != 0xE9BEB4D9:
|
||||
# skip 1 byte in order to sync
|
||||
self.set_state("bm_header", length=1)
|
||||
self.bm_proto_reset()
|
||||
logger.debug("Bad magic")
|
||||
if self.socket.type == socket.SOCK_STREAM:
|
||||
self.close_reason = "Bad magic"
|
||||
self.set_state("close")
|
||||
return False
|
||||
if self.payloadLength > BMProto.maxMessageSize:
|
||||
self.invalid = True
|
||||
self.set_state("bm_command", length=protocol.Header.size, expectBytes=self.payloadLength)
|
||||
return True
|
||||
# def state_bm_header(self):
|
||||
# self.magic, self.command, self.payloadLength, self.checksum = protocol.Header.unpack(self.read_buf[:protocol.Header.size])
|
||||
# self.command = self.command.rstrip('\x00')
|
||||
# if self.magic != 0xE9BEB4D9:
|
||||
# # skip 1 byte in order to sync
|
||||
# self.set_state("bm_header", length=1)
|
||||
# self.bm_proto_reset()
|
||||
# logger.debug("Bad magic")
|
||||
# if self.socket.type == socket.SOCK_STREAM:
|
||||
# self.close_reason = "Bad magic"
|
||||
# self.set_state("close")
|
||||
# return False
|
||||
# if self.payloadLength > BMProto.maxMessageSize:
|
||||
# self.invalid = True
|
||||
# self.set_state("bm_command", length=protocol.Header.size, expectBytes=self.payloadLength)
|
||||
# return True
|
||||
|
||||
def state_bm_command(self):
|
||||
self.payload = self.read_buf[:self.payloadLength]
|
||||
|
@ -132,10 +132,10 @@ class BMProto(AdvancedDispatcher, ObjectTracker):
|
|||
# else assume the command requires a different state to follow
|
||||
return True
|
||||
|
||||
def decode_payload_string(self, length):
|
||||
value = self.payload[self.payloadOffset:self.payloadOffset+length]
|
||||
self.payloadOffset += length
|
||||
return value
|
||||
# def decode_payload_string(self, length):
|
||||
# value = self.payload[self.payloadOffset:self.payloadOffset+length]
|
||||
# self.payloadOffset += length
|
||||
# return value
|
||||
|
||||
def decode_payload_varint(self):
|
||||
value, offset = addresses.decodeVarint(self.payload[self.payloadOffset:])
|
||||
|
@ -272,60 +272,60 @@ class BMProto(AdvancedDispatcher, ObjectTracker):
|
|||
logger.error("%s:%i error: %i, %s", self.destination.host, self.destination.port, fatalStatus, errorText)
|
||||
return True
|
||||
|
||||
def bm_command_getdata(self):
|
||||
items = self.decode_payload_content("l32s")
|
||||
# skip?
|
||||
if time.time() < self.skipUntil:
|
||||
return True
|
||||
#TODO make this more asynchronous
|
||||
random.shuffle(items)
|
||||
for i in map(str, items):
|
||||
if Dandelion().hasHash(i) and \
|
||||
self != Dandelion().objectChildStem(i):
|
||||
self.antiIntersectionDelay()
|
||||
logger.info('%s asked for a stem object we didn\'t offer to it.', self.destination)
|
||||
break
|
||||
else:
|
||||
try:
|
||||
self.append_write_buf(protocol.CreatePacket('object', Inventory()[i].payload))
|
||||
except KeyError:
|
||||
self.antiIntersectionDelay()
|
||||
logger.info('%s asked for an object we don\'t have.', self.destination)
|
||||
break
|
||||
# I think that aborting after the first missing/stem object is more secure
|
||||
# when using random reordering, as the recipient won't know exactly which objects we refuse to deliver
|
||||
return True
|
||||
# def bm_command_getdata(self):
|
||||
# items = self.decode_payload_content("l32s")
|
||||
# # skip?
|
||||
# if time.time() < self.skipUntil:
|
||||
# return True
|
||||
# #TODO make this more asynchronous
|
||||
# random.shuffle(items)
|
||||
# for i in map(str, items):
|
||||
# if Dandelion().hasHash(i) and \
|
||||
# self != Dandelion().objectChildStem(i):
|
||||
# self.antiIntersectionDelay()
|
||||
# logger.info('%s asked for a stem object we didn\'t offer to it.', self.destination)
|
||||
# break
|
||||
# else:
|
||||
# try:
|
||||
# self.append_write_buf(protocol.CreatePacket('object', Inventory()[i].payload))
|
||||
# except KeyError:
|
||||
# self.antiIntersectionDelay()
|
||||
# logger.info('%s asked for an object we don\'t have.', self.destination)
|
||||
# break
|
||||
# # I think that aborting after the first missing/stem object is more secure
|
||||
# # when using random reordering, as the recipient won't know exactly which objects we refuse to deliver
|
||||
# return True
|
||||
|
||||
def _command_inv(self, dandelion=False):
|
||||
items = self.decode_payload_content("l32s")
|
||||
# def _command_inv(self, dandelion=False):
|
||||
# items = self.decode_payload_content("l32s")
|
||||
|
||||
if len(items) >= BMProto.maxObjectCount:
|
||||
logger.error("Too many items in %sinv message!", "d" if dandelion else "")
|
||||
raise BMProtoExcessiveDataError()
|
||||
else:
|
||||
pass
|
||||
# if len(items) >= BMProto.maxObjectCount:
|
||||
# logger.error("Too many items in %sinv message!", "d" if dandelion else "")
|
||||
# raise BMProtoExcessiveDataError()
|
||||
# else:
|
||||
# pass
|
||||
|
||||
# ignore dinv if dandelion turned off
|
||||
if dandelion and not state.dandelion:
|
||||
return True
|
||||
# # ignore dinv if dandelion turned off
|
||||
# if dandelion and not state.dandelion:
|
||||
# return True
|
||||
|
||||
for i in map(str, items):
|
||||
if i in Inventory() and not Dandelion().hasHash(i):
|
||||
continue
|
||||
if dandelion and not Dandelion().hasHash(i):
|
||||
Dandelion().addHash(i, self)
|
||||
self.handleReceivedInventory(i)
|
||||
# for i in map(str, items):
|
||||
# if i in Inventory() and not Dandelion().hasHash(i):
|
||||
# continue
|
||||
# if dandelion and not Dandelion().hasHash(i):
|
||||
# Dandelion().addHash(i, self)
|
||||
# self.handleReceivedInventory(i)
|
||||
|
||||
return True
|
||||
# return True
|
||||
|
||||
def bm_command_inv(self):
|
||||
return self._command_inv(False)
|
||||
# def bm_command_inv(self):
|
||||
# return self._command_inv(False)
|
||||
|
||||
def bm_command_dinv(self):
|
||||
"""
|
||||
Dandelion stem announce
|
||||
"""
|
||||
return self._command_inv(True)
|
||||
# def bm_command_dinv(self):
|
||||
# """
|
||||
# Dandelion stem announce
|
||||
# """
|
||||
# return self._command_inv(True)
|
||||
|
||||
def bm_command_object(self):
|
||||
objectOffset = self.payloadOffset
|
||||
|
@ -370,90 +370,90 @@ class BMProto(AdvancedDispatcher, ObjectTracker):
|
|||
invQueue.put((self.object.streamNumber, self.object.inventoryHash, self.destination))
|
||||
return True
|
||||
|
||||
def _decode_addr(self):
|
||||
return self.decode_payload_content("LQIQ16sH")
|
||||
# def _decode_addr(self):
|
||||
# return self.decode_payload_content("LQIQ16sH")
|
||||
|
||||
def bm_command_addr(self):
|
||||
addresses = self._decode_addr()
|
||||
for i in addresses:
|
||||
seenTime, stream, services, ip, port = i
|
||||
decodedIP = protocol.checkIPAddress(str(ip))
|
||||
if stream not in state.streamsInWhichIAmParticipating:
|
||||
continue
|
||||
if decodedIP is not False and seenTime > time.time() - BMProto.addressAlive:
|
||||
peer = state.Peer(decodedIP, port)
|
||||
try:
|
||||
if knownnodes.knownNodes[stream][peer]["lastseen"] > seenTime:
|
||||
continue
|
||||
except KeyError:
|
||||
pass
|
||||
if len(knownnodes.knownNodes[stream]) < int(BMConfigParser().get("knownnodes", "maxnodes")):
|
||||
with knownnodes.knownNodesLock:
|
||||
try:
|
||||
knownnodes.knownNodes[stream][peer]["lastseen"] = seenTime
|
||||
except (TypeError, KeyError):
|
||||
knownnodes.knownNodes[stream][peer] = {
|
||||
"lastseen": seenTime,
|
||||
"rating": 0,
|
||||
"self": False,
|
||||
}
|
||||
addrQueue.put((stream, peer, self.destination))
|
||||
return True
|
||||
# def bm_command_addr(self):
|
||||
# addresses = self._decode_addr()
|
||||
# for i in addresses:
|
||||
# seenTime, stream, services, ip, port = i
|
||||
# decodedIP = protocol.checkIPAddress(str(ip))
|
||||
# if stream not in state.streamsInWhichIAmParticipating:
|
||||
# continue
|
||||
# if decodedIP is not False and seenTime > time.time() - BMProto.addressAlive:
|
||||
# peer = state.Peer(decodedIP, port)
|
||||
# try:
|
||||
# if knownnodes.knownNodes[stream][peer]["lastseen"] > seenTime:
|
||||
# continue
|
||||
# except KeyError:
|
||||
# pass
|
||||
# if len(knownnodes.knownNodes[stream]) < int(BMConfigParser().get("knownnodes", "maxnodes")):
|
||||
# with knownnodes.knownNodesLock:
|
||||
# try:
|
||||
# knownnodes.knownNodes[stream][peer]["lastseen"] = seenTime
|
||||
# except (TypeError, KeyError):
|
||||
# knownnodes.knownNodes[stream][peer] = {
|
||||
# "lastseen": seenTime,
|
||||
# "rating": 0,
|
||||
# "self": False,
|
||||
# }
|
||||
# addrQueue.put((stream, peer, self.destination))
|
||||
# return True
|
||||
|
||||
def bm_command_portcheck(self):
|
||||
portCheckerQueue.put(state.Peer(self.destination, self.peerNode.port))
|
||||
return True
|
||||
# def bm_command_portcheck(self):
|
||||
# portCheckerQueue.put(state.Peer(self.destination, self.peerNode.port))
|
||||
# return True
|
||||
|
||||
def bm_command_ping(self):
|
||||
self.append_write_buf(protocol.CreatePacket('pong'))
|
||||
return True
|
||||
# def bm_command_ping(self):
|
||||
# self.append_write_buf(protocol.CreatePacket('pong'))
|
||||
# return True
|
||||
|
||||
def bm_command_pong(self):
|
||||
# nothing really
|
||||
return True
|
||||
# def bm_command_pong(self):
|
||||
# # nothing really
|
||||
# return True
|
||||
|
||||
def bm_command_verack(self):
|
||||
self.verackReceived = True
|
||||
if self.verackSent:
|
||||
if self.isSSL:
|
||||
self.set_state("tls_init", length=self.payloadLength, expectBytes=0)
|
||||
return False
|
||||
self.set_state("connection_fully_established", length=self.payloadLength, expectBytes=0)
|
||||
return False
|
||||
return True
|
||||
# def bm_command_verack(self):
|
||||
# self.verackReceived = True
|
||||
# if self.verackSent:
|
||||
# if self.isSSL:
|
||||
# self.set_state("tls_init", length=self.payloadLength, expectBytes=0)
|
||||
# return False
|
||||
# self.set_state("connection_fully_established", length=self.payloadLength, expectBytes=0)
|
||||
# return False
|
||||
# return True
|
||||
|
||||
def bm_command_version(self):
|
||||
self.remoteProtocolVersion, self.services, self.timestamp, self.sockNode, self.peerNode, self.nonce, \
|
||||
self.userAgent, self.streams = self.decode_payload_content("IQQiiQlsLv")
|
||||
self.nonce = struct.pack('>Q', self.nonce)
|
||||
self.timeOffset = self.timestamp - int(time.time())
|
||||
logger.debug("remoteProtocolVersion: %i", self.remoteProtocolVersion)
|
||||
logger.debug("services: 0x%08X", self.services)
|
||||
logger.debug("time offset: %i", self.timestamp - int(time.time()))
|
||||
logger.debug("my external IP: %s", self.sockNode.host)
|
||||
logger.debug("remote node incoming address: %s:%i", self.destination.host, self.peerNode.port)
|
||||
logger.debug("user agent: %s", self.userAgent)
|
||||
logger.debug("streams: [%s]", ",".join(map(str,self.streams)))
|
||||
if not self.peerValidityChecks():
|
||||
# TODO ABORT
|
||||
return True
|
||||
#shared.connectedHostsList[self.destination] = self.streams[0]
|
||||
self.append_write_buf(protocol.CreatePacket('verack'))
|
||||
self.verackSent = True
|
||||
if not self.isOutbound:
|
||||
self.append_write_buf(protocol.assembleVersionMessage(self.destination.host, self.destination.port, \
|
||||
network.connectionpool.BMConnectionPool().streams, True, nodeid=self.nodeid))
|
||||
#print "%s:%i: Sending version" % (self.destination.host, self.destination.port)
|
||||
if ((self.services & protocol.NODE_SSL == protocol.NODE_SSL) and
|
||||
protocol.haveSSL(not self.isOutbound)):
|
||||
self.isSSL = True
|
||||
if self.verackReceived:
|
||||
if self.isSSL:
|
||||
self.set_state("tls_init", length=self.payloadLength, expectBytes=0)
|
||||
return False
|
||||
self.set_state("connection_fully_established", length=self.payloadLength, expectBytes=0)
|
||||
return False
|
||||
return True
|
||||
# def bm_command_version(self):
|
||||
# self.remoteProtocolVersion, self.services, self.timestamp, self.sockNode, self.peerNode, self.nonce, \
|
||||
# self.userAgent, self.streams = self.decode_payload_content("IQQiiQlsLv")
|
||||
# self.nonce = struct.pack('>Q', self.nonce)
|
||||
# self.timeOffset = self.timestamp - int(time.time())
|
||||
# logger.debug("remoteProtocolVersion: %i", self.remoteProtocolVersion)
|
||||
# logger.debug("services: 0x%08X", self.services)
|
||||
# logger.debug("time offset: %i", self.timestamp - int(time.time()))
|
||||
# logger.debug("my external IP: %s", self.sockNode.host)
|
||||
# logger.debug("remote node incoming address: %s:%i", self.destination.host, self.peerNode.port)
|
||||
# logger.debug("user agent: %s", self.userAgent)
|
||||
# logger.debug("streams: [%s]", ",".join(map(str,self.streams)))
|
||||
# if not self.peerValidityChecks():
|
||||
# # TODO ABORT
|
||||
# return True
|
||||
# #shared.connectedHostsList[self.destination] = self.streams[0]
|
||||
# self.append_write_buf(protocol.CreatePacket('verack'))
|
||||
# self.verackSent = True
|
||||
# if not self.isOutbound:
|
||||
# self.append_write_buf(protocol.assembleVersionMessage(self.destination.host, self.destination.port, \
|
||||
# network.connectionpool.BMConnectionPool().streams, True, nodeid=self.nodeid))
|
||||
# #print "%s:%i: Sending version" % (self.destination.host, self.destination.port)
|
||||
# if ((self.services & protocol.NODE_SSL == protocol.NODE_SSL) and
|
||||
# protocol.haveSSL(not self.isOutbound)):
|
||||
# self.isSSL = True
|
||||
# if self.verackReceived:
|
||||
# if self.isSSL:
|
||||
# self.set_state("tls_init", length=self.payloadLength, expectBytes=0)
|
||||
# return False
|
||||
# self.set_state("connection_fully_established", length=self.payloadLength, expectBytes=0)
|
||||
# return False
|
||||
# return True
|
||||
|
||||
def peerValidityChecks(self):
|
||||
if self.remoteProtocolVersion < 3:
|
||||
|
|
|
@ -50,13 +50,13 @@ class Dandelion():
|
|||
stream,
|
||||
self.poissonTimeout())
|
||||
|
||||
def setHashStream(self, hashId, stream=1):
|
||||
with self.lock:
|
||||
if hashId in self.hashMap:
|
||||
self.hashMap[hashId] = Stem(
|
||||
self.hashMap[hashId].child,
|
||||
stream,
|
||||
self.poissonTimeout())
|
||||
# def setHashStream(self, hashId, stream=1):
|
||||
# with self.lock:
|
||||
# if hashId in self.hashMap:
|
||||
# self.hashMap[hashId] = Stem(
|
||||
# self.hashMap[hashId].child,
|
||||
# stream,
|
||||
# self.poissonTimeout())
|
||||
|
||||
def removeHash(self, hashId, reason="no reason specified"):
|
||||
logging.debug("%s entering fluff mode due to %s.", ''.join('%02x'%ord(i) for i in hashId), reason)
|
||||
|
@ -72,16 +72,16 @@ class Dandelion():
|
|||
def objectChildStem(self, hashId):
|
||||
return self.hashMap[hashId].child
|
||||
|
||||
def maybeAddStem(self, connection):
|
||||
# fewer than MAX_STEMS outbound connections at last reshuffle?
|
||||
with self.lock:
|
||||
if len(self.stem) < MAX_STEMS:
|
||||
self.stem.append(connection)
|
||||
for k in (k for k, v in self.nodeMap.iteritems() if v is None):
|
||||
self.nodeMap[k] = connection
|
||||
for k, v in {k: v for k, v in self.hashMap.iteritems() if v.child is None}.iteritems():
|
||||
self.hashMap[k] = Stem(connection, v.stream, self.poissonTimeout())
|
||||
invQueue.put((v.stream, k, v.child))
|
||||
# def maybeAddStem(self, connection):
|
||||
# # fewer than MAX_STEMS outbound connections at last reshuffle?
|
||||
# with self.lock:
|
||||
# if len(self.stem) < MAX_STEMS:
|
||||
# self.stem.append(connection)
|
||||
# for k in (k for k, v in self.nodeMap.iteritems() if v is None):
|
||||
# self.nodeMap[k] = connection
|
||||
# for k, v in {k: v for k, v in self.hashMap.iteritems() if v.child is None}.iteritems():
|
||||
# self.hashMap[k] = Stem(connection, v.stream, self.poissonTimeout())
|
||||
# invQueue.put((v.stream, k, v.child))
|
||||
|
||||
|
||||
def maybeRemoveStem(self, connection):
|
||||
|
|
|
@ -12,9 +12,9 @@ import protocol
|
|||
from state import missingObjects
|
||||
|
||||
class DownloadThread(threading.Thread, StoppableThread):
|
||||
minPending = 200
|
||||
# minPending = 200
|
||||
maxRequestChunk = 1000
|
||||
requestTimeout = 60
|
||||
# requestTimeout = 60
|
||||
cleanInterval = 60
|
||||
requestExpires = 3600
|
||||
|
||||
|
|
|
@ -6,7 +6,7 @@ from proxy import Proxy, ProxyError, GeneralProxyError
|
|||
from socks5 import Socks5Connection, Socks5Resolver, Socks5AuthError, Socks5Error
|
||||
from socks4a import Socks4aConnection, Socks4aResolver, Socks4aError
|
||||
|
||||
class HttpError(ProxyError): pass
|
||||
# class HttpError(ProxyError): pass
|
||||
|
||||
|
||||
class HttpConnection(AdvancedDispatcher):
|
||||
|
@ -24,13 +24,13 @@ class HttpConnection(AdvancedDispatcher):
|
|||
self.set_state("http_request_sent", 0)
|
||||
return False
|
||||
|
||||
def state_http_request_sent(self):
|
||||
if len(self.read_buf) > 0:
|
||||
print "Received %ib" % (len(self.read_buf))
|
||||
self.read_buf = b""
|
||||
if not self.connected:
|
||||
self.set_state("close", 0)
|
||||
return False
|
||||
# def state_http_request_sent(self):
|
||||
# if len(self.read_buf) > 0:
|
||||
# print "Received %ib" % (len(self.read_buf))
|
||||
# self.read_buf = b""
|
||||
# if not self.connected:
|
||||
# self.set_state("close", 0)
|
||||
# return False
|
||||
|
||||
|
||||
class Socks5HttpConnection(Socks5Connection, HttpConnection):
|
||||
|
@ -38,9 +38,9 @@ class Socks5HttpConnection(Socks5Connection, HttpConnection):
|
|||
self.path = path
|
||||
Socks5Connection.__init__(self, address=(host, 80))
|
||||
|
||||
def state_socks_handshake_done(self):
|
||||
HttpConnection.state_init(self)
|
||||
return False
|
||||
# def state_socks_handshake_done(self):
|
||||
# HttpConnection.state_init(self)
|
||||
# return False
|
||||
|
||||
|
||||
class Socks4aHttpConnection(Socks4aConnection, HttpConnection):
|
||||
|
@ -48,9 +48,9 @@ class Socks4aHttpConnection(Socks4aConnection, HttpConnection):
|
|||
Socks4aConnection.__init__(self, address=(host, 80))
|
||||
self.path = path
|
||||
|
||||
def state_socks_handshake_done(self):
|
||||
HttpConnection.state_init(self)
|
||||
return False
|
||||
# def state_socks_handshake_done(self):
|
||||
# HttpConnection.state_init(self)
|
||||
# return False
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
|
|
|
@ -112,7 +112,7 @@ class HTTPServer(asyncore.dispatcher):
|
|||
self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
|
||||
self.set_reuse_addr()
|
||||
self.bind(('127.0.0.1', HTTPServer.port))
|
||||
self.connections = 0
|
||||
# self.connections = 0
|
||||
self.listen(5)
|
||||
|
||||
def handle_accept(self):
|
||||
|
@ -120,7 +120,7 @@ class HTTPServer(asyncore.dispatcher):
|
|||
if pair is not None:
|
||||
sock, addr = pair
|
||||
# print 'Incoming connection from %s' % repr(addr)
|
||||
self.connections += 1
|
||||
# self.connections += 1
|
||||
# if self.connections % 1000 == 0:
|
||||
# print "Processed %i connections, active %i" % (self.connections, len(asyncore.socket_map))
|
||||
HTTPRequestHandler(sock)
|
||||
|
@ -138,7 +138,7 @@ class HTTPSServer(HTTPServer):
|
|||
if pair is not None:
|
||||
sock, addr = pair
|
||||
# print 'Incoming connection from %s' % repr(addr)
|
||||
self.connections += 1
|
||||
# self.connections += 1
|
||||
# if self.connections % 1000 == 0:
|
||||
# print "Processed %i connections, active %i" % (self.connections, len(asyncore.socket_map))
|
||||
HTTPSRequestHandler(sock)
|
||||
|
|
|
@ -26,10 +26,10 @@ haveBloom = False
|
|||
|
||||
class ObjectTracker(object):
|
||||
invCleanPeriod = 300
|
||||
invInitialCapacity = 50000
|
||||
invErrorRate = 0.03
|
||||
# invInitialCapacity = 50000
|
||||
# invErrorRate = 0.03
|
||||
trackingExpires = 3600
|
||||
initialTimeOffset = 60
|
||||
# initialTimeOffset = 60
|
||||
|
||||
def __init__(self):
|
||||
self.objectsNewToMe = RandomTrackingDict()
|
||||
|
@ -39,17 +39,17 @@ class ObjectTracker(object):
|
|||
self.initAddrBloom()
|
||||
self.lastCleaned = time.time()
|
||||
|
||||
def initInvBloom(self):
|
||||
if haveBloom:
|
||||
# def initInvBloom(self):
|
||||
# if haveBloom:
|
||||
# lock?
|
||||
self.invBloom = BloomFilter(capacity=ObjectTracker.invInitialCapacity,
|
||||
error_rate=ObjectTracker.invErrorRate)
|
||||
# self.invBloom = BloomFilter(capacity=ObjectTracker.invInitialCapacity,
|
||||
# error_rate=ObjectTracker.invErrorRate)
|
||||
|
||||
def initAddrBloom(self):
|
||||
if haveBloom:
|
||||
# def initAddrBloom(self):
|
||||
# if haveBloom:
|
||||
# lock?
|
||||
self.addrBloom = BloomFilter(capacity=ObjectTracker.invInitialCapacity,
|
||||
error_rate=ObjectTracker.invErrorRate)
|
||||
# self.addrBloom = BloomFilter(capacity=ObjectTracker.invInitialCapacity,
|
||||
# error_rate=ObjectTracker.invErrorRate)
|
||||
|
||||
def clean(self):
|
||||
if self.lastCleaned < time.time() - ObjectTracker.invCleanPeriod:
|
||||
|
@ -65,23 +65,23 @@ class ObjectTracker(object):
|
|||
self.objectsNewToThem = {k: v for k, v in self.objectsNewToThem.iteritems() if v >= deadline}
|
||||
self.lastCleaned = time.time()
|
||||
|
||||
def hasObj(self, hashid):
|
||||
if haveBloom:
|
||||
return hashid in self.invBloom
|
||||
else:
|
||||
return hashid in self.objectsNewToMe
|
||||
# def hasObj(self, hashid):
|
||||
# if haveBloom:
|
||||
# return hashid in self.invBloom
|
||||
# else:
|
||||
# return hashid in self.objectsNewToMe
|
||||
|
||||
def handleReceivedInventory(self, hashId):
|
||||
if haveBloom:
|
||||
self.invBloom.add(hashId)
|
||||
try:
|
||||
with self.objectsNewToThemLock:
|
||||
del self.objectsNewToThem[hashId]
|
||||
except KeyError:
|
||||
pass
|
||||
if hashId not in missingObjects:
|
||||
missingObjects[hashId] = time.time()
|
||||
self.objectsNewToMe[hashId] = True
|
||||
# def handleReceivedInventory(self, hashId):
|
||||
# if haveBloom:
|
||||
# self.invBloom.add(hashId)
|
||||
# try:
|
||||
# with self.objectsNewToThemLock:
|
||||
# del self.objectsNewToThem[hashId]
|
||||
# except KeyError:
|
||||
# pass
|
||||
# if hashId not in missingObjects:
|
||||
# missingObjects[hashId] = time.time()
|
||||
# self.objectsNewToMe[hashId] = True
|
||||
|
||||
def handleReceivedObject(self, streamNumber, hashid):
|
||||
for i in network.connectionpool.BMConnectionPool().inboundConnections.values() + network.connectionpool.BMConnectionPool().outboundConnections.values():
|
||||
|
@ -106,13 +106,13 @@ class ObjectTracker(object):
|
|||
except KeyError:
|
||||
pass
|
||||
|
||||
def hasAddr(self, addr):
|
||||
if haveBloom:
|
||||
return addr in self.invBloom
|
||||
# def hasAddr(self, addr):
|
||||
# if haveBloom:
|
||||
# return addr in self.invBloom
|
||||
|
||||
def addAddr(self, hashid):
|
||||
if haveBloom:
|
||||
self.addrBloom.add(hashid)
|
||||
# def addAddr(self, hashid):
|
||||
# if haveBloom:
|
||||
# self.addrBloom.add(hashid)
|
||||
|
||||
# addr sending -> per node upload queue, and flush every minute or so
|
||||
# inv sending -> if not in bloom, inv immediately, otherwise put into a per node upload queue and flush every minute or so
|
||||
|
|
|
@ -71,13 +71,13 @@ class Proxy(AdvancedDispatcher):
|
|||
raise ValueError
|
||||
self.__class__._onion_proxy = address
|
||||
|
||||
@property
|
||||
def onion_auth(self):
|
||||
return self.__class__._onion_auth
|
||||
# @property
|
||||
# def onion_auth(self):
|
||||
# return self.__class__._onion_auth
|
||||
|
||||
@onion_auth.setter
|
||||
def onion_auth(self, authTuple):
|
||||
self.__class__._onion_auth = authTuple
|
||||
# @onion_auth.setter
|
||||
# def onion_auth(self, authTuple):
|
||||
# self.__class__._onion_auth = authTuple
|
||||
|
||||
def __init__(self, address):
|
||||
if not isinstance(address, state.Peer):
|
||||
|
|
|
@ -54,33 +54,33 @@ class Socks4aConnection(Socks4a):
|
|||
def __init__(self, address):
|
||||
Socks4a.__init__(self, address=address)
|
||||
|
||||
def state_auth_done(self):
|
||||
# Now we can request the actual connection
|
||||
rmtrslv = False
|
||||
self.append_write_buf(struct.pack('>BBH', 0x04, 0x01, self.destination[1]))
|
||||
# If the given destination address is an IP address, we'll
|
||||
# use the IPv4 address request even if remote resolving was specified.
|
||||
try:
|
||||
self.ipaddr = socket.inet_aton(self.destination[0])
|
||||
self.append_write_buf(self.ipaddr)
|
||||
except socket.error:
|
||||
# Well it's not an IP number, so it's probably a DNS name.
|
||||
if Proxy._remote_dns:
|
||||
# Resolve remotely
|
||||
rmtrslv = True
|
||||
self.ipaddr = None
|
||||
self.append_write_buf(struct.pack("BBBB", 0x00, 0x00, 0x00, 0x01))
|
||||
else:
|
||||
# Resolve locally
|
||||
self.ipaddr = socket.inet_aton(socket.gethostbyname(self.destination[0]))
|
||||
self.append_write_buf(self.ipaddr)
|
||||
if self._auth:
|
||||
self.append_write_buf(self._auth[0])
|
||||
self.append_write_buf(chr(0x00).encode())
|
||||
if rmtrslv:
|
||||
self.append_write_buf(self.destination[0] + chr(0x00).encode())
|
||||
self.set_state("pre_connect", length=0, expectBytes=8)
|
||||
return True
|
||||
# def state_auth_done(self):
|
||||
# # Now we can request the actual connection
|
||||
# rmtrslv = False
|
||||
# self.append_write_buf(struct.pack('>BBH', 0x04, 0x01, self.destination[1]))
|
||||
# # If the given destination address is an IP address, we'll
|
||||
# # use the IPv4 address request even if remote resolving was specified.
|
||||
# try:
|
||||
# self.ipaddr = socket.inet_aton(self.destination[0])
|
||||
# self.append_write_buf(self.ipaddr)
|
||||
# except socket.error:
|
||||
# # Well it's not an IP number, so it's probably a DNS name.
|
||||
# if Proxy._remote_dns:
|
||||
# # Resolve remotely
|
||||
# rmtrslv = True
|
||||
# self.ipaddr = None
|
||||
# self.append_write_buf(struct.pack("BBBB", 0x00, 0x00, 0x00, 0x01))
|
||||
# else:
|
||||
# # Resolve locally
|
||||
# self.ipaddr = socket.inet_aton(socket.gethostbyname(self.destination[0]))
|
||||
# self.append_write_buf(self.ipaddr)
|
||||
# if self._auth:
|
||||
# self.append_write_buf(self._auth[0])
|
||||
# self.append_write_buf(chr(0x00).encode())
|
||||
# if rmtrslv:
|
||||
# self.append_write_buf(self.destination[0] + chr(0x00).encode())
|
||||
# self.set_state("pre_connect", length=0, expectBytes=8)
|
||||
# return True
|
||||
|
||||
def state_pre_connect(self):
|
||||
try:
|
||||
|
@ -96,16 +96,16 @@ class Socks4aResolver(Socks4a):
|
|||
self.port = 8444
|
||||
Socks4a.__init__(self, address=(self.host, self.port))
|
||||
|
||||
def state_auth_done(self):
|
||||
# Now we can request the actual connection
|
||||
self.append_write_buf(struct.pack('>BBH', 0x04, 0xF0, self.destination[1]))
|
||||
self.append_write_buf(struct.pack("BBBB", 0x00, 0x00, 0x00, 0x01))
|
||||
if self._auth:
|
||||
self.append_write_buf(self._auth[0])
|
||||
self.append_write_buf(chr(0x00).encode())
|
||||
self.append_write_buf(self.host + chr(0x00).encode())
|
||||
self.set_state("pre_connect", length=0, expectBytes=8)
|
||||
return True
|
||||
# def state_auth_done(self):
|
||||
# # Now we can request the actual connection
|
||||
# self.append_write_buf(struct.pack('>BBH', 0x04, 0xF0, self.destination[1]))
|
||||
# self.append_write_buf(struct.pack("BBBB", 0x00, 0x00, 0x00, 0x01))
|
||||
# if self._auth:
|
||||
# self.append_write_buf(self._auth[0])
|
||||
# self.append_write_buf(chr(0x00).encode())
|
||||
# self.append_write_buf(self.host + chr(0x00).encode())
|
||||
# self.set_state("pre_connect", length=0, expectBytes=8)
|
||||
# return True
|
||||
|
||||
def resolved(self):
|
||||
print "Resolved %s as %s" % (self.host, self.proxy_sock_name())
|
||||
|
|
|
@ -38,40 +38,40 @@ class Socks5(Proxy):
|
|||
self.set_state("auth_1", length=0, expectBytes=2)
|
||||
return True
|
||||
|
||||
def state_auth_1(self):
|
||||
ret = struct.unpack('BB', self.read_buf)
|
||||
if ret[0] != 5:
|
||||
# general error
|
||||
raise GeneralProxyError(1)
|
||||
elif ret[1] == 0:
|
||||
# no auth required
|
||||
self.set_state("auth_done", length=2)
|
||||
elif ret[1] == 2:
|
||||
# username/password
|
||||
self.append_write_buf(struct.pack('BB', 1, len(self._auth[0])) + \
|
||||
self._auth[0] + struct.pack('B', len(self._auth[1])) + \
|
||||
self._auth[1])
|
||||
self.set_state("auth_needed", length=2, expectBytes=2)
|
||||
else:
|
||||
if ret[1] == 0xff:
|
||||
# auth error
|
||||
raise Socks5AuthError(2)
|
||||
else:
|
||||
# other error
|
||||
raise GeneralProxyError(1)
|
||||
return True
|
||||
# def state_auth_1(self):
|
||||
# ret = struct.unpack('BB', self.read_buf)
|
||||
# if ret[0] != 5:
|
||||
# # general error
|
||||
# raise GeneralProxyError(1)
|
||||
# elif ret[1] == 0:
|
||||
# # no auth required
|
||||
# self.set_state("auth_done", length=2)
|
||||
# elif ret[1] == 2:
|
||||
# # username/password
|
||||
# self.append_write_buf(struct.pack('BB', 1, len(self._auth[0])) + \
|
||||
# self._auth[0] + struct.pack('B', len(self._auth[1])) + \
|
||||
# self._auth[1])
|
||||
# self.set_state("auth_needed", length=2, expectBytes=2)
|
||||
# else:
|
||||
# if ret[1] == 0xff:
|
||||
# # auth error
|
||||
# raise Socks5AuthError(2)
|
||||
# else:
|
||||
# # other error
|
||||
# raise GeneralProxyError(1)
|
||||
# return True
|
||||
|
||||
def state_auth_needed(self):
|
||||
ret = struct.unpack('BB', self.read_buf[0:2])
|
||||
if ret[0] != 1:
|
||||
# general error
|
||||
raise GeneralProxyError(1)
|
||||
if ret[1] != 0:
|
||||
# auth error
|
||||
raise Socks5AuthError(3)
|
||||
# all ok
|
||||
self.set_state("auth_done", length=2)
|
||||
return True
|
||||
# def state_auth_needed(self):
|
||||
# ret = struct.unpack('BB', self.read_buf[0:2])
|
||||
# if ret[0] != 1:
|
||||
# # general error
|
||||
# raise GeneralProxyError(1)
|
||||
# if ret[1] != 0:
|
||||
# # auth error
|
||||
# raise Socks5AuthError(3)
|
||||
# # all ok
|
||||
# self.set_state("auth_done", length=2)
|
||||
# return True
|
||||
|
||||
def state_pre_connect(self):
|
||||
# Get the response
|
||||
|
@@ -95,30 +95,30 @@ class Socks5(Proxy):
raise GeneralProxyError(1)
|
||||
return True
|
||||
|
||||
def state_proxy_addr_1(self):
|
||||
self.boundaddr = self.read_buf[0:4]
|
||||
self.set_state("proxy_port", length=4, expectBytes=2)
|
||||
return True
|
||||
# def state_proxy_addr_1(self):
|
||||
# self.boundaddr = self.read_buf[0:4]
|
||||
# self.set_state("proxy_port", length=4, expectBytes=2)
|
||||
# return True
|
||||
|
||||
def state_proxy_addr_2_1(self):
|
||||
self.address_length = ord(self.read_buf[0:1])
|
||||
self.set_state("proxy_addr_2_2", length=1, expectBytes=self.address_length)
|
||||
return True
|
||||
# def state_proxy_addr_2_1(self):
|
||||
# self.address_length = ord(self.read_buf[0:1])
|
||||
# self.set_state("proxy_addr_2_2", length=1, expectBytes=self.address_length)
|
||||
# return True
|
||||
|
||||
def state_proxy_addr_2_2(self):
|
||||
self.boundaddr = self.read_buf[0:self.address_length]
|
||||
self.set_state("proxy_port", length=self.address_length, expectBytes=2)
|
||||
return True
|
||||
# def state_proxy_addr_2_2(self):
|
||||
# self.boundaddr = self.read_buf[0:self.address_length]
|
||||
# self.set_state("proxy_port", length=self.address_length, expectBytes=2)
|
||||
# return True
|
||||
|
||||
def state_proxy_port(self):
|
||||
self.boundport = struct.unpack(">H", self.read_buf[0:2])[0]
|
||||
self.__proxysockname = (self.boundaddr, self.boundport)
|
||||
if self.ipaddr is not None:
|
||||
self.__proxypeername = (socket.inet_ntoa(self.ipaddr), self.destination[1])
|
||||
else:
|
||||
self.__proxypeername = (self.destination[0], self.destport)
|
||||
self.set_state("proxy_handshake_done", length=2)
|
||||
return True
|
||||
# def state_proxy_port(self):
|
||||
# self.boundport = struct.unpack(">H", self.read_buf[0:2])[0]
|
||||
# self.__proxysockname = (self.boundaddr, self.boundport)
|
||||
# if self.ipaddr is not None:
|
||||
# self.__proxypeername = (socket.inet_ntoa(self.ipaddr), self.destination[1])
|
||||
# else:
|
||||
# self.__proxypeername = (self.destination[0], self.destport)
|
||||
# self.set_state("proxy_handshake_done", length=2)
|
||||
# return True
|
||||
|
||||
def proxy_sock_name(self):
|
||||
return socket.inet_ntoa(self.__proxysockname[0])
|
||||
|
@@ -128,27 +128,27 @@ class Socks5Connection(Socks5):
def __init__(self, address):
|
||||
Socks5.__init__(self, address=address)
|
||||
|
||||
def state_auth_done(self):
|
||||
# Now we can request the actual connection
|
||||
self.append_write_buf(struct.pack('BBB', 0x05, 0x01, 0x00))
|
||||
# If the given destination address is an IP address, we'll
|
||||
# use the IPv4 address request even if remote resolving was specified.
|
||||
try:
|
||||
self.ipaddr = socket.inet_aton(self.destination[0])
|
||||
self.append_write_buf(chr(0x01).encode() + self.ipaddr)
|
||||
except socket.error:
|
||||
# Well it's not an IP number, so it's probably a DNS name.
|
||||
if Proxy._remote_dns:
|
||||
# Resolve remotely
|
||||
self.ipaddr = None
|
||||
self.append_write_buf(chr(0x03).encode() + chr(len(self.destination[0])).encode() + self.destination[0])
|
||||
else:
|
||||
# Resolve locally
|
||||
self.ipaddr = socket.inet_aton(socket.gethostbyname(self.destination[0]))
|
||||
self.append_write_buf(chr(0x01).encode() + self.ipaddr)
|
||||
self.append_write_buf(struct.pack(">H", self.destination[1]))
|
||||
self.set_state("pre_connect", length=0, expectBytes=4)
|
||||
return True
|
||||
# def state_auth_done(self):
|
||||
# # Now we can request the actual connection
|
||||
# self.append_write_buf(struct.pack('BBB', 0x05, 0x01, 0x00))
|
||||
# # If the given destination address is an IP address, we'll
|
||||
# # use the IPv4 address request even if remote resolving was specified.
|
||||
# try:
|
||||
# self.ipaddr = socket.inet_aton(self.destination[0])
|
||||
# self.append_write_buf(chr(0x01).encode() + self.ipaddr)
|
||||
# except socket.error:
|
||||
# # Well it's not an IP number, so it's probably a DNS name.
|
||||
# if Proxy._remote_dns:
|
||||
# # Resolve remotely
|
||||
# self.ipaddr = None
|
||||
# self.append_write_buf(chr(0x03).encode() + chr(len(self.destination[0])).encode() + self.destination[0])
|
||||
# else:
|
||||
# # Resolve locally
|
||||
# self.ipaddr = socket.inet_aton(socket.gethostbyname(self.destination[0]))
|
||||
# self.append_write_buf(chr(0x01).encode() + self.ipaddr)
|
||||
# self.append_write_buf(struct.pack(">H", self.destination[1]))
|
||||
# self.set_state("pre_connect", length=0, expectBytes=4)
|
||||
# return True
|
||||
|
||||
def state_pre_connect(self):
|
||||
try:
|
||||
|
@@ -164,13 +164,13 @@ class Socks5Resolver(Socks5):
self.port = 8444
|
||||
Socks5.__init__(self, address=(self.host, self.port))
|
||||
|
||||
def state_auth_done(self):
|
||||
# Now we can request the actual connection
|
||||
self.append_write_buf(struct.pack('BBB', 0x05, 0xF0, 0x00))
|
||||
self.append_write_buf(chr(0x03).encode() + chr(len(self.host)).encode() + str(self.host))
|
||||
self.append_write_buf(struct.pack(">H", self.port))
|
||||
self.set_state("pre_connect", length=0, expectBytes=4)
|
||||
return True
|
||||
# def state_auth_done(self):
|
||||
# # Now we can request the actual connection
|
||||
# self.append_write_buf(struct.pack('BBB', 0x05, 0xF0, 0x00))
|
||||
# self.append_write_buf(chr(0x03).encode() + chr(len(self.host)).encode() + str(self.host))
|
||||
# self.append_write_buf(struct.pack(">H", self.port))
|
||||
# self.set_state("pre_connect", length=0, expectBytes=4)
|
||||
# return True
|
||||
|
||||
def resolved(self):
|
||||
print "Resolved %s as %s" % (self.host, self.proxy_sock_name())
|
||||
|
|
|
@@ -66,10 +66,10 @@ class TCPConnection(BMProto, TLSDispatcher):
self.connect(self.destination)
|
||||
logger.debug("Connecting to %s:%i", self.destination.host, self.destination.port)
|
||||
encodedAddr = protocol.encodeHost(self.destination.host)
|
||||
if protocol.checkIPAddress(encodedAddr, True) and not protocol.checkSocksIP(self.destination.host):
|
||||
self.local = True
|
||||
else:
|
||||
self.local = False
|
||||
# if protocol.checkIPAddress(encodedAddr, True) and not protocol.checkSocksIP(self.destination.host):
|
||||
# self.local = True
|
||||
# else:
|
||||
# self.local = False
|
||||
#shared.connectedHostsList[self.destination] = 0
|
||||
ObjectTracker.__init__(self)
|
||||
self.bm_proto_reset()
|
||||
|
@@ -91,68 +91,68 @@ class TCPConnection(BMProto, TLSDispatcher):
logger.debug("Skipping processing getdata due to missing object for %.2fs", delay)
|
||||
self.skipUntil = time.time() + delay
|
||||
|
||||
def state_connection_fully_established(self):
|
||||
self.set_connection_fully_established()
|
||||
self.set_state("bm_header")
|
||||
self.bm_proto_reset()
|
||||
return True
|
||||
# def state_connection_fully_established(self):
|
||||
# self.set_connection_fully_established()
|
||||
# self.set_state("bm_header")
|
||||
# self.bm_proto_reset()
|
||||
# return True
|
||||
|
||||
def set_connection_fully_established(self):
|
||||
if not self.isOutbound and not self.local:
|
||||
shared.clientHasReceivedIncomingConnections = True
|
||||
UISignalQueue.put(('setStatusIcon', 'green'))
|
||||
UISignalQueue.put(('updateNetworkStatusTab', (self.isOutbound, True, self.destination)))
|
||||
self.antiIntersectionDelay(True)
|
||||
self.fullyEstablished = True
|
||||
if self.isOutbound:
|
||||
knownnodes.increaseRating(self.destination)
|
||||
if self.isOutbound:
|
||||
Dandelion().maybeAddStem(self)
|
||||
self.sendAddr()
|
||||
self.sendBigInv()
|
||||
# def set_connection_fully_established(self):
|
||||
# if not self.isOutbound and not self.local:
|
||||
# shared.clientHasReceivedIncomingConnections = True
|
||||
# UISignalQueue.put(('setStatusIcon', 'green'))
|
||||
# UISignalQueue.put(('updateNetworkStatusTab', (self.isOutbound, True, self.destination)))
|
||||
# self.antiIntersectionDelay(True)
|
||||
# self.fullyEstablished = True
|
||||
# if self.isOutbound:
|
||||
# knownnodes.increaseRating(self.destination)
|
||||
# if self.isOutbound:
|
||||
# Dandelion().maybeAddStem(self)
|
||||
# self.sendAddr()
|
||||
# self.sendBigInv()
|
||||
|
||||
def sendAddr(self):
|
||||
# We are going to share a maximum number of 1000 addrs (per overlapping
|
||||
# stream) with our peer. 500 from overlapping streams, 250 from the
|
||||
# left child stream, and 250 from the right child stream.
|
||||
maxAddrCount = BMConfigParser().safeGetInt("bitmessagesettings", "maxaddrperstreamsend", 500)
|
||||
# def sendAddr(self):
|
||||
# # We are going to share a maximum number of 1000 addrs (per overlapping
|
||||
# # stream) with our peer. 500 from overlapping streams, 250 from the
|
||||
# # left child stream, and 250 from the right child stream.
|
||||
# maxAddrCount = BMConfigParser().safeGetInt("bitmessagesettings", "maxaddrperstreamsend", 500)
|
||||
|
||||
# init
|
||||
addressCount = 0
|
||||
payload = b''
|
||||
# # init
|
||||
# # addressCount = 0
|
||||
# payload = b''
|
||||
|
||||
templist = []
|
||||
addrs = {}
|
||||
for stream in self.streams:
|
||||
with knownnodes.knownNodesLock:
|
||||
if len(knownnodes.knownNodes[stream]) > 0:
|
||||
filtered = {k: v for k, v in knownnodes.knownNodes[stream].items()
|
||||
if v["lastseen"] > (int(time.time()) - shared.maximumAgeOfNodesThatIAdvertiseToOthers)}
|
||||
elemCount = len(filtered)
|
||||
if elemCount > maxAddrCount:
|
||||
elemCount = maxAddrCount
|
||||
# only if more recent than 3 hours
|
||||
addrs[stream] = random.sample(filtered.items(), elemCount)
|
||||
# sent 250 only if the remote isn't interested in it
|
||||
if len(knownnodes.knownNodes[stream * 2]) > 0 and stream not in self.streams:
|
||||
filtered = {k: v for k, v in knownnodes.knownNodes[stream*2].items()
|
||||
if v["lastseen"] > (int(time.time()) - shared.maximumAgeOfNodesThatIAdvertiseToOthers)}
|
||||
elemCount = len(filtered)
|
||||
if elemCount > maxAddrCount / 2:
|
||||
elemCount = int(maxAddrCount / 2)
|
||||
addrs[stream * 2] = random.sample(filtered.items(), elemCount)
|
||||
if len(knownnodes.knownNodes[(stream * 2) + 1]) > 0 and stream not in self.streams:
|
||||
filtered = {k: v for k, v in knownnodes.knownNodes[stream*2+1].items()
|
||||
if v["lastseen"] > (int(time.time()) - shared.maximumAgeOfNodesThatIAdvertiseToOthers)}
|
||||
elemCount = len(filtered)
|
||||
if elemCount > maxAddrCount / 2:
|
||||
elemCount = int(maxAddrCount / 2)
|
||||
addrs[stream * 2 + 1] = random.sample(filtered.items(), elemCount)
|
||||
for substream in addrs.keys():
|
||||
for peer, params in addrs[substream]:
|
||||
templist.append((substream, peer, params["lastseen"]))
|
||||
if len(templist) > 0:
|
||||
self.append_write_buf(BMProto.assembleAddr(templist))
|
||||
# templist = []
|
||||
# addrs = {}
|
||||
# for stream in self.streams:
|
||||
# with knownnodes.knownNodesLock:
|
||||
# if len(knownnodes.knownNodes[stream]) > 0:
|
||||
# filtered = {k: v for k, v in knownnodes.knownNodes[stream].items()
|
||||
# if v["lastseen"] > (int(time.time()) - shared.maximumAgeOfNodesThatIAdvertiseToOthers)}
|
||||
# elemCount = len(filtered)
|
||||
# if elemCount > maxAddrCount:
|
||||
# elemCount = maxAddrCount
|
||||
# # only if more recent than 3 hours
|
||||
# addrs[stream] = random.sample(filtered.items(), elemCount)
|
||||
# # sent 250 only if the remote isn't interested in it
|
||||
# if len(knownnodes.knownNodes[stream * 2]) > 0 and stream not in self.streams:
|
||||
# filtered = {k: v for k, v in knownnodes.knownNodes[stream*2].items()
|
||||
# if v["lastseen"] > (int(time.time()) - shared.maximumAgeOfNodesThatIAdvertiseToOthers)}
|
||||
# elemCount = len(filtered)
|
||||
# if elemCount > maxAddrCount / 2:
|
||||
# elemCount = int(maxAddrCount / 2)
|
||||
# addrs[stream * 2] = random.sample(filtered.items(), elemCount)
|
||||
# if len(knownnodes.knownNodes[(stream * 2) + 1]) > 0 and stream not in self.streams:
|
||||
# filtered = {k: v for k, v in knownnodes.knownNodes[stream*2+1].items()
|
||||
# if v["lastseen"] > (int(time.time()) - shared.maximumAgeOfNodesThatIAdvertiseToOthers)}
|
||||
# elemCount = len(filtered)
|
||||
# if elemCount > maxAddrCount / 2:
|
||||
# elemCount = int(maxAddrCount / 2)
|
||||
# addrs[stream * 2 + 1] = random.sample(filtered.items(), elemCount)
|
||||
# for substream in addrs.keys():
|
||||
# for peer, params in addrs[substream]:
|
||||
# templist.append((substream, peer, params["lastseen"]))
|
||||
# if len(templist) > 0:
|
||||
# self.append_write_buf(BMProto.assembleAddr(templist))
|
||||
|
||||
def sendBigInv(self):
|
||||
def sendChunk():
|
||||
|
@@ -275,14 +275,14 @@ class TCPServer(AdvancedDispatcher):
BMConfigParser().save()
|
||||
break
|
||||
self.destination = state.Peer(host, port)
|
||||
self.bound = True
|
||||
# self.bound = True
|
||||
self.listen(5)
|
||||
|
||||
def is_bound(self):
|
||||
try:
|
||||
return self.bound
|
||||
except AttributeError:
|
||||
return False
|
||||
# def is_bound(self):
|
||||
# try:
|
||||
# return self.bound
|
||||
# except AttributeError:
|
||||
# return False
|
||||
|
||||
def handle_accept(self):
|
||||
pair = self.accept()
|
||||
|
@@ -314,12 +314,12 @@ if __name__ == "__main__":
asyncore.loop(timeout=10, count=1)
|
||||
continue
|
||||
|
||||
proxy = Socks5BMConnection(host)
|
||||
while len(asyncore.socket_map) > 0:
|
||||
# proxy = Socks5BMConnection(host)
|
||||
# while len(asyncore.socket_map) > 0:
|
||||
# print "loop, state = %s" % (proxy.state)
|
||||
asyncore.loop(timeout=10, count=1)
|
||||
# asyncore.loop(timeout=10, count=1)
|
||||
|
||||
proxy = Socks4aBMConnection(host)
|
||||
while len(asyncore.socket_map) > 0:
|
||||
# proxy = Socks4aBMConnection(host)
|
||||
# while len(asyncore.socket_map) > 0:
|
||||
# print "loop, state = %s" % (proxy.state)
|
||||
asyncore.loop(timeout=10, count=1)
|
||||
# asyncore.loop(timeout=10, count=1)
|
||||
|
|
|
@@ -20,52 +20,52 @@ class TLSDispatcher(AdvancedDispatcher):
def __init__(self, address=None, sock=None,
|
||||
certfile=None, keyfile=None, server_side=False, ciphers=protocol.sslProtocolCiphers):
|
||||
self.want_read = self.want_write = True
|
||||
if certfile is None:
|
||||
self.certfile = os.path.join(paths.codePath(), 'sslkeys', 'cert.pem')
|
||||
else:
|
||||
self.certfile = certfile
|
||||
if keyfile is None:
|
||||
self.keyfile = os.path.join(paths.codePath(), 'sslkeys', 'key.pem')
|
||||
else:
|
||||
self.keyfile = keyfile
|
||||
self.server_side = server_side
|
||||
self.ciphers = ciphers
|
||||
# if certfile is None:
|
||||
# self.certfile = os.path.join(paths.codePath(), 'sslkeys', 'cert.pem')
|
||||
# else:
|
||||
# self.certfile = certfile
|
||||
# if keyfile is None:
|
||||
# self.keyfile = os.path.join(paths.codePath(), 'sslkeys', 'key.pem')
|
||||
# else:
|
||||
# self.keyfile = keyfile
|
||||
# self.server_side = server_side
|
||||
# self.ciphers = ciphers
|
||||
self.tlsStarted = False
|
||||
self.tlsDone = False
|
||||
self.tlsVersion = "N/A"
|
||||
self.isSSL = False
|
||||
# self.isSSL = False
|
||||
|
||||
def state_tls_init(self):
|
||||
self.isSSL = True
|
||||
self.tlsStarted = True
|
||||
# Once the connection has been established, it's safe to wrap the
|
||||
# socket.
|
||||
if sys.version_info >= (2,7,9):
|
||||
context = ssl.create_default_context(purpose = ssl.Purpose.SERVER_AUTH if self.server_side else ssl.Purpose.CLIENT_AUTH)
|
||||
context.set_ciphers(self.ciphers)
|
||||
context.set_ecdh_curve("secp256k1")
|
||||
context.check_hostname = False
|
||||
context.verify_mode = ssl.CERT_NONE
|
||||
# also exclude TLSv1 and TLSv1.1 in the future
|
||||
context.options = ssl.OP_ALL | ssl.OP_NO_SSLv2 | ssl.OP_NO_SSLv3 | ssl.OP_SINGLE_ECDH_USE | ssl.OP_CIPHER_SERVER_PREFERENCE
|
||||
self.sslSocket = context.wrap_socket(self.socket, server_side = self.server_side, do_handshake_on_connect=False)
|
||||
else:
|
||||
self.sslSocket = ssl.wrap_socket(self.socket,
|
||||
server_side=self.server_side,
|
||||
ssl_version=protocol.sslProtocolVersion,
|
||||
certfile=self.certfile,
|
||||
keyfile=self.keyfile,
|
||||
ciphers=self.ciphers,
|
||||
do_handshake_on_connect=False)
|
||||
self.sslSocket.setblocking(0)
|
||||
self.want_read = self.want_write = True
|
||||
self.set_state("tls_handshake")
|
||||
return False
|
||||
# if hasattr(self.socket, "context"):
|
||||
# self.socket.context.set_ecdh_curve("secp256k1")
|
||||
# def state_tls_init(self):
|
||||
# self.isSSL = True
|
||||
# self.tlsStarted = True
|
||||
# # Once the connection has been established, it's safe to wrap the
|
||||
# # socket.
|
||||
# if sys.version_info >= (2,7,9):
|
||||
# context = ssl.create_default_context(purpose = ssl.Purpose.SERVER_AUTH if self.server_side else ssl.Purpose.CLIENT_AUTH)
|
||||
# context.set_ciphers(self.ciphers)
|
||||
# context.set_ecdh_curve("secp256k1")
|
||||
# context.check_hostname = False
|
||||
# context.verify_mode = ssl.CERT_NONE
|
||||
# # also exclude TLSv1 and TLSv1.1 in the future
|
||||
# context.options = ssl.OP_ALL | ssl.OP_NO_SSLv2 | ssl.OP_NO_SSLv3 | ssl.OP_SINGLE_ECDH_USE | ssl.OP_CIPHER_SERVER_PREFERENCE
|
||||
# self.sslSocket = context.wrap_socket(self.socket, server_side = self.server_side, do_handshake_on_connect=False)
|
||||
# else:
|
||||
# self.sslSocket = ssl.wrap_socket(self.socket,
|
||||
# server_side=self.server_side,
|
||||
# ssl_version=protocol.sslProtocolVersion,
|
||||
# certfile=self.certfile,
|
||||
# keyfile=self.keyfile,
|
||||
# ciphers=self.ciphers,
|
||||
# do_handshake_on_connect=False)
|
||||
# self.sslSocket.setblocking(0)
|
||||
# self.want_read = self.want_write = True
|
||||
# self.set_state("tls_handshake")
|
||||
# return False
|
||||
# # if hasattr(self.socket, "context"):
|
||||
# # self.socket.context.set_ecdh_curve("secp256k1")
|
||||
|
||||
def state_tls_handshake(self):
|
||||
return False
|
||||
# def state_tls_handshake(self):
|
||||
# return False
|
||||
|
||||
def writable(self):
|
||||
try:
|
||||
|
|
|
@@ -69,55 +69,55 @@ class UDPSocket(BMProto):
def bm_command_error(self):
|
||||
return BMProto.bm_command_error(self)
|
||||
|
||||
def bm_command_getdata(self):
|
||||
return True
|
||||
# def bm_command_getdata(self):
|
||||
# return True
|
||||
# return BMProto.bm_command_getdata(self)
|
||||
|
||||
def bm_command_inv(self):
|
||||
return True
|
||||
# def bm_command_inv(self):
|
||||
# return True
|
||||
# return BMProto.bm_command_inv(self)
|
||||
|
||||
def bm_command_object(self):
|
||||
return BMProto.bm_command_object(self)
|
||||
|
||||
def bm_command_addr(self):
|
||||
# def bm_command_addr(self):
|
||||
# BMProto.bm_command_object(self)
|
||||
addresses = self._decode_addr()
|
||||
# addresses = self._decode_addr()
|
||||
# only allow peer discovery from private IPs in order to avoid attacks from random IPs on the internet
|
||||
if not self.local:
|
||||
return True
|
||||
remoteport = False
|
||||
for i in addresses:
|
||||
seenTime, stream, services, ip, port = i
|
||||
decodedIP = protocol.checkIPAddress(str(ip))
|
||||
if stream not in state.streamsInWhichIAmParticipating:
|
||||
continue
|
||||
if seenTime < time.time() - BMProto.maxTimeOffset or seenTime > time.time() + BMProto.maxTimeOffset:
|
||||
continue
|
||||
if decodedIP is False:
|
||||
# if the address isn't local, interpret it as the hosts' own announcement
|
||||
remoteport = port
|
||||
if remoteport is False:
|
||||
return True
|
||||
logger.debug("received peer discovery from %s:%i (port %i):", self.destination.host, self.destination.port, remoteport)
|
||||
if self.local:
|
||||
state.discoveredPeers[state.Peer(self.destination.host, remoteport)] = time.time()
|
||||
return True
|
||||
# if not self.local:
|
||||
# return True
|
||||
# remoteport = False
|
||||
# for i in addresses:
|
||||
# seenTime, stream, services, ip, port = i
|
||||
# decodedIP = protocol.checkIPAddress(str(ip))
|
||||
# if stream not in state.streamsInWhichIAmParticipating:
|
||||
# continue
|
||||
# if seenTime < time.time() - BMProto.maxTimeOffset or seenTime > time.time() + BMProto.maxTimeOffset:
|
||||
# continue
|
||||
# if decodedIP is False:
|
||||
# # if the address isn't local, interpret it as the hosts' own announcement
|
||||
# remoteport = port
|
||||
# if remoteport is False:
|
||||
# return True
|
||||
# logger.debug("received peer discovery from %s:%i (port %i):", self.destination.host, self.destination.port, remoteport)
|
||||
# if self.local:
|
||||
# state.discoveredPeers[state.Peer(self.destination.host, remoteport)] = time.time()
|
||||
# return True
|
||||
|
||||
def bm_command_portcheck(self):
|
||||
return True
|
||||
# def bm_command_portcheck(self):
|
||||
# return True
|
||||
|
||||
def bm_command_ping(self):
|
||||
return True
|
||||
# def bm_command_ping(self):
|
||||
# return True
|
||||
|
||||
def bm_command_pong(self):
|
||||
return True
|
||||
# def bm_command_pong(self):
|
||||
# return True
|
||||
|
||||
def bm_command_verack(self):
|
||||
return True
|
||||
# def bm_command_verack(self):
|
||||
# return True
|
||||
|
||||
def bm_command_version(self):
|
||||
return True
|
||||
# def bm_command_version(self):
|
||||
# return True
|
||||
|
||||
def handle_connect(self):
|
||||
return
|
||||
|
@@ -137,10 +137,10 @@ class UDPSocket(BMProto):
|
||||
self.destination = state.Peer(addr[0], addr[1])
|
||||
encodedAddr = protocol.encodeHost(addr[0])
|
||||
if protocol.checkIPAddress(encodedAddr, True):
|
||||
self.local = True
|
||||
else:
|
||||
self.local = False
|
||||
# if protocol.checkIPAddress(encodedAddr, True):
|
||||
# self.local = True
|
||||
# else:
|
||||
# self.local = False
|
||||
# overwrite the old buffer to avoid mixing data and so that self.local works correctly
|
||||
self.read_buf[0:] = recdata
|
||||
self.bm_proto_reset()
|
||||
|
@@ -165,12 +165,12 @@ if __name__ == "__main__":
asyncore.loop(timeout=10, count=1)
|
||||
continue
|
||||
|
||||
proxy = Socks5BMConnection(host)
|
||||
while len(asyncore.socket_map) > 0:
|
||||
# proxy = Socks5BMConnection(host)
|
||||
# while len(asyncore.socket_map) > 0:
|
||||
# print "loop, state = %s" % (proxy.state)
|
||||
asyncore.loop(timeout=10, count=1)
|
||||
# asyncore.loop(timeout=10, count=1)
|
||||
|
||||
proxy = Socks4aBMConnection(host)
|
||||
while len(asyncore.socket_map) > 0:
|
||||
# proxy = Socks4aBMConnection(host)
|
||||
# while len(asyncore.socket_map) > 0:
|
||||
# print "loop, state = %s" % (proxy.state)
|
||||
asyncore.loop(timeout=10, count=1)
|
||||
# asyncore.loop(timeout=10, count=1)
|
||||
|
|
4
src/openclpow.py
Normal file → Executable file
|
@@ -92,11 +92,11 @@ def do_opencl_pow(hash, target):
cl.enqueue_read_buffer(queue, dest_buf, output)
|
||||
queue.finish()
|
||||
progress += globamt
|
||||
sofar = time.time() - start
|
||||
# sofar = time.time() - start
|
||||
# logger.debug("Working for %.3fs, %.2f Mh/s", sofar, (progress / sofar) / 1000000)
|
||||
if shutdown != 0:
|
||||
raise Exception ("Interrupted")
|
||||
taken = time.time() - start
|
||||
# taken = time.time() - start
|
||||
# logger.debug("Took %d tries.", progress)
|
||||
return output[0][0]
|
||||
|
||||
|
|
|
@@ -16,7 +16,7 @@ class Image(qrcode.image.base.BaseImage):
def __init__(self, border, width, box_size):
|
||||
self.border = border
|
||||
self.width = width
|
||||
self.box_size = box_size
|
||||
# self.box_size = box_size
|
||||
size = (width + border * 2) * box_size
|
||||
self._image = QtGui.QImage(
|
||||
size, size, QtGui.QImage.Format_RGB16)
|
||||
|
@@ -25,16 +25,16 @@ class Image(qrcode.image.base.BaseImage):
def pixmap(self):
|
||||
return QtGui.QPixmap.fromImage(self._image)
|
||||
|
||||
def drawrect(self, row, col):
|
||||
painter = QtGui.QPainter(self._image)
|
||||
painter.fillRect(
|
||||
(col + self.border) * self.box_size,
|
||||
(row + self.border) * self.box_size,
|
||||
self.box_size, self.box_size,
|
||||
QtCore.Qt.black)
|
||||
# def drawrect(self, row, col):
|
||||
# painter = QtGui.QPainter(self._image)
|
||||
# painter.fillRect(
|
||||
# (col + self.border) * self.box_size,
|
||||
# (row + self.border) * self.box_size,
|
||||
# self.box_size, self.box_size,
|
||||
# QtCore.Qt.black)
|
||||
|
||||
def save(self, stream, kind=None):
|
||||
pass
|
||||
# def save(self, stream, kind=None):
|
||||
# pass
|
||||
|
||||
|
||||
class Ui_qrcodeDialog(object):
|
||||
|
|
|
@@ -126,30 +126,30 @@ def _doGPUPoW(target, initialHash):
logger.debug("GPU PoW done")
|
||||
return [trialValue, nonce]
|
||||
|
||||
def estimate(difficulty, format = False):
|
||||
ret = difficulty / 10
|
||||
if ret < 1:
|
||||
ret = 1
|
||||
if format:
|
||||
out = str(int(ret)) + " seconds"
|
||||
if ret > 60:
|
||||
ret /= 60
|
||||
out = str(int(ret)) + " minutes"
|
||||
if ret > 60:
|
||||
ret /= 60
|
||||
out = str(int(ret)) + " hours"
|
||||
if ret > 24:
|
||||
ret /= 24
|
||||
out = str(int(ret)) + " days"
|
||||
if ret > 7:
|
||||
out = str(int(ret)) + " weeks"
|
||||
if ret > 31:
|
||||
out = str(int(ret)) + " months"
|
||||
if ret > 366:
|
||||
ret /= 366
|
||||
out = str(int(ret)) + " years"
|
||||
else:
|
||||
return ret
|
||||
# def estimate(difficulty, format = False):
|
||||
# ret = difficulty / 10
|
||||
# if ret < 1:
|
||||
# ret = 1
|
||||
# if format:
|
||||
# out = str(int(ret)) + " seconds"
|
||||
# if ret > 60:
|
||||
# ret /= 60
|
||||
# out = str(int(ret)) + " minutes"
|
||||
# if ret > 60:
|
||||
# ret /= 60
|
||||
# out = str(int(ret)) + " hours"
|
||||
# if ret > 24:
|
||||
# ret /= 24
|
||||
# out = str(int(ret)) + " days"
|
||||
# if ret > 7:
|
||||
# out = str(int(ret)) + " weeks"
|
||||
# if ret > 31:
|
||||
# out = str(int(ret)) + " months"
|
||||
# if ret > 366:
|
||||
# ret /= 366
|
||||
# out = str(int(ret)) + " years"
|
||||
# else:
|
||||
# return ret
|
||||
|
||||
def getPowType():
|
||||
if openclpow.openclEnabled():
|
||||
|
@@ -252,7 +252,7 @@ def init():
bso = ctypes.WinDLL(os.path.join(paths.codePath(), "bitmsghash", bitmsglib))
|
||||
logger.info("Loaded C PoW DLL (stdcall) %s", bitmsglib)
|
||||
bmpow = bso.BitmessagePOW
|
||||
bmpow.restype = ctypes.c_ulonglong
|
||||
# bmpow.restype = ctypes.c_ulonglong
|
||||
_doCPoW(2**63, "")
|
||||
logger.info("Successfully tested C PoW DLL (stdcall) %s", bitmsglib)
|
||||
except:
|
||||
|
@@ -262,7 +262,7 @@ def init():
bso = ctypes.CDLL(os.path.join(paths.codePath(), "bitmsghash", bitmsglib))
|
||||
logger.info("Loaded C PoW DLL (cdecl) %s", bitmsglib)
|
||||
bmpow = bso.BitmessagePOW
|
||||
bmpow.restype = ctypes.c_ulonglong
|
||||
# bmpow.restype = ctypes.c_ulonglong
|
||||
_doCPoW(2**63, "")
|
||||
logger.info("Successfully tested C PoW DLL (cdecl) %s", bitmsglib)
|
||||
except:
|
||||
|
@@ -286,7 +286,7 @@ def init():
if bso:
|
||||
try:
|
||||
bmpow = bso.BitmessagePOW
|
||||
bmpow.restype = ctypes.c_ulonglong
|
||||
# bmpow.restype = ctypes.c_ulonglong
|
||||
except:
|
||||
bmpow = None
|
||||
else:
|
||||
|
|
|
@@ -29,17 +29,17 @@ NODE_DANDELION = 8
BITFIELD_DOESACK = 1
|
||||
|
||||
#Error types
|
||||
STATUS_WARNING = 0
|
||||
STATUS_ERROR = 1
|
||||
STATUS_FATAL = 2
|
||||
# STATUS_WARNING = 0
|
||||
# STATUS_ERROR = 1
|
||||
# STATUS_FATAL = 2
|
||||
|
||||
#Object types
|
||||
OBJECT_GETPUBKEY = 0
|
||||
OBJECT_PUBKEY = 1
|
||||
OBJECT_MSG = 2
|
||||
OBJECT_BROADCAST = 3
|
||||
OBJECT_I2P = 0x493250
|
||||
OBJECT_ADDR = 0x61646472
|
||||
# OBJECT_I2P = 0x493250
|
||||
# OBJECT_ADDR = 0x61646472
|
||||
|
||||
eightBytesOfRandomDataUsedToDetectConnectionsToSelf = pack(
|
||||
'>Q', random.randrange(1, 18446744073709551615))
|
||||
|
@@ -48,7 +48,7 @@ eightBytesOfRandomDataUsedToDetectConnectionsToSelf = pack(
#New code should use CreatePacket instead of Header.pack
|
||||
Header = Struct('!L12sL4s')
|
||||
|
||||
VersionPacket = Struct('>LqQ20s4s36sH')
|
||||
# VersionPacket = Struct('>LqQ20s4s36sH')
|
||||
|
||||
# Bitfield
|
||||
|
||||
|
@@ -306,17 +306,17 @@ def decryptAndCheckPubkeyPayload(data, address):
return 'failed'
|
||||
|
||||
readPosition = 0
|
||||
bitfieldBehaviors = decryptedData[readPosition:readPosition + 4]
|
||||
# bitfieldBehaviors = decryptedData[readPosition:readPosition + 4]
|
||||
readPosition += 4
|
||||
publicSigningKey = '\x04' + decryptedData[readPosition:readPosition + 64]
|
||||
readPosition += 64
|
||||
publicEncryptionKey = '\x04' + decryptedData[readPosition:readPosition + 64]
|
||||
readPosition += 64
|
||||
specifiedNonceTrialsPerByte, specifiedNonceTrialsPerByteLength = decodeVarint(
|
||||
decryptedData[readPosition:readPosition + 10])
|
||||
# specifiedNonceTrialsPerByte, specifiedNonceTrialsPerByteLength = decodeVarint(
|
||||
# decryptedData[readPosition:readPosition + 10])
|
||||
readPosition += specifiedNonceTrialsPerByteLength
|
||||
specifiedPayloadLengthExtraBytes, specifiedPayloadLengthExtraBytesLength = decodeVarint(
|
||||
decryptedData[readPosition:readPosition + 10])
|
||||
# specifiedPayloadLengthExtraBytes, specifiedPayloadLengthExtraBytesLength = decodeVarint(
|
||||
# decryptedData[readPosition:readPosition + 10])
|
||||
readPosition += specifiedPayloadLengthExtraBytesLength
|
||||
storedData += decryptedData[:readPosition]
|
||||
signedData += decryptedData[:readPosition]
|
||||
|
@@ -415,8 +415,8 @@ def checkAndShareObjectWithPeers(data):
def _checkAndShareUndefinedObjectWithPeers(data):
|
||||
embeddedTime, = unpack('>Q', data[8:16])
|
||||
readPosition = 20 # bypass nonce, time, and object type
|
||||
objectVersion, objectVersionLength = decodeVarint(
|
||||
data[readPosition:readPosition + 9])
|
||||
# objectVersion, objectVersionLength = decodeVarint(
|
||||
# data[readPosition:readPosition + 9])
|
||||
readPosition += objectVersionLength
|
||||
streamNumber, streamNumberLength = decodeVarint(
|
||||
data[readPosition:readPosition + 9])
|
||||
|
@@ -438,8 +438,8 @@ def _checkAndShareUndefinedObjectWithPeers(data):
def _checkAndShareMsgWithPeers(data):
|
||||
embeddedTime, = unpack('>Q', data[8:16])
|
||||
readPosition = 20 # bypass nonce, time, and object type
|
||||
objectVersion, objectVersionLength = decodeVarint(
|
||||
data[readPosition:readPosition + 9])
|
||||
# objectVersion, objectVersionLength = decodeVarint(
|
||||
# data[readPosition:readPosition + 9])
|
||||
readPosition += objectVersionLength
|
||||
streamNumber, streamNumberLength = decodeVarint(
|
||||
data[readPosition:readPosition + 9])
|
||||
|
|
|
@@ -16,4 +16,4 @@ __all__ = [
from .openssl import OpenSSL
|
||||
from .ecc import ECC
|
||||
from .cipher import Cipher
|
||||
from .hash import hmac_sha256, hmac_sha512, pbkdf2
|
||||
from .hash import hmac_sha256# , hmac_sha512, pbkdf2
|
||||
|
|
|
@@ -74,8 +74,8 @@ def hex_to_point(h): return (decode(h[2:66],16),decode(h[66:],16))
|
||||
def point_to_hex(p): return '04'+encode(p[0],16,64)+encode(p[1],16,64)
|
||||
|
||||
def multiply(privkey,pubkey):
|
||||
return point_to_hex(base10_multiply(hex_to_point(pubkey),decode(privkey,16)))
|
||||
# def multiply(privkey,pubkey):
|
||||
# return point_to_hex(base10_multiply(hex_to_point(pubkey),decode(privkey,16)))
|
||||
|
||||
def privtopub(privkey):
|
||||
return point_to_hex(base10_multiply(G,decode(privkey,16)))
|
||||
|
@@ -86,21 +86,21 @@ def add(p1,p2):
else:
|
||||
return point_to_hex(base10_add(hex_to_point(p1),hex_to_point(p2)))
|
||||
|
||||
def hash_160(string):
|
||||
intermed = hashlib.sha256(string).digest()
|
||||
ripemd160 = hashlib.new('ripemd160')
|
||||
ripemd160.update(intermed)
|
||||
return ripemd160.digest()
|
||||
# def hash_160(string):
|
||||
# intermed = hashlib.sha256(string).digest()
|
||||
# ripemd160 = hashlib.new('ripemd160')
|
||||
# ripemd160.update(intermed)
|
||||
# return ripemd160.digest()
|
||||
|
||||
def dbl_sha256(string):
|
||||
return hashlib.sha256(hashlib.sha256(string).digest()).digest()
|
||||
# def dbl_sha256(string):
|
||||
# return hashlib.sha256(hashlib.sha256(string).digest()).digest()
|
||||
|
||||
def bin_to_b58check(inp):
|
||||
inp_fmtd = '\x00' + inp
|
||||
leadingzbytes = len(re.match('^\x00*',inp_fmtd).group(0))
|
||||
checksum = dbl_sha256(inp_fmtd)[:4]
|
||||
return '1' * leadingzbytes + changebase(inp_fmtd+checksum,256,58)
|
||||
# def bin_to_b58check(inp):
|
||||
# inp_fmtd = '\x00' + inp
|
||||
# leadingzbytes = len(re.match('^\x00*',inp_fmtd).group(0))
|
||||
# checksum = dbl_sha256(inp_fmtd)[:4]
|
||||
# return '1' * leadingzbytes + changebase(inp_fmtd+checksum,256,58)
|
||||
|
||||
#Convert a public key (in hex) to a Bitcoin address
|
||||
def pubkey_to_address(pubkey):
|
||||
return bin_to_b58check(hash_160(changebase(pubkey,16,256)))
|
||||
# def pubkey_to_address(pubkey):
|
||||
# return bin_to_b58check(hash_160(changebase(pubkey,16,256)))
|
||||
|
|
20
src/pyelliptic/cipher.py
Normal file → Executable file
|
@@ -35,22 +35,22 @@ class Cipher:
else:
|
||||
raise Exception("RTFM ...")
|
||||
|
||||
@staticmethod
|
||||
def get_all_cipher():
|
||||
"""
|
||||
static method, returns all ciphers available
|
||||
"""
|
||||
return OpenSSL.cipher_algo.keys()
|
||||
# @staticmethod
|
||||
# def get_all_cipher():
|
||||
# """
|
||||
# static method, returns all ciphers available
|
||||
# """
|
||||
# return OpenSSL.cipher_algo.keys()
|
||||
|
||||
@staticmethod
|
||||
def get_blocksize(ciphername):
|
||||
cipher = OpenSSL.get_cipher(ciphername)
|
||||
return cipher.get_blocksize()
|
||||
|
||||
@staticmethod
|
||||
def gen_IV(ciphername):
|
||||
cipher = OpenSSL.get_cipher(ciphername)
|
||||
return OpenSSL.rand(cipher.get_blocksize())
|
||||
# @staticmethod
|
||||
# def gen_IV(ciphername):
|
||||
# cipher = OpenSSL.get_cipher(ciphername)
|
||||
# return OpenSSL.rand(cipher.get_blocksize())
|
||||
|
||||
def update(self, input):
|
||||
i = OpenSSL.c_int(0)
|
||||
|
|
80
src/pyelliptic/ecc.py
Normal file → Executable file
|
@@ -75,18 +75,18 @@ class ECC:
self.pubkey_y = pubkey_y
|
||||
self.privkey = privkey
|
||||
|
||||
@staticmethod
|
||||
def get_curves():
|
||||
"""
|
||||
static method, returns the list of all the curves available
|
||||
"""
|
||||
return OpenSSL.curves.keys()
|
||||
# @staticmethod
|
||||
# def get_curves():
|
||||
# """
|
||||
# static method, returns the list of all the curves available
|
||||
# """
|
||||
# return OpenSSL.curves.keys()
|
||||
|
||||
def get_curve(self):
|
||||
return OpenSSL.get_curve_by_id(self.curve)
|
||||
|
||||
def get_curve_id(self):
|
||||
return self.curve
|
||||
# def get_curve_id(self):
|
||||
# return self.curve
|
||||
|
||||
def get_pubkey(self):
|
||||
"""
|
||||
|
@@ -100,15 +100,15 @@ class ECC:
self.pubkey_y
|
||||
))
|
||||
|
||||
def get_privkey(self):
|
||||
"""
|
||||
High level function which returns
|
||||
curve(2) + len_of_privkey(2) + privkey
|
||||
"""
|
||||
return b''.join((pack('!H', self.curve),
|
||||
pack('!H', len(self.privkey)),
|
||||
self.privkey
|
||||
))
|
||||
# def get_privkey(self):
|
||||
# """
|
||||
# High level function which returns
|
||||
# curve(2) + len_of_privkey(2) + privkey
|
||||
# """
|
||||
# return b''.join((pack('!H', self.curve),
|
||||
# pack('!H', len(self.privkey)),
|
||||
# self.privkey
|
||||
# ))
|
||||
|
||||
@staticmethod
|
||||
def _decode_pubkey(pubkey):
|
||||
|
@@ -178,15 +178,15 @@ class ECC:
OpenSSL.BN_free(pub_key_x)
|
||||
OpenSSL.BN_free(pub_key_y)
|
||||
|
||||
def get_ecdh_key(self, pubkey):
|
||||
"""
|
||||
High level function. Compute public key with the local private key
|
||||
and returns a 512bits shared key
|
||||
"""
|
||||
curve, pubkey_x, pubkey_y, i = ECC._decode_pubkey(pubkey)
|
||||
if curve != self.curve:
|
||||
raise Exception("ECC keys must be from the same curve !")
|
||||
return sha512(self.raw_get_ecdh_key(pubkey_x, pubkey_y)).digest()
|
||||
# def get_ecdh_key(self, pubkey):
|
||||
# """
|
||||
# High level function. Compute public key with the local private key
|
||||
# and returns a 512bits shared key
|
||||
# """
|
||||
# curve, pubkey_x, pubkey_y, i = ECC._decode_pubkey(pubkey)
|
||||
# if curve != self.curve:
|
||||
# raise Exception("ECC keys must be from the same curve !")
|
||||
# return sha512(self.raw_get_ecdh_key(pubkey_x, pubkey_y)).digest()
|
||||
|
||||
def raw_get_ecdh_key(self, pubkey_x, pubkey_y):
|
||||
try:
|
||||
|
@@ -243,20 +243,20 @@ class ECC:
OpenSSL.EC_KEY_free(own_key)
|
||||
OpenSSL.BN_free(own_priv_key)
|
||||
|
||||
def check_key(self, privkey, pubkey):
|
||||
"""
|
||||
Check the public key and the private key.
|
||||
The private key is optional (replace by None)
|
||||
"""
|
||||
curve, pubkey_x, pubkey_y, i = ECC._decode_pubkey(pubkey)
|
||||
if privkey is None:
|
||||
raw_privkey = None
|
||||
curve2 = curve
|
||||
else:
|
||||
curve2, raw_privkey, i = ECC._decode_privkey(privkey)
|
||||
if curve != curve2:
|
||||
raise Exception("Bad public and private key")
|
||||
return self.raw_check_key(raw_privkey, pubkey_x, pubkey_y, curve)
|
||||
# def check_key(self, privkey, pubkey):
|
||||
# """
|
||||
# Check the public key and the private key.
|
||||
# The private key is optional (replace by None)
|
||||
# """
|
||||
# curve, pubkey_x, pubkey_y, i = ECC._decode_pubkey(pubkey)
|
||||
# if privkey is None:
|
||||
# raw_privkey = None
|
||||
# curve2 = curve
|
||||
# else:
|
||||
# curve2, raw_privkey, i = ECC._decode_privkey(privkey)
|
||||
# if curve != curve2:
|
||||
# raise Exception("Bad public and private key")
|
||||
# return self.raw_check_key(raw_privkey, pubkey_x, pubkey_y, curve)
|
||||
|
||||
def raw_check_key(self, privkey, pubkey_x, pubkey_y, curve=None):
|
||||
if curve is None:
|
||||
|
|
40
src/pyelliptic/hash.py
Normal file → Executable file
|
@@ -45,25 +45,25 @@ def hmac_sha256(k, m):
return md.raw
|
||||
|
||||
|
||||
def hmac_sha512(k, m):
|
||||
"""
|
||||
Compute the key and the message with HMAC SHA512
|
||||
"""
|
||||
key = OpenSSL.malloc(k, len(k))
|
||||
d = OpenSSL.malloc(m, len(m))
|
||||
md = OpenSSL.malloc(0, 64)
|
||||
i = OpenSSL.pointer(OpenSSL.c_int(0))
|
||||
OpenSSL.HMAC(OpenSSL.EVP_sha512(), key, len(k), d, len(m), md, i)
|
||||
return md.raw
|
||||
# def hmac_sha512(k, m):
|
||||
# """
|
||||
# Compute the key and the message with HMAC SHA512
|
||||
# """
|
||||
# key = OpenSSL.malloc(k, len(k))
|
||||
# d = OpenSSL.malloc(m, len(m))
|
||||
# md = OpenSSL.malloc(0, 64)
|
||||
# i = OpenSSL.pointer(OpenSSL.c_int(0))
|
||||
# OpenSSL.HMAC(OpenSSL.EVP_sha512(), key, len(k), d, len(m), md, i)
|
||||
# return md.raw
|
||||
|
||||
|
||||
def pbkdf2(password, salt=None, i=10000, keylen=64):
|
||||
if salt is None:
|
||||
salt = OpenSSL.rand(8)
|
||||
p_password = OpenSSL.malloc(password, len(password))
|
||||
p_salt = OpenSSL.malloc(salt, len(salt))
|
||||
output = OpenSSL.malloc(0, keylen)
|
||||
OpenSSL.PKCS5_PBKDF2_HMAC(p_password, len(password), p_salt,
|
||||
len(p_salt), i, OpenSSL.EVP_sha256(),
|
||||
keylen, output)
|
||||
return salt, output.raw
|
||||
# def pbkdf2(password, salt=None, i=10000, keylen=64):
|
||||
# if salt is None:
|
||||
# salt = OpenSSL.rand(8)
|
||||
# p_password = OpenSSL.malloc(password, len(password))
|
||||
# p_salt = OpenSSL.malloc(salt, len(salt))
|
||||
# output = OpenSSL.malloc(0, keylen)
|
||||
# OpenSSL.PKCS5_PBKDF2_HMAC(p_password, len(password), p_salt,
|
||||
# len(p_salt), i, OpenSSL.EVP_sha256(),
|
||||
# keylen, output)
|
||||
# return salt, output.raw
|
||||
|
|
316
src/pyelliptic/openssl.py
Normal file → Executable file
|
@@ -24,8 +24,8 @@ class CipherName:
def get_pointer(self):
|
||||
return self._pointer()
|
||||
|
||||
def get_name(self):
|
||||
return self._name
|
||||
# def get_name(self):
|
||||
# return self._name
|
||||
|
||||
def get_blocksize(self):
|
||||
return self._blocksize
|
||||
|
@@ -39,20 +39,20 @@ def get_version(library):
#OpenSSL 1.1
|
||||
OPENSSL_VERSION = 0
|
||||
OPENSSL_CFLAGS = 1
|
||||
library.OpenSSL_version.argtypes = [ctypes.c_int]
|
||||
library.OpenSSL_version.restype = ctypes.c_char_p
|
||||
version = library.OpenSSL_version(OPENSSL_VERSION)
|
||||
# library.OpenSSL_version.argtypes = [ctypes.c_int]
|
||||
# library.OpenSSL_version.restype = ctypes.c_char_p
|
||||
# version = library.OpenSSL_version(OPENSSL_VERSION)
|
||||
cflags = library.OpenSSL_version(OPENSSL_CFLAGS)
|
||||
library.OpenSSL_version_num.restype = ctypes.c_long
|
||||
# library.OpenSSL_version_num.restype = ctypes.c_long
|
||||
hexversion = library.OpenSSL_version_num()
|
||||
except AttributeError:
|
||||
try:
|
||||
#OpenSSL 1.0
|
||||
SSLEAY_VERSION = 0
|
||||
SSLEAY_CFLAGS = 2
|
||||
library.SSLeay.restype = ctypes.c_long
|
||||
library.SSLeay_version.restype = ctypes.c_char_p
|
||||
library.SSLeay_version.argtypes = [ctypes.c_int]
|
||||
# library.SSLeay.restype = ctypes.c_long
|
||||
# library.SSLeay_version.restype = ctypes.c_char_p
|
||||
# library.SSLeay_version.argtypes = [ctypes.c_int]
|
||||
version = library.SSLeay_version(SSLEAY_VERSION)
|
||||
cflags = library.SSLeay_version(SSLEAY_CFLAGS)
|
||||
hexversion = library.SSLeay()
|
||||
|
@@ -71,8 +71,8 @@ class _OpenSSL:
Build the wrapper
|
||||
"""
|
||||
self._lib = ctypes.CDLL(library)
|
||||
self._version, self._hexversion, self._cflags = get_version(self._lib)
|
||||
self._libreSSL = self._version.startswith("LibreSSL")
|
||||
# self._version, self._hexversion, self._cflags = get_version(self._lib)
|
||||
# self._libreSSL = self._version.startswith("LibreSSL")
|
||||
|
||||
self.pointer = ctypes.pointer
|
||||
self.c_int = ctypes.c_int
|
||||
|
@@ -80,148 +80,148 @@ class _OpenSSL:
self.create_string_buffer = ctypes.create_string_buffer
|
||||
|
||||
self.BN_new = self._lib.BN_new
|
||||
self.BN_new.restype = ctypes.c_void_p
|
||||
self.BN_new.argtypes = []
|
||||
# self.BN_new.restype = ctypes.c_void_p
|
||||
# self.BN_new.argtypes = []
|
||||
|
||||
self.BN_free = self._lib.BN_free
|
||||
self.BN_free.restype = None
|
||||
self.BN_free.argtypes = [ctypes.c_void_p]
|
||||
# self.BN_free.restype = None
|
||||
# self.BN_free.argtypes = [ctypes.c_void_p]
|
||||
|
||||
self.BN_num_bits = self._lib.BN_num_bits
|
||||
self.BN_num_bits.restype = ctypes.c_int
|
||||
self.BN_num_bits.argtypes = [ctypes.c_void_p]
|
||||
# self.BN_num_bits.restype = ctypes.c_int
|
||||
# self.BN_num_bits.argtypes = [ctypes.c_void_p]
|
||||
|
||||
self.BN_bn2bin = self._lib.BN_bn2bin
|
||||
self.BN_bn2bin.restype = ctypes.c_int
|
||||
self.BN_bn2bin.argtypes = [ctypes.c_void_p, ctypes.c_void_p]
|
||||
# self.BN_bn2bin.restype = ctypes.c_int
|
||||
# self.BN_bn2bin.argtypes = [ctypes.c_void_p, ctypes.c_void_p]
|
||||
|
||||
self.BN_bin2bn = self._lib.BN_bin2bn
|
||||
self.BN_bin2bn.restype = ctypes.c_void_p
|
||||
self.BN_bin2bn.argtypes = [ctypes.c_void_p, ctypes.c_int,
|
||||
ctypes.c_void_p]
|
||||
# self.BN_bin2bn.restype = ctypes.c_void_p
|
||||
# self.BN_bin2bn.argtypes = [ctypes.c_void_p, ctypes.c_int,
|
||||
# ctypes.c_void_p]
|
||||
|
||||
self.EC_KEY_free = self._lib.EC_KEY_free
|
||||
self.EC_KEY_free.restype = None
|
||||
self.EC_KEY_free.argtypes = [ctypes.c_void_p]
|
||||
# self.EC_KEY_free.restype = None
|
||||
# self.EC_KEY_free.argtypes = [ctypes.c_void_p]
|
||||
|
||||
self.EC_KEY_new_by_curve_name = self._lib.EC_KEY_new_by_curve_name
|
||||
self.EC_KEY_new_by_curve_name.restype = ctypes.c_void_p
|
||||
self.EC_KEY_new_by_curve_name.argtypes = [ctypes.c_int]
|
||||
# self.EC_KEY_new_by_curve_name.restype = ctypes.c_void_p
|
||||
# self.EC_KEY_new_by_curve_name.argtypes = [ctypes.c_int]
|
||||
|
||||
self.EC_KEY_generate_key = self._lib.EC_KEY_generate_key
|
||||
self.EC_KEY_generate_key.restype = ctypes.c_int
|
||||
self.EC_KEY_generate_key.argtypes = [ctypes.c_void_p]
|
||||
# self.EC_KEY_generate_key.restype = ctypes.c_int
|
||||
# self.EC_KEY_generate_key.argtypes = [ctypes.c_void_p]
|
||||
|
||||
self.EC_KEY_check_key = self._lib.EC_KEY_check_key
|
||||
self.EC_KEY_check_key.restype = ctypes.c_int
|
||||
self.EC_KEY_check_key.argtypes = [ctypes.c_void_p]
|
||||
# self.EC_KEY_check_key.restype = ctypes.c_int
|
||||
# self.EC_KEY_check_key.argtypes = [ctypes.c_void_p]
|
||||
|
||||
self.EC_KEY_get0_private_key = self._lib.EC_KEY_get0_private_key
|
||||
self.EC_KEY_get0_private_key.restype = ctypes.c_void_p
|
||||
self.EC_KEY_get0_private_key.argtypes = [ctypes.c_void_p]
|
||||
# self.EC_KEY_get0_private_key.restype = ctypes.c_void_p
|
||||
# self.EC_KEY_get0_private_key.argtypes = [ctypes.c_void_p]
|
||||
|
||||
self.EC_KEY_get0_public_key = self._lib.EC_KEY_get0_public_key
|
||||
self.EC_KEY_get0_public_key.restype = ctypes.c_void_p
|
||||
self.EC_KEY_get0_public_key.argtypes = [ctypes.c_void_p]
|
||||
# self.EC_KEY_get0_public_key.restype = ctypes.c_void_p
|
||||
# self.EC_KEY_get0_public_key.argtypes = [ctypes.c_void_p]
|
||||
|
||||
self.EC_KEY_get0_group = self._lib.EC_KEY_get0_group
|
||||
self.EC_KEY_get0_group.restype = ctypes.c_void_p
|
||||
self.EC_KEY_get0_group.argtypes = [ctypes.c_void_p]
|
||||
# self.EC_KEY_get0_group.restype = ctypes.c_void_p
|
||||
# self.EC_KEY_get0_group.argtypes = [ctypes.c_void_p]
|
||||
|
||||
self.EC_POINT_get_affine_coordinates_GFp = self._lib.EC_POINT_get_affine_coordinates_GFp
|
||||
self.EC_POINT_get_affine_coordinates_GFp.restype = ctypes.c_int
|
||||
self.EC_POINT_get_affine_coordinates_GFp.argtypes = [ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p]
|
||||
# self.EC_POINT_get_affine_coordinates_GFp.restype = ctypes.c_int
|
||||
# self.EC_POINT_get_affine_coordinates_GFp.argtypes = [ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p]
|
||||
|
||||
self.EC_KEY_set_private_key = self._lib.EC_KEY_set_private_key
|
||||
self.EC_KEY_set_private_key.restype = ctypes.c_int
|
||||
self.EC_KEY_set_private_key.argtypes = [ctypes.c_void_p,
|
||||
ctypes.c_void_p]
|
||||
# self.EC_KEY_set_private_key.restype = ctypes.c_int
|
||||
# self.EC_KEY_set_private_key.argtypes = [ctypes.c_void_p,
|
||||
# ctypes.c_void_p]
|
||||
|
||||
self.EC_KEY_set_public_key = self._lib.EC_KEY_set_public_key
|
||||
self.EC_KEY_set_public_key.restype = ctypes.c_int
|
||||
self.EC_KEY_set_public_key.argtypes = [ctypes.c_void_p,
|
||||
ctypes.c_void_p]
|
||||
# self.EC_KEY_set_public_key.restype = ctypes.c_int
|
||||
# self.EC_KEY_set_public_key.argtypes = [ctypes.c_void_p,
|
||||
# ctypes.c_void_p]
|
||||
|
||||
self.EC_KEY_set_group = self._lib.EC_KEY_set_group
|
||||
self.EC_KEY_set_group.restype = ctypes.c_int
|
||||
self.EC_KEY_set_group.argtypes = [ctypes.c_void_p, ctypes.c_void_p]
|
||||
# self.EC_KEY_set_group.restype = ctypes.c_int
|
||||
# self.EC_KEY_set_group.argtypes = [ctypes.c_void_p, ctypes.c_void_p]
|
||||
|
||||
self.EC_POINT_set_affine_coordinates_GFp = self._lib.EC_POINT_set_affine_coordinates_GFp
|
||||
self.EC_POINT_set_affine_coordinates_GFp.restype = ctypes.c_int
|
||||
self.EC_POINT_set_affine_coordinates_GFp.argtypes = [ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p]
|
||||
# self.EC_POINT_set_affine_coordinates_GFp.restype = ctypes.c_int
|
||||
# self.EC_POINT_set_affine_coordinates_GFp.argtypes = [ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p]
|
||||
|
||||
self.EC_POINT_new = self._lib.EC_POINT_new
|
||||
self.EC_POINT_new.restype = ctypes.c_void_p
|
||||
self.EC_POINT_new.argtypes = [ctypes.c_void_p]
|
||||
# self.EC_POINT_new.restype = ctypes.c_void_p
|
||||
# self.EC_POINT_new.argtypes = [ctypes.c_void_p]
|
||||
|
||||
self.EC_POINT_free = self._lib.EC_POINT_free
|
||||
self.EC_POINT_free.restype = None
|
||||
self.EC_POINT_free.argtypes = [ctypes.c_void_p]
|
||||
# self.EC_POINT_free.restype = None
|
||||
# self.EC_POINT_free.argtypes = [ctypes.c_void_p]
|
||||
|
||||
self.BN_CTX_free = self._lib.BN_CTX_free
|
||||
self.BN_CTX_free.restype = None
|
||||
self.BN_CTX_free.argtypes = [ctypes.c_void_p]
|
||||
# self.BN_CTX_free.restype = None
|
||||
# self.BN_CTX_free.argtypes = [ctypes.c_void_p]
|
||||
|
||||
self.EC_POINT_mul = self._lib.EC_POINT_mul
|
||||
self.EC_POINT_mul.restype = None
|
||||
self.EC_POINT_mul.argtypes = [ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p]
|
||||
# self.EC_POINT_mul.restype = None
|
||||
# self.EC_POINT_mul.argtypes = [ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p]
|
||||
|
||||
self.EC_KEY_set_private_key = self._lib.EC_KEY_set_private_key
|
||||
self.EC_KEY_set_private_key.restype = ctypes.c_int
|
||||
self.EC_KEY_set_private_key.argtypes = [ctypes.c_void_p,
|
||||
ctypes.c_void_p]
|
||||
# self.EC_KEY_set_private_key.restype = ctypes.c_int
|
||||
# self.EC_KEY_set_private_key.argtypes = [ctypes.c_void_p,
|
||||
# ctypes.c_void_p]
|
||||
|
||||
if self._hexversion >= 0x10100000 and not self._libreSSL:
|
||||
self.EC_KEY_OpenSSL = self._lib.EC_KEY_OpenSSL
|
||||
self._lib.EC_KEY_OpenSSL.restype = ctypes.c_void_p
|
||||
self._lib.EC_KEY_OpenSSL.argtypes = []
|
||||
# self._lib.EC_KEY_OpenSSL.restype = ctypes.c_void_p
|
||||
# self._lib.EC_KEY_OpenSSL.argtypes = []
|
||||
|
||||
self.EC_KEY_set_method = self._lib.EC_KEY_set_method
|
||||
self._lib.EC_KEY_set_method.restype = ctypes.c_int
|
||||
self._lib.EC_KEY_set_method.argtypes = [ctypes.c_void_p, ctypes.c_void_p]
|
||||
# self._lib.EC_KEY_set_method.restype = ctypes.c_int
|
||||
# self._lib.EC_KEY_set_method.argtypes = [ctypes.c_void_p, ctypes.c_void_p]
|
||||
else:
|
||||
self.ECDH_OpenSSL = self._lib.ECDH_OpenSSL
|
||||
self._lib.ECDH_OpenSSL.restype = ctypes.c_void_p
|
||||
self._lib.ECDH_OpenSSL.argtypes = []
|
||||
# self._lib.ECDH_OpenSSL.restype = ctypes.c_void_p
|
||||
# self._lib.ECDH_OpenSSL.argtypes = []
|
||||
|
||||
self.ECDH_set_method = self._lib.ECDH_set_method
|
||||
self._lib.ECDH_set_method.restype = ctypes.c_int
|
||||
self._lib.ECDH_set_method.argtypes = [ctypes.c_void_p, ctypes.c_void_p]
|
||||
# self._lib.ECDH_set_method.restype = ctypes.c_int
|
||||
# self._lib.ECDH_set_method.argtypes = [ctypes.c_void_p, ctypes.c_void_p]
|
||||
|
||||
self.BN_CTX_new = self._lib.BN_CTX_new
|
||||
self._lib.BN_CTX_new.restype = ctypes.c_void_p
|
||||
self._lib.BN_CTX_new.argtypes = []
|
||||
# self._lib.BN_CTX_new.restype = ctypes.c_void_p
|
||||
# self._lib.BN_CTX_new.argtypes = []
|
||||
|
||||
self.ECDH_compute_key = self._lib.ECDH_compute_key
|
||||
self.ECDH_compute_key.restype = ctypes.c_int
|
||||
self.ECDH_compute_key.argtypes = [ctypes.c_void_p,
|
||||
ctypes.c_int, ctypes.c_void_p, ctypes.c_void_p]
|
||||
# self.ECDH_compute_key.restype = ctypes.c_int
|
||||
# self.ECDH_compute_key.argtypes = [ctypes.c_void_p,
|
||||
# ctypes.c_int, ctypes.c_void_p, ctypes.c_void_p]
|
||||
|
||||
self.EVP_CipherInit_ex = self._lib.EVP_CipherInit_ex
|
||||
self.EVP_CipherInit_ex.restype = ctypes.c_int
|
||||
self.EVP_CipherInit_ex.argtypes = [ctypes.c_void_p,
|
||||
ctypes.c_void_p, ctypes.c_void_p]
|
||||
# self.EVP_CipherInit_ex.restype = ctypes.c_int
|
||||
# self.EVP_CipherInit_ex.argtypes = [ctypes.c_void_p,
|
||||
# ctypes.c_void_p, ctypes.c_void_p]
|
||||
|
||||
self.EVP_CIPHER_CTX_new = self._lib.EVP_CIPHER_CTX_new
|
||||
self.EVP_CIPHER_CTX_new.restype = ctypes.c_void_p
|
||||
self.EVP_CIPHER_CTX_new.argtypes = []
|
||||
# self.EVP_CIPHER_CTX_new.restype = ctypes.c_void_p
|
||||
# self.EVP_CIPHER_CTX_new.argtypes = []
|
||||
|
||||
# Cipher
|
||||
self.EVP_aes_128_cfb128 = self._lib.EVP_aes_128_cfb128
|
||||
self.EVP_aes_128_cfb128.restype = ctypes.c_void_p
|
||||
self.EVP_aes_128_cfb128.argtypes = []
|
||||
# self.EVP_aes_128_cfb128.restype = ctypes.c_void_p
|
||||
# self.EVP_aes_128_cfb128.argtypes = []
|
||||
|
||||
self.EVP_aes_256_cfb128 = self._lib.EVP_aes_256_cfb128
|
||||
self.EVP_aes_256_cfb128.restype = ctypes.c_void_p
|
||||
self.EVP_aes_256_cfb128.argtypes = []
|
||||
# self.EVP_aes_256_cfb128.restype = ctypes.c_void_p
|
||||
# self.EVP_aes_256_cfb128.argtypes = []
|
||||
|
||||
self.EVP_aes_128_cbc = self._lib.EVP_aes_128_cbc
|
||||
self.EVP_aes_128_cbc.restype = ctypes.c_void_p
|
||||
self.EVP_aes_128_cbc.argtypes = []
|
||||
# self.EVP_aes_128_cbc.restype = ctypes.c_void_p
|
||||
# self.EVP_aes_128_cbc.argtypes = []
|
||||
|
||||
self.EVP_aes_256_cbc = self._lib.EVP_aes_256_cbc
|
||||
self.EVP_aes_256_cbc.restype = ctypes.c_void_p
|
||||
self.EVP_aes_256_cbc.argtypes = []
|
||||
# self.EVP_aes_256_cbc.restype = ctypes.c_void_p
|
||||
# self.EVP_aes_256_cbc.argtypes = []
|
||||
|
||||
#self.EVP_aes_128_ctr = self._lib.EVP_aes_128_ctr
|
||||
#self.EVP_aes_128_ctr.restype = ctypes.c_void_p
|
||||
|
@@ -232,138 +232,138 @@ class _OpenSSL:
#self.EVP_aes_256_ctr.argtypes = []
|
||||
|
||||
self.EVP_aes_128_ofb = self._lib.EVP_aes_128_ofb
|
||||
self.EVP_aes_128_ofb.restype = ctypes.c_void_p
|
||||
self.EVP_aes_128_ofb.argtypes = []
|
||||
# self.EVP_aes_128_ofb.restype = ctypes.c_void_p
|
||||
# self.EVP_aes_128_ofb.argtypes = []
|
||||
|
||||
self.EVP_aes_256_ofb = self._lib.EVP_aes_256_ofb
|
||||
self.EVP_aes_256_ofb.restype = ctypes.c_void_p
|
||||
self.EVP_aes_256_ofb.argtypes = []
|
||||
# self.EVP_aes_256_ofb.restype = ctypes.c_void_p
|
||||
# self.EVP_aes_256_ofb.argtypes = []
|
||||
|
||||
self.EVP_bf_cbc = self._lib.EVP_bf_cbc
|
||||
self.EVP_bf_cbc.restype = ctypes.c_void_p
|
||||
self.EVP_bf_cbc.argtypes = []
|
||||
# self.EVP_bf_cbc.restype = ctypes.c_void_p
|
||||
# self.EVP_bf_cbc.argtypes = []
|
||||
|
||||
self.EVP_bf_cfb64 = self._lib.EVP_bf_cfb64
|
||||
self.EVP_bf_cfb64.restype = ctypes.c_void_p
|
||||
self.EVP_bf_cfb64.argtypes = []
|
||||
# self.EVP_bf_cfb64.restype = ctypes.c_void_p
|
||||
# self.EVP_bf_cfb64.argtypes = []
|
||||
|
||||
self.EVP_rc4 = self._lib.EVP_rc4
|
||||
self.EVP_rc4.restype = ctypes.c_void_p
|
||||
self.EVP_rc4.argtypes = []
|
||||
# self.EVP_rc4.restype = ctypes.c_void_p
|
||||
# self.EVP_rc4.argtypes = []
|
||||
|
||||
if self._hexversion >= 0x10100000 and not self._libreSSL:
|
||||
self.EVP_CIPHER_CTX_reset = self._lib.EVP_CIPHER_CTX_reset
|
||||
self.EVP_CIPHER_CTX_reset.restype = ctypes.c_int
|
||||
self.EVP_CIPHER_CTX_reset.argtypes = [ctypes.c_void_p]
|
||||
# self.EVP_CIPHER_CTX_reset.restype = ctypes.c_int
|
||||
# self.EVP_CIPHER_CTX_reset.argtypes = [ctypes.c_void_p]
|
||||
else:
|
||||
self.EVP_CIPHER_CTX_cleanup = self._lib.EVP_CIPHER_CTX_cleanup
|
||||
self.EVP_CIPHER_CTX_cleanup.restype = ctypes.c_int
|
||||
self.EVP_CIPHER_CTX_cleanup.argtypes = [ctypes.c_void_p]
|
||||
# self.EVP_CIPHER_CTX_cleanup.restype = ctypes.c_int
|
||||
# self.EVP_CIPHER_CTX_cleanup.argtypes = [ctypes.c_void_p]
|
||||
|
||||
self.EVP_CIPHER_CTX_free = self._lib.EVP_CIPHER_CTX_free
|
||||
self.EVP_CIPHER_CTX_free.restype = None
|
||||
self.EVP_CIPHER_CTX_free.argtypes = [ctypes.c_void_p]
|
||||
# self.EVP_CIPHER_CTX_free.restype = None
|
||||
# self.EVP_CIPHER_CTX_free.argtypes = [ctypes.c_void_p]
|
||||
|
||||
self.EVP_CipherUpdate = self._lib.EVP_CipherUpdate
|
||||
self.EVP_CipherUpdate.restype = ctypes.c_int
|
||||
self.EVP_CipherUpdate.argtypes = [ctypes.c_void_p,
|
||||
ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_int]
|
||||
# self.EVP_CipherUpdate.restype = ctypes.c_int
|
||||
# self.EVP_CipherUpdate.argtypes = [ctypes.c_void_p,
|
||||
# ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_int]
|
||||
|
||||
self.EVP_CipherFinal_ex = self._lib.EVP_CipherFinal_ex
|
||||
self.EVP_CipherFinal_ex.restype = ctypes.c_int
|
||||
self.EVP_CipherFinal_ex.argtypes = [ctypes.c_void_p,
|
||||
ctypes.c_void_p, ctypes.c_void_p]
|
||||
# self.EVP_CipherFinal_ex.restype = ctypes.c_int
|
||||
# self.EVP_CipherFinal_ex.argtypes = [ctypes.c_void_p,
|
||||
# ctypes.c_void_p, ctypes.c_void_p]
|
||||
|
||||
self.EVP_DigestInit = self._lib.EVP_DigestInit
|
||||
self.EVP_DigestInit.restype = ctypes.c_int
|
||||
self._lib.EVP_DigestInit.argtypes = [ctypes.c_void_p, ctypes.c_void_p]
|
||||
# self.EVP_DigestInit.restype = ctypes.c_int
|
||||
# self._lib.EVP_DigestInit.argtypes = [ctypes.c_void_p, ctypes.c_void_p]
|
||||
|
||||
self.EVP_DigestInit_ex = self._lib.EVP_DigestInit_ex
|
||||
self.EVP_DigestInit_ex.restype = ctypes.c_int
|
||||
self._lib.EVP_DigestInit_ex.argtypes = 3 * [ctypes.c_void_p]
|
||||
# self.EVP_DigestInit_ex.restype = ctypes.c_int
|
||||
# self._lib.EVP_DigestInit_ex.argtypes = 3 * [ctypes.c_void_p]
|
||||
|
||||
self.EVP_DigestUpdate = self._lib.EVP_DigestUpdate
|
||||
self.EVP_DigestUpdate.restype = ctypes.c_int
|
||||
self.EVP_DigestUpdate.argtypes = [ctypes.c_void_p,
|
||||
ctypes.c_void_p, ctypes.c_int]
|
||||
# self.EVP_DigestUpdate.restype = ctypes.c_int
|
||||
# self.EVP_DigestUpdate.argtypes = [ctypes.c_void_p,
|
||||
# ctypes.c_void_p, ctypes.c_int]
|
||||
|
||||
self.EVP_DigestFinal = self._lib.EVP_DigestFinal
|
||||
self.EVP_DigestFinal.restype = ctypes.c_int
|
||||
self.EVP_DigestFinal.argtypes = [ctypes.c_void_p,
|
||||
ctypes.c_void_p, ctypes.c_void_p]
|
||||
# self.EVP_DigestFinal.restype = ctypes.c_int
|
||||
# self.EVP_DigestFinal.argtypes = [ctypes.c_void_p,
|
||||
# ctypes.c_void_p, ctypes.c_void_p]
|
||||
|
||||
self.EVP_DigestFinal_ex = self._lib.EVP_DigestFinal_ex
|
||||
self.EVP_DigestFinal_ex.restype = ctypes.c_int
|
||||
self.EVP_DigestFinal_ex.argtypes = [ctypes.c_void_p,
|
||||
ctypes.c_void_p, ctypes.c_void_p]
|
||||
# self.EVP_DigestFinal_ex.restype = ctypes.c_int
|
||||
# self.EVP_DigestFinal_ex.argtypes = [ctypes.c_void_p,
|
||||
# ctypes.c_void_p, ctypes.c_void_p]
|
||||
|
||||
self.ECDSA_sign = self._lib.ECDSA_sign
|
||||
self.ECDSA_sign.restype = ctypes.c_int
|
||||
self.ECDSA_sign.argtypes = [ctypes.c_int, ctypes.c_void_p,
|
||||
ctypes.c_int, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p]
|
||||
# self.ECDSA_sign.restype = ctypes.c_int
|
||||
# self.ECDSA_sign.argtypes = [ctypes.c_int, ctypes.c_void_p,
|
||||
# ctypes.c_int, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p]
|
||||
|
||||
self.ECDSA_verify = self._lib.ECDSA_verify
|
||||
self.ECDSA_verify.restype = ctypes.c_int
|
||||
self.ECDSA_verify.argtypes = [ctypes.c_int, ctypes.c_void_p,
|
||||
ctypes.c_int, ctypes.c_void_p, ctypes.c_int, ctypes.c_void_p]
|
||||
# self.ECDSA_verify.restype = ctypes.c_int
|
||||
# self.ECDSA_verify.argtypes = [ctypes.c_int, ctypes.c_void_p,
|
||||
# ctypes.c_int, ctypes.c_void_p, ctypes.c_int, ctypes.c_void_p]
|
||||
|
||||
if self._hexversion >= 0x10100000 and not self._libreSSL:
|
||||
self.EVP_MD_CTX_new = self._lib.EVP_MD_CTX_new
|
||||
self.EVP_MD_CTX_new.restype = ctypes.c_void_p
|
||||
self.EVP_MD_CTX_new.argtypes = []
|
||||
# self.EVP_MD_CTX_new.restype = ctypes.c_void_p
|
||||
# self.EVP_MD_CTX_new.argtypes = []
|
||||
|
||||
self.EVP_MD_CTX_reset = self._lib.EVP_MD_CTX_reset
|
||||
self.EVP_MD_CTX_reset.restype = None
|
||||
self.EVP_MD_CTX_reset.argtypes = [ctypes.c_void_p]
|
||||
# self.EVP_MD_CTX_reset.restype = None
|
||||
# self.EVP_MD_CTX_reset.argtypes = [ctypes.c_void_p]
|
||||
|
||||
self.EVP_MD_CTX_free = self._lib.EVP_MD_CTX_free
|
||||
self.EVP_MD_CTX_free.restype = None
|
||||
self.EVP_MD_CTX_free.argtypes = [ctypes.c_void_p]
|
||||
# self.EVP_MD_CTX_free.restype = None
|
||||
# self.EVP_MD_CTX_free.argtypes = [ctypes.c_void_p]
|
||||
|
||||
self.EVP_sha1 = self._lib.EVP_sha1
|
||||
self.EVP_sha1.restype = ctypes.c_void_p
|
||||
self.EVP_sha1.argtypes = []
|
||||
# self.EVP_sha1.restype = ctypes.c_void_p
|
||||
# self.EVP_sha1.argtypes = []
|
||||
|
||||
self.digest_ecdsa_sha1 = self.EVP_sha1
|
||||
else:
|
||||
self.EVP_MD_CTX_create = self._lib.EVP_MD_CTX_create
|
||||
self.EVP_MD_CTX_create.restype = ctypes.c_void_p
|
||||
self.EVP_MD_CTX_create.argtypes = []
|
||||
# self.EVP_MD_CTX_create.restype = ctypes.c_void_p
|
||||
# self.EVP_MD_CTX_create.argtypes = []
|
||||
|
||||
self.EVP_MD_CTX_init = self._lib.EVP_MD_CTX_init
|
||||
self.EVP_MD_CTX_init.restype = None
|
||||
self.EVP_MD_CTX_init.argtypes = [ctypes.c_void_p]
|
||||
# self.EVP_MD_CTX_init.restype = None
|
||||
# self.EVP_MD_CTX_init.argtypes = [ctypes.c_void_p]
|
||||
|
||||
self.EVP_MD_CTX_destroy = self._lib.EVP_MD_CTX_destroy
|
||||
self.EVP_MD_CTX_destroy.restype = None
|
||||
self.EVP_MD_CTX_destroy.argtypes = [ctypes.c_void_p]
|
||||
# self.EVP_MD_CTX_destroy.restype = None
|
||||
# self.EVP_MD_CTX_destroy.argtypes = [ctypes.c_void_p]
|
||||
|
||||
self.EVP_ecdsa = self._lib.EVP_ecdsa
|
||||
self._lib.EVP_ecdsa.restype = ctypes.c_void_p
|
||||
self._lib.EVP_ecdsa.argtypes = []
|
||||
# self._lib.EVP_ecdsa.restype = ctypes.c_void_p
|
||||
# self._lib.EVP_ecdsa.argtypes = []
|
||||
|
||||
self.digest_ecdsa_sha1 = self.EVP_ecdsa
|
||||
|
||||
self.RAND_bytes = self._lib.RAND_bytes
|
||||
self.RAND_bytes.restype = ctypes.c_int
|
||||
self.RAND_bytes.argtypes = [ctypes.c_void_p, ctypes.c_int]
|
||||
# self.RAND_bytes.restype = ctypes.c_int
|
||||
# self.RAND_bytes.argtypes = [ctypes.c_void_p, ctypes.c_int]
|
||||
|
||||
self.EVP_sha256 = self._lib.EVP_sha256
|
||||
self.EVP_sha256.restype = ctypes.c_void_p
|
||||
self.EVP_sha256.argtypes = []
|
||||
# self.EVP_sha256.restype = ctypes.c_void_p
|
||||
# self.EVP_sha256.argtypes = []
|
||||
|
||||
self.i2o_ECPublicKey = self._lib.i2o_ECPublicKey
|
||||
self.i2o_ECPublicKey.restype = ctypes.c_void_p
|
||||
self.i2o_ECPublicKey.argtypes = [ctypes.c_void_p, ctypes.c_void_p]
|
||||
# self.i2o_ECPublicKey.restype = ctypes.c_void_p
|
||||
# self.i2o_ECPublicKey.argtypes = [ctypes.c_void_p, ctypes.c_void_p]
|
||||
|
||||
self.EVP_sha512 = self._lib.EVP_sha512
|
||||
self.EVP_sha512.restype = ctypes.c_void_p
|
||||
self.EVP_sha512.argtypes = []
|
||||
# self.EVP_sha512.restype = ctypes.c_void_p
|
||||
# self.EVP_sha512.argtypes = []
|
||||
|
||||
self.HMAC = self._lib.HMAC
|
||||
self.HMAC.restype = ctypes.c_void_p
|
||||
self.HMAC.argtypes = [ctypes.c_void_p, ctypes.c_void_p, ctypes.c_int,
|
||||
ctypes.c_void_p, ctypes.c_int, ctypes.c_void_p, ctypes.c_void_p]
|
||||
# self.HMAC.restype = ctypes.c_void_p
|
||||
# self.HMAC.argtypes = [ctypes.c_void_p, ctypes.c_void_p, ctypes.c_int,
|
||||
# ctypes.c_void_p, ctypes.c_int, ctypes.c_void_p, ctypes.c_void_p]
|
||||
|
||||
try:
|
||||
self.PKCS5_PBKDF2_HMAC = self._lib.PKCS5_PBKDF2_HMAC
|
||||
|
@ -371,11 +371,11 @@ class _OpenSSL:
|
|||
# The above is not compatible with all versions of OSX.
|
||||
self.PKCS5_PBKDF2_HMAC = self._lib.PKCS5_PBKDF2_HMAC_SHA1
|
||||
|
||||
self.PKCS5_PBKDF2_HMAC.restype = ctypes.c_int
|
||||
self.PKCS5_PBKDF2_HMAC.argtypes = [ctypes.c_void_p, ctypes.c_int,
|
||||
ctypes.c_void_p, ctypes.c_int,
|
||||
ctypes.c_int, ctypes.c_void_p,
|
||||
ctypes.c_int, ctypes.c_void_p]
|
||||
# self.PKCS5_PBKDF2_HMAC.restype = ctypes.c_int
|
||||
# self.PKCS5_PBKDF2_HMAC.argtypes = [ctypes.c_void_p, ctypes.c_int,
|
||||
# ctypes.c_void_p, ctypes.c_int,
|
||||
# ctypes.c_int, ctypes.c_void_p,
|
||||
# ctypes.c_int, ctypes.c_void_p]
|
||||
|
||||
self._set_ciphers()
|
||||
self._set_curves()
|
||||
|
|
|
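A minimal sketch, not part of this patch, of why the restype/argtypes prototypes above matter when binding OpenSSL through ctypes; it assumes a libcrypto that ctypes.util.find_library can locate:

import ctypes
import ctypes.util

# Load whichever libcrypto the platform provides (assumption: one is installed).
crypto = ctypes.CDLL(ctypes.util.find_library('crypto') or 'libcrypto.so')

# Without an explicit restype, ctypes assumes a C int return value, which can
# truncate the 64-bit pointer returned by functions such as EVP_sha256().
crypto.EVP_sha256.restype = ctypes.c_void_p
crypto.EVP_sha256.argtypes = []

md = crypto.EVP_sha256()  # opaque EVP_MD* handle, held as a Python integer
print(hex(md))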
@@ -64,11 +64,11 @@ class RandomTrackingDict(object):
        del self.dictionary[key]
        self.len -= 1

    def setMaxPending(self, maxPending):
        self.maxPending = maxPending
    # def setMaxPending(self, maxPending):
    #     self.maxPending = maxPending

    def setPendingTimeout(self, pendingTimeout):
        self.pendingTimeout = pendingTimeout
    # def setPendingTimeout(self, pendingTimeout):
    #     self.pendingTimeout = pendingTimeout

    def randomKeys(self, count=1):
        if self.len == 0 or ((self.pendingLen >= self.maxPending or
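With setMaxPending() and setPendingTimeout() gone, a caller that still wants to tune those knobs would presumably assign the attributes directly; a hedged sketch, with the attribute names taken from the removed setters and the import path and no-argument constructor assumed:

from randomtrackingdict import RandomTrackingDict

d = RandomTrackingDict()
d.maxPending = 10       # previously d.setMaxPending(10)
d.pendingTimeout = 60   # previously d.setPendingTimeout(60)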
4
src/singleinstance.py
Normal file → Executable file

@@ -2,7 +2,7 @@

import atexit
import errno
from multiprocessing import Process
# from multiprocessing import Process
import os
import sys
import state

@@ -21,7 +21,7 @@ class singleinstance:
    """
    def __init__(self, flavor_id="", daemon=False):
        self.initialized = False
        self.counter = 0
        # self.counter = 0
        self.daemon = daemon
        self.lockPid = None
        self.lockfile = os.path.normpath(os.path.join(state.appdata, 'singleton%s.lock' % flavor_id))
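For context, a hedged sketch of the lock-file pattern the singleinstance class is built around (POSIX-only, and the lock path is illustrative rather than taken from the code):

import fcntl
import os
import sys

fp = open('/tmp/example-singleton.lock', 'w')
try:
    # A second process fails on this non-blocking lock instead of starting twice.
    fcntl.lockf(fp, fcntl.LOCK_EX | fcntl.LOCK_NB)
    fp.write('%i\n' % os.getpid())
    fp.flush()
except IOError:
    sys.exit('another instance is already running')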
@@ -92,25 +92,25 @@ _socks4errors = ("request granted",
    "request rejected because the client program and identd report different user-ids",
    "unknown error")

def setdefaultproxy(proxytype=None, addr=None, port=None, rdns=True, username=None, password=None):
    """setdefaultproxy(proxytype, addr[, port[, rdns[, username[, password]]]])
    Sets a default proxy which all further socksocket objects will use,
    unless explicitly changed.
    """
    global _defaultproxy
    _defaultproxy = (proxytype, addr, port, rdns, username, password)
# def setdefaultproxy(proxytype=None, addr=None, port=None, rdns=True, username=None, password=None):
#     """setdefaultproxy(proxytype, addr[, port[, rdns[, username[, password]]]])
#     Sets a default proxy which all further socksocket objects will use,
#     unless explicitly changed.
#     """
#     global _defaultproxy
#     _defaultproxy = (proxytype, addr, port, rdns, username, password)

def wrapmodule(module):
    """wrapmodule(module)
    Attempts to replace a module's socket library with a SOCKS socket. Must set
    a default proxy using setdefaultproxy(...) first.
    This will only work on modules that import socket directly into the namespace;
    most of the Python Standard Library falls into this category.
    """
    if _defaultproxy != None:
        module.socket.socket = socksocket
    else:
        raise GeneralProxyError((4, "no proxy specified"))
# def wrapmodule(module):
#     """wrapmodule(module)
#     Attempts to replace a module's socket library with a SOCKS socket. Must set
#     a default proxy using setdefaultproxy(...) first.
#     This will only work on modules that import socket directly into the namespace;
#     most of the Python Standard Library falls into this category.
#     """
#     if _defaultproxy != None:
#         module.socket.socket = socksocket
#     else:
#         raise GeneralProxyError((4, "no proxy specified"))

class socksocket(socket.socket):
    """socksocket([family[, type[, proto]]]) -> socket object

@@ -287,17 +287,17 @@ class socksocket(socket.socket):
        boundport = struct.unpack(">H", self.__recvall(2))[0]
        return ip

    def getproxysockname(self):
        """getsockname() -> address info
        Returns the bound IP address and port number at the proxy.
        """
        return self.__proxysockname
    # def getproxysockname(self):
    #     """getsockname() -> address info
    #     Returns the bound IP address and port number at the proxy.
    #     """
    #     return self.__proxysockname

    def getproxypeername(self):
        """getproxypeername() -> address info
        Returns the IP and port number of the proxy.
        """
        return _orgsocket.getpeername(self)
    # def getproxypeername(self):
    #     """getproxypeername() -> address info
    #     Returns the IP and port number of the proxy.
    #     """
    #     return _orgsocket.getpeername(self)

    def getpeername(self):
        """getpeername() -> address info

@@ -306,8 +306,8 @@ class socksocket(socket.socket):
        """
        return self.__proxypeername

    def getproxytype(self):
        return self.__proxy[0]
    # def getproxytype(self):
    #     return self.__proxy[0]

    def __negotiatesocks4(self,destaddr,destport):
        """__negotiatesocks4(self,destaddr,destport)
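With the module-level setdefaultproxy()/wrapmodule() helpers commented out, each socksocket is configured individually through its setproxy() method; a hedged sketch in which the proxy address, port and target host are illustrative only:

import socks

s = socks.socksocket()
# e.g. a local Tor SOCKS5 listener; substitute whatever proxy is actually configured
s.setproxy(socks.PROXY_TYPE_SOCKS5, "127.0.0.1", 9050)
s.connect(("example.com", 80))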
@@ -114,8 +114,8 @@ class FilesystemInventory(InventoryStorage):
        # for i, v in self._inventory.items():
        #     print "loaded stream: %s, %i items" % (i, len(v))

    def stream_list(self):
        return self._inventory.keys()
    # def stream_list(self):
    #     return self._inventory.keys()

    def object_list(self):
        return [unhexlify(x) for x in listdir(path.join(self.baseDir, FilesystemInventory.objectDir))]

@@ -130,7 +130,7 @@ class FilesystemInventory(InventoryStorage):
    def getMetadata(self, hashId):
        try:
            with open(path.join(self.baseDir, FilesystemInventory.objectDir, hexlify(hashId), FilesystemInventory.metadataFilename), 'r') as f:
                objectType, streamNumber, expiresTime, tag, undef = string.split(f.read(), ",", 4)
                objectType, streamNumber, expiresTime, tag, _ = string.split(f.read(), ",", 4)
                return [int(objectType), int(streamNumber), int(expiresTime), unhexlify(tag)]
        except IOError:
            raise KeyError
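The getMetadata() change above only renames the unused fifth field to _; a hedged sketch of the same five-way split with a made-up sample line (the real file carries the serialized payload after the fourth comma). In Python 2, string.split(data, ",", 4) is equivalent to data.split(",", 4): at most four splits, so everything past the tag stays in one trailing field.

line = "3,1,1556500000,0f0f0f0f,rest,of,the,record"
objectType, streamNumber, expiresTime, tag, _ = line.split(",", 4)
print(objectType, streamNumber, expiresTime, tag)  # the trailing remainder is discarded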
@@ -42,7 +42,7 @@ class InventoryStorage(Storage, collections.MutableMapping):
    def clean(self):
        raise NotImplementedError

class MailboxStorage(Storage, collections.MutableMapping):
    def __init__(self):
# class MailboxStorage(Storage, collections.MutableMapping):
#     def __init__(self):
#         super(self.__class__, self).__init__()
        pass
# pass
@@ -12,11 +12,10 @@ class Throttle(object):

    def __init__(self, limit=0):
        self.limit = limit
        self.speed = 0
        # self.speed = 0
        self.chunkSize = Throttle.maxChunkSize
        self.txTime = int(time.time())
        self.txLen = 0
        self.total = 0
        self.timer = threading.Event()
        self.lock = threading.RLock()
        self.resetChunkSize()

@@ -25,7 +24,7 @@ class Throttle(object):
        with self.lock:
            now = int(time.time())
            if now > self.txTime:
                self.speed = self.txLen / (now - self.txTime)
                # self.speed = self.txLen / (now - self.txTime)
                self.txLen -= self.limit * (now - self.txTime)
                self.txTime = now
                if self.txLen < 0 or self.limit == 0:

@@ -34,7 +33,6 @@ class Throttle(object):
    def wait(self, dataLen):
        with self.lock:
            self.txLen += dataLen
            self.total += dataLen
        while state.shutdown == 0:
            self.recalculate()
            if self.limit == 0:

@@ -43,9 +41,9 @@ class Throttle(object):
                break
            self.timer.wait(0.2)

    def getSpeed(self):
        self.recalculate()
        return self.speed
    # def getSpeed(self):
    #     self.recalculate()
    #     return self.speed

    def resetChunkSize(self):
        with self.lock:

@@ -65,17 +63,17 @@ class SendThrottle(Throttle):
    def __init__(self):
        Throttle.__init__(self, BMConfigParser().safeGetInt('bitmessagesettings', 'maxuploadrate')*1024)

    def resetLimit(self):
        with self.lock:
            self.limit = BMConfigParser().safeGetInt('bitmessagesettings', 'maxuploadrate')*1024
            Throttle.resetChunkSize(self)
    # def resetLimit(self):
    #     with self.lock:
    #         self.limit = BMConfigParser().safeGetInt('bitmessagesettings', 'maxuploadrate')*1024
    #         Throttle.resetChunkSize(self)

@Singleton
class ReceiveThrottle(Throttle):
    def __init__(self):
        Throttle.__init__(self, BMConfigParser().safeGetInt('bitmessagesettings', 'maxdownloadrate')*1024)

    def resetLimit(self):
        with self.lock:
            self.limit = BMConfigParser().safeGetInt('bitmessagesettings', 'maxdownloadrate')*1024
            Throttle.resetChunkSize(self)
    # def resetLimit(self):
    #     with self.lock:
    #         self.limit = BMConfigParser().safeGetInt('bitmessagesettings', 'maxdownloadrate')*1024
    #         Throttle.resetChunkSize(self)
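Setting aside the dropped speed/getSpeed() bookkeeping, the rate control that remains works as in this hedged, stand-alone rendition of recalculate()'s arithmetic with made-up numbers:

limit = 100 * 1024        # configured budget, bytes per second
txLen = 300 * 1024        # bytes queued since the last recalculation
elapsed = 2               # seconds since txTime

txLen -= limit * elapsed  # credit the budget: 300 KiB - 200 KiB = 100 KiB still pending
print(txLen > 0)          # True, so wait() keeps sleeping in 0.2 s steps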
@@ -1,2 +1 @@
softwareName = 'PyBitmessage'
softwareVersion = '0.6.3.2'