#!/usr/bin/env python2.7
# Copyright (c) 2012 Jonathan Warren
# Copyright (c) 2012 The Bitmessage developers
# Distributed under the MIT/X11 software license. See the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.

# Right now, PyBitmessage only supports connecting to stream 1. It doesn't yet contain logic to expand into further streams.

# The software version variable is now held in shared.py

verbose = 1
maximumAgeOfAnObjectThatIAmWillingToAccept = 216000  # Equals two days and 12 hours.
lengthOfTimeToLeaveObjectsInInventory = 237600  # Equals two days and 18 hours. This should be longer than maximumAgeOfAnObjectThatIAmWillingToAccept so that we don't process messages twice.
lengthOfTimeToHoldOnToAllPubkeys = 2419200  # Equals 4 weeks. You could make this longer if you want, but making it shorter would not be advisable because there is a very small possibility that it could keep you from obtaining a needed pubkey for a period of time.
maximumAgeOfObjectsThatIAdvertiseToOthers = 216000  # Equals two days and 12 hours.
maximumAgeOfNodesThatIAdvertiseToOthers = 10800  # Equals three hours.
storeConfigFilesInSameDirectoryAsProgramByDefault = False  # The user may de-select Portable Mode in the settings if they want the config files to stay in the application data folder.
useVeryEasyProofOfWorkForTesting = False  # If you set this to True while on the normal network, you won't be able to send or sometimes receive messages.
encryptedBroadcastSwitchoverTime = 1369735200
import sys
import ConfigParser
import Queue
from addresses import *
import shared
from defaultKnownNodes import *
import time
import socket
import threading
import hashlib
from struct import *
import pickle
import random
import sqlite3
from time import strftime, localtime, gmtime
import shutil  # used for moving the messages.dat file
import string
import socks
import highlevelcrypto
from pyelliptic.openssl import OpenSSL
import ctypes
from pyelliptic import arithmetic
import signal  # Used to capture a Ctrl-C keypress so that Bitmessage can shutdown gracefully.
# The next 3 are used for the API
from SimpleXMLRPCServer import *
import json
from subprocess import call  # used when the API must execute an outside program
import singleton
import proofofwork
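
# The following helper is an illustrative sketch only; it is not called anywhere
# in this file. It shows the 24-byte wire header that processData() (below)
# parses and that sendinvMessageToJustThisOnePeer() builds by hand: 4 magic
# bytes, a 12-byte NULL-padded command, a 4-byte big-endian payload length, and
# the first 4 bytes of the SHA-512 hash of the payload. The function name is
# hypothetical.
def _examplePackMessageHeader(command, payload):
    return ('\xe9\xbe\xb4\xd9'                       # magic bytes
            + command.ljust(12, '\x00')              # command, NULL padded to 12 bytes
            + pack('>L', len(payload))               # payload length, big-endian
            + hashlib.sha512(payload).digest()[:4])  # checksum: first 4 bytes of SHA-512
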

# For each stream to which we connect, several outgoingSynSender threads will exist and will collectively create 8 connections with peers.
class outgoingSynSender(threading.Thread):
    def __init__(self):
        threading.Thread.__init__(self)

    def setup(self, streamNumber):
        self.streamNumber = streamNumber

    def run(self):
        time.sleep(1)
        global alreadyAttemptedConnectionsListResetTime
        while True:
            if len(selfInitiatedConnections[self.streamNumber]) >= 8:  # maximum number of outgoing connections = 8
                time.sleep(10)
            else:
                random.seed()
                HOST, = random.sample(shared.knownNodes[self.streamNumber], 1)
                alreadyAttemptedConnectionsListLock.acquire()
                while HOST in alreadyAttemptedConnectionsList or HOST in shared.connectedHostsList:
                    alreadyAttemptedConnectionsListLock.release()
                    # print 'choosing new sample'
                    random.seed()
                    HOST, = random.sample(shared.knownNodes[self.streamNumber], 1)
                    time.sleep(1)
                    # Clear out the alreadyAttemptedConnectionsList every half hour so that this program will again attempt a connection to any nodes, even ones it has already tried.
                    if (time.time() - alreadyAttemptedConnectionsListResetTime) > 1800:
                        alreadyAttemptedConnectionsList.clear()
                        alreadyAttemptedConnectionsListResetTime = int(time.time())
                    alreadyAttemptedConnectionsListLock.acquire()
                alreadyAttemptedConnectionsList[HOST] = 0
                alreadyAttemptedConnectionsListLock.release()
                PORT, timeNodeLastSeen = shared.knownNodes[self.streamNumber][HOST]
                sock = socks.socksocket(socket.AF_INET, socket.SOCK_STREAM)
                # This option apparently avoids the TIME_WAIT state so that we can rebind faster
                sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
                sock.settimeout(20)
                if shared.config.get('bitmessagesettings', 'socksproxytype') == 'none' and verbose >= 2:
                    shared.printLock.acquire()
                    print 'Trying an outgoing connection to', HOST, ':', PORT
                    shared.printLock.release()
                    # sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
                elif shared.config.get('bitmessagesettings', 'socksproxytype') == 'SOCKS4a':
                    if verbose >= 2:
                        shared.printLock.acquire()
                        print '(Using SOCKS4a) Trying an outgoing connection to', HOST, ':', PORT
                        shared.printLock.release()
                    proxytype = socks.PROXY_TYPE_SOCKS4
                    sockshostname = shared.config.get('bitmessagesettings', 'sockshostname')
                    socksport = shared.config.getint('bitmessagesettings', 'socksport')
                    rdns = True  # Do domain name lookups through the proxy; though this setting doesn't really matter since we won't be doing any domain name lookups anyway.
                    if shared.config.getboolean('bitmessagesettings', 'socksauthentication'):
                        socksusername = shared.config.get('bitmessagesettings', 'socksusername')
                        sockspassword = shared.config.get('bitmessagesettings', 'sockspassword')
                        sock.setproxy(proxytype, sockshostname, socksport, rdns, socksusername, sockspassword)
                    else:
                        sock.setproxy(proxytype, sockshostname, socksport, rdns)
                elif shared.config.get('bitmessagesettings', 'socksproxytype') == 'SOCKS5':
                    if verbose >= 2:
                        shared.printLock.acquire()
                        print '(Using SOCKS5) Trying an outgoing connection to', HOST, ':', PORT
                        shared.printLock.release()
                    proxytype = socks.PROXY_TYPE_SOCKS5
                    sockshostname = shared.config.get('bitmessagesettings', 'sockshostname')
                    socksport = shared.config.getint('bitmessagesettings', 'socksport')
                    rdns = True  # Do domain name lookups through the proxy; though this setting doesn't really matter since we won't be doing any domain name lookups anyway.
                    if shared.config.getboolean('bitmessagesettings', 'socksauthentication'):
                        socksusername = shared.config.get('bitmessagesettings', 'socksusername')
                        sockspassword = shared.config.get('bitmessagesettings', 'sockspassword')
                        sock.setproxy(proxytype, sockshostname, socksport, rdns, socksusername, sockspassword)
                    else:
                        sock.setproxy(proxytype, sockshostname, socksport, rdns)
                try:
                    sock.connect((HOST, PORT))
                    rd = receiveDataThread()
                    rd.daemon = True  # close the main program even if there are threads left
                    objectsOfWhichThisRemoteNodeIsAlreadyAware = {}
                    rd.setup(sock, HOST, PORT, self.streamNumber, objectsOfWhichThisRemoteNodeIsAlreadyAware)
                    rd.start()
                    shared.printLock.acquire()
                    print self, 'connected to', HOST, 'during an outgoing attempt.'
                    shared.printLock.release()

                    sd = sendDataThread()
                    sd.setup(sock, HOST, PORT, self.streamNumber, objectsOfWhichThisRemoteNodeIsAlreadyAware)
                    sd.start()
                    sd.sendVersionMessage()
                except socks.GeneralProxyError, err:
                    if verbose >= 2:
                        shared.printLock.acquire()
                        print 'Could NOT connect to', HOST, 'during outgoing attempt.', err
                        shared.printLock.release()
                    PORT, timeLastSeen = shared.knownNodes[self.streamNumber][HOST]
                    if (int(time.time()) - timeLastSeen) > 172800 and len(shared.knownNodes[self.streamNumber]) > 1000:  # For nodes more than 48 hours old, if we have more than 1000 hosts in our list, delete them from the shared.knownNodes data structure.
                        shared.knownNodesLock.acquire()
                        del shared.knownNodes[self.streamNumber][HOST]
                        shared.knownNodesLock.release()
                        shared.printLock.acquire()
                        print 'deleting', HOST, 'from shared.knownNodes because it is more than 48 hours old and we could not connect to it.'
                        shared.printLock.release()
                except socks.Socks5AuthError, err:
                    shared.UISignalQueue.put(('updateStatusBar', "SOCKS5 Authentication problem: " + str(err)))
                except socks.Socks5Error, err:
                    print 'SOCKS5 error. (It is possible that the server wants authentication.)', str(err)
                except socks.Socks4Error, err:
                    print 'Socks4Error:', err
                except socket.error, err:
                    if shared.config.get('bitmessagesettings', 'socksproxytype')[0:5] == 'SOCKS':
                        print 'Bitmessage MIGHT be having trouble connecting to the SOCKS server. ' + str(err)
                    else:
                        if verbose >= 1:
                            shared.printLock.acquire()
                            print 'Could NOT connect to', HOST, 'during outgoing attempt.', err
                            shared.printLock.release()
                        PORT, timeLastSeen = shared.knownNodes[self.streamNumber][HOST]
                        if (int(time.time()) - timeLastSeen) > 172800 and len(shared.knownNodes[self.streamNumber]) > 1000:  # For nodes more than 48 hours old, if we have more than 1000 hosts in our list, delete them from the knownNodes data structure.
                            shared.knownNodesLock.acquire()
                            del shared.knownNodes[self.streamNumber][HOST]
                            shared.knownNodesLock.release()
                            shared.printLock.acquire()
                            print 'deleting', HOST, 'from knownNodes because it is more than 48 hours old and we could not connect to it.'
                            shared.printLock.release()
                except Exception, err:
                    sys.stderr.write('An exception has occurred in the outgoingSynSender thread that was not caught by other exception types: %s\n' % err)
                time.sleep(0.1)
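
# Illustrative sketch only (not part of the program flow): shared.knownNodes is
# a dict keyed by stream number, then by host, holding (port, lastSeenTime)
# tuples, which is why outgoingSynSender above unpacks entries with
# "PORT, timeNodeLastSeen = shared.knownNodes[self.streamNumber][HOST]".
# The function name is hypothetical and assumes the caller already holds
# shared.knownNodesLock.
def _exampleCountStaleKnownNodes(knownNodesForOneStream):
    staleCount = 0
    for host, (port, lastSeenTime) in knownNodesForOneStream.items():
        if (int(time.time()) - lastSeenTime) > 172800:  # older than 48 hours
            staleCount += 1
    return staleCount
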

# Only one singleListener thread will ever exist. It creates the receiveDataThread and sendDataThread for each incoming connection. Note that it cannot set the stream number because it is not known yet; the other node will have to tell us its stream number in a version message. If we don't care about their stream, we will close the connection (within the recversion function of the receiveData thread).
class singleListener(threading.Thread):
    def __init__(self):
        threading.Thread.__init__(self)

    def run(self):
        # We don't want to accept incoming connections if the user is using a SOCKS proxy. If they eventually select proxy 'none' then this will start listening for connections.
        while shared.config.get('bitmessagesettings', 'socksproxytype')[0:5] == 'SOCKS':
            time.sleep(300)

        shared.printLock.acquire()
        print 'Listening for incoming connections.'
        shared.printLock.release()
        HOST = ''  # Symbolic name meaning all available interfaces
        PORT = shared.config.getint('bitmessagesettings', 'port')
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        # This option apparently avoids the TIME_WAIT state so that we can rebind faster
        sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        sock.bind((HOST, PORT))
        sock.listen(2)
        while True:
            # We don't want to accept incoming connections if the user is using a SOCKS proxy. If the user eventually selects proxy 'none' then this will start listening for connections.
            while shared.config.get('bitmessagesettings', 'socksproxytype')[0:5] == 'SOCKS':
                time.sleep(10)
            while len(shared.connectedHostsList) > 220:
                shared.printLock.acquire()
                print 'We are connected to too many people. Not accepting further incoming connections for ten seconds.'
                shared.printLock.release()
                time.sleep(10)
            a, (HOST, PORT) = sock.accept()
            # The following code will, unfortunately, block an incoming connection if someone else on the same LAN is already connected because the two computers will share the same external IP. This is here to prevent connection flooding.
            while HOST in shared.connectedHostsList:
                shared.printLock.acquire()
                print 'We are already connected to', HOST + '. Ignoring connection.'
                shared.printLock.release()
                a.close()
                a, (HOST, PORT) = sock.accept()
            objectsOfWhichThisRemoteNodeIsAlreadyAware = {}
            a.settimeout(20)
            sd = sendDataThread()
            sd.setup(a, HOST, PORT, -1, objectsOfWhichThisRemoteNodeIsAlreadyAware)
            sd.start()
            rd = receiveDataThread()
            rd.daemon = True  # close the main program even if there are threads left
            rd.setup(a, HOST, PORT, -1, objectsOfWhichThisRemoteNodeIsAlreadyAware)
            rd.start()
            shared.printLock.acquire()
            print self, 'connected to', HOST, 'during INCOMING request.'
            shared.printLock.release()
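
# Illustrative sketch only: this mirrors the check done by
# receiveDataThread.isProofOfWorkSufficient() below, seen from the sender's
# side, and is roughly what the separate proofofwork module is used for when
# sending objects. It brute-forces an 8-byte nonce until the double SHA-512 of
# nonce + SHA-512(payload), read as a 64-bit big-endian integer, falls at or
# below the target. The function name and loop are a sketch, not the actual
# proofofwork implementation.
def _exampleDoProofOfWork(payload, nonceTrialsPerByte, payloadLengthExtraBytes):
    # The receiver measures len(data) including the 8-byte nonce, hence the +8.
    target = 2 ** 64 / ((len(payload) + 8 + payloadLengthExtraBytes) * nonceTrialsPerByte)
    initialHash = hashlib.sha512(payload).digest()
    trialValue, nonce = 2 ** 64, 0
    while trialValue > target:
        nonce += 1
        trialValue, = unpack('>Q', hashlib.sha512(hashlib.sha512(
            pack('>Q', nonce) + initialHash).digest()).digest()[0:8])
    return pack('>Q', nonce) + payload  # prepend the winning nonce to the payload
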

# This thread is created either by the synSenderThread (for outgoing connections) or the singleListenerThread (for incoming connections).
class receiveDataThread(threading.Thread):
    def __init__(self):
        threading.Thread.__init__(self)
        self.data = ''
        self.verackSent = False
        self.verackReceived = False

    def setup(self, sock, HOST, port, streamNumber, objectsOfWhichThisRemoteNodeIsAlreadyAware):
        self.sock = sock
        self.HOST = HOST
        self.PORT = port
        self.streamNumber = streamNumber
        self.payloadLength = 0  # This is the protocol payload length, thus it doesn't include the 24 byte message header
        self.objectsThatWeHaveYetToCheckAndSeeWhetherWeAlreadyHave = {}
        shared.connectedHostsList[self.HOST] = 0  # The very fact that this receiveData thread exists shows that we are connected to the remote host. Let's add it to this list so that an outgoingSynSender thread doesn't try to connect to it.
        self.connectionIsOrWasFullyEstablished = False  # set to true after the remote node and I accept each other's version messages. This is needed to allow the user interface to accurately reflect the current number of connections.
        if self.streamNumber == -1:  # This was an incoming connection. Send out a version message if we accept the other node's version message.
            self.initiatedConnection = False
        else:
            self.initiatedConnection = True
            selfInitiatedConnections[streamNumber][self] = 0
        self.ackDataThatWeHaveYetToSend = []  # When we receive a message bound for us, we store the acknowledgement that we need to send (the ackdata) here until we are done processing all other data received from this peer.
        self.objectsOfWhichThisRemoteNodeIsAlreadyAware = objectsOfWhichThisRemoteNodeIsAlreadyAware

    def run(self):
        shared.printLock.acquire()
        print 'ID of the receiveDataThread is', str(id(self)) + '. The size of the shared.connectedHostsList is now', len(shared.connectedHostsList)
        shared.printLock.release()
        while True:
            try:
                self.data += self.sock.recv(4096)
            except socket.timeout:
                shared.printLock.acquire()
                print 'Timeout occurred waiting for data from', self.HOST + '. Closing receiveData thread. (ID:', str(id(self)) + ')'
                shared.printLock.release()
                break
            except Exception, err:
                shared.printLock.acquire()
                print 'sock.recv error. Closing receiveData thread (HOST:', self.HOST, 'ID:', str(id(self)) + ').', err
                shared.printLock.release()
                break
            # print 'Received', repr(self.data)
            if self.data == "":
                shared.printLock.acquire()
                print 'Connection to', self.HOST, 'closed. Closing receiveData thread. (ID:', str(id(self)) + ')'
                shared.printLock.release()
                break
            else:
                self.processData()

        try:
            del selfInitiatedConnections[self.streamNumber][self]
            shared.printLock.acquire()
            print 'removed self (a receiveDataThread) from selfInitiatedConnections'
            shared.printLock.release()
        except:
            pass
        shared.broadcastToSendDataQueues((0, 'shutdown', self.HOST))
        try:
            del shared.connectedHostsList[self.HOST]
        except Exception, err:
            shared.printLock.acquire()
            print 'Could not delete', self.HOST, 'from shared.connectedHostsList.', err
            shared.printLock.release()
        try:
            del numberOfObjectsThatWeHaveYetToCheckAndSeeWhetherWeAlreadyHavePerPeer[self.HOST]
        except:
            pass
        shared.UISignalQueue.put(('updateNetworkStatusTab', 'no data'))
        shared.printLock.acquire()
        print 'The size of the connectedHostsList is now:', len(shared.connectedHostsList)
        shared.printLock.release()

    def processData(self):
        global verbose
        # if verbose >= 3:
        #     shared.printLock.acquire()
        #     print 'self.data is currently ', repr(self.data)
        #     shared.printLock.release()
        if len(self.data) < 20:  # if so little of the data has arrived that we can't even unpack the payload length
            return
        if self.data[0:4] != '\xe9\xbe\xb4\xd9':
            if verbose >= 1:
                shared.printLock.acquire()
                print 'The magic bytes were not correct. First 40 bytes of data: ' + repr(self.data[0:40])
                shared.printLock.release()
            self.data = ""
            return
        self.payloadLength, = unpack('>L', self.data[16:20])
        if len(self.data) < self.payloadLength + 24:  # check if the whole message has arrived yet.
            return
        if self.data[20:24] != hashlib.sha512(self.data[24:self.payloadLength + 24]).digest()[0:4]:  # test the checksum in the message. If it is incorrect, discard this message and move on to the next one.
            print 'Checksum incorrect. Clearing this message.'
            self.data = self.data[self.payloadLength + 24:]
            self.processData()
            return
        # The time we've last seen this node is obviously right now since we just received valid data from it. So update the knownNodes list so that other peers can be made aware of its existence.
        if self.initiatedConnection and self.connectionIsOrWasFullyEstablished:  # The remote port is only something we should share with others if it is the remote node's incoming port (rather than some random operating-system-assigned outgoing port).
            shared.knownNodesLock.acquire()
            shared.knownNodes[self.streamNumber][self.HOST] = (self.PORT, int(time.time()))
            shared.knownNodesLock.release()
        if self.payloadLength <= 180000000:  # If the size of the message is greater than 180MB, ignore it. (I get memory errors when processing messages much larger than this, though it is conceivable that this value will have to be lowered if some systems are less tolerant of large messages.)
            remoteCommand = self.data[4:16]
            shared.printLock.acquire()
            print 'remoteCommand', repr(remoteCommand.replace('\x00', '')), 'from', self.HOST
            shared.printLock.release()
            if remoteCommand == 'version\x00\x00\x00\x00\x00':
                self.recversion(self.data[24:self.payloadLength + 24])
            elif remoteCommand == 'verack\x00\x00\x00\x00\x00\x00':
                self.recverack()
            elif remoteCommand == 'addr\x00\x00\x00\x00\x00\x00\x00\x00' and self.connectionIsOrWasFullyEstablished:
                self.recaddr(self.data[24:self.payloadLength + 24])
            elif remoteCommand == 'getpubkey\x00\x00\x00' and self.connectionIsOrWasFullyEstablished:
                self.recgetpubkey(self.data[24:self.payloadLength + 24])
            elif remoteCommand == 'pubkey\x00\x00\x00\x00\x00\x00' and self.connectionIsOrWasFullyEstablished:
                self.recpubkey(self.data[24:self.payloadLength + 24])
            elif remoteCommand == 'inv\x00\x00\x00\x00\x00\x00\x00\x00\x00' and self.connectionIsOrWasFullyEstablished:
                self.recinv(self.data[24:self.payloadLength + 24])
            elif remoteCommand == 'getdata\x00\x00\x00\x00\x00' and self.connectionIsOrWasFullyEstablished:
                self.recgetdata(self.data[24:self.payloadLength + 24])
            elif remoteCommand == 'msg\x00\x00\x00\x00\x00\x00\x00\x00\x00' and self.connectionIsOrWasFullyEstablished:
                self.recmsg(self.data[24:self.payloadLength + 24])
            elif remoteCommand == 'broadcast\x00\x00\x00' and self.connectionIsOrWasFullyEstablished:
                self.recbroadcast(self.data[24:self.payloadLength + 24])
            elif remoteCommand == 'ping\x00\x00\x00\x00\x00\x00\x00\x00' and self.connectionIsOrWasFullyEstablished:
                self.sendpong()
            elif remoteCommand == 'pong\x00\x00\x00\x00\x00\x00\x00\x00' and self.connectionIsOrWasFullyEstablished:
                pass
            elif remoteCommand == 'alert\x00\x00\x00\x00\x00\x00\x00' and self.connectionIsOrWasFullyEstablished:
                pass
        self.data = self.data[self.payloadLength + 24:]  # take this message out and then process the next message
        if self.data == '':
            while len(self.objectsThatWeHaveYetToCheckAndSeeWhetherWeAlreadyHave) > 0:
                random.seed()
                objectHash, = random.sample(self.objectsThatWeHaveYetToCheckAndSeeWhetherWeAlreadyHave, 1)
                if objectHash in shared.inventory:
                    shared.printLock.acquire()
                    print 'Inventory (in memory) already has object listed in inv message.'
                    shared.printLock.release()
                    del self.objectsThatWeHaveYetToCheckAndSeeWhetherWeAlreadyHave[objectHash]
                elif isInSqlInventory(objectHash):
                    if verbose >= 3:
                        shared.printLock.acquire()
                        print 'Inventory (SQL on disk) already has object listed in inv message.'
                        shared.printLock.release()
                    del self.objectsThatWeHaveYetToCheckAndSeeWhetherWeAlreadyHave[objectHash]
                else:
                    self.sendgetdata(objectHash)
                    del self.objectsThatWeHaveYetToCheckAndSeeWhetherWeAlreadyHave[objectHash]  # It is possible that the remote node doesn't respond with the object. In that case, we'll very likely get it from someone else anyway.
                    if len(self.objectsThatWeHaveYetToCheckAndSeeWhetherWeAlreadyHave) == 0:
                        shared.printLock.acquire()
                        print '(concerning', self.HOST + ')', 'number of objectsThatWeHaveYetToCheckAndSeeWhetherWeAlreadyHave is now', len(self.objectsThatWeHaveYetToCheckAndSeeWhetherWeAlreadyHave)
                        shared.printLock.release()
                        try:
                            del numberOfObjectsThatWeHaveYetToCheckAndSeeWhetherWeAlreadyHavePerPeer[self.HOST]  # this data structure is maintained so that we can keep track of how many total objects, across all connections, are currently outstanding. If it goes too high it can indicate that we are under attack by multiple nodes working together.
                        except:
                            pass
                    break
            if len(self.objectsThatWeHaveYetToCheckAndSeeWhetherWeAlreadyHave) == 0:
                shared.printLock.acquire()
                print '(concerning', self.HOST + ')', 'number of objectsThatWeHaveYetToCheckAndSeeWhetherWeAlreadyHave is now', len(self.objectsThatWeHaveYetToCheckAndSeeWhetherWeAlreadyHave)
                shared.printLock.release()
                try:
                    del numberOfObjectsThatWeHaveYetToCheckAndSeeWhetherWeAlreadyHavePerPeer[self.HOST]  # this data structure is maintained so that we can keep track of how many total objects, across all connections, are currently outstanding. If it goes too high it can indicate that we are under attack by multiple nodes working together.
                except:
                    pass
            if len(self.objectsThatWeHaveYetToCheckAndSeeWhetherWeAlreadyHave) > 0:
                shared.printLock.acquire()
                print '(concerning', self.HOST + ')', 'number of objectsThatWeHaveYetToCheckAndSeeWhetherWeAlreadyHave is now', len(self.objectsThatWeHaveYetToCheckAndSeeWhetherWeAlreadyHave)
                shared.printLock.release()
                numberOfObjectsThatWeHaveYetToCheckAndSeeWhetherWeAlreadyHavePerPeer[self.HOST] = len(self.objectsThatWeHaveYetToCheckAndSeeWhetherWeAlreadyHave)  # this data structure is maintained so that we can keep track of how many total objects, across all connections, are currently outstanding. If it goes too high it can indicate that we are under attack by multiple nodes working together.
        if len(self.ackDataThatWeHaveYetToSend) > 0:
            self.data = self.ackDataThatWeHaveYetToSend.pop()
        self.processData()

    def isProofOfWorkSufficient(self, data, nonceTrialsPerByte=0, payloadLengthExtraBytes=0):
        if nonceTrialsPerByte < shared.networkDefaultProofOfWorkNonceTrialsPerByte:
            nonceTrialsPerByte = shared.networkDefaultProofOfWorkNonceTrialsPerByte
        if payloadLengthExtraBytes < shared.networkDefaultPayloadLengthExtraBytes:
            payloadLengthExtraBytes = shared.networkDefaultPayloadLengthExtraBytes
        POW, = unpack('>Q', hashlib.sha512(hashlib.sha512(data[:8] + hashlib.sha512(data[8:]).digest()).digest()).digest()[0:8])
        # print 'POW:', POW
        return POW <= 2 ** 64 / ((len(data) + payloadLengthExtraBytes) * nonceTrialsPerByte)

    def sendpong(self):
        print 'Sending pong'
        try:
            self.sock.sendall('\xE9\xBE\xB4\xD9\x70\x6F\x6E\x67\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xcf\x83\xe1\x35')
        except Exception, err:
            # if not 'Bad file descriptor' in err:
            shared.printLock.acquire()
            sys.stderr.write('sock.sendall error: %s\n' % err)
            shared.printLock.release()

    def recverack(self):
        print 'verack received'
        self.verackReceived = True
        if self.verackSent:
            # We have thus both sent and received a verack.
            self.connectionFullyEstablished()

    def connectionFullyEstablished(self):
        self.connectionIsOrWasFullyEstablished = True
        if not self.initiatedConnection:
            shared.UISignalQueue.put(('setStatusIcon', 'green'))
        self.sock.settimeout(600)  # We'll send out a pong every 5 minutes to make sure the connection stays alive if there has been no other traffic to send lately.
        shared.UISignalQueue.put(('updateNetworkStatusTab', 'no data'))
        remoteNodeIncomingPort, remoteNodeSeenTime = shared.knownNodes[self.streamNumber][self.HOST]
        shared.printLock.acquire()
        print 'Connection fully established with', self.HOST, remoteNodeIncomingPort
        print 'The size of the connectedHostsList is now', len(shared.connectedHostsList)
        print 'The length of sendDataQueues is now:', len(shared.sendDataQueues)
        print 'broadcasting addr from within connectionFullyEstablished function.'
        shared.printLock.release()
        self.broadcastaddr([(int(time.time()), self.streamNumber, 1, self.HOST, remoteNodeIncomingPort)])  # This lets all of our peers know about this new node.
        self.sendaddr()  # This is one large addr message to this one peer.
        if not self.initiatedConnection and len(shared.connectedHostsList) > 200:
            shared.printLock.acquire()
            print 'We are connected to too many people. Closing connection.'
            shared.printLock.release()
            shared.broadcastToSendDataQueues((0, 'shutdown', self.HOST))
            return
        self.sendBigInv()

    def sendBigInv(self):
        shared.sqlLock.acquire()
        # Select all hashes which are younger than two days old and in this stream.
        t = (int(time.time()) - maximumAgeOfObjectsThatIAdvertiseToOthers, int(time.time()) - lengthOfTimeToHoldOnToAllPubkeys, self.streamNumber)
        shared.sqlSubmitQueue.put('''SELECT hash FROM inventory WHERE ((receivedtime>? and objecttype<>'pubkey') or (receivedtime>? and objecttype='pubkey')) and streamnumber=?''')
        shared.sqlSubmitQueue.put(t)
        queryreturn = shared.sqlReturnQueue.get()
        shared.sqlLock.release()
        bigInvList = {}
        for row in queryreturn:
            hash, = row
            if hash not in self.objectsOfWhichThisRemoteNodeIsAlreadyAware:
                bigInvList[hash] = 0
        # We also have messages in our inventory in memory (which is a python dictionary). Let's fetch those too.
        for hash, storedValue in shared.inventory.items():
            if hash not in self.objectsOfWhichThisRemoteNodeIsAlreadyAware:
                objectType, streamNumber, payload, receivedTime = storedValue
                if streamNumber == self.streamNumber and receivedTime > int(time.time()) - maximumAgeOfObjectsThatIAdvertiseToOthers:
                    bigInvList[hash] = 0
        numberOfObjectsInInvMessage = 0
        payload = ''
        # Now let us start appending all of these hashes together. They will be sent out in a big inv message to our new peer.
        for hash, storedValue in bigInvList.items():
            payload += hash
            numberOfObjectsInInvMessage += 1
            if numberOfObjectsInInvMessage >= 50000:  # We can only send a max of 50000 items per inv message but we may have more objects to advertise. They must be split up into multiple inv messages.
                self.sendinvMessageToJustThisOnePeer(numberOfObjectsInInvMessage, payload)
                payload = ''
                numberOfObjectsInInvMessage = 0
        if numberOfObjectsInInvMessage > 0:
            self.sendinvMessageToJustThisOnePeer(numberOfObjectsInInvMessage, payload)

    # Self explanatory. Notice that there is also a broadcastinv function for broadcasting invs to everyone in our stream.
    def sendinvMessageToJustThisOnePeer(self, numberOfObjects, payload):
        payload = encodeVarint(numberOfObjects) + payload
        headerData = '\xe9\xbe\xb4\xd9'  # magic bits, slightly different from Bitcoin's magic bits.
        headerData += 'inv\x00\x00\x00\x00\x00\x00\x00\x00\x00'
        headerData += pack('>L', len(payload))
        headerData += hashlib.sha512(payload).digest()[:4]
        shared.printLock.acquire()
        print 'Sending huge inv message with', numberOfObjects, 'objects to just this one peer'
        shared.printLock.release()
        try:
            self.sock.sendall(headerData + payload)
        except Exception, err:
            # if not 'Bad file descriptor' in err:
            shared.printLock.acquire()
            sys.stderr.write('sock.sendall error: %s\n' % err)
            shared.printLock.release()

    # We have received a broadcast message
    def recbroadcast(self, data):
        self.messageProcessingStartTime = time.time()
        # First we must check to make sure the proof of work is sufficient.
        if not self.isProofOfWorkSufficient(data):
            print 'Proof of work in broadcast message insufficient.'
            return
        readPosition = 8  # bypass the nonce
        embeddedTime, = unpack('>I', data[readPosition:readPosition + 4])
        # This section is used for the transition from 32 bit time to 64 bit time in the protocol.
        if embeddedTime == 0:
            embeddedTime, = unpack('>Q', data[readPosition:readPosition + 8])
            readPosition += 8
        else:
            readPosition += 4
        if embeddedTime > (int(time.time()) + 10800):  # prevent funny business
            print 'The embedded time in this broadcast message is more than three hours in the future. That doesn\'t make sense. Ignoring message.'
            return
        if embeddedTime < (int(time.time()) - maximumAgeOfAnObjectThatIAmWillingToAccept):
            print 'The embedded time in this broadcast message is too old. Ignoring message.'
            return
        if len(data) < 180:
            print 'The payload length of this broadcast packet is unreasonably low. Someone is probably trying funny business. Ignoring message.'
            return
        # Let us check to make sure the stream number is correct (thus preventing an individual from sending broadcasts out on the wrong streams or on all streams).
        broadcastVersion, broadcastVersionLength = decodeVarint(data[readPosition:readPosition + 10])
        if broadcastVersion >= 2:
            streamNumber, streamNumberLength = decodeVarint(data[readPosition + broadcastVersionLength:readPosition + broadcastVersionLength + 10])
            if streamNumber != self.streamNumber:
                print 'The stream number encoded in this broadcast message (' + str(streamNumber) + ') does not match the stream number on which it was received. Ignoring it.'
                return
        shared.inventoryLock.acquire()
        self.inventoryHash = calculateInventoryHash(data)
        if self.inventoryHash in shared.inventory:
            print 'We have already received this broadcast object. Ignoring.'
            shared.inventoryLock.release()
            return
        elif isInSqlInventory(self.inventoryHash):
            print 'We have already received this broadcast object (it is stored on disk in the SQL inventory). Ignoring it.'
            shared.inventoryLock.release()
            return
        # It is valid so far. Let's let our peers know about it.
        objectType = 'broadcast'
        shared.inventory[self.inventoryHash] = (objectType, self.streamNumber, data, embeddedTime)
        shared.inventoryLock.release()
        self.broadcastinv(self.inventoryHash)
        shared.UISignalQueue.put(('incrementNumberOfBroadcastsProcessed', 'no data'))

        self.processbroadcast(readPosition, data)  # When this function returns, we will have either successfully processed this broadcast because we are interested in it, ignored it because we aren't interested in it, or found a problem with the broadcast that warranted ignoring it.

        # Let us now set lengthOfTimeWeShouldUseToProcessThisMessage. If we haven't used the specified amount of time, we shall sleep. These values are mostly the same values used for msg messages although broadcast messages are processed faster.
        if len(data) > 100000000:  # Size is greater than 100 megabytes
            lengthOfTimeWeShouldUseToProcessThisMessage = 100  # seconds.
        elif len(data) > 10000000:  # Between 100 and 10 megabytes
            lengthOfTimeWeShouldUseToProcessThisMessage = 20  # seconds.
        elif len(data) > 1000000:  # Between 10 and 1 megabyte
            lengthOfTimeWeShouldUseToProcessThisMessage = 3  # seconds.
        else:  # Less than 1 megabyte
            lengthOfTimeWeShouldUseToProcessThisMessage = .6  # seconds.
        sleepTime = lengthOfTimeWeShouldUseToProcessThisMessage - (time.time() - self.messageProcessingStartTime)
        if sleepTime > 0:
            shared.printLock.acquire()
            print 'Timing attack mitigation: Sleeping for', sleepTime, 'seconds.'
            shared.printLock.release()
            time.sleep(sleepTime)
        shared.printLock.acquire()
        print 'Total message processing time:', time.time() - self.messageProcessingStartTime, 'seconds.'
        shared.printLock.release()

    # A broadcast message has a valid time and POW and requires processing. The recbroadcast function calls this one.
    def processbroadcast(self, readPosition, data):
        broadcastVersion, broadcastVersionLength = decodeVarint(data[readPosition:readPosition + 9])
        readPosition += broadcastVersionLength
        if broadcastVersion < 1 or broadcastVersion > 2:
            print 'Cannot decode incoming broadcast versions higher than 2. Assuming the sender isn\'t being silly, you should upgrade Bitmessage because this message shall be ignored.'
            return
        if broadcastVersion == 1:
            beginningOfPubkeyPosition = readPosition  # used when we add the pubkey to our pubkey table
            sendersAddressVersion, sendersAddressVersionLength = decodeVarint(data[readPosition:readPosition + 9])
            if sendersAddressVersion <= 1 or sendersAddressVersion >= 3:
                # Cannot decode senderAddressVersion higher than 2. Assuming the sender isn't being silly, you should upgrade Bitmessage because this message shall be ignored.
                return
            readPosition += sendersAddressVersionLength
            if sendersAddressVersion == 2:
                sendersStream, sendersStreamLength = decodeVarint(data[readPosition:readPosition + 9])
                readPosition += sendersStreamLength
                behaviorBitfield = data[readPosition:readPosition + 4]
                readPosition += 4
                sendersPubSigningKey = '\x04' + data[readPosition:readPosition + 64]
                readPosition += 64
                sendersPubEncryptionKey = '\x04' + data[readPosition:readPosition + 64]
                readPosition += 64
                endOfPubkeyPosition = readPosition
                sendersHash = data[readPosition:readPosition + 20]
                if sendersHash not in shared.broadcastSendersForWhichImWatching:
                    # Display timing data
                    shared.printLock.acquire()
                    print 'Time spent deciding that we are not interested in this v1 broadcast:', time.time() - self.messageProcessingStartTime
                    shared.printLock.release()
                    return
                # At this point, this message claims to be from sendersHash and we are interested in it. We still have to hash the public key to make sure it is truly the key that matches the hash, and also check the signature.
                readPosition += 20
                sha = hashlib.new('sha512')
                sha.update(sendersPubSigningKey + sendersPubEncryptionKey)
                ripe = hashlib.new('ripemd160')
                ripe.update(sha.digest())
                if ripe.digest() != sendersHash:
                    # The sender of this message lied.
                    return
                messageEncodingType, messageEncodingTypeLength = decodeVarint(data[readPosition:readPosition + 9])
                if messageEncodingType == 0:
                    return
                readPosition += messageEncodingTypeLength
                messageLength, messageLengthLength = decodeVarint(data[readPosition:readPosition + 9])
                readPosition += messageLengthLength
                message = data[readPosition:readPosition + messageLength]
                readPosition += messageLength
                readPositionAtBottomOfMessage = readPosition
                signatureLength, signatureLengthLength = decodeVarint(data[readPosition:readPosition + 9])
                readPosition += signatureLengthLength
                signature = data[readPosition:readPosition + signatureLength]
                try:
                    if not highlevelcrypto.verify(data[12:readPositionAtBottomOfMessage], signature, sendersPubSigningKey.encode('hex')):
                        print 'ECDSA verify failed'
                        return
                    print 'ECDSA verify passed'
                except Exception, err:
                    print 'ECDSA verify failed', err
                    return
                # verify passed

                # Let's store the public key in case we want to reply to this person.
                # We don't have the correct nonce or time (which would let us send out a pubkey message) so we'll just fill it with 1's. We won't be able to send this pubkey to others (without doing the proof of work ourselves, which this program is programmed not to do.)
                t = (ripe.digest(), '\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF' + '\xFF\xFF\xFF\xFF' + data[beginningOfPubkeyPosition:endOfPubkeyPosition], int(time.time()), 'yes')
                shared.sqlLock.acquire()
                shared.sqlSubmitQueue.put('''INSERT INTO pubkeys VALUES (?,?,?,?)''')
                shared.sqlSubmitQueue.put(t)
                shared.sqlReturnQueue.get()
                shared.sqlSubmitQueue.put('commit')
                shared.sqlLock.release()
                # shared.workerQueue.put(('newpubkey',(sendersAddressVersion,sendersStream,ripe.digest())))  # This will check to see whether we happen to be awaiting this pubkey in order to send a message. If we are, it will do the POW and send it.
                self.possibleNewPubkey(ripe.digest())

                fromAddress = encodeAddress(sendersAddressVersion, sendersStream, ripe.digest())
                shared.printLock.acquire()
                print 'fromAddress:', fromAddress
                shared.printLock.release()
                if messageEncodingType == 2:
                    bodyPositionIndex = string.find(message, '\nBody:')
                    if bodyPositionIndex > 1:
                        subject = message[8:bodyPositionIndex]
                        body = message[bodyPositionIndex + 6:]
                    else:
                        subject = ''
                        body = message
                elif messageEncodingType == 1:
                    body = message
                    subject = ''
                elif messageEncodingType == 0:
                    print 'messageEncodingType == 0. Doing nothing with the message.'
                else:
                    body = 'Unknown encoding type.\n\n' + repr(message)
                    subject = ''
                toAddress = '[Broadcast subscribers]'
                if messageEncodingType != 0:
                    shared.sqlLock.acquire()
                    t = (self.inventoryHash, toAddress, fromAddress, subject, int(time.time()), body, 'inbox', messageEncodingType, 0)
                    shared.sqlSubmitQueue.put('''INSERT INTO inbox VALUES (?,?,?,?,?,?,?,?,?)''')
                    shared.sqlSubmitQueue.put(t)
                    shared.sqlReturnQueue.get()
                    shared.sqlSubmitQueue.put('commit')
                    shared.sqlLock.release()
                    shared.UISignalQueue.put(('displayNewInboxMessage', (self.inventoryHash, toAddress, fromAddress, subject, body)))
                # If we are behaving as an API then we might need to run an outside command to let some program know that a new message has arrived.
                if shared.safeConfigGetBoolean('bitmessagesettings', 'apienabled'):
                    try:
                        apiNotifyPath = shared.config.get('bitmessagesettings', 'apinotifypath')
                    except:
                        apiNotifyPath = ''
                    if apiNotifyPath != '':
                        call([apiNotifyPath, "newBroadcast"])
                # Display timing data
                shared.printLock.acquire()
                print 'Time spent processing this interesting broadcast:', time.time() - self.messageProcessingStartTime
                shared.printLock.release()
        if broadcastVersion == 2:
            cleartextStreamNumber, cleartextStreamNumberLength = decodeVarint(data[readPosition:readPosition + 10])
            readPosition += cleartextStreamNumberLength
            initialDecryptionSuccessful = False
            for key, cryptorObject in shared.MyECSubscriptionCryptorObjects.items():
                try:
                    decryptedData = cryptorObject.decrypt(data[readPosition:])
                    toRipe = key  # This is the RIPE hash of the sender's pubkey. We need this below to compare to the RIPE hash of the sender's address to verify that it was encrypted with their key rather than some other key.
                    initialDecryptionSuccessful = True
                    print 'EC decryption successful using key associated with ripe hash:', key.encode('hex')
                    break
                except Exception, err:
                    pass
                    # print 'cryptorObject.decrypt Exception:', err
            if not initialDecryptionSuccessful:
                # This is not a broadcast I am interested in.
                shared.printLock.acquire()
                print 'Length of time program spent failing to decrypt this v2 broadcast:', time.time() - self.messageProcessingStartTime, 'seconds.'
                shared.printLock.release()
                return
            # At this point this is a broadcast I have decrypted and thus am interested in.
            signedBroadcastVersion, readPosition = decodeVarint(decryptedData[:10])
            beginningOfPubkeyPosition = readPosition  # used when we add the pubkey to our pubkey table
            sendersAddressVersion, sendersAddressVersionLength = decodeVarint(decryptedData[readPosition:readPosition + 9])
            if sendersAddressVersion < 2 or sendersAddressVersion > 3:
                print 'Cannot decode senderAddressVersion other than 2 or 3. Assuming the sender isn\'t being silly, you should upgrade Bitmessage because this message shall be ignored.'
                return
            readPosition += sendersAddressVersionLength
            sendersStream, sendersStreamLength = decodeVarint(decryptedData[readPosition:readPosition + 9])
            if sendersStream != cleartextStreamNumber:
                print 'The stream number outside of the encryption on which the POW was completed doesn\'t match the stream number inside the encryption. Ignoring broadcast.'
                return
            readPosition += sendersStreamLength
            behaviorBitfield = decryptedData[readPosition:readPosition + 4]
            readPosition += 4
            sendersPubSigningKey = '\x04' + decryptedData[readPosition:readPosition + 64]
            readPosition += 64
            sendersPubEncryptionKey = '\x04' + decryptedData[readPosition:readPosition + 64]
            readPosition += 64
            if sendersAddressVersion >= 3:
                requiredAverageProofOfWorkNonceTrialsPerByte, varintLength = decodeVarint(decryptedData[readPosition:readPosition + 10])
                readPosition += varintLength
                print 'sender\'s requiredAverageProofOfWorkNonceTrialsPerByte is', requiredAverageProofOfWorkNonceTrialsPerByte
                requiredPayloadLengthExtraBytes, varintLength = decodeVarint(decryptedData[readPosition:readPosition + 10])
                readPosition += varintLength
                print 'sender\'s requiredPayloadLengthExtraBytes is', requiredPayloadLengthExtraBytes
            endOfPubkeyPosition = readPosition
            sha = hashlib.new('sha512')
            sha.update(sendersPubSigningKey + sendersPubEncryptionKey)
            ripe = hashlib.new('ripemd160')
            ripe.update(sha.digest())
            if toRipe != ripe.digest():
                print 'The encryption key used to encrypt this message doesn\'t match the keys embedded in the message itself. Ignoring message.'
                return
            messageEncodingType, messageEncodingTypeLength = decodeVarint(decryptedData[readPosition:readPosition + 9])
            if messageEncodingType == 0:
                return
            readPosition += messageEncodingTypeLength
            messageLength, messageLengthLength = decodeVarint(decryptedData[readPosition:readPosition + 9])
            readPosition += messageLengthLength
            message = decryptedData[readPosition:readPosition + messageLength]
            readPosition += messageLength
            readPositionAtBottomOfMessage = readPosition
            signatureLength, signatureLengthLength = decodeVarint(decryptedData[readPosition:readPosition + 9])
            readPosition += signatureLengthLength
            signature = decryptedData[readPosition:readPosition + signatureLength]
            try:
                if not highlevelcrypto.verify(decryptedData[:readPositionAtBottomOfMessage], signature, sendersPubSigningKey.encode('hex')):
                    print 'ECDSA verify failed'
                    return
                print 'ECDSA verify passed'
            except Exception, err:
                print 'ECDSA verify failed', err
                return
            # verify passed

            # Let's store the public key in case we want to reply to this person.
            t = (ripe.digest(), '\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF' + '\xFF\xFF\xFF\xFF' + decryptedData[beginningOfPubkeyPosition:endOfPubkeyPosition], int(time.time()), 'yes')
            shared.sqlLock.acquire()
            shared.sqlSubmitQueue.put('''INSERT INTO pubkeys VALUES (?,?,?,?)''')
            shared.sqlSubmitQueue.put(t)
            shared.sqlReturnQueue.get()
            shared.sqlSubmitQueue.put('commit')
            shared.sqlLock.release()
            # shared.workerQueue.put(('newpubkey',(sendersAddressVersion,sendersStream,ripe.digest())))  # This will check to see whether we happen to be awaiting this pubkey in order to send a message. If we are, it will do the POW and send it.
            self.possibleNewPubkey(ripe.digest())

            fromAddress = encodeAddress(sendersAddressVersion, sendersStream, ripe.digest())
            shared.printLock.acquire()
            print 'fromAddress:', fromAddress
            shared.printLock.release()
            if messageEncodingType == 2:
                bodyPositionIndex = string.find(message, '\nBody:')
                if bodyPositionIndex > 1:
                    subject = message[8:bodyPositionIndex]
                    body = message[bodyPositionIndex + 6:]
                else:
                    subject = ''
                    body = message
            elif messageEncodingType == 1:
                body = message
                subject = ''
            elif messageEncodingType == 0:
                print 'messageEncodingType == 0. Doing nothing with the message.'
            else:
                body = 'Unknown encoding type.\n\n' + repr(message)
                subject = ''

            toAddress = '[Broadcast subscribers]'
            if messageEncodingType != 0:
                shared.sqlLock.acquire()
                t = (self.inventoryHash, toAddress, fromAddress, subject, int(time.time()), body, 'inbox', messageEncodingType, 0)
                shared.sqlSubmitQueue.put('''INSERT INTO inbox VALUES (?,?,?,?,?,?,?,?,?)''')
                shared.sqlSubmitQueue.put(t)
                shared.sqlReturnQueue.get()
                shared.sqlSubmitQueue.put('commit')
                shared.sqlLock.release()
                shared.UISignalQueue.put(('displayNewInboxMessage', (self.inventoryHash, toAddress, fromAddress, subject, body)))

            # If we are behaving as an API then we might need to run an outside command to let some program know that a new message has arrived.
            if shared.safeConfigGetBoolean('bitmessagesettings', 'apienabled'):
                try:
                    apiNotifyPath = shared.config.get('bitmessagesettings', 'apinotifypath')
                except:
                    apiNotifyPath = ''
                if apiNotifyPath != '':
                    call([apiNotifyPath, "newBroadcast"])

            # Display timing data
            shared.printLock.acquire()
            print 'Time spent processing this interesting broadcast:', time.time() - self.messageProcessingStartTime
            shared.printLock.release()
#We have received a msg message.
2013-03-28 22:56:20 +01:00
def recmsg ( self , data ) :
2013-02-06 22:17:49 +01:00
self . messageProcessingStartTime = time . time ( )
2012-11-19 20:45:05 +01:00
#First we must check to make sure the proof of work is sufficient.
2013-03-28 22:56:20 +01:00
if not self . isProofOfWorkSufficient ( data ) :
2012-11-19 20:45:05 +01:00
print ' Proof of work in msg message insufficient. '
return
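#A minimal sketch, under the assumption that isProofOfWorkSufficient follows the usual
#2013-era scheme (the authoritative version and its constants live elsewhere in this program),
#of what that check is expected to compute:
#    nonce = data[:8]
#    initialHash = hashlib.sha512(data[8:]).digest()
#    POWValue, = unpack('>Q', hashlib.sha512(hashlib.sha512(nonce + initialHash).digest()).digest()[0:8])
#    target = 2 ** 64 / ((len(data) + payloadLengthExtraBytes) * averageProofOfWorkNonceTrialsPerByte)
#    return POWValue <= target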
2013-02-18 21:22:48 +01:00
2013-03-28 22:56:20 +01:00
readPosition = 8
embeddedTime , = unpack ( ' >I ' , data [ readPosition : readPosition + 4 ] )
2013-04-17 20:24:16 +02:00
#This section is used for the transition from 32 bit time to 64 bit time in the protocol.
if embeddedTime == 0 :
embeddedTime , = unpack ( ' >Q ' , data [ readPosition : readPosition + 8 ] )
readPosition + = 8
else :
readPosition + = 4
2012-11-19 20:45:05 +01:00
if embeddedTime > int ( time . time ( ) ) + 10800 :
print ' The time in the msg message is too new. Ignoring it. Time: ' , embeddedTime
return
if embeddedTime < int ( time . time ( ) ) - maximumAgeOfAnObjectThatIAmWillingToAccept :
print ' The time in the msg message is too old. Ignoring it. Time: ' , embeddedTime
return
2013-03-28 22:56:20 +01:00
streamNumberAsClaimedByMsg , streamNumberAsClaimedByMsgLength = decodeVarint ( data [ readPosition : readPosition + 9 ] )
2012-12-18 22:36:37 +01:00
if streamNumberAsClaimedByMsg != self . streamNumber :
2013-02-04 22:49:02 +01:00
print 'The stream number encoded in this msg message (' + str(streamNumberAsClaimedByMsg) + ') does not match the number of the stream on which it was received. Ignoring it.'
2012-12-18 22:36:37 +01:00
return
readPosition + = streamNumberAsClaimedByMsgLength
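#decodeVarint, used throughout this parser, follows the Bitcoin-style variable-length integer
#encoding (hedged description; addresses.py holds the authoritative implementation): values
#below 0xfd occupy one byte, otherwise a prefix byte of 0xfd, 0xfe or 0xff is followed by a
#2-, 4- or 8-byte big-endian integer. It returns (value, bytesConsumed), which is why every
#call is followed by advancing readPosition by the second element, e.g.
#    streamNumber, bytesConsumed = decodeVarint(data[readPosition:readPosition + 9])
#    readPosition += bytesConsumed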
2013-03-28 22:56:20 +01:00
self . inventoryHash = calculateInventoryHash ( data )
2013-05-02 17:53:54 +02:00
shared . inventoryLock . acquire ( )
if self . inventoryHash in shared . inventory :
2012-11-19 20:45:05 +01:00
print ' We have already received this msg message. Ignoring. '
2013-05-02 17:53:54 +02:00
shared . inventoryLock . release ( )
2012-11-19 20:45:05 +01:00
return
2013-02-06 22:17:49 +01:00
elif isInSqlInventory ( self . inventoryHash ) :
2012-11-19 20:45:05 +01:00
print ' We have already received this msg message (it is stored on disk in the SQL inventory). Ignoring it. '
2013-05-02 17:53:54 +02:00
shared . inventoryLock . release ( )
2012-11-19 20:45:05 +01:00
return
#This msg message is valid. Let's let our peers know about it.
objectType = ' msg '
2013-05-02 17:53:54 +02:00
shared . inventory [ self . inventoryHash ] = ( objectType , self . streamNumber , data , embeddedTime )
shared . inventoryLock . release ( )
2013-02-06 22:17:49 +01:00
self . broadcastinv ( self . inventoryHash )
2013-05-02 17:53:54 +02:00
shared . UISignalQueue . put ( ( ' incrementNumberOfMessagesProcessed ' , ' no data ' ) )
2013-05-01 22:06:55 +02:00
2012-11-19 20:45:05 +01:00
2013-03-28 22:56:20 +01:00
self . processmsg ( readPosition , data ) #When this function returns, we will have either successfully processed the message bound for us, ignored it because it isn't bound for us, or found problem with the message that warranted ignoring it.
2013-02-06 22:17:49 +01:00
# Let us now set lengthOfTimeWeShouldUseToProcessThisMessage. If we haven't used up the specified amount of time, we shall sleep. These values are based on test timings and you may change them at will.
2013-04-02 18:23:34 +02:00
if len ( data ) > 100000000 : #Size is greater than 100 megabytes
2013-02-06 22:17:49 +01:00
lengthOfTimeWeShouldUseToProcessThisMessage = 100 #seconds. Actual length of time it took my computer to decrypt and verify the signature of a 100 MB message: 3.7 seconds.
2013-04-02 18:23:34 +02:00
elif len ( data ) > 10000000 : #Between 100 and 10 megabytes
2013-02-06 22:17:49 +01:00
lengthOfTimeWeShouldUseToProcessThisMessage = 20 #seconds. Actual length of time it took my computer to decrypt and verify the signature of a 10 MB message: 0.53 seconds. Actual length of time it takes in practice when processing a real message: 1.44 seconds.
2013-04-02 18:23:34 +02:00
elif len ( data ) > 1000000 : #Between 10 and 1 megabyte
2013-02-06 22:17:49 +01:00
lengthOfTimeWeShouldUseToProcessThisMessage = 3 #seconds. Actual length of time it took my computer to decrypt and verify the signature of a 1 MB message: 0.18 seconds. Actual length of time it takes in practice when processing a real message: 0.30 seconds.
else : #Less than 1 megabyte
lengthOfTimeWeShouldUseToProcessThisMessage = .6 #seconds. Actual length of time it took my computer to decrypt and verify the signature of a 100 KB message: 0.15 seconds. Actual length of time it takes in practice when processing a real message: 0.25 seconds.
sleepTime = lengthOfTimeWeShouldUseToProcessThisMessage - ( time . time ( ) - self . messageProcessingStartTime )
if sleepTime > 0 :
2013-05-02 17:53:54 +02:00
shared . printLock . acquire ( )
2013-02-06 22:17:49 +01:00
print ' Timing attack mitigation: Sleeping for ' , sleepTime , ' seconds. '
2013-05-02 17:53:54 +02:00
shared . printLock . release ( )
2013-02-06 22:17:49 +01:00
time . sleep ( sleepTime )
2013-05-02 17:53:54 +02:00
shared . printLock . acquire ( )
2013-02-06 22:17:49 +01:00
print ' Total message processing time: ' , time . time ( ) - self . messageProcessingStartTime , ' seconds. '
2013-05-02 17:53:54 +02:00
shared . printLock . release ( )
2013-03-28 22:56:20 +01:00
2013-02-06 22:17:49 +01:00
#A msg message has a valid time and POW and requires processing. The recmsg function calls this one.
2013-03-28 22:56:20 +01:00
def processmsg ( self , readPosition , encryptedData ) :
2013-02-06 22:17:49 +01:00
initialDecryptionSuccessful = False
2012-11-19 20:45:05 +01:00
#Let's check whether this is a message acknowledgement bound for us.
2013-03-28 22:56:20 +01:00
if encryptedData [ readPosition : ] in ackdataForWhichImWatching :
2013-05-02 17:53:54 +02:00
shared . printLock . acquire ( )
2012-11-23 09:22:56 +01:00
print ' This msg IS an acknowledgement bound for me. '
2013-05-02 17:53:54 +02:00
shared . printLock . release ( )
2013-03-28 22:56:20 +01:00
del ackdataForWhichImWatching [ encryptedData [ readPosition : ] ]
t = ( ' ackreceived ' , encryptedData [ readPosition : ] )
2013-05-02 17:53:54 +02:00
shared . sqlLock . acquire ( )
shared . sqlSubmitQueue . put ( ' UPDATE sent SET status=? WHERE ackdata=? ' )
shared . sqlSubmitQueue . put ( t )
shared . sqlReturnQueue . get ( )
shared . sqlSubmitQueue . put ( ' commit ' )
shared . sqlLock . release ( )
2013-05-29 23:18:44 +02:00
shared . UISignalQueue . put ( ( ' updateSentItemStatusByAckdata ' , ( encryptedData [ readPosition : ] , ' Acknowledgement of the message received just now. ' + unicode ( strftime ( shared . config . get ( ' bitmessagesettings ' , ' timeformat ' ) , localtime ( int ( time . time ( ) ) ) ) , ' utf-8 ' ) ) ) )
2012-11-19 20:45:05 +01:00
return
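#Descriptive note on the branch above: when this program sends a msg it embeds a small, fully
#formed object (the ackdata) which the recipient later broadcasts back on the sender's behalf.
#Seeing that exact byte string arrive here is therefore treated as proof of delivery, so the
#sent-item status is flipped in SQL and the UI is told to display the acknowledgement time.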
else :
2013-05-02 17:53:54 +02:00
shared . printLock . acquire ( )
2013-03-28 22:56:20 +01:00
print ' This was NOT an acknowledgement bound for me. '
2012-11-23 09:22:56 +01:00
#print 'ackdataForWhichImWatching', ackdataForWhichImWatching
2013-05-02 17:53:54 +02:00
shared . printLock . release ( )
2012-11-19 20:45:05 +01:00
#This is not an acknowledgement bound for me. See if it is a message bound for me by trying to decrypt it with my private keys.
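#Because a msg object does not reveal its destination, the only way to learn whether it is ours
#is trial decryption with each of our private encryption keys. A minimal sketch of the loop
#below (each cryptorObject wraps pyelliptic ECC decryption and is keyed by our RIPE hash):
#    for key, cryptorObject in shared.myECCryptorObjects.items():
#        try:
#            decryptedData = cryptorObject.decrypt(encryptedData[readPosition:])
#            break        # success: this msg is bound for the address whose RIPE hash equals key
#        except Exception:
#            continue     # wrong key; try the next one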
2013-05-02 17:53:54 +02:00
for key , cryptorObject in shared . myECCryptorObjects . items ( ) :
2013-01-18 23:38:09 +01:00
try :
2013-04-26 19:20:30 +02:00
decryptedData = cryptorObject . decrypt ( encryptedData [ readPosition : ] )
2013-01-18 23:38:09 +01:00
toRipe = key #This is the RIPE hash of my pubkeys. We need this below to compare to the destination_ripe included in the encrypted data.
initialDecryptionSuccessful = True
print ' EC decryption successful using key associated with ripe hash: ' , key . encode ( ' hex ' )
break
except Exception , err :
pass
#print 'cryptorObject.decrypt Exception:', err
2013-02-05 22:53:56 +01:00
if not initialDecryptionSuccessful :
#This is not a message bound for me.
2013-05-02 17:53:54 +02:00
shared . printLock . acquire ( )
2013-02-06 22:17:49 +01:00
print 'Length of time the program spent failing to decrypt this message:', time.time() - self.messageProcessingStartTime, 'seconds.'
2013-05-02 17:53:54 +02:00
shared . printLock . release ( )
2013-02-05 22:53:56 +01:00
else :
2013-01-18 23:38:09 +01:00
#This is a message bound for me.
2013-05-02 17:53:54 +02:00
toAddress = shared . myAddressesByHash [ toRipe ] #Look up my address based on the RIPE hash.
2013-01-18 23:38:09 +01:00
readPosition = 0
2013-04-26 19:20:30 +02:00
messageVersion , messageVersionLength = decodeVarint ( decryptedData [ readPosition : readPosition + 10 ] )
2013-01-18 23:38:09 +01:00
readPosition + = messageVersionLength
if messageVersion != 1 :
print ' Cannot understand message versions other than one. Ignoring message. '
return
2013-04-26 19:20:30 +02:00
sendersAddressVersionNumber , sendersAddressVersionNumberLength = decodeVarint ( decryptedData [ readPosition : readPosition + 10 ] )
2013-01-18 23:38:09 +01:00
readPosition + = sendersAddressVersionNumberLength
if sendersAddressVersionNumber == 0 :
print ' Cannot understand sendersAddressVersionNumber = 0. Ignoring message. '
return
2013-04-24 21:48:46 +02:00
if sendersAddressVersionNumber > = 4 :
2013-04-25 22:11:00 +02:00
print ' Sender \' s address version number ' , sendersAddressVersionNumber , ' not yet supported. Ignoring message. '
2013-01-18 23:38:09 +01:00
return
2013-04-26 19:20:30 +02:00
if len ( decryptedData ) < 170 :
2013-01-18 23:38:09 +01:00
print ' Length of the unencrypted data is unreasonably short. Sanity check failed. Ignoring message. '
return
2013-04-26 19:20:30 +02:00
sendersStreamNumber , sendersStreamNumberLength = decodeVarint ( decryptedData [ readPosition : readPosition + 10 ] )
2013-01-24 17:38:12 +01:00
if sendersStreamNumber == 0 :
print ' sender \' s stream number is 0. Ignoring message. '
return
2013-01-18 23:38:09 +01:00
readPosition + = sendersStreamNumberLength
2013-04-26 19:20:30 +02:00
behaviorBitfield = decryptedData [ readPosition : readPosition + 4 ]
2013-01-18 23:38:09 +01:00
readPosition + = 4
2013-04-26 19:20:30 +02:00
pubSigningKey = ' \x04 ' + decryptedData [ readPosition : readPosition + 64 ]
2013-01-18 23:38:09 +01:00
readPosition + = 64
2013-04-26 19:20:30 +02:00
pubEncryptionKey = ' \x04 ' + decryptedData [ readPosition : readPosition + 64 ]
2013-01-18 23:38:09 +01:00
readPosition + = 64
2013-04-24 21:48:46 +02:00
if sendersAddressVersionNumber > = 3 :
2013-04-26 19:20:30 +02:00
requiredAverageProofOfWorkNonceTrialsPerByte , varintLength = decodeVarint ( decryptedData [ readPosition : readPosition + 10 ] )
2013-04-24 21:48:46 +02:00
readPosition + = varintLength
print ' sender \' s requiredAverageProofOfWorkNonceTrialsPerByte is ' , requiredAverageProofOfWorkNonceTrialsPerByte
2013-04-26 19:20:30 +02:00
requiredPayloadLengthExtraBytes , varintLength = decodeVarint ( decryptedData [ readPosition : readPosition + 10 ] )
2013-04-24 21:48:46 +02:00
readPosition + = varintLength
print ' sender \' s requiredPayloadLengthExtraBytes is ' , requiredPayloadLengthExtraBytes
2013-01-18 23:38:09 +01:00
endOfThePublicKeyPosition = readPosition #needed for when we store the pubkey in our database of pubkeys for later use.
2013-04-26 19:20:30 +02:00
if toRipe != decryptedData [ readPosition : readPosition + 20 ] :
2013-05-02 17:53:54 +02:00
shared . printLock . acquire ( )
2013-01-18 23:38:09 +01:00
print ' The original sender of this message did not send it to you. Someone is attempting a Surreptitious Forwarding Attack. '
2013-03-28 22:56:20 +01:00
print ' See: http://world.std.com/~dtd/sign_encrypt/sign_encrypt7.html '
2013-01-18 23:38:09 +01:00
print ' your toRipe: ' , toRipe . encode ( ' hex ' )
2013-04-26 19:20:30 +02:00
print ' embedded destination toRipe: ' , decryptedData [ readPosition : readPosition + 20 ] . encode ( ' hex ' )
2013-05-02 17:53:54 +02:00
shared . printLock . release ( )
2013-01-18 23:38:09 +01:00
return
readPosition + = 20
2013-04-26 19:20:30 +02:00
messageEncodingType , messageEncodingTypeLength = decodeVarint ( decryptedData [ readPosition : readPosition + 10 ] )
2013-01-18 23:38:09 +01:00
readPosition + = messageEncodingTypeLength
2013-04-26 19:20:30 +02:00
messageLength , messageLengthLength = decodeVarint ( decryptedData [ readPosition : readPosition + 10 ] )
2013-01-18 23:38:09 +01:00
readPosition + = messageLengthLength
2013-04-26 19:20:30 +02:00
message = decryptedData [ readPosition : readPosition + messageLength ]
2013-01-18 23:38:09 +01:00
#print 'First 150 characters of message:', repr(message[:150])
readPosition + = messageLength
2013-04-26 19:20:30 +02:00
ackLength , ackLengthLength = decodeVarint ( decryptedData [ readPosition : readPosition + 10 ] )
2013-01-18 23:38:09 +01:00
readPosition + = ackLengthLength
2013-04-26 19:20:30 +02:00
ackData = decryptedData [ readPosition : readPosition + ackLength ]
2013-01-18 23:38:09 +01:00
readPosition + = ackLength
positionOfBottomOfAckData = readPosition #needed to mark the end of what is covered by the signature
2013-04-26 19:20:30 +02:00
signatureLength , signatureLengthLength = decodeVarint ( decryptedData [ readPosition : readPosition + 10 ] )
2013-01-18 23:38:09 +01:00
readPosition + = signatureLengthLength
2013-04-26 19:20:30 +02:00
signature = decryptedData [ readPosition : readPosition + signatureLength ]
2013-01-18 23:38:09 +01:00
try :
2013-06-07 21:06:53 +02:00
if not highlevelcrypto . verify ( decryptedData [ : positionOfBottomOfAckData ] , signature , pubSigningKey . encode ( ' hex ' ) ) :
print ' ECDSA verify failed '
return
2013-01-18 23:38:09 +01:00
print ' ECDSA verify passed '
except Exception , err :
print ' ECDSA verify failed ' , err
return
2013-05-02 17:53:54 +02:00
shared . printLock . acquire ( )
2013-01-24 23:45:36 +01:00
print ' As a matter of intellectual curiosity, here is the Bitcoin address associated with the keys owned by the other person: ' , calculateBitcoinAddressFromPubkey ( pubSigningKey ) , ' ..and here is the testnet address: ' , calculateTestnetAddressFromPubkey ( pubSigningKey ) , ' . The other person must take their private signing key from Bitmessage and import it into Bitcoin (or a service like Blockchain.info) for it to be of any use. Do not use this unless you know what you are doing. '
2013-05-02 17:53:54 +02:00
shared . printLock . release ( )
2013-01-18 23:38:09 +01:00
#calculate the fromRipe.
sha = hashlib . new ( ' sha512 ' )
sha . update ( pubSigningKey + pubEncryptionKey )
ripe = hashlib . new ( ' ripemd160 ' )
ripe . update ( sha . digest ( ) )
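#From the RIPE hash computed above, the sender's address can be rebuilt the same way our own
#addresses are built: encodeAddress(version, stream, ripe) (hedged summary of addresses.py)
#base58-encodes varint(version) + varint(stream) + ripe together with a double-SHA512 checksum
#and prepends 'BM-'. That is what produces fromAddress a few lines further down.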
#Let's store the public key in case we want to reply to this person.
2013-04-26 19:20:30 +02:00
t = ( ripe . digest ( ) , ' \xFF \xFF \xFF \xFF \xFF \xFF \xFF \xFF ' + ' \xFF \xFF \xFF \xFF ' + decryptedData [ messageVersionLength : endOfThePublicKeyPosition ] , int ( time . time ( ) ) , ' yes ' )
2013-05-02 17:53:54 +02:00
shared . sqlLock . acquire ( )
shared . sqlSubmitQueue . put ( ''' INSERT INTO pubkeys VALUES (?,?,?,?) ''' )
shared . sqlSubmitQueue . put ( t )
shared . sqlReturnQueue . get ( )
shared . sqlSubmitQueue . put ( ' commit ' )
shared . sqlLock . release ( )
2013-06-11 05:43:06 +02:00
#shared.workerQueue.put(('newpubkey',(sendersAddressVersionNumber,sendersStreamNumber,ripe.digest()))) #This will check to see whether we happen to be awaiting this pubkey in order to send a message. If we are, it will do the POW and send it.
self . possibleNewPubkey ( ripe . digest ( ) )
2013-01-18 23:38:09 +01:00
fromAddress = encodeAddress ( sendersAddressVersionNumber , sendersStreamNumber , ripe . digest ( ) )
2013-04-26 23:12:35 +02:00
#If this message is bound for one of my version 3 addresses (or higher), then we must check to make sure it meets our demanded proof of work requirement.
if decodeAddress ( toAddress ) [ 1 ] > = 3 : #If the toAddress version number is 3 or higher:
2013-05-02 17:53:54 +02:00
if not shared . isAddressInMyAddressBookSubscriptionsListOrWhitelist ( fromAddress ) : #If I'm not friendly with this person:
requiredNonceTrialsPerByte = shared . config . getint ( toAddress , ' noncetrialsperbyte ' )
requiredPayloadLengthExtraBytes = shared . config . getint ( toAddress , ' payloadlengthextrabytes ' )
2013-04-26 23:12:35 +02:00
if not self . isProofOfWorkSufficient ( encryptedData , requiredNonceTrialsPerByte , requiredPayloadLengthExtraBytes ) :
print ' Proof of work in msg message insufficient only because it does not meet our higher requirement. '
return
blockMessage = False #Gets set to True if the user shouldn't see the message according to black or white lists.
2013-05-02 17:53:54 +02:00
if shared . config . get ( ' bitmessagesettings ' , ' blackwhitelist ' ) == ' black ' : #If we are using a blacklist
2013-01-18 23:38:09 +01:00
t = ( fromAddress , )
2013-05-02 17:53:54 +02:00
shared . sqlLock . acquire ( )
shared . sqlSubmitQueue . put ( ''' SELECT label FROM blacklist where address=? and enabled= ' 1 ' ''' )
shared . sqlSubmitQueue . put ( t )
queryreturn = shared . sqlReturnQueue . get ( )
shared . sqlLock . release ( )
2013-04-26 23:12:35 +02:00
if queryreturn != [ ] :
2013-05-02 17:53:54 +02:00
shared . printLock . acquire ( )
2013-04-26 23:12:35 +02:00
print ' Message ignored because address is in blacklist. '
2013-05-02 17:53:54 +02:00
shared . printLock . release ( )
2013-04-26 23:12:35 +02:00
blockMessage = True
2013-01-18 23:38:09 +01:00
else : #We're using a whitelist
t = ( fromAddress , )
2013-05-02 17:53:54 +02:00
shared . sqlLock . acquire ( )
shared . sqlSubmitQueue . put ( ''' SELECT label FROM whitelist where address=? and enabled= ' 1 ' ''' )
shared . sqlSubmitQueue . put ( t )
queryreturn = shared . sqlReturnQueue . get ( )
shared . sqlLock . release ( )
2013-01-18 23:38:09 +01:00
if queryreturn == [ ] :
print ' Message ignored because address not in whitelist. '
blockMessage = True
if not blockMessage :
print ' fromAddress: ' , fromAddress
print ' First 150 characters of message: ' , repr ( message [ : 150 ] )
2013-05-02 17:53:54 +02:00
toLabel = shared . config . get ( toAddress , ' label ' )
2013-04-22 22:01:41 +02:00
if toLabel == ' ' :
2013-05-16 00:03:33 +02:00
toLabel = toAddress
2013-01-18 23:38:09 +01:00
if messageEncodingType == 2 :
bodyPositionIndex = string . find ( message , ' \n Body: ' )
if bodyPositionIndex > 1 :
subject = message [ 8 : bodyPositionIndex ]
2013-06-11 08:33:48 +02:00
subject = subject[:500] #Only save and show the first 500 characters of the subject. Anything longer is probably an attack.
2013-01-18 23:38:09 +01:00
body = message [ bodyPositionIndex + 6 : ]
else :
subject = ' '
body = message
elif messageEncodingType == 1 :
body = message
subject = ' '
elif messageEncodingType == 0 :
print ' messageEncodingType == 0. Doing nothing with the message. They probably just sent it so that we would store their public key or send their ack data for them. '
else :
body = ' Unknown encoding type. \n \n ' + repr ( message )
subject = ' '
if messageEncodingType < > 0 :
2013-05-02 17:53:54 +02:00
shared . sqlLock . acquire ( )
2013-04-08 19:35:16 +02:00
t = ( self . inventoryHash , toAddress , fromAddress , subject , int ( time . time ( ) ) , body , ' inbox ' , messageEncodingType , 0 )
2013-05-02 17:53:54 +02:00
shared . sqlSubmitQueue . put ( ''' INSERT INTO inbox VALUES (?,?,?,?,?,?,?,?,?) ''' )
shared . sqlSubmitQueue . put ( t )
shared . sqlReturnQueue . get ( )
shared . sqlSubmitQueue . put ( ' commit ' )
shared . sqlLock . release ( )
shared . UISignalQueue . put ( ( ' displayNewInboxMessage ' , ( self . inventoryHash , toAddress , fromAddress , subject , body ) ) )
2013-02-26 00:09:32 +01:00
2013-03-19 18:32:37 +01:00
#If we are behaving as an API then we might need to run an outside command to let some program know that a new message has arrived.
2013-05-02 17:53:54 +02:00
if shared . safeConfigGetBoolean ( ' bitmessagesettings ' , ' apienabled ' ) :
2013-03-19 18:32:37 +01:00
try :
2013-05-02 17:53:54 +02:00
apiNotifyPath = shared . config . get ( ' bitmessagesettings ' , ' apinotifypath ' )
2013-03-19 18:32:37 +01:00
except :
apiNotifyPath = ' '
if apiNotifyPath != ' ' :
call ( [ apiNotifyPath , " newMessage " ] )
2013-02-26 00:09:32 +01:00
#Let us now check and see whether our receiving address is behaving as a mailing list
2013-05-02 17:53:54 +02:00
if shared . safeConfigGetBoolean ( toAddress , ' mailinglist ' ) :
2013-02-26 00:09:32 +01:00
try :
2013-05-02 17:53:54 +02:00
mailingListName = shared . config . get ( toAddress , ' mailinglistname ' )
2013-02-26 00:09:32 +01:00
except :
mailingListName = ' '
#Let us send out this message as a broadcast
subject = self . addMailingListNameToSubject ( subject , mailingListName )
#Let us now send this message out as a broadcast
2013-04-04 18:32:25 +02:00
message = strftime("%a, %Y-%m-%d %H:%M:%S UTC", gmtime()) + ' Message ostensibly from ' + fromAddress + ':\n\n' + body
fromAddress = toAddress #The fromAddress for the broadcast that we are about to send is the toAddress (my address) for the msg message we are currently processing.
2013-02-26 00:09:32 +01:00
ackdata = OpenSSL . rand ( 32 ) #We don't actually need the ackdata for acknowledgement since this is a broadcast message but we can use it to update the user interface when the POW is done generating.
toAddress = ' [Broadcast subscribers] '
ripe = ' '
2013-05-02 17:53:54 +02:00
shared . sqlLock . acquire ( )
2013-06-08 02:44:30 +02:00
t = ( ' ' , toAddress , ripe , fromAddress , subject , message , ackdata , int ( time . time ( ) ) , ' broadcastqueued ' , 1 , 1 , ' sent ' , 2 )
2013-05-02 17:53:54 +02:00
shared . sqlSubmitQueue . put ( ''' INSERT INTO sent VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?) ''' )
shared . sqlSubmitQueue . put ( t )
shared . sqlReturnQueue . get ( )
shared . sqlSubmitQueue . put ( ' commit ' )
shared . sqlLock . release ( )
2013-02-26 00:09:32 +01:00
2013-05-02 17:53:54 +02:00
shared . UISignalQueue . put ( ( ' displayNewSentMessage ' , ( toAddress , ' [Broadcast subscribers] ' , fromAddress , subject , message , ackdata ) ) )
2013-05-29 23:18:44 +02:00
shared . workerQueue . put ( ( ' sendbroadcast ' , ' ' ) )
2013-03-19 18:32:37 +01:00
2013-04-02 22:36:48 +02:00
if self . isAckDataValid ( ackData ) :
2013-01-18 23:38:09 +01:00
print ' ackData is valid. Will process it. '
self . ackDataThatWeHaveYetToSend . append ( ackData ) #When we have processed all data, the processData function will pop the ackData out and process it as if it is a message received from our peer.
2013-02-05 22:53:56 +01:00
#Display timing data
2013-02-06 22:17:49 +01:00
timeRequiredToAttemptToDecryptMessage = time . time ( ) - self . messageProcessingStartTime
2013-02-05 22:53:56 +01:00
successfullyDecryptMessageTimings . append ( timeRequiredToAttemptToDecryptMessage )
sum = 0
for item in successfullyDecryptMessageTimings :
sum + = item
2013-05-02 17:53:54 +02:00
shared . printLock . acquire ( )
2013-02-05 22:53:56 +01:00
print ' Time to decrypt this message successfully: ' , timeRequiredToAttemptToDecryptMessage
print ' Average time for all message decryption successes since startup: ' , sum / len ( successfullyDecryptMessageTimings )
2013-05-02 17:53:54 +02:00
shared . printLock . release ( )
2013-02-05 22:53:56 +01:00
2013-04-02 22:36:48 +02:00
def isAckDataValid(self, ackData):
    if len(ackData) < 24:
        print 'The length of ackData is unreasonably short. Not sending ackData.'
        return False
    if ackData[0:4] != '\xe9\xbe\xb4\xd9':
        print 'Ackdata magic bytes were wrong. Not sending ackData.'
        return False
    ackDataPayloadLength, = unpack('>L', ackData[16:20])
    if len(ackData) - 24 != ackDataPayloadLength:
        print 'ackData payload length doesn\'t match the payload length specified in the header. Not sending ackdata.'
        return False
    if ackData[4:16] != 'getpubkey\x00\x00\x00' and ackData[4:16] != 'pubkey\x00\x00\x00\x00\x00\x00' and ackData[4:16] != 'msg\x00\x00\x00\x00\x00\x00\x00\x00\x00' and ackData[4:16] != 'broadcast\x00\x00\x00':
        return False
    return True
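#For reference, the 24-byte message header that isAckDataValid inspects (layout inferred from
#the slices above): bytes 0-3 are the magic '\xe9\xbe\xb4\xd9', bytes 4-15 the null-padded
#command name, bytes 16-19 the big-endian payload length, and bytes 20-23 the first four bytes
#of SHA512(payload). A sketch of a stricter check that would also verify the checksum (not
#performed above):
#    expectedChecksum = hashlib.sha512(ackData[24:]).digest()[:4]
#    checksumIsGood = (ackData[20:24] == expectedChecksum)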
2013-02-26 00:09:32 +01:00
def addMailingListNameToSubject(self, subject, mailingListName):
    subject = subject.strip()
    if subject[:3] == 'Re:' or subject[:3] == 'RE:':
        subject = subject[3:].strip()
    if '[' + mailingListName + ']' in subject:
        return subject
    else:
        return '[' + mailingListName + '] ' + subject
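#Illustrative behaviour of the helper above (hypothetical inputs shown only as an example, self omitted):
#    addMailingListNameToSubject('Re: meeting notes', 'bmlist')       -> '[bmlist] meeting notes'
#    addMailingListNameToSubject('[bmlist] meeting notes', 'bmlist')  -> '[bmlist] meeting notes'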
2013-06-11 08:33:48 +02:00
def possibleNewPubkey(self, toRipe):
    if toRipe in neededPubkeys:
        print 'We have been awaiting the arrival of this pubkey.'
        del neededPubkeys[toRipe]
        t = (toRipe,)
        shared.sqlLock.acquire()
        shared.sqlSubmitQueue.put('''UPDATE sent SET status='doingmsgpow' WHERE toripe=? AND status='awaitingpubkey' and folder='sent' ''')
        shared.sqlSubmitQueue.put(t)
        shared.sqlReturnQueue.get()
        shared.sqlSubmitQueue.put('commit')
        shared.sqlLock.release()
        shared.workerQueue.put(('sendmessage', ''))
    else:
        shared.printLock.acquire()
        print 'We don\'t need this pubkey. We didn\'t ask for it. Pubkey hash:', toRipe.encode('hex')
        shared.printLock.release()
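#Descriptive note: neededPubkeys is a dictionary keyed by the RIPE hashes of addresses we are
#trying to message but whose pubkeys we have not yet obtained. When such a key finally arrives,
#whether in a pubkey object or embedded in a msg or broadcast, possibleNewPubkey promotes the
#matching queued messages from 'awaitingpubkey' to 'doingmsgpow' and wakes the worker thread so
#it can do the proof of work and send them.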
2012-11-19 20:45:05 +01:00
#We have received a pubkey
2013-03-28 22:56:20 +01:00
def recpubkey ( self , data ) :
2013-02-11 22:28:38 +01:00
self . pubkeyProcessingStartTime = time . time ( )
2013-03-28 22:56:20 +01:00
if len ( data ) < 146 or len ( data ) > 600 : #sanity check
2013-01-18 23:38:09 +01:00
return
2012-12-18 22:36:37 +01:00
#We must check to make sure the proof of work is sufficient.
2013-03-28 22:56:20 +01:00
if not self . isProofOfWorkSufficient ( data ) :
2012-12-18 22:36:37 +01:00
print ' Proof of work in pubkey message insufficient. '
return
2013-03-28 22:56:20 +01:00
readPosition = 8 #for the nonce
embeddedTime , = unpack ( ' >I ' , data [ readPosition : readPosition + 4 ] )
2013-04-17 20:24:16 +02:00
#This section is used for the transition from 32 bit time to 64 bit time in the protocol.
if embeddedTime == 0 :
embeddedTime , = unpack ( ' >Q ' , data [ readPosition : readPosition + 8 ] )
readPosition + = 8
else :
readPosition + = 4
2013-04-12 21:31:45 +02:00
if embeddedTime < int ( time . time ( ) ) - lengthOfTimeToHoldOnToAllPubkeys :
2013-05-02 17:53:54 +02:00
shared . printLock . acquire ( )
2013-02-26 00:09:32 +01:00
print ' The embedded time in this pubkey message is too old. Ignoring. Embedded time is: ' , embeddedTime
2013-05-02 17:53:54 +02:00
shared . printLock . release ( )
2013-02-11 22:28:38 +01:00
return
if embeddedTime > int ( time . time ( ) ) + 10800 :
2013-05-02 17:53:54 +02:00
shared . printLock . acquire ( )
2013-02-11 22:28:38 +01:00
print 'The embedded time in this pubkey message is more than several hours in the future. This is irrational. Ignoring message.'
2013-05-02 17:53:54 +02:00
shared . printLock . release ( )
2013-02-11 22:28:38 +01:00
return
2013-03-28 22:56:20 +01:00
addressVersion , varintLength = decodeVarint ( data [ readPosition : readPosition + 10 ] )
2012-11-19 20:45:05 +01:00
readPosition + = varintLength
2013-03-28 22:56:20 +01:00
streamNumber , varintLength = decodeVarint ( data [ readPosition : readPosition + 10 ] )
2012-11-19 20:45:05 +01:00
readPosition + = varintLength
2013-01-18 23:38:09 +01:00
if self . streamNumber != streamNumber :
print ' stream number embedded in this pubkey doesn \' t match our stream number. Ignoring. '
return
2012-11-19 20:45:05 +01:00
2013-03-28 22:56:20 +01:00
inventoryHash = calculateInventoryHash ( data )
2013-05-02 17:53:54 +02:00
shared . inventoryLock . acquire ( )
if inventoryHash in shared . inventory :
2013-02-04 22:49:02 +01:00
print ' We have already received this pubkey. Ignoring it. '
2013-05-02 17:53:54 +02:00
shared . inventoryLock . release ( )
2013-02-04 22:49:02 +01:00
return
elif isInSqlInventory ( inventoryHash ) :
print ' We have already received this pubkey (it is stored on disk in the SQL inventory). Ignoring it. '
2013-05-02 17:53:54 +02:00
shared . inventoryLock . release ( )
2013-02-04 22:49:02 +01:00
return
2013-01-18 23:38:09 +01:00
objectType = ' pubkey '
2013-05-02 17:53:54 +02:00
shared . inventory [ inventoryHash ] = ( objectType , self . streamNumber , data , embeddedTime )
shared . inventoryLock . release ( )
2013-01-18 23:38:09 +01:00
self . broadcastinv ( inventoryHash )
2013-05-02 17:53:54 +02:00
shared . UISignalQueue . put ( ( ' incrementNumberOfPubkeysProcessed ' , ' no data ' ) )
2012-11-19 20:45:05 +01:00
2013-03-28 22:56:20 +01:00
self . processpubkey ( data )
2013-02-11 22:28:38 +01:00
lengthOfTimeWeShouldUseToProcessThisMessage = .2
sleepTime = lengthOfTimeWeShouldUseToProcessThisMessage - ( time . time ( ) - self . pubkeyProcessingStartTime )
if sleepTime > 0 :
2013-05-02 17:53:54 +02:00
shared . printLock . acquire ( )
2013-04-25 22:11:00 +02:00
print ' Timing attack mitigation: Sleeping for ' , sleepTime , ' seconds. '
2013-05-02 17:53:54 +02:00
shared . printLock . release ( )
2013-02-11 22:28:38 +01:00
time . sleep ( sleepTime )
2013-05-02 17:53:54 +02:00
shared . printLock . acquire ( )
2013-04-25 22:11:00 +02:00
print ' Total pubkey processing time: ' , time . time ( ) - self . pubkeyProcessingStartTime , ' seconds. '
2013-05-02 17:53:54 +02:00
shared . printLock . release ( )
2013-02-11 22:28:38 +01:00
2013-03-28 22:56:20 +01:00
def processpubkey ( self , data ) :
readPosition = 8 #for the nonce
embeddedTime , = unpack ( ' >I ' , data [ readPosition : readPosition + 4 ] )
2013-06-10 15:40:51 +02:00
#This section is used for the transition from 32 bit time to 64 bit time in the protocol.
if embeddedTime == 0 :
embeddedTime , = unpack ( ' >Q ' , data [ readPosition : readPosition + 8 ] )
readPosition + = 8
else :
readPosition + = 4
2013-03-28 22:56:20 +01:00
addressVersion , varintLength = decodeVarint ( data [ readPosition : readPosition + 10 ] )
2013-02-11 22:28:38 +01:00
readPosition + = varintLength
2013-03-28 22:56:20 +01:00
streamNumber , varintLength = decodeVarint ( data [ readPosition : readPosition + 10 ] )
2013-02-11 22:28:38 +01:00
readPosition + = varintLength
2013-01-18 23:38:09 +01:00
if addressVersion == 0 :
2013-02-11 22:28:38 +01:00
print ' (Within processpubkey) addressVersion of 0 doesn \' t make sense. '
2013-01-18 23:38:09 +01:00
return
2013-04-24 21:48:46 +02:00
if addressVersion > = 4 or addressVersion == 1 :
2013-05-02 17:53:54 +02:00
shared . printLock . acquire ( )
2013-01-18 23:38:09 +01:00
print ' This version of Bitmessage cannot handle version ' , addressVersion , ' addresses. '
2013-05-02 17:53:54 +02:00
shared . printLock . release ( )
2013-01-18 23:38:09 +01:00
return
if addressVersion == 2 :
2013-04-02 18:23:34 +02:00
if len ( data ) < 146 : #sanity check. This is the minimum possible length.
print ' (within processpubkey) payloadLength less than 146. Sanity check failed. '
2013-01-18 23:38:09 +01:00
return
2013-03-28 22:56:20 +01:00
bitfieldBehaviors = data [ readPosition : readPosition + 4 ]
2013-01-18 23:38:09 +01:00
readPosition + = 4
2013-03-28 22:56:20 +01:00
publicSigningKey = data [ readPosition : readPosition + 64 ]
2013-01-18 23:38:09 +01:00
#Is it possible for a public key to be invalid such that trying to encrypt or sign with it will cause an error? If it is, we should probably test these keys here.
readPosition + = 64
2013-03-28 22:56:20 +01:00
publicEncryptionKey = data [ readPosition : readPosition + 64 ]
2013-01-18 23:38:09 +01:00
if len ( publicEncryptionKey ) < 64 :
print ' publicEncryptionKey length less than 64. Sanity check failed. '
return
sha = hashlib . new ( ' sha512 ' )
sha . update ( ' \x04 ' + publicSigningKey + ' \x04 ' + publicEncryptionKey )
ripeHasher = hashlib . new ( ' ripemd160 ' )
ripeHasher . update ( sha . digest ( ) )
ripe = ripeHasher . digest ( )
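#Layout of the version-2 pubkey data just parsed (a summary of the reads above; offsets are
#relative to readPosition after the address-version and stream varints):
#    4 bytes    bitfieldBehaviors
#    64 bytes   publicSigningKey    (uncompressed EC point without the leading 0x04)
#    64 bytes   publicEncryptionKey
#The advertiser's RIPE hash is then RIPEMD160(SHA512('\x04' + publicSigningKey + '\x04' + publicEncryptionKey)),
#exactly as computed above.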
2012-11-19 20:45:05 +01:00
2013-05-02 17:53:54 +02:00
shared . printLock . acquire ( )
2013-02-03 06:16:50 +01:00
print 'within processpubkey, addressVersion:', addressVersion, ', streamNumber:', streamNumber
2013-01-18 23:38:09 +01:00
print ' ripe ' , ripe . encode ( ' hex ' )
print ' publicSigningKey in hex: ' , publicSigningKey . encode ( ' hex ' )
print ' publicEncryptionKey in hex: ' , publicEncryptionKey . encode ( ' hex ' )
2013-05-02 17:53:54 +02:00
shared . printLock . release ( )
2013-01-18 23:38:09 +01:00
2013-02-11 22:28:38 +01:00
t = ( ripe , )
2013-05-02 17:53:54 +02:00
shared . sqlLock . acquire ( )
shared . sqlSubmitQueue . put ( ''' SELECT usedpersonally FROM pubkeys WHERE hash=? AND usedpersonally= ' yes ' ''' )
shared . sqlSubmitQueue . put ( t )
queryreturn = shared . sqlReturnQueue . get ( )
shared . sqlLock . release ( )
2013-02-11 22:28:38 +01:00
if queryreturn != [ ] : #if this pubkey is already in our database and if we have used it personally:
print ' We HAVE used this pubkey personally. Updating time. '
2013-04-25 22:11:00 +02:00
t = ( ripe , data , embeddedTime , ' yes ' )
2013-02-11 22:28:38 +01:00
else :
print ' We have NOT used this pubkey personally. Inserting in database. '
2013-04-25 22:11:00 +02:00
t = ( ripe , data , embeddedTime , ' no ' ) #This will also update the embeddedTime.
2013-06-11 05:43:06 +02:00
shared . sqlLock . acquire ( )
shared . sqlSubmitQueue . put ( ''' INSERT INTO pubkeys VALUES (?,?,?,?) ''' )
shared . sqlSubmitQueue . put ( t )
shared . sqlReturnQueue . get ( )
shared . sqlSubmitQueue . put ( ' commit ' )
shared . sqlLock . release ( )
#shared.workerQueue.put(('newpubkey',(addressVersion,streamNumber,ripe)))
self . possibleNewPubkey ( ripe )
2013-04-24 21:48:46 +02:00
if addressVersion == 3 :
if len ( data ) < 170 : #sanity check.
print ' (within processpubkey) payloadLength less than 170. Sanity check failed. '
return
bitfieldBehaviors = data [ readPosition : readPosition + 4 ]
readPosition + = 4
publicSigningKey = ' \x04 ' + data [ readPosition : readPosition + 64 ]
#Is it possible for a public key to be invalid such that trying to encrypt or sign with it will cause an error? If it is, we should probably test these keys here.
readPosition + = 64
publicEncryptionKey = ' \x04 ' + data [ readPosition : readPosition + 64 ]
readPosition + = 64
specifiedNonceTrialsPerByte , specifiedNonceTrialsPerByteLength = decodeVarint ( data [ readPosition : readPosition + 10 ] )
readPosition + = specifiedNonceTrialsPerByteLength
specifiedPayloadLengthExtraBytes , specifiedPayloadLengthExtraBytesLength = decodeVarint ( data [ readPosition : readPosition + 10 ] )
readPosition + = specifiedPayloadLengthExtraBytesLength
2013-06-08 00:58:28 +02:00
endOfSignedDataPosition = readPosition
2013-04-24 21:48:46 +02:00
signatureLength , signatureLengthLength = decodeVarint ( data [ readPosition : readPosition + 10 ] )
2013-06-08 00:58:28 +02:00
readPosition + = signatureLengthLength
signature = data [ readPosition : readPosition + signatureLength ]
2013-04-24 21:48:46 +02:00
try :
2013-06-08 00:58:28 +02:00
if not highlevelcrypto . verify ( data [ 8 : endOfSignedDataPosition ] , signature , publicSigningKey . encode ( ' hex ' ) ) :
2013-06-07 21:06:53 +02:00
print ' ECDSA verify failed (within processpubkey) '
return
2013-04-24 21:48:46 +02:00
print ' ECDSA verify passed (within processpubkey) '
except Exception , err :
print ' ECDSA verify failed (within processpubkey) ' , err
return
sha = hashlib . new ( ' sha512 ' )
sha . update ( publicSigningKey + publicEncryptionKey )
ripeHasher = hashlib . new ( ' ripemd160 ' )
ripeHasher . update ( sha . digest ( ) )
ripe = ripeHasher . digest ( )
2013-05-02 17:53:54 +02:00
shared . printLock . acquire ( )
2013-04-24 21:48:46 +02:00
print 'within processpubkey, addressVersion:', addressVersion, ', streamNumber:', streamNumber
print ' ripe ' , ripe . encode ( ' hex ' )
print ' publicSigningKey in hex: ' , publicSigningKey . encode ( ' hex ' )
print ' publicEncryptionKey in hex: ' , publicEncryptionKey . encode ( ' hex ' )
2013-05-02 17:53:54 +02:00
shared . printLock . release ( )
2013-04-24 21:48:46 +02:00
t = ( ripe , )
2013-05-02 17:53:54 +02:00
shared . sqlLock . acquire ( )
shared . sqlSubmitQueue . put ( ''' SELECT usedpersonally FROM pubkeys WHERE hash=? AND usedpersonally= ' yes ' ''' )
shared . sqlSubmitQueue . put ( t )
queryreturn = shared . sqlReturnQueue . get ( )
shared . sqlLock . release ( )
2013-04-24 21:48:46 +02:00
if queryreturn != [ ] : #if this pubkey is already in our database and if we have used it personally:
print ' We HAVE used this pubkey personally. Updating time. '
2013-04-25 22:11:00 +02:00
t = ( ripe , data , embeddedTime , ' yes ' )
2013-04-24 21:48:46 +02:00
else :
print ' We have NOT used this pubkey personally. Inserting in database. '
2013-04-25 22:11:00 +02:00
t = ( ripe , data , embeddedTime , ' no ' ) #This will also update the embeddedTime.
2013-06-11 05:43:06 +02:00
shared . sqlLock . acquire ( )
shared . sqlSubmitQueue . put ( ''' INSERT INTO pubkeys VALUES (?,?,?,?) ''' )
shared . sqlSubmitQueue . put ( t )
shared . sqlReturnQueue . get ( )
shared . sqlSubmitQueue . put ( ' commit ' )
shared . sqlLock . release ( )
#shared.workerQueue.put(('newpubkey',(addressVersion,streamNumber,ripe)))
self . possibleNewPubkey ( ripe )
2013-01-18 23:38:09 +01:00
2012-11-19 20:45:05 +01:00
#We have received a getpubkey message
2013-03-28 22:56:20 +01:00
def recgetpubkey ( self , data ) :
if not self . isProofOfWorkSufficient ( data ) :
2012-12-18 22:36:37 +01:00
print ' Proof of work in getpubkey message insufficient. '
return
2013-03-28 22:56:20 +01:00
if len ( data ) < 34 :
print ' getpubkey message doesn \' t contain enough data. Ignoring. '
return
2013-04-17 20:24:16 +02:00
readPosition = 8 #bypass the nonce
embeddedTime , = unpack ( ' >I ' , data [ readPosition : readPosition + 4 ] )
#This section is used for the transition from 32 bit time to 64 bit time in the protocol.
if embeddedTime == 0 :
embeddedTime , = unpack ( ' >Q ' , data [ readPosition : readPosition + 8 ] )
readPosition + = 8
else :
readPosition + = 4
2013-01-04 23:21:33 +01:00
if embeddedTime > int ( time . time ( ) ) + 10800 :
print ' The time in this getpubkey message is too new. Ignoring it. Time: ' , embeddedTime
return
if embeddedTime < int ( time . time ( ) ) - maximumAgeOfAnObjectThatIAmWillingToAccept :
print ' The time in this getpubkey message is too old. Ignoring it. Time: ' , embeddedTime
return
2013-04-24 21:48:46 +02:00
requestedAddressVersionNumber , addressVersionLength = decodeVarint ( data [ readPosition : readPosition + 10 ] )
2013-04-17 20:24:16 +02:00
readPosition + = addressVersionLength
streamNumber , streamNumberLength = decodeVarint ( data [ readPosition : readPosition + 10 ] )
2013-02-07 22:31:15 +01:00
if streamNumber < > self . streamNumber :
print ' The streamNumber ' , streamNumber , ' doesn \' t match our stream number: ' , self . streamNumber
return
2013-04-17 20:24:16 +02:00
readPosition + = streamNumberLength
2013-02-07 22:31:15 +01:00
2013-03-28 22:56:20 +01:00
inventoryHash = calculateInventoryHash ( data )
2013-05-02 17:53:54 +02:00
shared . inventoryLock . acquire ( )
if inventoryHash in shared . inventory :
2012-11-19 20:45:05 +01:00
print ' We have already received this getpubkey request. Ignoring it. '
2013-05-02 17:53:54 +02:00
shared . inventoryLock . release ( )
2012-12-18 22:36:37 +01:00
return
2012-11-19 20:45:05 +01:00
elif isInSqlInventory ( inventoryHash ) :
print ' We have already received this getpubkey request (it is stored on disk in the SQL inventory). Ignoring it. '
2013-05-02 17:53:54 +02:00
shared . inventoryLock . release ( )
2012-12-18 22:36:37 +01:00
return
2013-04-02 22:36:48 +02:00
2013-02-03 06:16:50 +01:00
objectType = ' getpubkey '
2013-05-02 17:53:54 +02:00
shared . inventory [ inventoryHash ] = ( objectType , self . streamNumber , data , embeddedTime )
shared . inventoryLock . release ( )
2013-01-04 23:21:33 +01:00
#This getpubkey request is valid so far. Forward to peers.
2013-02-03 06:16:50 +01:00
self . broadcastinv ( inventoryHash )
2013-02-07 22:31:15 +01:00
2013-04-24 21:48:46 +02:00
if requestedAddressVersionNumber == 0 :
print ' The requestedAddressVersionNumber of the pubkey request is zero. That doesn \' t make any sense. Ignoring it. '
2013-01-18 23:38:09 +01:00
return
2013-04-24 21:48:46 +02:00
elif requestedAddressVersionNumber == 1 :
print ' The requestedAddressVersionNumber of the pubkey request is 1 which isn \' t supported anymore. Ignoring it. '
2013-03-01 23:44:52 +01:00
return
2013-04-24 21:48:46 +02:00
elif requestedAddressVersionNumber > 3 :
print ' The requestedAddressVersionNumber of the pubkey request is too high. Can \' t understand. Ignoring it. '
2013-01-04 23:21:33 +01:00
return
2013-03-28 22:56:20 +01:00
2013-04-17 20:24:16 +02:00
requestedHash = data [ readPosition : readPosition + 20 ]
2013-03-28 22:56:20 +01:00
if len ( requestedHash ) != 20 :
print ' The length of the requested hash is not 20 bytes. Something is wrong. Ignoring. '
return
print ' the hash requested in this getpubkey request is: ' , requestedHash . encode ( ' hex ' )
2013-01-18 23:38:09 +01:00
2013-05-02 17:53:54 +02:00
if requestedHash in shared . myAddressesByHash : #if this address hash is one of mine
if decodeAddress ( shared . myAddressesByHash [ requestedHash ] ) [ 1 ] != requestedAddressVersionNumber :
shared . printLock . acquire ( )
2013-04-24 21:48:46 +02:00
sys . stderr . write ( ' (Within the recgetpubkey function) Someone requested one of my pubkeys but the requestedAddressVersionNumber doesn \' t match my actual address version number. That shouldn \' t have happened. Ignoring. \n ' )
2013-05-02 17:53:54 +02:00
shared . printLock . release ( )
2013-04-24 21:48:46 +02:00
return
2013-04-22 22:01:41 +02:00
try :
2013-05-02 17:53:54 +02:00
lastPubkeySendTime = int ( shared . config . get ( shared . myAddressesByHash [ requestedHash ] , ' lastpubkeysendtime ' ) )
2013-04-22 22:01:41 +02:00
except :
lastPubkeySendTime = 0
if lastPubkeySendTime < time . time ( ) - lengthOfTimeToHoldOnToAllPubkeys : #If the last time we sent our pubkey was 28 days ago
2013-05-02 17:53:54 +02:00
shared . printLock . acquire ( )
2013-01-22 20:29:49 +01:00
print ' Found getpubkey-requested-hash in my list of EC hashes. Telling Worker thread to do the POW for a pubkey message and send it out. '
2013-05-02 17:53:54 +02:00
shared . printLock . release ( )
2013-04-24 21:48:46 +02:00
if requestedAddressVersionNumber == 2 :
2013-05-02 17:53:54 +02:00
shared . workerQueue . put ( ( ' doPOWForMyV2Pubkey ' , requestedHash ) )
2013-04-24 21:48:46 +02:00
elif requestedAddressVersionNumber == 3 :
2013-05-02 17:53:54 +02:00
shared . workerQueue . put ( ( ' doPOWForMyV3Pubkey ' , requestedHash ) )
2013-04-24 21:48:46 +02:00
else :
2013-05-02 17:53:54 +02:00
shared . printLock . acquire ( )
2013-04-24 21:48:46 +02:00
print ' Found getpubkey-requested-hash in my list of EC hashes BUT we already sent it recently. Ignoring request. The lastPubkeySendTime is: ' , lastPubkeySendTime
2013-05-02 17:53:54 +02:00
shared . printLock . release ( )
2013-04-22 22:01:41 +02:00
else :
2013-05-02 17:53:54 +02:00
shared . printLock . acquire ( )
2013-04-22 22:01:41 +02:00
print ' This getpubkey request is not for any of my keys. '
2013-05-02 17:53:54 +02:00
shared . printLock . release ( )
2013-01-04 23:21:33 +01:00
2012-11-19 20:45:05 +01:00
#We have received an inv message
2013-03-28 22:56:20 +01:00
def recinv ( self , data ) :
2013-05-30 22:25:42 +02:00
totalNumberOfObjectsThatWeHaveYetToCheckAndSeeWhetherWeAlreadyHave = 0 # ..from all peers, counting duplicates separately (because they take up memory)
2013-05-29 23:18:44 +02:00
if len ( numberOfObjectsThatWeHaveYetToCheckAndSeeWhetherWeAlreadyHavePerPeer ) > 0 :
2013-05-30 22:25:42 +02:00
for key , value in numberOfObjectsThatWeHaveYetToCheckAndSeeWhetherWeAlreadyHavePerPeer . items ( ) :
totalNumberOfObjectsThatWeHaveYetToCheckAndSeeWhetherWeAlreadyHave + = value
2013-05-29 23:18:44 +02:00
shared . printLock . acquire ( )
print ' number of keys(hosts) in numberOfObjectsThatWeHaveYetToCheckAndSeeWhetherWeAlreadyHavePerPeer: ' , len ( numberOfObjectsThatWeHaveYetToCheckAndSeeWhetherWeAlreadyHavePerPeer )
print ' totalNumberOfObjectsThatWeHaveYetToCheckAndSeeWhetherWeAlreadyHave = ' , totalNumberOfObjectsThatWeHaveYetToCheckAndSeeWhetherWeAlreadyHave
shared . printLock . release ( )
2013-03-28 22:56:20 +01:00
numberOfItemsInInv , lengthOfVarint = decodeVarint ( data [ : 10 ] )
2013-05-29 23:18:44 +02:00
if numberOfItemsInInv > 50000 :
sys . stderr . write ( ' Too many items in inv message! ' )
return
2013-03-28 22:56:20 +01:00
if len ( data ) < lengthOfVarint + ( numberOfItemsInInv * 32 ) :
print ' inv message doesn \' t contain enough data. Ignoring. '
return
2012-11-19 20:45:05 +01:00
if numberOfItemsInInv == 1 : #we'll just request this data from the person who advertised the object.
2013-05-28 22:50:09 +02:00
if totalNumberOfObjectsThatWeHaveYetToCheckAndSeeWhetherWeAlreadyHave > 200000 and len ( self . objectsThatWeHaveYetToCheckAndSeeWhetherWeAlreadyHave ) > 1000 : #inv flooding attack mitigation
2013-05-28 19:30:44 +02:00
shared . printLock . acquire ( )
print ' We already have ' , totalNumberOfObjectsThatWeHaveYetToCheckAndSeeWhetherWeAlreadyHave , ' items yet to retrieve from peers and over 1000 from this node in particular. Ignoring this inv message. '
shared . printLock . release ( )
return
2013-03-30 23:32:30 +01:00
self . objectsOfWhichThisRemoteNodeIsAlreadyAware [ data [ lengthOfVarint : 32 + lengthOfVarint ] ] = 0
2013-05-02 17:53:54 +02:00
if data [ lengthOfVarint : 32 + lengthOfVarint ] in shared . inventory :
shared . printLock . acquire ( )
2013-03-28 22:56:20 +01:00
print ' Inventory (in memory) has inventory item already. '
2013-05-02 17:53:54 +02:00
shared . printLock . release ( )
2013-03-28 22:56:20 +01:00
elif isInSqlInventory ( data [ lengthOfVarint : 32 + lengthOfVarint ] ) :
print ' Inventory (SQL on disk) has inventory item already. '
else :
self . sendgetdata ( data [ lengthOfVarint : 32 + lengthOfVarint ] )
2012-11-19 20:45:05 +01:00
else :
print ' inv message lists ' , numberOfItemsInInv , ' objects. '
for i in range ( numberOfItemsInInv ) : #upon finishing dealing with an incoming message, the receiveDataThread will request a random object from the peer. This way if we get multiple inv messages from multiple peers which list mostly the same objects, we will make getdata requests for different random objects from the various peers.
2013-03-28 22:56:20 +01:00
if len ( data [ lengthOfVarint + ( 32 * i ) : 32 + lengthOfVarint + ( 32 * i ) ] ) == 32 : #The length of an inventory hash should be 32. If it isn't 32 then the remote node is either badly programmed or behaving nefariously.
2013-05-28 22:50:09 +02:00
if totalNumberOfObjectsThatWeHaveYetToCheckAndSeeWhetherWeAlreadyHave > 200000 and len ( self . objectsThatWeHaveYetToCheckAndSeeWhetherWeAlreadyHave ) > 1000 : #inv flooding attack mitigation
2013-05-28 19:30:44 +02:00
shared . printLock . acquire ( )
print ' We already have ' , totalNumberOfObjectsThatWeHaveYetToCheckAndSeeWhetherWeAlreadyHave , ' items yet to retrieve from peers and over ' , len ( self . objectsThatWeHaveYetToCheckAndSeeWhetherWeAlreadyHave ) , ' from this node in particular. Ignoring the rest of this inv message. '
shared . printLock . release ( )
break
2013-03-28 22:56:20 +01:00
self . objectsOfWhichThisRemoteNodeIsAlreadyAware [ data [ lengthOfVarint + ( 32 * i ) : 32 + lengthOfVarint + ( 32 * i ) ] ] = 0
self . objectsThatWeHaveYetToCheckAndSeeWhetherWeAlreadyHave [ data [ lengthOfVarint + ( 32 * i ) : 32 + lengthOfVarint + ( 32 * i ) ] ] = 0
2013-05-28 19:30:44 +02:00
numberOfObjectsThatWeHaveYetToCheckAndSeeWhetherWeAlreadyHavePerPeer [ self . HOST ] = len ( self . objectsThatWeHaveYetToCheckAndSeeWhetherWeAlreadyHave )
2013-02-04 22:49:02 +01:00
2012-11-19 20:45:05 +01:00
#Send a getdata message to our peer to request the object with the given hash
def sendgetdata(self, hash):
    shared.printLock.acquire()
    print 'sending getdata to retrieve object with hash:', hash.encode('hex')
    shared.printLock.release()
    payload = '\x01' + hash
    headerData = '\xe9\xbe\xb4\xd9' #magic bits, slightly different from Bitcoin's magic bits.
    headerData += 'getdata\x00\x00\x00\x00\x00'
    headerData += pack('>L', len(payload)) #payload length. Note that we add an extra 8 for the nonce.
    headerData += hashlib.sha512(payload).digest()[:4]
    try:
        self.sock.sendall(headerData + payload)
    except Exception, err:
        #if not 'Bad file descriptor' in err:
        shared.printLock.acquire()
        sys.stderr.write('sock.sendall error: %s\n' % err)
        shared.printLock.release()
2012-11-19 20:45:05 +01:00
#We have received a getdata request from our peer
2013-03-28 22:56:20 +01:00
def recgetdata ( self , data ) :
2013-04-01 18:38:15 +02:00
numberOfRequestedInventoryItems , lengthOfVarint = decodeVarint ( data [ : 10 ] )
if len ( data ) < lengthOfVarint + ( 32 * numberOfRequestedInventoryItems ) :
print ' getdata message does not contain enough data. Ignoring. '
return
for i in xrange ( numberOfRequestedInventoryItems ) :
hash = data [ lengthOfVarint + ( i * 32 ) : 32 + lengthOfVarint + ( i * 32 ) ]
2013-05-02 17:53:54 +02:00
shared . printLock . acquire ( )
2013-04-01 18:38:15 +02:00
print ' received getdata request for item: ' , hash . encode ( ' hex ' )
2013-05-02 17:53:54 +02:00
shared . printLock . release ( )
#print 'inventory is', shared.inventory
if hash in shared . inventory :
objectType , streamNumber , payload , receivedTime = shared . inventory [ hash ]
2013-04-01 18:38:15 +02:00
self . sendData ( objectType , payload )
else :
t = ( hash , )
2013-05-02 17:53:54 +02:00
shared . sqlLock . acquire ( )
shared . sqlSubmitQueue . put ( ''' select objecttype, payload from inventory where hash=? ''' )
shared . sqlSubmitQueue . put ( t )
queryreturn = shared . sqlReturnQueue . get ( )
shared . sqlLock . release ( )
2013-04-01 18:38:15 +02:00
if queryreturn < > [ ] :
for row in queryreturn :
objectType , payload = row
2012-11-19 20:45:05 +01:00
self . sendData ( objectType , payload )
else :
2013-04-01 18:38:15 +02:00
print ' Someone asked for an object with a getdata which is not in either our memory inventory or our SQL inventory. That shouldn \' t have happened. '
2012-11-19 20:45:05 +01:00
#Our peer has requested (in a getdata message) that we send an object.
def sendData(self, objectType, payload):
    headerData = '\xe9\xbe\xb4\xd9' #magic bits, slightly different from Bitcoin's magic bits.
    if objectType == 'pubkey':
        shared.printLock.acquire()
        print 'sending pubkey'
        shared.printLock.release()
        headerData += 'pubkey\x00\x00\x00\x00\x00\x00'
    elif objectType == 'getpubkey' or objectType == 'pubkeyrequest':
        shared.printLock.acquire()
        print 'sending getpubkey'
        shared.printLock.release()
        headerData += 'getpubkey\x00\x00\x00'
    elif objectType == 'msg':
        shared.printLock.acquire()
        print 'sending msg'
        shared.printLock.release()
        headerData += 'msg\x00\x00\x00\x00\x00\x00\x00\x00\x00'
    elif objectType == 'broadcast':
        shared.printLock.acquire()
        print 'sending broadcast'
        shared.printLock.release()
        headerData += 'broadcast\x00\x00\x00'
    else:
        sys.stderr.write('Error: sendData has been asked to send a strange objectType: %s\n' % str(objectType))
        return
    headerData += pack('>L', len(payload)) #payload length.
    headerData += hashlib.sha512(payload).digest()[:4]
    try:
        self.sock.sendall(headerData + payload)
    except Exception, err:
        #if not 'Bad file descriptor' in err:
        shared.printLock.acquire()
        sys.stderr.write('sock.sendall error: %s\n' % err)
        shared.printLock.release()
2012-11-19 20:45:05 +01:00
#Send an inv message with just one hash to all of our peers
def broadcastinv(self, hash):
    shared.printLock.acquire()
    print 'broadcasting inv with hash:', hash.encode('hex')
    shared.printLock.release()
    shared.broadcastToSendDataQueues((self.streamNumber, 'sendinv', hash))
2012-12-05 18:47:30 +01:00
2012-11-19 20:45:05 +01:00
#We have received an addr message.
2013-03-28 22:56:20 +01:00
def recaddr ( self , data ) :
2012-11-19 20:45:05 +01:00
listOfAddressDetailsToBroadcastToPeers = [ ]
numberOfAddressesIncluded = 0
2013-03-28 22:56:20 +01:00
numberOfAddressesIncluded , lengthOfNumberOfAddresses = decodeVarint ( data [ : 10 ] )
2012-11-19 20:45:05 +01:00
if verbose > = 1 :
2013-05-02 17:53:54 +02:00
shared . printLock . acquire ( )
2012-12-10 20:48:00 +01:00
print ' addr message contains ' , numberOfAddressesIncluded , ' IP addresses. '
2013-05-02 17:53:54 +02:00
shared . printLock . release ( )
2012-11-19 20:45:05 +01:00
2013-04-17 20:24:16 +02:00
if self . remoteProtocolVersion == 1 :
if numberOfAddressesIncluded > 1000 or numberOfAddressesIncluded == 0 :
return
if len ( data ) != lengthOfNumberOfAddresses + ( 34 * numberOfAddressesIncluded ) :
print ' addr message does not contain the correct amount of data. Ignoring. '
return
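#Each address record in a version-1 addr message is 34 bytes, as implied by the offsets used in
#the unpack calls below (hedged reconstruction from this parsing code):
#    4 bytes    time the advertising peer last heard from the node (32-bit, big-endian)
#    4 bytes    stream number
#    8 bytes    services bitfield
#    16 bytes   IPv6 address (IPv4 appears in the ::ffff:a.b.c.d mapped form)
#    2 bytes    port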
2013-03-12 21:03:16 +01:00
2013-04-17 20:24:16 +02:00
needToWriteKnownNodesToDisk = False
for i in range ( 0 , numberOfAddressesIncluded ) :
try :
if data [ 16 + lengthOfNumberOfAddresses + ( 34 * i ) : 28 + lengthOfNumberOfAddresses + ( 34 * i ) ] != ' \x00 \x00 \x00 \x00 \x00 \x00 \x00 \x00 \x00 \x00 \xFF \xFF ' :
2013-05-02 17:53:54 +02:00
shared . printLock . acquire ( )
2013-04-17 20:24:16 +02:00
print ' Skipping IPv6 address. ' , repr ( data [ 16 + lengthOfNumberOfAddresses + ( 34 * i ) : 28 + lengthOfNumberOfAddresses + ( 34 * i ) ] )
2013-05-02 17:53:54 +02:00
shared . printLock . release ( )
2013-04-17 20:24:16 +02:00
continue
except Exception , err :
2013-05-02 17:53:54 +02:00
shared . printLock . acquire ( )
2013-04-17 20:24:16 +02:00
sys . stderr . write ( ' ERROR TRYING TO UNPACK recaddr (to test for an IPv6 address). Message: %s \n ' % str ( err ) )
2013-05-02 17:53:54 +02:00
shared . printLock . release ( )
2013-04-17 20:24:16 +02:00
break #giving up on unpacking any more. We should still be connected however.
2012-11-19 20:45:05 +01:00
2013-04-17 20:24:16 +02:00
try :
recaddrStream , = unpack ( ' >I ' , data [ 4 + lengthOfNumberOfAddresses + ( 34 * i ) : 8 + lengthOfNumberOfAddresses + ( 34 * i ) ] )
except Exception , err :
2013-05-02 17:53:54 +02:00
shared . printLock . acquire ( )
2013-04-17 20:24:16 +02:00
sys . stderr . write ( ' ERROR TRYING TO UNPACK recaddr (recaddrStream). Message: %s \n ' % str ( err ) )
2013-05-02 17:53:54 +02:00
shared . printLock . release ( )
2013-04-17 20:24:16 +02:00
break #giving up on unpacking any more. We should still be connected however.
if recaddrStream == 0 :
continue
if recaddrStream != self . streamNumber and recaddrStream != ( self . streamNumber * 2 ) and recaddrStream != ( ( self . streamNumber * 2 ) + 1 ) : #if the embedded stream number is not in my stream or either of my child streams then ignore it. Someone might be trying funny business.
continue
try :
recaddrServices , = unpack ( ' >Q ' , data [ 8 + lengthOfNumberOfAddresses + ( 34 * i ) : 16 + lengthOfNumberOfAddresses + ( 34 * i ) ] )
except Exception , err :
2013-05-02 17:53:54 +02:00
shared . printLock . acquire ( )
2013-04-17 20:24:16 +02:00
sys . stderr . write ( ' ERROR TRYING TO UNPACK recaddr (recaddrServices). Message: %s \n ' % str ( err ) )
2013-05-02 17:53:54 +02:00
shared . printLock . release ( )
2013-04-17 20:24:16 +02:00
break #giving up on unpacking any more. We should still be connected however.
2012-11-19 20:45:05 +01:00
2013-04-17 20:24:16 +02:00
try :
recaddrPort , = unpack ( ' >H ' , data [ 32 + lengthOfNumberOfAddresses + ( 34 * i ) : 34 + lengthOfNumberOfAddresses + ( 34 * i ) ] )
except Exception , err :
2013-05-02 17:53:54 +02:00
shared . printLock . acquire ( )
2013-04-17 20:24:16 +02:00
sys . stderr . write ( ' ERROR TRYING TO UNPACK recaddr (recaddrPort). Message: %s \n ' % str ( err ) )
2013-05-02 17:53:54 +02:00
shared . printLock . release ( )
2013-04-17 20:24:16 +02:00
break #giving up on unpacking any more. We should still be connected however.
#print 'Within recaddr(): IP', recaddrIP, ', Port', recaddrPort, ', i', i
hostFromAddrMessage = socket . inet_ntoa ( data [ 28 + lengthOfNumberOfAddresses + ( 34 * i ) : 32 + lengthOfNumberOfAddresses + ( 34 * i ) ] )
#print 'hostFromAddrMessage', hostFromAddrMessage
if data [ 28 + lengthOfNumberOfAddresses + ( 34 * i ) ] == ' \x7F ' :
print ' Ignoring IP address in loopback range: ' , hostFromAddrMessage
continue
2013-05-23 04:01:35 +02:00
if isHostInPrivateIPRange ( hostFromAddrMessage ) :
2013-04-17 20:24:16 +02:00
print ' Ignoring IP address in private range: ' , hostFromAddrMessage
continue
timeSomeoneElseReceivedMessageFromThisNode , = unpack ( ' >I ' , data [ lengthOfNumberOfAddresses + ( 34 * i ) : 4 + lengthOfNumberOfAddresses + ( 34 * i ) ] ) #This is the 'time' value in the received addr message.
2013-05-02 17:53:54 +02:00
if recaddrStream not in shared . knownNodes : #knownNodes is a dictionary of dictionaries with one outer dictionary for each stream. If the outer stream dictionary doesn't exist yet then we must make it.
shared . knownNodesLock . acquire ( )
shared . knownNodes [ recaddrStream ] = { }
shared . knownNodesLock . release ( )
if hostFromAddrMessage not in shared . knownNodes [ recaddrStream ] :
if len ( shared . knownNodes [ recaddrStream ] ) < 20000 and timeSomeoneElseReceivedMessageFromThisNode > ( int ( time . time ( ) ) - 10800 ) and timeSomeoneElseReceivedMessageFromThisNode < ( int ( time . time ( ) ) + 10800 ) : #If we have more than 20000 nodes in our list already then just forget about adding more. Also, make sure that the time that someone else received a message from this node is within three hours from now.
shared . knownNodesLock . acquire ( )
shared . knownNodes [ recaddrStream ] [ hostFromAddrMessage ] = ( recaddrPort , timeSomeoneElseReceivedMessageFromThisNode )
shared . knownNodesLock . release ( )
2013-04-17 20:24:16 +02:00
needToWriteKnownNodesToDisk = True
hostDetails = ( timeSomeoneElseReceivedMessageFromThisNode , recaddrStream , recaddrServices , hostFromAddrMessage , recaddrPort )
listOfAddressDetailsToBroadcastToPeers . append ( hostDetails )
else :
2013-05-02 17:53:54 +02:00
PORT , timeLastReceivedMessageFromThisNode = shared . knownNodes [ recaddrStream ] [ hostFromAddrMessage ] #PORT in this case is either the port we used to connect to the remote node, or the port that was specified by someone else in a past addr message.
2013-04-17 20:24:16 +02:00
if ( timeLastReceivedMessageFromThisNode < timeSomeoneElseReceivedMessageFromThisNode ) and ( timeSomeoneElseReceivedMessageFromThisNode < int ( time . time ( ) ) ) :
2013-05-02 17:53:54 +02:00
shared . knownNodesLock . acquire ( )
shared . knownNodes [ recaddrStream ] [ hostFromAddrMessage ] = ( PORT , timeSomeoneElseReceivedMessageFromThisNode )
shared . knownNodesLock . release ( )
2013-04-17 20:24:16 +02:00
if PORT != recaddrPort :
print ' Strange occurance: The port specified in an addr message ' , str ( recaddrPort ) , ' does not match the port ' , str ( PORT ) , ' that this program (or some other peer) used to connect to it ' , str ( hostFromAddrMessage ) , ' . Perhaps they changed their port or are using a strange NAT configuration. '
if needToWriteKnownNodesToDisk : #Runs if any nodes were new to us. Also, share those nodes with our peers.
2013-05-02 17:53:54 +02:00
shared . knownNodesLock . acquire ( )
output = open ( shared . appdata + ' knownnodes.dat ' , ' wb ' )
pickle . dump ( shared . knownNodes , output )
2013-04-17 20:24:16 +02:00
output . close ( )
2013-05-02 17:53:54 +02:00
shared . knownNodesLock . release ( )
2013-04-17 20:24:16 +02:00
self . broadcastaddr ( listOfAddressDetailsToBroadcastToPeers ) #no longer broadcast
2013-05-02 17:53:54 +02:00
shared . printLock . acquire ( )
print ' knownNodes currently has ' , len ( shared . knownNodes [ self . streamNumber ] ) , ' nodes for this stream. '
shared . printLock . release ( )
2013-04-17 20:32:18 +02:00
        elif self.remoteProtocolVersion >= 2: #The difference is that in protocol version 2, network addresses use 64 bit times rather than 32 bit times.
            if numberOfAddressesIncluded > 1000 or numberOfAddressesIncluded == 0:
                return
            if len(data) != lengthOfNumberOfAddresses + (38 * numberOfAddressesIncluded):
                print 'addr message does not contain the correct amount of data. Ignoring.'
                return
            needToWriteKnownNodesToDisk = False
            for i in range(0, numberOfAddressesIncluded):
                try:
                    if data[20 + lengthOfNumberOfAddresses + (38 * i):32 + lengthOfNumberOfAddresses + (38 * i)] != '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xFF\xFF':
                        shared.printLock.acquire()
                        print 'Skipping IPv6 address.', repr(data[20 + lengthOfNumberOfAddresses + (38 * i):32 + lengthOfNumberOfAddresses + (38 * i)])
                        shared.printLock.release()
                        continue
                except Exception, err:
                    shared.printLock.acquire()
                    sys.stderr.write('ERROR TRYING TO UNPACK recaddr (to test for an IPv6 address). Message: %s\n' % str(err))
                    shared.printLock.release()
                    break #giving up on unpacking any more. We should still be connected however.
                try:
                    recaddrStream, = unpack('>I', data[8 + lengthOfNumberOfAddresses + (38 * i):12 + lengthOfNumberOfAddresses + (38 * i)])
                except Exception, err:
                    shared.printLock.acquire()
                    sys.stderr.write('ERROR TRYING TO UNPACK recaddr (recaddrStream). Message: %s\n' % str(err))
                    shared.printLock.release()
                    break #giving up on unpacking any more. We should still be connected however.
                if recaddrStream == 0:
                    continue
                if recaddrStream != self.streamNumber and recaddrStream != (self.streamNumber * 2) and recaddrStream != ((self.streamNumber * 2) + 1): #if the embedded stream number is not in my stream or either of my child streams then ignore it. Someone might be trying funny business.
                    continue
                try:
                    recaddrServices, = unpack('>Q', data[12 + lengthOfNumberOfAddresses + (38 * i):20 + lengthOfNumberOfAddresses + (38 * i)])
                except Exception, err:
                    shared.printLock.acquire()
                    sys.stderr.write('ERROR TRYING TO UNPACK recaddr (recaddrServices). Message: %s\n' % str(err))
                    shared.printLock.release()
                    break #giving up on unpacking any more. We should still be connected however.
                try:
                    recaddrPort, = unpack('>H', data[36 + lengthOfNumberOfAddresses + (38 * i):38 + lengthOfNumberOfAddresses + (38 * i)])
                except Exception, err:
                    shared.printLock.acquire()
                    sys.stderr.write('ERROR TRYING TO UNPACK recaddr (recaddrPort). Message: %s\n' % str(err))
                    shared.printLock.release()
                    break #giving up on unpacking any more. We should still be connected however.
                #print 'Within recaddr(): IP', recaddrIP, ', Port', recaddrPort, ', i', i
                hostFromAddrMessage = socket.inet_ntoa(data[32 + lengthOfNumberOfAddresses + (38 * i):36 + lengthOfNumberOfAddresses + (38 * i)])
                #print 'hostFromAddrMessage', hostFromAddrMessage
                if data[32 + lengthOfNumberOfAddresses + (38 * i)] == '\x7F':
                    print 'Ignoring IP address in loopback range:', hostFromAddrMessage
                    continue
                if data[32 + lengthOfNumberOfAddresses + (38 * i)] == '\x0A':
                    print 'Ignoring IP address in private range:', hostFromAddrMessage
                    continue
                if data[32 + lengthOfNumberOfAddresses + (38 * i):34 + lengthOfNumberOfAddresses + (38 * i)] == '\xC0\xA8':
                    print 'Ignoring IP address in private range:', hostFromAddrMessage
                    continue
                timeSomeoneElseReceivedMessageFromThisNode, = unpack('>Q', data[lengthOfNumberOfAddresses + (38 * i):8 + lengthOfNumberOfAddresses + (38 * i)]) #This is the 'time' value in the received addr message. 64-bit.
                if recaddrStream not in shared.knownNodes: #knownNodes is a dictionary of dictionaries with one outer dictionary for each stream. If the outer stream dictionary doesn't exist yet then we must make it.
                    shared.knownNodesLock.acquire()
                    shared.knownNodes[recaddrStream] = {}
                    shared.knownNodesLock.release()
                if hostFromAddrMessage not in shared.knownNodes[recaddrStream]:
                    if len(shared.knownNodes[recaddrStream]) < 20000 and timeSomeoneElseReceivedMessageFromThisNode > (int(time.time()) - 10800) and timeSomeoneElseReceivedMessageFromThisNode < (int(time.time()) + 10800): #If we have more than 20000 nodes in our list already then just forget about adding more. Also, make sure that the time that someone else received a message from this node is within three hours from now.
                        shared.knownNodesLock.acquire()
                        shared.knownNodes[recaddrStream][hostFromAddrMessage] = (recaddrPort, timeSomeoneElseReceivedMessageFromThisNode)
                        shared.knownNodesLock.release()
                        shared.printLock.acquire()
                        print 'added new node', hostFromAddrMessage, 'to knownNodes in stream', recaddrStream
                        shared.printLock.release()
                        needToWriteKnownNodesToDisk = True
                        hostDetails = (timeSomeoneElseReceivedMessageFromThisNode, recaddrStream, recaddrServices, hostFromAddrMessage, recaddrPort)
                        listOfAddressDetailsToBroadcastToPeers.append(hostDetails)
                else:
                    PORT, timeLastReceivedMessageFromThisNode = shared.knownNodes[recaddrStream][hostFromAddrMessage] #PORT in this case is either the port we used to connect to the remote node, or the port that was specified by someone else in a past addr message.
                    if (timeLastReceivedMessageFromThisNode < timeSomeoneElseReceivedMessageFromThisNode) and (timeSomeoneElseReceivedMessageFromThisNode < int(time.time())):
                        shared.knownNodesLock.acquire()
                        shared.knownNodes[recaddrStream][hostFromAddrMessage] = (PORT, timeSomeoneElseReceivedMessageFromThisNode)
                        shared.knownNodesLock.release()
                    if PORT != recaddrPort:
                        print 'Strange occurrence: The port specified in an addr message', str(recaddrPort), 'does not match the port', str(PORT), 'that this program (or some other peer) used to connect to it', str(hostFromAddrMessage), '. Perhaps they changed their port or are using a strange NAT configuration.'
            if needToWriteKnownNodesToDisk: #Runs if any nodes were new to us. Also, share those nodes with our peers.
                shared.knownNodesLock.acquire()
                output = open(shared.appdata + 'knownnodes.dat', 'wb')
                pickle.dump(shared.knownNodes, output)
                output.close()
                shared.knownNodesLock.release()
                self.broadcastaddr(listOfAddressDetailsToBroadcastToPeers)
            shared.printLock.acquire()
            print 'knownNodes currently has', len(shared.knownNodes[self.streamNumber]), 'nodes for this stream.'
            shared.printLock.release()
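    #Illustrative note (not part of the original logic): in protocol version 2 each addr
    #entry is 38 bytes: an 8-byte time, a 4-byte stream number, an 8-byte services
    #bitfield, a 16-byte IPv6-mapped IPv4 address and a 2-byte port, all big-endian.
    #A single entry could therefore be unpacked roughly like this (a sketch; 'entry'
    #is a hypothetical 38-byte slice of the payload):
    #    entryTime, stream, services = unpack('>QIQ', entry[0:20])
    #    host = socket.inet_ntoa(entry[32:36])
    #    port, = unpack('>H', entry[36:38])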
    #Function runs when we want to broadcast an addr message to all of our peers. Runs when we learn of nodes that we didn't previously know about and want to share them with our peers.
    def broadcastaddr(self, listOfAddressDetailsToBroadcastToPeers):
        numberOfAddressesInAddrMessage = len(listOfAddressDetailsToBroadcastToPeers)
        payload = ''
        for hostDetails in listOfAddressDetailsToBroadcastToPeers:
            timeLastReceivedMessageFromThisNode, streamNumber, services, host, port = hostDetails
            payload += pack('>Q', timeLastReceivedMessageFromThisNode) #now uses 64-bit time
            payload += pack('>I', streamNumber)
            payload += pack('>q', services) #service bit flags offered by this node
            payload += '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xFF\xFF' + socket.inet_aton(host)
            payload += pack('>H', port) #remote port
        payload = encodeVarint(numberOfAddressesInAddrMessage) + payload
        datatosend = '\xE9\xBE\xB4\xD9addr\x00\x00\x00\x00\x00\x00\x00\x00'
        datatosend = datatosend + pack('>L', len(payload)) #payload length
        datatosend = datatosend + hashlib.sha512(payload).digest()[0:4]
        datatosend = datatosend + payload

        if verbose >= 1:
            shared.printLock.acquire()
            print 'Broadcasting addr with', numberOfAddressesInAddrMessage, 'entries.'
            shared.printLock.release()
        shared.broadcastToSendDataQueues((self.streamNumber, 'sendaddr', datatosend))
    #Send a big addr message to our peer
    def sendaddr(self):
        addrsInMyStream = {}
        addrsInChildStreamLeft = {}
        addrsInChildStreamRight = {}
        #print 'knownNodes', shared.knownNodes
        #We are going to share a maximum number of 1000 addrs with our peer. 500 from this stream, 250 from the left child stream, and 250 from the right child stream.
        shared.knownNodesLock.acquire()
        if len(shared.knownNodes[self.streamNumber]) > 0:
            for i in range(500):
                random.seed()
                HOST, = random.sample(shared.knownNodes[self.streamNumber], 1)
                if isHostInPrivateIPRange(HOST):
                    continue
                addrsInMyStream[HOST] = shared.knownNodes[self.streamNumber][HOST]
        if len(shared.knownNodes[self.streamNumber * 2]) > 0:
            for i in range(250):
                random.seed()
                HOST, = random.sample(shared.knownNodes[self.streamNumber * 2], 1)
                if isHostInPrivateIPRange(HOST):
                    continue
                addrsInChildStreamLeft[HOST] = shared.knownNodes[self.streamNumber * 2][HOST]
        if len(shared.knownNodes[(self.streamNumber * 2) + 1]) > 0:
            for i in range(250):
                random.seed()
                HOST, = random.sample(shared.knownNodes[(self.streamNumber * 2) + 1], 1)
                if isHostInPrivateIPRange(HOST):
                    continue
                addrsInChildStreamRight[HOST] = shared.knownNodes[(self.streamNumber * 2) + 1][HOST]
        shared.knownNodesLock.release()
        numberOfAddressesInAddrMessage = 0
        payload = ''
        #print 'addrsInMyStream.items()', addrsInMyStream.items()
        for HOST, value in addrsInMyStream.items():
            PORT, timeLastReceivedMessageFromThisNode = value
            if timeLastReceivedMessageFromThisNode > (int(time.time()) - maximumAgeOfNodesThatIAdvertiseToOthers): #If it is younger than 3 hours old..
                numberOfAddressesInAddrMessage += 1
                payload += pack('>Q', timeLastReceivedMessageFromThisNode) #64-bit time
                payload += pack('>I', self.streamNumber)
                payload += pack('>q', 1) #service bit flags offered by this node
                payload += '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xFF\xFF' + socket.inet_aton(HOST)
                payload += pack('>H', PORT) #remote port
        for HOST, value in addrsInChildStreamLeft.items():
            PORT, timeLastReceivedMessageFromThisNode = value
            if timeLastReceivedMessageFromThisNode > (int(time.time()) - maximumAgeOfNodesThatIAdvertiseToOthers): #If it is younger than 3 hours old..
                numberOfAddressesInAddrMessage += 1
                payload += pack('>Q', timeLastReceivedMessageFromThisNode) #64-bit time
                payload += pack('>I', self.streamNumber * 2)
                payload += pack('>q', 1) #service bit flags offered by this node
                payload += '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xFF\xFF' + socket.inet_aton(HOST)
                payload += pack('>H', PORT) #remote port
        for HOST, value in addrsInChildStreamRight.items():
            PORT, timeLastReceivedMessageFromThisNode = value
            if timeLastReceivedMessageFromThisNode > (int(time.time()) - maximumAgeOfNodesThatIAdvertiseToOthers): #If it is younger than 3 hours old..
                numberOfAddressesInAddrMessage += 1
                payload += pack('>Q', timeLastReceivedMessageFromThisNode) #64-bit time
                payload += pack('>I', (self.streamNumber * 2) + 1)
                payload += pack('>q', 1) #service bit flags offered by this node
                payload += '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xFF\xFF' + socket.inet_aton(HOST)
                payload += pack('>H', PORT) #remote port
        payload = encodeVarint(numberOfAddressesInAddrMessage) + payload
        datatosend = '\xE9\xBE\xB4\xD9addr\x00\x00\x00\x00\x00\x00\x00\x00'
        datatosend = datatosend + pack('>L', len(payload)) #payload length
        datatosend = datatosend + hashlib.sha512(payload).digest()[0:4]
        datatosend = datatosend + payload
        try:
            self.sock.sendall(datatosend)
            if verbose >= 1:
                shared.printLock.acquire()
                print 'Sending addr with', numberOfAddressesInAddrMessage, 'entries.'
                shared.printLock.release()
        except Exception, err:
            #if not 'Bad file descriptor' in err:
            shared.printLock.acquire()
            sys.stderr.write('sock.sendall error: %s\n' % err)
            shared.printLock.release()
    #We have received a version message
    def recversion(self, data):
        if len(data) < 83:
            #This version message is unreasonably short. Forget it.
            return
        elif not self.verackSent:
            self.remoteProtocolVersion, = unpack('>L', data[:4])
            if self.remoteProtocolVersion <= 1:
                shared.broadcastToSendDataQueues((0, 'shutdown', self.HOST))
                shared.printLock.acquire()
                print 'Closing connection to old protocol version 1 node:', self.HOST
                shared.printLock.release()
                return
            #print 'remoteProtocolVersion', self.remoteProtocolVersion
            self.myExternalIP = socket.inet_ntoa(data[40:44])
            #print 'myExternalIP', self.myExternalIP
            self.remoteNodeIncomingPort, = unpack('>H', data[70:72])
            #print 'remoteNodeIncomingPort', self.remoteNodeIncomingPort
            useragentLength, lengthOfUseragentVarint = decodeVarint(data[80:84])
            readPosition = 80 + lengthOfUseragentVarint
            useragent = data[readPosition:readPosition + useragentLength]
            readPosition += useragentLength
            numberOfStreamsInVersionMessage, lengthOfNumberOfStreamsInVersionMessage = decodeVarint(data[readPosition:])
            readPosition += lengthOfNumberOfStreamsInVersionMessage
            self.streamNumber, lengthOfRemoteStreamNumber = decodeVarint(data[readPosition:])
            shared.printLock.acquire()
            print 'Remote node useragent:', useragent, ' stream number:', self.streamNumber
            shared.printLock.release()
            if self.streamNumber != 1:
                shared.broadcastToSendDataQueues((0, 'shutdown', self.HOST))
                shared.printLock.acquire()
                print 'Closed connection to', self.HOST, 'because they are interested in stream', self.streamNumber, '.'
                shared.printLock.release()
                return
            shared.connectedHostsList[self.HOST] = 1 #We use this data structure to not only keep track of what hosts we are connected to so that we don't try to connect to them again, but also to list the connections count on the Network Status tab.
            #If this was an incoming connection, then the sendData thread doesn't know the stream. We have to set it.
            if not self.initiatedConnection:
                shared.broadcastToSendDataQueues((0, 'setStreamNumber', (self.HOST, self.streamNumber)))
            if data[72:80] == eightBytesOfRandomDataUsedToDetectConnectionsToSelf:
                shared.broadcastToSendDataQueues((0, 'shutdown', self.HOST))
                shared.printLock.acquire()
                print 'Closing connection to myself:', self.HOST
                shared.printLock.release()
                return
            shared.broadcastToSendDataQueues((0, 'setRemoteProtocolVersion', (self.HOST, self.remoteProtocolVersion)))

            shared.knownNodesLock.acquire()
            shared.knownNodes[self.streamNumber][self.HOST] = (self.remoteNodeIncomingPort, int(time.time()))
            output = open(shared.appdata + 'knownnodes.dat', 'wb')
            pickle.dump(shared.knownNodes, output)
            output.close()
            shared.knownNodesLock.release()
            self.sendverack()
            if self.initiatedConnection == False:
                self.sendversion()
    #Sends a version message
    def sendversion(self):
        shared.printLock.acquire()
        print 'Sending version message'
        shared.printLock.release()
        try:
            self.sock.sendall(assembleVersionMessage(self.HOST, self.PORT, self.streamNumber))
        except Exception, err:
            #if not 'Bad file descriptor' in err:
            shared.printLock.acquire()
            sys.stderr.write('sock.sendall error: %s\n' % err)
            shared.printLock.release()

    #Sends a verack message
    def sendverack(self):
        shared.printLock.acquire()
        print 'Sending verack'
        shared.printLock.release()
        try:
            self.sock.sendall('\xE9\xBE\xB4\xD9\x76\x65\x72\x61\x63\x6B\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xcf\x83\xe1\x35')
        except Exception, err:
            #if not 'Bad file descriptor' in err:
            shared.printLock.acquire()
            sys.stderr.write('sock.sendall error: %s\n' % err)
            shared.printLock.release()
        #cf 83 e1 35 is the first four bytes of SHA-512 of an empty payload; it serves as the checksum for this zero-length message.
        self.verackSent = True
        if self.verackReceived == True:
            self.connectionFullyEstablished()


#Every connection to a peer has a sendDataThread (and also a receiveDataThread).
class sendDataThread(threading.Thread):
    def __init__(self):
        threading.Thread.__init__(self)
        self.mailbox = Queue.Queue()
        shared.sendDataQueues.append(self.mailbox)
        shared.printLock.acquire()
        print 'The length of sendDataQueues at sendDataThread init is:', len(shared.sendDataQueues)
        shared.printLock.release()
        self.data = ''

    def setup(self, sock, HOST, PORT, streamNumber, objectsOfWhichThisRemoteNodeIsAlreadyAware):
        self.sock = sock
        self.HOST = HOST
        self.PORT = PORT
        self.streamNumber = streamNumber
        self.remoteProtocolVersion = -1 #This must be set using the setRemoteProtocolVersion command which is sent through the self.mailbox queue.
        self.lastTimeISentData = int(time.time()) #If it has been more than about five minutes since we last sent data, we'll send a pong message to keep the connection alive.
        self.objectsOfWhichThisRemoteNodeIsAlreadyAware = objectsOfWhichThisRemoteNodeIsAlreadyAware
        shared.printLock.acquire()
        print 'The streamNumber of this sendDataThread (ID:', str(id(self)) + ') at setup() is', self.streamNumber
        shared.printLock.release()

    def sendVersionMessage(self):
        datatosend = assembleVersionMessage(self.HOST, self.PORT, self.streamNumber) #the IP and port of the remote host, and my streamNumber.
        shared.printLock.acquire()
        print 'Sending version packet: ', repr(datatosend)
        shared.printLock.release()
        try:
            self.sock.sendall(datatosend)
        except Exception, err:
            #if not 'Bad file descriptor' in err:
            shared.printLock.acquire()
            sys.stderr.write('sock.sendall error: %s\n' % err)
            shared.printLock.release()
        self.versionSent = 1

    def run(self):
        while True:
            deststream, command, data = self.mailbox.get()
            #shared.printLock.acquire()
            #print 'sendDataThread, destream:', deststream, ', Command:', command, ', ID:', id(self), ', HOST:', self.HOST
            #shared.printLock.release()
            if deststream == self.streamNumber or deststream == 0:
                if command == 'shutdown':
                    if data == self.HOST or data == 'all':
                        shared.printLock.acquire()
                        print 'sendDataThread (associated with', self.HOST, ') ID:', id(self), 'shutting down now.'
                        shared.printLock.release()
                        try:
                            self.sock.shutdown(socket.SHUT_RDWR)
                            self.sock.close()
                        except:
                            pass
                        shared.sendDataQueues.remove(self.mailbox)
                        shared.printLock.acquire()
                        print 'len of sendDataQueues', len(shared.sendDataQueues)
                        shared.printLock.release()
                        break
                #When you receive an incoming connection, a sendDataThread is created even though you don't yet know what stream number the remote peer is interested in. They will tell you in a version message and if you too are interested in that stream then you will continue on with the connection and will set the streamNumber of this send data thread here:
                elif command == 'setStreamNumber':
                    hostInMessage, specifiedStreamNumber = data
                    if hostInMessage == self.HOST:
                        shared.printLock.acquire()
                        print 'setting the stream number in the sendData thread (ID:', id(self), ') to', specifiedStreamNumber
                        shared.printLock.release()
                        self.streamNumber = specifiedStreamNumber
                elif command == 'setRemoteProtocolVersion':
                    hostInMessage, specifiedRemoteProtocolVersion = data
                    if hostInMessage == self.HOST:
                        shared.printLock.acquire()
                        print 'setting the remote node\'s protocol version in the sendData thread (ID:', id(self), ') to', specifiedRemoteProtocolVersion
                        shared.printLock.release()
                        self.remoteProtocolVersion = specifiedRemoteProtocolVersion
                elif command == 'sendaddr':
                    try:
                        #To prevent some network analysis, 'leak' the data out to our peer after waiting a random amount of time unless we have a long list of messages in our queue to send.
                        random.seed()
                        time.sleep(random.randrange(0, 10))
                        self.sock.sendall(data)
                        self.lastTimeISentData = int(time.time())
                    except:
                        print 'self.sock.sendall failed'
                        try:
                            self.sock.shutdown(socket.SHUT_RDWR)
                            self.sock.close()
                        except:
                            pass
                        shared.sendDataQueues.remove(self.mailbox)
                        print 'sendDataThread thread (ID:', str(id(self)) + ') ending now. Was connected to', self.HOST
                        break
                elif command == 'sendinv':
                    if data not in self.objectsOfWhichThisRemoteNodeIsAlreadyAware:
                        payload = '\x01' + data
                        headerData = '\xe9\xbe\xb4\xd9' #magic bits, slightly different from Bitcoin's magic bits.
                        headerData += 'inv\x00\x00\x00\x00\x00\x00\x00\x00\x00'
                        headerData += pack('>L', len(payload))
                        headerData += hashlib.sha512(payload).digest()[:4]
                        #To prevent some network analysis, 'leak' the data out to our peer after waiting a random amount of time
                        random.seed()
                        time.sleep(random.randrange(0, 10))
                        try:
                            self.sock.sendall(headerData + payload)
                            self.lastTimeISentData = int(time.time())
                        except:
                            print 'self.sock.sendall failed'
                            try:
                                self.sock.shutdown(socket.SHUT_RDWR)
                                self.sock.close()
                            except:
                                pass
                            shared.sendDataQueues.remove(self.mailbox)
                            print 'sendDataThread thread (ID:', str(id(self)) + ') ending now. Was connected to', self.HOST
                            break
                elif command == 'pong':
                    if self.lastTimeISentData < (int(time.time()) - 298):
                        #Send out a pong message to keep the connection alive.
                        shared.printLock.acquire()
                        print 'Sending pong to', self.HOST, 'to keep connection alive.'
                        shared.printLock.release()
                        try:
                            self.sock.sendall('\xE9\xBE\xB4\xD9\x70\x6F\x6E\x67\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xcf\x83\xe1\x35')
                            self.lastTimeISentData = int(time.time())
                        except:
                            print 'send pong failed'
                            try:
                                self.sock.shutdown(socket.SHUT_RDWR)
                                self.sock.close()
                            except:
                                pass
                            shared.sendDataQueues.remove(self.mailbox)
                            print 'sendDataThread thread', self, 'ending now. Was connected to', self.HOST
                            break
            else:
                shared.printLock.acquire()
                print 'sendDataThread ID:', id(self), 'ignoring command', command, 'because the thread is not in stream', deststream
                shared.printLock.release()

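#Illustrative note (not from the original code): each sendDataThread consumes
#(deststream, command, data) tuples from its mailbox. For example, broadcasting
#    shared.broadcastToSendDataQueues((0, 'shutdown', 'all'))
#would, under these assumptions, ask every sendDataThread (a deststream of 0
#matches all streams) to close its socket and exit its run() loop.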
def isInSqlInventory(hash):
    t = (hash,)
    shared.sqlLock.acquire()
    shared.sqlSubmitQueue.put('''select hash from inventory where hash=?''')
    shared.sqlSubmitQueue.put(t)
    queryreturn = shared.sqlReturnQueue.get()
    shared.sqlLock.release()
    if queryreturn == []:
        return False
    else:
        return True

def convertIntToString(n):
    a = __builtins__.hex(n)
    if a[-1:] == 'L':
        a = a[:-1]
    if (len(a) % 2) == 0:
        return a[2:].decode('hex')
    else:
        return ('0' + a[2:]).decode('hex')

def convertStringToInt(s):
    return int(s.encode('hex'), 16)

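#Example (illustrative, not part of the original program): these two helpers are
#inverses of one another for positive integers, e.g.
#    convertStringToInt('\x01\x00')   # == 256
#    convertIntToString(256)          # == '\x01\x00'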
#This function expects that pubkey begins with \x04
def calculateBitcoinAddressFromPubkey(pubkey):
    if len(pubkey) != 65:
        print 'Could not calculate Bitcoin address from pubkey because function was passed a pubkey that was', len(pubkey), 'bytes long rather than 65.'
        return "error"
    ripe = hashlib.new('ripemd160')
    sha = hashlib.new('sha256')
    sha.update(pubkey)
    ripe.update(sha.digest())
    ripeWithProdnetPrefix = '\x00' + ripe.digest()
    checksum = hashlib.sha256(hashlib.sha256(ripeWithProdnetPrefix).digest()).digest()[:4]
    binaryBitcoinAddress = ripeWithProdnetPrefix + checksum
    numberOfZeroBytesOnBinaryBitcoinAddress = 0
    while binaryBitcoinAddress[0] == '\x00':
        numberOfZeroBytesOnBinaryBitcoinAddress += 1
        binaryBitcoinAddress = binaryBitcoinAddress[1:]
    base58encoded = arithmetic.changebase(binaryBitcoinAddress, 256, 58)
    return "1" * numberOfZeroBytesOnBinaryBitcoinAddress + base58encoded

def calculateTestnetAddressFromPubkey(pubkey):
    if len(pubkey) != 65:
        print 'Could not calculate Bitcoin address from pubkey because function was passed a pubkey that was', len(pubkey), 'bytes long rather than 65.'
        return "error"
    ripe = hashlib.new('ripemd160')
    sha = hashlib.new('sha256')
    sha.update(pubkey)
    ripe.update(sha.digest())
    ripeWithProdnetPrefix = '\x6F' + ripe.digest()
    checksum = hashlib.sha256(hashlib.sha256(ripeWithProdnetPrefix).digest()).digest()[:4]
    binaryBitcoinAddress = ripeWithProdnetPrefix + checksum
    numberOfZeroBytesOnBinaryBitcoinAddress = 0
    while binaryBitcoinAddress[0] == '\x00':
        numberOfZeroBytesOnBinaryBitcoinAddress += 1
        binaryBitcoinAddress = binaryBitcoinAddress[1:]
    base58encoded = arithmetic.changebase(binaryBitcoinAddress, 256, 58)
    return "1" * numberOfZeroBytesOnBinaryBitcoinAddress + base58encoded

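#Illustrative note (an interpretation, not original code): both helpers implement the
#usual Base58Check construction,
#    version byte + RIPEMD160(SHA256(pubkey)) + first 4 bytes of double-SHA256,
#with version '\x00' for the main Bitcoin network and '\x6F' for testnet; leading
#zero bytes of the binary address become leading '1' characters in the result.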
def signal_handler(signal, frame):
    if shared.safeConfigGetBoolean('bitmessagesettings', 'daemon'):
        shared.doCleanShutdown()
        sys.exit(0)
    else:
        print 'Unfortunately you cannot use Ctrl+C when running the UI because the UI captures the signal.'

def connectToStream(streamNumber):
    selfInitiatedConnections[streamNumber] = {}
    if sys.platform[0:3] == 'win':
        maximumNumberOfHalfOpenConnections = 9
    else:
        maximumNumberOfHalfOpenConnections = 32
    for i in range(maximumNumberOfHalfOpenConnections):
        a = outgoingSynSender()
        a.setup(streamNumber)
        a.start()

#Does an EC point multiplication; turns a private key into a public key.
def pointMult(secret):
    #ctx = OpenSSL.BN_CTX_new() #This value proved to cause Seg Faults on Linux. It turns out that it really didn't speed up EC_POINT_mul anyway.
    k = OpenSSL.EC_KEY_new_by_curve_name(OpenSSL.get_curve('secp256k1'))
    priv_key = OpenSSL.BN_bin2bn(secret, 32, 0)
    group = OpenSSL.EC_KEY_get0_group(k)
    pub_key = OpenSSL.EC_POINT_new(group)
    OpenSSL.EC_POINT_mul(group, pub_key, priv_key, None, None, None)
    OpenSSL.EC_KEY_set_private_key(k, priv_key)
    OpenSSL.EC_KEY_set_public_key(k, pub_key)
    #print 'priv_key', priv_key
    #print 'pub_key', pub_key
    size = OpenSSL.i2o_ECPublicKey(k, 0)
    mb = ctypes.create_string_buffer(size)
    OpenSSL.i2o_ECPublicKey(k, ctypes.byref(ctypes.pointer(mb)))
    #print 'mb.raw', mb.raw.encode('hex'), 'length:', len(mb.raw)
    #print 'mb.raw', mb.raw, 'length:', len(mb.raw)
    OpenSSL.EC_POINT_free(pub_key)
    #OpenSSL.BN_CTX_free(ctx)
    OpenSSL.BN_free(priv_key)
    OpenSSL.EC_KEY_free(k)
    return mb.raw

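#Illustrative note (not original code): for a 32-byte secret, the value returned by
#pointMult() is the uncompressed secp256k1 public key: 65 bytes beginning with '\x04'.
#A hypothetical usage sketch:
#    privkey = hashlib.sha256('example seed').digest()   # 32 bytes
#    pubkey = pointMult(privkey)                         # 65 bytes, pubkey[0] == '\x04'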
def assembleVersionMessage(remoteHost, remotePort, myStreamNumber):
    shared.softwareVersion
    payload = ''
    payload += pack('>L', 2) #protocol version.
    payload += pack('>q', 1) #bitflags of the services I offer.
    payload += pack('>q', int(time.time()))
    payload += pack('>q', 1) #bitflags of the services offered by the remote connection. How can I even know this for sure? This is probably ignored by the remote host.
    payload += '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xFF\xFF' + socket.inet_aton(remoteHost)
    payload += pack('>H', remotePort) #remote IPv6 and port
    payload += pack('>q', 1) #bitflags of the services I offer.
    payload += '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xFF\xFF' + pack('>L', 2130706433) # = 127.0.0.1. This will be ignored by the remote host. The actual remote connected IP will be used.
    payload += pack('>H', shared.config.getint('bitmessagesettings', 'port')) #my external IPv6 and port
    random.seed()
    payload += eightBytesOfRandomDataUsedToDetectConnectionsToSelf
    userAgent = '/PyBitmessage:' + shared.softwareVersion + '/' #Length of userAgent must be less than 253.
    payload += pack('>B', len(userAgent)) #user agent string length. If the user agent is more than 252 bytes long, this code isn't going to work.
    payload += userAgent
    payload += encodeVarint(1) #The number of streams about which I care. PyBitmessage currently only supports 1 per connection.
    payload += encodeVarint(myStreamNumber)
    datatosend = '\xe9\xbe\xb4\xd9' #magic bits, slightly different from Bitcoin's magic bits.
    datatosend = datatosend + 'version\x00\x00\x00\x00\x00' #version command
    datatosend = datatosend + pack('>L', len(payload)) #payload length
    datatosend = datatosend + hashlib.sha512(payload).digest()[0:4]
    return datatosend + payload

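#Illustrative note (not original code): like every Bitmessage message, the version
#message built above is framed with a 24-byte header:
#    4-byte magic '\xe9\xbe\xb4\xd9' | 12-byte command name padded with NULs |
#    4-byte payload length | first 4 bytes of SHA-512(payload)
#followed by the payload itself.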
def isHostInPrivateIPRange(host):
    if host[:3] == '10.':
        return True
    if host[:4] == '172.':
        if host[6] == '.':
            if int(host[4:6]) >= 16 and int(host[4:6]) <= 31:
                return True
    if host[:8] == '192.168.':
        return True
    return False
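#Example (illustrative only):
#    isHostInPrivateIPRange('10.1.2.3')      # True  (10.0.0.0/8)
#    isHostInPrivateIPRange('172.20.0.1')    # True  (172.16.0.0/12)
#    isHostInPrivateIPRange('192.168.1.5')   # True  (192.168.0.0/16)
#    isHostInPrivateIPRange('8.8.8.8')       # False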
#This thread exists because SQLITE3 is so un-threadsafe that we must submit queries to it and it puts results back in a different queue. They won't let us just use locks.
class sqlThread(threading.Thread):
    def __init__(self):
        threading.Thread.__init__(self)

    def run(self):
        self.conn = sqlite3.connect(shared.appdata + 'messages.dat')
        self.conn.text_factory = str
        self.cur = self.conn.cursor()
        try:
            self.cur.execute('''CREATE TABLE inbox (msgid blob, toaddress text, fromaddress text, subject text, received text, message text, folder text, encodingtype int, read bool, UNIQUE(msgid) ON CONFLICT REPLACE)''')
            self.cur.execute('''CREATE TABLE sent (msgid blob, toaddress text, toripe blob, fromaddress text, subject text, message text, ackdata blob, lastactiontime integer, status text, pubkeyretrynumber integer, msgretrynumber integer, folder text, encodingtype int)''')
            self.cur.execute('''CREATE TABLE subscriptions (label text, address text, enabled bool)''')
            self.cur.execute('''CREATE TABLE addressbook (label text, address text)''')
            self.cur.execute('''CREATE TABLE blacklist (label text, address text, enabled bool)''')
            self.cur.execute('''CREATE TABLE whitelist (label text, address text, enabled bool)''')
            #Explanation of what is in the pubkeys table:
            #  The hash is the RIPEMD160 hash that is encoded in the Bitmessage address.
            #  transmitdata is literally the data that was included in the Bitmessage pubkey message when it arrived, except for the 24 byte protocol header- ie, it starts with the POW nonce.
            #  time is the time that the pubkey was broadcast on the network, the same as with every other type of Bitmessage object.
            #  usedpersonally is set to "yes" if we have used the key personally. This keeps us from deleting it because we may want to reply to a message in the future. This field is not a bool because we may need more flexibility in the future and it doesn't take up much more space anyway.
            self.cur.execute('''CREATE TABLE pubkeys (hash blob, transmitdata blob, time int, usedpersonally text, UNIQUE(hash) ON CONFLICT REPLACE)''')
            self.cur.execute('''CREATE TABLE inventory (hash blob, objecttype text, streamnumber int, payload blob, receivedtime integer, UNIQUE(hash) ON CONFLICT REPLACE)''')
            self.cur.execute('''CREATE TABLE knownnodes (timelastseen int, stream int, services blob, host blob, port blob, UNIQUE(host, stream, port) ON CONFLICT REPLACE)''') #This table isn't used in the program yet but I have a feeling that we'll need it.
            self.cur.execute('''INSERT INTO subscriptions VALUES('Bitmessage new releases/announcements','BM-GtovgYdgs7qXPkoYaRgrLFuFKz1SFpsw',1)''')
            self.cur.execute('''CREATE TABLE settings (key blob, value blob, UNIQUE(key) ON CONFLICT REPLACE)''')
            self.cur.execute('''INSERT INTO settings VALUES('version','1')''')
            self.cur.execute('''INSERT INTO settings VALUES('lastvacuumtime',?)''', (int(time.time()),))
            self.conn.commit()
            print 'Created messages database file'
        except Exception, err:
            if str(err) == 'table inbox already exists':
                shared.printLock.acquire()
                print 'Database file already exists.'
                shared.printLock.release()
            else:
                sys.stderr.write('ERROR trying to create database file (messages.dat). Error message: %s\n' % str(err))
                os._exit(0)

        #People running earlier versions of PyBitmessage do not have the usedpersonally field in their pubkeys table. Let's add it.
        if shared.config.getint('bitmessagesettings', 'settingsversion') == 2:
            item = '''ALTER TABLE pubkeys ADD usedpersonally text DEFAULT 'no' '''
            parameters = ''
            self.cur.execute(item, parameters)
            self.conn.commit()
            shared.config.set('bitmessagesettings', 'settingsversion', '3')
            with open(shared.appdata + 'keys.dat', 'wb') as configfile:
                shared.config.write(configfile)

        #People running earlier versions of PyBitmessage do not have the encodingtype field in their inbox and sent tables or the read field in the inbox table. Let's add them.
        if shared.config.getint('bitmessagesettings', 'settingsversion') == 3:
            item = '''ALTER TABLE inbox ADD encodingtype int DEFAULT '2' '''
            parameters = ''
            self.cur.execute(item, parameters)
            item = '''ALTER TABLE inbox ADD read bool DEFAULT '1' '''
            parameters = ''
            self.cur.execute(item, parameters)
            item = '''ALTER TABLE sent ADD encodingtype int DEFAULT '2' '''
            parameters = ''
            self.cur.execute(item, parameters)
            self.conn.commit()
            shared.config.set('bitmessagesettings', 'settingsversion', '4')
            with open(shared.appdata + 'keys.dat', 'wb') as configfile:
                shared.config.write(configfile)

        if shared.config.getint('bitmessagesettings', 'settingsversion') == 4:
            shared.config.set('bitmessagesettings', 'defaultnoncetrialsperbyte', str(shared.networkDefaultProofOfWorkNonceTrialsPerByte))
            shared.config.set('bitmessagesettings', 'defaultpayloadlengthextrabytes', str(shared.networkDefaultPayloadLengthExtraBytes))
            shared.config.set('bitmessagesettings', 'settingsversion', '5')

        if shared.config.getint('bitmessagesettings', 'settingsversion') == 5:
            shared.config.set('bitmessagesettings', 'maxacceptablenoncetrialsperbyte', '0')
            shared.config.set('bitmessagesettings', 'maxacceptablepayloadlengthextrabytes', '0')
            shared.config.set('bitmessagesettings', 'settingsversion', '6')
            with open(shared.appdata + 'keys.dat', 'wb') as configfile:
                shared.config.write(configfile)

        #From now on, let us keep a 'version' embedded in the messages.dat file so that when we make changes to the database, the database version we are on can stay embedded in the messages.dat file. Let us check to see if the settings table exists yet.
        item = '''SELECT name FROM sqlite_master WHERE type='table' AND name='settings';'''
        parameters = ''
        self.cur.execute(item, parameters)
        if self.cur.fetchall() == []:
            #The settings table doesn't exist. We need to make it.
            print 'In messages.dat database, creating new \'settings\' table.'
            self.cur.execute('''CREATE TABLE settings (key text, value blob, UNIQUE(key) ON CONFLICT REPLACE)''')
            self.cur.execute('''INSERT INTO settings VALUES('version','1')''')
            self.cur.execute('''INSERT INTO settings VALUES('lastvacuumtime',?)''', (int(time.time()),))
            print 'In messages.dat database, removing an obsolete field from the pubkeys table.'
            self.cur.execute('''CREATE TEMPORARY TABLE pubkeys_backup(hash blob, transmitdata blob, time int, usedpersonally text, UNIQUE(hash) ON CONFLICT REPLACE);''')
            self.cur.execute('''INSERT INTO pubkeys_backup SELECT hash, transmitdata, time, usedpersonally FROM pubkeys;''')
            self.cur.execute('''DROP TABLE pubkeys''')
            self.cur.execute('''CREATE TABLE pubkeys (hash blob, transmitdata blob, time int, usedpersonally text, UNIQUE(hash) ON CONFLICT REPLACE)''')
            self.cur.execute('''INSERT INTO pubkeys SELECT hash, transmitdata, time, usedpersonally FROM pubkeys_backup;''')
            self.cur.execute('''DROP TABLE pubkeys_backup;''')
            print 'Deleting all pubkeys from inventory. They will be redownloaded and then saved with the correct times.'
            self.cur.execute('''delete from inventory where objecttype = 'pubkey';''')
            print 'Replacing the Bitmessage announcements mailing list with a new one.'
            self.cur.execute('''delete from subscriptions where address='BM-BbkPSZbzPwpVcYZpU4yHwf9ZPEapN5Zx' ''')
            self.cur.execute('''INSERT INTO subscriptions VALUES('Bitmessage new releases/announcements','BM-GtovgYdgs7qXPkoYaRgrLFuFKz1SFpsw',1)''')
            print 'Committing.'
            self.conn.commit()
            print 'Vacuuming messages.dat. You might notice that the file size gets much smaller.'
            self.cur.execute('''VACUUM''')

        #After code refactoring, the possible status values for sent messages have changed.
        self.cur.execute('''update sent set status='doingmsgpow' where status='doingpow' ''')
        self.cur.execute('''update sent set status='msgsent' where status='sentmessage' ''')
        self.cur.execute('''update sent set status='doingpubkeypow' where status='findingpubkey' ''')
        self.cur.execute('''update sent set status='broadcastqueued' where status='broadcastpending' ''')
        self.conn.commit()

        try:
            testpayload = '\x00\x00'
            t = ('1234', testpayload, '12345678', 'no')
            self.cur.execute('''INSERT INTO pubkeys VALUES(?,?,?,?)''', t)
            self.conn.commit()
            self.cur.execute('''SELECT transmitdata FROM pubkeys WHERE hash='1234' ''')
            queryreturn = self.cur.fetchall()
            for row in queryreturn:
                transmitdata, = row
            self.cur.execute('''DELETE FROM pubkeys WHERE hash='1234' ''')
            self.conn.commit()
            if transmitdata == '':
                sys.stderr.write('Problem: The version of SQLite you have cannot store Null values. Please download and install the latest revision of your version of Python (for example, the latest Python 2.7 revision) and try again.\n')
                sys.stderr.write('PyBitmessage will now exit very abruptly. You may now see threading errors related to this abrupt exit but the problem you need to solve is related to SQLite.\n\n')
                os._exit(0)
        except Exception, err:
            print err

        #Let us check to see the last time we vacuumed the messages.dat file. If it has been more than a month, let's do it now.
        item = '''SELECT value FROM settings WHERE key='lastvacuumtime';'''
        parameters = ''
        self.cur.execute(item, parameters)
        queryreturn = self.cur.fetchall()
        for row in queryreturn:
            value, = row
            if int(value) < int(time.time()) - 2592000:
                print 'It has been a long time since the messages.dat file has been vacuumed. Vacuuming now...'
                self.cur.execute('''VACUUM''')
                item = '''update settings set value=? WHERE key='lastvacuumtime';'''
                parameters = (int(time.time()),)
                self.cur.execute(item, parameters)

        while True:
            item = shared.sqlSubmitQueue.get()
            if item == 'commit':
                self.conn.commit()
            elif item == 'exit':
                self.conn.close()
                shared.printLock.acquire()
                print 'sqlThread exiting gracefully.'
                shared.printLock.release()
                return
            elif item == 'movemessagstoprog':
                shared.printLock.acquire()
                print 'the sqlThread is moving the messages.dat file to the local program directory.'
                shared.printLock.release()
                self.conn.commit()
                self.conn.close()
                shutil.move(shared.lookupAppdataFolder() + 'messages.dat', 'messages.dat')
                self.conn = sqlite3.connect('messages.dat')
                self.conn.text_factory = str
                self.cur = self.conn.cursor()
            elif item == 'movemessagstoappdata':
                shared.printLock.acquire()
                print 'the sqlThread is moving the messages.dat file to the Appdata folder.'
                shared.printLock.release()
                self.conn.commit()
                self.conn.close()
                shutil.move('messages.dat', shared.lookupAppdataFolder() + 'messages.dat')
                self.conn = sqlite3.connect(shared.appdata + 'messages.dat')
                self.conn.text_factory = str
                self.cur = self.conn.cursor()
            elif item == 'deleteandvacuume':
                self.cur.execute('''delete from inbox where folder='trash' ''')
                self.cur.execute('''delete from sent where folder='trash' ''')
                self.conn.commit()
                self.cur.execute('''VACUUM''')
            else:
                parameters = shared.sqlSubmitQueue.get()
                #print 'item', item
                #print 'parameters', parameters
                try:
                    self.cur.execute(item, parameters)
                except Exception, err:
                    shared.printLock.acquire()
                    sys.stderr.write('\nMajor error occurred when trying to execute a SQL statement within the sqlThread. Please tell Atheros about this error message or post it in the forum! Error occurred while trying to execute statement: "' + str(item) + '"  Here are the parameters; you might want to censor this data with asterisks (***) as it can contain private information: ' + str(repr(parameters)) + '\nHere is the actual error message thrown by the sqlThread: ' + str(err) + '\n')
                    sys.stderr.write('This program shall now abruptly exit!\n')
                    shared.printLock.release()
                    os._exit(0)
                shared.sqlReturnQueue.put(self.cur.fetchall())
                #shared.sqlSubmitQueue.task_done()

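#Illustrative note (not original code): other threads talk to the sqlThread through
#shared.sqlSubmitQueue / shared.sqlReturnQueue while holding shared.sqlLock. A typical
#read (mirroring isInSqlInventory above) looks like
#    shared.sqlLock.acquire()
#    shared.sqlSubmitQueue.put('''select hash from inventory where hash=?''')
#    shared.sqlSubmitQueue.put((someHash,))
#    rows = shared.sqlReturnQueue.get()
#    shared.sqlLock.release()
#The statement and its parameter tuple are put separately, in that order; writers also
#put the plain string 'commit' afterwards so the change is flushed to disk.
#(someHash here is a hypothetical variable.)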
'''The singleCleaner class is a timer-driven thread that cleans data structures to free memory, resends messages when a remote node doesn't respond, and sends pong messages to keep connections alive if the network isn't busy.
It cleans these data structures in memory:
    inventory (moves data to the on-disk sql database)
It cleans these tables on the disk:
    inventory (clears data more than 2 days and 12 hours old)
    pubkeys (clears pubkeys older than 4 weeks old which we have not used personally)
It resends messages when there has been no response:
    resends getpubkey messages in 4 days (then 8 days, then 16 days, etc...)
    resends msg messages in 4 days (then 8 days, then 16 days, etc...)
'''
class singleCleaner(threading.Thread):
    def __init__(self):
        threading.Thread.__init__(self)

def run ( self ) :
timeWeLastClearedInventoryAndPubkeysTables = 0
while True :
2013-05-02 17:53:54 +02:00
shared . sqlLock . acquire ( )
shared . UISignalQueue . put ( ( ' updateStatusBar ' , ' Doing housekeeping (Flushing inventory in memory to disk...) ' ) )
for hash , storedValue in shared . inventory . items ( ) :
2012-11-19 20:45:05 +01:00
objectType , streamNumber , payload , receivedTime = storedValue
2013-04-04 19:39:11 +02:00
if int ( time . time ( ) ) - 3600 > receivedTime :
2012-11-19 20:45:05 +01:00
t = ( hash , objectType , streamNumber , payload , receivedTime )
2013-05-02 17:53:54 +02:00
shared . sqlSubmitQueue . put ( ''' INSERT INTO inventory VALUES (?,?,?,?,?) ''' )
shared . sqlSubmitQueue . put ( t )
shared . sqlReturnQueue . get ( )
del shared . inventory [ hash ]
shared . sqlSubmitQueue . put ( ' commit ' )
shared . UISignalQueue . put ( ( ' updateStatusBar ' , ' ' ) )
shared . sqlLock . release ( )
shared . broadcastToSendDataQueues ( ( 0 , ' pong ' , ' no data ' ) ) #commands the sendData threads to send out a pong message if they haven't sent anything else in the last five minutes. The socket timeout-time is 10 minutes.
#If we are running as a daemon then we are going to fill up the UI queue which will never be handled by a UI. We should clear it to save memory.
if shared . safeConfigGetBoolean ( ' bitmessagesettings ' , ' daemon ' ) :
shared . UISignalQueue . queue . clear ( )
if timeWeLastClearedInventoryAndPubkeysTables < int(time.time()) - 7380:
timeWeLastClearedInventoryAndPubkeysTables = int(time.time())
#inventory (moves data from the inventory data structure to the on-disk sql database)
shared.sqlLock.acquire()
#inventory (clears pubkeys after 28 days and everything else after 2 days and 12 hours)
t = (int(time.time()) - lengthOfTimeToLeaveObjectsInInventory, int(time.time()) - lengthOfTimeToHoldOnToAllPubkeys)
shared.sqlSubmitQueue.put('''DELETE FROM inventory WHERE (receivedtime<? AND objecttype<>'pubkey') OR (receivedtime<? AND objecttype='pubkey')''')
shared.sqlSubmitQueue.put(t)
shared.sqlReturnQueue.get()
#pubkeys
t = (int(time.time()) - lengthOfTimeToHoldOnToAllPubkeys,)
shared.sqlSubmitQueue.put('''DELETE FROM pubkeys WHERE time<? AND usedpersonally='no' ''')
shared.sqlSubmitQueue.put(t)
shared.sqlReturnQueue.get()
shared.sqlSubmitQueue.put('commit')
t = ()
shared.sqlSubmitQueue.put('''select toaddress, toripe, fromaddress, subject, message, ackdata, lastactiontime, status, pubkeyretrynumber, msgretrynumber FROM sent WHERE ((status='awaitingpubkey' OR status='msgsent') AND folder='sent')''') #If the message's folder='trash' then we'll ignore it.
shared.sqlSubmitQueue.put(t)
queryreturn = shared.sqlReturnQueue.get()
for row in queryreturn:
if len(row) < 5:
shared.printLock.acquire()
sys.stderr.write('Something went wrong in the singleCleaner thread: a query did not return the requested fields. ' + repr(row))
time.sleep(3)
shared.printLock.release()
break
toaddress, toripe, fromaddress, subject, message, ackdata, lastactiontime, status, pubkeyretrynumber, msgretrynumber = row
if status == 'awaitingpubkey':
if int(time.time()) - lastactiontime > (maximumAgeOfAnObjectThatIAmWillingToAccept * (2 ** (pubkeyretrynumber))):
print 'It has been a long time and we haven\'t heard a response to our getpubkey request. Sending again.'
try:
del neededPubkeys[toripe] #We need to take this entry out of the neededPubkeys structure because the shared.workerQueue checks to see whether the entry is already present and will not do the POW and send the message because it assumes that it has already done it recently.
except:
pass
shared.UISignalQueue.put(('updateStatusBar', 'Doing work necessary to again attempt to request a public key...'))
t = (int(time.time()), pubkeyretrynumber + 1, toripe)
shared.sqlSubmitQueue.put('''UPDATE sent SET lastactiontime=?, pubkeyretrynumber=?, status='msgqueued' WHERE toripe=?''')
shared.sqlSubmitQueue.put(t)
shared.sqlReturnQueue.get()
shared.sqlSubmitQueue.put('commit')
shared.workerQueue.put(('sendmessage', ''))
else: # status == msgsent
if int(time.time()) - lastactiontime > (maximumAgeOfAnObjectThatIAmWillingToAccept * (2 ** (msgretrynumber))):
print 'It has been a long time and we haven\'t heard an acknowledgement to our msg. Sending again.'
t = (int(time.time()), msgretrynumber + 1, 'msgqueued', ackdata)
shared.sqlSubmitQueue.put('''UPDATE sent SET lastactiontime=?, msgretrynumber=?, status=? WHERE ackdata=?''')
shared.sqlSubmitQueue.put(t)
shared.sqlReturnQueue.get()
shared.sqlSubmitQueue.put('commit')
shared.workerQueue.put(('sendmessage', ''))
shared.UISignalQueue.put(('updateStatusBar', 'Doing work necessary to again attempt to deliver a message...'))
shared.sqlSubmitQueue.put('commit')
shared.sqlLock.release()
time.sleep(300)
#This thread, of which there is only one, does the heavy lifting: calculating POWs.
2013-05-01 22:06:55 +02:00
class singleWorker ( threading . Thread ) :
def __init__ ( self ) :
#QThread.__init__(self, parent)
threading . Thread . __init__ ( self )
2012-11-19 20:45:05 +01:00
def run ( self ) :
2013-05-02 17:53:54 +02:00
shared . sqlLock . acquire ( )
2013-06-03 21:48:53 +02:00
shared . sqlSubmitQueue . put ( ''' SELECT toripe FROM sent WHERE ((status= ' awaitingpubkey ' OR status= ' doingpubkeypow ' ) AND folder= ' sent ' ) ''' )
2013-05-29 23:18:44 +02:00
shared . sqlSubmitQueue . put ( ' ' )
2013-05-02 17:53:54 +02:00
queryreturn = shared . sqlReturnQueue . get ( )
shared . sqlLock . release ( )
2012-11-19 20:45:05 +01:00
for row in queryreturn :
toripe , = row
2013-05-29 23:18:44 +02:00
neededPubkeys [ toripe ] = 0
2012-11-19 20:45:05 +01:00
2013-06-03 21:48:53 +02:00
#Initialize the ackdataForWhichImWatching data structure using data from the sql database.
shared . sqlLock . acquire ( )
shared . sqlSubmitQueue . put ( ''' SELECT ackdata FROM sent where (status= ' msgsent ' OR status= ' doingmsgpow ' ) ''' )
shared . sqlSubmitQueue . put ( ' ' )
queryreturn = shared . sqlReturnQueue . get ( )
shared . sqlLock . release ( )
for row in queryreturn :
ackdata , = row
print ' Watching for ackdata ' , ackdata . encode ( ' hex ' )
ackdataForWhichImWatching [ ackdata ] = 0
2013-05-02 17:53:54 +02:00
shared . sqlLock . acquire ( )
2013-05-29 23:18:44 +02:00
shared . sqlSubmitQueue . put ( ''' SELECT DISTINCT toaddress FROM sent WHERE (status= ' doingpubkeypow ' AND folder= ' sent ' ) ''' )
shared . sqlSubmitQueue . put ( ' ' )
2013-05-02 17:53:54 +02:00
queryreturn = shared . sqlReturnQueue . get ( )
shared . sqlLock . release ( )
2012-11-19 20:45:05 +01:00
for row in queryreturn :
2013-05-29 23:18:44 +02:00
toaddress , = row
self . requestPubKey ( toaddress )
2013-06-03 21:48:53 +02:00
time . sleep ( 10 ) #give some time for the GUI to start before we start on existing POW tasks.
2013-05-29 23:18:44 +02:00
self . sendMsg ( ) #just in case there are any pending tasks for msg messages that have yet to be sent.
self . sendBroadcast ( ) #just in case there are any tasks for Broadcasts that have yet to be sent.
2012-11-19 20:45:05 +01:00
while True :
2013-05-02 17:53:54 +02:00
command , data = shared . workerQueue . get ( )
2012-11-19 20:45:05 +01:00
if command == ' sendmessage ' :
2013-05-29 23:18:44 +02:00
self . sendMsg ( )
2012-11-19 20:45:05 +01:00
elif command == ' sendbroadcast ' :
self . sendBroadcast ( )
2013-01-22 20:29:49 +01:00
elif command == ' doPOWForMyV2Pubkey ' :
self . doPOWForMyV2Pubkey ( data )
2013-04-24 21:48:46 +02:00
elif command == ' doPOWForMyV3Pubkey ' :
self . doPOWForMyV3Pubkey ( data )
2013-06-11 05:43:06 +02:00
""" elif command == ' newpubkey ' :
toAddressVersion , toStreamNumber , toRipe = data
if toRipe in neededPubkeys :
print ' We have been awaiting the arrival of this pubkey. '
del neededPubkeys [ toRipe ]
t = ( toRipe , )
shared . sqlLock . acquire ( )
shared . sqlSubmitQueue . put ( ''' UPDATE sent SET status= ' doingmsgpow ' WHERE toripe=? AND status= ' awaitingpubkey ' and folder= ' sent ' ''' )
shared . sqlSubmitQueue . put ( t )
shared . sqlReturnQueue . get ( )
shared . sqlSubmitQueue . put ( ' commit ' )
shared . sqlLock . release ( )
self . sendMsg ( )
else :
shared . printLock . acquire ( )
print ' We don \' t need this pub key. We didn \' t ask for it. Pubkey hash: ' , toRipe . encode ( ' hex ' )
shared . printLock . release ( ) """
2013-03-19 18:32:37 +01:00
else :
2013-05-02 17:53:54 +02:00
shared . printLock . acquire ( )
2013-03-19 18:32:37 +01:00
sys . stderr . write ( ' Probable programming error: The command sent to the workerThread is weird. It is: %s \n ' % command )
2013-05-02 17:53:54 +02:00
shared . printLock . release ( )
shared . workerQueue . task_done ( )
2012-11-19 20:45:05 +01:00
2013-03-28 22:56:20 +01:00
def doPOWForMyV2Pubkey ( self , hash ) : #This function also broadcasts out the pubkey message once it is done with the POW
#Look up my stream number based on my address hash
2013-05-02 18:47:43 +02:00
""" configSections = shared.config.sections()
2013-03-28 22:56:20 +01:00
for addressInKeysFile in configSections :
if addressInKeysFile < > ' bitmessagesettings ' :
status , addressVersionNumber , streamNumber , hashFromThisParticularAddress = decodeAddress ( addressInKeysFile )
if hash == hashFromThisParticularAddress :
myAddress = addressInKeysFile
2013-04-22 22:01:41 +02:00
break """
2013-05-02 17:53:54 +02:00
myAddress = shared . myAddressesByHash [ hash ]
2013-04-22 22:01:41 +02:00
status , addressVersionNumber , streamNumber , hash = decodeAddress ( myAddress )
2013-04-02 18:42:27 +02:00
embeddedTime = int ( time . time ( ) + random . randrange ( - 300 , 300 ) ) #the current time plus or minus five minutes
2013-02-11 22:28:38 +01:00
payload = pack ( ' >I ' , ( embeddedTime ) )
2013-03-28 22:56:20 +01:00
payload + = encodeVarint ( addressVersionNumber ) #Address version number
2013-01-22 20:29:49 +01:00
payload + = encodeVarint ( streamNumber )
payload + = ' \x00 \x00 \x00 \x01 ' #bitfield of features supported by me (see the wiki).
2013-02-12 21:00:04 +01:00
try :
2013-05-02 17:53:54 +02:00
privSigningKeyBase58 = shared . config . get ( myAddress , ' privsigningkey ' )
privEncryptionKeyBase58 = shared . config . get ( myAddress , ' privencryptionkey ' )
2013-02-12 21:00:04 +01:00
except Exception , err :
2013-05-02 17:53:54 +02:00
shared . printLock . acquire ( )
2013-02-12 21:00:04 +01:00
sys . stderr . write ( ' Error within doPOWForMyV2Pubkey. Could not read the keys from the keys.dat file for a requested address. %s \n ' % err )
2013-05-02 17:53:54 +02:00
shared . printLock . release ( )
2013-02-12 21:00:04 +01:00
return
2013-01-22 20:29:49 +01:00
2013-05-02 17:53:54 +02:00
privSigningKeyHex = shared . decodeWalletImportFormat ( privSigningKeyBase58 ) . encode ( ' hex ' )
privEncryptionKeyHex = shared . decodeWalletImportFormat ( privEncryptionKeyBase58 ) . encode ( ' hex ' )
2013-01-22 20:29:49 +01:00
pubSigningKey = highlevelcrypto . privToPub ( privSigningKeyHex ) . decode ( ' hex ' )
pubEncryptionKey = highlevelcrypto . privToPub ( privEncryptionKeyHex ) . decode ( ' hex ' )
payload + = pubSigningKey [ 1 : ]
payload + = pubEncryptionKey [ 1 : ]
2013-02-12 21:00:04 +01:00
#Do the POW for this pubkey message
2013-05-02 21:59:10 +02:00
target = 2 * * 64 / ( ( len ( payload ) + shared . networkDefaultPayloadLengthExtraBytes + 8 ) * shared . networkDefaultProofOfWorkNonceTrialsPerByte )
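#A worked example of this target calculation (illustrative only; 320 nonce trials per byte and
#14000 extra bytes are assumed 2013 defaults, the real values come from shared.py): for a
#version 2 pubkey payload of roughly 138 bytes (4 time + 1 version + 1 stream + 4 bitfield + 64 + 64),
#    target = 2**64 // ((138 + 14000 + 8) * 320), which is roughly 4 * 10**12,
#so on average about (138 + 14000 + 8) * 320 = 4,526,720 hash trials are needed to find a nonce.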
2013-04-24 21:48:46 +02:00
print ' (For pubkey message) Doing proof of work... '
initialHash = hashlib . sha512 ( payload ) . digest ( )
2013-05-29 22:01:12 +02:00
trialValue , nonce = proofofwork . run ( target , initialHash )
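#A minimal sketch of what the proof-of-work search amounts to (assuming proofofwork.run follows
#the Bitmessage scheme of a double SHA-512 over an 8-byte nonce prepended to the initial hash;
#the real module is expected to be much faster than this naive loop):
#    def naivePow(target, initialHash):
#        nonce = 0
#        trialValue = target + 1
#        while trialValue > target:
#            nonce += 1
#            trialValue, = unpack('>Q', hashlib.sha512(hashlib.sha512(
#                pack('>Q', nonce) + initialHash).digest()).digest()[0:8])
#        return trialValue, nonce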
2013-04-24 21:48:46 +02:00
print ' (For pubkey message) Found proof of work ' , trialValue , ' Nonce: ' , nonce
payload = pack ( ' >Q ' , nonce ) + payload
2013-04-25 22:11:00 +02:00
""" t = (hash,payload,embeddedTime, ' no ' )
2013-05-02 17:53:54 +02:00
shared . sqlLock . acquire ( )
shared . sqlSubmitQueue . put ( ''' INSERT INTO pubkeys VALUES (?,?,?,?) ''' )
shared . sqlSubmitQueue . put ( t )
queryreturn = shared . sqlReturnQueue . get ( )
shared . sqlSubmitQueue . put ( ' commit ' )
shared . sqlLock . release ( ) """
2013-04-24 21:48:46 +02:00
inventoryHash = calculateInventoryHash ( payload )
objectType = ' pubkey '
2013-05-02 17:53:54 +02:00
shared . inventory [ inventoryHash ] = ( objectType , streamNumber , payload , embeddedTime )
2013-04-24 21:48:46 +02:00
2013-05-02 17:53:54 +02:00
shared . printLock . acquire ( )
2013-04-24 21:48:46 +02:00
print ' broadcasting inv with hash: ' , inventoryHash . encode ( ' hex ' )
2013-05-02 17:53:54 +02:00
shared . printLock . release ( )
2013-05-18 18:11:21 +02:00
shared . broadcastToSendDataQueues ( ( streamNumber , ' sendinv ' , inventoryHash ) )
2013-05-02 17:53:54 +02:00
shared . UISignalQueue . put ( ( ' updateStatusBar ' , ' ' ) )
shared . config . set ( myAddress , ' lastpubkeysendtime ' , str ( int ( time . time ( ) ) ) )
with open ( shared . appdata + ' keys.dat ' , ' wb ' ) as configfile :
2013-05-02 18:47:43 +02:00
shared . config . write ( configfile )
2013-04-24 21:48:46 +02:00
def doPOWForMyV3Pubkey ( self , hash ) : #This function also broadcasts out the pubkey message once it is done with the POW
2013-05-02 17:53:54 +02:00
myAddress = shared . myAddressesByHash [ hash ]
2013-04-24 21:48:46 +02:00
status , addressVersionNumber , streamNumber , hash = decodeAddress ( myAddress )
embeddedTime = int ( time . time ( ) + random . randrange ( - 300 , 300 ) ) #the current time plus or minus five minutes
payload = pack ( ' >I ' , ( embeddedTime ) )
payload + = encodeVarint ( addressVersionNumber ) #Address version number
payload + = encodeVarint ( streamNumber )
payload + = ' \x00 \x00 \x00 \x01 ' #bitfield of features supported by me (see the wiki).
try :
2013-05-02 17:53:54 +02:00
privSigningKeyBase58 = shared . config . get ( myAddress , ' privsigningkey ' )
privEncryptionKeyBase58 = shared . config . get ( myAddress , ' privencryptionkey ' )
2013-04-24 21:48:46 +02:00
except Exception , err :
2013-05-02 17:53:54 +02:00
shared . printLock . acquire ( )
2013-04-24 21:48:46 +02:00
sys . stderr . write ( ' Error within doPOWForMyV3Pubkey. Could not read the keys from the keys.dat file for a requested address. %s \n ' % err )
2013-05-02 17:53:54 +02:00
shared . printLock . release ( )
2013-04-24 21:48:46 +02:00
return
2013-05-02 17:53:54 +02:00
privSigningKeyHex = shared . decodeWalletImportFormat ( privSigningKeyBase58 ) . encode ( ' hex ' )
privEncryptionKeyHex = shared . decodeWalletImportFormat ( privEncryptionKeyBase58 ) . encode ( ' hex ' )
2013-04-24 21:48:46 +02:00
pubSigningKey = highlevelcrypto . privToPub ( privSigningKeyHex ) . decode ( ' hex ' )
pubEncryptionKey = highlevelcrypto . privToPub ( privEncryptionKeyHex ) . decode ( ' hex ' )
payload + = pubSigningKey [ 1 : ]
payload + = pubEncryptionKey [ 1 : ]
2013-05-02 17:53:54 +02:00
payload + = encodeVarint ( shared . config . getint ( myAddress , ' noncetrialsperbyte ' ) )
payload + = encodeVarint ( shared . config . getint ( myAddress , ' payloadlengthextrabytes ' ) )
2013-04-24 21:48:46 +02:00
signature = highlevelcrypto . sign ( payload , privSigningKeyHex )
payload + = encodeVarint ( len ( signature ) )
payload + = signature
#Do the POW for this pubkey message
2013-05-02 21:59:10 +02:00
target = 2 * * 64 / ( ( len ( payload ) + shared . networkDefaultPayloadLengthExtraBytes + 8 ) * shared . networkDefaultProofOfWorkNonceTrialsPerByte )
2013-01-22 20:29:49 +01:00
print ' (For pubkey message) Doing proof of work... '
initialHash = hashlib . sha512 ( payload ) . digest ( )
2013-05-29 22:01:12 +02:00
trialValue , nonce = proofofwork . run ( target , initialHash )
2013-01-22 20:29:49 +01:00
print ' (For pubkey message) Found proof of work ' , trialValue , ' Nonce: ' , nonce
payload = pack ( ' >Q ' , nonce ) + payload
2013-04-25 22:11:00 +02:00
""" t = (hash,payload,embeddedTime, ' no ' )
2013-05-02 17:53:54 +02:00
shared . sqlLock . acquire ( )
shared . sqlSubmitQueue . put ( ''' INSERT INTO pubkeys VALUES (?,?,?,?) ''' )
shared . sqlSubmitQueue . put ( t )
queryreturn = shared . sqlReturnQueue . get ( )
shared . sqlSubmitQueue . put ( ' commit ' )
shared . sqlLock . release ( ) """
2013-01-22 20:29:49 +01:00
inventoryHash = calculateInventoryHash ( payload )
objectType = ' pubkey '
2013-05-02 17:53:54 +02:00
shared . inventory [ inventoryHash ] = ( objectType , streamNumber , payload , embeddedTime )
2013-01-22 20:29:49 +01:00
2013-05-02 17:53:54 +02:00
shared . printLock . acquire ( )
2013-01-25 19:16:03 +01:00
print ' broadcasting inv with hash: ' , inventoryHash . encode ( ' hex ' )
2013-05-02 17:53:54 +02:00
shared . printLock . release ( )
shared . broadcastToSendDataQueues ( ( streamNumber , ' sendinv ' , inventoryHash ) )
shared . UISignalQueue . put ( ( ' updateStatusBar ' , ' ' ) )
shared . config . set ( myAddress , ' lastpubkeysendtime ' , str ( int ( time . time ( ) ) ) )
with open ( shared . appdata + ' keys.dat ' , ' wb ' ) as configfile :
2013-05-02 18:47:43 +02:00
shared . config . write ( configfile )
2013-01-22 20:29:49 +01:00
2012-11-19 20:45:05 +01:00
def sendBroadcast ( self ) :
2013-05-02 17:53:54 +02:00
shared . sqlLock . acquire ( )
2013-06-08 02:44:30 +02:00
t = ( ' broadcastqueued ' , )
2013-05-02 17:53:54 +02:00
shared . sqlSubmitQueue . put ( ''' SELECT fromaddress, subject, message, ackdata FROM sent WHERE status=? and folder= ' sent ' ''' )
shared . sqlSubmitQueue . put ( t )
queryreturn = shared . sqlReturnQueue . get ( )
shared . sqlLock . release ( )
2012-11-19 20:45:05 +01:00
for row in queryreturn :
fromaddress , subject , body , ackdata = row
status , addressVersionNumber , streamNumber , ripe = decodeAddress ( fromaddress )
2013-04-30 18:22:47 +02:00
if addressVersionNumber == 2 and int ( time . time ( ) ) < encryptedBroadcastSwitchoverTime :
2013-01-24 20:52:48 +01:00
#We need to convert our private keys to public keys in order to include them.
2013-03-19 18:32:37 +01:00
try :
2013-05-02 17:53:54 +02:00
privSigningKeyBase58 = shared . config . get ( fromaddress , ' privsigningkey ' )
privEncryptionKeyBase58 = shared . config . get ( fromaddress , ' privencryptionkey ' )
2013-03-19 18:32:37 +01:00
except :
2013-05-02 17:53:54 +02:00
shared . UISignalQueue . put ( ( ' updateSentItemStatusByAckdata ' , ( ackdata , ' Error! Could not find sender address (your address) in the keys.dat file. ' ) ) )
2013-03-19 18:32:37 +01:00
continue
2012-11-19 20:45:05 +01:00
2013-05-02 17:53:54 +02:00
privSigningKeyHex = shared . decodeWalletImportFormat ( privSigningKeyBase58 ) . encode ( ' hex ' )
privEncryptionKeyHex = shared . decodeWalletImportFormat ( privEncryptionKeyBase58 ) . encode ( ' hex ' )
2012-11-19 20:45:05 +01:00
2013-01-24 20:52:48 +01:00
pubSigningKey = highlevelcrypto . privToPub ( privSigningKeyHex ) . decode ( ' hex ' ) #At this time these pubkeys are 65 bytes long because they include the encoding byte which we won't be sending in the broadcast message.
2013-02-18 21:22:48 +01:00
pubEncryptionKey = highlevelcrypto . privToPub ( privEncryptionKeyHex ) . decode ( ' hex ' )
2013-06-11 00:53:15 +02:00
payload = pack ( ' >Q ' , ( int ( time . time ( ) ) + random . randrange ( - 300 , 300 ) ) ) #the current time plus or minus five minutes
2013-01-24 20:52:48 +01:00
payload + = encodeVarint ( 1 ) #broadcast version
payload + = encodeVarint ( addressVersionNumber )
payload + = encodeVarint ( streamNumber )
payload + = ' \x00 \x00 \x00 \x01 ' #behavior bitfield
payload + = pubSigningKey [ 1 : ]
2013-02-18 21:22:48 +01:00
payload + = pubEncryptionKey [ 1 : ]
2013-01-24 20:52:48 +01:00
payload + = ripe
payload + = ' \x02 ' #message encoding type
payload + = encodeVarint ( len ( ' Subject: ' + subject + ' \n ' + ' Body: ' + body ) ) #Type 2 is simple UTF-8 message encoding.
payload + = ' Subject: ' + subject + ' \n ' + ' Body: ' + body
2013-02-18 21:22:48 +01:00
2013-01-24 20:52:48 +01:00
signature = highlevelcrypto . sign ( payload , privSigningKeyHex )
payload + = encodeVarint ( len ( signature ) )
payload + = signature
2013-02-18 21:22:48 +01:00
2013-05-02 21:59:10 +02:00
target = 2 * * 64 / ( ( len ( payload ) + shared . networkDefaultPayloadLengthExtraBytes + 8 ) * shared . networkDefaultProofOfWorkNonceTrialsPerByte )
2013-01-24 20:52:48 +01:00
print ' (For broadcast message) Doing proof of work... '
2013-05-02 17:53:54 +02:00
shared . UISignalQueue . put ( ( ' updateSentItemStatusByAckdata ' , ( ackdata , ' Doing work necessary to send broadcast... ' ) ) )
2013-01-24 20:52:48 +01:00
initialHash = hashlib . sha512 ( payload ) . digest ( )
2013-05-29 22:01:12 +02:00
trialValue , nonce = proofofwork . run ( target , initialHash )
2013-01-24 20:52:48 +01:00
print ' (For broadcast message) Found proof of work ' , trialValue , ' Nonce: ' , nonce
2012-11-19 20:45:05 +01:00
2013-01-24 20:52:48 +01:00
payload = pack ( ' >Q ' , nonce ) + payload
2012-11-19 20:45:05 +01:00
2013-01-24 20:52:48 +01:00
inventoryHash = calculateInventoryHash ( payload )
objectType = ' broadcast '
2013-05-02 17:53:54 +02:00
shared . inventory [ inventoryHash ] = ( objectType , streamNumber , payload , int ( time . time ( ) ) )
2013-05-29 23:18:44 +02:00
print ' Broadcasting inv for my broadcast (within sendBroadcast function): ' , inventoryHash . encode ( ' hex ' )
2013-05-02 17:53:54 +02:00
shared . broadcastToSendDataQueues ( ( streamNumber , ' sendinv ' , inventoryHash ) )
2013-01-24 20:52:48 +01:00
2013-05-02 17:53:54 +02:00
shared . UISignalQueue . put ( ( ' updateSentItemStatusByAckdata ' , ( ackdata , ' Broadcast sent on ' + unicode ( strftime ( shared . config . get ( ' bitmessagesettings ' , ' timeformat ' ) , localtime ( int ( time . time ( ) ) ) ) , ' utf-8 ' ) ) ) )
2013-01-24 20:52:48 +01:00
2013-04-26 19:20:30 +02:00
#Update the status of the message in the 'sent' table to have a 'broadcastsent' status
2013-05-02 17:53:54 +02:00
shared . sqlLock . acquire ( )
2013-06-08 02:44:30 +02:00
t = ( ' broadcastsent ' , int ( time . time ( ) ) , fromaddress , subject , body , ' broadcastqueued ' )
2013-05-02 17:53:54 +02:00
shared . sqlSubmitQueue . put ( ' UPDATE sent SET status=?, lastactiontime=? WHERE fromaddress=? AND subject=? AND message=? AND status=? ' )
shared . sqlSubmitQueue . put ( t )
queryreturn = shared . sqlReturnQueue . get ( )
shared . sqlSubmitQueue . put ( ' commit ' )
shared . sqlLock . release ( )
2013-04-30 18:22:47 +02:00
elif addressVersionNumber == 3 or int ( time . time ( ) ) > encryptedBroadcastSwitchoverTime :
2013-04-26 19:20:30 +02:00
#We need to convert our private keys to public keys in order to include them.
try :
2013-05-02 17:53:54 +02:00
privSigningKeyBase58 = shared . config . get ( fromaddress , ' privsigningkey ' )
privEncryptionKeyBase58 = shared . config . get ( fromaddress , ' privencryptionkey ' )
2013-04-26 19:20:30 +02:00
except :
2013-05-02 17:53:54 +02:00
shared . UISignalQueue . put ( ( ' updateSentItemStatusByAckdata ' , ( ackdata , ' Error! Could not find sender address (your address) in the keys.dat file. ' ) ) )
2013-04-26 19:20:30 +02:00
continue
2013-05-02 17:53:54 +02:00
privSigningKeyHex = shared . decodeWalletImportFormat ( privSigningKeyBase58 ) . encode ( ' hex ' )
privEncryptionKeyHex = shared . decodeWalletImportFormat ( privEncryptionKeyBase58 ) . encode ( ' hex ' )
2013-04-26 19:20:30 +02:00
pubSigningKey = highlevelcrypto . privToPub ( privSigningKeyHex ) . decode ( ' hex ' ) #At this time these pubkeys are 65 bytes long because they include the encoding byte which we won't be sending in the broadcast message.
pubEncryptionKey = highlevelcrypto . privToPub ( privEncryptionKeyHex ) . decode ( ' hex ' )
2013-06-11 00:53:15 +02:00
payload = pack ( ' >Q ' , ( int ( time . time ( ) ) + random . randrange ( - 300 , 300 ) ) ) #the current time plus or minus five minutes
2013-04-26 19:20:30 +02:00
payload + = encodeVarint ( 2 ) #broadcast version
payload + = encodeVarint ( streamNumber )
dataToEncrypt = encodeVarint ( 2 ) #broadcast version
dataToEncrypt + = encodeVarint ( addressVersionNumber )
dataToEncrypt + = encodeVarint ( streamNumber )
dataToEncrypt + = ' \x00 \x00 \x00 \x01 ' #behavior bitfield
dataToEncrypt + = pubSigningKey [ 1 : ]
dataToEncrypt + = pubEncryptionKey [ 1 : ]
if addressVersionNumber > = 3 :
2013-05-02 17:53:54 +02:00
dataToEncrypt + = encodeVarint ( shared . config . getint ( fromaddress , ' noncetrialsperbyte ' ) )
dataToEncrypt + = encodeVarint ( shared . config . getint ( fromaddress , ' payloadlengthextrabytes ' ) )
2013-04-26 19:20:30 +02:00
dataToEncrypt + = ' \x02 ' #message encoding type
dataToEncrypt + = encodeVarint ( len ( ' Subject: ' + subject + ' \n ' + ' Body: ' + body ) ) #Type 2 is simple UTF-8 message encoding.
dataToEncrypt + = ' Subject: ' + subject + ' \n ' + ' Body: ' + body
2013-06-08 00:58:28 +02:00
signature = highlevelcrypto . sign ( dataToEncrypt , privSigningKeyHex )
2013-04-26 19:20:30 +02:00
dataToEncrypt + = encodeVarint ( len ( signature ) )
dataToEncrypt + = signature
privEncryptionKey = hashlib . sha512 ( encodeVarint ( addressVersionNumber ) + encodeVarint ( streamNumber ) + ripe ) . digest ( ) [ : 32 ]
pubEncryptionKey = pointMult ( privEncryptionKey )
payload + = highlevelcrypto . encrypt ( dataToEncrypt , pubEncryptionKey . encode ( ' hex ' ) )
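#In other words, the broadcast is encrypted to a keypair derived deterministically from the
#broadcaster's own address, so anyone who knows the address can derive the same private key and
#decrypt it. A rough sketch of the subscriber side (assuming highlevelcrypto also exposes a
#matching decrypt(ciphertext, hexPrivkey); encryptedData here stands for the encrypted portion
#of a received broadcast):
#    status, addressVersion, stream, ripe = decodeAddress(subscribedAddress)
#    privKey = hashlib.sha512(encodeVarint(addressVersion) + encodeVarint(stream) + ripe).digest()[:32]
#    plaintext = highlevelcrypto.decrypt(encryptedData, privKey.encode('hex'))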
2013-05-02 21:59:10 +02:00
target = 2 * * 64 / ( ( len ( payload ) + shared . networkDefaultPayloadLengthExtraBytes + 8 ) * shared . networkDefaultProofOfWorkNonceTrialsPerByte )
2013-04-26 19:20:30 +02:00
print ' (For broadcast message) Doing proof of work... '
2013-05-02 17:53:54 +02:00
shared . UISignalQueue . put ( ( ' updateSentItemStatusByAckdata ' , ( ackdata , ' Doing work necessary to send broadcast... ' ) ) )
2013-04-26 19:20:30 +02:00
initialHash = hashlib . sha512 ( payload ) . digest ( )
2013-05-29 22:01:12 +02:00
trialValue , nonce = proofofwork . run ( target , initialHash )
2013-04-26 19:20:30 +02:00
print ' (For broadcast message) Found proof of work ' , trialValue , ' Nonce: ' , nonce
payload = pack ( ' >Q ' , nonce ) + payload
inventoryHash = calculateInventoryHash ( payload )
objectType = ' broadcast '
2013-05-02 17:53:54 +02:00
shared . inventory [ inventoryHash ] = ( objectType , streamNumber , payload , int ( time . time ( ) ) )
2013-04-26 19:20:30 +02:00
print ' sending inv (within sendBroadcast function) '
2013-05-02 17:53:54 +02:00
shared . broadcastToSendDataQueues ( ( streamNumber , ' sendinv ' , inventoryHash ) )
2013-04-26 19:20:30 +02:00
2013-05-02 17:53:54 +02:00
shared . UISignalQueue . put ( ( ' updateSentItemStatusByAckdata ' , ( ackdata , ' Broadcast sent on ' + unicode ( strftime ( shared . config . get ( ' bitmessagesettings ' , ' timeformat ' ) , localtime ( int ( time . time ( ) ) ) ) , ' utf-8 ' ) ) ) )
2013-04-26 19:20:30 +02:00
2013-01-24 20:52:48 +01:00
#Update the status of the message in the 'sent' table to have a 'broadcastsent' status
2013-05-02 17:53:54 +02:00
shared . sqlLock . acquire ( )
2013-06-08 02:44:30 +02:00
t = ( ' broadcastsent ' , int ( time . time ( ) ) , fromaddress , subject , body , ' broadcastqueued ' )
2013-05-02 17:53:54 +02:00
shared . sqlSubmitQueue . put ( ' UPDATE sent SET status=?, lastactiontime=? WHERE fromaddress=? AND subject=? AND message=? AND status=? ' )
shared . sqlSubmitQueue . put ( t )
queryreturn = shared . sqlReturnQueue . get ( )
shared . sqlSubmitQueue . put ( ' commit ' )
shared . sqlLock . release ( )
2013-01-24 20:52:48 +01:00
else :
2013-05-02 17:53:54 +02:00
shared . printLock . acquire ( )
2013-04-25 22:11:00 +02:00
sys . stderr . write ( ' Error: In the singleWorker thread, the sendBroadcast function doesn \' t understand the address version. \n ' )
2013-05-02 17:53:54 +02:00
shared . printLock . release ( )
2012-11-19 20:45:05 +01:00
2013-05-29 23:18:44 +02:00
def sendMsg ( self ) :
#Check to see if there are any messages queued to be sent
2013-05-02 17:53:54 +02:00
shared . sqlLock . acquire ( )
2013-06-03 21:48:53 +02:00
shared . sqlSubmitQueue . put ( ''' SELECT DISTINCT toaddress FROM sent WHERE (status= ' msgqueued ' AND folder= ' sent ' ) ''' )
2013-05-29 23:18:44 +02:00
shared . sqlSubmitQueue . put ( ' ' )
2013-05-02 17:53:54 +02:00
queryreturn = shared . sqlReturnQueue . get ( )
2013-05-29 23:18:44 +02:00
shared . sqlLock . release ( )
for row in queryreturn : #For each address to which we need to send a message, check to see if we have its pubkey already.
toaddress , = row
toripe = decodeAddress ( toaddress ) [ 3 ]
shared . sqlLock . acquire ( )
shared . sqlSubmitQueue . put ( ''' SELECT hash FROM pubkeys WHERE hash=? ''' )
shared . sqlSubmitQueue . put ( ( toripe , ) )
queryreturn = shared . sqlReturnQueue . get ( )
shared . sqlLock . release ( )
if queryreturn != [ ] : #If we have the needed pubkey, set the status to doingmsgpow (we'll do it further down)
t = ( toaddress , )
shared . sqlLock . acquire ( )
shared . sqlSubmitQueue . put ( ''' UPDATE sent SET status= ' doingmsgpow ' WHERE toaddress=? AND status= ' msgqueued ' ''' )
shared . sqlSubmitQueue . put ( t )
shared . sqlReturnQueue . get ( )
shared . sqlSubmitQueue . put ( ' commit ' )
shared . sqlLock . release ( )
else : #We don't have the needed pubkey. Set the status to 'awaitingpubkey' and request it if we haven't already
if toripe in neededPubkeys :
#We already sent a request for the pubkey
t = ( toaddress , )
shared . sqlLock . acquire ( )
shared . sqlSubmitQueue . put ( ''' UPDATE sent SET status= ' awaitingpubkey ' WHERE toaddress=? AND status= ' msgqueued ' ''' )
shared . sqlSubmitQueue . put ( t )
shared . sqlReturnQueue . get ( )
shared . sqlSubmitQueue . put ( ' commit ' )
shared . sqlLock . release ( )
shared . UISignalQueue . put ( ( ' updateSentItemStatusByHash ' , ( toripe , ' Encryption key was requested earlier. ' ) ) )
else :
#We have not yet sent a request for the pubkey
t = ( toaddress , )
shared . sqlLock . acquire ( )
shared . sqlSubmitQueue . put ( ''' UPDATE sent SET status= ' doingpubkeypow ' WHERE toaddress=? AND status= ' msgqueued ' ''' )
shared . sqlSubmitQueue . put ( t )
shared . sqlReturnQueue . get ( )
shared . sqlSubmitQueue . put ( ' commit ' )
shared . sqlLock . release ( )
shared . UISignalQueue . put ( ( ' updateSentItemStatusByHash ' , ( toripe , ' Sending a request for the recipient \' s encryption key. ' ) ) )
self . requestPubKey ( toaddress )
shared . sqlLock . acquire ( )
2013-06-11 20:15:17 +02:00
#Get all messages that are ready to be sent, and also all messages which we have sent in the last 28 days which were previously marked as 'toodifficult'. If the user has raised the maximum acceptable difficulty then those messages may now be sendable.
2013-06-11 00:53:15 +02:00
shared . sqlSubmitQueue . put ( ''' SELECT toaddress, toripe, fromaddress, subject, message, ackdata, status FROM sent WHERE (status= ' doingmsgpow ' or status= ' forcepow ' or (status= ' toodifficult ' and lastactiontime>?)) and folder= ' sent ' ''' )
shared . sqlSubmitQueue . put ( ( int ( time . time ( ) ) - 2419200 , ) )
2013-05-02 17:53:54 +02:00
queryreturn = shared . sqlReturnQueue . get ( )
shared . sqlLock . release ( )
2013-06-11 20:15:17 +02:00
for row in queryreturn : #For each message we need to send..
2013-06-11 00:53:15 +02:00
toaddress , toripe , fromaddress , subject , message , ackdata , status = row
2013-06-11 20:15:17 +02:00
#There is a remote possibility that we may no longer have the recipient's pubkey. Let us make sure we still have it; otherwise the sendMsg function will appear to freeze. This can happen if the user sends a message but doesn't let the POW function finish, then leaves the client off for so long that the needed pubkey expires and is deleted.
2013-05-29 23:18:44 +02:00
shared . sqlLock . acquire ( )
shared . sqlSubmitQueue . put ( ''' SELECT hash FROM pubkeys WHERE hash=? ''' )
shared . sqlSubmitQueue . put ( ( toripe , ) )
queryreturn = shared . sqlReturnQueue . get ( )
shared . sqlLock . release ( )
2013-06-10 15:40:51 +02:00
if queryreturn == [ ] and toripe not in neededPubkeys :
#We no longer have the needed pubkey and we haven't requested it.
2013-05-29 23:18:44 +02:00
shared . printLock . acquire ( )
sys . stderr . write ( ' For some reason, the status of a message in our outbox is \' doingmsgpow \' even though we lack the pubkey. Here is the RIPE hash of the needed pubkey: %s \n ' % toripe . encode ( ' hex ' ) )
shared . printLock . release ( )
t = ( toaddress , )
shared . sqlLock . acquire ( )
shared . sqlSubmitQueue . put ( ''' UPDATE sent SET status= ' msgqueued ' WHERE toaddress=? AND status= ' doingmsgpow ' ''' )
shared . sqlSubmitQueue . put ( t )
shared . sqlReturnQueue . get ( )
shared . sqlSubmitQueue . put ( ' commit ' )
shared . sqlLock . release ( )
shared . UISignalQueue . put ( ( ' updateSentItemStatusByHash ' , ( toripe , ' Sending a request for the recipient \' s encryption key. ' ) ) )
self . requestPubKey ( toaddress )
2013-06-10 15:40:51 +02:00
continue
2013-01-04 23:21:33 +01:00
ackdataForWhichImWatching [ ackdata ] = 0
2013-01-18 23:38:09 +01:00
toStatus , toAddressVersionNumber , toStreamNumber , toHash = decodeAddress ( toaddress )
fromStatus , fromAddressVersionNumber , fromStreamNumber , fromHash = decodeAddress ( fromaddress )
2013-06-11 08:33:48 +02:00
shared . UISignalQueue . put ( ( ' updateSentItemStatusByAckdata ' , ( ackdata , ' Looking up the receiver \' s public key ' ) ) )
2013-05-02 17:53:54 +02:00
shared . printLock . acquire ( )
2013-01-24 22:07:24 +01:00
print ' Found a message in our database that needs to be sent with this pubkey. '
2013-06-10 15:40:51 +02:00
print ' First 150 characters of message: ' , repr ( message [ : 150 ] )
2013-05-02 17:53:54 +02:00
shared . printLock . release ( )
2013-06-11 00:53:15 +02:00
2013-06-11 20:15:17 +02:00
#mark the pubkey as 'usedpersonally' so that we don't ever delete it.
2013-06-11 00:53:15 +02:00
shared . sqlLock . acquire ( )
t = ( toripe , )
shared . sqlSubmitQueue . put ( ''' UPDATE pubkeys SET usedpersonally= ' yes ' WHERE hash=? ''' )
shared . sqlSubmitQueue . put ( t )
shared . sqlReturnQueue . get ( )
shared . sqlSubmitQueue . put ( ' commit ' )
2013-06-11 20:15:17 +02:00
#Let us fetch the recipient's public key out of our database. If the required proof of work difficulty is too hard then we'll abort.
shared . sqlSubmitQueue . put ( ' SELECT transmitdata FROM pubkeys WHERE hash=? ' )
shared . sqlSubmitQueue . put ( ( toripe , ) )
queryreturn = shared . sqlReturnQueue . get ( )
2013-06-11 00:53:15 +02:00
shared . sqlLock . release ( )
if queryreturn == [ ] :
shared . printLock . acquire ( )
sys . stderr . write ( ' (within sendMsg) The needed pubkey was not found. This should never happen. Aborting send. \n ' )
shared . printLock . release ( )
return
for row in queryreturn :
pubkeyPayload , = row
2013-06-11 20:15:17 +02:00
#The pubkey message is stored the way we originally received it which means that we need to read beyond things like the nonce and time to get to the actual public keys.
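#For reference, the stored pubkey payload that the code below walks through is laid out as
#follows (field sizes taken from the parsing itself): 8-byte POW nonce, 4-byte embedded time
#(or 8-byte during the 32-bit to 64-bit time transition), 1-byte address version, stream number
#varint, 4-byte behavior bitfield, 64-byte public signing key, 64-byte public encryption key,
#and, for version 3 addresses only, two further varints: nonce trials per byte and payload
#length extra bytes.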
2013-06-11 00:53:15 +02:00
readPosition = 8 #to bypass the nonce
pubkeyEmbeddedTime , = unpack ( ' >I ' , pubkeyPayload [ readPosition : readPosition + 4 ] )
#This section is used for the transition from 32 bit time to 64 bit time in the protocol.
if pubkeyEmbeddedTime == 0 :
pubkeyEmbeddedTime , = unpack ( ' >Q ' , pubkeyPayload [ readPosition : readPosition + 8 ] )
readPosition + = 8
else :
readPosition + = 4
readPosition + = 1 #to bypass the address version whose length is definitely 1
streamNumber , streamNumberLength = decodeVarint ( pubkeyPayload [ readPosition : readPosition + 10 ] )
readPosition + = streamNumberLength
behaviorBitfield = pubkeyPayload [ readPosition : readPosition + 4 ]
readPosition + = 4 #to bypass the bitfield of behaviors
#pubSigningKeyBase256 = pubkeyPayload[readPosition:readPosition+64] #We don't use this key for anything here.
readPosition + = 64
pubEncryptionKeyBase256 = pubkeyPayload [ readPosition : readPosition + 64 ]
readPosition + = 64
if toAddressVersionNumber == 2 :
requiredAverageProofOfWorkNonceTrialsPerByte = shared . networkDefaultProofOfWorkNonceTrialsPerByte
requiredPayloadLengthExtraBytes = shared . networkDefaultPayloadLengthExtraBytes
2013-06-11 20:15:17 +02:00
shared . UISignalQueue . put ( ( ' updateSentItemStatusByAckdata ' , ( ackdata , ' Doing work necessary to send message. (There is no required difficulty for version 2 addresses like this.) ' ) ) )
2013-06-11 00:53:15 +02:00
elif toAddressVersionNumber == 3 :
requiredAverageProofOfWorkNonceTrialsPerByte , varintLength = decodeVarint ( pubkeyPayload [ readPosition : readPosition + 10 ] )
readPosition + = varintLength
requiredPayloadLengthExtraBytes , varintLength = decodeVarint ( pubkeyPayload [ readPosition : readPosition + 10 ] )
readPosition + = varintLength
if requiredAverageProofOfWorkNonceTrialsPerByte < shared . networkDefaultProofOfWorkNonceTrialsPerByte : #We still have to meet a minimum POW difficulty regardless of what they say is allowed in order to get our message to propagate through the network.
requiredAverageProofOfWorkNonceTrialsPerByte = shared . networkDefaultProofOfWorkNonceTrialsPerByte
if requiredPayloadLengthExtraBytes < shared . networkDefaultPayloadLengthExtraBytes :
requiredPayloadLengthExtraBytes = shared . networkDefaultPayloadLengthExtraBytes
2013-06-11 20:15:17 +02:00
shared . UISignalQueue . put ( ( ' updateSentItemStatusByAckdata ' , ( ackdata , ' Doing work necessary to send message. \n Receiver \' s required difficulty: ' + str ( float ( requiredAverageProofOfWorkNonceTrialsPerByte ) / shared . networkDefaultProofOfWorkNonceTrialsPerByte ) + ' and ' + str ( float ( requiredPayloadLengthExtraBytes ) / shared . networkDefaultPayloadLengthExtraBytes ) ) ) )
2013-06-11 00:53:15 +02:00
if status != ' forcepow ' :
if ( requiredAverageProofOfWorkNonceTrialsPerByte > shared . config . getint ( ' bitmessagesettings ' , ' maxacceptablenoncetrialsperbyte ' ) and shared . config . getint ( ' bitmessagesettings ' , ' maxacceptablenoncetrialsperbyte ' ) != 0 ) or ( requiredPayloadLengthExtraBytes > shared . config . getint ( ' bitmessagesettings ' , ' maxacceptablepayloadlengthextrabytes ' ) and shared . config . getint ( ' bitmessagesettings ' , ' maxacceptablepayloadlengthextrabytes ' ) != 0 ) :
#The demanded difficulty is more than we are willing to do.
shared . sqlLock . acquire ( )
t = ( ackdata , )
2013-06-11 08:33:48 +02:00
shared . sqlSubmitQueue . put ( ''' UPDATE sent SET status= ' toodifficult ' WHERE ackdata=? ''' )
2013-06-11 00:53:15 +02:00
shared . sqlSubmitQueue . put ( t )
shared . sqlReturnQueue . get ( )
shared . sqlSubmitQueue . put ( ' commit ' )
shared . sqlLock . release ( )
shared . UISignalQueue . put ( ( ' updateSentItemStatusByAckdata ' , ( ackdata , ' Problem: The work demanded by the recipient ( ' + str ( float ( requiredAverageProofOfWorkNonceTrialsPerByte ) / shared . networkDefaultProofOfWorkNonceTrialsPerByte ) + ' and ' + str ( float ( requiredPayloadLengthExtraBytes ) / shared . networkDefaultPayloadLengthExtraBytes ) + ' ) is more difficult than you are willing to do. ' + unicode ( strftime ( shared . config . get ( ' bitmessagesettings ' , ' timeformat ' ) , localtime ( int ( time . time ( ) ) ) ) , ' utf-8 ' ) ) ) )
continue
embeddedTime = pack ( ' >Q ' , ( int ( time . time ( ) ) + random . randrange ( - 300 , 300 ) ) ) #the current time plus or minus five minutes. We will use this time both for our message and for the ackdata packed within our message.
2013-01-18 23:38:09 +01:00
if fromAddressVersionNumber == 2 :
payload = ' \x01 ' #Message version.
payload + = encodeVarint ( fromAddressVersionNumber )
payload + = encodeVarint ( fromStreamNumber )
2013-02-07 22:31:15 +01:00
payload + = ' \x00 \x00 \x00 \x01 ' #Bitfield of features and behaviors that can be expected from me. (See https://bitmessage.org/wiki/Protocol_specification#Pubkey_bitfield_features )
2013-01-18 23:38:09 +01:00
#We need to convert our private keys to public keys in order to include them.
2013-03-19 18:32:37 +01:00
try :
2013-05-02 17:53:54 +02:00
privSigningKeyBase58 = shared . config . get ( fromaddress , ' privsigningkey ' )
privEncryptionKeyBase58 = shared . config . get ( fromaddress , ' privencryptionkey ' )
2013-03-19 18:32:37 +01:00
except :
2013-05-02 17:53:54 +02:00
shared . UISignalQueue . put ( ( ' updateSentItemStatusByAckdata ' , ( ackdata , ' Error! Could not find sender address (your address) in the keys.dat file. ' ) ) )
2013-03-19 18:32:37 +01:00
continue
2013-01-18 23:38:09 +01:00
2013-05-02 17:53:54 +02:00
privSigningKeyHex = shared . decodeWalletImportFormat ( privSigningKeyBase58 ) . encode ( ' hex ' )
privEncryptionKeyHex = shared . decodeWalletImportFormat ( privEncryptionKeyBase58 ) . encode ( ' hex ' )
2013-01-18 23:38:09 +01:00
pubSigningKey = highlevelcrypto . privToPub ( privSigningKeyHex ) . decode ( ' hex ' )
pubEncryptionKey = highlevelcrypto . privToPub ( privEncryptionKeyHex ) . decode ( ' hex ' )
2013-02-07 22:31:15 +01:00
payload + = pubSigningKey [ 1 : ] #The \x04 on the beginning of the public keys are not sent. This way there is only one acceptable way to encode and send a public key.
2013-01-18 23:38:09 +01:00
payload + = pubEncryptionKey [ 1 : ]
2013-02-07 22:31:15 +01:00
payload + = toHash #This hash will be checked by the receiver of the message to verify that toHash belongs to them. This prevents a Surreptitious Forwarding Attack.
payload + = ' \x02 ' #Type 2 is simple UTF-8 message encoding as specified on the Protocol Specification on the Bitmessage Wiki.
2013-01-18 23:38:09 +01:00
messageToTransmit = ' Subject: ' + subject + ' \n ' + ' Body: ' + message
payload + = encodeVarint ( len ( messageToTransmit ) )
payload + = messageToTransmit
2013-02-11 22:28:38 +01:00
fullAckPayload = self . generateFullAckMessage ( ackdata , toStreamNumber , embeddedTime ) #The fullAckPayload is a normal msg protocol message with the proof of work already completed that the receiver of this message can easily send out.
2013-01-18 23:38:09 +01:00
payload + = encodeVarint ( len ( fullAckPayload ) )
payload + = fullAckPayload
signature = highlevelcrypto . sign ( payload , privSigningKeyHex )
payload + = encodeVarint ( len ( signature ) )
payload + = signature
2013-04-24 21:48:46 +02:00
if fromAddressVersionNumber == 3 :
payload = ' \x01 ' #Message version.
2013-01-18 23:38:09 +01:00
payload + = encodeVarint ( fromAddressVersionNumber )
payload + = encodeVarint ( fromStreamNumber )
2013-04-24 21:48:46 +02:00
payload + = ' \x00 \x00 \x00 \x01 ' #Bitfield of features and behaviors that can be expected from me. (See https://bitmessage.org/wiki/Protocol_specification#Pubkey_bitfield_features )
2012-11-19 20:45:05 +01:00
2013-04-24 21:48:46 +02:00
#We need to convert our private keys to public keys in order to include them.
2013-01-25 19:29:31 +01:00
try :
2013-05-02 17:53:54 +02:00
privSigningKeyBase58 = shared . config . get ( fromaddress , ' privsigningkey ' )
privEncryptionKeyBase58 = shared . config . get ( fromaddress , ' privencryptionkey ' )
2013-01-25 19:29:31 +01:00
except :
2013-05-02 17:53:54 +02:00
shared . UISignalQueue . put ( ( ' updateSentItemStatusByAckdata ' , ( ackdata , ' Error! Could not find sender address (your address) in the keys.dat file. ' ) ) )
2013-04-24 21:48:46 +02:00
continue
2013-05-02 17:53:54 +02:00
privSigningKeyHex = shared . decodeWalletImportFormat ( privSigningKeyBase58 ) . encode ( ' hex ' )
privEncryptionKeyHex = shared . decodeWalletImportFormat ( privEncryptionKeyBase58 ) . encode ( ' hex ' )
2013-04-24 21:48:46 +02:00
pubSigningKey = highlevelcrypto . privToPub ( privSigningKeyHex ) . decode ( ' hex ' )
pubEncryptionKey = highlevelcrypto . privToPub ( privEncryptionKeyHex ) . decode ( ' hex ' )
2012-11-19 20:45:05 +01:00
2013-04-24 21:48:46 +02:00
payload + = pubSigningKey [ 1 : ] #The \x04 on the beginning of the public keys are not sent. This way there is only one acceptable way to encode and send a public key.
payload + = pubEncryptionKey [ 1 : ]
2013-04-26 22:07:58 +02:00
#If the receiver of our message is in our address book, subscriptions list, or whitelist then we will allow them to do the network-minimum proof of work. Let us check to see if the receiver is in any of those lists.
2013-05-02 17:53:54 +02:00
if shared . isAddressInMyAddressBookSubscriptionsListOrWhitelist ( toaddress ) :
2013-05-02 21:59:10 +02:00
payload + = encodeVarint ( shared . networkDefaultProofOfWorkNonceTrialsPerByte )
payload + = encodeVarint ( shared . networkDefaultPayloadLengthExtraBytes )
2013-04-26 22:07:58 +02:00
else :
2013-05-02 17:53:54 +02:00
payload + = encodeVarint ( shared . config . getint ( fromaddress , ' noncetrialsperbyte ' ) )
payload + = encodeVarint ( shared . config . getint ( fromaddress , ' payloadlengthextrabytes ' ) )
2012-11-19 20:45:05 +01:00
2013-04-24 21:48:46 +02:00
payload + = toHash #This hash will be checked by the receiver of the message to verify that toHash belongs to them. This prevents a Surreptitious Forwarding Attack.
payload + = ' \x02 ' #Type 2 is simple UTF-8 message encoding as specified on the Protocol Specification on the Bitmessage Wiki.
2013-01-18 23:38:09 +01:00
messageToTransmit = ' Subject: ' + subject + ' \n ' + ' Body: ' + message
payload + = encodeVarint ( len ( messageToTransmit ) )
payload + = messageToTransmit
2013-04-24 21:48:46 +02:00
fullAckPayload = self . generateFullAckMessage ( ackdata , toStreamNumber , embeddedTime ) #The fullAckPayload is a normal msg protocol message with the proof of work already completed that the receiver of this message can easily send out.
2013-01-18 23:38:09 +01:00
payload + = encodeVarint ( len ( fullAckPayload ) )
payload + = fullAckPayload
2013-04-24 21:48:46 +02:00
signature = highlevelcrypto . sign ( payload , privSigningKeyHex )
payload + = encodeVarint ( len ( signature ) )
payload + = signature
2012-11-19 20:45:05 +01:00
2013-06-11 00:53:15 +02:00
#We have assembled the data that will be encrypted.
encrypted = highlevelcrypto . encrypt ( payload , " 04 " + pubEncryptionKeyBase256 . encode ( ' hex ' ) )
2013-06-11 20:15:17 +02:00
encryptedPayload = embeddedTime + encodeVarint ( toStreamNumber ) + encrypted
target = 2 * * 64 / ( ( len ( encryptedPayload ) + requiredPayloadLengthExtraBytes + 8 ) * requiredAverageProofOfWorkNonceTrialsPerByte )
2013-05-02 17:53:54 +02:00
shared . printLock . acquire ( )
2013-05-02 21:59:10 +02:00
print ' (For msg message) Doing proof of work. Total required difficulty: ' , float ( requiredAverageProofOfWorkNonceTrialsPerByte ) / shared . networkDefaultProofOfWorkNonceTrialsPerByte , ' Required small message difficulty: ' , float ( requiredPayloadLengthExtraBytes ) / shared . networkDefaultPayloadLengthExtraBytes
2013-05-02 17:53:54 +02:00
shared . printLock . release ( )
2012-11-20 22:34:13 +01:00
powStartTime = time . time ( )
2013-06-11 20:15:17 +02:00
initialHash = hashlib . sha512 ( encryptedPayload ) . digest ( )
2013-05-29 22:01:12 +02:00
trialValue , nonce = proofofwork . run ( target , initialHash )
2013-06-11 08:33:48 +02:00
shared . printLock . acquire ( )
2012-11-19 20:45:05 +01:00
print ' (For msg message) Found proof of work ' , trialValue , ' Nonce: ' , nonce
2013-03-19 18:32:37 +01:00
try :
print ' POW took ' , int ( time . time ( ) - powStartTime ) , ' seconds. ' , nonce / ( time . time ( ) - powStartTime ) , ' nonce trials per second. '
except :
pass
2013-06-11 08:33:48 +02:00
shared . printLock . release ( )
2013-06-11 20:15:17 +02:00
encryptedPayload = pack ( ' >Q ' , nonce ) + encryptedPayload
2012-11-19 20:45:05 +01:00
2013-06-11 20:15:17 +02:00
inventoryHash = calculateInventoryHash ( encryptedPayload )
2012-11-19 20:45:05 +01:00
objectType = ' msg '
2013-06-11 20:15:17 +02:00
shared . inventory [ inventoryHash ] = ( objectType , toStreamNumber , encryptedPayload , int ( time . time ( ) ) )
2013-05-02 17:53:54 +02:00
shared . UISignalQueue . put ( ( ' updateSentItemStatusByAckdata ' , ( ackdata , ' Message sent. Waiting on acknowledgement. Sent on ' + unicode ( strftime ( shared . config . get ( ' bitmessagesettings ' , ' timeformat ' ) , localtime ( int ( time . time ( ) ) ) ) , ' utf-8 ' ) ) ) )
2013-05-29 23:18:44 +02:00
print ' Broadcasting inv for my msg(within sendmsg function): ' , inventoryHash . encode ( ' hex ' )
2013-05-02 17:53:54 +02:00
shared . broadcastToSendDataQueues ( ( streamNumber , ' sendinv ' , inventoryHash ) )
2012-11-19 20:45:05 +01:00
2013-06-10 15:40:51 +02:00
#Update the status of the message in the 'sent' table to have a 'msgsent' status
2013-05-02 17:53:54 +02:00
shared . sqlLock . acquire ( )
2013-06-11 08:33:48 +02:00
t = ( ackdata , )
2013-06-11 20:15:17 +02:00
shared . sqlSubmitQueue . put ( ''' UPDATE sent SET status= ' msgsent ' WHERE ackdata=? AND (status= ' doingmsgpow ' or status= ' forcepow ' ) ''' )
2013-05-02 17:53:54 +02:00
shared . sqlSubmitQueue . put ( t )
queryreturn = shared . sqlReturnQueue . get ( )
shared . sqlSubmitQueue . put ( ' commit ' )
shared . sqlLock . release ( )
2012-11-19 20:45:05 +01:00
2013-05-29 23:18:44 +02:00
def requestPubKey ( self , toAddress ) :
toStatus , addressVersionNumber , streamNumber , ripe = decodeAddress ( toAddress )
2013-06-03 21:48:53 +02:00
if toStatus != ' success ' :
shared . printLock . acquire ( )
sys . stderr . write ( ' Very abnormal error occurred in requestPubKey. toAddress is: ' + repr ( toAddress ) + ' . Please report this error to Atheros. ' )
shared . printLock . release ( )
return
2013-05-29 23:18:44 +02:00
neededPubkeys [ ripe ] = 0
2013-06-11 00:53:15 +02:00
payload = pack ( ' >Q ' , ( int ( time . time ( ) ) + random . randrange ( - 300 , 300 ) ) ) #the current time plus or minus five minutes.
2012-11-19 20:45:05 +01:00
payload + = encodeVarint ( addressVersionNumber )
payload + = encodeVarint ( streamNumber )
payload + = ripe
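#The getpubkey payload assembled above is deliberately small: an 8-byte embedded time, the
#address version and stream number as varints, and the 20-byte RIPE hash of the recipient's
#keys. For a typical address that is 8 + 1 + 1 + 20 = 30 bytes before the POW nonce is prepended.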
2013-05-02 17:53:54 +02:00
shared . printLock . acquire ( )
2013-01-18 23:38:09 +01:00
print ' making request for pubkey with ripe: ' , ripe . encode ( ' hex ' )
2013-05-02 17:53:54 +02:00
shared . printLock . release ( )
2012-11-19 20:45:05 +01:00
#print 'trial value', trialValue
2013-02-18 21:01:47 +01:00
statusbar = ' Doing the computations necessary to request the recipient \' s public key. '
2013-05-02 17:53:54 +02:00
shared . UISignalQueue . put ( ( ' updateStatusBar ' , statusbar ) )
2013-05-29 23:18:44 +02:00
shared . UISignalQueue . put ( ( ' updateSentItemStatusByHash ' , ( ripe , ' Doing work necessary to request encryption key. ' ) ) )
2013-05-02 21:59:10 +02:00
target = 2 * * 64 / ( ( len ( payload ) + shared . networkDefaultPayloadLengthExtraBytes + 8 ) * shared . networkDefaultProofOfWorkNonceTrialsPerByte )
2012-11-23 09:22:56 +01:00
initialHash = hashlib . sha512 ( payload ) . digest ( )
2013-05-29 22:01:12 +02:00
trialValue , nonce = proofofwork . run ( target , initialHash )
2013-05-02 17:53:54 +02:00
shared . printLock . acquire ( )
2012-11-19 20:45:05 +01:00
print ' Found proof of work ' , trialValue , ' Nonce: ' , nonce
2013-05-02 17:53:54 +02:00
shared . printLock . release ( )
2012-11-19 20:45:05 +01:00
payload = pack ( ' >Q ' , nonce ) + payload
inventoryHash = calculateInventoryHash ( payload )
objectType = ' getpubkey '
2013-05-02 17:53:54 +02:00
shared . inventory [ inventoryHash ] = ( objectType , streamNumber , payload , int ( time . time ( ) ) )
2012-11-19 20:45:05 +01:00
print ' sending inv (for the getpubkey message) '
2013-05-02 17:53:54 +02:00
shared . broadcastToSendDataQueues ( ( streamNumber , ' sendinv ' , inventoryHash ) )
2012-11-19 20:45:05 +01:00
2013-05-29 23:18:44 +02:00
t = ( toAddress , )
shared . sqlLock . acquire ( )
shared . sqlSubmitQueue . put ( ''' UPDATE sent SET status= ' awaitingpubkey ' WHERE toaddress=? AND status= ' doingpubkeypow ' ''' )
shared . sqlSubmitQueue . put ( t )
shared . sqlReturnQueue . get ( )
shared . sqlSubmitQueue . put ( ' commit ' )
shared . sqlLock . release ( )
2013-05-02 17:53:54 +02:00
shared.UISignalQueue.put(('updateStatusBar', 'Broadcasting the public key request. This program will auto-retry if they are offline.'))
shared . UISignalQueue . put ( ( ' updateSentItemStatusByHash ' , ( ripe , ' Sending public key request. Waiting for reply. Requested at ' + unicode ( strftime ( shared . config . get ( ' bitmessagesettings ' , ' timeformat ' ) , localtime ( int ( time . time ( ) ) ) ) , ' utf-8 ' ) ) ) )
2012-11-19 20:45:05 +01:00
2013-02-11 22:28:38 +01:00
def generateFullAckMessage ( self , ackdata , toStreamNumber , embeddedTime ) :
2013-04-26 22:07:58 +02:00
payload = embeddedTime + encodeVarint ( toStreamNumber ) + ackdata
2013-05-02 21:59:10 +02:00
target = 2 * * 64 / ( ( len ( payload ) + shared . networkDefaultPayloadLengthExtraBytes + 8 ) * shared . networkDefaultProofOfWorkNonceTrialsPerByte )
2013-05-02 17:53:54 +02:00
shared . printLock . acquire ( )
2012-11-19 20:45:05 +01:00
print ' (For ack message) Doing proof of work... '
2013-05-02 17:53:54 +02:00
shared . printLock . release ( )
2012-11-23 09:22:56 +01:00
powStartTime = time . time ( )
initialHash = hashlib . sha512 ( payload ) . digest ( )
2013-05-29 22:01:12 +02:00
trialValue , nonce = proofofwork . run ( target , initialHash )
2013-05-02 17:53:54 +02:00
shared . printLock . acquire ( )
2012-11-19 20:45:05 +01:00
print ' (For ack message) Found proof of work ' , trialValue , ' Nonce: ' , nonce
2013-03-19 18:32:37 +01:00
try :
print ' POW took ' , int ( time . time ( ) - powStartTime ) , ' seconds. ' , nonce / ( time . time ( ) - powStartTime ) , ' nonce trials per second. '
except :
pass
2013-05-02 17:53:54 +02:00
shared . printLock . release ( )
2012-11-19 20:45:05 +01:00
payload = pack ( ' >Q ' , nonce ) + payload
headerData = '\xe9\xbe\xb4\xd9' #magic bits, slightly different from Bitcoin's magic bits.
2013-02-04 22:49:02 +01:00
headerData + = ' msg \x00 \x00 \x00 \x00 \x00 \x00 \x00 \x00 \x00 '
headerData + = pack ( ' >L ' , len ( payload ) )
headerData + = hashlib . sha512 ( payload ) . digest ( ) [ : 4 ]
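#The header built above is the standard 24-byte Bitmessage message header: 4 magic bytes, a
#12-byte command field ('msg' padded with null bytes), a 4-byte payload length, and a 4-byte
#checksum taken from the first bytes of sha512(payload).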
2012-11-19 20:45:05 +01:00
return headerData + payload
2013-05-01 22:06:55 +02:00
class addressGenerator ( threading . Thread ) :
def __init__ ( self ) :
#QThread.__init__(self, parent)
threading . Thread . __init__ ( self )
2012-11-19 20:45:05 +01:00
def run ( self ) :
2013-05-01 22:06:55 +02:00
while True :
2013-05-08 19:59:30 +02:00
queueValue = shared . addressGeneratorQueue . get ( )
nonceTrialsPerByte = 0
payloadLengthExtraBytes = 0
2013-05-24 22:12:16 +02:00
if len ( queueValue ) == 7 :
command , addressVersionNumber , streamNumber , label , numberOfAddressesToMake , deterministicPassphrase , eighteenByteRipe = queueValue
elif len ( queueValue ) == 9 :
command , addressVersionNumber , streamNumber , label , numberOfAddressesToMake , deterministicPassphrase , eighteenByteRipe , nonceTrialsPerByte , payloadLengthExtraBytes = queueValue
2013-05-08 19:59:30 +02:00
else :
sys . stderr . write ( ' Programming error: A structure with the wrong number of values was passed into the addressGeneratorQueue. Here is the queueValue: %s \n ' % queueValue )
2013-05-02 18:47:43 +02:00
if addressVersionNumber < 3 or addressVersionNumber > 3 :
2013-05-08 19:59:30 +02:00
sys . stderr . write ( ' Program error: For some reason the address generator queue has been given a request to create at least one version %s address which it cannot do. \n ' % addressVersionNumber )
if nonceTrialsPerByte == 0 :
nonceTrialsPerByte = shared . config . getint ( ' bitmessagesettings ' , ' defaultnoncetrialsperbyte ' )
if nonceTrialsPerByte < shared . networkDefaultProofOfWorkNonceTrialsPerByte :
nonceTrialsPerByte = shared . networkDefaultProofOfWorkNonceTrialsPerByte
if payloadLengthExtraBytes == 0 :
payloadLengthExtraBytes = shared . config . getint ( ' bitmessagesettings ' , ' defaultpayloadlengthextrabytes ' )
if payloadLengthExtraBytes < shared . networkDefaultPayloadLengthExtraBytes :
payloadLengthExtraBytes = shared . networkDefaultPayloadLengthExtraBytes
if addressVersionNumber == 3 : #currently the only one supported.
2013-05-24 22:12:16 +02:00
if command == ' createRandomAddress ' :
2013-05-02 17:53:54 +02:00
shared . UISignalQueue . put ( ( ' updateStatusBar ' , ' Generating one new address ' ) )
2013-01-18 23:38:09 +01:00
#This next section is a little bit strange. We're going to generate keys over and over until we
2013-05-01 22:06:55 +02:00
#find one that starts with either \x00 or \x00\x00. Then when we pack them into a Bitmessage address,
#we won't store the \x00 or \x00\x00 bytes thus making the address shorter.
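#As a rough estimate (illustrative only): a uniformly random RIPEMD-160 digest starts with one
#zero byte with probability 1/256 and with two zero bytes with probability 1/65536, so on
#average about 256 keypairs (or about 65536 when eighteenByteRipe is set) must be generated
#before a suitable one is found.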
2013-01-21 01:00:46 +01:00
startTime = time . time ( )
numberOfAddressesWeHadToMakeBeforeWeFoundOneWithTheCorrectRipePrefix = 0
2013-05-01 22:06:55 +02:00
potentialPrivSigningKey = OpenSSL . rand ( 32 )
potentialPubSigningKey = pointMult ( potentialPrivSigningKey )
2013-01-18 23:38:09 +01:00
while True :
2013-01-21 02:17:39 +01:00
numberOfAddressesWeHadToMakeBeforeWeFoundOneWithTheCorrectRipePrefix + = 1
2013-05-01 22:06:55 +02:00
potentialPrivEncryptionKey = OpenSSL . rand ( 32 )
2013-04-26 19:20:30 +02:00
potentialPubEncryptionKey = pointMult ( potentialPrivEncryptionKey )
2013-01-18 23:38:09 +01:00
#print 'potentialPubSigningKey', potentialPubSigningKey.encode('hex')
#print 'potentialPubEncryptionKey', potentialPubEncryptionKey.encode('hex')
ripe = hashlib . new ( ' ripemd160 ' )
sha = hashlib . new ( ' sha512 ' )
sha . update ( potentialPubSigningKey + potentialPubEncryptionKey )
ripe . update ( sha . digest ( ) )
#print 'potential ripe.digest', ripe.digest().encode('hex')
if eighteenByteRipe :
if ripe . digest ( ) [ : 2 ] == ' \x00 \x00 ' :
break
else :
if ripe . digest ( ) [ : 1 ] == ' \x00 ' :
break
print ' Generated address with ripe digest: ' , ripe . digest ( ) . encode ( ' hex ' )
print ' Address generator calculated ' , numberOfAddressesWeHadToMakeBeforeWeFoundOneWithTheCorrectRipePrefix , ' addresses at ' , numberOfAddressesWeHadToMakeBeforeWeFoundOneWithTheCorrectRipePrefix / ( time . time ( ) - startTime ) , ' addresses per second before finding one with the correct ripe-prefix. '
address = encodeAddress ( 3 , streamNumber , ripe . digest ( ) )
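#Illustrative recap of the derivation performed by the loop above (pointMult and encodeAddress
#are the helpers already used in this file):
#    privSigningKey = OpenSSL.rand(32)
#    privEncryptionKey = OpenSSL.rand(32)
#    pubSigningKey = pointMult(privSigningKey)
#    pubEncryptionKey = pointMult(privEncryptionKey)
#    ripe = hashlib.new('ripemd160')
#    ripe.update(hashlib.sha512(pubSigningKey + pubEncryptionKey).digest())
#    address = encodeAddress(3, streamNumber, ripe.digest())
#The loop simply keeps drawing new encryption keys until ripe.digest() begins with \x00
#(or \x00\x00 when eighteenByteRipe is set), which is what makes the encoded address shorter.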
#An excellent way for us to store our keys is in Wallet Import Format. Let us convert now.
#https://en.bitcoin.it/wiki/Wallet_import_format
privSigningKey = ' \x80 ' + potentialPrivSigningKey
checksum = hashlib . sha256 ( hashlib . sha256 ( privSigningKey ) . digest ( ) ) . digest ( ) [ 0 : 4 ]
privSigningKeyWIF = arithmetic . changebase ( privSigningKey + checksum , 256 , 58 )
#print 'privSigningKeyWIF',privSigningKeyWIF
privEncryptionKey = ' \x80 ' + potentialPrivEncryptionKey
checksum = hashlib . sha256 ( hashlib . sha256 ( privEncryptionKey ) . digest ( ) ) . digest ( ) [ 0 : 4 ]
privEncryptionKeyWIF = arithmetic . changebase ( privEncryptionKey + checksum , 256 , 58 )
#print 'privEncryptionKeyWIF',privEncryptionKeyWIF
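#Illustrative sketch of the Wallet Import Format conversion used for both keys above (not
#executed here): prefix with \x80, append a 4-byte double-SHA256 checksum, then base58 encode:
#    def privateKeyToWIF(rawKey):
#        withPrefix = '\x80' + rawKey
#        checksum = hashlib.sha256(hashlib.sha256(withPrefix).digest()).digest()[:4]
#        return arithmetic.changebase(withPrefix + checksum, 256, 58)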
shared . config . add_section ( address )
shared . config . set ( address , ' label ' , label )
shared . config . set ( address , ' enabled ' , ' true ' )
shared . config . set ( address , ' decoy ' , ' false ' )
shared . config . set ( address , ' noncetrialsperbyte ' , str ( nonceTrialsPerByte ) )
shared . config . set ( address , ' payloadlengthextrabytes ' , str ( payloadLengthExtraBytes ) )
shared . config . set ( address , ' privSigningKey ' , privSigningKeyWIF )
shared . config . set ( address , ' privEncryptionKey ' , privEncryptionKeyWIF )
with open ( shared . appdata + ' keys.dat ' , ' wb ' ) as configfile :
shared . config . write ( configfile )
#It may be the case that this address is being generated as a result of a call to the API. Let us put the result in the necessary queue.
apiAddressGeneratorReturnQueue . put ( address )
shared . UISignalQueue . put ( ( ' updateStatusBar ' , ' Done generating address. Doing work necessary to broadcast it... ' ) )
shared . UISignalQueue . put ( ( ' writeNewAddressToTable ' , ( label , address , streamNumber ) ) )
shared . reloadMyAddressHashes ( )
shared . workerQueue . put ( ( ' doPOWForMyV3Pubkey ' , ripe . digest ( ) ) )
elif command == ' createDeterministicAddresses ' or command == ' getDeterministicAddress ' :
if len ( deterministicPassphrase ) == 0 :
sys . stderr . write ( ' WARNING: You are creating deterministic address(es) using a blank passphrase. Bitmessage will do it but it is rather stupid. ' )
if command == ' createDeterministicAddresses ' :
statusbar = ' Generating ' + str ( numberOfAddressesToMake ) + ' new addresses. '
shared . UISignalQueue . put ( ( ' updateStatusBar ' , statusbar ) )
signingKeyNonce = 0
encryptionKeyNonce = 1
listOfNewAddressesToSendOutThroughTheAPI = [ ] #We fill out this list no matter what although we only need it if we end up passing the info to the API.
for i in range ( numberOfAddressesToMake ) :
#This next section is a little bit strange. We're going to generate keys over and over until we
#find one that has a RIPEMD hash that starts with either \x00 or \x00\x00. Then when we pack them
#into a Bitmessage address, we won't store the \x00 or \x00\x00 bytes thus making the address shorter.
startTime = time . time ( )
numberOfAddressesWeHadToMakeBeforeWeFoundOneWithTheCorrectRipePrefix = 0
while True :
numberOfAddressesWeHadToMakeBeforeWeFoundOneWithTheCorrectRipePrefix + = 1
potentialPrivSigningKey = hashlib . sha512 ( deterministicPassphrase + encodeVarint ( signingKeyNonce ) ) . digest ( ) [ : 32 ]
potentialPrivEncryptionKey = hashlib . sha512 ( deterministicPassphrase + encodeVarint ( encryptionKeyNonce ) ) . digest ( ) [ : 32 ]
potentialPubSigningKey = pointMult ( potentialPrivSigningKey )
potentialPubEncryptionKey = pointMult ( potentialPrivEncryptionKey )
#print 'potentialPubSigningKey', potentialPubSigningKey.encode('hex')
#print 'potentialPubEncryptionKey', potentialPubEncryptionKey.encode('hex')
signingKeyNonce + = 2
encryptionKeyNonce + = 2
ripe = hashlib . new ( ' ripemd160 ' )
sha = hashlib . new ( ' sha512 ' )
sha . update ( potentialPubSigningKey + potentialPubEncryptionKey )
ripe . update ( sha . digest ( ) )
#print 'potential ripe.digest', ripe.digest().encode('hex')
if eighteenByteRipe :
if ripe . digest ( ) [ : 2 ] == ' \x00 \x00 ' :
break
else :
if ripe . digest ( ) [ : 1 ] == ' \x00 ' :
break
print ' ripe.digest ' , ripe . digest ( ) . encode ( ' hex ' )
print ' Address generator calculated ' , numberOfAddressesWeHadToMakeBeforeWeFoundOneWithTheCorrectRipePrefix , ' addresses at ' , numberOfAddressesWeHadToMakeBeforeWeFoundOneWithTheCorrectRipePrefix / ( time . time ( ) - startTime ) , ' keys per second. '
address = encodeAddress ( 3 , streamNumber , ripe . digest ( ) )
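#Illustrative recap of the deterministic derivation above: each private key is just the SHA-512
#of the passphrase plus an incrementing varint nonce, truncated to 32 bytes. Signing keys use
#even nonces and encryption keys use odd nonces, so the same passphrase always regenerates the
#same sequence of addresses:
#    privSigningKey = hashlib.sha512(deterministicPassphrase + encodeVarint(signingKeyNonce)).digest()[:32]
#    privEncryptionKey = hashlib.sha512(deterministicPassphrase + encodeVarint(encryptionKeyNonce)).digest()[:32]
#    signingKeyNonce += 2
#    encryptionKeyNonce += 2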
if command == ' createDeterministicAddresses ' :
#An excellent way for us to store our keys is in Wallet Import Format. Let us convert now.
#https://en.bitcoin.it/wiki/Wallet_import_format
privSigningKey = ' \x80 ' + potentialPrivSigningKey
checksum = hashlib . sha256 ( hashlib . sha256 ( privSigningKey ) . digest ( ) ) . digest ( ) [ 0 : 4 ]
privSigningKeyWIF = arithmetic . changebase ( privSigningKey + checksum , 256 , 58 )
privEncryptionKey = ' \x80 ' + potentialPrivEncryptionKey
checksum = hashlib . sha256 ( hashlib . sha256 ( privEncryptionKey ) . digest ( ) ) . digest ( ) [ 0 : 4 ]
privEncryptionKeyWIF = arithmetic . changebase ( privEncryptionKey + checksum , 256 , 58 )
try :
shared . config . add_section ( address )
print ' label: ' , label
shared . config . set ( address , ' label ' , label )
shared . config . set ( address , ' enabled ' , ' true ' )
shared . config . set ( address , ' decoy ' , ' false ' )
shared . config . set ( address , ' noncetrialsperbyte ' , str ( nonceTrialsPerByte ) )
shared . config . set ( address , ' payloadlengthextrabytes ' , str ( payloadLengthExtraBytes ) )
shared . config . set ( address , ' privSigningKey ' , privSigningKeyWIF )
shared . config . set ( address , ' privEncryptionKey ' , privEncryptionKeyWIF )
with open ( shared . appdata + ' keys.dat ' , ' wb ' ) as configfile :
shared . config . write ( configfile )
shared . UISignalQueue . put ( ( ' writeNewAddressToTable ' , ( label , address , str ( streamNumber ) ) ) )
listOfNewAddressesToSendOutThroughTheAPI . append ( address )
#if eighteenByteRipe:
# shared.reloadMyAddressHashes()#This is necessary here (rather than just at the end) because otherwise if the human generates a large number of new addresses and uses one before they are done generating, the program will receive a getpubkey message and will ignore it.
shared . myECCryptorObjects [ ripe . digest ( ) ] = highlevelcrypto . makeCryptor ( potentialPrivEncryptionKey . encode ( ' hex ' ) )
shared . myAddressesByHash [ ripe . digest ( ) ] = address
shared . workerQueue . put ( ( ' doPOWForMyV3Pubkey ' , ripe . digest ( ) ) )
except :
print address , ' already exists. Not adding it again. '
#Done generating addresses.
if command == ' createDeterministicAddresses ' :
#It may be the case that this address is being generated as a result of a call to the API. Let us put the result in the necessary queue.
apiAddressGeneratorReturnQueue . put ( listOfNewAddressesToSendOutThroughTheAPI )
shared . UISignalQueue . put ( ( ' updateStatusBar ' , ' Done generating address ' ) )
#shared.reloadMyAddressHashes()
elif command == ' getDeterministicAddress ' :
apiAddressGeneratorReturnQueue . put ( address )
else :
raise Exception ( " Error in the addressGenerator thread. Thread was given a command it could not understand: " + command )
#This is one of several classes that constitute the API
#This class was written by Vaibhav Bhatia. Modified by Jonathan Warren (Atheros).
#http://code.activestate.com/recipes/501148-xmlrpc-serverclient-which-does-cookie-handling-and/
class MySimpleXMLRPCRequestHandler ( SimpleXMLRPCRequestHandler ) :
def do_POST ( self ) :
#Handles the HTTP POST request.
#Attempts to interpret all HTTP POST requests as XML-RPC calls,
#which are forwarded to the server's _dispatch method for handling.
#Note: this method is the same as in SimpleXMLRPCRequestHandler,
#just hacked to handle cookies
# Check that the path is legal
if not self . is_rpc_path_valid ( ) :
self . report_404 ( )
return
try :
# Get arguments by reading body of request.
# We read this in chunks to avoid straining
# socket.read(); around the 10 or 15Mb mark, some platforms
# begin to have problems (bug #792570).
max_chunk_size = 10 * 1024 * 1024
size_remaining = int ( self . headers [ " content-length " ] )
L = [ ]
while size_remaining :
chunk_size = min ( size_remaining , max_chunk_size )
L . append ( self . rfile . read ( chunk_size ) )
size_remaining - = len ( L [ - 1 ] )
data = ' ' . join ( L )
# In previous versions of SimpleXMLRPCServer, _dispatch
# could be overridden in this class, instead of in
# SimpleXMLRPCDispatcher. To maintain backwards compatibility,
# check to see if a subclass implements _dispatch and dispatch
# using that method if present.
response = self . server . _marshaled_dispatch (
data , getattr ( self , ' _dispatch ' , None )
)
except : # This should only happen if the module is buggy
# internal error, report as HTTP server error
self . send_response ( 500 )
self . end_headers ( )
else :
# got a valid XML RPC response
self . send_response ( 200 )
self . send_header ( " Content-type " , " text/xml " )
self . send_header ( " Content-length " , str ( len ( response ) ) )
# HACK :start -> sends cookies here
if self . cookies :
for cookie in self . cookies :
self . send_header ( ' Set-Cookie ' , cookie . output ( header = ' ' ) )
# HACK :end
self . end_headers ( )
self . wfile . write ( response )
# shut down the connection
self . wfile . flush ( )
self . connection . shutdown ( 1 )
def APIAuthenticateClient ( self ) :
if self . headers . has_key ( ' Authorization ' ) :
# handle Basic authentication
( enctype , encstr ) = self . headers . get ( ' Authorization ' ) . split ( )
( emailid , password ) = encstr . decode ( ' base64 ' ) . split ( ' : ' )
if emailid == shared . config . get ( ' bitmessagesettings ' , ' apiusername ' ) and password == shared . config . get ( ' bitmessagesettings ' , ' apipassword ' ) :
return True
else :
return False
else :
print ' Authentication failed because header lacks Authentication field '
time . sleep ( 2 )
return False
return False
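#Illustrative client-side sketch: the API expects HTTP Basic authentication matching the
#apiusername and apipassword settings in keys.dat. Assuming the API is listening on
#127.0.0.1:8442 (substitute your own apiinterface and apiport values), a Python 2 caller
#could connect like this:
#    import xmlrpclib
#    api = xmlrpclib.ServerProxy('http://apiusername:apipassword@127.0.0.1:8442/')
#    print api.helloWorld('hello', 'world')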
def _dispatch ( self , method , params ) :
self . cookies = [ ]
validuser = self . APIAuthenticateClient ( )
if not validuser :
time . sleep ( 2 )
return " RPC Username or password incorrect or HTTP header lacks authentication at all. "
# handle request
if method == ' helloWorld ' :
( a , b ) = params
return a + ' - ' + b
elif method == ' add ' :
( a , b ) = params
return a + b
elif method == ' statusBar ' :
message , = params
shared . UISignalQueue . put ( ( ' updateStatusBar ' , message ) )
elif method == ' listAddresses ' :
data = ' { " addresses " :[ '
configSections = shared . config . sections ( )
for addressInKeysFile in configSections :
if addressInKeysFile < > ' bitmessagesettings ' :
status , addressVersionNumber , streamNumber , hash = decodeAddress ( addressInKeysFile )
if len ( data ) > 20 :
data + = ' , '
data + = json . dumps ( { ' label ' : shared . config . get ( addressInKeysFile , ' label ' ) , ' address ' : addressInKeysFile , ' stream ' : streamNumber , ' enabled ' : shared . config . getboolean ( addressInKeysFile , ' enabled ' ) } , indent = 4 , separators = ( ' , ' , ' : ' ) )
data + = ' ]} '
return data
elif method == ' createRandomAddress ' :
if len ( params ) == 0 :
return ' API Error 0000: I need parameters! '
elif len ( params ) == 1 :
label , = params
eighteenByteRipe = False
nonceTrialsPerByte = shared . config . get ( ' bitmessagesettings ' , ' defaultnoncetrialsperbyte ' )
payloadLengthExtraBytes = shared . config . get ( ' bitmessagesettings ' , ' defaultpayloadlengthextrabytes ' )
elif len ( params ) == 2 :
label , eighteenByteRipe = params
nonceTrialsPerByte = shared . config . get ( ' bitmessagesettings ' , ' defaultnoncetrialsperbyte ' )
payloadLengthExtraBytes = shared . config . get ( ' bitmessagesettings ' , ' defaultpayloadlengthextrabytes ' )
elif len ( params ) == 3 :
label , eighteenByteRipe , totalDifficulty = params
nonceTrialsPerByte = int ( shared . networkDefaultProofOfWorkNonceTrialsPerByte * totalDifficulty )
payloadLengthExtraBytes = shared . config . get ( ' bitmessagesettings ' , ' defaultpayloadlengthextrabytes ' )
elif len ( params ) == 4 :
label , eighteenByteRipe , totalDifficulty , smallMessageDifficulty = params
nonceTrialsPerByte = int ( shared . networkDefaultProofOfWorkNonceTrialsPerByte * totalDifficulty )
payloadLengthExtraBytes = int ( shared . networkDefaultPayloadLengthExtraBytes * smallMessageDifficulty )
else :
return ' API Error 0000: Too many parameters! '
label = label . decode ( ' base64 ' )
try :
unicode ( label , ' utf-8 ' )
except :
return ' API Error 0017: Label is not valid UTF-8 data. '
apiAddressGeneratorReturnQueue . queue . clear ( )
streamNumberForAddress = 1
shared . addressGeneratorQueue . put ( ( ' createRandomAddress ' , 3 , streamNumberForAddress , label , 1 , " " , eighteenByteRipe , nonceTrialsPerByte , payloadLengthExtraBytes ) )
return apiAddressGeneratorReturnQueue . get ( )
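#Illustrative client call (using the xmlrpclib proxy sketched above): the label must be
#base64-encoded, and the optional difficulty arguments are multipliers on the network defaults:
#    newAddress = api.createRandomAddress('my new address'.encode('base64'))
#    hardAddress = api.createRandomAddress('harder POW'.encode('base64'), False, 2)  #2x nonceTrialsPerByte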
elif method == ' createDeterministicAddresses ' :
if len ( params ) == 0 :
return ' API Error 0000: I need parameters! '
elif len ( params ) == 1 :
passphrase , = params
numberOfAddresses = 1
addressVersionNumber = 0
streamNumber = 0
eighteenByteRipe = False
nonceTrialsPerByte = shared . config . get ( ' bitmessagesettings ' , ' defaultnoncetrialsperbyte ' )
payloadLengthExtraBytes = shared . config . get ( ' bitmessagesettings ' , ' defaultpayloadlengthextrabytes ' )
elif len ( params ) == 2 :
passphrase , numberOfAddresses = params
addressVersionNumber = 0
streamNumber = 0
eighteenByteRipe = False
nonceTrialsPerByte = shared . config . get ( ' bitmessagesettings ' , ' defaultnoncetrialsperbyte ' )
payloadLengthExtraBytes = shared . config . get ( ' bitmessagesettings ' , ' defaultpayloadlengthextrabytes ' )
elif len ( params ) == 3 :
passphrase , numberOfAddresses , addressVersionNumber = params
streamNumber = 0
eighteenByteRipe = False
nonceTrialsPerByte = shared . config . get ( ' bitmessagesettings ' , ' defaultnoncetrialsperbyte ' )
payloadLengthExtraBytes = shared . config . get ( ' bitmessagesettings ' , ' defaultpayloadlengthextrabytes ' )
elif len ( params ) == 4 :
passphrase , numberOfAddresses , addressVersionNumber , streamNumber = params
eighteenByteRipe = False
nonceTrialsPerByte = shared . config . get ( ' bitmessagesettings ' , ' defaultnoncetrialsperbyte ' )
payloadLengthExtraBytes = shared . config . get ( ' bitmessagesettings ' , ' defaultpayloadlengthextrabytes ' )
elif len ( params ) == 5 :
passphrase , numberOfAddresses , addressVersionNumber , streamNumber , eighteenByteRipe = params
nonceTrialsPerByte = shared . config . get ( ' bitmessagesettings ' , ' defaultnoncetrialsperbyte ' )
payloadLengthExtraBytes = shared . config . get ( ' bitmessagesettings ' , ' defaultpayloadlengthextrabytes ' )
elif len ( params ) == 6 :
passphrase , numberOfAddresses , addressVersionNumber , streamNumber , eighteenByteRipe , totalDifficulty = params
nonceTrialsPerByte = int ( shared . networkDefaultProofOfWorkNonceTrialsPerByte * totalDifficulty )
payloadLengthExtraBytes = shared . config . get ( ' bitmessagesettings ' , ' defaultpayloadlengthextrabytes ' )
elif len ( params ) == 7 :
passphrase , numberOfAddresses , addressVersionNumber , streamNumber , eighteenByteRipe , totalDifficulty , smallMessageDifficulty = params
nonceTrialsPerByte = int ( shared . networkDefaultProofOfWorkNonceTrialsPerByte * totalDifficulty )
payloadLengthExtraBytes = int ( shared . networkDefaultPayloadLengthExtraBytes * smallMessageDifficulty )
else :
return ' API Error 0000: Too many parameters! '
if len ( passphrase ) == 0 :
return ' API Error 0001: The specified passphrase is blank. '
passphrase = passphrase . decode ( ' base64 ' )
if addressVersionNumber == 0 : #0 means "just use the proper addressVersionNumber"
addressVersionNumber = 3
if addressVersionNumber != 3 :
return ' API Error 0002: The address version number currently must be 3 (or 0 which means auto-select). ' + str ( addressVersionNumber ) + ' isn \' t supported. '
if streamNumber == 0 : #0 means "just use the most available stream"
streamNumber = 1
if streamNumber != 1 :
return ' API Error 0003: The stream number must be 1 (or 0 which means auto-select). Others aren \' t supported. '
if numberOfAddresses == 0 :
return ' API Error 0004: Why would you ask me to generate 0 addresses for you? '
if numberOfAddresses > 999 :
return ' API Error 0005: You have (accidentally?) specified too many addresses to make. Maximum 999. This check only exists to prevent mischief; if you really want to create more addresses than this, contact the Bitmessage developers and we can modify the check or you can do it yourself by searching the source code for this message. '
apiAddressGeneratorReturnQueue . queue . clear ( )
print ' Requesting that the addressGenerator create ' , numberOfAddresses , ' addresses. '
shared . addressGeneratorQueue . put ( ( ' createDeterministicAddresses ' , addressVersionNumber , streamNumber , ' unused API address ' , numberOfAddresses , passphrase , eighteenByteRipe , nonceTrialsPerByte , payloadLengthExtraBytes ) )
data = ' { " addresses " :[ '
queueReturn = apiAddressGeneratorReturnQueue . get ( )
for item in queueReturn :
if len ( data ) > 20 :
data + = ' , '
data + = " \" " + item + " \" "
data + = ' ]} '
return data
elif method == ' getDeterministicAddress ' :
if len ( params ) != 3 :
return ' API Error 0000: I need exactly 3 parameters. '
passphrase , addressVersionNumber , streamNumber = params
numberOfAddresses = 1
eighteenByteRipe = False
if len ( passphrase ) == 0 :
return ' API Error 0001: The specified passphrase is blank. '
passphrase = passphrase . decode ( ' base64 ' )
if addressVersionNumber != 3 :
return ' API Error 0002: The address version number currently must be 3. ' + str ( addressVersionNumber ) + ' isn \' t supported. '
if streamNumber != 1 :
return ' API Error 0003: The stream number must be 1. Others aren \' t supported. '
apiAddressGeneratorReturnQueue . queue . clear ( )
print ' Requesting that the addressGenerator create ' , numberOfAddresses , ' addresses. '
shared . addressGeneratorQueue . put ( ( ' getDeterministicAddress ' , addressVersionNumber , streamNumber , ' unused API address ' , numberOfAddresses , passphrase , eighteenByteRipe ) )
return apiAddressGeneratorReturnQueue . get ( )
elif method == ' getAllInboxMessages ' :
shared . sqlLock . acquire ( )
shared . sqlSubmitQueue . put ( ''' SELECT msgid, toaddress, fromaddress, subject, received, message FROM inbox where folder= ' inbox ' ORDER BY received ''' )
shared . sqlSubmitQueue . put ( ' ' )
queryreturn = shared . sqlReturnQueue . get ( )
shared . sqlLock . release ( )
data = ' { " inboxMessages " :[ '
for row in queryreturn :
msgid , toAddress , fromAddress , subject , received , message , = row
subject = shared . fixPotentiallyInvalidUTF8Data ( subject )
message = shared . fixPotentiallyInvalidUTF8Data ( message )
if len ( data ) > 25 :
data + = ' , '
data + = json . dumps ( { ' msgid ' : msgid . encode ( ' hex ' ) , ' toAddress ' : toAddress , ' fromAddress ' : fromAddress , ' subject ' : subject . encode ( ' base64 ' ) , ' message ' : message . encode ( ' base64 ' ) , ' encodingType ' : 2 , ' receivedTime ' : received } , indent = 4 , separators = ( ' , ' , ' : ' ) )
data + = ' ]} '
return data
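#Illustrative client-side decoding sketch: msgid comes back hex-encoded while subject and
#message are base64-encoded, so a caller (using the proxy sketched above) might do:
#    import json
#    inbox = json.loads(api.getAllInboxMessages())['inboxMessages']
#    for msg in inbox:
#        print msg['fromAddress'], msg['subject'].decode('base64')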
elif method == ' trashMessage ' :
if len ( params ) == 0 :
return ' API Error 0000: I need parameters! '
msgid = params [ 0 ] . decode ( ' hex ' )
t = ( msgid , )
shared . sqlLock . acquire ( )
shared . sqlSubmitQueue . put ( ''' UPDATE inbox SET folder= ' trash ' WHERE msgid=? ''' )
shared . sqlSubmitQueue . put ( t )
shared . sqlReturnQueue . get ( )
shared . sqlSubmitQueue . put ( ' commit ' )
shared . sqlLock . release ( )
shared . UISignalQueue . put ( ( ' removeInboxRowByMsgid ' , msgid ) )
return ' Trashed message (assuming message existed). '
elif method == ' sendMessage ' :
if len ( params ) == 0 :
return ' API Error 0000: I need parameters! '
elif len ( params ) == 4 :
toAddress , fromAddress , subject , message = params
encodingType = 2
elif len ( params ) == 5 :
toAddress , fromAddress , subject , message , encodingType = params
if encodingType != 2 :
return ' API Error 0006: The encoding type must be 2 because that is the only one this program currently supports. '
subject = subject . decode ( ' base64 ' )
message = message . decode ( ' base64 ' )
status , addressVersionNumber , streamNumber , toRipe = decodeAddress ( toAddress )
if status < > ' success ' :
shared . printLock . acquire ( )
print ' API Error 0007: Could not decode address: ' , toAddress , ' : ' , status
shared . printLock . release ( )
if status == ' checksumfailed ' :
return ' API Error 0008: Checksum failed for address: ' + toAddress
if status == ' invalidcharacters ' :
return ' API Error 0009: Invalid characters in address: ' + toAddress
if status == ' versiontoohigh ' :
return ' API Error 0010: Address version number too high (or zero) in address: ' + toAddress
return ' API Error 0007: Could not decode address: ' + toAddress + ' : ' + status
if addressVersionNumber < 2 or addressVersionNumber > 3 :
return ' API Error 0011: The address version number currently must be 2 or 3. Others aren \' t supported. Check the toAddress. '
if streamNumber != 1 :
return ' API Error 0012: The stream number must be 1. Others aren \' t supported. Check the toAddress. '
status , addressVersionNumber , streamNumber , fromRipe = decodeAddress ( fromAddress )
if status < > ' success ' :
shared . printLock . acquire ( )
print ' API Error 0007: Could not decode address: ' , fromAddress , ' : ' , status
shared . printLock . release ( )
if status == ' checksumfailed ' :
return ' API Error 0008: Checksum failed for address: ' + fromAddress
if status == ' invalidcharacters ' :
return ' API Error 0009: Invalid characters in address: ' + fromAddress
if status == ' versiontoohigh ' :
return ' API Error 0010: Address version number too high (or zero) in address: ' + fromAddress
return ' API Error 0007: Could not decode address: ' + fromAddress + ' : ' + status
if addressVersionNumber < 2 or addressVersionNumber > 3 :
return ' API Error 0011: The address version number currently must be 2 or 3. Others aren \' t supported. Check the fromAddress. '
if streamNumber != 1 :
return ' API Error 0012: The stream number must be 1. Others aren \' t supported. Check the fromAddress. '
toAddress = addBMIfNotPresent ( toAddress )
fromAddress = addBMIfNotPresent ( fromAddress )
try :
fromAddressEnabled = shared . config . getboolean ( fromAddress , ' enabled ' )
except :
return ' API Error 0013: Could not find your fromAddress in the keys.dat file. '
if not fromAddressEnabled :
return ' API Error 0014: Your fromAddress is disabled. Cannot send. '
ackdata = OpenSSL . rand ( 32 )
shared . sqlLock . acquire ( )
t = ( ' ' , toAddress , toRipe , fromAddress , subject , message , ackdata , int ( time . time ( ) ) , ' msgqueued ' , 1 , 1 , ' sent ' , 2 )
shared . sqlSubmitQueue . put ( ''' INSERT INTO sent VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?) ''' )
shared . sqlSubmitQueue . put ( t )
shared . sqlReturnQueue . get ( )
shared . sqlSubmitQueue . put ( ' commit ' )
shared . sqlLock . release ( )
toLabel = ' '
t = ( toAddress , )
shared . sqlLock . acquire ( )
shared . sqlSubmitQueue . put ( ''' select label from addressbook where address=? ''' )
shared . sqlSubmitQueue . put ( t )
queryreturn = shared . sqlReturnQueue . get ( )
shared . sqlLock . release ( )
if queryreturn < > [ ] :
for row in queryreturn :
toLabel , = row
#apiSignalQueue.put(('displayNewSentMessage',(toAddress,toLabel,fromAddress,subject,message,ackdata)))
shared . UISignalQueue . put ( ( ' displayNewSentMessage ' , ( toAddress , toLabel , fromAddress , subject , message , ackdata ) ) )
shared . workerQueue . put ( ( ' sendmessage ' , toAddress ) )
return ackdata . encode ( ' hex ' )
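#Illustrative client call (using the proxy sketched above): subject and message must be
#base64-encoded, and the two addresses shown are placeholders for real ones from listAddresses.
#The returned hex ackdata can later be passed to getStatus:
#    ackData = api.sendMessage('BM-toAddressHere', 'BM-fromAddressHere',
#        'Subject'.encode('base64'), 'Message body'.encode('base64'))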
elif method == ' sendBroadcast ' :
if len ( params ) == 0 :
return ' API Error 0000: I need parameters! '
if len ( params ) == 3 :
fromAddress , subject , message = params
encodingType = 2
elif len ( params ) == 4 :
fromAddress , subject , message , encodingType = params
if encodingType != 2 :
return ' API Error 0006: The encoding type must be 2 because that is the only one this program currently supports. '
subject = subject . decode ( ' base64 ' )
message = message . decode ( ' base64 ' )
status , addressVersionNumber , streamNumber , fromRipe = decodeAddress ( fromAddress )
if status < > ' success ' :
shared . printLock . acquire ( )
print ' API Error 0007: Could not decode address: ' , fromAddress , ' : ' , status
shared . printLock . release ( )
if status == ' checksumfailed ' :
return ' API Error 0008: Checksum failed for address: ' + fromAddress
if status == ' invalidcharacters ' :
return ' API Error 0009: Invalid characters in address: ' + fromAddress
if status == ' versiontoohigh ' :
return ' API Error 0010: Address version number too high (or zero) in address: ' + fromAddress
return ' API Error 0007: Could not decode address: ' + fromAddress + ' : ' + status
if addressVersionNumber < 2 or addressVersionNumber > 3 :
return ' API Error 0011: the address version number currently must be 2 or 3. Others aren \' t supported. Check the fromAddress. '
if streamNumber != 1 :
return ' API Error 0012: the stream number must be 1. Others aren \' t supported. Check the fromAddress. '
fromAddress = addBMIfNotPresent ( fromAddress )
try :
fromAddressEnabled = shared . config . getboolean ( fromAddress , ' enabled ' )
except :
return ' API Error 0013: could not find your fromAddress in the keys.dat file. '
ackdata = OpenSSL . rand ( 32 )
toAddress = ' [Broadcast subscribers] '
ripe = ' '
shared . sqlLock . acquire ( )
t = ( ' ' , toAddress , ripe , fromAddress , subject , message , ackdata , int ( time . time ( ) ) , ' broadcastqueued ' , 1 , 1 , ' sent ' , 2 )
shared . sqlSubmitQueue . put ( ''' INSERT INTO sent VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?) ''' )
shared . sqlSubmitQueue . put ( t )
shared . sqlReturnQueue . get ( )
shared . sqlSubmitQueue . put ( ' commit ' )
shared . sqlLock . release ( )
toLabel = ' [Broadcast subscribers] '
shared . UISignalQueue . put ( ( ' displayNewSentMessage ' , ( toAddress , toLabel , fromAddress , subject , message , ackdata ) ) )
shared . workerQueue . put ( ( ' sendbroadcast ' , ' ' ) )
return ackdata . encode ( ' hex ' )
elif method == ' getStatus ' :
if len ( params ) != 1 :
return ' API Error 0000: I need one parameter! '
ackdata , = params
if len ( ackdata ) != 64 :
return ' API Error 0015: The length of ackData should be 32 bytes (encoded in hex thus 64 characters). '
shared . sqlLock . acquire ( )
shared . sqlSubmitQueue . put ( ''' SELECT status FROM sent where ackdata=? ''' )
shared . sqlSubmitQueue . put ( ( ackdata . decode ( ' hex ' ) , ) )
queryreturn = shared . sqlReturnQueue . get ( )
shared . sqlLock . release ( )
if queryreturn == [ ] :
return ' notfound '
for row in queryreturn :
status , = row
return status
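#Illustrative polling sketch (client side, using the proxy above): the returned status string
#advances through values such as 'msgqueued', 'msgsent', and 'ackreceived' as the worker thread
#makes progress (the exact set depends on the worker), so a caller can poll until it reaches
#the state it cares about:
#    import time
#    while api.getStatus(ackData) not in ('msgsent', 'ackreceived'):
#        time.sleep(10)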
elif method == ' addSubscription ' :
if len ( params ) == 0 :
return ' API Error 0000: I need parameters! '
if len ( params ) == 1 :
address , = params
label = ' '
if len ( params ) == 2 :
address , label = params
label = label . decode ( ' base64 ' )
try :
unicode ( label , ' utf-8 ' )
except :
return ' API Error 0017: Label is not valid UTF-8 data. '
if len ( params ) > 2 :
return ' API Error 0000: I need either 1 or 2 parameters! '
address = addBMIfNotPresent ( address )
status , addressVersionNumber , streamNumber , toRipe = decodeAddress ( address )
if status < > ' success ' :
shared . printLock . acquire ( )
print ' API Error 0007: Could not decode address: ' , address , ' : ' , status
shared . printLock . release ( )
if status == ' checksumfailed ' :
return ' API Error 0008: Checksum failed for address: ' + address
if status == ' invalidcharacters ' :
return ' API Error 0009: Invalid characters in address: ' + address
if status == ' versiontoohigh ' :
return ' API Error 0010: Address version number too high (or zero) in address: ' + address
return ' API Error 0007: Could not decode address: ' + address + ' : ' + status
if addressVersionNumber < 2 or addressVersionNumber > 3 :
return ' API Error 0011: The address version number currently must be 2 or 3. Others aren \' t supported. '
if streamNumber != 1 :
return ' API Error 0012: The stream number must be 1. Others aren \' t supported. '
#First we must check to see if the address is already in the subscriptions list.
shared . sqlLock . acquire ( )
t = ( address , )
shared . sqlSubmitQueue . put ( ''' select * from subscriptions where address=? ''' )
shared . sqlSubmitQueue . put ( t )
queryreturn = shared . sqlReturnQueue . get ( )
shared . sqlLock . release ( )
if queryreturn != [ ] :
return ' API Error 0016: You are already subscribed to that address. '
t = ( label , address , True )
shared . sqlLock . acquire ( )
shared . sqlSubmitQueue . put ( ''' INSERT INTO subscriptions VALUES (?,?,?) ''' )
shared . sqlSubmitQueue . put ( t )
queryreturn = shared . sqlReturnQueue . get ( )
shared . sqlSubmitQueue . put ( ' commit ' )
shared . sqlLock . release ( )
shared . reloadBroadcastSendersForWhichImWatching ( )
shared . UISignalQueue . put ( ( ' rerenderInboxFromLabels ' , ' ' ) )
shared . UISignalQueue . put ( ( ' rerenderSubscriptions ' , ' ' ) )
return ' Added subscription. '
elif method == ' deleteSubscription ' :
if len ( params ) != 1 :
return ' API Error 0000: I need 1 parameter! '
address , = params
address = addBMIfNotPresent ( address )
t = ( address , )
shared . sqlLock . acquire ( )
shared . sqlSubmitQueue . put ( ''' DELETE FROM subscriptions WHERE address=? ''' )
shared . sqlSubmitQueue . put ( t )
shared . sqlReturnQueue . get ( )
shared . sqlSubmitQueue . put ( ' commit ' )
shared . sqlLock . release ( )
shared . reloadBroadcastSendersForWhichImWatching ( )
shared . UISignalQueue . put ( ( ' rerenderInboxFromLabels ' , ' ' ) )
shared . UISignalQueue . put ( ( ' rerenderSubscriptions ' , ' ' ) )
return ' Deleted subscription if it existed. '
else :
return ' Invalid Method: %s ' % method
#This thread, of which there is only one, runs the API.
class singleAPI ( threading . Thread ) :
def __init__ ( self ) :
threading . Thread . __init__ ( self )
def run ( self ) :
se = SimpleXMLRPCServer ( ( shared . config . get ( ' bitmessagesettings ' , ' apiinterface ' ) , shared . config . getint ( ' bitmessagesettings ' , ' apiport ' ) ) , MySimpleXMLRPCRequestHandler , True , True )
se . register_introspection_functions ( )
se . serve_forever ( )
selfInitiatedConnections = { } #This is a list of current connections (the thread pointers at least)
alreadyAttemptedConnectionsList = { } #This is a list of nodes to which we have already attempted a connection
ackdataForWhichImWatching = { }
alreadyAttemptedConnectionsListLock = threading . Lock ( )
eightBytesOfRandomDataUsedToDetectConnectionsToSelf = pack ( ' >Q ' , random . randrange ( 1 , 18446744073709551615 ) )
neededPubkeys = { }
successfullyDecryptMessageTimings = [ ] #A list of the amounts of time it took to successfully decrypt msg messages
apiAddressGeneratorReturnQueue = Queue . Queue ( ) #The address generator thread uses this queue to get information back to the API thread.
alreadyAttemptedConnectionsListResetTime = int ( time . time ( ) ) #used to clear out the alreadyAttemptedConnectionsList periodically so that we will retry connecting to hosts to which we have already tried to connect.
numberOfObjectsThatWeHaveYetToCheckAndSeeWhetherWeAlreadyHavePerPeer = { }
if useVeryEasyProofOfWorkForTesting :
shared . networkDefaultProofOfWorkNonceTrialsPerByte = int ( shared . networkDefaultProofOfWorkNonceTrialsPerByte / 16 )
shared . networkDefaultPayloadLengthExtraBytes = int ( shared . networkDefaultPayloadLengthExtraBytes / 7000 )
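#For reference, a rough sketch of how these two settings feed the proof of work: a nonce is
#accepted when its trial value falls below a target of approximately
#    target = 2**64 / ((len(payload) + payloadLengthExtraBytes + 8) * nonceTrialsPerByte)
#so dividing nonceTrialsPerByte by 16 and payloadLengthExtraBytes by 7000 makes test POWs far
#cheaper to compute than real ones.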
if __name__ == " __main__ " :
# is the application already running? If yes then exit.
thisapp = singleton . singleinstance ( )
signal . signal ( signal . SIGINT , signal_handler )
#signal.signal(signal.SIGINT, signal.SIG_DFL)
# Check the Major version, the first element in the array
if sqlite3 . sqlite_version_info [ 0 ] < 3 :
print ' This program requires sqlite version 3 or higher because 2 and lower cannot store NULL values. I see version: ' , sqlite3 . sqlite_version_info
os . _exit ( 0 )
#First try to load the config file (the keys.dat file) from the program directory
shared . config = ConfigParser . SafeConfigParser ( )
shared . config . read ( ' keys.dat ' )
try :
shared . config . get ( ' bitmessagesettings ' , ' settingsversion ' )
print ' Loading config files from same directory as program '
shared . appdata = ' '
except :
#Could not load the keys.dat file in the program directory. Perhaps it is in the appdata directory.
shared . appdata = shared . lookupAppdataFolder ( )
shared . config = ConfigParser . SafeConfigParser ( )
shared . config . read ( shared . appdata + ' keys.dat ' )
try :
shared . config . get ( ' bitmessagesettings ' , ' settingsversion ' )
print ' Loading existing config files from ' , shared . appdata
except :
#This appears to be the first time running the program; there is no config file (or it cannot be accessed). Create config file.
shared . config . add_section ( ' bitmessagesettings ' )
shared . config . set ( ' bitmessagesettings ' , ' settingsversion ' , ' 6 ' )
shared . config . set ( ' bitmessagesettings ' , ' port ' , ' 8444 ' )
shared . config . set ( ' bitmessagesettings ' , ' timeformat ' , ' %% a, %% d %% b %% Y %% I: %% M %% p ' )
shared . config . set ( ' bitmessagesettings ' , ' blackwhitelist ' , ' black ' )
shared . config . set ( ' bitmessagesettings ' , ' startonlogon ' , ' false ' )
if ' linux ' in sys . platform :
shared . config . set ( ' bitmessagesettings ' , ' minimizetotray ' , ' false ' ) #This isn't implemented yet; when True on Ubuntu it causes Bitmessage to disappear while running minimized.
else :
shared . config . set ( ' bitmessagesettings ' , ' minimizetotray ' , ' true ' )
shared . config . set ( ' bitmessagesettings ' , ' showtraynotifications ' , ' true ' )
shared . config . set ( ' bitmessagesettings ' , ' startintray ' , ' false ' )
shared . config . set ( ' bitmessagesettings ' , ' socksproxytype ' , ' none ' )
shared . config . set ( ' bitmessagesettings ' , ' sockshostname ' , ' localhost ' )
shared . config . set ( ' bitmessagesettings ' , ' socksport ' , ' 9050 ' )
shared . config . set ( ' bitmessagesettings ' , ' socksauthentication ' , ' false ' )
shared . config . set ( ' bitmessagesettings ' , ' socksusername ' , ' ' )
shared . config . set ( ' bitmessagesettings ' , ' sockspassword ' , ' ' )
shared . config . set ( ' bitmessagesettings ' , ' keysencrypted ' , ' false ' )
shared . config . set ( ' bitmessagesettings ' , ' messagesencrypted ' , ' false ' )
shared . config . set ( ' bitmessagesettings ' , ' defaultnoncetrialsperbyte ' , str ( shared . networkDefaultProofOfWorkNonceTrialsPerByte ) )
shared . config . set ( ' bitmessagesettings ' , ' defaultpayloadlengthextrabytes ' , str ( shared . networkDefaultPayloadLengthExtraBytes ) )
shared . config . set ( ' bitmessagesettings ' , ' minimizeonclose ' , ' false ' )
shared . config . set ( ' bitmessagesettings ' , ' maxacceptablenoncetrialsperbyte ' , ' 0 ' )
shared . config . set ( ' bitmessagesettings ' , ' maxacceptablepayloadlengthextrabytes ' , ' 0 ' )
if storeConfigFilesInSameDirectoryAsProgramByDefault :
#Just use the same directory as the program and forget about the appdata folder
shared . appdata = ' '
print ' Creating new config files in same directory as program. '
else :
print ' Creating new config files in ' , shared . appdata
if not os . path . exists ( shared . appdata ) :
os . makedirs ( shared . appdata )
with open ( shared . appdata + ' keys.dat ' , ' wb ' ) as configfile :
shared . config . write ( configfile )
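#For illustration, a freshly created keys.dat starts with a [bitmessagesettings] section along
#these lines (values vary by platform and settings version), and later gains one [BM-...]
#section per generated address holding its label, enabled and decoy flags, POW settings, and
#the two WIF-encoded private keys:
#    [bitmessagesettings]
#    settingsversion = 6
#    port = 8444
#    blackwhitelist = black
#    socksproxytype = none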
if shared . config . getint ( ' bitmessagesettings ' , ' settingsversion ' ) == 1 :
shared . config . set ( ' bitmessagesettings ' , ' settingsversion ' , ' 4 ' ) #If the settings version is equal to 2 or 3 then the sqlThread will modify the pubkeys table and change the settings version to 4.
shared . config . set ( ' bitmessagesettings ' , ' socksproxytype ' , ' none ' )
shared . config . set ( ' bitmessagesettings ' , ' sockshostname ' , ' localhost ' )
shared . config . set ( ' bitmessagesettings ' , ' socksport ' , ' 9050 ' )
shared . config . set ( ' bitmessagesettings ' , ' socksauthentication ' , ' false ' )
shared . config . set ( ' bitmessagesettings ' , ' socksusername ' , ' ' )
shared . config . set ( ' bitmessagesettings ' , ' sockspassword ' , ' ' )
shared . config . set ( ' bitmessagesettings ' , ' keysencrypted ' , ' false ' )
shared . config . set ( ' bitmessagesettings ' , ' messagesencrypted ' , ' false ' )
with open ( shared . appdata + ' keys.dat ' , ' wb ' ) as configfile :
shared . config . write ( configfile )
try :
#We shouldn't have to use the shared.knownNodesLock because this had better be the only thread accessing knownNodes right now.
pickleFile = open ( shared . appdata + ' knownnodes.dat ' , ' rb ' )
shared . knownNodes = pickle . load ( pickleFile )
pickleFile . close ( )
except :
createDefaultKnownNodes ( shared . appdata )
pickleFile = open ( shared . appdata + ' knownnodes.dat ' , ' rb ' )
shared . knownNodes = pickle . load ( pickleFile )
pickleFile . close ( )
if shared . config . getint ( ' bitmessagesettings ' , ' settingsversion ' ) > 6 :
print ' Bitmessage cannot read future versions of the keys file (keys.dat). Run the newer version of Bitmessage. '
raise SystemExit
#DNS bootstrap. This could be programmed to use the SOCKS proxy to do the DNS lookup some day but for now we will just rely on the entries in defaultKnownNodes.py. Hopefully either they are up to date or the user has run Bitmessage recently without SOCKS turned on and received good bootstrap nodes using that method.
if shared . config . get ( ' bitmessagesettings ' , ' socksproxytype ' ) == ' none ' :
try :
for item in socket . getaddrinfo ( ' bootstrap8080.bitmessage.org ' , 80 ) :
print ' Adding ' , item [ 4 ] [ 0 ] , ' to knownNodes based on DNS bootstrap method '
shared . knownNodes [ 1 ] [ item [ 4 ] [ 0 ] ] = ( 8080 , int ( time . time ( ) ) )
except :
print ' bootstrap8080.bitmessage.org DNS bootstrapping failed. '
try :
for item in socket . getaddrinfo ( ' bootstrap8444.bitmessage.org ' , 80 ) :
print ' Adding ' , item [ 4 ] [ 0 ] , ' to knownNodes based on DNS bootstrap method '
shared . knownNodes [ 1 ] [ item [ 4 ] [ 0 ] ] = ( 8444 , int ( time . time ( ) ) )
except :
print ' bootstrap8444.bitmessage.org DNS bootstrapping failed. '
else :
print ' DNS bootstrap skipped because SOCKS is used. '
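#Note on the lookups above: socket.getaddrinfo returns 5-tuples whose last element is the
#sockaddr, so item[4][0] is the resolved IP address. The same lookup can be checked standalone:
#    import socket
#    for item in socket.getaddrinfo('bootstrap8444.bitmessage.org', 80):
#        print item[4][0]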
#Start the address generation thread
addressGeneratorThread = addressGenerator ( )
addressGeneratorThread . daemon = True # close the main program even if there are threads left
addressGeneratorThread . start ( )
#Start the thread that calculates POWs
singleWorkerThread = singleWorker ( )
singleWorkerThread . daemon = True # close the main program even if there are threads left
singleWorkerThread . start ( )
#Start the SQL thread
sqlLookup = sqlThread ( )
sqlLookup . daemon = False # DON'T close the main program even if there are threads left. The closeEvent should command this thread to exit gracefully.
sqlLookup . start ( )
#Start the cleanerThread
singleCleanerThread = singleCleaner ( )
singleCleanerThread . daemon = True # close the main program even if there are threads left
singleCleanerThread . start ( )
shared . reloadMyAddressHashes ( )
shared . reloadBroadcastSendersForWhichImWatching ( )
if shared . safeConfigGetBoolean ( ' bitmessagesettings ' , ' apienabled ' ) :
try :
apiNotifyPath = shared . config . get ( ' bitmessagesettings ' , ' apinotifypath ' )
except :
apiNotifyPath = ' '
if apiNotifyPath != ' ' :
shared . printLock . acquire ( )
print ' Trying to call ' , apiNotifyPath
shared . printLock . release ( )
call ( [ apiNotifyPath , " startingUp " ] )
singleAPIThread = singleAPI ( )
singleAPIThread . daemon = True #close the main program even if there are threads left
singleAPIThread . start ( )
#self.singleAPISignalHandlerThread = singleAPISignalHandler()
#self.singleAPISignalHandlerThread.start()
#QtCore.QObject.connect(self.singleAPISignalHandlerThread, QtCore.SIGNAL("updateStatusBar(PyQt_PyObject)"), self.updateStatusBar)
#QtCore.QObject.connect(self.singleAPISignalHandlerThread, QtCore.SIGNAL("passAddressGeneratorObjectThrough(PyQt_PyObject)"), self.connectObjectToAddressGeneratorSignals)
#QtCore.QObject.connect(self.singleAPISignalHandlerThread, QtCore.SIGNAL("displayNewSentMessage(PyQt_PyObject,PyQt_PyObject,PyQt_PyObject,PyQt_PyObject,PyQt_PyObject,PyQt_PyObject)"), self.displayNewSentMessage)
connectToStream ( 1 )
singleListenerThread = singleListener ( )
singleListenerThread . daemon = True # close the main program even if there are threads left
singleListenerThread . start ( )
if not shared . safeConfigGetBoolean ( ' bitmessagesettings ' , ' daemon ' ) :
try :
from PyQt4 . QtCore import *
from PyQt4 . QtGui import *
except Exception , err :
print ' PyBitmessage requires PyQt unless you want to run it as a daemon and interact with it using the API. You can download PyQt from http://www.riverbankcomputing.com/software/pyqt/download or by searching Google for \' PyQt Download \' . If you want to run in daemon mode, see https://bitmessage.org/wiki/Daemon '
print ' Error message: ' , err
os . _exit ( 0 )
import bitmessageqt
bitmessageqt . run ( )
else :
print ' Running as a daemon. You can use Ctrl+C to exit. '
while True :
time . sleep ( 20 )
# So far, the Bitmessage protocol, this client, the Wiki, and the forums
# are all a one-man operation. Bitcoin tips are quite appreciated!
# 1H5XaDA6fYENLbknwZyjiYXYPQaFjjLX2u