bitmessage-js/src/pow.cc

152 lines
3.7 KiB
C++
Raw Normal View History

2015-01-09 22:36:42 +01:00
// Based on <https://github.com/grant-olson/bitmessage-powfaster>
// fastcpu implementation.
// TODO(Kagami): Port it to WIN32 (see bitmessage-powfaster for an
// example).
2015-01-09 23:36:28 +01:00
#define __STDC_LIMIT_MACROS
2015-01-09 22:36:42 +01:00
#include <stdint.h>
#include <string.h>
2015-01-10 17:29:33 +01:00
#include <pthread.h>
2015-01-09 22:36:42 +01:00
#include <arpa/inet.h>
#include <openssl/sha.h>
2015-01-11 03:59:32 +01:00
#include "./pow.h"
2015-01-09 22:36:42 +01:00
2015-01-11 03:59:32 +01:00
// Status codes shared between the worker threads and pow().
// Negative values are error codes returned to the caller.
enum PowResult {
  RESULT_OK = 0,          // A nonce satisfying the target was found
  RESULT_OVERFLOW = -1,   // Nonce space exhausted (nonce > max_nonce)
  RESULT_ERROR = -2,      // pthread_create() failed
  RESULT_BAD_INPUT = -3,  // Invalid pool size
  RESULT_NOT_READY = -4   // Computation still in progress (internal state)
};
2015-01-10 17:29:33 +01:00
2015-01-30 22:14:03 +01:00
// Shared POW parameters, owned by pow() and read by all worker threads.
// `result` and `nonce` are written only under `mutex` (see set_result);
// the remaining fields are immutable after initialization.
typedef struct {
  const size_t pool_size;       // Number of worker threads
  // Fix: was `const size_t target` — pow() passes a uint64_t and
  // pow_thread() compares 64-bit hash trials against it, so a size_t
  // field silently truncates the target on 32-bit platforms.
  const uint64_t target;
  const uint8_t* initial_hash;  // HASH_SIZE bytes; not owned, must outlive threads
  const uint64_t max_nonce;     // Upper bound of the nonce search
  PowResult result;             // Guarded by `mutex` for writes
  uint64_t nonce;               // Meaningful only when result == RESULT_OK
  pthread_mutex_t* mutex;       // Serializes result/nonce publication
} PowArgs;
2015-01-10 17:29:33 +01:00
2015-01-30 22:14:03 +01:00
// Thread-specific arguments.
typedef struct {
  size_t num;         // Thread number; also this thread's starting nonce offset
  PowArgs* pow_args;  // Shared POW state, owned by pow()
} ThreadArgs;
2015-01-10 17:29:33 +01:00
2015-09-29 15:32:36 +02:00
#ifndef ntohll
// Convert a 64-bit integer between network (big-endian) and host byte
// order by swapping the two 32-bit halves through ntohl(). On
// big-endian systems ntohl() is the identity, so this is too.
// Fix: declared `static` — a plain C99/C11 `inline` definition with no
// external definition can fail to link when the call is not inlined,
// and this is a file-private helper in any case.
static inline uint64_t ntohll(uint64_t x) {
  const uint32_t lo = (uint32_t)(x & 0xffffffffULL);
  const uint32_t hi = (uint32_t)(x >> 32);
  return ((uint64_t)ntohl(lo) << 32) | (uint64_t)ntohl(hi);
}
#endif
2015-01-10 23:33:30 +01:00
// Set POW computation result in a thread-safe way.
2015-01-30 22:14:03 +01:00
void set_result(PowArgs* pow_args, PowResult res, uint64_t nonce) {
pthread_mutex_lock(pow_args->mutex);
if (pow_args->result == RESULT_NOT_READY) {
pow_args->result = res;
pow_args->nonce = nonce;
2015-01-10 23:33:30 +01:00
}
2015-01-30 22:14:03 +01:00
pthread_mutex_unlock(pow_args->mutex);
2015-01-10 23:33:30 +01:00
}
2015-01-30 22:14:03 +01:00
// Worker thread: scans nonces num, num+pool_size, num+2*pool_size, ...
// computing trial = first 8 bytes of SHA512(SHA512(nonce || initial_hash))
// until some thread publishes a result or max_nonce is exceeded.
void* pow_thread(void* arg) {
  ThreadArgs* thread_args = (ThreadArgs *)arg;
  PowArgs* pow_args = thread_args->pow_args;
  // Copy some fixed POW args so the compiler can keep them in registers.
  const size_t pool_size = pow_args->pool_size;
  const uint64_t target = pow_args->target;
  const uint64_t max_nonce = pow_args->max_nonce;
  uint64_t i = thread_args->num;

  // message layout: [ 8-byte big-endian nonce | HASH_SIZE-byte initial hash ]
  uint8_t message[HASH_SIZE+sizeof(uint64_t)];
  uint8_t digest[HASH_SIZE];
  uint64_t be_nonce;
  uint64_t be_trial;
  SHA512_CTX sha;

  memcpy(message+sizeof(uint64_t), pow_args->initial_hash, HASH_SIZE);

  // NOTE(review): this unlocked read of `result` is a data race on the
  // field set_result() writes under the mutex; benign in practice (it
  // only delays loop exit) but an atomic flag would be strictly correct.
  while (pow_args->result == RESULT_NOT_READY) {
    // This is very unlikely to ever happen but it's better to be
    // sure anyway.
    if (i > max_nonce) {
      set_result(pow_args, RESULT_OVERFLOW, 0);
      return NULL;
    }
    // Fix: go through memcpy instead of casting the uint8_t buffers to
    // uint64_t* -- the old casts violated strict-aliasing and alignment
    // rules (undefined behavior). memcpy compiles to the same move on
    // mainstream compilers, and the double ntohll keeps the comparison
    // correct on both little- and big-endian hosts.
    be_nonce = ntohll(i);
    memcpy(message, &be_nonce, sizeof(be_nonce));
    SHA512_Init(&sha);
    SHA512_Update(&sha, message, HASH_SIZE+sizeof(uint64_t));
    SHA512_Final(digest, &sha);
    SHA512_Init(&sha);
    SHA512_Update(&sha, digest, HASH_SIZE);
    SHA512_Final(digest, &sha);
    memcpy(&be_trial, digest, sizeof(be_trial));
    if (ntohll(be_trial) <= target) {
      set_result(pow_args, RESULT_OK, i);
      return NULL;
    }
    i += pool_size;
  }
  return NULL;
}
int pow(size_t pool_size,
uint64_t target,
const uint8_t* initial_hash,
uint64_t max_nonce,
uint64_t* nonce) {
2015-01-11 03:59:32 +01:00
if (pool_size < 1 || pool_size > MAX_POOL_SIZE) {
return RESULT_BAD_INPUT;
}
2015-01-10 17:29:33 +01:00
2015-01-30 22:14:03 +01:00
// Initialize all structures on stack.
pthread_mutex_t mutex;
pthread_mutex_init(&mutex, NULL);
PowArgs pow_args = {
2015-01-30 22:28:42 +01:00
pool_size,
target,
initial_hash,
max_nonce ? max_nonce : INT64_MAX,
RESULT_NOT_READY,
0,
&mutex,
2015-01-30 22:14:03 +01:00
};
ThreadArgs threads_args[pool_size];
2015-01-10 17:29:33 +01:00
pthread_t threads[pool_size];
size_t i;
int error;
2015-01-30 22:14:03 +01:00
// Spawn threads.
2015-01-10 17:29:33 +01:00
for (i = 0; i < pool_size; i++) {
2015-01-30 22:28:42 +01:00
ThreadArgs args = {i, &pow_args};
2015-01-30 22:14:03 +01:00
threads_args[i] = args;
error = pthread_create(&threads[i], NULL, pow_thread, &threads_args[i]);
2015-01-10 17:29:33 +01:00
if (error) {
2015-01-30 22:14:03 +01:00
set_result(&pow_args, RESULT_ERROR, 0);
2015-01-09 22:36:42 +01:00
break;
}
}
2015-01-09 23:09:01 +01:00
2015-01-10 17:29:33 +01:00
// Wait for only spawned threads.
while (i--) {
pthread_join(threads[i], NULL);
}
2015-01-30 22:14:03 +01:00
// Set resulting nonce, cleanup and exit;
if (pow_args.result == RESULT_OK) {
*nonce = pow_args.nonce;
2015-01-10 17:29:33 +01:00
}
2015-01-30 22:14:03 +01:00
pthread_mutex_destroy(&mutex);
return pow_args.result;
2015-01-09 22:36:42 +01:00
}