You cannot select more than 25 topics
Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.
345 lines
8.5 KiB
C
345 lines
8.5 KiB
C
#define _GNU_SOURCE
|
|
#include "fetch.h"
|
|
|
|
// Model
|
|
#include "model/crypto/keys.h"
|
|
#include "model/crypto/http_sign.h"
|
|
#include "model/server.h"
|
|
#include "model/peer.h"
|
|
|
|
// Submodules
|
|
#include "http/client/client.h"
|
|
#include "http/url.h"
|
|
#include "util/format.h"
|
|
#include "ap/object.h"
|
|
#include "sha256/sha256.h"
|
|
|
|
// Standard Library
#include <sys/stat.h>
#include <ctype.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <strings.h>
#include <time.h>
#include <unistd.h>
|
|
|
|
size_t fetch_handle_header( char* header, size_t size, size_t nitems, void* user )
|
|
{
|
|
int bytes = size * nitems;
|
|
int result = bytes;
|
|
|
|
struct fetch_data* fd = user;
|
|
if( !fd ) {
|
|
printf( "No fetch data provided, skippig handling.\n" );
|
|
return result;
|
|
}
|
|
|
|
//printf( "? Header: |%.*s|\n", bytes, header );
|
|
|
|
if( 0 != strncmp("onion-location: http://",header,sizeof("onion-location: http://")-1 ) ) {
|
|
return result;
|
|
}
|
|
|
|
header += (sizeof("onion-location: http://")-1);
|
|
bytes -= (sizeof("onion-location: http://")-1);
|
|
|
|
//printf( "? Header: |%.*s|\n", bytes, header );
|
|
|
|
for( int i = 0; i < bytes; ++i ) {
|
|
if( header[i] == '/' ) {
|
|
header[i] = '\0';
|
|
break;
|
|
}
|
|
}
|
|
|
|
char* onion_host = strndup( header, bytes );
|
|
int pos = strlen(onion_host) - 1;
|
|
while( !isalnum(onion_host[pos]) && !( onion_host[pos] == '.' ) ) {
|
|
onion_host[pos] = '\0';
|
|
pos -= 1;
|
|
}
|
|
|
|
//printf( "+ Onion Host: |%s|\n", onion_host );
|
|
if( fd->p ) {
|
|
printf( "Updating peer onion host to %s\n", onion_host );
|
|
free(fd->p->tor_hidden_service);
|
|
fd->p->tor_hidden_service = onion_host;
|
|
onion_host = NULL;
|
|
}
|
|
|
|
free(onion_host);
|
|
|
|
return result;
|
|
}
|
|
static bool do_fetch_tor( const char* uri, struct fetch_data* fd, const char* result_filename )
|
|
{
|
|
char user_agent[512];
|
|
snprintf( user_agent, sizeof(user_agent), "User-Agent: Apogee/0.1; +https://%s/owner/actor", g_server->domain );
|
|
|
|
char proxy[512];
|
|
snprintf( proxy,512, "socks5h://localhost:%d", g_server->tor_socks_port );
|
|
|
|
FILE* result = fopen(result_filename,"w");
|
|
if( !result ) { return false; }
|
|
|
|
long status_code;
|
|
const void* request[] = {
|
|
HTTP_REQ_URL, uri,
|
|
HTTP_REQ_HEADER, "Accept: application/ld+json; profile=\"https://www.w3.org/ns/activitystreams\"",
|
|
HTTP_REQ_HEADER, user_agent,
|
|
HTTP_REQ_OUTFILE, result,
|
|
HTTP_REQ_RESULT_STATUS, &status_code,
|
|
HTTP_RES_HEADER_CALLBACK, fetch_handle_header, (void*)fd,
|
|
HTTP_REQ_PROXY, proxy,
|
|
HTTP_REQ_TIMEOUT, (void*)10,
|
|
NULL,
|
|
};
|
|
printf( "GET %s\n", uri );
|
|
http_client_do( request );
|
|
printf( "GET %s -> %ld\n", uri, status_code );
|
|
|
|
fclose(result);
|
|
|
|
if( status_code != 200 ) {
|
|
unlink(result_filename);
|
|
}
|
|
|
|
return status_code == 200;
|
|
}
|
|
static bool do_fetch_clearnet( const char* uri, struct fetch_data* fd, const char* result_filename )
|
|
{
|
|
char user_agent[512];
|
|
snprintf( user_agent, sizeof(user_agent), "User-Agent: curl (Apogee/0.1; +https://%s/owner/actor)", g_server->domain );
|
|
|
|
FILE* result = fopen( result_filename, "w" );
|
|
if( !result ) { return false; }
|
|
|
|
long status_code;
|
|
const void* request[] = {
|
|
HTTP_REQ_URL, uri,
|
|
HTTP_REQ_HEADER, "Accept: application/ld+json; profile=\"https://www.w3.org/ns/activitystreams\"",
|
|
HTTP_REQ_HEADER, user_agent,
|
|
HTTP_REQ_OUTFILE, result,
|
|
HTTP_REQ_RESULT_STATUS, &status_code,
|
|
HTTP_RES_HEADER_CALLBACK, fetch_handle_header, (void*)fd,
|
|
NULL,
|
|
};
|
|
printf( "GET %s\n", uri );
|
|
http_client_do( request );
|
|
printf( "GET %s -> %ld\n", uri, status_code );
|
|
|
|
fclose(result);
|
|
|
|
if( status_code != 200 ) {
|
|
unlink( result_filename );
|
|
}
|
|
|
|
return status_code == 200;
|
|
}
|
|
static bool do_fetch_signed_clearnet( const char* uri, struct fetch_data* fd, const char* result_filename )
|
|
{
|
|
FILE* result = fopen(result_filename,"w");
|
|
if( !result ) { return false; }
|
|
|
|
char user_agent[512];
|
|
snprintf( user_agent, sizeof(user_agent), "User-Agent: curl (Apogee/0.1; +https://%s/owner/actor)", g_server->domain );
|
|
|
|
// Load crypto keys
|
|
struct crypto_keys* keys = crypto_keys_new();
|
|
if( !crypto_keys_load_private( keys, "data/owner/private.pem" ) ) {
|
|
printf( "Failed to load private key\n" );
|
|
return false;
|
|
}
|
|
|
|
// TODO: do signed fetch
|
|
struct http_signature hs;
|
|
memset( &hs, 0, sizeof(hs) );
|
|
hs.input.method = "get";
|
|
hs.input.url = uri;
|
|
|
|
if( !http_signature_make( keys, &hs ) ) {
|
|
return false;
|
|
}
|
|
|
|
char date_header[512];
|
|
snprintf( date_header, sizeof(date_header), "Date: %s", hs.date );
|
|
printf( "date_header = %s\n", date_header );
|
|
|
|
char sign_header[512];
|
|
snprintf( sign_header, sizeof(sign_header), "Signature: keyId=\"https://%s/owner/actor#mainKey\",headers=\"(request-target) host date content-length digest\",signature=\"%s\"",
|
|
g_server->domain,
|
|
hs.signature
|
|
);
|
|
printf( "sign_header = %s\n", sign_header );
|
|
|
|
char digest_header[512];
|
|
snprintf( digest_header, sizeof(digest_header), "Digest: %s", hs.digest );
|
|
printf( "digest_header = %s\n", digest_header );
|
|
|
|
char content_length_header[512];
|
|
snprintf( content_length_header, sizeof(content_length_header), "Content-Length: %d", hs.content_length );
|
|
printf( "content_length_header = %s\n", content_length_header );
|
|
|
|
long status_code = 0;
|
|
const void* request[] = {
|
|
HTTP_REQ_URL, uri,
|
|
HTTP_REQ_HEADER, user_agent,
|
|
HTTP_REQ_HEADER, date_header,
|
|
HTTP_REQ_HEADER, sign_header,
|
|
HTTP_REQ_HEADER, content_length_header,
|
|
HTTP_REQ_HEADER, digest_header,
|
|
HTTP_REQ_HEADER, "Accept: application/ld+json; profile=\"https://www.w3.org/ns/activitystreams\"",
|
|
HTTP_REQ_OUTFILE, result,
|
|
HTTP_REQ_RESULT_STATUS, &status_code,
|
|
NULL,
|
|
};
|
|
|
|
if( !http_client_do( request ) ) {
|
|
printf( "Failed to perform get, status_code = %ld\n", status_code );
|
|
}
|
|
printf( "GET %s -> %ld (first signed)\n", uri, status_code );
|
|
fclose(result);
|
|
|
|
if( status_code == 200 ) { return true; }
|
|
|
|
result = fopen(result_filename,"w");
|
|
|
|
const void* request2[] = {
|
|
HTTP_REQ_URL, uri,
|
|
HTTP_REQ_HEADER, user_agent,
|
|
HTTP_REQ_HEADER, date_header,
|
|
HTTP_REQ_HEADER, sign_header,
|
|
HTTP_REQ_HEADER, content_length_header,
|
|
HTTP_REQ_HEADER, digest_header,
|
|
HTTP_REQ_HEADER, "Accept: application/activity+json",
|
|
HTTP_REQ_OUTFILE, result,
|
|
HTTP_REQ_RESULT_STATUS, &status_code,
|
|
HTTP_REQ_TIMEOUT, (void*)10,
|
|
NULL,
|
|
};
|
|
|
|
if( !http_client_do( request2 ) ) {
|
|
printf( "Failed to perform get, status_code = %ld\n", status_code );
|
|
}
|
|
printf( "GET %s -> %ld (second signed)\n", uri, status_code );
|
|
fclose(result);
|
|
|
|
if( status_code == 200 ) { return true; }
|
|
|
|
unlink(result_filename);
|
|
return false;
|
|
}
|
|
|
|
static bool do_fetch( const char* uri, const char* fh )
|
|
{
|
|
// Setup fetch data
|
|
struct fetch_data fd;
|
|
memset(&fd,0,sizeof(fd));
|
|
{
|
|
char host_domain[512];
|
|
if( url_get_domain( uri, host_domain, sizeof(host_domain) ) ) {
|
|
fd.p = peer_from_domain(host_domain);
|
|
}
|
|
}
|
|
|
|
bool result = false;
|
|
|
|
if( do_fetch_tor( uri, &fd, fh ) ) {
|
|
goto success;
|
|
}
|
|
if( do_fetch_clearnet( uri, &fd, fh ) ) {
|
|
goto success;
|
|
}
|
|
if( do_fetch_signed_clearnet( uri, &fd, fh ) ) {
|
|
goto success;
|
|
}
|
|
|
|
goto failed;
|
|
cleanup:
|
|
// Save peer data
|
|
if( fd.p ) {
|
|
peer_save( fd.p );
|
|
peer_free( fd.p );
|
|
}
|
|
return result;
|
|
failed:
|
|
result = false;
|
|
goto cleanup;
|
|
success:
|
|
result = true;
|
|
goto cleanup;
|
|
}
|
|
|
|
// Download `uri` into `filename`. Downloads to "<filename>.tmp" first so a
// failed fetch never clobbers an existing good copy, then renames the temp
// file into place. Returns true only if the file was fetched AND installed.
bool pull_remote_file( const char* filename, const char* uri )
{
	printf( "* Fetching %s\n", uri );
	char tmp_filename[512];
	snprintf( tmp_filename, sizeof(tmp_filename), "%s.tmp", filename );

	bool result = false;
	if( do_fetch( uri, tmp_filename ) ) {
		// rename() can fail (e.g. cross-device, permissions); previously its
		// result was ignored and success was reported anyway.
		if( 0 == rename( tmp_filename, filename ) ) {
			result = true;
		} else {
			perror( "rename" );
		}
	}

	return result;
}
|
|
// Refresh `filename` from `uri` only when the cached copy is missing or older
// than `seconds`. A leftover .tmp file from a failed fetch acts as a negative
// cache: while it is younger than `seconds`, the fetch is suppressed.
// Returns true when a valid cached file exists (fresh or just fetched).
bool pull_remote_file_if_older( const char* filename, const char* uri, int seconds )
{
	struct stat s;
	char tmp_filename[512];
	snprintf( tmp_filename, sizeof(tmp_filename), "%s.tmp", filename );

	// Skip download if a .tmp file exists from a failed fetch and is within
	// the timeout window — avoids hammering a host that just failed.
	if( 0 == stat( tmp_filename, &s ) ) {
		if( time(NULL) - s.st_mtime <= seconds ) {
			printf( "Fetch of %s suppressed due to failed fetch within %d seconds\n", uri, seconds );
			return false;
		}
	}

	// Re-fetch when the cached file is missing or stale.
	if( 0 != stat( filename, &s ) ) { goto pull; }
	if( time(NULL) - s.st_mtime > seconds ) { goto pull; }

	return true;
pull:
	return pull_remote_file( filename, uri );
}
|
|
|
|
// Fetch `uri` into the on-disk cache, keyed by the SHA-256 of the URI.
// Returns a heap-allocated path to the cached file (caller frees), or NULL
// when the fetch failed/was suppressed or allocation failed.
char* fetch_remote_file_to_cache( const char* uri, int seconds )
{
	// Cache key: hex SHA-256 of the URI (64 chars + NUL).
	char hash[65];
	sha256_easy_hash_hex( uri, strlen(uri), hash );
	hash[64] = 0;

	mkdir( "data/cache", 0755 ); // best-effort; EEXIST is fine

	// On asprintf failure the pointer's contents are undefined, so bail out
	// before using it (previously the NULL/undefined pointer flowed onward).
	char* filename = NULL;
	if( asprintf( &filename, "data/cache/%s.dat", hash ) < 0 ) {
		return NULL;
	}
	printf( "Using %s for %s\n", filename, uri );

	if( !pull_remote_file_if_older( filename, uri, seconds ) ) {
		free( filename );
		return NULL;
	}

	return filename;
}
|
|
|
|
// Default cache lifetime, in seconds, for fetched remote objects
// (used by fetch_ap_object_ref below).
enum {
	//LIFE = 60*60*24*2 // 2 days
	LIFE = 60*30 // 30 minutes
};
|
|
|
|
struct ap_object* fetch_ap_object_ref( const char* uri )
|
|
{
|
|
char* filename = fetch_remote_file_to_cache(uri, LIFE );
|
|
|
|
if( !filename ) { return NULL; }
|
|
|
|
struct ap_object* res = ap_object_from_file(filename);
|
|
free(filename);
|
|
return res;
|
|
}
|
|
|
|
|