#define _GNU_SOURCE
#include "fetch.h"
// Model
#include "model/crypto/keys.h"
#include "model/crypto/http_sign.h"
#include "model/server.h"
// Submodules
#include "http/client/client.h"
#include "util/format.h"
#include "ap/object.h"
#include "sha256/sha256.h"
// Standard Library
#include <sys/stat.h>
#include <string.h>
#include <time.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
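
// Response-header callback (curl-style signature). Watches for an
// "onion-location:" header, truncates its value at the first '/', and logs
// the result as the announced onion host. Always reports the whole header
// as consumed so the transfer continues normally.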
static size_t handle_header( char* header, size_t size, size_t nitems, void* user )
{
    int bytes = size * nitems;
    int result = bytes;
    //printf( "? Header: |%.*s|\n", bytes, header );
    if( 0 != strncmp("onion-location: ",header,sizeof("onion-location: ")-1 ) ) {
        return result;
    }
    header += sizeof("onion-location: ")-1;
    bytes -= sizeof("onion-location: ")-1;
    for( int i = 0; i < bytes; ++i ) {
        if( header[i] == '/' ) {
            header[i] = '\0';
            break;
        }
    }
    char* onion_host = strndup( header, bytes );
    printf( "+ Onion Host: |%s|\n", onion_host );
    free(onion_host);
    return result;
}
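
// Fetch `uri` as ActivityStreams JSON-LD into `result`. Tries a plain GET
// first; on a non-200 response it retries once with Date, Digest,
// Content-Length and HTTP Signature headers derived from the instance's
// private key. Returns true only if the final status is 200.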
static bool do_fetch( const char* uri, FILE* result )
{
    char user_agent[512];
    snprintf( user_agent, sizeof(user_agent), "User-Agent: curl (Apogee/0.1; +https://%s/owner/actor)", g_server->domain );

    long status_code = 0;
    const void* request[] = {
        HTTP_REQ_URL, uri,
        HTTP_REQ_HEADER, "Accept: application/ld+json; profile=\"https://www.w3.org/ns/activitystreams\"",
        HTTP_REQ_HEADER, user_agent,
        HTTP_REQ_OUTFILE, result,
        HTTP_REQ_RESULT_STATUS, &status_code,
        HTTP_RES_HEADER_CALLBACK, handle_header,
        NULL,
    };
    printf( "GET %s\n", uri );
    http_client_do( request );
    printf( "GET %s -> %ld\n", uri, status_code );

    if( status_code != 200 ) {
        printf( "Retrying request with HTTP Signature header...\n" );

        // Load crypto keys
        struct crypto_keys* keys = crypto_keys_new();
        if( !crypto_keys_load_private( keys, "data/owner/private.pem" ) ) {
            printf( "Failed to load private key\n" );
            return false;
        }

        // Build the date, digest and signature values for a signed GET
        struct http_signature hs;
        memset( &hs, 0, sizeof(hs) );
        hs.input.method = "get";
        hs.input.url = uri;
        if( !http_signature_make( keys, &hs ) ) {
            return false;
        }

        char date_header[512];
        snprintf( date_header, sizeof(date_header), "Date: %s", hs.date );
        printf( "date_header = %s\n", date_header );

        char sign_header[512];
        snprintf( sign_header, sizeof(sign_header), "Signature: keyId=\"https://%s/owner/actor#mainKey\",headers=\"(request-target) host date content-length digest\",signature=\"%s\"",
            g_server->domain,
            hs.signature
        );
        printf( "sign_header = %s\n", sign_header );

        char digest_header[512];
        snprintf( digest_header, sizeof(digest_header), "Digest: %s", hs.digest );
        printf( "digest_header = %s\n", digest_header );

        char content_length_header[512];
        snprintf( content_length_header, sizeof(content_length_header), "Content-Length: %d", hs.content_length );
        printf( "content_length_header = %s\n", content_length_header );

        // Rewind the output file so the retry overwrites the failed body
        fseek( result, 0, SEEK_SET );

        const void* request2[] = {
            HTTP_REQ_URL, uri,
            HTTP_REQ_HEADER, user_agent,
            HTTP_REQ_HEADER, date_header,
            HTTP_REQ_HEADER, sign_header,
            HTTP_REQ_HEADER, content_length_header,
            HTTP_REQ_HEADER, digest_header,
            HTTP_REQ_HEADER, "Accept: application/ld+json; profile=\"https://www.w3.org/ns/activitystreams\"",
            HTTP_REQ_OUTFILE, result,
            HTTP_REQ_RESULT_STATUS, &status_code,
            NULL,
        };
        if( !http_client_do( request2 ) ) {
            printf( "Failed to perform get, status_code = %ld\n", status_code );
        }
        printf( "GET %s -> %ld\n", uri, status_code );
    }
    return ( status_code == 200 );
}
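
// Download `uri` into "<filename>.tmp", then rename it over `filename` on
// success. On failure the temporary file is truncated but kept, so its
// mtime can act as a marker for the retry backoff below.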
bool pull_remote_file( const char* filename, const char* uri )
{
    printf( "* Fetching %s\n", uri );
    char tmp_filename[512];
    FILE* f = fopen(format(tmp_filename,512,"%s.tmp",filename),"w");
    if( !f ) {
        printf( "Failed to open %s for writing\n", tmp_filename );
        return false;
    }
    bool result = false;
    if( do_fetch( uri, f ) ) {
        fclose(f);
        rename(tmp_filename,filename);
        result = true;
    } else {
        fclose(f);
        // Dump file contents to prevent holding large amounts of disk space inadvertently
        truncate(tmp_filename,0);
    }
    return result;
}
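
// Refresh `filename` from `uri` only when the local copy is missing or older
// than `seconds`. A fresh "<filename>.tmp" left by a failed fetch suppresses
// the download for the same window, so a broken remote is not hammered.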
bool pull_remote_file_if_older( const char* filename, const char* uri, int seconds )
{
    struct stat s;
    char tmp_filename[512];
    snprintf( tmp_filename,512, "%s.tmp", filename );
    // Skip download if .tmp file exists from a failed fetch and is within the timeout window
    if( 0 == stat(tmp_filename, &s ) ) {
        if( time(NULL) - s.st_mtime <= seconds ) {
            printf( "Fetch of %s suppressed due to failed fetch within %d seconds\n", uri, seconds );
            return false;
        }
    }
    if( 0 != stat(filename, &s ) ) { goto pull; }
    if( time(NULL) - s.st_mtime > seconds ) { goto pull; }
    return true;
pull:
    return pull_remote_file( filename, uri );
}
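
// Map `uri` to a file under data/cache/ named by the SHA-256 hex digest of
// the URI, refreshing it if older than `seconds`. Returns a heap-allocated
// path the caller must free(), or NULL if the file could not be fetched.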
char* fetch_remote_file_to_cache( const char* uri, int seconds )
{
    char* filename = NULL;
    char hash[65];
    sha256_easy_hash_hex( uri, strlen(uri), hash );
    hash[64] = 0;
    mkdir( "data/cache", 0755 );
    if( asprintf( &filename, "data/cache/%s.dat", hash ) < 0 ) { return NULL; }
    printf( "Using %s for %s\n", filename, uri );
    if( !pull_remote_file_if_older(filename,uri,seconds) ) {
        free(filename);
        return NULL;
    }
    return filename;
}
enum {
    //LIFE = 60*60*24*2 // 2 days
    LIFE = 60*30 // 30 minutes
};
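
// Fetch an ActivityPub object by URI through the on-disk cache, re-fetching
// at most once per LIFE seconds, and parse it into an ap_object.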
struct ap_object* fetch_ap_object_ref( const char* uri )
{
    char* filename = fetch_remote_file_to_cache(uri, LIFE );
    if( !filename ) { return NULL; }
    struct ap_object* res = ap_object_from_file(filename);
    free(filename);
    return res;
}