| | |
#include <netdb.h>
#include <netinet/in.h> /* needed for sockaddr_in */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/socket.h>
#include <time.h>
#include <unistd.h>

#include "mini_blas.h"
#include "utils.h"
#include "parser.h"
#include "server.h"
#include "connected_layer.h"
#include "convolutional_layer.h"
| | | |
| | | #define SERVER_PORT 9876 |
| | | #define SERVER_PORT 9423 |
| | | #define STR(x) #x |
| | | |
| | | int socket_setup(int server) |
| | |
| | | |
| | | typedef struct{ |
| | | int fd; |
| | | int *counter; |
| | | int counter; |
| | | network net; |
| | | } connection_info; |
| | | |
| | | void read_all(int fd, char *buffer, size_t bytes) |
| | | { |
| | | size_t n = 0; |
| | | while(n < bytes){ |
| | | int next = read(fd, buffer + n, bytes-n); |
| | | if(next < 0) error("read failed"); |
| | | n += next; |
| | | } |
| | | } |
| | | |
| | | void write_all(int fd, char *buffer, size_t bytes) |
| | | { |
| | | size_t n = 0; |
| | | while(n < bytes){ |
| | | int next = write(fd, buffer + n, bytes-n); |
| | | if(next < 0) error("write failed"); |
| | | n += next; |
| | | } |
| | | } |
| | | |
| | | void read_and_add_into(int fd, float *a, int n) |
| | | { |
| | | float *buff = calloc(n, sizeof(float)); |
| | |
| | | |
| | | void handle_connection(void *pointer) |
| | | { |
| | | printf("New Connection\n"); |
| | | connection_info info = *(connection_info *) pointer; |
| | | free(pointer); |
| | | //printf("New Connection\n"); |
| | | if(info.counter%100==0){ |
| | | char buff[256]; |
| | | sprintf(buff, "unikitty/net_%d.part", info.counter); |
| | | save_network(info.net, buff); |
| | | } |
| | | int fd = info.fd; |
| | | network net = info.net; |
| | | ++*(info.counter); |
| | | int i; |
| | | for(i = 0; i < net.n; ++i){ |
| | | if(net.types[i] == CONVOLUTIONAL){ |
| | |
| | | write_all(fd, (char *)layer.weights, layer.outputs*layer.inputs*sizeof(float)); |
| | | } |
| | | } |
| | | printf("Received updates\n"); |
| | | //printf("Received updates\n"); |
| | | close(fd); |
| | | } |
| | | |
| | | void server_update(network net) |
| | | { |
| | | int fd = socket_setup(1); |
| | | int counter = 0; |
| | | listen(fd, 10); |
| | | int counter = 18000; |
| | | listen(fd, 64); |
| | | struct sockaddr_in client; /* remote address */ |
| | | socklen_t client_size = sizeof(client); /* length of addresses */ |
| | | connection_info info; |
| | | info.net = net; |
| | | info.counter = &counter; |
| | | time_t t=0; |
| | | while(1){ |
| | | connection_info *info = calloc(1, sizeof(connection_info)); |
| | | info->net = net; |
| | | info->counter = counter; |
| | | pthread_t worker; |
| | | int connection = accept(fd, (struct sockaddr *) &client, &client_size); |
| | | info.fd = connection; |
| | | pthread_create(&worker, NULL, (void *) &handle_connection, &info); |
| | | if(!t) t=time(0); |
| | | info->fd = connection; |
| | | pthread_create(&worker, NULL, (void *) &handle_connection, info); |
| | | ++counter; |
| | | printf("%d\n", counter); |
| | | //if(counter == 1024) break; |
| | | } |
| | | close(fd); |
| | | } |
| | | |
| | | void client_update(network net, char *address) |
| | |
| | | |
| | | /* send a message to the server */ |
| | | int i; |
| | | //printf("Sending\n"); |
| | | for(i = 0; i < net.n; ++i){ |
| | | if(net.types[i] == CONVOLUTIONAL){ |
| | | convolutional_layer layer = *(convolutional_layer *) net.layers[i]; |
| | |
| | | memset(layer.weight_updates, 0, layer.inputs*layer.outputs*sizeof(float)); |
| | | } |
| | | } |
| | | //printf("Sent\n"); |
| | | |
| | | for(i = 0; i < net.n; ++i){ |
| | | if(net.types[i] == CONVOLUTIONAL){ |
| | |
| | | int num = layer.n*layer.c*layer.size*layer.size; |
| | | read_all(fd, (char*) layer.filters, num*sizeof(float)); |
| | | |
| | | #ifdef GPU |
| | | push_convolutional_layer(layer); |
| | | #endif |
| | | } |
| | | if(net.types[i] == CONNECTED){ |
| | | connected_layer layer = *(connected_layer *) net.layers[i]; |
| | |
| | | read_all(fd, (char *)layer.biases, layer.outputs*sizeof(float)); |
| | | read_all(fd, (char *)layer.weights, layer.outputs*layer.inputs*sizeof(float)); |
| | | |
| | | #ifdef GPU |
| | | push_connected_layer(layer); |
| | | #endif |
| | | } |
| | | } |
| | | //printf("Updated\n"); |
| | | close(fd); |
| | | } |