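// network_worker: a pool of background threads that performs blocking
// network I/O on behalf of the rest of the game.  Outgoing config data is
// serialised and framed into buffers that worker threads send; incoming
// packets are read, decompressed/parsed and queued for the main thread to
// collect.  Sockets are partitioned into NUM_SHARDS shards, each with its
// own mutex, condition variable and thread set, to reduce lock contention.
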
#include "global.hpp"

#include "log.hpp"
#include "network_worker.hpp"
#include "network.hpp"
#include "thread.hpp"
#include "serialization/binary_or_text.hpp"
#include "serialization/binary_wml.hpp"
#include "serialization/parser.hpp"

#include <algorithm>
#include <cassert>
#include <cerrno>
#include <cstring>
#include <deque>
#include <iostream>
#include <map>
#include <vector>

#include <boost/iostreams/filter/gzip.hpp>

#ifdef __AMIGAOS4__
#include <unistd.h>
#endif

#if defined(_WIN32) || defined(__WIN32__) || defined(WIN32)
# undef INADDR_ANY
# undef INADDR_BROADCAST
# undef INADDR_NONE
# include <windows.h>
# define USE_SELECT 1
typedef int socklen_t;
#else
# include <sys/types.h>
# include <sys/socket.h>
# ifdef __BEOS__
#  include <socket.h>
# else
#  include <fcntl.h>
# endif
# define SOCKET int
# ifdef HAVE_POLL_H
#  define USE_POLL 1
#  include <poll.h>
# elif defined(HAVE_SYS_POLL_H)
#  define USE_POLL 1
#  include <sys/poll.h>
# endif
# ifndef USE_POLL
#  define USE_SELECT 1
#  ifdef HAVE_SYS_SELECT_H
#   include <sys/select.h>
#  else
#   include <sys/time.h>
#   include <sys/types.h>
#   include <unistd.h>
#  endif
# endif
#endif

#define DBG_NW LOG_STREAM(debug, network)
#define LOG_NW LOG_STREAM(info, network)
#define ERR_NW LOG_STREAM(err, network)

namespace {
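
// Mirror of SDL_net's internal TCPsocket structure.  The code below casts
// TCPsocket handles to this layout so it can reach the raw file descriptor
// (channel) for poll()/select() and non-blocking recv().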
struct _TCPsocket {
	int ready;
	SOCKET channel;
	IPaddress remoteAddress;
	IPaddress localAddress;
	int sflag;
};

#ifndef NUM_SHARDS
#define NUM_SHARDS 1
#endif

unsigned int waiting_threads[NUM_SHARDS];
size_t min_threads = 0;
size_t max_threads = 0;

size_t get_shard(TCPsocket sock) { return reinterpret_cast<uintptr_t>(sock)%NUM_SHARDS; }

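// A buffer is used in both directions: for sends, "stream" holds the
// serialised WML and "raw_buffer" the framed bytes actually written to the
// socket; for receives, "raw_buffer" holds raw data (raw_data_only mode) or
// "config_buf"/"config_error" hold the parsed result.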
struct buffer {
	explicit buffer(TCPsocket sock) :
		sock(sock),
		config_buf(),
		config_error(""),
		gzipped(false)
	{}

	TCPsocket sock;
	mutable config config_buf;
	std::string config_error;
	std::ostringstream stream;

	bool gzipped;

	std::vector<char> raw_buffer;
};

bool managed = false, raw_data_only = false;
typedef std::vector< buffer* > buffer_set;
buffer_set outgoing_bufs[NUM_SHARDS];

struct schema_pair
{
	compression_schema incoming, outgoing;
};

typedef std::map<TCPsocket,schema_pair> schema_map;

schema_map schemas;

typedef std::vector<TCPsocket> receive_list;
receive_list pending_receives[NUM_SHARDS];

typedef std::deque<buffer*> received_queue;
received_queue received_data_queue;

enum SOCKET_STATE { SOCKET_READY, SOCKET_LOCKED, SOCKET_ERRORED, SOCKET_INTERRUPT };
typedef std::map<TCPsocket,SOCKET_STATE> socket_state_map;
typedef std::map<TCPsocket, std::pair<network::statistics,network::statistics> > socket_stats_map;

socket_state_map sockets_locked[NUM_SHARDS];
socket_stats_map transfer_stats;

int socket_errors[NUM_SHARDS];
threading::mutex* shard_mutexes[NUM_SHARDS];
threading::mutex* stats_mutex = NULL;
threading::mutex* schemas_mutex = NULL;
threading::mutex* received_mutex = NULL;
threading::condition* cond[NUM_SHARDS];

std::map<Uint32,threading::thread*> threads[NUM_SHARDS];
std::vector<Uint32> to_clear[NUM_SHARDS];
int system_send_buffer_size = 0;

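// Reads up to nbytes from the socket.  With NETWORK_USE_RAW_SOCKETS the raw
// descriptor is read with MSG_DONTWAIT (non-blocking, restarted on EINTR);
// otherwise SDLNet_TCP_Recv() is used.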
int receive_bytes(TCPsocket s, char* buf, size_t nbytes)
{
#ifdef NETWORK_USE_RAW_SOCKETS
	_TCPsocket* sock = reinterpret_cast<_TCPsocket*>(s);
	int res = 0;
	do {
		errno = 0;
		res = recv(sock->channel, buf, nbytes, MSG_DONTWAIT);
	} while(errno == EINTR);
	sock->ready = 0;
	return res;
#else
	return SDLNet_TCP_Recv(s, buf, nbytes);
#endif
}

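// Queries the kernel's send buffer size once and caches it; send_buffer()
// uses it to cap the size of each SDLNet_TCP_Send() call.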
void check_send_buffer_size(TCPsocket& s)
{
	if (system_send_buffer_size)
		return;

	_TCPsocket* sock = reinterpret_cast<_TCPsocket*>(s);
	socklen_t len = sizeof(system_send_buffer_size);
	// Query SO_SNDBUF, not SO_RCVBUF: it is the send buffer we care about.
#ifdef WIN32
	getsockopt(sock->channel, SOL_SOCKET, SO_SNDBUF, reinterpret_cast<char*>(&system_send_buffer_size), &len);
#else
	getsockopt(sock->channel, SOL_SOCKET, SO_SNDBUF, &system_send_buffer_size, &len);
#endif
	--system_send_buffer_size;
	DBG_NW << "send buffer size: " << system_send_buffer_size << "\n";
}

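// Reads exactly nbytes, waiting (via poll/select) whenever the socket would
// block.  Returns false on disconnect, error or timeout.  The timeout is
// restarted every time some data arrives.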
bool receive_with_timeout(TCPsocket s, char* buf, size_t nbytes,
		bool update_stats=false, int timeout_ms=60000)
{
	int startTicks = SDL_GetTicks();
	int time_used = 0;
	while(nbytes > 0) {
		const int bytes_read = receive_bytes(s, buf, nbytes);
		if(bytes_read == 0) {
			return false;
		} else if(bytes_read < 0) {
#if defined(EAGAIN) && !defined(__BEOS__) && !defined(_WIN32)
			if(errno == EAGAIN)
#elif defined(EWOULDBLOCK)
			if(errno == EWOULDBLOCK)
#else
			if(true)
#endif
			{
				// No data available yet: wait until the socket becomes
				// readable or the timeout expires.
				time_used = SDL_GetTicks() - startTicks;
				if(time_used >= timeout_ms) {
					return false;
				}
#ifdef USE_POLL
				struct pollfd fd = { ((_TCPsocket*)s)->channel, POLLIN, 0 };
				int poll_res;
				do {
					time_used = SDL_GetTicks() - startTicks;
					poll_res = poll(&fd, 1, timeout_ms - time_used);
				} while(poll_res == -1 && errno == EINTR);

#elif defined(USE_SELECT)
				fd_set readfds;
				FD_ZERO(&readfds);
				FD_SET(((_TCPsocket*)s)->channel, &readfds);
				int retval;
				int time_left;
				struct timeval tv;

				do {
					time_used = SDL_GetTicks() - startTicks;
					time_left = timeout_ms - time_used;
					tv.tv_sec = time_left / 1000;
					// tv_usec is in microseconds, so convert the leftover milliseconds.
					tv.tv_usec = (time_left % 1000) * 1000;
					retval = select(((_TCPsocket*)s)->channel + 1, &readfds, NULL, NULL, &tv);
				} while(retval == -1 && errno == EINTR);

#else
				SDL_Delay(5);
#endif
			} else {
				return false;
			}
		} else {
			buf += bytes_read;
			if(update_stats && !raw_data_only) {
				const threading::lock lock(*stats_mutex);
				transfer_stats[s].second.transfer(static_cast<size_t>(bytes_read));
			}

			if(bytes_read > static_cast<int>(nbytes)) {
				return false;
			}
			nbytes -= bytes_read;
			// Some data arrived, so restart the timeout.
			startTicks = SDL_GetTicks();
		}
	}

	return true;
}

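// Serialises a config either as gzip-compressed WML (config_writer) or with
// the per-socket binary compression_schema shared with the peer.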
static void output_to_buffer(TCPsocket sock, const config& cfg, std::ostringstream& compressor, bool gzipped)
{
	if(gzipped) {
		config_writer writer(compressor, true, "");
		writer.write(cfg);
	} else {
		compression_schema *compress;
		{
			const threading::lock lock(*schemas_mutex);
			compress = &schemas.insert(std::pair<TCPsocket,schema_pair>(sock,schema_pair())).first->second.outgoing;
		}
		write_compressed(compressor, cfg, *compress);
	}
}

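// Wire format: a 4-byte big-endian length (payload size plus the trailing
// NUL), followed by the payload, followed by a single NUL byte.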
static void make_network_buffer(const char* input, int len, std::vector<char>& buf)
{
	buf.resize(4 + len + 1);
	SDLNet_Write32(len + 1, &buf[0]);
	memcpy(&buf[4], input, len);
	buf.back() = 0;
}

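// Writes a framed buffer to the socket in chunks no larger than the system
// send buffer, waiting with poll/select when the socket would block.  Bails
// out with SOCKET_ERRORED if the socket is no longer marked SOCKET_LOCKED
// (e.g. close_socket() interrupted it).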
static SOCKET_STATE send_buffer(TCPsocket sock, std::vector<char>& buf)
{
#ifdef __BEOS__
	int timeout = 15000;
#endif
	check_send_buffer_size(sock);
	size_t upto = 0;
	size_t size = buf.size();
	int send_len = 0;

	if (!raw_data_only)
	{
		const threading::lock lock(*stats_mutex);
		transfer_stats[sock].first.fresh_current(size);
	}
#ifdef __BEOS__
	while(upto < size && timeout > 0) {
#else
	while(true) {
#endif
		{
			const size_t shard = get_shard(sock);
			// The caller locked this socket for us; if it is no longer
			// locked, the socket has been closed or errored in the meantime.
			const threading::lock lock(*shard_mutexes[shard]);
			if(sockets_locked[shard][sock] != SOCKET_LOCKED)
				return SOCKET_ERRORED;
		}
		send_len = std::min<int>(system_send_buffer_size, static_cast<int>(size - upto));
		const int res = SDLNet_TCP_Send(sock, &buf[upto], send_len);

		if(res == static_cast<int>(size - upto)) {
			if (!raw_data_only)
			{
				const threading::lock lock(*stats_mutex);
				transfer_stats[sock].first.transfer(static_cast<size_t>(res));
			}
			return SOCKET_READY;
		}
#if defined(EAGAIN) && !defined(__BEOS__) && !defined(_WIN32)
		if(errno == EAGAIN)
#elif defined(EWOULDBLOCK)
		if(errno == EWOULDBLOCK)
#endif
		{
			// Partial send: record progress and wait until the socket is
			// writable again.
			upto += static_cast<size_t>(res);
			if (!raw_data_only)
			{
				const threading::lock lock(*stats_mutex);
				transfer_stats[sock].first.transfer(static_cast<size_t>(res));
			}

#ifdef USE_POLL
			struct pollfd fd = { ((_TCPsocket*)sock)->channel, POLLOUT, 0 };
			int poll_res;
			do {
				poll_res = poll(&fd, 1, 15000);
			} while(poll_res == -1 && errno == EINTR);

			if(poll_res > 0)
				continue;
#elif defined(USE_SELECT) && !defined(__BEOS__)
			fd_set writefds;
			FD_ZERO(&writefds);
			FD_SET(((_TCPsocket*)sock)->channel, &writefds);
			int retval;
			struct timeval tv;
			tv.tv_sec = 15;
			tv.tv_usec = 0;

			do {
				retval = select(((_TCPsocket*)sock)->channel + 1, NULL, &writefds, NULL, &tv);
			} while(retval == -1 && errno == EINTR);

			if(retval > 0)
				continue;
#elif defined(__BEOS__)
			if(res > 0) {
				// Some data was sent, so reset the timeout.
				timeout = 15000;
			} else {
				// Nothing was sent; back off briefly.
				SDL_Delay(100);
				timeout -= 100;
			}
			continue;
#endif
		}

		return SOCKET_ERRORED;
	}
}

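// Reads one framed packet: the 4-byte length header first, then the payload
// into buf.  Lengths outside 1..100000000 are treated as a protocol error.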
static SOCKET_STATE receive_buf(TCPsocket sock, std::vector<char>& buf)
{
#ifdef __GNUC__
	// Keep the length header aligned so SDLNet_Read32() can read it safely.
	char num_buf[4] __attribute__ ((aligned (4)));
#else
	char num_buf[4];
#endif
	bool res = receive_with_timeout(sock, num_buf, 4, false);

	if(!res) {
		return SOCKET_ERRORED;
	}

	const int len = SDLNet_Read32(num_buf);

	if(len < 1 || len > 100000000) {
		return SOCKET_ERRORED;
	}

	buf.resize(len);
	char* beg = &buf[0];
	const char* const end = beg + len;

	if (!raw_data_only)
	{
		const threading::lock lock(*stats_mutex);
		transfer_stats[sock].second.fresh_current(len);
	}

	res = receive_with_timeout(sock, beg, end - beg, true);
	if(!res) {
		return SOCKET_ERRORED;
	}

	return SOCKET_READY;
}

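// Publishes the result of a send/receive back into the shard's state map and
// counts errors so detect_error() can report them.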
inline void check_socket_result(TCPsocket& sock, SOCKET_STATE& result)
{
	const size_t shard = get_shard(sock);
	const threading::lock lock(*shard_mutexes[shard]);
	socket_state_map::iterator lock_it = sockets_locked[shard].find(sock);
	assert(lock_it != sockets_locked[shard].end());
	lock_it->second = result;
	if(result == SOCKET_ERRORED) {
		++socket_errors[shard];
	}
}

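// Worker thread main loop for one shard: wait for a queued send or a pending
// receive whose socket is SOCKET_READY, lock it, perform the blocking I/O
// outside the shard mutex, then publish the result.  A thread retires when
// min_threads is set and at least min_threads workers are already idle, and
// a replacement is spawned when the last idle worker picks up a job (up to
// max_threads threads per shard).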
static int process_queue(void* shard_num)
{
	size_t shard = static_cast<size_t>(reinterpret_cast<uintptr_t>(shard_num));
	DBG_NW << "thread started...\n";
	for(;;) {

		// Grab a job under the shard mutex: either an outgoing buffer to
		// send or a socket with a pending receive.
		TCPsocket sock = NULL;
		buffer* sent_buf = 0;

		{
			const threading::lock lock(*shard_mutexes[shard]);
			while(managed && !to_clear[shard].empty()) {
				// Reap worker threads that have already exited.
				Uint32 tmp = to_clear[shard].back();
				to_clear[shard].pop_back();
				threading::thread *zombie = threads[shard][tmp];
				threads[shard].erase(tmp);
				delete zombie;
			}
			if(min_threads && waiting_threads[shard] >= min_threads) {
				DBG_NW << "worker thread exiting... not enough jobs\n";
				to_clear[shard].push_back(threading::get_current_thread_id());
				return 0;
			}
			waiting_threads[shard]++;
			for(;;) {
				// Prefer sends over receives.
				buffer_set::iterator itor = outgoing_bufs[shard].begin(), itor_end = outgoing_bufs[shard].end();
				for(; itor != itor_end; ++itor) {
					socket_state_map::iterator lock_it = sockets_locked[shard].find((*itor)->sock);
					assert(lock_it != sockets_locked[shard].end());
					if(lock_it->second == SOCKET_READY) {
						lock_it->second = SOCKET_LOCKED;
						sent_buf = *itor;
						sock = sent_buf->sock;
						outgoing_bufs[shard].erase(itor);
						break;
					}
				}

				if(sock == NULL) {
					receive_list::iterator itor = pending_receives[shard].begin(), itor_end = pending_receives[shard].end();
					for(; itor != itor_end; ++itor) {
						socket_state_map::iterator lock_it = sockets_locked[shard].find(*itor);
						assert(lock_it != sockets_locked[shard].end());
						if(lock_it->second == SOCKET_READY) {
							lock_it->second = SOCKET_LOCKED;
							sock = *itor;
							pending_receives[shard].erase(itor);
							break;
						}
					}
				}

				if(sock != NULL) {
					break;
				}

				if(managed == false) {
					DBG_NW << "worker thread exiting...\n";
					waiting_threads[shard]--;
					to_clear[shard].push_back(threading::get_current_thread_id());
					return 0;
				}

				cond[shard]->wait(*shard_mutexes[shard]);
			}
			waiting_threads[shard]--;

			// If this was the last idle worker, spawn a replacement so other
			// sockets are not starved while we block on I/O.
			if(!waiting_threads[shard] && managed == true) {
				if(!max_threads || max_threads > threads[shard].size()) {
					threading::thread * tmp = new threading::thread(process_queue, shard_num);
					threads[shard][tmp->get_id()] = tmp;
				}
			}
		}

		assert(sock);

		DBG_NW << "thread found a buffer...\n";

		SOCKET_STATE result = SOCKET_READY;
		std::vector<char> buf;

		if(sent_buf) {
			if(sent_buf->raw_buffer.empty()) {
				const std::string &value = sent_buf->stream.str();
				make_network_buffer(value.c_str(), value.size(), sent_buf->raw_buffer);
			}

			result = send_buffer(sent_buf->sock, sent_buf->raw_buffer);
			delete sent_buf;
		} else {
			result = receive_buf(sock, buf);
		}

		if(result != SOCKET_READY || buf.empty())
		{
			check_socket_result(sock, result);
			continue;
		}

		buffer* received_data = new buffer(sock);

		if(raw_data_only) {
			received_data->raw_buffer.swap(buf);
		} else {
			std::string buffer(buf.begin(), buf.end());
			std::istringstream stream(buffer);

			// A leading 0x1f byte marks a gzip stream; otherwise fall back
			// to the binary WML compression schema for this socket.
			try {
				if(stream.peek() == 31) {
					read_gz(received_data->config_buf, stream);
				} else {
					compression_schema *compress;
					{
						const threading::lock lock_schemas(*schemas_mutex);
						compress = &schemas.insert(std::pair<TCPsocket,schema_pair>(sock,schema_pair())).first->second.incoming;
					}
					read_compressed(received_data->config_buf, stream, *compress);
				}
			} catch(config::error &e)
			{
				received_data->config_error = e.message;
			}
		}

		{
			// Hand the parsed data over to the main thread.
			const threading::lock lock_received(*received_mutex);
			received_data_queue.push_back(received_data);
		}
		check_socket_result(sock, result);
	}
}

}

namespace network_worker_pool
{

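// manager sets up the worker pool: one mutex/condition per shard, the global
// stats/schema/receive mutexes, and p_min_threads initial workers per shard
// (at least one).  Only the first live manager is active; nested managers
// are no-ops.  Illustrative use (a sketch, not taken from this file):
//
//   network_worker_pool::manager pool(1, 4);           // 1..4 workers/shard
//   network_worker_pool::queue_data(sock, cfg, true);  // async gzip send
//   network_worker_pool::receive_data(sock);           // async receive
//   config incoming;
//   TCPsocket from = network_worker_pool::get_received_data(NULL, incoming);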
manager::manager(size_t p_min_threads, size_t p_max_threads) : active_(!managed)
{
	if(active_) {
		managed = true;
		for(int i = 0; i != NUM_SHARDS; ++i) {
			shard_mutexes[i] = new threading::mutex();
			cond[i] = new threading::condition();
		}
		stats_mutex = new threading::mutex();
		schemas_mutex = new threading::mutex();
		received_mutex = new threading::mutex();

		min_threads = p_min_threads;
		max_threads = p_max_threads;

		if (p_min_threads == 0)
		{
			p_min_threads = 1;
		}

		for(size_t shard = 0; shard != NUM_SHARDS; ++shard) {
			const threading::lock lock(*shard_mutexes[shard]);
			for(size_t n = 0; n != p_min_threads; ++n) {
				threading::thread * tmp = new threading::thread(process_queue, (void*)uintptr_t(shard));
				threads[shard][tmp->get_id()] = tmp;
			}
		}
	}
}

manager::~manager()
{
	if(active_) {
		managed = false;

		for(size_t shard = 0; shard != NUM_SHARDS; ++shard) {
			{
				const threading::lock lock(*shard_mutexes[shard]);
				socket_errors[shard] = 0;
			}

			cond[shard]->notify_all();

			for(std::map<Uint32,threading::thread*>::const_iterator i = threads[shard].begin(); i != threads[shard].end(); ++i) {
				DBG_NW << "waiting for thread " << i->first << " to exit...\n";
				delete i->second;
				DBG_NW << "thread exited...\n";
			}

			threads[shard].clear();
			delete shard_mutexes[shard];
			shard_mutexes[shard] = NULL;
			delete cond[shard];
			cond[shard] = NULL;
		}

		delete stats_mutex;
		delete schemas_mutex;
		delete received_mutex;
		stats_mutex = 0;
		schemas_mutex = 0;
		received_mutex = 0;

		for(int i = 0; i != NUM_SHARDS; ++i) {
			sockets_locked[i].clear();
		}
		transfer_stats.clear();

		DBG_NW << "exiting manager::~manager()\n";
	}
}

network::pending_statistics get_pending_stats()
{
	network::pending_statistics stats;
	stats.npending_sends = 0;
	stats.nbytes_pending_sends = 0;
	for(size_t shard = 0; shard != NUM_SHARDS; ++shard) {
		const threading::lock lock(*shard_mutexes[shard]);
		stats.npending_sends += outgoing_bufs[shard].size();
		for(buffer_set::const_iterator i = outgoing_bufs[shard].begin(); i != outgoing_bufs[shard].end(); ++i) {
			stats.nbytes_pending_sends += (*i)->raw_buffer.size();
		}
	}

	return stats;
}

void set_raw_data_only()
{
	raw_data_only = true;
}

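// Registers a pending receive on the socket and wakes a worker if the socket
// is currently idle (or errored, so the error gets reported).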
void receive_data(TCPsocket sock)
{
	{
		const size_t shard = get_shard(sock);
		const threading::lock lock(*shard_mutexes[shard]);
		pending_receives[shard].push_back(sock);

		socket_state_map::const_iterator i = sockets_locked[shard].insert(std::pair<TCPsocket,SOCKET_STATE>(sock,SOCKET_READY)).first;
		if(i->second == SOCKET_READY || i->second == SOCKET_ERRORED) {
			cond[shard]->notify_one();
		}
	}
}

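// Pops the next parsed packet (optionally only for a specific socket) from
// the received queue.  Returns NULL when nothing is available and rethrows
// any parse error recorded by the worker thread.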
TCPsocket get_received_data(TCPsocket sock, config& cfg)
{
	assert(!raw_data_only);
	const threading::lock lock_received(*received_mutex);
	received_queue::iterator itor = received_data_queue.begin();
	if(sock != NULL) {
		for(; itor != received_data_queue.end(); ++itor) {
			if((*itor)->sock == sock) {
				break;
			}
		}
	}

	if(itor == received_data_queue.end()) {
		return NULL;
	} else if (!(*itor)->config_error.empty()) {
		// A worker thread stored a parse error for this packet: clean up
		// the buffer and report the error to the caller.
		std::string error = (*itor)->config_error;
		buffer* buf = *itor;
		received_data_queue.erase(itor);
		delete buf;
		throw config::error(error);
	} else {
		cfg.swap((*itor)->config_buf);
		const TCPsocket res = (*itor)->sock;
		buffer* buf = *itor;
		received_data_queue.erase(itor);
		delete buf;
		return res;
	}
}

TCPsocket get_received_data(std::vector<char>& out)
{
	assert(raw_data_only);
	const threading::lock lock_received(*received_mutex);
	if(received_data_queue.empty()) {
		return NULL;
	}

	buffer* buf = received_data_queue.front();
	received_data_queue.pop_front();
	out.swap(buf->raw_buffer);
	const TCPsocket res = buf->sock;
	delete buf;
	return res;
}

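// Queues pre-compressed (gzip) data for sending; the payload must already
// start with the gzip magic byte 0x1f, hence the assert.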
void queue_raw_data(TCPsocket sock, const char* buf, int len)
{
	buffer* queued_buf = new buffer(sock);
	assert(*buf == 31);
	make_network_buffer(buf, len, queued_buf->raw_buffer);
	const size_t shard = get_shard(sock);
	const threading::lock lock(*shard_mutexes[shard]);
	outgoing_bufs[shard].push_back(queued_buf);
	socket_state_map::const_iterator i = sockets_locked[shard].insert(std::pair<TCPsocket,SOCKET_STATE>(sock,SOCKET_READY)).first;
	if(i->second == SOCKET_READY || i->second == SOCKET_ERRORED) {
		cond[shard]->notify_one();
	}
}

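// Serialises a config and queues it for sending on the socket's shard,
// waking a worker if the socket is idle.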
void queue_data(TCPsocket sock, const config& buf, const bool gzipped)
{
	DBG_NW << "queuing data...\n";

	buffer* queued_buf = new buffer(sock);
	output_to_buffer(sock, buf, queued_buf->stream, gzipped);
	queued_buf->gzipped = gzipped;
	{
		const size_t shard = get_shard(sock);
		const threading::lock lock(*shard_mutexes[shard]);

		outgoing_bufs[shard].push_back(queued_buf);

		socket_state_map::const_iterator i = sockets_locked[shard].insert(std::pair<TCPsocket,SOCKET_STATE>(sock,SOCKET_READY)).first;
		if(i->second == SOCKET_READY || i->second == SOCKET_ERRORED) {
			cond[shard]->notify_one();
		}
	}
}

namespace
{

// Drops any outgoing or already-received buffers that belong to the socket.
void remove_buffers(TCPsocket sock)
{
	{
		const size_t shard = get_shard(sock);
		for(buffer_set::iterator i = outgoing_bufs[shard].begin(); i != outgoing_bufs[shard].end();) {
			if ((*i)->sock == sock)
			{
				buffer* buf = *i;
				i = outgoing_bufs[shard].erase(i);
				delete buf;
			}
			else
			{
				++i;
			}
		}
	}

	{
		const threading::lock lock_receive(*received_mutex);

		for(received_queue::iterator j = received_data_queue.begin(); j != received_data_queue.end(); ) {
			if((*j)->sock == sock) {
				buffer *buf = *j;
				j = received_data_queue.erase(j);
				delete buf;
			} else {
				++j;
			}
		}
	}
}

}

bool is_locked(const TCPsocket sock) {
	const size_t shard = get_shard(sock);
	const threading::lock lock(*shard_mutexes[shard]);
	const socket_state_map::iterator lock_it = sockets_locked[shard].find(sock);
	if (lock_it == sockets_locked[shard].end()) return false;
	return (lock_it->second == SOCKET_LOCKED);
}

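// Removes a socket from the pool.  Returns true once all of its state has
// been cleaned up; returns false if a worker is still using the socket, in
// which case it is marked SOCKET_INTERRUPT and the caller should retry.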
bool close_socket(TCPsocket sock, bool force)
{
	{
		const size_t shard = get_shard(sock);
		const threading::lock lock(*shard_mutexes[shard]);

		pending_receives[shard].erase(std::remove(pending_receives[shard].begin(), pending_receives[shard].end(), sock), pending_receives[shard].end());

		const socket_state_map::iterator lock_it = sockets_locked[shard].find(sock);
		if(lock_it == sockets_locked[shard].end()) {
			remove_buffers(sock);
			return true;
		}
		{
			const threading::lock lock_schemas(*schemas_mutex);
			schemas.erase(sock);
		}

		if (!(lock_it->second == SOCKET_LOCKED || lock_it->second == SOCKET_INTERRUPT) || force) {
			sockets_locked[shard].erase(lock_it);
			remove_buffers(sock);
			return true;
		} else {
			lock_it->second = SOCKET_INTERRUPT;
			return false;
		}
	}
}

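// Returns one socket that a worker flagged as errored (cleaning up its
// state), or 0 if there are no pending errors.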
TCPsocket detect_error()
{
	for(size_t shard = 0; shard != NUM_SHARDS; ++shard) {
		const threading::lock lock(*shard_mutexes[shard]);
		if(socket_errors[shard] > 0) {
			for(socket_state_map::iterator i = sockets_locked[shard].begin(); i != sockets_locked[shard].end();) {
				if(i->second == SOCKET_ERRORED) {
					--socket_errors[shard];
					const TCPsocket sock = i->first;
					sockets_locked[shard].erase(i++);
					pending_receives[shard].erase(std::remove(pending_receives[shard].begin(), pending_receives[shard].end(), sock), pending_receives[shard].end());
					remove_buffers(sock);
					const threading::lock lock_schema(*schemas_mutex);
					schemas.erase(sock);
					return sock;
				}
				else
				{
					++i;
				}
			}
		}

		socket_errors[shard] = 0;
	}

	return 0;
}

std::pair<network::statistics,network::statistics> get_current_transfer_stats(TCPsocket sock)
{
	const threading::lock lock(*stats_mutex);
	return transfer_stats[sock];
}

}