diff --git a/README.md b/README.md
index a1b5b168..70cb98f5 100644
--- a/README.md
+++ b/README.md
@@ -195,7 +195,7 @@ Once you have successfully created your rt file, run the demo:
 ```
 In general the demo program takes 6 parameters:
 ```
-./demo <network-rt-file> <path-to-video> <kind-of-network> <number-of-classes> <n-batches> <show-flag>
+./demo <network-rt-file> <path-to-video> <kind-of-network> <number-of-classes> <n-batches> <show-flag> <save-result-flag> <http-port> <print-yolo-coords-flag>
 ```
 where
 * ```<network-rt-file>``` is the rt file generated by a test
@@ -203,7 +203,10 @@ where
 * ```<kind-of-network>``` is the type of network. Three types are currently supported: ```y``` (YOLO family), ```c``` (CenterNet family) and ```m``` (MobileNet-SSD family)
 * ```<number-of-classes>``` is the number of classes the network is trained on
 * ```<n-batches>``` number of batches to use in inference (N.B. you should first export TKDNN_BATCHSIZE to the required n_batches and create again the rt file for the network).
-* ```<show-flag>``` if set to 0 the demo will not show the visualization but save the video into result.mp4 (if n_batches == 1)
+* ```<show-flag>``` if set to 0 the demo will not show the visualization window.
+* ```<save-result-flag>``` if set to 1 the results are saved into result.mp4 (only if n_batches == 1); if set to 0 they are not saved.
+* ```<http-port>``` if set to 0 the HTTP stream is disabled; if a port number is given (e.g. 8090) the results can be viewed as an MJPEG stream at ```http://localhost:8090```.
+* ```<print-yolo-coords-flag>``` if set to 1, YOLO-style (normalized) coordinates of the bounding boxes are printed on the terminal.
 
 N.B. By default FP32 inference is used
 
diff --git a/demo/demo/demo.cpp b/demo/demo/demo.cpp
index 76b451d8..f9a477d9 100644
--- a/demo/demo/demo.cpp
+++ b/demo/demo/demo.cpp
@@ -1,8 +1,9 @@
 #include <iostream>
 #include <signal.h>
-#include <stdlib.h>     /* srand, rand */
+#include <stdlib.h> /* srand, rand */
 #include <unistd.h>
 #include <mutex>
+#include <https_stream.h> //https_stream
 
 #include "CenternetDetection.h"
 #include "MobilenetDetection.h"
@@ -11,61 +12,68 @@
 bool gRun;
 bool SAVE_RESULT = false;
 
-void sig_handler(int signo) {
-    std::cout<<"request gateway stop\n";
+void sig_handler(int signo)
+{
+    std::cout << "request gateway stop\n";
     gRun = false;
 }
 
-int main(int argc, char *argv[]) {
+int main(int argc, char *argv[])
+{
 
-    std::cout<<"detection\n";
+    std::cout << "detection\n";
     signal(SIGINT, sig_handler);
 
     std::string net = "yolo3_berkeley.rt";
-    if(argc > 1)
-        net = argv[1];
+    if (argc > 1)
+        net = argv[1];
     std::string input = "../demo/yolo_test.mp4";
-    if(argc > 2)
-        input = argv[2];
+    if (argc > 2)
+        input = argv[2];
     char ntype = 'y';
-    if(argc > 3)
-        ntype = argv[3][0];
+    if (argc > 3)
+        ntype = argv[3][0];
     int n_classes = 80;
-    if(argc > 4)
-        n_classes = atoi(argv[4]);
+    if (argc > 4)
+        n_classes = atoi(argv[4]);
     int n_batch = 1;
-    if(argc > 5)
-        n_batch = atoi(argv[5]);
+    if (argc > 5)
+        n_batch = atoi(argv[5]);
     bool show = true;
-    if(argc > 6)
-        show = atoi(argv[6]);
-
-    if(n_batch < 1 || n_batch > 64)
+    if (argc > 6)
+        show = atoi(argv[6]);
+    if (argc > 7)
+        SAVE_RESULT = atoi(argv[7]);
+    int port = 0;
+    if (argc > 8)
+        port = atoi(argv[8]);
+    bool extyolo = false;
+    if (argc > 9)
+        extyolo = atoi(argv[9]);
+
+    if (n_batch < 1 || n_batch > 64)
         FatalError("Batch dim not supported");
 
-    if(!show)
-        SAVE_RESULT = true;
 
     tk::dnn::Yolo3Detection yolo;
     tk::dnn::CenternetDetection cnet;
-    tk::dnn::MobilenetDetection mbnet;
+    tk::dnn::MobilenetDetection mbnet;
 
-    tk::dnn::DetectionNN *detNN;
+    tk::dnn::DetectionNN *detNN;
 
-    switch(ntype)
+    switch (ntype)
     {
-        case 'y':
-            detNN = &yolo;
-            break;
-        case 'c':
-            detNN = &cnet;
-            break;
-        case 'm':
-            detNN = &mbnet;
-            n_classes++;
-            break;
-        default:
+    case 'y':
+        detNN = &yolo;
+        break;
+    case 'c':
+        detNN = &cnet;
+        break;
+    case 'm':
+        detNN = &mbnet;
+        n_classes++;
+        break;
+    default:
         FatalError("Network type not allowed (3rd parameter)\n");
     }
 
@@ 
-74,66 +82,82 @@ int main(int argc, char *argv[]) { gRun = true; cv::VideoCapture cap(input); - if(!cap.isOpened()) - gRun = false; + if (!cap.isOpened()) + gRun = false; else - std::cout<<"camera started\n"; + std::cout << "camera started\n"; cv::VideoWriter resultVideo; - if(SAVE_RESULT) { + if (SAVE_RESULT) + { int w = cap.get(cv::CAP_PROP_FRAME_WIDTH); int h = cap.get(cv::CAP_PROP_FRAME_HEIGHT); - resultVideo.open("result.mp4", cv::VideoWriter::fourcc('M','P','4','V'), 30, cv::Size(w, h)); + resultVideo.open("result.mp4", cv::VideoWriter::fourcc('M', 'P', '4', 'V'), 30, cv::Size(w, h)); } cv::Mat frame; - if(show) + if (show) cv::namedWindow("detection", cv::WINDOW_NORMAL); + cv::moveWindow("detection", 0, 0); + cv::resizeWindow("detection", 1352, 1013); std::vector batch_frame; std::vector batch_dnn_input; - while(gRun) { + while (gRun) + { batch_dnn_input.clear(); batch_frame.clear(); - - for(int bi=0; bi< n_batch; ++bi){ - cap >> frame; - if(!frame.data) + + for (int bi = 0; bi < n_batch; ++bi) + { + cap >> frame; + if (!frame.data) break; - + batch_frame.push_back(frame); // this will be resized to the net format batch_dnn_input.push_back(frame.clone()); - } - if(!frame.data) + } + if (!frame.data) break; - + //inference detNN->update(batch_dnn_input, n_batch); - detNN->draw(batch_frame); + detNN->draw(batch_frame,extyolo); - if(show){ - for(int bi=0; bi< n_batch; ++bi){ + if (show) + { + for (int bi = 0; bi < n_batch; ++bi) + { cv::imshow("detection", batch_frame[bi]); - cv::waitKey(1); } } - if(n_batch == 1 && SAVE_RESULT) + if (cv::waitKey(1) == 27) + { + break; + } + if (n_batch == 1 && SAVE_RESULT) resultVideo << frame; + + if (port > 0) + { + send_mjpeg(batch_frame[0], port, 400000, 40); + } } - std::cout<<"detection end\n"; - double mean = 0; - - std::cout<stats.begin(), detNN->stats.end())/n_batch<<" ms\n"; - std::cout<<"Max: "<<*std::max_element(detNN->stats.begin(), detNN->stats.end())/n_batch<<" ms\n"; - for(int i=0; istats.size(); i++) mean += detNN->stats[i]; mean /= detNN->stats.size(); - std::cout<<"Avg: "<stats.begin(), detNN->stats.end()) / n_batch << " ms\n"; + std::cout << "Max: " << *std::max_element(detNN->stats.begin(), detNN->stats.end()) / n_batch << " ms\n"; + for (int i = 0; i < detNN->stats.size(); i++) + mean += detNN->stats[i]; + mean /= detNN->stats.size(); + std::cout << "Avg: " << mean / n_batch << " ms\t" << 1000 / (mean / n_batch) << " FPS\n" + << COL_END; return 0; } - diff --git a/demo/demo/map.cpp b/demo/demo/map.cpp index d724db0a..4e1f0dfd 100644 --- a/demo/demo/map.cpp +++ b/demo/demo/map.cpp @@ -145,7 +145,7 @@ int main(int argc, char *argv[]) //inference detected_bbox.clear(); detNN->update(batch_dnn_input,1,write_res_on_file, ×, write_coco_json); - detNN->draw(batch_frames); + detNN->draw(batch_frames,true); detected_bbox = detNN->detected; if(write_coco_json) @@ -232,4 +232,3 @@ int main(int argc, char *argv[]) return 0; } - diff --git a/include/tkDNN/DetectionNN.h b/include/tkDNN/DetectionNN.h index 030cf8f9..c70247d6 100644 --- a/include/tkDNN/DetectionNN.h +++ b/include/tkDNN/DetectionNN.h @@ -3,11 +3,13 @@ #include #include -#include +#include #include #include #include "utils.h" +#include + #include #include #include @@ -21,39 +23,42 @@ #include #endif +namespace tk +{ + namespace dnn + { -namespace tk { namespace dnn { - -class DetectionNN { + class DetectionNN + { - protected: - tk::dnn::NetworkRT *netRT = nullptr; - dnnType *input_d; + protected: + tk::dnn::NetworkRT *netRT = nullptr; + dnnType *input_d; - std::vector 
originalSize; + std::vector originalSize; - cv::Scalar colors[256]; + cv::Scalar colors[256]; - int nBatches = 1; + int nBatches = 1; #ifdef OPENCV_CUDACONTRIB - cv::cuda::GpuMat bgr[3]; - cv::cuda::GpuMat imagePreproc; + cv::cuda::GpuMat bgr[3]; + cv::cuda::GpuMat imagePreproc; #else - cv::Mat bgr[3]; - cv::Mat imagePreproc; - dnnType *input; + cv::Mat bgr[3]; + cv::Mat imagePreproc; + dnnType *input; #endif - /** + /** * This method preprocess the image, before feeding it to the NN. * * @param frame original frame to adapt for inference. * @param bi batch index */ - virtual void preprocess(cv::Mat &frame, const int bi=0) = 0; + virtual void preprocess(cv::Mat &frame, const int bi = 0) = 0; - /** + /** * This method postprocess the output of the NN to obtain the correct * boundig boxes. * @@ -61,21 +66,21 @@ class DetectionNN { * @param mAP set to true only if all the probabilities for a bounding * box are needed, as in some cases for the mAP calculation */ - virtual void postprocess(const int bi=0,const bool mAP=false) = 0; + virtual void postprocess(const int bi = 0, const bool mAP = false) = 0; - public: - int classes = 0; - float confThreshold = 0.3; /*threshold on the confidence of the boxes*/ + public: + int classes = 0; + float confThreshold = 0.3; /*threshold on the confidence of the boxes*/ - std::vector detected; /*bounding boxes in output*/ - std::vector> batchDetected; /*bounding boxes in output*/ - std::vector stats; /*keeps track of inference times (ms)*/ - std::vector classesNames; + std::vector detected; /*bounding boxes in output*/ + std::vector> batchDetected; /*bounding boxes in output*/ + std::vector stats; /*keeps track of inference times (ms)*/ + std::vector classesNames; - DetectionNN() {}; - ~DetectionNN(){}; + DetectionNN(){}; + ~DetectionNN(){}; - /** + /** * Method used to inialize the class, allocate memory and compute * needed data. * @@ -84,9 +89,9 @@ class DetectionNN { * @param n_batches maximum number of batches to use in inference * @return true if everything is correct, false otherwise. */ - virtual bool init(const std::string& tensor_path, const int n_classes=80, const int n_batches=1) = 0; - - /** + virtual bool init(const std::string &tensor_path, const int n_classes = 80, const int n_batches = 1) = 0; + + /** * This method performs the whole detection of the NN. * * @param frames frames to run detection on. 
@@ -97,87 +102,116 @@ class DetectionNN { * @param mAP set to true only if all the probabilities for a bounding * box are needed, as in some cases for the mAP calculation */ - void update(std::vector& frames, const int cur_batches=1, bool save_times=false, std::ofstream *times=nullptr, const bool mAP=false){ - if(save_times && times==nullptr) - FatalError("save_times set to true, but no valid ofstream given"); - if(cur_batches > nBatches) - FatalError("A batch size greater than nBatches cannot be used"); - - originalSize.clear(); - if(TKDNN_VERBOSE) printCenteredTitle(" TENSORRT detection ", '=', 30); + void update(std::vector &frames, const int cur_batches = 1, bool save_times = false, std::ofstream *times = nullptr, const bool mAP = false) { - TKDNN_TSTART - for(int bi=0; bi nBatches) + FatalError("A batch size greater than nBatches cannot be used"); + + originalSize.clear(); + if (TKDNN_VERBOSE) + printCenteredTitle(" TENSORRT detection ", '=', 30); + { + TKDNN_TSTART + for (int bi = 0; bi < cur_batches; ++bi) + { + if (!frames[bi].data) + FatalError("No image data feed to detection"); + originalSize.push_back(frames[bi].size()); + preprocess(frames[bi], bi); + } + TKDNN_TSTOP + if (save_times) + *times << t_ns << ";"; } - TKDNN_TSTOP - if(save_times) *times<input_dim; - dim.n = cur_batches; - { - if(TKDNN_VERBOSE) dim.print(); - TKDNN_TSTART - netRT->infer(dim, input_d); - TKDNN_TSTOP - if(TKDNN_VERBOSE) dim.print(); - stats.push_back(t_ns); - if(save_times) *times<input_dim; + dim.n = cur_batches; + { + if (TKDNN_VERBOSE) + dim.print(); + TKDNN_TSTART + netRT->infer(dim, input_d); + TKDNN_TSTOP + if (TKDNN_VERBOSE) + dim.print(); + stats.push_back(t_ns); + if (save_times) + *times << t_ns << ";"; + } - batchDetected.clear(); - { - TKDNN_TSTART - for(int bi=0; bi& frames) { - tk::dnn::box b; - int x0, w, x1, y0, h, y1; - int objClass; - std::string det_class; - - int baseline = 0; - float font_scale = 0.5; - int thickness = 2; - - for(int bi=0; bi &frames, bool ext_yolo) + { + tk::dnn::box b; + int x0, w, x1, y0, h, y1; + int objClass; + std::string det_class; + + //yolo detctions output + std::string yoloBox; + float Yx, Yy, Yw, Yh; + cv::Size sz = frames[0].size(); + int imageWidth = sz.width; + int imageHeight = sz.height; + + int baseline = 0; + float font_scale = 0.5; + int thickness = 2; + + for (int bi = 0; bi < frames.size(); ++bi) + { + // draw dets + for (int i = 0; i < batchDetected[bi].size(); i++) + { + b = batchDetected[bi][i]; + x0 = b.x; + x1 = b.x + b.w; + y0 = b.y; + y1 = b.y + b.h; + det_class = classesNames[b.cl]; + + //yolo stuff + if (ext_yolo) + { + Yx = (b.x + (int)(b.w / 2)) / imageWidth; + Yy = (b.y + (int)(b.h / 2)) / imageHeight; + Yw = b.w / imageWidth; + Yh = b.h / imageHeight; + std::cout << std::fixed << std::setprecision(6)< +#include +#include +#include +#include +#include +#include +#include +#include +using std::cerr; +using std::endl; + +// +// socket related abstractions: +// +#ifdef _WIN32 +#ifndef USE_CMAKE_LIBS +#pragma comment(lib, "ws2_32.lib") +#endif +#define WIN32_LEAN_AND_MEAN +#define _WINSOCK_DEPRECATED_NO_WARNINGS +#include +#include +#include +#include "gettimeofday.h" +#define PORT unsigned long +#define ADDRPOINTER int* +struct _INIT_W32DATA +{ + WSADATA w; + _INIT_W32DATA() { WSAStartup(MAKEWORD(2, 1), &w); } +} _init_once; + +// Graceful closes will first close their output channels and then wait for the peer +// on the other side of the connection to close its output channels. 
When both sides are done telling +// each other they won,t be sending any more data (i.e., closing output channels), +// the connection can be closed fully, with no risk of reset. +static int close_socket(SOCKET s) { + int close_output = ::shutdown(s, 1); // 0 close input, 1 close output, 2 close both + char *buf = (char *)calloc(1024, sizeof(char)); + ::recv(s, buf, 1024, 0); + free(buf); + int close_input = ::shutdown(s, 0); + int result = ::closesocket(s); + cerr << "Close socket: out = " << close_output << ", in = " << close_input << " \n"; + return result; +} +#else // _WIN32 - else: nix +#include "httpunistd.h" +#include +#include +#include +#include +#include +#include +#include +#include +#define PORT unsigned short +#define SOCKET int +#define HOSTENT struct hostent +#define SOCKADDR struct sockaddr +#define SOCKADDR_IN struct sockaddr_in +#define ADDRPOINTER unsigned int* +#define INVALID_SOCKET -1 +#define SOCKET_ERROR -1 +struct _IGNORE_PIPE_SIGNAL +{ + struct sigaction new_actn, old_actn; + _IGNORE_PIPE_SIGNAL() { + new_actn.sa_handler = SIG_IGN; // ignore the broken pipe signal + sigemptyset(&new_actn.sa_mask); + new_actn.sa_flags = 0; + sigaction(SIGPIPE, &new_actn, &old_actn); + // sigaction (SIGPIPE, &old_actn, NULL); // - to restore the previous signal handling + } +} _init_once; + +static int close_socket(SOCKET s) { + int close_output = ::shutdown(s, 1); // 0 close input, 1 close output, 2 close both + char *buf = (char *)calloc(1024, sizeof(char)); + ::recv(s, buf, 1024, 0); + free(buf); + int close_input = ::shutdown(s, 0); + int result = close(s); + std::cerr << "Close socket: out = " << close_output << ", in = " << close_input << " \n"; + return result; +} +#endif // _WIN32 + +#include +#include +#include +#include +#ifndef CV_VERSION_EPOCH +#include +#endif + +using namespace cv; + + + +class MJPG_sender +{ + SOCKET sock; + SOCKET maxfd; + fd_set master; + int timeout; // master sock timeout, shutdown after timeout usec. 
+ int quality; // jpeg compression [1..100] + int close_all_sockets; + + int _write(int sock, char const*const s, int len) + { + if (len < 1) { len = strlen(s); } + return ::send(sock, s, len, 0); + } + +public: + + MJPG_sender(int port = 0, int _timeout = 400000, int _quality = 30) + : sock(INVALID_SOCKET) + , timeout(_timeout) + , quality(_quality) + { + close_all_sockets = 0; + FD_ZERO(&master); + if (port) + open(port); + } + + ~MJPG_sender() + { + close_all(); + release(); + } + + bool release() + { + if (sock != INVALID_SOCKET) + ::shutdown(sock, 2); + sock = (INVALID_SOCKET); + return false; + } + + void close_all() + { + close_all_sockets = 1; + cv::Mat tmp(cv::Size(10, 10), CV_8UC3); + write(tmp); + } + + bool open(int port) + { + sock = ::socket(AF_INET, SOCK_STREAM, IPPROTO_TCP); + + SOCKADDR_IN address; + address.sin_addr.s_addr = INADDR_ANY; + address.sin_family = AF_INET; + address.sin_port = htons(port); // ::htons(port); + int reuse = 1; + if (setsockopt(sock, SOL_SOCKET, SO_REUSEADDR, (const char*)&reuse, sizeof(reuse)) < 0) + cerr << "setsockopt(SO_REUSEADDR) failed" << endl; + + // Non-blocking sockets + // Windows: ioctlsocket() and FIONBIO + // Linux: fcntl() and O_NONBLOCK +#ifdef WIN32 + unsigned long i_mode = 1; + int result = ioctlsocket(sock, FIONBIO, &i_mode); + if (result != NO_ERROR) { + std::cerr << "ioctlsocket(FIONBIO) failed with error: " << result << std::endl; + } +#else // WIN32 + int flags = fcntl(sock, F_GETFL, 0); + fcntl(sock, F_SETFL, flags | O_NONBLOCK); +#endif // WIN32 + +#ifdef SO_REUSEPORT + if (setsockopt(sock, SOL_SOCKET, SO_REUSEPORT, (const char*)&reuse, sizeof(reuse)) < 0) + cerr << "setsockopt(SO_REUSEPORT) failed" << endl; +#endif + if (::bind(sock, (SOCKADDR*)&address, sizeof(SOCKADDR_IN)) == SOCKET_ERROR) + { + cerr << "error MJPG_sender: couldn't bind sock " << sock << " to port " << port << "!" << endl; + return release(); + } + if (::listen(sock, 10) == SOCKET_ERROR) + { + cerr << "error MJPG_sender: couldn't listen on sock " << sock << " on port " << port << " !" << endl; + return release(); + } + FD_ZERO(&master); + FD_SET(sock, &master); + maxfd = sock; + return true; + } + + bool isOpened() + { + return sock != INVALID_SOCKET; + } + + bool write(cv::Mat frame) + { + fd_set rread = master; + struct timeval select_timeout = { 0, 0 }; + struct timeval socket_timeout = { 0, timeout }; + if (::select(maxfd + 1, &rread, NULL, NULL, &select_timeout) <= 0) + return true; // nothing broken, there's just noone listening + + std::vector outbuf; + std::vector params; + params.push_back(IMWRITE_JPEG_QUALITY); + params.push_back(quality); + cv::imencode(".jpg", frame, outbuf, params); //REMOVED FOR COMPATIBILITY + // https://docs.opencv.org/3.4/d4/da8/group__imgcodecs.html#ga292d81be8d76901bff7988d18d2b42ac + //std::cerr << "cv::imencode call disabled!" << std::endl; + int outlen = static_cast(outbuf.size()); + +#ifdef _WIN32 + for (unsigned i = 0; iclient ? 
maxfd : client); + FD_SET(client, &master); + _write(client, "HTTP/1.0 200 OK\r\n", 0); + _write(client, + "Server: Mozarella/2.2\r\n" + "Accept-Range: bytes\r\n" + "Connection: close\r\n" + "Max-Age: 0\r\n" + "Expires: 0\r\n" + "Cache-Control: no-cache, private\r\n" + "Pragma: no-cache\r\n" + "Content-Type: multipart/x-mixed-replace; boundary=mjpegstream\r\n" + "\r\n", 0); + cerr << "MJPG_sender: new client " << client << endl; + } + else // existing client, just stream pix + { + if (close_all_sockets) { + int result = close_socket(s); + cerr << "MJPG_sender: close clinet: " << result << " \n"; + continue; + } + + char head[400]; + sprintf(head, "--mjpegstream\r\nContent-Type: image/jpeg\r\nContent-Length: %zu\r\n\r\n", outlen); + _write(s, head, 0); + int n = _write(s, (char*)(&outbuf[0]), outlen); + cerr << "known client: " << s << ", sent = " << n << ", must be sent outlen = " << outlen << endl; + if (n < (int)outlen) + { + cerr << "MJPG_sender: kill client " << s << endl; + //::shutdown(s, 2); + close_socket(s); + FD_CLR(s, &master); + } + } + } + if (close_all_sockets) { + int result = close_socket(sock); + cerr << "MJPG_sender: close acceptor: " << result << " \n\n"; + } + return true; + } +}; +// ---------------------------------------- + +static std::mutex mtx_mjpeg; + +//struct mat_cv : cv::Mat { int a[0]; }; + +void send_mjpeg(cv::Mat mat, int port, int timeout, int quality) +{ + try { + std::lock_guard lock(mtx_mjpeg); + static MJPG_sender wri(port, timeout, quality); + //cv::Mat mat = cv::cvarrToMat(ipl); + wri.write(mat); + std::cout << " MJPEG-stream sent. \n"; + } + catch (...) { + cerr << " Error in send_mjpeg() function \n"; + } +} +// ---------------------------------------- + +std::string get_system_frame_time_string() +{ + std::time_t t = std::chrono::system_clock::to_time_t(std::chrono::system_clock::now()); + static std::mutex mtx; + std::lock_guard lock(mtx); + struct tm *tmp_buf = localtime(&t); + char buff[256]; + std::strftime(buff, 256, "%A %F %T", tmp_buf); + std::string system_frame_time = buff; + return system_frame_time; +} +// ---------------------------------------- + + +/*#ifdef __CYGWIN__ +int send_http_post_request(char *http_post_host, int server_port, const char *videosource, + detection *dets, int nboxes, int classes, char **names, long long int frame_id, int ext_output, int timeout) +{ + std::cerr << " send_http_post_request() isn't implemented \n"; + return 0; +} +#else // __CYGWIN__*/ diff --git a/include/tkDNN/httpunistd.h b/include/tkDNN/httpunistd.h new file mode 100644 index 00000000..6c06aef1 --- /dev/null +++ b/include/tkDNN/httpunistd.h @@ -0,0 +1,56 @@ +#ifdef _WIN32 +#ifndef _UNISTD_H +#define _UNISTD_H 1 + +/* This file intended to serve as a drop-in replacement for +* unistd.h on Windows +* Please add functionality as needed +*/ + +#include +#include /* for _getcwd() and _chdir() */ +#include "getopt.h" +#include +#include /* for getpid() and the exec..() family */ +#include + +#define srandom srand +#define random rand + +/* Values for the second argument to access. +These may be OR'd together. */ +#define R_OK 4 /* Test for read permission. */ +#define W_OK 2 /* Test for write permission. */ +#define X_OK R_OK /* execute permission - unsupported in Windows, */ +#define F_OK 0 /* Test for existence. 
*/
+
+#define access _access
+#define dup2 _dup2
+#define execve _execve
+#define ftruncate _chsize
+#define unlink _unlink
+#define fileno _fileno
+#define getcwd _getcwd
+#define chdir _chdir
+#define isatty _isatty
+#define lseek _lseek
+/* read, write, and close are NOT being #defined here, because while there are file handle specific versions for Windows, they probably don't work for sockets. You need to look at your app and consider whether to call e.g. closesocket(). */
+
+#define ssize_t int
+
+#define STDIN_FILENO 0
+#define STDOUT_FILENO 1
+#define STDERR_FILENO 2
+/* should be in some equivalent to <sys/types.h> */
+//typedef __int8 int8_t;
+//typedef __int16 int16_t;
+//typedef __int32 int32_t;
+//typedef __int64 int64_t;
+//typedef unsigned __int8 uint8_t;
+//typedef unsigned __int16 uint16_t;
+//typedef unsigned __int32 uint32_t;
+//typedef unsigned __int64 uint64_t;
+#endif /* _UNISTD_H */
+#else
+#include <unistd.h>
+#endif /* _WIN32 */
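
Not part of the patch above — a minimal, hedged sketch of how the MJPEG/HTTP stream added by this PR might be consumed from another process or machine. It assumes OpenCV was built with FFmpeg (or GStreamer) support for MJPEG-over-HTTP, that the demo was started with a non-zero `<http-port>` (8090 here), and that the host name and window title are placeholders; it is an illustration, not part of tkDNN.

```cpp
// Viewer sketch for the stream served by MJPG_sender/send_mjpeg().
// Start the demo first, e.g. (arguments as described in the README hunk above):
//   ./demo yolo3_berkeley.rt ../demo/yolo_test.mp4 y 80 1 1 0 8090 0
#include <opencv2/opencv.hpp>
#include <iostream>

int main() {
    // The demo serves a multipart/x-mixed-replace JPEG stream over plain HTTP.
    cv::VideoCapture cap("http://localhost:8090");
    if (!cap.isOpened()) {
        std::cerr << "could not open MJPEG stream on port 8090\n";
        return 1;
    }
    cv::Mat frame;
    while (cap.read(frame)) {
        cv::imshow("tkDNN http stream", frame);
        if (cv::waitKey(1) == 27) // ESC quits, mirroring the demo's own loop
            break;
    }
    return 0;
}
```

The same stream can also be opened directly in a browser at http://localhost:8090, as described in the README changes.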