Commit 641cf4ba by wangquyuan

add by wqy

1 parent c866715e
......@@ -49,7 +49,7 @@ public:
//JSONField( self, MysqlConfig, db );
JSONField( self, int, frame_fps ) = 25;
//JSONField( self, int, frame_fps ) = 25;
JSONField( self, std::string, device ) = "cpu";
JSONField( self, ModelsConfig, models );
......
......@@ -39,10 +39,6 @@ Config *parse_config( const std::string &name )
return NULL;
}
if(config->frame_fps < 1)
{
config->frame_fps = 1;
}
return config;
}
......
......@@ -2,6 +2,9 @@
#include "do_request.h"
//#include <websocketpp/config/asio_no_tls.hpp>
//#include <websocketpp/server.hpp>
#include <stdio.h>
#include <iostream>
#include <fstream>
#include <map>
......@@ -88,6 +91,7 @@ using websocketpp::lib::bind;
const int VIDEO_WIDTH = 800;
const int VIDEO_HEIGHT = 600;
std::string g_response;
//seeta::FaceTracker *g_track = NULL;//create_face_detector();
seeta::FaceDetector *g_fd = NULL;//create_face_detector();
......@@ -142,231 +146,265 @@ void create_http_response( httpserver::connection_ptr con, const std::string &bo
//con->append_header("Access-Control-Request-Method","POST,GET");
}
// Build a FaceDetector from the configured model file.
// Device selection: g_config->device == "gpu" selects GPU, anything else CPU.
// Returns a heap-allocated detector owned by the caller (stored in a global).
static seeta::FaceDetector *create_face_detector()
{
    seeta::ModelSetting fd_model;
    fd_model.append( gmodelpath + g_config->models.face_detector );
    if( g_config->device == "gpu" )
    {
        fd_model.set_device( seeta::ModelSetting::GPU );
    }
    else
    {
        fd_model.set_device( seeta::ModelSetting::CPU );
    }
    fd_model.set_id( 0 );
    seeta::FaceDetector *m_fd = new seeta::FaceDetector( fd_model );
    // Ignore faces smaller than 100 px.
    m_fd->set( seeta::FaceDetector::PROPERTY_MIN_FACE_SIZE, 100 );
    return m_fd;
}
// Build a FaceTracker sized for width x height frames, using the same
// detector model and device selection as create_face_detector().
// Returns a heap-allocated tracker owned by the caller.
static seeta::FaceTracker *create_face_tracker( int width, int height )
{
    seeta::ModelSetting fd_model;
    fd_model.append( gmodelpath + g_config->models.face_detector );
    if( g_config->device == "gpu" )
    {
        fd_model.set_device( seeta::ModelSetting::GPU );
    }
    else
    {
        fd_model.set_device( seeta::ModelSetting::CPU );
    }
    fd_model.set_id( 0 );
    seeta::FaceTracker *m_tracker = new seeta::FaceTracker( fd_model, width, height );
    m_tracker->SetMinFaceSize( 100 ); //set(seeta::FaceTracker::PROPERTY_MIN_FACE_SIZE, 100);
    return m_tracker;
}
// Build the 5-point landmarker from the configured model file;
// device chosen from g_config->device ("gpu" -> GPU, otherwise CPU).
static seeta::FaceLandmarker *create_face_landmarker5()
{
    seeta::ModelSetting pd_model;
    pd_model.append( gmodelpath + g_config->models.face_landmarker5 );
    if( g_config->device == "gpu" )
    {
        pd_model.set_device( seeta::ModelSetting::GPU );
    }
    else
    {
        pd_model.set_device( seeta::ModelSetting::CPU );
    }
    pd_model.set_id( 0 );
    seeta::FaceLandmarker *m_pd5 = new seeta::FaceLandmarker( pd_model );
    return m_pd5;
}
// Build the 81-point landmarker (used by the heart-rate pipeline) from the
// configured model file; device chosen from g_config->device.
static seeta::FaceLandmarker *create_face_landmarker81()
{
    seeta::ModelSetting pd_model;
    pd_model.append( gmodelpath + g_config->models.face_landmarker81 );
    if( g_config->device == "gpu" )
    {
        pd_model.set_device( seeta::ModelSetting::GPU );
    }
    else
    {
        pd_model.set_device( seeta::ModelSetting::CPU );
    }
    pd_model.set_id( 0 );
    seeta::FaceLandmarker *m_pd81 = new seeta::FaceLandmarker( pd_model );
    return m_pd81;
}
// Build the face feature extractor from the configured model file;
// device chosen from g_config->device.
static seeta::FaceRecognizer *create_face_recognizer()
{
    seeta::ModelSetting fr_model;
    fr_model.append( gmodelpath + g_config->models.face_recognizer );
    if( g_config->device == "gpu" )
    {
        fr_model.set_device( seeta::ModelSetting::GPU );
    }
    else
    {
        fr_model.set_device( seeta::ModelSetting::CPU );
    }
    fr_model.set_id( 0 );
    seeta::FaceRecognizer *m_fr = new seeta::FaceRecognizer( fr_model );
    return m_fr;
}
// Build the head-pose estimator (yaw/pitch/roll) from the configured model;
// device chosen from g_config->device.
static seeta::PoseEstimator *create_pose_estimator()
{
    seeta::ModelSetting pose_model;
    pose_model.append( gmodelpath + g_config->models.pose_model );
    if( g_config->device == "gpu" )
    {
        pose_model.set_device( seeta::ModelSetting::GPU );
    }
    else
    {
        pose_model.set_device( seeta::ModelSetting::CPU );
    }
    pose_model.set_id( 0 );
    seeta::PoseEstimator *m_pose = new seeta::PoseEstimator( pose_model );
    return m_pose;
}
// Build the facial action-unit recognizer from the configured model;
// device chosen from g_config->device.
static seeta::ActionUnit *create_actionunit()
{
    seeta::ModelSetting au_model;
    au_model.append( gmodelpath + g_config->models.actionunit_model );
    if( g_config->device == "gpu" )
    {
        au_model.set_device( seeta::ModelSetting::GPU );
    }
    else
    {
        au_model.set_device( seeta::ModelSetting::CPU );
    }
    au_model.set_id( 0 );
    seeta::ActionUnit *m_au = new seeta::ActionUnit( au_model );
    return m_au;
}
// Build the emotion recognizer from the configured model;
// device chosen from g_config->device.
static seeta::EmotionRecognizer *create_emotion_recognizer()
{
    seeta::ModelSetting emo_model;
    emo_model.append( gmodelpath + g_config->models.emotion_model );
    if( g_config->device == "gpu" )
    {
        emo_model.set_device( seeta::ModelSetting::GPU );
    }
    else
    {
        emo_model.set_device( seeta::ModelSetting::CPU );
    }
    emo_model.set_id( 0 );
    seeta::EmotionRecognizer *m_emotion = new seeta::EmotionRecognizer( emo_model );
    return m_emotion;
}
// Build the eye open/close state detector from the configured model;
// device chosen from g_config->device.
static seeta::EyeStateDetector *create_eye_detection()
{
    seeta::ModelSetting eye_model;
    eye_model.append( gmodelpath + g_config->models.eye_model );
    if( g_config->device == "gpu" )
    {
        eye_model.set_device( seeta::ModelSetting::GPU );
    }
    else
    {
        eye_model.set_device( seeta::ModelSetting::CPU );
    }
    eye_model.set_id( 0 );
    seeta::EyeStateDetector *m_eye = new seeta::EyeStateDetector( eye_model );
    return m_eye;
}
// Instantiate every global inference engine in dependency order.
// Returns 0 on success, -1 if any factory throws (e.g. model file missing).
// NOTE: the NULL checks are defensive — the factories allocate with `new`,
// which throws rather than returning NULL.
int init_engine()
{
    try
    {
        g_fd = create_face_detector();
        if( !g_fd )
        {
            return -1;
        }
        /*
        g_track = create_face_tracker(VIDEO_WIDTH,VIDEO_HEIGHT);
        if (!g_track )
        {
            return -1;
        }
        */
        g_pd5 = create_face_landmarker5();
        if( !g_pd5 )
        {
            return -1;
        }
        g_pd81 = create_face_landmarker81();
        if( !g_pd81 )
        {
            return -1;
        }
        g_fr = create_face_recognizer();
        if( !g_fr )
        {
            return -1;
        }
        g_pose = create_pose_estimator();
        if( !g_pose )
        {
            return -1;
        }
        g_action = create_actionunit();
        if( !g_action )
        {
            return -1;
        }
        g_emotion = create_emotion_recognizer();
        if( !g_emotion )
        {
            return -1;
        }
        g_eye = create_eye_detection();
        if( !g_eye )
        {
            return -1;
        }
    }
    catch( std::exception &e )
    {
        LOG( _ERROR_, "init engine failed:%s", e.what() );
        return -1;
    }
    return 0;
}
// Thread-safe wrapper around the global face detector: serializes access
// through g_fd_lock and swallows exceptions, returning an empty result
// (faces.size == 0) on failure.
static SeetaFaceInfoArray face_detector( const SeetaImageData &image )
{
    SeetaFaceInfoArray faces;
    std::lock_guard<std::mutex> guard( g_fd_lock );
    try
    {
        faces = g_fd->detect( image );
    }
    catch( std::exception &e )
    {
        LOG( _ERROR_, "face detector exception:%s", e.what() );
    }
    return faces;
}
......@@ -387,106 +425,113 @@ static SeetaTrackingFaceInfoArray face_tracker(const SeetaImageData &image)
}
*/
static void face_landmarker5(const SeetaImageData &image, const SeetaRect &face, SeetaPointF *points)
static void face_landmarker5( const SeetaImageData &image, const SeetaRect &face, SeetaPointF *points )
{
std::lock_guard<std::mutex> guard( g_pd5_lock );
try
{
g_pd5->mark(image, face, points);
}catch(std::exception &e)
g_pd5->mark( image, face, points );
}
catch( std::exception &e )
{
LOG(_ERROR_,"face landmarker 5 exception:%s", e.what());
LOG( _ERROR_, "face landmarker 5 exception:%s", e.what() );
}
return;
}
static void face_landmarker81(const SeetaImageData &image, const SeetaRect &face, SeetaPointF *points)
static void face_landmarker81( const SeetaImageData &image, const SeetaRect &face, SeetaPointF *points )
{
//std::vector<float> points;
std::lock_guard<std::mutex> guard( g_pd81_lock );
try
{
g_pd81->mark(image, face,points);
}catch(std::exception &e)
g_pd81->mark( image, face, points );
}
catch( std::exception &e )
{
LOG(_ERROR_,"face landmarker 81 exception:%s", e.what());
LOG( _ERROR_, "face landmarker 81 exception:%s", e.what() );
}
return;
}
static std::vector<float> face_recognizer(const SeetaImageData &image, const SeetaPointF *points)
static std::vector<float> face_recognizer( const SeetaImageData &image, const SeetaPointF *points )
{
std::vector<float> features;
std::lock_guard<std::mutex> guard( g_fr_lock );
try
{
features.resize(g_fr->GetExtractFeatureSize());
if(!g_fr->Extract(image, points, features.data()))
features.resize( g_fr->GetExtractFeatureSize() );
if( !g_fr->Extract( image, points, features.data() ) )
{
LOG(_WARN_,"face recognizer failed");
LOG( _WARN_, "face recognizer failed" );
}
}catch(std::exception &e)
}
catch( std::exception &e )
{
LOG(_ERROR_,"face recognizer exception:%s", e.what());
LOG( _ERROR_, "face recognizer exception:%s", e.what() );
}
return features;
}
static void pose_estimate(const SeetaImageData &image, const SeetaRect face, float *yaw, float *pitch, float *roll)
static void pose_estimate( const SeetaImageData &image, const SeetaRect face, float *yaw, float *pitch, float *roll )
{
std::lock_guard<std::mutex> guard( g_pose_lock );
try
{
g_pose->Estimate(image, face, yaw,pitch,roll);
}catch(std::exception &e)
g_pose->Estimate( image, face, yaw, pitch, roll );
}
catch( std::exception &e )
{
LOG(_ERROR_,"pose estimate exception:%s", e.what());
LOG( _ERROR_, "pose estimate exception:%s", e.what() );
}
return;
}
static void eye_detect(const SeetaImageData &image, const SeetaPointF *points, seeta::EyeStateDetector::EYE_STATE &leftstate, seeta::EyeStateDetector::EYE_STATE &rightstate)
static void eye_detect( const SeetaImageData &image, const SeetaPointF *points, seeta::EyeStateDetector::EYE_STATE &leftstate, seeta::EyeStateDetector::EYE_STATE &rightstate )
{
std::lock_guard<std::mutex> guard( g_eye_lock );
try
{
g_eye->Detect(image, points, leftstate, rightstate);
}catch(std::exception &e)
g_eye->Detect( image, points, leftstate, rightstate );
}
catch( std::exception &e )
{
LOG(_ERROR_,"pose eye detector exception:%s", e.what());
LOG( _ERROR_, "pose eye detector exception:%s", e.what() );
}
}
static std::vector<float> face_action(const SeetaImageData &image, const SeetaPointF *points)
static std::vector<float> face_action( const SeetaImageData &image, const SeetaPointF *points )
{
std::vector<float> features;
std::lock_guard<std::mutex> guard( g_action_lock );
try
{
features.resize(g_action->GetExtractFeatureSize());
features.resize( g_action->GetExtractFeatureSize() );
std::cout << "face_action: GetExtractFeatureSize():" << g_action->GetExtractFeatureSize() << std::endl;
g_action->Extract(image, points, features.data());
}catch(std::exception &e)
g_action->Extract( image, points, features.data() );
}
catch( std::exception &e )
{
LOG(_ERROR_,"face action detector exception:%s", e.what());
LOG( _ERROR_, "face action detector exception:%s", e.what() );
}
return features;
}
static std::vector<float> face_emotion(const SeetaImageData &image, const SeetaPointF *points)
static std::vector<float> face_emotion( const SeetaImageData &image, const SeetaPointF *points )
{
std::vector<float> features;
std::lock_guard<std::mutex> guard( g_emotion_lock );
try
{
features.resize(g_emotion->emotion_count());
features.resize( g_emotion->emotion_count() );
std::cout << "face_emotion: emotion_count():" << g_emotion->emotion_count() << std::endl;
g_emotion->recognize_emotion(image, points, features.data());
}catch(std::exception &e)
g_emotion->recognize_emotion( image, points, features.data() );
}
catch( std::exception &e )
{
LOG(_ERROR_,"face emotion detector exception:%s", e.what());
LOG( _ERROR_, "face emotion detector exception:%s", e.what() );
}
return features;
}
......@@ -496,69 +541,71 @@ static std::vector<float> face_emotion(const SeetaImageData &image, const SeetaP
static std::string do_query_face_feature( const std::string &body )
{
std::string strresponse;
try
try
{
std::map<std::string, std::string> parameters;
int n = parse_http_parameters(body, parameters);
int n = parse_http_parameters( body, parameters );
std::map<std::string, std::string>::iterator iter;
iter = parameters.find("imgStr");
if (iter == parameters.end())
iter = parameters.find( "imgStr" );
if( iter == parameters.end() )
{
LOG( _ERROR_, "do not find the parameter imgStr" );
strresponse = "{\"code\":4,\"msg\":\"" + GetError( 4 ) + "\"}";
return strresponse;
LOG( _ERROR_, "do not find the parameter imgStr" );
strresponse = "{\"code\":4,\"msg\":\"" + GetError( 4 ) + "\"}";
return strresponse;
}
std::string strbase64 = base64_decode( iter->second );
std::vector<unsigned char> imagedatas( strbase64.begin(), strbase64.end() );
cv::Mat mat = cv::imdecode( imagedatas, 1 ); //COLOR_LOAD_IMAGE_COLOR);
std::cout << "------cv::imdecode---begin" << std::endl;
if( !mat.data )
{
std::cout << "------cv::imdecode---failed" << std::endl;
strresponse = "{\"code\":7,\"msg\":\"" + GetError( 7) + "\"}";
return strresponse;
}
std::cout << "------cv::imdecode---end" << std::endl;
SeetaImageData img;
img.width = mat.cols;
img.height = mat.rows;
img.channels = mat.channels();
img.data = mat.data;
do
{
auto faces = face_detector(img);//fd->detect(img);
if (faces.size <= 0) {
strresponse = "{\"code\":1,\"msg\":\"" + GetError( 1) + "\"}";
break;
}
int index = 0;
int maxarea = faces.data[0].pos.width * faces.data[0].pos.height;
for (int i=1; i<faces.size; i++)
{
if(maxarea < faces.data[i].pos.width * faces.data[i].pos.height)
{
index = i;
maxarea = faces.data[i].pos.width * faces.data[i].pos.height;
}
}
SeetaPointF points[5];
face_landmarker5(img, faces.data[index].pos, points);
std::vector<float> features = face_recognizer(img, points);
std::string strtmp((const char *)features.data(), int(features.size() * sizeof(float)));
std::string enbase64 = base64_encode(strtmp);
strresponse = "{\"code\":0,\"msg\":\"" + GetError(0) + "\",\"data\":\"";
strresponse += enbase64 + "\"}";
}while(0);
std::string strbase64 = base64_decode( iter->second );
std::vector<unsigned char> imagedatas( strbase64.begin(), strbase64.end() );
cv::Mat mat = cv::imdecode( imagedatas, 1 ); //COLOR_LOAD_IMAGE_COLOR);
std::cout << "------cv::imdecode---begin" << std::endl;
if( !mat.data )
{
std::cout << "------cv::imdecode---failed" << std::endl;
strresponse = "{\"code\":7,\"msg\":\"" + GetError( 7 ) + "\"}";
return strresponse;
}
std::cout << "------cv::imdecode---end" << std::endl;
SeetaImageData img;
img.width = mat.cols;
img.height = mat.rows;
img.channels = mat.channels();
img.data = mat.data;
do
{
auto faces = face_detector( img ); //fd->detect(img);
if( faces.size <= 0 )
{
strresponse = "{\"code\":1,\"msg\":\"" + GetError( 1 ) + "\"}";
break;
}
int index = 0;
int maxarea = faces.data[0].pos.width * faces.data[0].pos.height;
for( int i = 1; i < faces.size; i++ )
{
if( maxarea < faces.data[i].pos.width * faces.data[i].pos.height )
{
index = i;
maxarea = faces.data[i].pos.width * faces.data[i].pos.height;
}
}
SeetaPointF points[5];
face_landmarker5( img, faces.data[index].pos, points );
std::vector<float> features = face_recognizer( img, points );
std::string strtmp( ( const char * )features.data(), int( features.size() * sizeof( float ) ) );
std::string enbase64 = base64_encode( strtmp );
strresponse = "{\"code\":0,\"msg\":\"" + GetError( 0 ) + "\",\"data\":\"";
strresponse += enbase64 + "\"}";
}
while( 0 );
}
catch( std::exception &e )
......@@ -566,7 +613,7 @@ static std::string do_query_face_feature( const std::string &body )
LOG( _ERROR_, "parse message failed:%s", GETNULLPTR( e.what() ) );
strresponse = "{\"code\":1,\"msg\":\"" + GetError( 1 ) + "\"}";
}
return strresponse;
}
......@@ -574,80 +621,83 @@ static std::string do_query_face_feature( const std::string &body )
static std::string do_query_face_action( const std::string &body )
{
std::string strresponse;
try
try
{
std::map<std::string, std::string> parameters;
int n = parse_http_parameters(body, parameters);
int n = parse_http_parameters( body, parameters );
std::map<std::string, std::string>::iterator iter;
iter = parameters.find("imgStr");
if (iter == parameters.end())
iter = parameters.find( "imgStr" );
if( iter == parameters.end() )
{
LOG( _ERROR_, "do not find the parameter imgStr" );
strresponse = "{\"code\":4,\"msg\":\"" + GetError( 4 ) + "\"}";
return strresponse;
LOG( _ERROR_, "do not find the parameter imgStr" );
strresponse = "{\"code\":4,\"msg\":\"" + GetError( 4 ) + "\"}";
return strresponse;
}
std::string strbase64 = base64_decode( iter->second );
std::vector<unsigned char> imagedatas( strbase64.begin(), strbase64.end() );
cv::Mat mat = cv::imdecode( imagedatas, 1 ); //COLOR_LOAD_IMAGE_COLOR);
std::cout << "------cv::imdecode---begin" << std::endl;
if( !mat.data )
{
std::cout << "------cv::imdecode---failed" << std::endl;
strresponse = "{\"code\":7,\"msg\":\"" + GetError( 7) + "\"}";
return strresponse;
}
std::cout << "------cv::imdecode---end" << std::endl;
SeetaImageData img;
img.width = mat.cols;
img.height = mat.rows;
img.channels = mat.channels();
img.data = mat.data;
do
{
auto faces = face_detector(img);//fd->detect(img);
if (faces.size <= 0) {
strresponse = "{\"code\":1,\"msg\":\"" + GetError( 1) + "\"}";
break;
}
int index = 0;
int maxarea = faces.data[0].pos.width * faces.data[0].pos.height;
for (int i=1; i<faces.size; i++)
{
if(maxarea < faces.data[i].pos.width * faces.data[i].pos.height)
{
index = i;
maxarea = faces.data[i].pos.width * faces.data[i].pos.height;
}
}
SeetaPointF points[5];
face_landmarker5(img, faces.data[index].pos, points);
std::vector<float> features = face_action(img, points);
//std::string strtmp((const char *)features.data(), int(features.size() * sizeof(float)));
//std::string enbase64 = base64_encode(strtmp);
strresponse = "{\"code\":0,\"msg\":\"" + GetError(0) + "\",\"data\":[";
for(int i=0; i<features.size(); i++)
{
if( i== 11) {
continue;
}
if( i > 0)
{
strresponse += ",";
}
strresponse += floattostring(features[i]);
}
strresponse += "]}";
}while(0);
std::string strbase64 = base64_decode( iter->second );
std::vector<unsigned char> imagedatas( strbase64.begin(), strbase64.end() );
cv::Mat mat = cv::imdecode( imagedatas, 1 ); //COLOR_LOAD_IMAGE_COLOR);
std::cout << "------cv::imdecode---begin" << std::endl;
if( !mat.data )
{
std::cout << "------cv::imdecode---failed" << std::endl;
strresponse = "{\"code\":7,\"msg\":\"" + GetError( 7 ) + "\"}";
return strresponse;
}
std::cout << "------cv::imdecode---end" << std::endl;
SeetaImageData img;
img.width = mat.cols;
img.height = mat.rows;
img.channels = mat.channels();
img.data = mat.data;
do
{
auto faces = face_detector( img ); //fd->detect(img);
if( faces.size <= 0 )
{
strresponse = "{\"code\":1,\"msg\":\"" + GetError( 1 ) + "\"}";
break;
}
int index = 0;
int maxarea = faces.data[0].pos.width * faces.data[0].pos.height;
for( int i = 1; i < faces.size; i++ )
{
if( maxarea < faces.data[i].pos.width * faces.data[i].pos.height )
{
index = i;
maxarea = faces.data[i].pos.width * faces.data[i].pos.height;
}
}
SeetaPointF points[5];
face_landmarker5( img, faces.data[index].pos, points );
std::vector<float> features = face_action( img, points );
//std::string strtmp((const char *)features.data(), int(features.size() * sizeof(float)));
//std::string enbase64 = base64_encode(strtmp);
strresponse = "{\"code\":0,\"msg\":\"" + GetError( 0 ) + "\",\"data\":[";
for( int i = 0; i < features.size(); i++ )
{
if( i == 11 )
{
continue;
}
if( i > 0 )
{
strresponse += ",";
}
strresponse += floattostring( features[i] );
}
strresponse += "]}";
}
while( 0 );
}
catch( std::exception &e )
......@@ -655,82 +705,84 @@ static std::string do_query_face_action( const std::string &body )
LOG( _ERROR_, "parse message failed:%s", GETNULLPTR( e.what() ) );
strresponse = "{\"code\":1,\"msg\":\"" + GetError( 1 ) + "\"}";
}
return strresponse;
}
static std::string do_query_face_emotion( const std::string &body )
{
std::string strresponse;
try
try
{
std::map<std::string, std::string> parameters;
int n = parse_http_parameters(body, parameters);
int n = parse_http_parameters( body, parameters );
std::map<std::string, std::string>::iterator iter;
iter = parameters.find("imgStr");
if (iter == parameters.end())
iter = parameters.find( "imgStr" );
if( iter == parameters.end() )
{
LOG( _ERROR_, "do not find the parameter imgStr" );
strresponse = "{\"code\":4,\"msg\":\"" + GetError( 4 ) + "\"}";
return strresponse;
LOG( _ERROR_, "do not find the parameter imgStr" );
strresponse = "{\"code\":4,\"msg\":\"" + GetError( 4 ) + "\"}";
return strresponse;
}
std::string strbase64 = base64_decode( iter->second );
std::vector<unsigned char> imagedatas( strbase64.begin(), strbase64.end() );
cv::Mat mat = cv::imdecode( imagedatas, 1 ); //COLOR_LOAD_IMAGE_COLOR);
std::cout << "------cv::imdecode---begin" << std::endl;
if( !mat.data )
{
std::cout << "------cv::imdecode---failed" << std::endl;
strresponse = "{\"code\":7,\"msg\":\"" + GetError( 7) + "\"}";
return strresponse;
}
std::cout << "------cv::imdecode---end" << std::endl;
SeetaImageData img;
img.width = mat.cols;
img.height = mat.rows;
img.channels = mat.channels();
img.data = mat.data;
do
{
auto faces = face_detector(img);//fd->detect(img);
if (faces.size <= 0) {
strresponse = "{\"code\":1,\"msg\":\"" + GetError( 1) + "\"}";
break;
}
int index = 0;
int maxarea = faces.data[0].pos.width * faces.data[0].pos.height;
for (int i=1; i<faces.size; i++)
{
if(maxarea < faces.data[i].pos.width * faces.data[i].pos.height)
{
index = i;
maxarea = faces.data[i].pos.width * faces.data[i].pos.height;
}
}
SeetaPointF points[5];
face_landmarker5(img, faces.data[index].pos, points);
std::vector<float> features = face_emotion(img, points);
strresponse = "{\"code\":0,\"msg\":\"" + GetError(0) + "\",\"data\":[";
for(int i=0; i<features.size(); i++)
{
if( i > 0)
{
strresponse += ",";
}
strresponse += floattostring(features[i]);
}
strresponse += "]}";
}while(0);
std::string strbase64 = base64_decode( iter->second );
std::vector<unsigned char> imagedatas( strbase64.begin(), strbase64.end() );
cv::Mat mat = cv::imdecode( imagedatas, 1 ); //COLOR_LOAD_IMAGE_COLOR);
std::cout << "------cv::imdecode---begin" << std::endl;
if( !mat.data )
{
std::cout << "------cv::imdecode---failed" << std::endl;
strresponse = "{\"code\":7,\"msg\":\"" + GetError( 7 ) + "\"}";
return strresponse;
}
std::cout << "------cv::imdecode---end" << std::endl;
SeetaImageData img;
img.width = mat.cols;
img.height = mat.rows;
img.channels = mat.channels();
img.data = mat.data;
do
{
auto faces = face_detector( img ); //fd->detect(img);
if( faces.size <= 0 )
{
strresponse = "{\"code\":1,\"msg\":\"" + GetError( 1 ) + "\"}";
break;
}
int index = 0;
int maxarea = faces.data[0].pos.width * faces.data[0].pos.height;
for( int i = 1; i < faces.size; i++ )
{
if( maxarea < faces.data[i].pos.width * faces.data[i].pos.height )
{
index = i;
maxarea = faces.data[i].pos.width * faces.data[i].pos.height;
}
}
SeetaPointF points[5];
face_landmarker5( img, faces.data[index].pos, points );
std::vector<float> features = face_emotion( img, points );
strresponse = "{\"code\":0,\"msg\":\"" + GetError( 0 ) + "\",\"data\":[";
for( int i = 0; i < features.size(); i++ )
{
if( i > 0 )
{
strresponse += ",";
}
strresponse += floattostring( features[i] );
}
strresponse += "]}";
}
while( 0 );
}
catch( std::exception &e )
......@@ -738,51 +790,52 @@ static std::string do_query_face_emotion( const std::string &body )
LOG( _ERROR_, "parse message failed:%s", GETNULLPTR( e.what() ) );
strresponse = "{\"code\":1,\"msg\":\"" + GetError( 1 ) + "\"}";
}
return strresponse;
}
static int compute_heart_rate(std::vector<double> &vecs)
static int compute_heart_rate( std::vector<double> &vecs )
{
if(vecs.size() < 1)
if( vecs.size() < 1 )
return 0.0;
double value = 0.0;
std::vector<double> data;
for(int i=0; i<vecs.size(); i++)
for( int i = 0; i < vecs.size(); i++ )
{
value += vecs[i];
if(i >= 10)
{
if( i >= 10 )
{
value -= vecs[i - 10];
}
if(i>= 9)
{
data.push_back(value / 10);
if( i >= 9 )
{
data.push_back( value / 10 );
}
}
if(vecs.size() < 10)
if( vecs.size() < 10 )
{
data.push_back(value / vecs.size());
data.push_back( value / vecs.size() );
}
std::map<int,int> totals;
std::map<int,int>::iterator iter;
std::map<int, int> totals;
std::map<int, int>::iterator iter;
//std::stringstream stream;
for(int i=0; i<data.size(); i++)
for( int i = 0; i < data.size(); i++ )
{
int m = floor(data[i]);
iter = totals.find(m);
if(iter != totals.end())
int m = floor( data[i] );
iter = totals.find( m );
if( iter != totals.end() )
{
iter->second++;
}else
}
else
{
totals.insert(std::map<int,int>::value_type(m, 1));
totals.insert( std::map<int, int>::value_type( m, 1 ) );
}
//if(i % 32 == 0)
// stream << "\n";
......@@ -793,9 +846,9 @@ static int compute_heart_rate(std::vector<double> &vecs)
int max = 0;
int maxvalue = 0;
for(iter = totals.begin(); iter != totals.end(); ++iter)
for( iter = totals.begin(); iter != totals.end(); ++iter )
{
if(iter->second > max)
if( iter->second > max )
{
max = iter->second;
maxvalue = iter->first;
......@@ -805,33 +858,34 @@ static int compute_heart_rate(std::vector<double> &vecs)
}
// Feed one frame to the heart-rate detector.
// Returns -1 if the detector rejects the frame, 1 while it is still
// accumulating signal (value = remaining waiting time), 0 once a rate is
// available (value = heart rate).
static int recognize( seeta::HeartRateDetector *heartrate, cv::Mat &mat, const SeetaImageData &image, const SeetaRect face, double &value )
{
    std::vector<SeetaPointF> spoints81( 81 );
    face_landmarker81( image, face, spoints81.data() );
    // The detector is fed a wall-clock timestamp (ms since epoch) per frame.
    auto time_now = std::chrono::system_clock::now();
    auto duration_in_ms = std::chrono::duration_cast<std::chrono::milliseconds>( time_now.time_since_epoch() );
    double time = duration_in_ms.count();
    cv::Mat mat2 = mat.clone();
    //cv::imwrite("/tmp/test.jpg",mat2);
    int x = heartrate->get_signal( mat2, time, spoints81.data() );
    //int x = heartrate->get_signal(mat2, spoints81.data());
    if( x == -1 )
    {
        return -1;
    }
    if( heartrate->is_waiting() )
    {
        value = heartrate->get_waiting_time();
        return 1;
    }
    else
    {
        value = heartrate->get_heart_rate();
        return 0;
    }
}
static std::string do_query_face_video( const std::string &body )
static void do_heart_rate( const std::string &videofile, int *rate )
{
std::string strresponse;
*rate = 0;
cv::VideoCapture *m_capture = NULL;
seeta::HeartRateDetector *m_heartrate = NULL;
int nheartrate = 0;
try
{
std::map<std::string, std::string> parameters;
int n = parse_http_parameters(body, parameters);
std::map<std::string, std::string>::iterator iter;
iter = parameters.find("userVideo");
if (iter == parameters.end())
m_capture = new cv::VideoCapture;
m_capture->open( videofile.c_str() );
//m_capture->set( cv::CAP_PROP_FPS, 25 );
if( !m_capture->isOpened() )
{
m_capture->release();
delete m_capture;
m_capture = NULL;
std::cout << "------open video---failed" << std::endl;
return;
}
LOG( _ERROR_, "do not find the parameter userVideo" );
strresponse = "{\"code\":4,\"msg\":\"" + GetError( 4 ) + "\"}";
return strresponse;
int fps = m_capture->get( cv::CAP_PROP_FPS);
if(fps < 1 )
{
fps = 20;
}
std::string strbase64 = base64_decode( iter->second );
std::string filename = "/tmp/" + get_uuid("");
std::ofstream outf(filename,std::ios::out|std::ios::binary);
outf.write((const char *)strbase64.data(), strbase64.length());
outf.close();
m_heartrate = new seeta::HeartRateDetector;
m_heartrate->set_frame_number( 300 );
m_heartrate->reset();
std::chrono::system_clock::time_point starttimer = std::chrono::system_clock::now();
std::chrono::system_clock::time_point lasttimer;
std::vector<double> rates;
cv::Mat mat;
int num = 0;
int per_frame = 1000 / fps;
iter = parameters.find("frameNum");
if (iter == parameters.end())
while( 1 )
{
LOG( _ERROR_, "do not find the parameter frameNum" );
strresponse = "{\"code\":4,\"msg\":\"" + GetError( 4 ) + "\"}";
return strresponse;
std::chrono::system_clock::time_point cur = std::chrono::system_clock::now();
auto timer_duration2 = std::chrono::duration_cast<std::chrono::milliseconds>( cur - starttimer );
if( timer_duration2.count() < per_frame )
{
std::this_thread::sleep_for( std::chrono::milliseconds( per_frame - timer_duration2.count() ) );
}
starttimer = std::chrono::system_clock::now();
//std::this_thread::sleep_for( std::chrono::milliseconds( 10 ) );
if( !m_capture->read( mat ) )
{
std::cout << "read end" << std::endl;
break;
}
if( mat.channels() == 4 )
{
std::cout << "channels:" << mat.channels() << std::endl;
//cv::cvtColor(mat, mat, cv::COLOR_RGBA2GBR);
}
if( !mat.data )
{
std::cout << "skip invalid frame" << std::endl;
continue;
}
//cv::Mat mat2;
//cv::Size size (VIDEO_WIDTH, VIDEO_HEIGHT);
//cv::resize(mat, mat2, size, 0, 0, cv::INTER_CUBIC);
//mat = mat2.clone();
SeetaImageData img;
img.width = mat.cols;
img.height = mat.rows;
img.channels = mat.channels();
img.data = mat.data;
//cv::cvtColor(mat, mat2, cv::COLOR_BGR2RGB);
auto faces = face_detector( img );
if( faces.size <= 0 )
{
std::cout << "no find face,skip frame" << std::endl;
continue;
}
num++;
int index = 0;
int maxarea = faces.data[0].pos.width * faces.data[0].pos.height;
for( int i = 1; i < faces.size; i++ )
{
if( maxarea < faces.data[i].pos.width * faces.data[i].pos.height )
{
index = i;
maxarea = faces.data[i].pos.width * faces.data[i].pos.height;
}
}
std::cout << "-----num:" << num << std::endl;
double value = 0.0;
int nret = recognize( m_heartrate, mat, img, faces.data[index].pos, value );
if( nret == 0 )
{
std::cout << "-----rate:" << value << std::endl;
rates.push_back( value );
if( rates.size() <= 1 )
{
lasttimer = std::chrono::system_clock::now();
}
else
{
std::chrono::system_clock::time_point endtimer = std::chrono::system_clock::now();
auto timer_duration = std::chrono::duration_cast<std::chrono::milliseconds>( endtimer - lasttimer );
if( timer_duration.count() >= 4 * 1000 )
{
value = ( double )( compute_heart_rate( rates ) );
nheartrate = ( int )value;
std::cout << "heart rate:" << nheartrate << std::endl;
//strresponse = "{\"code\":0,\"msg\":\"" + GetError( 0 ) + "\"}";
break;
}
}
}
else
{
}
}//end while
}
catch( std::exception &e )
{
LOG( _ERROR_, "heartrate compute failed:%s", GETNULLPTR( e.what() ) );
}
if(!m_heartrate)
{
delete m_heartrate;
}
if(!m_capture)
{
m_capture->release();
delete m_capture;
}
*rate = nheartrate;
return;
}
// Returns the blink increment for one eye's state transition:
//   -1 when the new state is indeterminate (RANDOM/UNKNOWN) and must be ignored,
//    1 when a previously open eye has just closed (counts as one blink),
//    0 for any other valid transition.
static int geteyecount( seeta::EyeStateDetector::EYE_STATE oldstate, seeta::EyeStateDetector::EYE_STATE newstate )
{
    using ES = seeta::EyeStateDetector;
    const bool indeterminate = ( newstate == ES::EYE_RANDOM ) || ( newstate == ES::EYE_UNKNOWN );
    if( indeterminate )
    {
        return -1;
    }
    const bool blinked = ( oldstate == ES::EYE_OPEN ) && ( newstate == ES::EYE_CLOSE );
    return blinked ? 1 : 0;
}
static void do_video( const std::string &videofile, int frameNum, int *eyes,
std::vector<std::vector<float>> &face_features,
std::vector<std::vector<float>> &face_poses,
std::vector<std::vector<float>> &face_actions,
std::vector<std::vector<float>> &face_emotions,
std::vector<int> &blink_eyes
)
{
*eyes = 0;
cv::VideoCapture *m_capture = NULL;
try
{
m_capture = new cv::VideoCapture;
m_capture->open( videofile.c_str() );
//m_capture->set( cv::CAP_PROP_FPS, 25 );
if( !m_capture->isOpened() )
{
m_capture->release();
delete m_capture;
m_capture = NULL;
std::cout << "------open video---failed" << std::endl;
return;
}
int frameNum = atoi(iter->second.c_str());
if(frameNum <= 0)
int fps = m_capture->get( cv::CAP_PROP_FPS);
if(fps < 1 )
{
frameNum = 1;
fps = 20;
}
//std::string strbase64 = base64_decode( iter->second );
//std::vector<unsigned char> imagedatas( strbase64.begin(), strbase64.end() );
//cv::Mat mat = cv::imdecode( imagedatas, 1 ); //COLOR_LOAD_IMAGE_COLOR);
cv::VideoCapture * m_capture = new cv::VideoCapture;
m_capture->open(filename.c_str());
//m_capture->set( cv::CAP_PROP_FRAME_WIDTH, VIDEO_WIDTH );
//m_capture->set( cv::CAP_PROP_FRAME_HEIGHT, VIDEO_HEIGHT );
m_capture->set(cv::CAP_PROP_FPS, 25 );
if(!m_capture->isOpened())
{
m_capture->release();
delete m_capture;
m_capture = NULL;
std::cout << "------open video---failed" << std::endl;
strresponse = "{\"code\":8,\"msg\":\"" + GetError( 8) + "\"}";
return strresponse;
}
seeta::HeartRateDetector *m_heartrate = new seeta::HeartRateDetector;
m_heartrate->set_frame_number(300);
m_heartrate->reset();
std::chrono::system_clock::time_point starttimer = std::chrono::system_clock::now();
std::chrono::system_clock::time_point lasttimer;
std::vector<double> rates;
cv::Mat mat;
int nheartrate = 0;
int num = 0;
int per_frame = 1000 / g_config->frame_fps;
while(1)
{
std::chrono::system_clock::time_point cur = std::chrono::system_clock::now();
auto timer_duration2= std::chrono::duration_cast<std::chrono::milliseconds>(cur - starttimer);
if(timer_duration2.count() < per_frame)
{
std::this_thread::sleep_for( std::chrono::milliseconds(per_frame - timer_duration2.count()));
}
starttimer = std::chrono::system_clock::now();
//std::this_thread::sleep_for( std::chrono::milliseconds( 10 ) );
if( !m_capture->read(mat))
{
std::cout << "read end" << std::endl;
break;
}
if(mat.channels() == 4)
{
std::cout << "channels:" << mat.channels() << std::endl;
//cv::cvtColor(mat, mat, cv::COLOR_RGBA2GBR);
}
//std::chrono::system_clock::time_point starttimer = std::chrono::system_clock::now();
//std::chrono::system_clock::time_point lasttimer;
//std::vector<double> rates;
cv::Mat mat;
//int nheartrate = 0;
int num = 0;
seeta::EyeStateDetector::EYE_STATE m_leftstate, m_rightstate;
m_leftstate = m_rightstate = seeta::EyeStateDetector::EYE_UNKNOWN;
//int per_frame = 1000 / fps;
while( 1 )
{
//std::chrono::system_clock::time_point cur = std::chrono::system_clock::now();
//auto timer_duration2 = std::chrono::duration_cast<std::chrono::milliseconds>( cur - starttimer );
//if( timer_duration2.count() < per_frame )
//{
// std::this_thread::sleep_for( std::chrono::milliseconds( per_frame - timer_duration2.count() ) );
//}
//starttimer = std::chrono::system_clock::now();
//std::this_thread::sleep_for( std::chrono::milliseconds( 10 ) );
if( !m_capture->read( mat ) )
{
std::cout << "read end" << std::endl;
break;
}
if( mat.channels() == 4 )
{
std::cout << "channels:" << mat.channels() << std::endl;
//cv::cvtColor(mat, mat, cv::COLOR_RGBA2GBR);
}
if( !mat.data )
{
std::cout << "skip invalid frame" << std::endl;
continue;
}
//cv::Mat mat2;
//cv::Size size (VIDEO_WIDTH, VIDEO_HEIGHT);
//cv::resize(mat, mat2, size, 0, 0, cv::INTER_CUBIC);
//mat = mat2.clone();
SeetaImageData img;
img.width = mat.cols;
img.height = mat.rows;
img.channels = mat.channels();
img.data = mat.data;
//cv::cvtColor(mat, mat2, cv::COLOR_BGR2RGB);
auto faces = face_detector( img );
if( faces.size <= 0 )
{
std::cout << "no find face,skip frame" << std::endl;
continue;
}
num++;
int index = 0;
int maxarea = faces.data[0].pos.width * faces.data[0].pos.height;
for( int i = 1; i < faces.size; i++ )
{
if( maxarea < faces.data[i].pos.width * faces.data[i].pos.height )
{
index = i;
maxarea = faces.data[i].pos.width * faces.data[i].pos.height;
}
}
std::cout << "-----num:" << num << std::endl;
SeetaPointF points[5];
face_landmarker5(img, faces.data[index].pos, points);
if((num % frameNum) == 0)
{
std::vector<float> features = face_recognizer(img, points);
face_features.push_back(features);
}
std::vector<float> actions;
std::vector<float> features2 = face_action(img, points);
for(int i=0; i<features2.size(); i++)
{
if( i== 11)
{
continue;
}
actions.push_back(features2[i]);
}
face_actions.push_back(actions);
std::vector<float> emotions = face_emotion(img, points);
face_emotions.push_back(emotions);
float yaw,pitch,roll;
yaw = pitch = roll = 0.0;
pose_estimate(img, faces.data[index].pos, &yaw, &pitch, &roll);
std::vector<float> poses;
poses.push_back(yaw);
poses.push_back(pitch);
poses.push_back(roll);
face_poses.push_back(poses);
seeta::EyeStateDetector::EYE_STATE leftstate, rightstate;
leftstate = rightstate = seeta::EyeStateDetector::EYE_UNKNOWN;
eye_detect(img, points,leftstate,rightstate);
int count = geteyecount(m_leftstate, leftstate);
if(count >= 0)
{
*eyes += count;
m_leftstate = leftstate;
}
count = geteyecount(m_rightstate, rightstate);
if(count >= 0)
{
*eyes += count;
m_rightstate = rightstate;
}
if( !mat.data)
{
std::cout << "skip invalid frame" << std::endl;
continue;
}
//cv::Mat mat2;
//cv::Size size (VIDEO_WIDTH, VIDEO_HEIGHT);
//cv::resize(mat, mat2, size, 0, 0, cv::INTER_CUBIC);
//mat = mat2.clone();
SeetaImageData img;
img.width = mat.cols;
img.height = mat.rows;
img.channels = mat.channels();
img.data = mat.data;
//cv::cvtColor(mat, mat2, cv::COLOR_BGR2RGB);
auto faces = face_detector(img);
if (faces.size <= 0)
{
std::cout << "no find face,skip frame" << std::endl;
continue;
}
num++;
int index = 0;
int maxarea = faces.data[0].pos.width * faces.data[0].pos.height;
for (int i=1; i<faces.size; i++)
{
if(maxarea < faces.data[i].pos.width * faces.data[i].pos.height)
{
index = i;
maxarea = faces.data[i].pos.width * faces.data[i].pos.height;
}
}
std::cout << "-----num:" << num << std::endl;
double value = 0.0;
int nret = recognize(m_heartrate, mat, img, faces.data[index].pos, value);
if(nret == 0)
{
std::cout << "-----rate:" << value << std::endl;
rates.push_back(value);
if(rates.size() <= 1 )
{
lasttimer = std::chrono::system_clock::now();
}else
{
std::chrono::system_clock::time_point endtimer = std::chrono::system_clock::now();
auto timer_duration= std::chrono::duration_cast<std::chrono::milliseconds>(endtimer - lasttimer);
if(timer_duration.count() >= 4 * 1000)
{
value = (double)(compute_heart_rate(rates));
nheartrate = (int)value;
std::cout << "heart rate:" << nheartrate << std::endl;
strresponse = "{\"code\":0,\"msg\":\"" + GetError( 0 ) + "\"}";
break;
}
}
}else {
}
}//end while
delete m_heartrate;
delete m_capture;
if((leftstate == seeta::EyeStateDetector::EYE_OPEN) ||
(rightstate == seeta::EyeStateDetector::EYE_OPEN))
{
blink_eyes.push_back(2);
}else if ((leftstate == seeta::EyeStateDetector::EYE_UNKNOWN) &&
(rightstate == seeta::EyeStateDetector::EYE_UNKNOWN))
{
blink_eyes.push_back(1);
}else if ((leftstate == seeta::EyeStateDetector::EYE_RANDOM) &&
(rightstate == seeta::EyeStateDetector::EYE_RANDOM))
{
blink_eyes.push_back(0);
}else
{
blink_eyes.push_back(3);
}
}//end while
}
catch( std::exception &e )
{
LOG( _ERROR_, "parse message failed:%s", GETNULLPTR( e.what() ) );
strresponse = "{\"code\":1,\"msg\":\"" + GetError( 1 ) + "\"}";
LOG( _ERROR_, "heartrate compute failed:%s", GETNULLPTR( e.what() ) );
}
return strresponse;
}
if(!m_capture)
{
m_capture->release();
delete m_capture;
}
//*rate = nheartrate;
return;
}
// One uploaded file discovered while parsing a multipart/form-data body.
// The payload bytes are NOT copied: [nbegin, nend) indexes directly into the
// original request-body string (see parse_boundary_paramters, which writes
// body.data() + nbegin for nend - nbegin bytes).
struct UploadFileData
{
    std::string::size_type nbegin;   // offset of the first payload byte in the body
    std::string::size_type nend;     // offset one past the last payload byte
    std::string filename;            // client-supplied name from filename="..."
};
// Parses a multipart/form-data request body.
//
// Walks the body boundary by boundary ("--<boundary>\r\n" parts, terminated
// by "--<boundary>--"). Plain form fields are URI-decoded and collected into
// a local map; file parts are recorded as UploadFileData byte ranges into
// `body`. Afterwards:
//   - if a "frameNum" field is present, its integer value (clamped to >= 1)
//     is written to the out-parameter frameNum;
//   - only the FIRST uploaded file is written to disk at `filename`
//     (the loop breaks after one file either way).
//
// Returns 0 when a file was written, -1 on any malformed part (or when no
// file part exists), -2 when opening the destination file failed.
static int parse_boundary_paramters( const std::string &filename, const std::string &boundary, const std::string &body, int &frameNum )
{
    int nret = -1;
    size_t nbegin, nend, nfind1, nfind2, nfind3;
    nbegin = nfind1 = nfind2 = nend = 0;
    std::string name, value;
    std::string strtmp;
    std::string strboundary = "--" + boundary + "\r\n";
    std::string strboundaryend = "--" + boundary + "--";
    std::string strContent = "Content-Disposition: form-data; ";
    std::map<std::string, std::string> paramters;
    std::vector<UploadFileData> uploadfiles;
    while( 1 )
    {
        // nfind1 = start of this part, nfind2 = start of the next part (or the
        // closing "--boundary--"); the part's content lies between them.
        nfind1 = body.find( strboundary, nbegin );
        if( nfind1 != std::string::npos )
        {
            nfind2 = body.find( strboundary, nfind1 + 1 );
            if( nfind2 != std::string::npos )
            {
            }
            else
            {
                nfind2 = body.find( strboundaryend, nfind1 + 1 );
                if( nfind2 == std::string::npos )
                {
                    return -1;
                }
            }
            // First header line of the part (must be Content-Disposition).
            nend = body.find( "\r\n", nfind1 + strboundary.length() );
            if( nend == std::string::npos )
            {
                return -1;
            }
            strtmp = body.substr( nfind1 + strboundary.length() , nend - nfind1 - strboundary.length() );
            nend = strtmp.find( strContent );
            if( nend == std::string::npos )
            {
                return -1;
            }
            nend = strtmp.find( "name=\"" );
            if( nend == std::string::npos )
            {
                return -1;
            }
            nfind3 = strtmp.find( "\"", nend + 6 );
            if( nfind3 == std::string::npos )
            {
                return -1;
            }
            name = strtmp.substr( nend + 6, nfind3 - nend - 6 );
            name = decodeuricomponent(name);//urldecode( name );
            nend = strtmp.find( "filename=\"" );
            if( nend == std::string::npos )
            {
                // Plain form field: value sits after the blank line, up to the
                // "\r\n" preceding the next boundary (hence the extra -2).
                nend = body.find( "\r\n\r\n", nfind1 + strboundary.length() );
                if( ( nend == std::string::npos ) || ( nend >= nfind2 ) )
                {
                    return -1;
                }
                value = body.substr( nend + 4, nfind2 - nend - 4 - 2 );
                value = decodeuricomponent(value);//urldecode( value );
                paramters[name] = value;
            }
            else //is upload file
            {
                nfind3 = strtmp.find( "\"", nend + 10 );
                if( nfind3 == std::string::npos )
                {
                    return -1;
                }
                std::string strfilename = strtmp.substr( nend + 10, nfind3 - nend - 10 );
                strfilename = decodeuricomponent(strfilename);//urldecode( strfilename );
                nend = body.find( "\r\n\r\n", nfind1 + strboundary.length() );
                if( ( nend == std::string::npos ) || ( nend >= nfind2 ) )
                {
                    return -1;
                }
                // Record the payload range only — bytes stay in `body`.
                struct UploadFileData data;
                data.nbegin = nend + 4;
                data.nend = nfind2 - 2;
                data.filename = strfilename;
                uploadfiles.push_back( data );
            }
            nbegin = nfind2;
        }
        else
        {
            //nret = -1;
            break;
        }
    }//while
    std::map<std::string, std::string>::iterator iter;
    for( iter = paramters.begin(); iter != paramters.end(); ++iter )
    {
        if((iter->first == "frameNum") && (iter->second.length() > 0))
        {
            frameNum = atoi(iter->second.c_str());
            if(frameNum <= 0)
            {
                frameNum = 1;
            }
        }
        LOG( _INFO_, "paramter:%s=%s", GETNULLSTR( iter->first ), GETNULLSTR( iter->second ) );
        std::cout << "---" << iter->first << "=" << iter->second << ",size:" << iter->second.length() << std::endl;
    }
    //only support one file /per
    for( size_t i = 0; i < uploadfiles.size(); i++ )
    {
        std::cout << "---filename:" << uploadfiles[i].filename << ",size:" << uploadfiles[i].nend - uploadfiles[i].nbegin;
        std::cout << std::endl;// << body.substr(uploadfiles[i].nbegin, uploadfiles[i].nend - uploadfiles[i].nbegin) << std::endl;
        //strtmp = "/tmp/" + uploadfiles[i].filename;
        //int fd = open(strtmp.c_str(), O_WRONLY | O_CREAT | O_TRUNC, 0666);
        //std::cout << "---chunkpath:" << chunkpath << std::endl;
        // Always writes to the caller-supplied temp path, not the client name.
        int fd = open( filename.c_str(), O_WRONLY | O_CREAT | O_TRUNC, 0666 );
        if( fd >= 0 )
        {
            write( fd, body.data() + uploadfiles[i].nbegin, uploadfiles[i].nend - uploadfiles[i].nbegin );
            close( fd );
            nret = 0;
            break;
        }
        else
        {
            LOG( _ERROR_, "open upload file:%s failed,error:%s", GETNULLSTR( filename ), strerror( errno ) );
            std::cout << "write file:" << filename << " failed!" << std::endl;
            nret = -2;
            break;
        }
    }
    return nret;
}
......@@ -1027,7 +1426,7 @@ void on_http( httpserver *s, websocketpp::connection_hdl hdl )
const std::string &strVersion = rt.get_version();
//std::string token = rt.get_header("token");
//std::cout << "recv a:" << strMethod.c_str() << "request:" << strUri.c_str() << "thread ID=" << pthread_self() << std::endl;
std::cout << "recv a:" << strMethod.c_str() << "request:" << strUri.c_str() << "thread ID=" << pthread_self() << std::endl;
//websocketpp::http::parser::response rp;
//string strContent = rt.raw();
......@@ -1049,10 +1448,167 @@ void on_http( httpserver *s, websocketpp::connection_hdl hdl )
if( strMethod == "POST" )
{
//std::cout << "post:body:" << strBody << std::endl;
string strContentType = rt.get_header("Content-Type");
string strContentType = rt.get_header( "Content-Type" );
std::cout << "----ContentType:" << strContentType << std::endl;
if( strUri == "/query/video" )
{
string sformdata = "multipart/form-data";
string stype = strContentType.substr( 0, sformdata.length() );
if( stype != sformdata )
{
strresponse = "the request is not implement";
LOG( _WARN_, "no support get method:%s", GETNULLSTR( strUri ) );
create_http_response( con, strresponse, 404 );
return;
}
if (g_response.length() > 0)
{
std::cout << "send:" << g_response.length() << std::endl;
create_http_response( con, g_response, 200 );
return;
}
if (strContentType != "application/x-www-form-urlencoded" )
std::string filename;
int frameNum = 1;
string strboundary;
int nfind2, nfind3;
int nfind = strContentType.find( "boundary=" );
if( nfind != string::npos )
{
strboundary = strContentType.substr( nfind + 9 );
filename = "/tmp/" + get_uuid( "" );
std::cout << "upload file:" << filename << std::endl;
nret = parse_boundary_paramters( filename, strboundary, strBody, frameNum );
if(nret == -1)
{
nret = 9;
}else if (nret == -2)
{
nret = 10;
}
}else
{
nret = 9;
}
int hearts = 0;
int eyes = 0;
std::vector<std::vector<float>> face_features;
std::vector<std::vector<float>> face_poses;
std::vector<std::vector<float>> face_actions;
std::vector<std::vector<float>> face_emotions;
std::vector<int> blink_eyes;
if(nret == 0)
{
std::thread heartrate_thread(do_heart_rate, filename, &hearts);
do_video(filename, frameNum, &eyes, face_features,
face_poses,face_actions,face_emotions, blink_eyes);
heartrate_thread.join();
}
remove( filename.c_str() );
strresponse = "{\"code\":" + std::to_string(nret) + ",\"msg\":\"" + GetError( nret ) + "\",\"data\":{";
strresponse += "\"resultData\":{\"blinkNum\":" + std::to_string(eyes) + ",\"heartRate\":" + std::to_string(hearts) + "}";
strresponse += ",\"userfaces\":[";
for(int i=0; i<face_features.size(); i++)
{
std::string strtmp( ( const char * )face_features[i].data(), int( face_features[i].size() * sizeof( float ) ) );
std::string enbase64 = base64_encode( strtmp );
if(i > 0)
{
strresponse += ",";
}
strresponse += "\"" + enbase64 + "\"";
break;
}
strresponse += "],\"srcData\":[";
int num = face_poses.size();
if (num > face_actions.size())
num = face_actions.size();
if (num > face_emotions.size())
num = face_emotions.size();
if (num > blink_eyes.size())
num = blink_eyes.size();
std::cout << "num:" << num << std::endl;
//num = 10;
for(int i=0; i<num; i++)
{
if(i > 0)
{
strresponse += ",";
}
strresponse += "{";
strresponse += "\"microAction\":[";
for(int m=0; m<face_actions[i].size(); m++)
{
if(m > 0)
{
strresponse += ",";
}
strresponse += floattostring(face_actions[i][m]);
}
strresponse += "],\"emotion\":[";
for(int m=0; m<face_emotions[i].size(); m++)
{
if(m > 0)
{
strresponse += ",";
}
strresponse += floattostring(face_emotions[i][m]);
}
strresponse += "],\"faceInfo\":[";
for(int m=0; m<face_poses[i].size(); m++)
{
if(m > 0)
{
strresponse += ",";
}
strresponse += floattostring(face_poses[i][m]);
}
strresponse += "],\"eyeInfo\":" + std::to_string(blink_eyes[i]);
//for(int m=0; m<blink_eyes[i].size(); m++)
//{
// if(m > 0)
// {
// strresponse += ",";
// }
// strresponse += std::to_string(blink_eyes[i][m]);
//}
strresponse += "}";
}
strresponse += "]}}";
g_response = strresponse;
std::cout << "response:" << strresponse.length() << std::endl;
create_http_response(con, strresponse, 200);
int fd = open( "/tmp/response.txt", O_WRONLY | O_CREAT | O_TRUNC, 0666 );
if( fd >= 0 )
{
write( fd, strresponse.data(), strresponse.length() );
close( fd );
}
sleep(1);
return;
}
if( strContentType != "application/x-www-form-urlencoded" )
{
strresponse = "the request is not implement";
LOG( _WARN_, "no support get method:%s", GETNULLSTR( strUri ) );
......@@ -1075,13 +1631,16 @@ void on_http( httpserver *s, websocketpp::connection_hdl hdl )
strresponse = do_query_face_emotion( strBody );
create_http_response( con, strresponse, 200 );
}
/*
else if( strUri == "/query/video" )
{
strresponse = do_query_face_video( strBody );
create_http_response( con, strresponse, 200 );
}else
}
*/
else
{
//std::cout << "----no method:" << strMethod << std::endl;
strresponse = "the request is not implement";
LOG( _WARN_, "no support get method:%s", GETNULLSTR( strUri ) );
......@@ -1093,12 +1652,13 @@ void on_http( httpserver *s, websocketpp::connection_hdl hdl )
{
std::string method;
std::string parameters;
int nfind = strUri.find("?");
if(nfind >= 0)
int nfind = strUri.find( "?" );
if( nfind >= 0 )
{
method = strUri.substr(0, nfind);
parameters = strUri.substr(nfind + 1);
}else
method = strUri.substr( 0, nfind );
parameters = strUri.substr( nfind + 1 );
}
else
{
method = strUri;
}
......@@ -1106,13 +1666,14 @@ void on_http( httpserver *s, websocketpp::connection_hdl hdl )
std::cout << "header:" << parameters << std::endl;
std::map<std::string, std::string>::iterator iter;
std::map<std::string, std::string> params;
geturlparameters(parameters, params);
geturlparameters( parameters, params );
if( method == "/query/pots" )
{
//strresponse = do_query_pots( strBody );
create_http_response( con, strresponse, 200 );
}else
}
else
{
strresponse = "the request is not implement";
LOG( _WARN_, "no support get method:%s", GETNULLSTR( strUri ) );
......
......@@ -11,7 +11,8 @@ static std::vector<ErrorCode> errors =
{6, "database is not avilable"},
{7, "image data parse failed"},
{8, "open video failed"},
{9, "this pot is offline"},
{9, "parse multipart/form-data error"},
{10, "save upload file failed"},
};
......
......@@ -42,12 +42,14 @@ using websocketpp::lib::bind;
//typedef server::message_ptr message_ptr;
// Passphrase callback handed to the TLS context for decrypting the server's
// private key. Fixed development value.
// NOTE: the diff rendering had merged the old and new revision of this
// function (two signatures); a single definition is kept here.
static std::string get_password()
{
    return "test";
}
// TLS hardening profile selector, mirroring Mozilla's server-side TLS
// recommendations ("intermediate" vs "modern" cipher configurations).
// NOTE: the diff rendering had both the old `enum tls_mode {` header and the
// reformatted one (duplicate brace); a single declaration is kept here.
enum tls_mode
{
    MOZILLA_INTERMEDIATE = 1,
    MOZILLA_MODERN = 2
};
......@@ -112,26 +114,26 @@ context_ptr on_tls_init(tls_mode mode, websocketpp::connection_hdl hdl) {
// Ad-hoc smoke test for the orz JSON layer: builds a request JSON, extracts
// and re-serialises the "body" sub-object, erases it from the parsed dict,
// and prints the remaining JSON concatenated with the extracted body.
// NOTE: the diff rendering had merged the old and new revision of several
// statements (which would double-append the literals); each statement is
// kept exactly once here.
void testjson()
{
    std::string strBody = "{";
    strBody += "\"potuid\":\"P-ddc43fd6-c441-4d4d-9280-5f2bc2934390\",";
    strBody += "\"method\":\"post\",";
    strBody += "\"url\":\"/pot/test\",";
    strBody += "\"headers\":[";
    strBody += "    {\"keyname\":\"key1\",\"keyvalue\":\"value1\"}";
    strBody += "  ],";
    strBody += "  \"body\":{";
    strBody += "  \"max_face_size\":100";
    strBody += "  }}";
    std::cout << strBody << std::endl;
    orz::jug bjug = orz::json2jug( strBody );
    orz::jug bodyjug = bjug["body"];
    std::string strparam = orz::jug2json( bodyjug );
    // raw() exposes the underlying dict so "body" can be removed in place.
    orz::DictPiece *piece = reinterpret_cast<orz::DictPiece *>( bjug.raw() );
    if( piece )
    {
        piece->erase( "body" );
    }
    std::string strdirect = orz::jug2json( bjug );
    strdirect = strdirect + strparam;
    std::cout << strdirect << std::endl;
}
......@@ -180,16 +182,17 @@ int main( int argc, char *argv[] )
return -1;
}
//std::cout << "frame_fps:" << g_config->frame_fps << std::endl;
std::cout << "device:" << g_config->device << std::endl;
std::cout << "face_detector:" << g_config->models.face_detector << std::endl;
std::cout << "face_landmarker81:" << g_config->models.face_landmarker81 << std::endl;
std::cout << "face_landmarker5:" << g_config->models.face_landmarker5 << std::endl;
std::cout << "face_recognizer:" << g_config->models.face_recognizer<< std::endl;
std::cout << "face_recognizer:" << g_config->models.face_recognizer << std::endl;
std::cout << "pose_model:" << g_config->models.pose_model<< std::endl;
std::cout << "pose_model:" << g_config->models.pose_model << std::endl;
std::cout << "actionunit_model:" << g_config->models.actionunit_model << std::endl;
std::cout << "emotion_model:" << g_config->models.emotion_model<< std::endl;
std::cout << "eye_model:" << g_config->models.eye_model<< std::endl;
std::cout << "emotion_model:" << g_config->models.emotion_model << std::endl;
std::cout << "eye_model:" << g_config->models.eye_model << std::endl;
......@@ -224,11 +227,11 @@ int main( int argc, char *argv[] )
if (init_engine() < 0 )
if( init_engine() < 0 )
{
std::cout << "init engine failed, system exited!" << std::endl;
LOG(_ERROR_, "init engine failed, system exited");
return -1;
std::cout << "init engine failed, system exited!" << std::endl;
LOG( _ERROR_, "init engine failed, system exited" );
return -1;
}
/*
CMysqlConnectPool *pmysqlclient = CMysqlConnectPool::GetInstance();
......@@ -289,6 +292,8 @@ int main( int argc, char *argv[] )
bone_server.set_listen_backlog( 8192 );
bone_server.listen( g_config->http_port );
bone_server.set_max_http_body_size(1024 * 1024 * 2000);
std::cout << "init http max_body_size:" << bone_server.get_max_http_body_size() << std::endl;
std::cout << "init max_message_size:" << bone_server.get_max_message_size() << std::endl;
std::cout << "http port:" << g_config->http_port << std::endl;
//bone_server.set_max_message_size(96000000);
......
......@@ -2,12 +2,12 @@
#include "simplelog.h"
#ifdef _WIN32
#include <windows.h>
#include <windows.h>
#else
#include <fcntl.h>
#include <unistd.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/stat.h>
#include <sys/types.h>
#endif
#include <openssl/md5.h>
......@@ -17,12 +17,12 @@
#include <string.h>
// Formats a float with exactly two decimal places (e.g. 1.5 -> "1.50");
// used when serialising pose/action/emotion scores into the JSON response.
// NOTE: the diff rendering had merged two revisions of this function (two
// signatures, two bodies); a single definition is kept here.
std::string floattostring( float value )
{
    char buf[100];
    memset( buf, 0, sizeof( buf ) );
    snprintf( buf, sizeof( buf ) - 1, "%0.2f", value );
    return std::string( buf );
}
......@@ -372,31 +372,31 @@ std::string decodeuricomponent( const std::string &str )
int checkinstance()
{
std::string exepath,exename;
std::string exepath, exename;
GetExePathAndName( exepath, exename );
//std::string str = exepath + exename;
std::string str = exepath + ".";
str += exename + "_lock";
#ifdef _WIN32
HANDLE hfile = CreateFileA(str.c_str(), FILE_GENERIC_READ|FILE_GENERIC_WRITE,0, NULL, CREATE_ALWAYS,FILE_ATTRIBUTE_NORMAL, NULL);
if(hfile)
#ifdef _WIN32
HANDLE hfile = CreateFileA( str.c_str(), FILE_GENERIC_READ | FILE_GENERIC_WRITE, 0, NULL, CREATE_ALWAYS, FILE_ATTRIBUTE_NORMAL, NULL );
if( hfile )
{
if(LocfFile(hfile, 0,0,0,0))
if( LocfFile( hfile, 0, 0, 0, 0 ) )
{
return 0;
}
CloseHandle(hfile);
CloseHandle( hfile );
}
return -1;
#else
int fd = open(str.c_str(), O_CREAT|O_RDWR, 0666);
if(fd < 0)
#else
int fd = open( str.c_str(), O_CREAT | O_RDWR, 0666 );
if( fd < 0 )
{
std::cout << "error:" << strerror(errno) << std::endl;
return -1;
std::cout << "error:" << strerror( errno ) << std::endl;
return -1;
}
struct flock lock;
......@@ -405,141 +405,144 @@ int checkinstance()
lock.l_whence = 0;
lock.l_len = 0;
if(fcntl(fd, F_SETLK, &lock) < 0)
if( fcntl( fd, F_SETLK, &lock ) < 0 )
{
std::cout << "fntl error:" << strerror(errno) << std::endl;
close(fd);
std::cout << "fntl error:" << strerror( errno ) << std::endl;
close( fd );
return -1;
}
#endif
#endif
return 0;
}
void geturlparameters(const std::string &str, std::map<std::string, std::string> &params)
void geturlparameters( const std::string &str, std::map<std::string, std::string> &params )
{
std::string strtmp;
std::vector<std::string> vecs;
splits(str, "&", vecs);
splits( str, "&", vecs );
int offset = 0;
for(int i=0; i<vecs.size(); i++)
for( int i = 0; i < vecs.size(); i++ )
{
strtmp = vecs[i];
offset = strtmp.find("=");
if(offset <= 0)
offset = strtmp.find( "=" );
if( offset <= 0 )
{
continue;
}
std::string key = strtmp.substr(0, offset);
key = trim(key);
key = decodeuricomponent(key);
std::string value = strtmp.substr(offset + 1);
value = trim(value);
value = decodeuricomponent(value);
if((key.length() < 1)|| (value.length() < 1))
std::string key = strtmp.substr( 0, offset );
key = trim( key );
key = decodeuricomponent( key );
std::string value = strtmp.substr( offset + 1 );
value = trim( value );
value = decodeuricomponent( value );
if( ( key.length() < 1 ) || ( value.length() < 1 ) )
{
continue;
}
}
std::cout << "key:" << key << ", value:" << value << std::endl;
params.insert(std::map<std::string, std::string>::value_type(key, value));
params.insert( std::map<std::string, std::string>::value_type( key, value ) );
}
}
// Escapes a string for embedding inside a JSON string literal: the standard
// JSON control/quote characters are replaced by their backslash escapes;
// '/' is passed through unchanged; all other bytes are copied verbatim.
// NOTE: the diff rendering had merged two revisions of this function
// (duplicate escape table and loop); a single definition is kept here.
std::string encodetojson( const std::string &str )
{
    std::ostringstream oss;
    static const std::map<char, std::string> escape =
    {
        {'\"', R"(\")"},
        {'\\', R"(\\)"},
        {'/', R"(/)"},
        {'\b', R"(\b)"},
        {'\f', R"(\f)"},
        {'\n', R"(\n)"},
        {'\r', R"(\r)"},
        {'\t', R"(\t)"},
    };
    for( auto &ch : str )
    {
        auto it = escape.find( ch );
        if( it != escape.end() )
        {
            oss << it->second;
        }
        else
        {
            oss << ch;
        }
    }
    return oss.str();
}
// True for the two padding bytes (TAB = 9, SPACE = 32) skipped at the start
// of form bodies; deliberately excludes '\r' and '\n'.
// NOTE: the diff rendering had merged two revisions of this function;
// a single definition is kept here.
bool is_space_char( unsigned char c )
{
    return ( c == 9 || c == 32 );
}
int skip_space_chars(const std::string &str, std::string::size_type nbegin)
int skip_space_chars( const std::string &str, std::string::size_type nbegin )
{
while(is_space_char(str[nbegin]))
{
if(nbegin >= str.length() - 1)
return nbegin;
nbegin++;
}
return nbegin;
while( is_space_char( str[nbegin] ) )
{
if( nbegin >= str.length() - 1 )
return nbegin;
nbegin++;
}
return nbegin;
}
static int parse_paramter(const std::string &str, std::string &name, std::string &value)
static int parse_paramter( const std::string &str, std::string &name, std::string &value )
{
std::string::size_type nfind = str.find("=");
if(nfind != std::string::npos)
{
name = str.substr(0, nfind);
value = str.substr(nfind + 1);
name = decodeuricomponent(name);//urldecode(name);
value = decodeuricomponent(value);//urldecode(value);
return 0;
}
return -1;
std::string::size_type nfind = str.find( "=" );
if( nfind != std::string::npos )
{
name = str.substr( 0, nfind );
value = str.substr( nfind + 1 );
name = decodeuricomponent( name ); //urldecode(name);
value = decodeuricomponent( value ); //urldecode(value);
return 0;
}
return -1;
}
int parse_http_parameters(const std::string & body, std::map<std::string,std::string> &paramters)
int parse_http_parameters( const std::string &body, std::map<std::string, std::string> &paramters )
{
//LOG(_INFO_,"recv post req:%s ",GETNULLSTR(command));
std::string::size_type nbegin,nend, nfind;
nbegin = nend = 0;
nbegin = skip_space_chars(body, 0);
std::string name,value;
std::string strtmp;
//std::map<std::string, std::string> paramters;
while(1)
{
name = value = "";
nfind = body.find("&", nbegin);
if(nfind != std::string::npos)
{
strtmp = body.substr(nbegin, nfind - nbegin);
if(parse_paramter(strtmp, name, value) >= 0)
{
paramters[name] = value;
}
nbegin = nfind + 1;
continue;
}else if(nbegin < body.length())
{
strtmp = body.substr(nbegin);
if(parse_paramter(strtmp, name, value) >= 0)
{
paramters[name] = value;
}
break;
}
}
return paramters.size();
//LOG(_INFO_,"recv post req:%s ",GETNULLSTR(command));
std::string::size_type nbegin, nend, nfind;
nbegin = nend = 0;
nbegin = skip_space_chars( body, 0 );
std::string name, value;
std::string strtmp;
//std::map<std::string, std::string> paramters;
while( 1 )
{
name = value = "";
nfind = body.find( "&", nbegin );
if( nfind != std::string::npos )
{
strtmp = body.substr( nbegin, nfind - nbegin );
if( parse_paramter( strtmp, name, value ) >= 0 )
{
paramters[name] = value;
}
nbegin = nfind + 1;
continue;
}
else if( nbegin < body.length() )
{
strtmp = body.substr( nbegin );
if( parse_paramter( strtmp, name, value ) >= 0 )
{
paramters[name] = value;
}
break;
}
}
return paramters.size();
}
......@@ -9,13 +9,17 @@ import (
//"sync"
"encoding/json"
//"strconv"
//"time"
"time"
//"math/rand"
//"crypto/md5"
//"bytes"
//"os"
"bytes"
"os"
"io"
"mime/multipart"
"encoding/base64"
"flag"
"path/filepath"
//"bufio"
)
......@@ -237,6 +241,225 @@ func get_emotion(filename, url string) bool {
return true
}
// get_video posts a video file to <url>/query/video as an
// application/x-www-form-urlencoded body: the whole file is base64-encoded
// into the "userVideo" field, with a fixed "frameNum=10" appended.
// It prints the raw response body without parsing it (the JSON decode below
// is commented out) and returns false on any read/request/response error,
// true otherwise.
func get_video(filename, url string) bool {
	//data := `{"check_id":"sJjcUq2CORH8tTjSNJ14","check_code":"97369"}`
	//str := sign(data)
	// Read the entire video into memory; large files inflate the request
	// body by ~4/3 after base64 encoding.
	data1, err := ioutil.ReadFile(filename)
	if err != nil {
		fmt.Println("read file failed: ", err.Error())
		return false
	}
	strdata := base64.StdEncoding.EncodeToString(data1)
	data := "userVideo=" + strdata
	data += "&frameNum=10"
	//seq := "http://192.168.1.33:4000/query/feature"
	seq := url + "/query/video"
	client := &http.Client{}
	request, err := http.NewRequest("POST", seq, strings.NewReader(data))
	if err != nil {
		fmt.Println("create request failed: " , err)
		return false
	}
	//resp, err := http.Post(seq, "application/x-www-form-urlencoded",
	//	strings.NewReader(data))
	//if err != nil {
	//	fmt.Println("send request error: " , err)
	//	return false
	//}
	//request.Header.Add("Authorization","AIPSS1.0 " + str)
	//request.Header.Add("Content-Type","application/json; charset=utf-8")
	request.Header.Add("Content-Type","application/x-www-form-urlencoded")
	resp, err := client.Do(request)
	if err != nil {
		fmt.Println("send error: " , err)
		return false
	}else {
		fmt.Println("send ok")
	}
	defer resp.Body.Close()
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		fmt.Println("LoginSeetaAuthCenter ReadAll failed: ", err)
		return false
	}else {
		fmt.Println("length:", len(body))
	}
	fmt.Println(string(body))
	/*
	var res Response2
	err = json.Unmarshal(body, &res)
	if err != nil {
		fmt.Println("Unmarshal error:", err)
		return false
	}
	if res.Code == 0 {
		fmt.Println("face detector ok" )
		//bits := base64.StdEncoding.DecodeToString(res.Data)
		fmt.Println(res)
	}else {
		fmt.Println("face detector falied" )
	}
	*/
	return true
}
// get_video2 uploads a video file to url + "/query/video" as a
// multipart/form-data POST (file part "file" plus field "frameNum"=100),
// then dumps the response body to /tmp/recv.txt. A response that ends in
// "unexpected EOF" is tolerated as long as some data was received, since
// the server may truncate large bodies.
// Returns false on failure, true otherwise.
func get_video2(filename, url string) bool {
	file, err := os.Open(filename)
	if err != nil {
		fmt.Println("open file failed,", err)
		return false
	}
	defer file.Close()

	body := &bytes.Buffer{}
	writer := multipart.NewWriter(body)
	part, err := writer.CreateFormFile("file", filepath.Base(filename))
	if err != nil {
		fmt.Println("create form file failed", err)
		return false
	}
	// BUG FIX: the io.Copy error was previously assigned to err and then
	// silently overwritten by writer.Close(), so a short/failed copy could
	// upload a truncated file without any diagnostic.
	if _, err = io.Copy(part, file); err != nil {
		fmt.Println("copy file into form failed", err)
		return false
	}
	// BUG FIX: WriteField's error was previously discarded.
	if err = writer.WriteField("frameNum", "100"); err != nil {
		fmt.Println("write form field failed", err)
		return false
	}
	if err = writer.Close(); err != nil {
		fmt.Println("writer Close failed", err)
		return false
	}

	seq := url + "/query/video"
	// Video responses can be large; give the whole round trip a generous
	// timeout instead of hanging forever.
	client := &http.Client{Timeout: 60 * time.Second}
	request, err := http.NewRequest("POST", seq, body)
	if err != nil {
		fmt.Println("new request failed", err)
		return false
	}
	request.Header.Add("Content-Type", writer.FormDataContentType())
	resp, err := client.Do(request)
	if err != nil {
		fmt.Println("send error: ", err)
		return false
	}
	fmt.Println("send ok")
	defer resp.Body.Close()

	body2, err := io.ReadAll(resp.Body)
	if err != nil {
		// Tolerate a truncated-but-nonempty body; any other read error
		// is fatal.
		if !(strings.Contains(err.Error(), "unexpected EOF") && len(body2) > 0) {
			fmt.Println("recv ReadAll failed: ", err)
			return false
		}
	} else {
		fmt.Println("length:", len(body2))
	}
	fmt.Println("length:", len(body2))
	// BUG FIX: the WriteFile error was previously ignored (and the line
	// carried a stray trailing semicolon). A failed dump is reported but
	// does not invalidate the successfully received response.
	if err = ioutil.WriteFile("/tmp/recv.txt", body2, 0666); err != nil {
		fmt.Println("write /tmp/recv.txt failed", err)
	}
	return true
}
var gname string
......@@ -262,6 +485,8 @@ func main() {
get_action(gfilename, gurl)
}else if gname == "emotion" {
get_emotion(gfilename, gurl)
}else if gname == "video" {
get_video2(gfilename, gurl)
}else {
fmt.Println("not support type:",gname)
}
......
Markdown is supported
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!