This page collects typical usage examples of the C++ UsageEnvironment class. If you have been wondering what exactly C++ UsageEnvironment is for, or how to use it, then the hand-picked class code examples below should help.
A total of 20 UsageEnvironment code examples are shown, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help our system recommend better C++ code examples.
Example 1: lookupByName
Boolean MediaSession::lookupByName(UsageEnvironment& env,
                                   char const* instanceName,
                                   MediaSession*& resultSession) {
  resultSession = NULL; // unless we succeed

  Medium* medium;
  if (!Medium::lookupByName(env, instanceName, medium)) return False;

  if (!medium->isMediaSession()) {
    env.setResultMsg(instanceName, " is not a 'MediaSession' object");
    return False;
  }

  resultSession = (MediaSession*)medium;
  return True;
}
Developer ID: LiYX, Project: live555, Lines: 16, Source: MediaSession.cpp
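Usage note (our own sketch, not taken from the project above): lookupByName() is normally called with a name obtained from Medium::name() on a session created earlier, and on failure the reason recorded via setResultMsg() can be read back with env.getResultMsg(). The headers and the wrapper function below are assumptions for illustration.

#include "liveMedia.hh"
#include "BasicUsageEnvironment.hh"

// Look up a previously created MediaSession by the name live555 registered for it:
void reportSessionLookup(UsageEnvironment& env, char const* sessionName) {
  MediaSession* session = NULL;
  if (MediaSession::lookupByName(env, sessionName, session)) {
    env << "Found MediaSession \"" << sessionName << "\"\n";
  } else {
    env << "Lookup failed: " << env.getResultMsg() << "\n"; // reason set by setResultMsg() above
  }
}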
Example 2: lookupByName
Boolean RTCPInstance::lookupByName(UsageEnvironment& env,
                                   char const* instanceName,
                                   RTCPInstance*& resultInstance) {
  resultInstance = NULL; // unless we succeed

  Medium* medium;
  if (!Medium::lookupByName(env, instanceName, medium)) return False;

  if (!medium->isRTCPInstance()) {
    env.setResultMsg(instanceName, " is not a RTCP instance");
    return False;
  }

  resultInstance = (RTCPInstance*)medium;
  return True;
}
Developer ID: richard-nellist, Project: idl4k, Lines: 16, Source: RTCP.cpp
Example 3: lookupByName
Boolean RTPSource::lookupByName(UsageEnvironment& env,
                                char const* sourceName,
                                RTPSource*& resultSource) {
  resultSource = NULL; // unless we succeed

  MediaSource* source;
  if (!MediaSource::lookupByName(env, sourceName, source)) return False;

  if (!source->isRTPSource()) {
    env.setResultMsg(sourceName, " is not a RTP source");
    return False;
  }

  resultSource = (RTPSource*)source;
  return True;
}
Developer ID: Castlely, Project: HADOOP_NAIVE_CODE, Lines: 16, Source: RTPSource.cpp
Example 4: lookupByName
Boolean MediaSource::lookupByName(UsageEnvironment& env,
                                  char const* sourceName,
                                  MediaSource*& resultSource) {
  resultSource = NULL; // unless we succeed

  Medium* medium;
  if (!Medium::lookupByName(env, sourceName, medium)) return False;

  if (!medium->isSource()) {
    env.setResultMsg(sourceName, " is not a media source");
    return False;
  }

  resultSource = (MediaSource*)medium;
  return True;
}
Developer ID: natae, Project: ffmpeg, Lines: 16, Source: MediaSource.cpp
Example 5: readSocket
int readSocket(UsageEnvironment& env,
int socket, unsigned char* buffer, unsigned bufferSize,
struct sockaddr_in& fromAddress,
struct timeval* timeout) {
int bytesRead = -1;
do {
int result = blockUntilReadable(env, socket, timeout);
if (timeout != NULL && result == 0) {
bytesRead = 0;
break;
} else if (result <= 0) {
break;
}
SOCKLEN_T addressSize = sizeof fromAddress;
bytesRead = recvfrom(socket, (char*)buffer, bufferSize, 0,
(struct sockaddr*)&fromAddress,
&addressSize);
if (bytesRead < 0) {
//##### HACK to work around bugs in Linux and Windows:
int err = env.getErrno();
if (err == 111 /*ECONNREFUSED (Linux)*/
#if defined(__WIN32__) || defined(_WIN32)
// What a piece of crap Windows is. Sometimes
// recvfrom() returns -1, but with an 'errno' of 0.
// This appears not to be a real error; just treat
// it as if it were a read of zero bytes, and hope
// we don't have to do anything else to 'reset'
// this alleged error:
|| err == 0
#else
|| err == EAGAIN
#endif
|| err == 113 /*EHOSTUNREACH (Linux)*/) {
//Why does Linux return this for datagram sock?
fromAddress.sin_addr.s_addr = 0;
return 0;
}
//##### END HACK
socketErr(env, "recvfrom() error: ");
break;
}
} while (0);
return bytesRead;
}
Developer ID: ShawnOfMisfit, Project: ambarella, Lines: 46, Source: GroupsockHelper.cpp
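A hedged sketch of our own showing how the readSocket() helper above (this older signature takes an explicit timeout) might be called for a single datagram; the socket is assumed to have been created elsewhere, e.g. with setupDatagramSocket(), and the header name is an assumption.

#include "GroupsockHelper.hh"

// Read one datagram with a 5-second timeout; returns the byte count,
// 0 on timeout, or a negative value on error (matching readSocket() above):
int readOneDatagram(UsageEnvironment& env, int sock) {
  unsigned char buffer[2048];
  struct sockaddr_in fromAddress;
  struct timeval timeout;
  timeout.tv_sec = 5;
  timeout.tv_usec = 0;

  int bytesRead = readSocket(env, sock, buffer, sizeof buffer, fromAddress, &timeout);
  if (bytesRead == 0) {
    env << "readSocket() timed out\n";
  } else if (bytesRead < 0) {
    env << "readSocket() failed: " << env.getResultMsg() << "\n";
  }
  return bytesRead;
}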
Example 6: openURL
void openURL(UsageEnvironment& env, char const* progName, char const* rtspURL)
{
// Begin by creating a "RTSPClient" object. Note that there is a separate "RTSPClient" object for each stream that we wish
// to receive (even if more than one stream uses the same "rtsp://" URL).
RTSPClient* rtspClient = ourRTSPClient::createNew(env, rtspURL, RTSP_CLIENT_VERBOSITY_LEVEL, progName);
if (rtspClient == NULL) {
env << "Failed to create a RTSP client for URL \"" << rtspURL << "\": " << env.getResultMsg() << "\n";
return;
}
// ++rtspClientCount;
// Next, send a RTSP "DESCRIBE" command, to get a SDP description for the stream.
// Note that this command - like all RTSP commands - is sent asynchronously; we do not block, waiting for a response.
// Instead, the following function call returns immediately, and we handle the RTSP response later, from within the event loop:
rtspClient->sendDescribeCommand(continueAfterDESCRIBE);
}
Developer ID: tzabcd, Project: RTSP-RTP--Unicast-Multicast--over-Live555-FFMPEG, Lines: 17, Source: Live555Test.cpp
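For context, here is a hedged sketch (modeled on live555's testRTSPClient demo rather than taken from the project above) of the kind of main() that typically drives openURL(): create the scheduler and environment, issue the asynchronous DESCRIBE, then enter the event loop. The watch-variable name and the default URL are our own illustrative choices.

#include "liveMedia.hh"
#include "BasicUsageEnvironment.hh"

void openURL(UsageEnvironment& env, char const* progName, char const* rtspURL); // as defined above

char eventLoopWatchVariable = 0; // set to a non-zero value (e.g. in a response handler) to leave the loop

int main(int argc, char** argv) {
  TaskScheduler* scheduler = BasicTaskScheduler::createNew();
  UsageEnvironment* env = BasicUsageEnvironment::createNew(*scheduler);

  char const* url = (argc > 1) ? argv[1] : "rtsp://example.com/stream"; // illustrative default
  openURL(*env, argv[0], url);

  env->taskScheduler().doEventLoop(&eventLoopWatchVariable); // returns only when the watch variable changes
  return 0;
}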
Example 7: Medium
RTSPServer::RTSPServer(UsageEnvironment& env,
int ourSocket, Port ourPort,
UserAuthenticationDatabase* authDatabase,
unsigned reclamationTestSeconds)
: Medium(env),
fServerSocket(ourSocket), fServerPort(ourPort),
fAuthDB(authDatabase), fReclamationTestSeconds(reclamationTestSeconds),
fServerMediaSessions(HashTable::create(STRING_HASH_KEYS)) {
#ifdef USE_SIGNALS
// Ignore the SIGPIPE signal, so that clients on the same host that are killed
// don't also kill us:
signal(SIGPIPE, SIG_IGN);
#endif
// Arrange to handle connections from others:
// printf("RTSPServer: turnOnBackgroundReadHandling\n"); //jay
env.taskScheduler().turnOnBackgroundReadHandling(fServerSocket,
(TaskScheduler::BackgroundHandlerProc*)&incomingConnectionHandler,
this);
}
Developer ID: ShawnOfMisfit, Project: ambarella, Lines: 20, Source: RTSPServer.cpp
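The constructor above is not called directly by applications; it is reached through the public RTSPServer::createNew() factory, which first creates and binds the listening socket. A minimal sketch of our own, with an illustrative port number and no access control:

#include "liveMedia.hh"

// Create an RTSP server on port 8554 with no UserAuthenticationDatabase (illustrative values):
RTSPServer* startRtspServer(UsageEnvironment& env) {
  RTSPServer* server = RTSPServer::createNew(env, 8554, NULL);
  if (server == NULL) {
    env << "Failed to create RTSP server: " << env.getResultMsg() << "\n";
  }
  return server;
}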
Example 8: socketJoinGroup
Boolean socketJoinGroup(UsageEnvironment& env, int socket,
netAddressBits groupAddress){
if (!IsMulticastAddress(groupAddress)) return True; // ignore this case
struct ip_mreq imr;
imr.imr_multiaddr.s_addr = groupAddress;
imr.imr_interface.s_addr = ReceivingInterfaceAddr;
if (setsockopt(socket, IPPROTO_IP, IP_ADD_MEMBERSHIP,
(const char*)&imr, sizeof (struct ip_mreq)) < 0) {
#if defined(__WIN32__) || defined(_WIN32)
if (env.getErrno() != 0) {
// That piece-of-shit toy operating system (Windows) sometimes lies
// about setsockopt() failing!
#endif
socketErr(env, "setsockopt(IP_ADD_MEMBERSHIP) error: ");
return False;
#if defined(__WIN32__) || defined(_WIN32)
}
#endif
}
return True;
}
Developer ID: ShawnOfMisfit, Project: ambarella, Lines: 23, Source: GroupsockHelper.cpp
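A small sketch of our own showing the usual call sequence around socketJoinGroup(): create a datagram socket with the groupsock helpers, then join the multicast group. The wrapper function, port number, and header name are assumptions for illustration.

#include "GroupsockHelper.hh"

// Join "groupAddress" (a multicast address in network byte order) on a fresh datagram socket:
Boolean joinGroupOnNewSocket(UsageEnvironment& env, netAddressBits groupAddress) {
  Port port(15947); // arbitrary local port
  int sock = setupDatagramSocket(env, port);
  if (sock < 0) return False;

  if (!socketJoinGroup(env, sock, groupAddress)) {
    env << "socketJoinGroup() failed: " << env.getResultMsg() << "\n";
    return False;
  }
  return True;
}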
Example 9: appendPortNum
void HTTPSink::appendPortNum(UsageEnvironment& env,
                             Port const& port) {
  char tmpBuf[10]; // large enough to hold a port # string
  sprintf(tmpBuf, " %d", ntohs(port.num()));
  env.appendToResultMsg(tmpBuf);
}
Developer ID: github188, Project: ffmpeg-port, Lines: 6, Source: HTTPSink.cpp
Example 10: main
// -----------------------------------------
// entry point
// -----------------------------------------
int main(int argc, char** argv)
{
// default parameters
const char *dev_name = "/dev/video0";
int format = V4L2_PIX_FMT_H264;
int width = 640;
int height = 480;
int queueSize = 10;
int fps = 25;
unsigned short rtspPort = 8554;
unsigned short rtspOverHTTPPort = 0;
bool multicast = false;
int verbose = 0;
std::string outputFile;
bool useMmap = true;
std::string url = "unicast";
std::string murl = "multicast";
bool useThread = true;
std::string maddr;
bool repeatConfig = true;
int timeout = 65;
// decode parameters
int c = 0;
while ((c = getopt (argc, argv, "v::Q:O:" "I:P:T:m:u:M:ct:" "rsfF:W:H:" "h")) != -1)
{
switch (c)
{
case 'v': verbose = 1; if (optarg && *optarg=='v') verbose++; break;
case 'Q': queueSize = atoi(optarg); break;
case 'O': outputFile = optarg; break;
// RTSP/RTP
case 'I': ReceivingInterfaceAddr = inet_addr(optarg); break;
case 'P': rtspPort = atoi(optarg); break;
case 'T': rtspOverHTTPPort = atoi(optarg); break;
case 'u': url = optarg; break;
case 'm': multicast = true; murl = optarg; break;
case 'M': multicast = true; maddr = optarg; break;
case 'c': repeatConfig = false; break;
case 't': timeout = atoi(optarg); break;
// V4L2
case 'r': useMmap = false; break;
case 's': useThread = false; break;
case 'f': format = 0; break;
case 'F': fps = atoi(optarg); break;
case 'W': width = atoi(optarg); break;
case 'H': height = atoi(optarg); break;
case 'h':
default:
{
std::cout << argv[0] << " [-v[v]] [-Q queueSize] [-O file]" << std::endl;
std::cout << "\t [-I interface] [-P RTSP port] [-T RTSP/HTTP port] [-m multicast url] [-u unicast url] [-M multicast addr] [-c] [-t timeout]" << std::endl;
std::cout << "\t [-r] [-s] [-W width] [-H height] [-F fps] [device] [device]" << std::endl;
std::cout << "\t -v : verbose" << std::endl;
std::cout << "\t -vv : very verbose" << std::endl;
std::cout << "\t -Q length: Number of frame queue (default "<< queueSize << ")" << std::endl;
std::cout << "\t -O output: Copy captured frame to a file or a V4L2 device" << std::endl;
std::cout << "\t RTSP options :" << std::endl;
std::cout << "\t -I addr : RTSP interface (default autodetect)" << std::endl;
std::cout << "\t -P port : RTSP port (default "<< rtspPort << ")" << std::endl;
std::cout << "\t -T port : RTSP over HTTP port (default "<< rtspOverHTTPPort << ")" << std::endl;
std::cout << "\t -u url : unicast url (default " << url << ")" << std::endl;
std::cout << "\t -m url : multicast url (default " << murl << ")" << std::endl;
std::cout << "\t -M addr : multicast group:port (default is random_address:20000)" << std::endl;
std::cout << "\t -c : don't repeat config (default repeat config before IDR frame)" << std::endl;
std::cout << "\t -t secs : RTCP expiration timeout (default " << timeout << ")" << std::endl;
std::cout << "\t V4L2 options :" << std::endl;
std::cout << "\t -r : V4L2 capture using read interface (default use memory mapped buffers)" << std::endl;
std::cout << "\t -s : V4L2 capture using live555 mainloop (default use a reader thread)" << std::endl;
std::cout << "\t -f : V4L2 capture using current format (-W,-H,-F are ignore)" << std::endl;
std::cout << "\t -W width : V4L2 capture width (default "<< width << ")" << std::endl;
std::cout << "\t -H height: V4L2 capture height (default "<< height << ")" << std::endl;
std::cout << "\t -F fps : V4L2 capture framerate (default "<< fps << ")" << std::endl;
std::cout << "\t device : V4L2 capture device (default "<< dev_name << ")" << std::endl;
exit(0);
}
}
}
std::list<std::string> devList;
while (optind<argc)
{
devList.push_back(argv[optind]);
optind++;
}
if (devList.empty())
{
devList.push_back(dev_name);
}
// init logger
initLogger(verbose);
// create live555 environment
TaskScheduler* scheduler = BasicTaskScheduler::createNew();
UsageEnvironment* env = BasicUsageEnvironment::createNew(*scheduler);
//......... part of the code is omitted here .........
Developer ID: Xianleewu, Project: camrtsp, Lines: 101, Source: main.cpp
Example 11: main
int main(int argc, char** argv) {
// Begin by setting up our usage environment:
TaskScheduler* scheduler = BasicTaskScheduler::createNew();
UsageEnvironment* env = BasicUsageEnvironment::createNew(*scheduler);
UserAuthenticationDatabase* authDB = NULL;
#ifdef ACCESS_CONTROL
// To implement client access control to the RTSP server, do the following:
authDB = new UserAuthenticationDatabase;
authDB->addUserRecord("username1", "password1"); // replace these with real strings
// Repeat the above with each <username>, <password> that you wish to allow
// access to the server.
#endif
// Create the RTSP server. Try first with the default port number (554),
// and then with the alternative port number (8554):
RTSPServer* rtspServer;
portNumBits rtspServerPortNum = 554; // first try to create the RTSP server on the default port (554)
rtspServer = DynamicRTSPServer::createNew(*env, rtspServerPortNum, authDB);
if (rtspServer == NULL) { // if creation on port 554 failed, fall back to port 8554
rtspServerPortNum = 8554;
rtspServer = DynamicRTSPServer::createNew(*env, rtspServerPortNum, authDB);
}
if (rtspServer == NULL) {
*env << "Failed to create RTSP server: " << env->getResultMsg() << "\n";
exit(1);
}
*env << "LIVE555 Media Server\n";
*env << "\tversion " << MEDIA_SERVER_VERSION_STRING
<< " (LIVE555 Streaming Media library version "
<< LIVEMEDIA_LIBRARY_VERSION_STRING << ").\n";
char* urlPrefix = rtspServer->rtspURLPrefix();
*env << "Play streams from this server using the URL\n\t"
<< urlPrefix << "<filename>\nwhere <filename> is a file present in the current directory.\n";
*env << "Each file's type is inferred from its name suffix:\n";
*env << "\t\".264\" => a H.264 Video Elementary Stream file\n";
*env << "\t\".265\" => a H.265 Video Elementary Stream file\n";
*env << "\t\".aac\" => an AAC Audio (ADTS format) file\n";
*env << "\t\".ac3\" => an AC-3 Audio file\n";
*env << "\t\".amr\" => an AMR Audio file\n";
*env << "\t\".dv\" => a DV Video file\n";
*env << "\t\".m4e\" => a MPEG-4 Video Elementary Stream file\n";
*env << "\t\".mkv\" => a Matroska audio+video+(optional)subtitles file\n";
*env << "\t\".mp3\" => a MPEG-1 or 2 Audio file\n";
*env << "\t\".mpg\" => a MPEG-1 or 2 Program Stream (audio+video) file\n";
*env << "\t\".ogg\" or \".ogv\" or \".opus\" => an Ogg audio and/or video file\n";
*env << "\t\".ts\" => a MPEG Transport Stream file\n";
*env << "\t\t(a \".tsx\" index file - if present - provides server 'trick play' support)\n";
*env << "\t\".vob\" => a VOB (MPEG-2 video with AC-3 audio) file\n";
*env << "\t\".wav\" => a WAV Audio file\n";
*env << "\t\".webm\" => a WebM audio(Vorbis)+video(VP8) file\n";
*env << "See http://www.live555.com/mediaServer/ for additional documentation.\n";
// Also, attempt to create a HTTP server for RTSP-over-HTTP tunneling.
// Try first with the default HTTP port (80), and then with the alternative HTTP
// port numbers (8000 and 8080).
if (rtspServer->setUpTunnelingOverHTTP(80) || rtspServer->setUpTunnelingOverHTTP(8000) || rtspServer->setUpTunnelingOverHTTP(8080)) {
*env << "(We use port " << rtspServer->httpServerPortNum() << " for optional RTSP-over-HTTP tunneling, or for HTTP live streaming (for indexed Transport Stream files only).)\n";
} else {
*env << "(RTSP-over-HTTP tunneling is not available.)\n";
}
env->taskScheduler().doEventLoop(); // does not return
return 0; // only to prevent compiler warning
}
Developer ID: github188, Project: SimpleCode, Lines: 69, Source: live555MediaServer.cpp
Example 12: ourIPAddress
netAddressBits ourIPAddress(UsageEnvironment& env) {
static netAddressBits ourAddress = 0;
int sock = -1;
struct in_addr testAddr;
if (ourAddress == 0) {
// We need to find our source address
struct sockaddr_in fromAddr;
fromAddr.sin_addr.s_addr = 0;
// Get our address by sending a (0-TTL) multicast packet,
// receiving it, and looking at the source address used.
// (This is kinda bogus, but it provides the best guarantee
// that other nodes will think our address is the same as we do.)
do {
loopbackWorks = 0; // until we learn otherwise
testAddr.s_addr = our_inet_addr("228.67.43.91"); // arbitrary
Port testPort(15947); // ditto
sock = setupDatagramSocket(env, testPort);
if (sock < 0) break;
if (!socketJoinGroup(env, sock, testAddr.s_addr)) break;
unsigned char testString[] = "hostIdTest";
unsigned testStringLength = sizeof testString;
if (!writeSocket(env, sock, testAddr, testPort, 0,
testString, testStringLength)) break;
unsigned char readBuffer[20];
struct timeval timeout;
timeout.tv_sec = 5;
timeout.tv_usec = 0;
int bytesRead = readSocket(env, sock,
readBuffer, sizeof readBuffer,
fromAddr, &timeout);
if (bytesRead == 0 // timeout occurred
|| bytesRead != (int)testStringLength
|| strncmp((char*)readBuffer, (char*)testString,
testStringLength) != 0) {
break;
}
loopbackWorks = 1;
} while (0);
if (!loopbackWorks) do {
// We couldn't find our address using multicast loopback
// so try instead to look it up directly.
char hostname[100];
hostname[0] = '\0';
#ifndef CRIS
gethostname(hostname, sizeof hostname);
#endif
if (hostname[0] == '\0') {
env.setResultErrMsg("initial gethostname() failed");
break;
}
#if defined(VXWORKS)
#include <hostLib.h>
if (ERROR == (ourAddress = hostGetByName( hostname ))) break;
#else
struct hostent* hstent
= (struct hostent*)gethostbyname(hostname);
if (hstent == NULL || hstent->h_length != 4) {
env.setResultErrMsg("initial gethostbyname() failed");
break;
}
// Take the first address that's not bad
// (This code, like many others, won't handle IPv6)
netAddressBits addr = 0;
for (unsigned i = 0; ; ++i) {
char* addrPtr = hstent->h_addr_list[i];
if (addrPtr == NULL) break;
netAddressBits a = *(netAddressBits*)addrPtr;
if (!badAddress(a)) {
addr = a;
break;
}
}
if (addr != 0) {
fromAddr.sin_addr.s_addr = addr;
} else {
env.setResultMsg("no address");
break;
}
} while (0);
// Make sure we have a good address:
netAddressBits from = fromAddr.sin_addr.s_addr;
if (badAddress(from)) {
char tmp[100];
sprintf(tmp,
"This computer has an invalid IP address: 0x%x",
(netAddressBits)(ntohl(from)));
env.setResultMsg(tmp);
//......... part of the code is omitted here .........
Developer ID: ShawnOfMisfit, Project: ambarella, Lines: 101, Source: GroupsockHelper.cpp
Example 13: setupDarwinStreaming
void setupDarwinStreaming(UsageEnvironment& env, WISInput& inputDevice) {
// Create a 'Darwin injector' object:
injector = DarwinInjector::createNew(env, applicationName);
// For RTCP:
const unsigned maxCNAMElen = 100;
unsigned char CNAME[maxCNAMElen + 1];
gethostname((char *) CNAME, maxCNAMElen);
CNAME[maxCNAMElen] = '\0'; // just in case
/******************audio***********************/
if (audioFormat != AFMT_NONE) {
// Create the audio source:
sourceAudio = createAudioSource(env, inputDevice.audioSource());
if (packageFormat != PFMT_TRANSPORT_STREAM) { // there's a separate RTP stream for audio
// Create 'groupsocks' for RTP and RTCP.
// (Note: Because we will actually be streaming through a remote Darwin server,
// via TCP, we just use dummy destination addresses, port numbers, and TTLs here.)
struct in_addr dummyDestAddress;
dummyDestAddress.s_addr = 0;
rtpGroupsockAudio = new Groupsock(env, dummyDestAddress, 0, 0);
rtcpGroupsockAudio = new Groupsock(env, dummyDestAddress, 0, 0);
// Create a RTP sink for the audio stream:
sinkAudio = createAudioRTPSink(env, rtpGroupsockAudio);
// Create (and start) a 'RTCP instance' for this RTP sink:
unsigned totalSessionBandwidthAudio = (audioOutputBitrate+500)/1000; // in kbps; for RTCP b/w share
rtcpAudio = RTCPInstance::createNew(env, rtcpGroupsockAudio,
totalSessionBandwidthAudio, CNAME,
sinkAudio, NULL /* we're a server */);
// Note: This starts RTCP running automatically
// Add these to our 'Darwin injector':
injector->addStream(sinkAudio, rtcpAudio);
}
}
/******************end audio***********************/
/******************video***********************/
if (videoFormat != VFMT_NONE) {
// Create the video source:
if (packageFormat == PFMT_TRANSPORT_STREAM) {
MPEG2TransportStreamFromESSource* tsSource
= MPEG2TransportStreamFromESSource::createNew(env);
tsSource->addNewVideoSource(inputDevice.videoSource(), 2);
if (sourceAudio != NULL) tsSource->addNewAudioSource(sourceAudio, 2);
// Gather the Transport packets into network packet-sized chunks:
sourceVideo = MPEG2TransportStreamAccumulator::createNew(env, tsSource);
sourceAudio = NULL;
} else {
switch (videoFormat) {
case VFMT_NONE: // not used
break;
case VFMT_MJPEG: {
sourceVideo = WISJPEGStreamSource::createNew(inputDevice.videoSource());
break;
}
case VFMT_MPEG1:
case VFMT_MPEG2: {
sourceVideo = MPEG1or2VideoStreamDiscreteFramer::createNew(env, inputDevice.videoSource());
break;
}
case VFMT_MPEG4: {
sourceVideo = MPEG4VideoStreamDiscreteFramer::createNew(env, inputDevice.videoSource());
break;
}
}
}
// Create 'groupsocks' for RTP and RTCP.
// (Note: Because we will actually be streaming through a remote Darwin server,
// via TCP, we just use dummy destination addresses, port numbers, and TTLs here.)
struct in_addr dummyDestAddress;
dummyDestAddress.s_addr = 0;
rtpGroupsockVideo = new Groupsock(env, dummyDestAddress, 0, 0);
rtcpGroupsockVideo = new Groupsock(env, dummyDestAddress, 0, 0);
// Create a RTP sink for the video stream:
unsigned char payloadFormatCode = 97; // if dynamic
setVideoRTPSinkBufferSize();
if (packageFormat == PFMT_TRANSPORT_STREAM) {
sinkVideo = SimpleRTPSink::createNew(env, rtpGroupsockVideo,
33, 90000, "video", "mp2t",
1, True, False/*no 'M' bit*/);
} else {
switch (videoFormat) {
case VFMT_NONE: // not used
break;
case VFMT_MJPEG: {
sinkVideo = JPEGVideoRTPSink::createNew(env, rtpGroupsockVideo);
break;
}
case VFMT_MPEG1:
case VFMT_MPEG2: {
sinkVideo = MPEG1or2VideoRTPSink::createNew(env, rtpGroupsockVideo);
break;
}
case VFMT_MPEG4: {
//......... part of the code is omitted here .........
Developer ID: epheatt, Project: wis-streamer, Lines: 101, Source: DarwinStreaming.cpp
Example 14: AudioInputDevice
WAVAudioFileSource::WAVAudioFileSource(UsageEnvironment& env, FILE* fid)
: AudioInputDevice(env, 0, 0, 0, 0)/* set the real parameters later */,
fFid(fid), fFidIsSeekable(False), fLastPlayTime(0), fHaveStartedReading(False), fWAVHeaderSize(0), fFileSize(0),
fScaleFactor(1), fLimitNumBytesToStream(False), fNumBytesToStream(0), fAudioFormat(WA_UNKNOWN) {
// Check the WAV file header for validity.
// Note: The following web pages contain info about the WAV format:
// http://www.ringthis.com/dev/wave_format.htm
// http://www.lightlink.com/tjweber/StripWav/Canon.html
// http://www.onicos.com/staff/iz/formats/wav.html
Boolean success = False; // until we learn otherwise
do {
// RIFF Chunk:
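// Note: "nextc" below is a macro (not shown) defined earlier in WAVAudioFileSource.cpp that
// reads the next byte from "fid", so each comparison consumes one byte of the file.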
if (nextc != 'R' || nextc != 'I' || nextc != 'F' || nextc != 'F') break;
if (!skipBytes(fid, 4)) break;
if (nextc != 'W' || nextc != 'A' || nextc != 'V' || nextc != 'E') break;
// Skip over any chunk that's not a FORMAT ('fmt ') chunk:
u_int32_t tmp;
if (!get4Bytes(fid, tmp)) break;
while (tmp != 0x20746d66/*'fmt ', little-endian*/) {
// Skip this chunk:
u_int32_t chunkLength;
if (!get4Bytes(fid, chunkLength)) break;
if (!skipBytes(fid, chunkLength)) break;
if (!get4Bytes(fid, tmp)) break;
}
// FORMAT Chunk (the 4-byte header code has already been parsed):
unsigned formatLength;
if (!get4Bytes(fid, formatLength)) break;
unsigned short audioFormat;
if (!get2Bytes(fid, audioFormat)) break;
fAudioFormat = (unsigned char)audioFormat;
if (fAudioFormat != WA_PCM && fAudioFormat != WA_PCMA && fAudioFormat != WA_PCMU && fAudioFormat != WA_IMA_ADPCM) {
// It's a format that we don't (yet) understand
env.setResultMsg("Audio format is not one that we handle (PCM/PCMU/PCMA or IMA ADPCM)");
break;
}
unsigned short numChannels;
if (!get2Bytes(fid, numChannels)) break;
fNumChannels = (unsigned char)numChannels;
if (fNumChannels < 1 || fNumChannels > 2) { // invalid # channels
char errMsg[100];
sprintf(errMsg, "Bad # channels: %d", fNumChannels);
env.setResultMsg(errMsg);
break;
}
if (!get4Bytes(fid, fSamplingFrequency)) break;
if (fSamplingFrequency == 0) {
env.setResultMsg("Bad sampling frequency: 0");
break;
}
if (!skipBytes(fid, 6)) break; // "nAvgBytesPerSec" (4 bytes) + "nBlockAlign" (2 bytes)
unsigned short bitsPerSample;
if (!get2Bytes(fid, bitsPerSample)) break;
fBitsPerSample = (unsigned char)bitsPerSample;
if (fBitsPerSample == 0) {
env.setResultMsg("Bad bits-per-sample: 0");
break;
}
if (!skipBytes(fid, formatLength - 16)) break;
// FACT chunk (optional):
int c = nextc;
if (c == 'f') {
if (nextc != 'a' || nextc != 'c' || nextc != 't') break;
unsigned factLength;
if (!get4Bytes(fid, factLength)) break;
if (!skipBytes(fid, factLength)) break;
c = nextc;
}
// EYRE chunk (optional):
if (c == 'e') {
if (nextc != 'y' || nextc != 'r' || nextc != 'e') break;
unsigned eyreLength;
if (!get4Bytes(fid, eyreLength)) break;
if (!skipBytes(fid, eyreLength)) break;
c = nextc;
}
// DATA Chunk:
if (c != 'd' || nextc != 'a' || nextc != 't' || nextc != 'a') break;
if (!skipBytes(fid, 4)) break;
// The header is good; the remaining data are the sample bytes.
fWAVHeaderSize = (unsigned)TellFile64(fid);
success = True;
} while (0);
if (!success) {
env.setResultMsg("Bad WAV file format");
// Set "fBitsPerSample" to zero, to indicate failure:
fBitsPerSample = 0;
return;
}
fPlayTimePerSample = 1e6/(double)fSamplingFrequency;
//......... part of the code is omitted here .........
Developer ID: Azzuro, Project: MediaPortal-1, Lines: 101, Source: WAVAudioFileSource.cpp
Example 15: AudioInputDevice
WAVAudioFileSource::WAVAudioFileSource(UsageEnvironment& env, FILE* fid)
: AudioInputDevice(env, 0, 0, 0, 0)/* set the real parameters later */,
fFid(fid), fLastPlayTime(0), fWAVHeaderSize(0), fFileSize(0), fScaleFactor(1),
fLimitNumBytesToStream(False), fNumBytesToStream(0), fAudioFormat(WA_UNKNOWN) {
// Check the WAV file header for validity.
// Note: The following web pages contain info about the WAV format:
// http://www.ringthis.com/dev/wave_format.htm
// http://www.lightlink.com/tjweber/StripWav/Canon.html
// http://www.wotsit.org/list.asp?al=W
Boolean success = False; // until we learn otherwise
do {
// RIFF Chunk:
if (nextc != 'R' || nextc != 'I' || nextc != 'F' || nextc != 'F') break;
if (!skipBytes(fid, 4)) break;
if (nextc != 'W' || nextc != 'A' || nextc != 'V' || nextc != 'E') break;
// FORMAT Chunk:
if (nextc != 'f' || nextc != 'm' || nextc != 't' || nextc != ' ') break;
unsigned formatLength;
if (!get4Bytes(fid, formatLength)) break;
unsigned short audioFormat;
if (!get2Bytes(fid, audioFormat)) break;
fAudioFormat = (unsigned char)audioFormat;
if (fAudioFormat != WA_PCM && fAudioFormat != WA_PCMA && fAudioFormat != WA_PCMU && fAudioFormat != WA_IMA_ADPCM) {
// It's a format that we don't (yet) understand
env.setResultMsg("Audio format is not one that we handle (PCM/PCMU/PCMA or IMA ADPCM)");
break;
}
unsigned short numChannels;
if (!get2Bytes(fid, numChannels)) break;
fNumChannels = (unsigned char)numChannels;
if (fNumChannels < 1 || fNumChannels > 2) { // invalid # channels
char errMsg[100];
sprintf(errMsg, "Bad # channels: %d", fNumChannels);
env.setResultMsg(errMsg);
break;
}
if (!get4Bytes(fid, fSamplingFrequency)) break;
if (fSamplingFrequency == 0) {
env.setResultMsg("Bad sampling frequency: 0");
break;
}
if (!skipBytes(fid, 6)) break; // "nAvgBytesPerSec" (4 bytes) + "nBlockAlign" (2 bytes)
unsigned short bitsPerSample;
if (!get2Bytes(fid, bitsPerSample)) break;
fBitsPerSample = (unsigned char)bitsPerSample;
if (fBitsPerSample == 0) {
env.setResultMsg("Bad bits-per-sample: 0");
break;
}
if (!skipBytes(fid, formatLength - 16)) break;
// FACT chunk (optional):
int c = nextc;
if (c == 'f') {
if (nextc != 'a' || nextc != 'c' || nextc != 't') break;
unsigned factLength;
if (!get4Bytes(fid, factLength)) break;
if (!skipBytes(fid, factLength)) break;
c = nextc;
}
// DATA Chunk:
if (c != 'd' || nextc != 'a' || nextc != 't' || nextc != 'a') break;
if (!skipBytes(fid, 4)) break;
// The header is good; the remaining data are the sample bytes.
fWAVHeaderSize = ftell(fid);
success = True;
} while (0);
if (!success) {
env.setResultMsg("Bad WAV file format");
// Set "fBitsPerSample" to zero, to indicate failure:
fBitsPerSample = 0;
return;
}
fPlayTimePerSample = 1e6/(double)fSamplingFrequency;
// Although PCM is a sample-based format, we group samples into
// 'frames' for efficient delivery to clients. Set up our preferred
// frame size to be close to 20 ms, if possible, but always no greater
// than 1400 bytes (to ensure that it will fit in a single RTP packet)
unsigned maxSamplesPerFrame = (1400*8)/(fNumChannels*fBitsPerSample);
unsigned desiredSamplesPerFrame = (unsigned)(0.02*fSamplingFrequency);
unsigned samplesPerFrame = desiredSamplesPerFrame < maxSamplesPerFrame ? desiredSamplesPerFrame : maxSamplesPerFrame;
fPreferredFrameSize = (samplesPerFrame*fNumChannels*fBitsPerSample)/8;
}
Developer ID: OneDream, Project: faplayer, Lines: 91, Source: WAVAudioFileSource.cpp
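These constructors are normally reached through the static WAVAudioFileSource::createNew(env, fileName) factory, which opens the file and returns NULL if the header checks above fail (the reason being reported through env.setResultMsg()). A short sketch of our own; the wrapper function is illustrative, and the accessor names come from the AudioInputDevice base class.

#include "liveMedia.hh"
#include "BasicUsageEnvironment.hh"

WAVAudioFileSource* openWavFile(UsageEnvironment& env, char const* fileName) {
  WAVAudioFileSource* source = WAVAudioFileSource::createNew(env, fileName);
  if (source == NULL) {
    env << "Unable to open \"" << fileName << "\" as a WAV file: " << env.getResultMsg() << "\n";
    return NULL;
  }

  // Report the parameters parsed from the FORMAT chunk:
  env << "WAV file: " << source->numChannels() << " channel(s), "
      << source->samplingFrequency() << " Hz, "
      << source->bitsPerSample() << " bits per sample\n";
  return source;
}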
Example 16: parseSIPURL
Boolean SIPClient::parseSIPURL(UsageEnvironment& env, char const* url,
NetAddress& address,
portNumBits& portNum) {
do {
// Parse the URL as "sip:<username>@<address>:<port>/<etc>"
// (with ":<port>" and "/<etc>" optional)
// Also, skip over any "<username>[:<password>]@" preceding <address>
char const* prefix = "sip:";
unsigned const prefixLength = 4;
if (_strncasecmp(url, prefix, prefixLength) != 0) {
env.setResultMsg("URL is not of the form \"", prefix, "\"");
break;
}
unsigned const parseBufferSize = 100;
char parseBuffer[parseBufferSize];
unsigned addressStartIndex = prefixLength;
while (url[addressStartIndex] != '\0'
&& url[addressStartIndex++] != '@') {}
char const* from = &url[addressStartIndex];
// Skip over any "<username>[:<password>]@"
char const* from1 = from;
while (*from1 != '\0' && *from1 != '/') {
if (*from1 == '@') {
from = ++from1;
break;
}
++from1;
}
char* to = &parseBuffer[0];
unsigned i;
for (i = 0; i < parseBufferSize; ++i) {
if (*from == '\0' || *from == ':' || *from == '/') {
// We've completed parsing the address
*to = '\0';
break;
}
*to++ = *from++;
}
if (i == parseBufferSize) {
env.setResultMsg("URL is too long");
break;
}
NetAddressList addresses(parseBuffer);
if (addresses.numAddresses() == 0) {
env.setResultMsg("Failed to find network address for \"",
parseBuffer, "\"");
break;
}
address = *(addresses.firstAddress());
portNum = 5060; // default value
char nextChar = *from;
if (nextChar == ':') {
int portNumInt;
if (sscanf(++from, "%d", &portNumInt) != 1) {
env.setResultMsg("No port number follows ':'");
break;
}
if (portNumInt < 1 || portNumInt > 65535) {
env.setResultMsg("Bad port number");
break;
}
portNum = (portNumBits)portNumInt;
}
return True;
} while (0);
return False;
}
Developer ID: Azzuro, Project: MediaPortal-1, Lines: 74, Source: SIPClient.cpp
Example 17: srcPort
SIPClient::SIPClient(UsageEnvironment& env,
unsigned char desiredAudioRTPPayloadFormat,
char const* mimeSubtype,
int verbosityLevel, char const* applicationName)
: Medium(env),
fT1(500000 /* 500 ms */),
fDesiredAudioRTPPayloadFormat(desiredAudioRTPPayloadFormat),
fVerbosityLevel(verbosityLevel), fCSeq(0),
fUserAgentHeaderStr(NULL), fUserAgentHeaderStrLen(0),
fURL(NULL), fURLSize(0),
fToTagStr(NULL), fToTagStrSize(0),
fUserName(NULL), fUserNameSize(0),
fInviteSDPDescription(NULL), fInviteSDPDescriptionReturned(NULL),
fInviteCmd(NULL), fInviteCmdSize(0) {
if (mimeSubtype == NULL) mimeSubtype = "";
fMIMESubtype = strDup(mimeSubtype);
fMIMESubtypeSize = strlen(fMIMESubtype);
if (applicationName == NULL) applicationName = "";
fApplicationName = strDup(applicationName);
fApplicationNameSize = strlen(fApplicationName);
struct in_addr ourAddress;
ourAddress.s_addr = ourIPAddress(env); // hack
fOurAddressStr = strDup(AddressString(ourAddress).val());
fOurAddressStrSize = strlen(fOurAddressStr);
fOurSocket = new Groupsock(env, ourAddress, 0, 255);
if (fOurSocket == NULL) {
env << "ERROR: Failed to create socket for addr "
<< fOurAddressStr << ": "
<< env.getResultMsg() << "\n";
}
// Now, find out our source port number. Hack: Do this by first trying to
// send a 0-length packet, so that the "getSourcePort()" call will work.
fOurSocket->output(envir(), (unsigned char*)"", 0);
Port srcPort(0);
getSourcePort(env, fOurSocket->socketNum(), srcPort);
if (srcPort.num() != 0) {
fOurPortNum = ntohs(srcPort.num());
} else {
// No luck. Try again using a default port number:
fOurPortNum = 5060;
delete fOurSocket;
fOurSocket = new Groupsock(env, ourAddress, fOurPortNum, 255);
if (fOurSocket == NULL) {
env << "ERROR: Failed to create socket for addr "
<< fOurAddressStr << ", port "
<< fOurPortNum << ": "
<< env.getResultMsg() << "\n";
}
}
// Set the "User-Agent:" header to use in each request:
char const* const libName = "LIVE555 Streaming Media v";
char const* const libVersionStr = LIVEMEDIA_LIBRARY_VERSION_STRING;
char const* libPrefix; char const* libSuffix;
if (applicationName == NULL || applicationName[0] == '\0') {
applicationName = libPrefix = libSuffix = "";
} else {
libPrefix = " (";
libSuffix = ")";
}
unsigned userAgentNameSize
= fApplicationNameSize + strlen(libPrefix) + strlen(libName) + strlen(libVersionStr) + strlen(libSuffix) + 1;
char* userAgentName = new char[userAgentNameSize];
sprintf(userAgentName, "%s%s%s%s%s",
applicationName, libPrefix, libName, libVersionStr, libSuffix);
setUserAgentString(userAgentName);
delete[] userAgentName;
reset();
}
Developer ID: Azzuro, Project: MediaPortal-1, Lines: 74, Source: SIPClient.cpp
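As with the other Medium subclasses on this page, applications normally obtain a SIPClient through its static createNew() factory rather than by calling this constructor directly. A hedged sketch of our own; the payload format (0 = PCMU), MIME subtype, verbosity level, and application name are illustrative choices.

#include "liveMedia.hh"

SIPClient* makeSipClient(UsageEnvironment& env) {
  SIPClient* client = SIPClient::createNew(env, 0 /* PCMU */, "PCMU" /* MIME subtype */,
                                           1 /* verbosity */, "exampleApp");
  if (client == NULL) {
    env << "Failed to create SIPClient: " << env.getResultMsg() << "\n";
  }
  return client;
}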
Example 18: ourIPAddress
netAddressBits ourIPAddress(UsageEnvironment& env) {
static netAddressBits ourAddress = 0;
int sock = -1;
struct in_addr testAddr;
if (ReceivingInterfaceAddr != INADDR_ANY) {
// Hack: If we were told to receive on a specific interface address, then
// define this to be our ip address:
ourAddress = ReceivingInterfaceAddr;
}
if (ourAddress == 0) {
// We need to find our source address
struct sockaddr_in fromAddr;
fromAddr.sin_addr.s_addr = 0;
// Get our address by sending a (0-TTL) multicast packet,
// receiving it, and looking at the source address used.
// (This is kinda bogus, but it provides the best guarantee
// that other nodes will think our address is the same as we do.)
do {
loopbackWorks = 0; // until we learn otherwise
testAddr.s_addr = our_inet_addr("228.67.43.91"); // arbitrary
Port testPort(15947); // ditto
sock = setupDatagramSocket(env, testPort);
if (sock < 0) break;
if (!socketJoinGroup(env, sock, testAddr.s_addr)) break;
unsigned char testString[] = "hostIdTest";
unsigned testStringLength = sizeof testString;
if (!writeSocket(env, sock, testAddr, testPort.num(), 0,
testString, testStringLength)) break;
// Block until the socket is readable (with a 5-second timeout):
fd_set rd_set;
FD_ZERO(&rd_set);
FD_SET((unsigned)sock, &rd_set);
const unsigned numFds = sock+1;
struct timeval timeout;
timeout.tv_sec = 5;
timeout.tv_usec = 0;
int result = select(numFds, &rd_set, NULL, NULL, &timeout);
if (result <= 0) break;
unsigned char readBuffer[20];
int bytesRead = readSocket(env, sock,
readBuffer, sizeof readBuffer,
fromAddr);
if (bytesRead != (int)testStringLength
|| strncmp((char*)readBuffer, (char*)testString, testStringLength) != 0) {
break;
}
// We use this packet's source address, if it's good:
loopbackWorks = !badAddressForUs(fromAddr.sin_addr.s_addr);
} while (0);
if (sock >= 0) {
socketLeaveGroup(env, sock, testAddr.s_addr);
closeSocket(sock);
}
if (!loopbackWorks) do {
// We couldn't find our address using multicast loopback,
// so try instead to look it up directly - by first getting our host name, and then resolving this host name
char hostname[100];
hostname[0] = '\0';
int result = gethostname(hostname, sizeof hostname);
if (result != 0 || hostname[0] == '\0') {
env.setResultErrMsg("initial gethostname() failed");
break;
}
// Try to resolve "hostname" to an IP address:
NetAddressList addresses(hostname);
NetAddressList::Iterator iter(addresses);
NetAddress const* address;
// Take the first address that's not bad:
netAddressBits addr = 0;
while ((address = iter.nextAddress()) != NULL) {
netAddressBits a = *(netAddressBits*)(address->data());
if (!badAddressForUs(a)) {
addr = a;
break;
}
}
// Assign the address that we found to "fromAddr" (as if the 'loopback' method had worked), to simplify the code below:
fromAddr.sin_addr.s_addr = addr;
//......... part of the code is omitted here .........