remove audio stuff

This commit is contained in:
Alexander Mahr 2025-03-01 11:35:11 +01:00
parent b85bf60b5f
commit b41cad4f8f
18 changed files with 0 additions and 3711 deletions

View file

@ -1,3 +0,0 @@
os_generic.h
example

View file

@ -1,128 +0,0 @@
//Copyright <>< 2010-2020 Charles Lohr (And other authors as cited)
//CNFA is licensed under the MIT/x11, ColorChord or NewBSD Licenses. You choose.
#ifndef _CNFA_C
#define _CNFA_C
#include "CNFA.h"
#include <string.h>
#include <stdio.h>
#include <stdlib.h>
#if defined(WINDOWS) || defined(WIN32) || defined(WIN64) \
|| defined(_WIN32) || defined(_WIN64)
#ifndef strdup
#define strdup _strdup
#endif
#endif
// Driver registry: parallel arrays kept sorted so higher-priority drivers
// appear first.  Names are strdup'd copies owned by the registry.
static CNFAInitFn * CNFADrivers[MAX_CNFA_DRIVERS];
static char * CNFADriverNames[MAX_CNFA_DRIVERS];
static int CNFADriverPriorities[MAX_CNFA_DRIVERS];
// Register a driver factory.  Internal; invoked by the REGISTER_CNFA
// constructor stubs.  Priorities <= 0 are rejected (see CNFA.h).
void RegCNFADriver( int priority, const char * name, CNFAInitFn * fn )
{
int j;
if( priority <= 0 )
{
return;
}
printf("[CNFA] Registering Driver: %s\n", name);
// Insertion sort from the tail: shift lower-priority entries down one
// slot until the correct place for this driver is found.
for( j = MAX_CNFA_DRIVERS-1; j >= 0; j-- )
{
//Cruise along, find location to insert
if( j > 0 && ( !CNFADrivers[j-1] || CNFADriverPriorities[j-1] < priority ) )
{
CNFADrivers[j] = CNFADrivers[j-1];
CNFADriverNames[j] = CNFADriverNames[j-1];
CNFADriverPriorities[j] = CNFADriverPriorities[j-1];
}
else
{
// NOTE(review): when the table is full, slot j (possibly 0) is
// overwritten unconditionally and the previously strdup'd name in
// that slot is not freed -- confirm this is acceptable for a
// registry capped at MAX_CNFA_DRIVERS entries.
CNFADrivers[j] = fn;
CNFADriverNames[j] = strdup( name );
CNFADriverPriorities[j] = priority;
break;
}
}
}
/**
 * Create a sound-driver instance.
 *
 * If driver_name is NULL or empty, the highest-priority registered driver
 * that successfully initializes is used; otherwise the named driver is
 * looked up and initialized directly.
 *
 * Returns a driver handle on success, 0 if no driver could be started.
 */
struct CNFADriver * CNFAInit( const char * driver_name, const char * your_name, CNFACBType cb, int reqSPSPlay, int reqSPSRec,
	int reqChannelsPlay, int reqChannelsRec, int sugBufferSize, const char * outputSelect, const char * inputSelect, void * opaque)
{
#if defined( ANDROID ) || defined( __android__ )
	//Android can't run static-time code.
	void REGISTERAndroidCNFA();
	REGISTERAndroidCNFA();
#endif
	int idx;

	if( !driver_name || !driver_name[0] )
	{
		// No explicit request: scan the registry for the driver with the
		// highest priority.
		CNFAInitFn * chosen = 0;
		int best = 0;
		for( idx = 0; idx < MAX_CNFA_DRIVERS && CNFADrivers[idx]; idx++ )
		{
			if( CNFADriverPriorities[idx] > best )
			{
				best = CNFADriverPriorities[idx];
				chosen = CNFADrivers[idx];
			}
		}
		if( chosen )
		{
			struct CNFADriver * d = (struct CNFADriver *)chosen( cb, your_name, reqSPSPlay, reqSPSRec, reqChannelsPlay, reqChannelsRec, sugBufferSize, outputSelect, inputSelect, opaque );
			if( d )
			{
				return d;
			}
		}
	}
	else
	{
		// Explicit request: match by registered name.
		for( idx = 0; idx < MAX_CNFA_DRIVERS && CNFADrivers[idx]; idx++ )
		{
			if( strcmp( CNFADriverNames[idx], driver_name ) == 0 )
			{
				return (struct CNFADriver *)CNFADrivers[idx]( cb, your_name, reqSPSPlay, reqSPSRec, reqChannelsPlay, reqChannelsRec, sugBufferSize, outputSelect, inputSelect, opaque );
			}
		}
	}
	printf( "CNFA Driver not found.\n" );
	return 0;
}
// Query driver state: bitmask, 1 = mic recording, 2 = playback running,
// 3 = both.  Returns -1 for a null driver handle.
int CNFAState( struct CNFADriver * cnfaobject )
{
	if( !cnfaobject )
	{
		return -1;
	}
	return cnfaobject->StateFn( cnfaobject );
}
// Shut down and release a driver instance.  Safe to call with NULL.
void CNFAClose( struct CNFADriver * cnfaobject )
{
	if( !cnfaobject )
	{
		return;
	}
	cnfaobject->CloseFn( cnfaobject );
}
#endif

View file

@ -1,122 +0,0 @@
//Copyright <>< 2010-2020 Charles Lohr (And other authors as cited)
//CNFA is licensed under the MIT/x11, ColorChord or NewBSD Licenses. You choose.
//
// CN's Platform-agnostic, foundational sound driver subsystem.
// Easily output and input sound on a variety of platforms.
//
// Options:
// * #define CNFA_IMPLEMENTATION before this header and it will build all
// definitions in.
//
#ifndef _CNFA_H
#define _CNFA_H
//this #define is per-platform. For instance on Linux, you have ALSA, Pulse and null
#define MAX_CNFA_DRIVERS 4
struct CNFADriver;
#ifdef __cplusplus
extern "C" {
#endif
#ifdef BUILD_DLL
#ifdef WINDOWS
#define DllExport __declspec( dllexport )
#else
#define DllExport extern
#endif
#else
#define DllExport
#endif
//NOTE: Some drivers have synchronous duplex mode, other drivers will use two different callbacks. If either is unavailable, it will be NULL.
//I.e. if `out` is null, only use in to read. If in is null, only place samples in out.
// Audio callback: write `framesp` frames of interleaved signed 16-bit
// samples into `out`, and/or read `framesr` frames from `in`.
typedef void(*CNFACBType)( struct CNFADriver * sd, short * out, short * in, int framesp, int framesr );
// Factory signature each backend implements and registers via REGISTER_CNFA.
typedef void*(CNFAInitFn)( CNFACBType cb, const char * your_name, int reqSPSPlay, int reqSPSRec, int reqChannelsPlay, int reqChannelsRec, int sugBufferSize, const char * outputSelect, const char * inputSelect, void * opaque );
// Common driver header.  Every backend's private struct begins with these
// fields, so a backend object can be used through a struct CNFADriver *.
struct CNFADriver
{
void (*CloseFn)( void * object );
int (*StateFn)( void * object );
CNFACBType callback;
short channelsPlay;
short channelsRec;
int spsPlay;
int spsRec;
void * opaque;
//More fields may exist on a per-sound-driver basis
};
//Accepts:
//If DriverName = 0 or empty, will try to find best driver.
//
// our_source_name is an optional argument, but on some platforms controls the name of your endpoint.
// reqSPSPlay = 44100 is guaranteed on many platforms.
// reqSPSRec = 44100 is guaranteed on many platforms.
// NOTE: Some platforms do not allow SPS play and REC to deviate from each other.
// reqChannelsRec = 1 or 2 guaranteed on many platforms.
// reqChannelsPlay = 1 or 2 guaranteed on many platforms. NOTE: Some systems require ChannelsPlay == ChannelsRec!
// sugBufferSize = No promises.
// outputSelect = No standardization, NULL is OK for default.
// inputSelect = No standardization, NULL is OK for default.
DllExport struct CNFADriver * CNFAInit( const char * driver_name, const char * your_name, CNFACBType cb, int reqSPSPlay, int reqSPSRec, int reqChannelsPlay,
int reqChannelsRec, int sugBufferSize, const char * outputSelect, const char * inputSelect, void * opaque );
DllExport int CNFAState( struct CNFADriver * cnfaobject ); //returns bitmask. 1 if mic recording, 2 if play back running, 3 if both running.
DllExport void CNFAClose( struct CNFADriver * cnfaobject );
//Called by various sound drivers. Notice priority must be greater than 0. Priority of 0 or less will not register.
//This is an internal function. Applications should not call it.
void RegCNFADriver( int priority, const char * name, CNFAInitFn * fn );
// MSVC lacks __attribute__((constructor)); there the REGISTER##driver stub
// must be called manually (Android already does this inside CNFAInit).
#if defined(_MSC_VER) && !defined(__clang__)
#define REGISTER_CNFA( cnfadriver, priority, name, function ) \
void REGISTER##cnfadriver() { RegCNFADriver( priority, name, function ); }
#else
#define REGISTER_CNFA( cnfadriver, priority, name, function ) \
void __attribute__((constructor)) REGISTER##cnfadriver() { RegCNFADriver( priority, name, function ); }
#endif
#ifdef __TINYC__
#ifndef TCC
#define TCC
#endif
#endif
// Single-translation-unit mode: pull in the core implementation plus the
// backends that apply to the current platform.
#ifdef CNFA_IMPLEMENTATION
#include "CNFA.c"
#include "CNFA_null.c"
#if defined(WINDOWS) || defined(WIN32) || defined(WIN64)
#include "CNFA_winmm.c"
#include "CNFA_wasapi.c"
#elif defined( ANDROID ) || defined( __android__ )
#include "CNFA_android.c"
#elif defined(__NetBSD__) || defined(__sun)
#include "CNFA_sun.c"
#elif defined(__linux__)
#include "CNFA_alsa.c"
#if defined(PULSEAUDIO)
#include "CNFA_pulse.c"
#endif
#endif
#endif
#ifdef __cplusplus
};
#endif
#endif

View file

@ -1,374 +0,0 @@
//Copyright 2015-2020 <>< Charles Lohr under the MIT/x11, NewBSD or ColorChord License. You choose.
#include "CNFA.h"
#include "os_generic.h"
#include <alsa/asoundlib.h>
#include <string.h>
// ALSA backend driver object.  The first eight fields mirror struct
// CNFADriver (CNFA.h) and must stay in this order so this object can be
// used through a struct CNFADriver pointer.
struct CNFADriverAlsa
{
void (*CloseFn)( void * object );
int (*StateFn)( void * object );
CNFACBType callback;
short channelsPlay;
short channelsRec;
int spsPlay;
int spsRec;
void * opaque;
// Backend-private fields:
char * devRec;               // capture device name (strdup'd) or NULL for "default"
char * devPlay;              // playback device name (strdup'd) or NULL for "default"
snd_pcm_uframes_t bufsize;   // period size in frames (shared by play and rec)
og_thread_t threadPlay;      // playback worker (PlayThread), if started
og_thread_t threadRec;       // capture worker (RecThread), if started
snd_pcm_t *playback_handle;
snd_pcm_t *record_handle;
char playing;                // set by PlayThread while the stream is live
char recording;              // set by RecThread while the stream is live
};
// Report ALSA driver state: bit 0 = recording active, bit 1 = playback active.
int CNFAStateAlsa( void * v )
{
	struct CNFADriverAlsa * d = (struct CNFADriverAlsa *)v;
	int state = 0;
	if( d->playing ) state |= 2;
	if( d->recording ) state |= 1;
	return state;
}
// Tear down the ALSA driver: close the PCM handles (which ends the worker
// loops), join the worker threads, then release all owned memory.
// Safe to call with NULL.
void CloseCNFAAlsa( void * v )
{
	struct CNFADriverAlsa * d = (struct CNFADriverAlsa *)v;
	if( !d )
	{
		return;
	}
	if( d->playback_handle ) snd_pcm_close (d->playback_handle);
	if( d->record_handle ) snd_pcm_close (d->record_handle);
	if( d->threadPlay ) OGJoinThread( d->threadPlay );
	if( d->threadRec ) OGJoinThread( d->threadRec );
	// Brief grace period before freeing, matching the other backends.
	OGUSleep(2000);
	free( d->devRec );   // free(NULL) is a no-op
	free( d->devPlay );
	free( d );
}
// Configure ALSA hardware parameters for one PCM handle: interleaved
// S16_LE access, nearest supported sample rate, requested channel count,
// a period of *bufsize frames and a ring buffer of three periods.
// samplerate and bufsize are in/out: ALSA may adjust them to nearby
// supported values.  Returns 0 on success, negative on failure.
static int SetHWParams( snd_pcm_t * handle, int * samplerate, short * channels, snd_pcm_uframes_t * bufsize, struct CNFADriverAlsa * a )
{
int err;
int bufs;
int dir;
snd_pcm_hw_params_t *hw_params;
if ((err = snd_pcm_hw_params_malloc (&hw_params)) < 0) {
fprintf (stderr, "cannot allocate hardware parameter structure (%s)\n",
snd_strerror (err));
return -1;
}
if ((err = snd_pcm_hw_params_any (handle, hw_params)) < 0) {
fprintf (stderr, "cannot initialize hardware parameter structure (%s)\n",
snd_strerror (err));
goto fail;
}
if ((err = snd_pcm_hw_params_set_access (handle, hw_params, SND_PCM_ACCESS_RW_INTERLEAVED)) < 0) {
fprintf (stderr, "cannot set access type (%s)\n",
snd_strerror (err));
goto fail;
}
if ((err = snd_pcm_hw_params_set_format (handle, hw_params, SND_PCM_FORMAT_S16_LE )) < 0) {
fprintf (stderr, "cannot set sample format (%s)\n",
snd_strerror (err));
goto fail;
}
// NOTE(review): samplerate is an int* cast to unsigned int*; audio rates
// fit in both, but the declared types should really match -- confirm.
if ((err = snd_pcm_hw_params_set_rate_near (handle, hw_params, (unsigned int*)samplerate, 0)) < 0) {
fprintf (stderr, "cannot set sample rate (%s)\n",
snd_strerror (err));
goto fail;
}
if ((err = snd_pcm_hw_params_set_channels (handle, hw_params, *channels)) < 0) {
fprintf (stderr, "cannot set channel count (%s)\n",
snd_strerror (err));
goto fail;
}
dir = 0;
if( (err = snd_pcm_hw_params_set_period_size_near(handle, hw_params, bufsize, &dir)) < 0 )
{
fprintf( stderr, "cannot set period size. (%s)\n",
snd_strerror(err) );
goto fail;
}
//NOTE: This step is critical for low-latency sound.
// Ring buffer = three periods.
bufs = *bufsize*3;
if( (err = snd_pcm_hw_params_set_buffer_size(handle, hw_params, bufs)) < 0 )
{
fprintf( stderr, "cannot set snd_pcm_hw_params_set_buffer_size size. (%s)\n",
snd_strerror(err) );
goto fail;
}
if ((err = snd_pcm_hw_params (handle, hw_params)) < 0) {
fprintf (stderr, "cannot set parameters (%s)\n",
snd_strerror (err));
goto fail;
}
snd_pcm_hw_params_free (hw_params);
return 0;
fail:
snd_pcm_hw_params_free (hw_params);
return -2;
}
// Configure ALSA software parameters and prepare the PCM for use.
// For playback (isrec == 0) this sets the wakeup threshold (avail_min =
// one period) and the auto-start threshold; capture streams only need the
// snd_pcm_prepare.  Returns 0 on success, -1 on failure.
static int SetSWParams( struct CNFADriverAlsa * d, snd_pcm_t * handle, int isrec )
{
	// NULL-initialized so the fail path knows whether a free is needed.
	snd_pcm_sw_params_t *sw_params = NULL;
	int err;
	//Time for software parameters:
	if( !isrec )
	{
		if ((err = snd_pcm_sw_params_malloc (&sw_params)) < 0) {
			fprintf (stderr, "cannot allocate software parameters structure (%s)\n",
				snd_strerror (err));
			goto failhard;
		}
		if ((err = snd_pcm_sw_params_current (handle, sw_params)) < 0) {
			fprintf (stderr, "cannot initialize software parameters structure (%s) (%p)\n",
				snd_strerror (err), handle);
			goto fail;
		}
		int buffer_size = d->bufsize*3;
		int period_size = d->bufsize;
		printf( "PERIOD: %d BUFFER: %d\n", period_size, buffer_size );
		// Wake the writer whenever a full period can be written.
		if ((err = snd_pcm_sw_params_set_avail_min (handle, sw_params, period_size )) < 0) {
			fprintf (stderr, "cannot set minimum available count (%s)\n",
				snd_strerror (err));
			goto fail;
		}
		// Start the stream automatically once the buffer is nearly full.
		if ((err = snd_pcm_sw_params_set_start_threshold(handle, sw_params, buffer_size - period_size )) < 0) {
			fprintf (stderr, "cannot set minimum available count (%s)\n",
				snd_strerror (err));
			goto fail;
		}
		if ((err = snd_pcm_sw_params (handle, sw_params)) < 0) {
			fprintf (stderr, "cannot set software parameters (%s)\n",
				snd_strerror (err));
			goto fail;
		}
		// FIX: the parameter structure was previously leaked on the success
		// path (only the fail path freed it); it is no longer needed once
		// applied to the handle.
		snd_pcm_sw_params_free (sw_params);
		sw_params = NULL;
	}
	if ((err = snd_pcm_prepare (handle)) < 0) {
		fprintf (stderr, "cannot prepare audio interface for use (%s)\n",
			snd_strerror (err));
		goto fail;
	}
	return 0;
fail:
	if( sw_params )
	{
		snd_pcm_sw_params_free (sw_params);
	}
failhard:
	return -1;
}
// Capture worker thread: blocks in snd_pcm_readi and forwards each period
// of samples to the user callback until a read error ends the loop.
void * RecThread( void * v )
{
	struct CNFADriverAlsa * d = (struct CNFADriverAlsa *)v;
	short samples[d->bufsize * d->channelsRec];
	snd_pcm_start(d->record_handle);
	for( ;; )
	{
		int got = snd_pcm_readi( d->record_handle, samples, d->bufsize );
		if( got < 0 )
		{
			fprintf( stderr, "Warning: ALSA Recording Failed\n" );
			break;
		}
		if( got != d->bufsize )
		{
			fprintf( stderr, "Warning: ALSA Recording Underflow\n" );
		}
		d->recording = 1;
		// Deliver `got` frames of captured audio; no playback requested.
		d->callback( (struct CNFADriver *)d, 0, samples, 0, got );
	}
	d->recording = 0;
	fprintf( stderr, "ALSA Recording Stopped\n" );
	return 0;
}
// Playback worker thread: primes the device with one period from the user
// callback, then keeps feeding periods until a write error ends the loop.
void * PlayThread( void * v )
{
	struct CNFADriverAlsa * d = (struct CNFADriverAlsa *)v;
	short samples[d->bufsize * d->channelsPlay];
	int wrote;
	snd_pcm_start(d->playback_handle);
	// Prime with the first period (no warning check on this one, matching
	// the steady-state loop below which checks after each write).
	d->callback( (struct CNFADriver *)d, samples, 0, d->bufsize, 0 );
	wrote = snd_pcm_writei(d->playback_handle, samples, d->bufsize);
	while( wrote >= 0 )
	{
		d->callback( (struct CNFADriver *)d, samples, 0, d->bufsize, 0 );
		wrote = snd_pcm_writei(d->playback_handle, samples, d->bufsize);
		if( wrote != d->bufsize )
		{
			fprintf( stderr, "Warning: ALSA Playback Overflow\n" );
		}
		d->playing = 1;
	}
	d->playing = 0;
	fprintf( stderr, "ALSA Playback Stopped\n" );
	return 0;
}
// Open the requested PCM devices, apply HW/SW parameters, link playback
// and capture when both exist, and spawn the worker threads.
// Takes ownership of r: on failure the driver object is freed and 0 is
// returned; on success r is returned.
static struct CNFADriverAlsa * InitALSA( struct CNFADriverAlsa * r )
{
printf( "CNFA Alsa Init %p %p (%d %d) %d %d\n", r->playback_handle, r->record_handle, r->spsPlay, r->spsRec, r->channelsPlay, r->channelsRec );
int err;
if( r->channelsPlay )
{
if ((err = snd_pcm_open (&r->playback_handle, r->devPlay?r->devPlay:"default", SND_PCM_STREAM_PLAYBACK, 0)) < 0) {
fprintf (stderr, "cannot open output audio device (%s)\n",
snd_strerror (err));
goto fail;
}
}
if( r->channelsRec )
{
if ((err = snd_pcm_open (&r->record_handle, r->devRec?r->devRec:"default", SND_PCM_STREAM_CAPTURE, 0)) < 0) {
fprintf (stderr, "cannot open input audio device (%s)\n",
snd_strerror (err));
goto fail;
}
}
printf( "%p %p\n", r->playback_handle, r->record_handle );
if( r->playback_handle )
{
if( SetHWParams( r->playback_handle, &r->spsPlay, &r->channelsPlay, &r->bufsize, r ) < 0 )
goto fail;
if( SetSWParams( r, r->playback_handle, 0 ) < 0 )
goto fail;
}
if( r->record_handle )
{
// NOTE: reuses r->bufsize, so capture inherits the period size as
// (possibly) adjusted by the playback SetHWParams call above.
if( SetHWParams( r->record_handle, &r->spsRec, &r->channelsRec, &r->bufsize, r ) < 0 )
goto fail;
if( SetSWParams( r, r->record_handle, 1 ) < 0 )
goto fail;
}
#if 0
if( r->playback_handle )
{
snd_async_handler_t *pcm_callback;
//Handle automatically cleaned up when stream closed.
err = snd_async_add_pcm_handler(&pcm_callback, r->playback_handle, playback_callback, r);
if(err < 0)
{
printf("Playback callback handler error: %s\n", snd_strerror(err));
}
}
if( r->record_handle )
{
snd_async_handler_t *pcm_callback;
//Handle automatically cleaned up when stream closed.
err = snd_async_add_pcm_handler(&pcm_callback, r->record_handle, record_callback, r);
if(err < 0)
{
printf("Record callback handler error: %s\n", snd_strerror(err));
}
}
#endif
// Link the two PCMs so they start/stop together; failure here is only
// logged, not fatal.
if( r->playback_handle && r->record_handle )
{
err = snd_pcm_link ( r->playback_handle, r->record_handle );
if(err < 0)
{
printf("snd_pcm_link error: %s\n", snd_strerror(err));
}
}
if( r->playback_handle )
{
r->threadPlay = OGCreateThread( PlayThread, r );
}
if( r->record_handle )
{
r->threadRec = OGCreateThread( RecThread, r );
}
printf( "CNFA Alsa Init Out -> %p %p (%d %d) %d %d\n", r->playback_handle, r->record_handle, r->spsPlay, r->spsRec, r->channelsPlay, r->channelsRec );
return r;
fail:
if( r )
{
if( r->playback_handle ) snd_pcm_close (r->playback_handle);
if( r->record_handle ) snd_pcm_close (r->record_handle);
free( r );
}
fprintf( stderr, "Error: ALSA failed to start.\n" );
return 0;
}
// CNFA driver factory for ALSA (registered below).  Copies the requested
// parameters into a fresh driver object and hands it to InitALSA, which
// opens the devices and spawns the worker threads.
void * InitALSADriver( CNFACBType cb, const char * your_name, int reqSPSPlay, int reqSPSRec, int reqChannelsPlay, int reqChannelsRec, int sugBufferSize, const char * outputSelect, const char * inputSelect, void * opaque )
{
	// FIX: use checked calloc instead of unchecked malloc.  With malloc,
	// threadPlay/threadRec stayed uninitialized unless InitALSA started the
	// corresponding thread, and CloseCNFAAlsa unconditionally reads both
	// fields.  calloc also zeroes playback_handle/record_handle.
	struct CNFADriverAlsa * r = (struct CNFADriverAlsa *)calloc( 1, sizeof( struct CNFADriverAlsa ) );
	if( !r )
	{
		return 0;
	}
	r->CloseFn = CloseCNFAAlsa;
	r->StateFn = CNFAStateAlsa;
	r->callback = cb;
	r->opaque = opaque;
	r->spsPlay = reqSPSPlay;
	r->spsRec = reqSPSRec;
	r->channelsPlay = reqChannelsPlay;
	r->channelsRec = reqChannelsRec;
	r->devRec = (inputSelect)?strdup(inputSelect):0;
	r->devPlay = (outputSelect)?strdup(outputSelect):0;
	r->bufsize = sugBufferSize;
	return InitALSA(r);
}
REGISTER_CNFA( ALSA, 10, "ALSA", InitALSADriver );

View file

@ -1,319 +0,0 @@
//Copyright 2019-2020 <>< Charles Lohr under the ColorChord License, MIT/x11 license or NewBSD Licenses.
// This was originally to be used with rawdrawandroid
#include "CNFA.h"
#include "os_generic.h"
#include <pthread.h> //Using android threads not os_generic threads.
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
//based on https://github.com/android/ndk-samples/blob/master/native-audio/app/src/main/cpp/native-audio-jni.c
// for native audio
#include <SLES/OpenSLES.h>
#include <SLES/OpenSLES_Android.h>
#include <android_native_app_glue.h>
#include <jni.h>
#include <native_activity.h>
// Android (OpenSL ES) backend driver object.
struct CNFADriverAndroid
{
//Standard header - must remain.
void (*CloseFn)( void * object );
int (*StateFn)( void * object );
CNFACBType callback;
short channelsPlay;
short channelsRec;
int spsPlay;
int spsRec;
void * opaque;
// Backend-private fields:
int buffsz;                  // suggested buffer size as passed to InitCNFAAndroid
SLObjectItf engineObject;
SLEngineItf engineEngine;
SLRecordItf recorderRecord;
SLObjectItf recorderObject;
SLPlayItf playerPlay;
SLObjectItf playerObject;
SLObjectItf outputMixObject;
SLAndroidSimpleBufferQueueItf recorderBufferQueue;
SLAndroidSimpleBufferQueueItf playerBufferQueue;
//unsigned recorderSize;
int recorderBufferSizeBytes; // = sugBufferSize * 2 * channelsRec
int playerBufferSizeBytes;   // = sugBufferSize * 2 * channelsPlay
short * recorderBuffer;      // single capture buffer, malloc'd in InitAndroidDriver
short * playerBuffer;        // single playback buffer, malloc'd in InitAndroidDriver
};
// Buffer-queue callback: fires when the recorder has filled recorderBuffer.
// Hands the captured samples to the user callback, then re-queues the buffer.
void bqRecorderCallback(SLAndroidSimpleBufferQueueItf bq, void *context)
{
struct CNFADriverAndroid * r = (struct CNFADriverAndroid*)context;
r->callback( (struct CNFADriver*)r, 0, r->recorderBuffer, 0, r->buffsz/(sizeof(short)*r->channelsRec) );
// NOTE(review): Enqueue's size argument is in bytes per the OpenSL ES API,
// but this passes bytes/(channels*sizeof(short)) while the initial Enqueue
// in InitAndroidDriver passes the full byte count -- verify which is intended.
(*r->recorderBufferQueue)->Enqueue( r->recorderBufferQueue, r->recorderBuffer, r->recorderBufferSizeBytes/(r->channelsRec*sizeof(short)) );
}
// Buffer-queue callback: fires when the player has consumed playerBuffer.
// Pulls the next block of samples from the user callback and re-queues it.
void bqPlayerCallback(SLAndroidSimpleBufferQueueItf bq, void *context)
{
struct CNFADriverAndroid * r = (struct CNFADriverAndroid*)context;
r->callback( (struct CNFADriver*)r, r->playerBuffer, 0, r->buffsz/(sizeof(short)*r->channelsPlay), 0 );
// NOTE(review): Enqueue size is divided by (channels*sizeof(short)) here
// but InitAndroidDriver enqueues the full byte count -- verify which is
// correct against the OpenSL ES buffer-queue documentation.
(*r->playerBufferQueue)->Enqueue( r->playerBufferQueue, r->playerBuffer, r->playerBufferSizeBytes/(r->channelsPlay*sizeof(short)));
}
// Bring up the OpenSL ES engine, create player/recorder objects with
// buffer queues as requested, register the queue callbacks, enqueue the
// initial buffers and start both streams.
// Returns r on success; returns 0 (via JNI_FALSE) on failure.
static struct CNFADriverAndroid* InitAndroidDriver( struct CNFADriverAndroid * r )
{
SLresult result;
printf( "Starting InitAndroidDriver\n" );
// create engine
result = slCreateEngine(&r->engineObject, 0, NULL, 0, NULL, NULL);
assert(SL_RESULT_SUCCESS == result);
(void)result;
// realize the engine
result = (*r->engineObject)->Realize(r->engineObject, SL_BOOLEAN_FALSE);
assert(SL_RESULT_SUCCESS == result);
(void)result;
// get the engine interface, which is needed in order to create other objects
result = (*r->engineObject)->GetInterface(r->engineObject, SL_IID_ENGINE, &r->engineEngine);
assert(SL_RESULT_SUCCESS == result);
(void)result;
///////////////////////////////////////////////////////////////////////////////////////////////////////
if( r->channelsPlay )
{
printf("create output mix");
// 16-bit PCM at spsPlay (OpenSL takes milliHz, hence *1000); mono uses
// front-center, stereo uses front-left|front-right (mask 3).
SLDataFormat_PCM format_pcm ={
SL_DATAFORMAT_PCM,
r->channelsPlay,
r->spsPlay*1000,
SL_PCMSAMPLEFORMAT_FIXED_16,
SL_PCMSAMPLEFORMAT_FIXED_16,
(r->channelsPlay==1)?SL_SPEAKER_FRONT_CENTER:3,
SL_BYTEORDER_LITTLEENDIAN,
};
SLDataLocator_AndroidSimpleBufferQueue loc_bq_play = {SL_DATALOCATOR_ANDROIDSIMPLEBUFFERQUEUE, 2};
SLDataSource source = {&loc_bq_play, &format_pcm};
const SLInterfaceID ids[1] = {SL_IID_VOLUME};
const SLboolean req[1] = {SL_BOOLEAN_TRUE};
const SLInterfaceID id[1] = {SL_IID_ANDROIDSIMPLEBUFFERQUEUE};
result = (*r->engineEngine)->CreateOutputMix(r->engineEngine, &r->outputMixObject, 0, ids, req);
result = (*r->outputMixObject)->Realize(r->outputMixObject, SL_BOOLEAN_FALSE);
SLDataLocator_OutputMix loc_outmix = { SL_DATALOCATOR_OUTPUTMIX, r->outputMixObject };
SLDataSink sink;
sink.pFormat = &format_pcm;
sink.pLocator = &loc_outmix;
// create audio player
result = (*r->engineEngine)->CreateAudioPlayer(r->engineEngine, &r->playerObject, &source, &sink, 1, id, req);
if (SL_RESULT_SUCCESS != result) {
printf( "CreateAudioPlayer failed\n" );
// NOTE(review): JNI_FALSE (0) returned from a pointer-returning
// function -- works as a null return, but the type is odd; confirm.
return JNI_FALSE;
}
// realize the audio player
result = (*r->playerObject)->Realize(r->playerObject, SL_BOOLEAN_FALSE);
if (SL_RESULT_SUCCESS != result) {
printf( "AudioPlayer Realize failed: %d\n", result );
return JNI_FALSE;
}
// get the player interface
result = (*r->playerObject)->GetInterface(r->playerObject, SL_IID_PLAY, &r->playerPlay);
assert(SL_RESULT_SUCCESS == result);
(void)result;
// get the buffer queue interface
result = (*r->playerObject)->GetInterface(r->playerObject, SL_IID_ANDROIDSIMPLEBUFFERQUEUE, &r->playerBufferQueue);
assert(SL_RESULT_SUCCESS == result);
(void)result;
// register callback on the buffer queue
result = (*r->playerBufferQueue)->RegisterCallback(r->playerBufferQueue, bqPlayerCallback, r);
assert(SL_RESULT_SUCCESS == result);
(void)result;
printf( "===================== Player init ok.\n" );
}
if( r->channelsRec )
{
// configure audio source
SLDataLocator_IODevice loc_devI = {SL_DATALOCATOR_IODEVICE, SL_IODEVICE_AUDIOINPUT, SL_DEFAULTDEVICEID_AUDIOINPUT, NULL};
SLDataSource audioSrc = {&loc_devI, NULL};
// configure audio sink
SLDataFormat_PCM format_pcm ={
SL_DATAFORMAT_PCM,
r->channelsRec,
r->spsRec*1000,
SL_PCMSAMPLEFORMAT_FIXED_16,
SL_PCMSAMPLEFORMAT_FIXED_16,
(r->channelsRec==1)?SL_SPEAKER_FRONT_CENTER:3,
SL_BYTEORDER_LITTLEENDIAN,
};
SLDataLocator_AndroidSimpleBufferQueue loc_bq = {SL_DATALOCATOR_ANDROIDSIMPLEBUFFERQUEUE, 2};
SLDataSink audioSnk = {&loc_bq, &format_pcm};
const SLInterfaceID id[1] = {SL_IID_ANDROIDSIMPLEBUFFERQUEUE};
const SLboolean req[1] = {SL_BOOLEAN_TRUE};
result = (*r->engineEngine)->CreateAudioRecorder(r->engineEngine, &r->recorderObject, &audioSrc, &audioSnk, 1, id, req);
if (SL_RESULT_SUCCESS != result) {
printf( "CreateAudioRecorder failed\n" );
return JNI_FALSE;
}
// realize the audio recorder
result = (*r->recorderObject)->Realize(r->recorderObject, SL_BOOLEAN_FALSE);
if (SL_RESULT_SUCCESS != result) {
printf( "AudioRecorder Realize failed: %d\n", result );
return JNI_FALSE;
}
// get the record interface
result = (*r->recorderObject)->GetInterface(r->recorderObject, SL_IID_RECORD, &r->recorderRecord);
assert(SL_RESULT_SUCCESS == result);
(void)result;
// get the buffer queue interface
result = (*r->recorderObject)->GetInterface(r->recorderObject, SL_IID_ANDROIDSIMPLEBUFFERQUEUE, &r->recorderBufferQueue);
assert(SL_RESULT_SUCCESS == result);
(void)result;
// register callback on the buffer queue
result = (*r->recorderBufferQueue)->RegisterCallback(r->recorderBufferQueue, bqRecorderCallback, r);
assert(SL_RESULT_SUCCESS == result);
(void)result;
}
// Start playback: enqueue one zeroed buffer, then the bqPlayerCallback
// keeps the queue fed.
if( r->playerPlay )
{
result = (*r->playerPlay)->SetPlayState(r->playerPlay, SL_PLAYSTATE_STOPPED);
assert(SL_RESULT_SUCCESS == result); (void)result;
result = (*r->playerBufferQueue)->Clear(r->playerBufferQueue);
assert(SL_RESULT_SUCCESS == result); (void)result;
r->playerBuffer = malloc( r->playerBufferSizeBytes );
memset( r->playerBuffer, 0, r->playerBufferSizeBytes );
result = (*r->playerBufferQueue)->Enqueue(r->playerBufferQueue, r->playerBuffer, r->playerBufferSizeBytes );
assert(SL_RESULT_SUCCESS == result); (void)result;
result = (*r->playerPlay)->SetPlayState(r->playerPlay, SL_PLAYSTATE_PLAYING);
assert(SL_RESULT_SUCCESS == result); (void)result;
}
if( r->recorderRecord )
{
result = (*r->recorderRecord)->SetRecordState(r->recorderRecord, SL_RECORDSTATE_STOPPED);
assert(SL_RESULT_SUCCESS == result); (void)result;
result = (*r->recorderBufferQueue)->Clear(r->recorderBufferQueue);
assert(SL_RESULT_SUCCESS == result); (void)result;
// the buffer is not valid for playback yet
r->recorderBuffer = malloc( r->recorderBufferSizeBytes );
// enqueue an empty buffer to be filled by the recorder
// (for streaming recording, we would enqueue at least 2 empty buffers to start things off)
result = (*r->recorderBufferQueue)->Enqueue(r->recorderBufferQueue, r->recorderBuffer, r->recorderBufferSizeBytes );
// the most likely other result is SL_RESULT_BUFFER_INSUFFICIENT,
// which for this code example would indicate a programming error
assert(SL_RESULT_SUCCESS == result); (void)result;
// start recording
result = (*r->recorderRecord)->SetRecordState(r->recorderRecord, SL_RECORDSTATE_RECORDING);
assert(SL_RESULT_SUCCESS == result); (void)result;
}
printf( "Complete Init Sound Android\n" );
return r;
}
// Android driver state: bit 0 = recorder exists, bit 1 = player exists.
int CNFAStateAndroid( void * v )
{
	struct CNFADriverAndroid * d = (struct CNFADriverAndroid *)v;
	int state = 0;
	if( d->recorderObject ) state |= 1;
	if( d->playerObject ) state |= 2;
	return state;
}
// Tear down the OpenSL ES objects in reverse order of creation and release
// all owned memory, including the driver object itself.
void CloseCNFAAndroid( void * v )
{
	struct CNFADriverAndroid * r = (struct CNFADriverAndroid *)v;
	// Tolerate NULL, matching the other backends' close paths.
	if( !r ) return;
	// destroy audio recorder object, and invalidate all associated interfaces
	if (r->recorderObject != NULL) {
		(*r->recorderObject)->Destroy(r->recorderObject);
		r->recorderObject = NULL;
		r->recorderRecord = NULL;
		r->recorderBufferQueue = NULL;
		free( r->recorderBuffer );
	}
	if (r->playerObject != NULL) {
		(*r->playerObject)->Destroy(r->playerObject);
		r->playerObject = NULL;
		r->playerPlay = NULL;
		r->playerBufferQueue = NULL;
		free( r->playerBuffer );
	}
	// destroy engine object, and invalidate all associated interfaces
	if (r->engineObject != NULL) {
		(*r->engineObject)->Destroy(r->engineObject);
		r->engineObject = NULL;
		r->engineEngine = NULL;
	}
	// FIX: the driver allocation itself was leaked -- CNFAClose only calls
	// CloseFn, and nothing else frees the object.
	free( r );
}
int AndroidHasPermissions(const char* perm_name);
void AndroidRequestAppPermissions(const char * perm);
// CNFA driver factory for Android/OpenSL ES (registered below).  Fills a
// zeroed driver object with the requested parameters, requests the
// RECORD_AUDIO permission when missing, then hands off to InitAndroidDriver.
void * InitCNFAAndroid( CNFACBType cb, const char * your_name, int reqSPSPlay, int reqSPSRec, int reqChannelsPlay, int reqChannelsRec, int sugBufferSize, const char * outputSelect, const char * inputSelect, void * opaque )
{
	struct CNFADriverAndroid * drv = (struct CNFADriverAndroid *)malloc( sizeof( struct CNFADriverAndroid ) );
	memset( drv, 0, sizeof( *drv ) );
	drv->CloseFn = CloseCNFAAndroid;
	drv->StateFn = CNFAStateAndroid;
	drv->callback = cb;
	drv->opaque = opaque;
	drv->spsPlay = reqSPSPlay;
	drv->spsRec = reqSPSRec;
	drv->channelsPlay = reqChannelsPlay;
	drv->channelsRec = reqChannelsRec;
	drv->buffsz = sugBufferSize;
	// Two bytes per 16-bit sample, per channel.
	drv->recorderBufferSizeBytes = sugBufferSize * 2 * drv->channelsRec;
	drv->playerBufferSizeBytes = sugBufferSize * 2 * drv->channelsPlay;
	// Recording needs the RECORD_AUDIO runtime permission.
	if( !AndroidHasPermissions( "RECORD_AUDIO" ) )
	{
		AndroidRequestAppPermissions( "RECORD_AUDIO" );
	}
	return InitAndroidDriver( drv );
}
//Tricky: On Android, this can't actually run before main. Have to manually execute it.
REGISTER_CNFA( AndroidCNFA, 10, "ANDROID", InitCNFAAndroid );

View file

@ -1,46 +0,0 @@
//Copyright 2015-2020 <>< Charles Lohr under the ColorChord License.
#include "CNFA.h"
#include "os_generic.h"
#include <stdlib.h>
// Null backend driver object: just the common CNFADriver header, since
// this backend produces and consumes no audio.
struct CNFADriverNull
{
void (*CloseFn)( void * object );
int (*StateFn)( void * object );
CNFACBType callback;
short channelsPlay;
short channelsRec;
int spsPlay;
int spsRec;
void * opaque;
};
// Release the null driver; it owns nothing but its own allocation.
void CloseCNFANull( void * object )
{
	free( object );
}
// The null driver is never playing or recording, so its state is always 0.
int CNFAStateNull( void * object )
{
	(void)object;
	return 0;
}
// CNFA driver factory for the null backend (registered below with the
// lowest priority, so it is only picked when nothing else works).
// Simply records the requested parameters; no audio is moved.
void * InitCNFANull( CNFACBType cb, const char * your_name, int reqSPSPlay, int reqSPSRec, int reqChannelsPlay, int reqChannelsRec, int sugBufferSize, const char * outputSelect, const char * inputSelect, void * opaque )
{
	struct CNFADriverNull * r = (struct CNFADriverNull *)malloc( sizeof( struct CNFADriverNull ) );
	// FIX: the allocation was previously used without a NULL check.
	if( !r )
	{
		return 0;
	}
	r->CloseFn = CloseCNFANull;
	r->StateFn = CNFAStateNull;
	r->callback = cb;
	r->spsPlay = reqSPSPlay;
	r->spsRec = reqSPSRec;
	r->opaque = opaque;
	r->channelsPlay = reqChannelsPlay;
	r->channelsRec = reqChannelsRec;
	return r;
}
REGISTER_CNFA( NullCNFA, 1, "NULL", InitCNFANull );

View file

@ -1,308 +0,0 @@
//Copyright 2015-2020 <>< Charles Lohr under the MIT/x11, NewBSD or ColorChord License. You choose.
//This file is really rough. Full duplex doesn't seem to work hardly at all.
#include "CNFA.h"
#include "os_generic.h"
#include <stdlib.h>
#include <pulse/simple.h>
#include <pulse/pulseaudio.h>
#include <pulse/error.h>
#include <stdio.h>
#include <string.h>
#define BUFFERSETS 3
//from http://www.freedesktop.org/wiki/Software/PulseAudio/Documentation/Developer/Clients/Samples/AsyncPlayback/
//also http://maemo.org/api_refs/5.0/5.0-final/pulseaudio/pacat_8c-example.html
// PulseAudio backend driver object.  First eight fields mirror struct
// CNFADriver (CNFA.h) so this can be used through a struct CNFADriver *.
struct CNFADriverPulse
{
void (*CloseFn)( void * object );
int (*StateFn)( void * object );
CNFACBType callback;
short channelsPlay;
short channelsRec;
int spsPlay;
int spsRec;
void * opaque;
// Backend-private fields:
char * sourceNamePlay;   // playback sink name (strdup'd) or NULL for default
char * sourceNameRec;    // capture source name (strdup'd) or NULL for default
og_thread_t thread;      // thread pumping the Pulse mainloop (CNFAThread)
pa_stream * play;
pa_stream * rec;
pa_context * pa_ctx;
pa_mainloop *pa_ml;
int pa_ready;            // set by pa_state_cb: 1 = ready, 2 = failed/terminated
int buffer;              // suggested buffer size in frames
//More fields may exist on a per-sound-driver basis
};
// Pulse driver state: bit 0 = capture stream exists, bit 1 = playback stream exists.
int CNFAStatePulse( void * v )
{
	struct CNFADriverPulse * d = (struct CNFADriverPulse *)v;
	int state = 0;
	if( d->play ) state |= 2;
	if( d->rec ) state |= 1;
	return state;
}
// Shut down the Pulse driver: unref both streams, stop the mainloop
// thread, then free owned strings and the driver object.  Safe with NULL.
void CloseCNFAPulse( void * v )
{
struct CNFADriverPulse * r = (struct CNFADriverPulse *)v;
if( r )
{
if( r->play )
{
pa_stream_unref (r->play);
r->play = 0;
}
if( r->rec )
{
pa_stream_unref (r->rec);
r->rec = 0;
}
// Brief grace period for in-flight callbacks before killing the
// mainloop thread.
OGUSleep(2000);
OGCancelThread( r->thread );
// NOTE(review): pa_ctx and pa_ml are never released here
// (pa_context_disconnect/pa_context_unref/pa_mainloop_free) -- looks
// like a leak; verify against the PulseAudio async API documentation.
if( r->sourceNamePlay ) free( r->sourceNamePlay );
if( r->sourceNameRec ) free( r->sourceNameRec );
free( r );
}
}
// Dedicated thread that pumps the PulseAudio mainloop forever; all stream
// callbacks fire from here.  Terminated via OGCancelThread in CloseCNFAPulse.
static void * CNFAThread( void * v )
{
	struct CNFADriverPulse * d = (struct CNFADriverPulse*)v;
	for( ;; )
	{
		pa_mainloop_iterate( d->pa_ml, 1, NULL );
	}
	return 0;
}
// Pulse write callback: the server wants `length` more bytes of playback
// audio.  Pulls samples from the user callback and writes them to the stream.
static void stream_request_cb(pa_stream *s, size_t length, void *userdata)
{
struct CNFADriverPulse * r = (struct CNFADriverPulse*)userdata;
if( !r->play )
{
return;
}
// VLA sized in shorts; length is in bytes.  The extra *channelsPlay makes
// it larger than strictly needed for multi-channel streams (only `length`
// bytes are ever written from it).
short bufp[length*r->channelsPlay/sizeof(short)];
r->callback( (struct CNFADriver*)r, bufp, 0, length/(sizeof(short)*r->channelsPlay), 0 );
pa_stream_write(r->play, &bufp[0], length, NULL, 0LL, PA_SEEK_RELATIVE);
}
// Pulse read callback: peek the captured fragment, copy it so the stream
// data can be dropped, then hand the copy to the user callback.
static void stream_record_cb(pa_stream *s, size_t length, void *userdata)
{
struct CNFADriverPulse * r = (struct CNFADriverPulse*)userdata;
uint16_t * bufr;
if (pa_stream_peek(r->rec, (const void**)&bufr, &length) < 0) {
fprintf(stderr, ("pa_stream_peek() failed: %s\n"), pa_strerror(pa_context_errno(r->pa_ctx)));
return;
}
// NOTE(review): pa_stream_peek can succeed with bufr == NULL (a hole in
// the stream buffer); that case is not handled here and memcpy would
// fault -- verify against the PulseAudio documentation.
short * buffer;
buffer = (short*)pa_xmalloc(length);
memcpy(buffer, bufr, length);
pa_stream_drop(r->rec);
r->callback( (struct CNFADriver*)r, 0, buffer, 0, length/(sizeof(short)*r->channelsRec) );
pa_xfree( buffer );
}
// Pulse underflow notification -- just log it; playback continues.
static void stream_underflow_cb(pa_stream *s, void *userdata)
{
	(void)s;
	(void)userdata;
	printf("underflow\n");
}
// Context state callback: reports connection progress through *userdata.
// 1 = ready, 2 = failed or terminated; all other states leave the flag
// untouched (still connecting).
void pa_state_cb(pa_context *c, void *userdata) {
	int *pa_ready = (int*)userdata;
	pa_context_state_t state = pa_context_get_state(c);
	if (state == PA_CONTEXT_READY) {
		*pa_ready = 1;
	} else if (state == PA_CONTEXT_FAILED || state == PA_CONTEXT_TERMINATED) {
		*pa_ready = 2;
	}
	// PA_CONTEXT_UNCONNECTED / CONNECTING / AUTHORIZING / SETTING_NAME:
	// transient states, nothing to report yet.
}
// Creates and starts a PulseAudio-backed CNFA driver instance.
// Returns an opaque driver handle, or 0 on failure.  On failure, every
// partially-created PulseAudio object is torn down and the driver freed.
void * InitCNFAPulse( CNFACBType cb, const char * your_name, int reqSPSPlay, int reqSPSRec, int reqChannelsPlay, int reqChannelsRec, int sugBufferSize, const char * outputSelect, const char * inputSelect, void * opaque )
{
	static pa_buffer_attr bufattr;
	static pa_sample_spec ss;
	int error;
	pa_mainloop_api *pa_mlapi;
	const char * title = your_name;
	struct CNFADriverPulse * r = (struct CNFADriverPulse *)malloc( sizeof( struct CNFADriverPulse ) );
	if( !r )
	{
		fprintf( stderr, "Failed to allocate CNFADriverPulse\n" );
		return 0;
	}
	// Zero the whole struct up front so the "fail:" cleanup below never
	// inspects uninitialized pointers (the original code could goto fail
	// before r->play / r->rec were ever assigned).
	memset( r, 0, sizeof( *r ) );
	r->pa_ml = pa_mainloop_new();
	if( !r->pa_ml )
	{
		fprintf( stderr, "Failed to initialize pa_mainloop_new()\n" );
		goto fail;
	}
	pa_mlapi = pa_mainloop_get_api(r->pa_ml);
	if( !pa_mlapi )
	{
		fprintf( stderr, "Failed to initialize pa_mainloop_get_api()\n" );
		goto fail;
	}
	r->pa_ctx = pa_context_new(pa_mlapi, title );
	if( !r->pa_ctx )
	{
		fprintf( stderr, "Failed to initialize pa_context_new()\n" );
		goto fail;
	}
	if( pa_context_connect(r->pa_ctx, NULL, PA_CONTEXT_NOFLAGS, NULL) < 0 )
	{
		fprintf( stderr, "Failed to connect pa_context to PulseAudio server\n" );
		goto fail;
	}
	r->CloseFn = CloseCNFAPulse;
	r->StateFn = CNFAStatePulse;
	r->callback = cb;
	r->opaque = opaque;
	r->spsPlay = reqSPSPlay;
	r->spsRec = reqSPSRec;
	r->channelsPlay = reqChannelsPlay;
	r->channelsRec = reqChannelsRec;
	r->sourceNamePlay = outputSelect?strdup(outputSelect):0;
	r->sourceNameRec = inputSelect?strdup(inputSelect):0;
	r->play = 0;
	r->rec = 0;
	r->buffer = sugBufferSize;
	printf ("Pulse: from: [O/I] %s/%s (%s) / (%d,%d)x(%d,%d) (%d)\n", r->sourceNamePlay, r->sourceNameRec, title, r->spsPlay, r->spsRec, r->channelsPlay, r->channelsRec, r->buffer );
	memset( &ss, 0, sizeof( ss ) );
	ss.format = PA_SAMPLE_S16NE;
	r->pa_ready = 0;
	pa_context_set_state_callback(r->pa_ctx, pa_state_cb, &r->pa_ready);
	// Pump the mainloop until the context either becomes ready (pa_ready==1)
	// or fails/terminates (pa_ready==2); see pa_state_cb.
	while (r->pa_ready == 0)
	{
		pa_mainloop_iterate(r->pa_ml, 1, NULL);
	}
	if( r->pa_ready == 2 )
	{
		// The original code ignored this and went on to create streams on a
		// dead context.
		fprintf( stderr, "PulseAudio context failed to become ready\n" );
		goto fail;
	}
	// Size each stream's buffer by its OWN channel count; the original used
	// channelsRec for both, which is zero for playback-only clients.
	int bufbytesPlay = r->buffer * sizeof(short) * r->channelsPlay;
	int bufbytesRec = r->buffer * sizeof(short) * r->channelsRec;
	if( r->channelsPlay )
	{
		ss.channels = r->channelsPlay;
		ss.rate = r->spsPlay;
		if (!(r->play = pa_stream_new(r->pa_ctx, "Play", &ss, NULL))) {
			error = -3; //XXX ??? TODO
			fprintf(stderr, __FILE__": pa_simple_new() failed: %s\n", pa_strerror(error));
			goto fail;
		}
		pa_stream_set_underflow_callback(r->play, stream_underflow_cb, NULL);
		pa_stream_set_write_callback(r->play, stream_request_cb, r );
		bufattr.fragsize = (uint32_t)-1;
		bufattr.maxlength = bufbytesPlay*3; //XXX TODO Consider making this -1
		bufattr.minreq = 0;
		bufattr.prebuf = (uint32_t)-1;
		bufattr.tlength = bufbytesPlay*3;
		int ret = pa_stream_connect_playback(r->play, r->sourceNamePlay, &bufattr,
		//	PA_STREAM_INTERPOLATE_TIMING
		//	|PA_STREAM_ADJUST_LATENCY //Some servers don't like the adjust_latency flag.
		//	|PA_STREAM_AUTO_TIMING_UPDATE, NULL, NULL);
			PA_STREAM_NOFLAGS, NULL, NULL );
		if( ret < 0 )
		{
			fprintf(stderr, __FILE__": (PLAY) pa_stream_connect_playback() failed: %s\n", pa_strerror(ret));
			goto fail;
		}
	}
	if( r->channelsRec )
	{
		ss.channels = r->channelsRec;
		ss.rate = r->spsRec;
		if (!(r->rec = pa_stream_new(r->pa_ctx, "Record", &ss, NULL))) {
			error = -3; //XXX ??? TODO
			fprintf(stderr, __FILE__": pa_simple_new() failed: %s\n", pa_strerror(error));
			goto fail;
		}
		pa_stream_set_read_callback(r->rec, stream_record_cb, r );
		bufattr.fragsize = bufbytesRec;
		bufattr.maxlength = (uint32_t)-1;//(uint32_t)-1; //XXX: Todo, should this be low?
		bufattr.minreq = bufbytesRec;
		bufattr.prebuf = (uint32_t)-1;
		bufattr.tlength = bufbytesRec*3;
		int ret = pa_stream_connect_record(r->rec, r->sourceNameRec, &bufattr,
		//	PA_STREAM_INTERPOLATE_TIMING
			PA_STREAM_ADJUST_LATENCY //Some servers don't like the adjust_latency flag.
		//	PA_STREAM_AUTO_TIMING_UPDATE
		//	PA_STREAM_NOFLAGS
			);
		printf( "PA REC RES: %d\n", ret );
		if( ret < 0 )
		{
			fprintf(stderr, __FILE__": (REC) pa_stream_connect_playback() failed: %s\n", pa_strerror(ret));
			goto fail;
		}
	}
	printf( "Pulse initialized.\n" );
	r->thread = OGCreateThread( CNFAThread, r );
	if( r->play )
	{
		// Prime the playback stream with two buffers' worth of data.
		stream_request_cb( r->play, bufbytesPlay, r );
		stream_request_cb( r->play, bufbytesPlay, r );
	}
	return r;
fail:
	if( r )
	{
		// pa_stream objects are refcounted; releasing them with pa_xfree()
		// (as the original did) bypasses the refcounting machinery.
		if( r->play ) pa_stream_unref( r->play );
		if( r->rec ) pa_stream_unref( r->rec );
		if( r->pa_ctx )
		{
			pa_context_disconnect( r->pa_ctx );
			pa_context_unref( r->pa_ctx );
		}
		if( r->pa_ml ) pa_mainloop_free( r->pa_ml );
		free( r->sourceNamePlay );
		free( r->sourceNameRec );
		free( r );
	}
	return 0;
}
REGISTER_CNFA( PulseCNFA, 11, "PULSE", InitCNFAPulse );

View file

@ -1,275 +0,0 @@
//Copyright 2015-2020 <>< Charles Lohr under the MIT/x11, NewBSD or ColorChord License. You choose.
#include "CNFA.h"
#include "os_generic.h"
#include <sys/audioio.h>
#include <sys/ioctl.h>
#include <fcntl.h>
#include <string.h>
#include <errno.h>
// Driver state for the Sun audio (/dev/audio) backend.
struct CNFADriverSun
{
	// Common CNFA items (mirrors the generic driver header).
	void (*CloseFn)( void * object );
	int (*StateFn)( void * object );
	CNFACBType callback;    // User callback that produces/consumes sample data.
	short channelsPlay;     // Playback channel count (0 = no playback).
	short channelsRec;      // Record channel count (0 = no recording).
	int spsPlay;            // Playback sample rate, Hz.
	int spsRec;             // Record sample rate, Hz.
	void * opaque;          // User pointer, passed through untouched.
	char * devRec;          // Record device path (strdup'd), or NULL for default.
	char * devPlay;         // Playback device path (strdup'd), or NULL for default.
	short * samplesRec;     // Staging buffer for captured 16-bit frames.
	short * samplesPlay;    // Staging buffer for 16-bit frames to be played.
	og_thread_t threadPlay; // Blocking-write playback thread (PlayThread).
	og_thread_t threadRec;  // Blocking-read record thread (RecThread).
	int bufsize;            // Frames exchanged per callback invocation.
	int playback_handle;    // fd of playback device, -1 if unused.
	int record_handle;      // fd of record device, -1 if unused (may equal playback_handle in full duplex).
	char playing;           // Nonzero while the play thread is actively writing.
	char recording;         // Nonzero while the record thread is actively reading.
};
// Reports driver activity as a bitmask: bit 1 (value 2) = playing,
// bit 0 (value 1) = recording.
int CNFAStateSun( void * v )
{
	struct CNFADriverSun * drv = (struct CNFADriverSun *)v;
	int state = 0;
	if( drv->playing ) state |= 2;
	if( drv->recording ) state |= 1;
	return state;
}
// Shuts down the Sun driver.  Closing the device fds first unblocks the
// read()/write() calls in the worker threads, which then exit and are
// joined before the shared buffers are freed.
void CloseCNFASun( void * v )
{
	struct CNFADriverSun * r = (struct CNFADriverSun *)v;
	if( r )
	{
		if( r->playback_handle != -1 ) close (r->playback_handle);
		// In full-duplex mode both handles refer to the same O_RDWR fd;
		// closing it twice (as the original did) hits EBADF, or worse,
		// closes an fd some other thread has since been handed.
		if( r->record_handle != -1 && r->record_handle != r->playback_handle ) close (r->record_handle);
		if( r->threadPlay ) OGJoinThread( r->threadPlay );
		if( r->threadRec ) OGJoinThread( r->threadRec );
		OGUSleep(2000);
		free( r->devRec );
		free( r->devPlay );
		free( r->samplesRec );
		free( r->samplesPlay );
		free( r );
	}
}
// Worker thread: blocks on read() from the record device and hands each
// captured chunk to the user callback; exits when a read fails (device
// closed or hardware error).
void * RecThread( void * v )
{
	struct CNFADriverSun * drv = (struct CNFADriverSun *)v;
	size_t chunkbytes = drv->bufsize * (2 * drv->channelsRec);
	for( ;; )
	{
		int got = read( drv->record_handle, drv->samplesRec, chunkbytes );
		if( got < 0 )
		{
			fprintf( stderr, "Warning: Sun Recording Failed\n" );
			break;
		}
		drv->recording = 1;
		drv->callback( (struct CNFADriver *)drv, NULL, drv->samplesRec, 0, (got / 2) / drv->channelsRec);
	}
	drv->recording = 0;
	fprintf( stderr, "Sun Recording Stopped\n" );
	return 0;
}
// Worker thread: repeatedly asks the user callback to fill the playback
// staging buffer, then blocks on write() to the playback device.  Exits
// when a write fails (device closed or hardware error).
void * PlayThread( void * v )
{
	struct CNFADriverSun * r = (struct CNFADriverSun *)v;
	// Bytes per chunk: frames * 2 bytes/sample * channels.
	size_t nbytes = r->bufsize * (2 * r->channelsPlay);
	int err;
	// Prime the device with one chunk before entering the loop.
	r->callback( (struct CNFADriver *)r, r->samplesPlay, NULL, r->bufsize, 0 );
	err = write( r->playback_handle, r->samplesPlay, nbytes );
	while( err >= 0 )
	{
		r->callback( (struct CNFADriver *)r, r->samplesPlay, NULL, r->bufsize, 0 );
		err = write( r->playback_handle, r->samplesPlay, nbytes );
		// Note: 'playing' is raised only after the write in the loop body
		// returns (even if that write failed; the flag is cleared below).
		r->playing = 1;
	}
	r->playing = 0;
	fprintf( stderr, "Sun Playback Stopped\n" );
	return 0;
}
static struct CNFADriverSun * InitSun( struct CNFADriverSun * r )
{
const char * devPlay = r->devPlay;
const char * devRec = r->devRec;
struct audio_info rinfo, pinfo;
if( devRec == NULL || strcmp ( devRec, "default" ) == 0 )
{
devRec = "/dev/audio";
}
if( devPlay == NULL || strcmp ( devPlay , "default" ) == 0 )
{
devPlay = "/dev/audio";
}
printf( "CNFA Sun Init -> devPlay: %s, channelsPlay: %d, spsPlay: %d, devRec: %s, channelsRec: %d, spsRec: %d\n", devPlay, r->channelsPlay, r->spsPlay, devRec, r->channelsRec, r->spsRec);
if( r->channelsPlay && r->channelsRec && strcmp (devPlay, devRec) == 0 )
{
if ( (r->playback_handle = r->record_handle = open (devPlay, O_RDWR)) < 0 )
{
fprintf (stderr, "cannot open audio device (%s)\n",
strerror (errno));
goto fail;
}
}
else
{
if( r->channelsPlay )
{
if ( (r->playback_handle = open (devPlay, O_WRONLY)) < 0 )
{
fprintf (stderr, "cannot open output audio device %s (%s)\n",
r->devPlay, strerror (errno));
goto fail;
}
}
if( r->channelsRec )
{
if ( (r->record_handle = open (devRec, O_RDONLY)) < 0 )
{
fprintf (stderr, "cannot open input audio device %s (%s)\n",
r->devRec, strerror (errno));
goto fail;
}
}
}
if( r->playback_handle )
{
AUDIO_INITINFO(&pinfo);
pinfo.play.precision = 16;
pinfo.play.encoding = AUDIO_ENCODING_LINEAR;
pinfo.play.sample_rate = r->spsPlay;
pinfo.play.channels = r->channelsPlay;
if ( ioctl(r->playback_handle, AUDIO_SETINFO, &pinfo) < 0 )
{
fprintf (stderr, "cannot set audio playback format (%s)\n",
strerror (errno));
goto fail;
}
if ( ioctl(r->playback_handle, AUDIO_GETINFO, &pinfo) < 0 )
{
fprintf (stderr, "cannot get audio record format (%s)\n",
strerror (errno));
goto fail;
}
r->spsPlay = pinfo.play.sample_rate;
r->channelsPlay = pinfo.play.channels;
if ( (r->samplesPlay = calloc(2 * r->channelsPlay, r->bufsize)) == NULL )
{
goto fail;
}
}
if( r->record_handle )
{
AUDIO_INITINFO(&rinfo);
rinfo.record.precision = 16;
rinfo.record.encoding = AUDIO_ENCODING_LINEAR;
rinfo.record.sample_rate = r->spsRec;
rinfo.record.channels = r->channelsRec;
if ( ioctl(r->record_handle, AUDIO_SETINFO, &rinfo) < 0 )
{
fprintf (stderr, "cannot set audio record format (%s)\n",
strerror (errno));
goto fail;
}
if ( ioctl(r->record_handle, AUDIO_GETINFO, &rinfo) < 0 )
{
fprintf (stderr, "cannot get audio record format (%s)\n",
strerror (errno));
goto fail;
}
r->spsRec = rinfo.record.sample_rate;
r->channelsRec = rinfo.record.channels;
if ( (r->samplesRec = calloc(2 * r->channelsRec, r->bufsize)) == NULL )
{
goto fail;
}
}
if( r->playback_handle )
{
r->threadPlay = OGCreateThread( PlayThread, r );
}
if( r->record_handle )
{
r->threadRec = OGCreateThread( RecThread, r );
}
printf( "CNFA Sun Init Out -> channelsPlay: %d, spsPlay: %d, channelsRec: %d, spsRec: %d\n", r->channelsPlay, r->spsPlay, r->channelsRec, r->spsRec);
return r;
fail:
if( r )
{
if( r->playback_handle != -1 ) close (r->playback_handle);
if( r->record_handle != -1 ) close (r->record_handle);
free( r->samplesPlay );
free( r->samplesRec );
free( r );
}
return 0;
}
// Public entry point for the Sun audio backend.  Copies the request into a
// freshly-allocated driver struct and defers the real work to InitSun().
// Returns an opaque driver handle, or 0 on failure.
void * InitSunDriver( CNFACBType cb, const char * your_name, int reqSPSPlay, int reqSPSRec, int reqChannelsPlay, int reqChannelsRec, int sugBufferSize, const char * outputSelect, const char * inputSelect, void * opaque )
{
	struct CNFADriverSun * r = (struct CNFADriverSun *)malloc( sizeof( struct CNFADriverSun ) );
	if( !r )
	{
		// The original code dereferenced a NULL pointer on OOM.
		return 0;
	}
	r->CloseFn = CloseCNFASun;
	r->StateFn = CNFAStateSun;
	r->callback = cb;
	r->opaque = opaque;
	r->spsPlay = reqSPSPlay;
	r->spsRec = reqSPSRec;
	r->channelsPlay = reqChannelsPlay;
	r->channelsRec = reqChannelsRec;
	r->devRec = (inputSelect)?strdup(inputSelect):0;
	r->devPlay = (outputSelect)?strdup(outputSelect):0;
	r->samplesPlay = NULL;
	r->samplesRec = NULL;
	r->playback_handle = -1;
	r->record_handle = -1;
	r->bufsize = sugBufferSize;
	// These were left uninitialized by the original code; CloseCNFASun and
	// CNFAStateSun read them, so zero them for one-sided configurations
	// where InitSun never assigns them.
	r->threadPlay = 0;
	r->threadRec = 0;
	r->playing = 0;
	r->recording = 0;
	return InitSun(r);
}
REGISTER_CNFA( SUN, 10, "Sun", InitSunDriver );

View file

@ -1,514 +0,0 @@
#include "CNFA.h"
//Needed libraries: -lmmdevapi -lavrt -lole32
//Or DLLs: C:/windows/system32/avrt.dll C:/windows/system32/ole32.dll
#ifdef TCC
#define NO_WIN_HEADERS
#endif
#ifdef NO_WIN_HEADERS
#include "CNFA_wasapi_utils.h"
#else
#include <InitGuid.h>
#include <audioclient.h> // Render and capturing audio
#include <mmdeviceapi.h> // Audio device handling
#include <Functiondiscoverykeys_devpkey.h> // Property keys for audio devices
#include <avrt.h> // Thread management
#include "windows.h"
#endif
#include "os_generic.h"
#if defined(WIN32) && !defined( TCC )
#pragma comment(lib,"avrt.lib")
#pragma comment(lib,"ole32.lib")
//And maybe mmdevapi.lib
#endif
#define WASAPIPRINT(message) (printf("[WASAPI] %s\n", message))
#define WASAPIERROR(error, message) (printf("[WASAPI][ERR] %s HRESULT: 0x%lX\n", message, error))
#define PRINTGUID(guid) (printf("{%08lX-%04X-%04X-%02X%02X-%02X%02X%02X%02X%02X%02X}", guid.Data1, guid.Data2, guid.Data3, guid.Data4[0], guid.Data4[1], guid.Data4[2], guid.Data4[3], guid.Data4[4], guid.Data4[5], guid.Data4[6], guid.Data4[7]))
#define WASAPI_EXTRA_DEBUG FALSE
// Forward declarations
void CloseCNFAWASAPI(void* stateObj);
int CNFAStateWASAPI(void* object);
static struct CNFADriverWASAPI* StartWASAPIDriver(struct CNFADriverWASAPI* initState);
static IMMDevice* WASAPIGetDefaultDevice(BOOL isCapture, BOOL isMultimedia);
static void WASAPIPrintAllDeviceLists();
static void WASAPIPrintDeviceList(EDataFlow dataFlow);
void* ProcessEventAudioIn(void* stateObj);
void* InitCNFAWASAPIDriver(
CNFACBType callback, const char *session_name,
int reqSampleRateOut, int reqSampleRateIn,
int reqChannelsOut, int reqChannelsIn, int sugBufferSize,
const char * inputDevice, const char * outputDevice,
void * opaque
);
DEFINE_GUID(CLSID_MMDeviceEnumerator, 0xBCDE0395L, 0xE52F, 0x467C, 0x8E, 0x3D, 0xC4, 0x57, 0x92, 0x91, 0x69, 0x2E);
DEFINE_GUID(IID_IMMDeviceEnumerator, 0xA95664D2L, 0x9614, 0x4F35, 0xA7, 0x46, 0xDE, 0x8D, 0xB6, 0x36, 0x17, 0xE6);
DEFINE_GUID(IID_IMMEndpoint, 0x1BE09788L, 0x6894, 0x4089, 0x85, 0x86, 0x9A, 0x2A, 0x6C, 0x26, 0x5A, 0xC5);
DEFINE_GUID(IID_IAudioClient, 0x1CB9AD4CL, 0xDBFA, 0x4c32, 0xB1, 0x78, 0xC2, 0xF5, 0x68, 0xA7, 0x03, 0xB2);
DEFINE_GUID(IID_IAudioCaptureClient, 0xC8ADBD64L, 0xE71E, 0x48a0, 0xA4, 0xDE, 0x18, 0x5C, 0x39, 0x5C, 0xD3, 0x17);
// This is a fallback if the client application does not provide a GUID.
DEFINE_GUID(CNFA_GUID, 0x899081C7L, 0x9428, 0x4103, 0x87, 0x93, 0x26, 0x47, 0xE5, 0xEA, 0xA2, 0xB4);
// Driver state for the WASAPI backend.  Input (capture/loopback) only;
// output members are placeholders for future work.
struct CNFADriverWASAPI
{
	// Common CNFA items
	void (*CloseFn)(void* object);
	int (*StateFn)(void* object);
	CNFACBType Callback;
	short ChannelCountOut; // Not yet used.
	short ChannelCountIn; // How many channels the input stream has per frame. E.g. stereo = 2.
	int SampleRateOut; // Not yet used (output is not implemented).
	int SampleRateIn; // Input sample rate in Hz, as dictated by the system mix format.
	void* Opaque; // Not relevant to us
	// Adjustable WASAPI-specific items
	const char* SessionName; // The name to give our audio sessions. Otherwise, defaults to using embedded EXE name, Window title, or EXE file name directly.
	const GUID* SessionID; // In order to have different CNFA-based applications individually controllable from the volume mixer, this should be set differently for every client program, but constant across all runs/builds of that application.
	// Everything below here is for internal use only. Do not attempt to interact with these items.
	const char* InputDeviceID; // The device to use for getting input from. Can be a render device (operating in loopback), or a capture device.
	const char* OutputDeviceID; // Not yet used.
	IMMDeviceEnumerator* DeviceEnumerator; // The base object that allows us to look through the system's devices, and from there get everything else.
	IMMDevice* Device; // The device we are taking input from.
	IAudioClient* Client; // The base client we use for getting input.
	IAudioCaptureClient* CaptureClient; // The specific client we use for getting input.
	WAVEFORMATEX* MixFormat; // The format of the input stream.
	INT32 BytesPerFrame; // The number of bytes of one full frame of audio. AKA (channel count) * (sample bit depth), in Bytes.
	BOOL StreamReady; // Whether the input stream is ready for data retrieval.
	BOOL KeepGoing; // Whether to continue interacting with the streams, or shutdown the driver.
	og_thread_t ThreadOut; // Not yet used.
	og_thread_t ThreadIn; // The thread used to grab input data.
	HANDLE EventHandleOut; // Not yet used.
	HANDLE EventHandleIn; // The handle used to wait for more input data to be ready in the input thread.
	HANDLE TaskHandleOut; // The task used to request output thread priority changes.
	HANDLE TaskHandleIn; // The task used to request input thread priority changes.
};
// This is where the driver's current state is stored.
static struct CNFADriverWASAPI* WASAPIState;
// Stops streams, ends threads, and cleans up all resources used by the driver.
// Order matters: clearing KeepGoing first lets ProcessEventAudioIn() leave its
// loop (it stops the audio client itself on the way out) before the COM
// objects it uses are released below.
void CloseCNFAWASAPI(void* stateObj)
{
	struct CNFADriverWASAPI* state = (struct CNFADriverWASAPI*)stateObj;
	if(state != NULL)
	{
		// TODO: See if there are any other items that need cleanup.
		state->KeepGoing = FALSE; // Signal worker thread(s) to exit; joined below.
		if (state->ThreadOut != NULL) { OGJoinThread(state->ThreadOut); }
		if (state->ThreadIn != NULL) { OGJoinThread(state->ThreadIn); }
		if (state->EventHandleOut != NULL) { CloseHandle(state->EventHandleOut); }
		if (state->EventHandleIn != NULL) { CloseHandle(state->EventHandleIn); }
		CoTaskMemFree(state->MixFormat); // CoTaskMemFree(NULL) is a documented no-op, so no guard is needed.
		if (state->CaptureClient != NULL) { state->CaptureClient->lpVtbl->Release(state->CaptureClient); }
		if (state->Client != NULL) { state->Client->lpVtbl->Release(state->Client); }
		if (state->Device != NULL) { state->Device->lpVtbl->Release(state->Device); }
		if (state->DeviceEnumerator != NULL) { state->DeviceEnumerator->lpVtbl->Release(state->DeviceEnumerator); }
		free(stateObj);
		// NOTE(review): CoUninitialize is called unconditionally, but in
		// BUILD_DLL builds StartWASAPIDriver never called CoInitialize —
		// verify this pairing for DLL consumers.
		CoUninitialize();
		printf("[WASAPI] Cleanup completed. Goodbye.\n");
	}
}
// Reports which streams are currently running.
// 0 = No streams active
// 1 = Input stream active
// 2 = Output stream active
// 3 = Both streams active
// Since output is not implemented, the result is currently 0 or 1.
int CNFAStateWASAPI(void* stateObj)
{
	struct CNFADriverWASAPI* driver = (struct CNFADriverWASAPI*)stateObj;
	if (driver == NULL) { return 0; }
	// TODO: Output the correct status when output is implemented.
	return driver->StreamReady ? 1 : 0;
}
// Reads the desired configuration, interfaces with WASAPI to get the current system information, and starts the input stream.
// On any failure this logs the error and returns the (partially initialized)
// state; callers should treat a state whose StreamReady is still FALSE as a
// failed startup and dispose of it with CloseCNFAWASAPI.
static struct CNFADriverWASAPI* StartWASAPIDriver(struct CNFADriverWASAPI* initState)
{
	WASAPIState = initState;
	WASAPIState->StreamReady = FALSE;
	WASAPIState->SessionID = &CNFA_GUID;
	HRESULT ErrorCode;
#ifndef BUILD_DLL
	// A library should never call CoInitialize, as it needs to be done from the host program according to its threading model needs.
	// NOTE: If you are getting errors, and you are using CNFA as a DLL, you need to call CoInitialize yourself with an appropriate threading model for your needs!
	// When the host program is something like ColorChord on the other hand, it cannot be expected to call CoInitialize itself, so we do it on its behalf.
	// This restricts the threading model of direct consumers of CNFA, but we can address that if it does ever become an issue.
	ErrorCode = CoInitialize(NULL);
	if (FAILED(ErrorCode)) { WASAPIERROR(ErrorCode, "COM INIT FAILED!"); return WASAPIState; }
#endif
	if(WASAPI_EXTRA_DEBUG)
	{
		printf("[WASAPI] CLSID for MMDeviceEnumerator: ");
		PRINTGUID(CLSID_MMDeviceEnumerator);
		printf("\n[WASAPI] IID for IMMDeviceEnumerator: ");
		PRINTGUID(IID_IMMDeviceEnumerator);
		printf("\n[WASAPI] IID for IAudioClient: ");
		PRINTGUID(IID_IAudioClient);
		printf("\n[WASAPI] IID for IAudioCaptureClient: ");
		PRINTGUID(IID_IAudioCaptureClient);
		printf("\n[WASAPI] IID for IMMEndpoint: ");
		PRINTGUID(IID_IMMEndpoint);
		printf("\n");
	}
	ErrorCode = CoCreateInstance(&CLSID_MMDeviceEnumerator, NULL, CLSCTX_ALL, &IID_IMMDeviceEnumerator, (void**)&(WASAPIState->DeviceEnumerator));
	if (FAILED(ErrorCode)) { WASAPIERROR(ErrorCode, "Failed to get device enumerator. "); return WASAPIState; }
	WASAPIPrintAllDeviceLists();
	// We need to find the appropriate device to use.
	BYTE DeviceDirection = 2; // 0 = Render, 1 = Capture, 2 = Unknown
	if (WASAPIState->InputDeviceID == NULL)
	{
		WASAPIPRINT("No device specified, attempting to use system default multimedia capture device as input.");
		WASAPIState->Device = WASAPIGetDefaultDevice(TRUE, TRUE);
		DeviceDirection = 1;
	}
	else if (strcmp(WASAPIState->InputDeviceID, "defaultRender") == 0)
	{
		WASAPIPRINT("Attempting to use system default render device as input.");
		WASAPIState->Device = WASAPIGetDefaultDevice(FALSE, TRUE);
		DeviceDirection = 0;
	}
	else if (strncmp("defaultCapture", WASAPIState->InputDeviceID, strlen("defaultCapture")) == 0)
	{
		BOOL IsMultimedia = TRUE;
		if (strstr(WASAPIState->InputDeviceID, "Comm") != NULL) { IsMultimedia = FALSE; }
		printf("[WASAPI] Attempting to use system default %s capture device as input.\n", (IsMultimedia ? "multimedia" : "communications"));
		WASAPIState->Device = WASAPIGetDefaultDevice(TRUE, IsMultimedia);
		DeviceDirection = 1;
	}
	else // A specific device was selected by ID.
	{
		LPWSTR DeviceIDasLPWSTR;
		DeviceIDasLPWSTR = malloc((strlen(WASAPIState->InputDeviceID) + 1) * sizeof(WCHAR));
		mbstowcs(DeviceIDasLPWSTR, WASAPIState->InputDeviceID, strlen(WASAPIState->InputDeviceID) + 1);
		printf("[WASAPI] Attempting to find specified device \"%ls\".\n", DeviceIDasLPWSTR);
		ErrorCode = WASAPIState->DeviceEnumerator->lpVtbl->GetDevice(WASAPIState->DeviceEnumerator, DeviceIDasLPWSTR, &(WASAPIState->Device));
		free(DeviceIDasLPWSTR); // Only needed for the lookup above; the original code leaked it.
		if (FAILED(ErrorCode))
		{
			WASAPIERROR(ErrorCode, "Failed to get audio device from the given ID. Using default multimedia capture device instead.");
			WASAPIState->Device = WASAPIGetDefaultDevice(TRUE, TRUE);
			DeviceDirection = 1;
		}
		else
		{
			printf("[WASAPI] Found specified device.\n");
			DWORD DeviceState;
			ErrorCode = WASAPIState->Device->lpVtbl->GetState(WASAPIState->Device, &DeviceState);
			if (FAILED(ErrorCode)) { WASAPIERROR(ErrorCode, "Failed to get device state."); }
			if ((DeviceState & DEVICE_STATE_DISABLED) == DEVICE_STATE_DISABLED) { WASAPIERROR(E_FAIL, "The specified device is currently disabled."); }
			if ((DeviceState & DEVICE_STATE_NOTPRESENT) == DEVICE_STATE_NOTPRESENT) { WASAPIERROR(E_FAIL, "The specified device is not currently present."); }
			if ((DeviceState & DEVICE_STATE_UNPLUGGED) == DEVICE_STATE_UNPLUGGED) { WASAPIERROR(E_FAIL, "The specified device is currently unplugged."); }
		}
	}
	if (DeviceDirection == 2) // We still don't know what type of device we are trying to use. Query the endpoint to find out.
	{
		IMMEndpoint* Endpoint;
		ErrorCode = WASAPIState->Device->lpVtbl->QueryInterface(WASAPIState->Device, &IID_IMMEndpoint, (void**)&Endpoint);
		if (FAILED(ErrorCode)) { WASAPIERROR(ErrorCode, "Failed to get endpoint of device."); }
		EDataFlow DataFlow;
		ErrorCode = Endpoint->lpVtbl->GetDataFlow(Endpoint, &DataFlow);
		if (FAILED(ErrorCode)) { WASAPIERROR(ErrorCode, "Could not determine endpoint type."); }
		DeviceDirection = (DataFlow == eRender) ? 0 : 1;
		if (Endpoint != NULL) { Endpoint->lpVtbl->Release(Endpoint); }
	}
	// We should have a device now.
	char* DeviceDirectionDesc = (DeviceDirection == 0) ? "render" : ((DeviceDirection == 1) ? "capture" : "UNKNOWN");
	LPWSTR DeviceID;
	ErrorCode = WASAPIState->Device->lpVtbl->GetId(WASAPIState->Device, &DeviceID);
	if (FAILED(ErrorCode)) { WASAPIERROR(ErrorCode, "Failed to get audio device ID."); return WASAPIState; }
	else
	{
		printf("[WASAPI] Using device ID \"%ls\", which is a %s device.\n", DeviceID, DeviceDirectionDesc);
		CoTaskMemFree(DeviceID); // GetId allocates; the caller must free it. The original code leaked it.
	}
	// Start an audio client and get info about the stream format.
	ErrorCode = WASAPIState->Device->lpVtbl->Activate(WASAPIState->Device, &IID_IAudioClient, CLSCTX_ALL, NULL, (void**)&(WASAPIState->Client));
	if (FAILED(ErrorCode)) { WASAPIERROR(ErrorCode, "Failed to get audio client. "); return WASAPIState; }
	ErrorCode = WASAPIState->Client->lpVtbl->GetMixFormat(WASAPIState->Client, &(WASAPIState->MixFormat));
	if (FAILED(ErrorCode)) { WASAPIERROR(ErrorCode, "Failed to get mix format. "); return WASAPIState; }
	printf("[WASAPI] Mix format is %d channel, %luHz sample rate, %db per sample.\n", WASAPIState->MixFormat->nChannels, WASAPIState->MixFormat->nSamplesPerSec, WASAPIState->MixFormat->wBitsPerSample);
	printf("[WASAPI] Mix format is format %d, %dB block-aligned, with %dB of extra data in this definition.\n", WASAPIState->MixFormat->wFormatTag, WASAPIState->MixFormat->nBlockAlign, WASAPIState->MixFormat->cbSize);
	// We'll request PCM, 16bpS data from the system. It should be able to do this conversion for us, as long as we are not in exclusive mode.
	// TODO: This isn't working, no matter what combination I try to ask it for. Figure this out, so we don't have to do the conversion ourselves.
	// Also, we probably don't handle channel counts > 2 with this current setup.
	//WASAPIState->MixFormat->wFormatTag = WAVE_FORMAT_PCM;
	//WASAPIState->MixFormat->wBitsPerSample = 16 * WASAPIState->MixFormat->nChannels;
	//WASAPIState->MixFormat->nBlockAlign = 2 * WASAPIState->MixFormat->nChannels;
	//WASAPIState->MixFormat->nAvgBytesPerSec = WASAPIState->MixFormat->nSamplesPerSec * WASAPIState->MixFormat->nBlockAlign;
	WASAPIState->ChannelCountIn = WASAPIState->MixFormat->nChannels;
	WASAPIState->SampleRateIn = WASAPIState->MixFormat->nSamplesPerSec;
	WASAPIState->BytesPerFrame = WASAPIState->MixFormat->nChannels * (WASAPIState->MixFormat->wBitsPerSample / 8);
	REFERENCE_TIME DefaultInterval, MinimumInterval;
	ErrorCode = WASAPIState->Client->lpVtbl->GetDevicePeriod(WASAPIState->Client, &DefaultInterval, &MinimumInterval);
	if (FAILED(ErrorCode)) { WASAPIERROR(ErrorCode, "Failed to get device timing info. "); return WASAPIState; }
	printf("[WASAPI] Default transaction period is %lld ticks, minimum is %lld ticks.\n", DefaultInterval, MinimumInterval);
	// Configure a capture client. Render devices are opened in loopback mode so we can capture what they play.
	UINT32 StreamFlags;
	if (DeviceDirection == 1) { StreamFlags = AUDCLNT_STREAMFLAGS_NOPERSIST | AUDCLNT_STREAMFLAGS_EVENTCALLBACK; }
	else if (DeviceDirection == 0) { StreamFlags = (AUDCLNT_STREAMFLAGS_LOOPBACK | AUDCLNT_STREAMFLAGS_EVENTCALLBACK); }
	else { WASAPIPRINT("[ERR] Device type was not determined!"); return WASAPIState; }
	// TODO: Allow the target application to influence the interval we choose. Super realtime apps may require MinimumInterval.
	ErrorCode = WASAPIState->Client->lpVtbl->Initialize(WASAPIState->Client, AUDCLNT_SHAREMODE_SHARED, StreamFlags, DefaultInterval, DefaultInterval, WASAPIState->MixFormat, WASAPIState->SessionID);
	if (FAILED(ErrorCode)) { WASAPIERROR(ErrorCode, "Could not init audio client."); return WASAPIState; }
	WASAPIState->EventHandleIn = CreateEvent(NULL, FALSE, FALSE, NULL);
	if (WASAPIState->EventHandleIn == NULL) { WASAPIERROR(E_FAIL, "Failed to make event handle."); return WASAPIState; }
	ErrorCode = WASAPIState->Client->lpVtbl->SetEventHandle(WASAPIState->Client, WASAPIState->EventHandleIn);
	if (FAILED(ErrorCode)) { WASAPIERROR(ErrorCode, "Failed to set event handler."); return WASAPIState; }
	UINT32 BufferFrameCount;
	ErrorCode = WASAPIState->Client->lpVtbl->GetBufferSize(WASAPIState->Client, &BufferFrameCount);
	if (FAILED(ErrorCode)) { WASAPIERROR(ErrorCode, "Could not get audio client buffer size."); return WASAPIState; }
	ErrorCode = WASAPIState->Client->lpVtbl->GetService(WASAPIState->Client, &IID_IAudioCaptureClient, (void**)&(WASAPIState->CaptureClient));
	if (FAILED(ErrorCode)) { WASAPIERROR(ErrorCode, "Could not get audio capture client."); return WASAPIState; }
	// Begin capturing audio. It will be received on a separate thread.
	ErrorCode = WASAPIState->Client->lpVtbl->Start(WASAPIState->Client);
	if (FAILED(ErrorCode)) { WASAPIERROR(ErrorCode, "Could not start audio client."); return WASAPIState; }
	WASAPIState->StreamReady = TRUE;
	WASAPIState->KeepGoing = TRUE;
	WASAPIState->ThreadIn = OGCreateThread(ProcessEventAudioIn, WASAPIState);
	return WASAPIState;
}
// Fetches the system default endpoint for the requested direction and role.
// isCapture: TRUE -> default capture device; FALSE -> default render device.
// isMultimedia: TRUE -> the "multimedia" role default; FALSE -> the "communications" role default.
// Returns NULL (after logging) if the lookup fails.
static IMMDevice* WASAPIGetDefaultDevice(BOOL isCapture, BOOL isMultimedia)
{
	IMMDevice* Found = NULL;
	EDataFlow Flow = isCapture ? eCapture : eRender;
	ERole Role = isMultimedia ? eMultimedia : eCommunications;
	HRESULT Result = WASAPIState->DeviceEnumerator->lpVtbl->GetDefaultAudioEndpoint(WASAPIState->DeviceEnumerator, Flow, Role, &Found);
	if (FAILED(Result))
	{
		WASAPIERROR(Result, "Failed to get default device.");
		return NULL;
	}
	return Found;
}
// Prints all available devices to the console.
static void WASAPIPrintAllDeviceLists()
{
WASAPIPrintDeviceList(eRender);
WASAPIPrintDeviceList(eCapture);
}
// Prints a list of all available devices of a specified data flow direction to the console.
// Each line shows the device's friendly name and its WASAPI ID (the ID can be
// passed back in as the input device selector).
static void WASAPIPrintDeviceList(EDataFlow dataFlow)
{
	printf("[WASAPI] %s Devices:\n", (dataFlow == eCapture ? "Capture" : "Render"));
	IMMDeviceCollection* Devices;
	HRESULT ErrorCode = WASAPIState->DeviceEnumerator->lpVtbl->EnumAudioEndpoints(WASAPIState->DeviceEnumerator, dataFlow, (WASAPI_EXTRA_DEBUG ? DEVICE_STATEMASK_ALL : DEVICE_STATE_ACTIVE), &Devices);
	if (FAILED(ErrorCode)) { WASAPIERROR(ErrorCode, "Failed to get audio endpoints."); return; }
	UINT32 DeviceCount;
	ErrorCode = Devices->lpVtbl->GetCount(Devices, &DeviceCount);
	if (FAILED(ErrorCode)) { WASAPIERROR(ErrorCode, "Failed to get audio endpoint count."); DeviceCount = 0; }
	for (UINT32 DeviceIndex = 0; DeviceIndex < DeviceCount; DeviceIndex++)
	{
		IMMDevice* Device = NULL;
		LPWSTR DeviceID = NULL;
		IPropertyStore* Properties = NULL;
		ErrorCode = Devices->lpVtbl->Item(Devices, DeviceIndex, &Device);
		if (FAILED(ErrorCode)) { WASAPIERROR(ErrorCode, "Failed to get audio device."); continue; }
		ErrorCode = Device->lpVtbl->GetId(Device, &DeviceID);
		if (FAILED(ErrorCode)) { WASAPIERROR(ErrorCode, "Failed to get audio device ID."); }
		else
		{
			ErrorCode = Device->lpVtbl->OpenPropertyStore(Device, STGM_READ, &Properties);
			if (FAILED(ErrorCode)) { WASAPIERROR(ErrorCode, "Failed to get device properties."); }
			else
			{
				PROPVARIANT Variant;
				PropVariantInit(&Variant);
				ErrorCode = Properties->lpVtbl->GetValue(Properties, &PKEY_Device_FriendlyName, &Variant);
				if (FAILED(ErrorCode)) { WASAPIERROR(ErrorCode, "Failed to get device friendly name."); }
				LPWSTR DeviceFriendlyName = L"[Name Retrieval Failed]";
				if (Variant.pwszVal != NULL) { DeviceFriendlyName = Variant.pwszVal; }
				printf("[WASAPI] [%d]: \"%ls\" = \"%ls\"\n", DeviceIndex, DeviceFriendlyName, DeviceID);
				PropVariantClear(&Variant);
			}
		}
		// Release per-device resources on every path; the original code
		// leaked Device and DeviceID whenever a later step failed and it
		// bailed out with 'continue'.
		if (Properties != NULL) { Properties->lpVtbl->Release(Properties); }
		if (DeviceID != NULL) { CoTaskMemFree(DeviceID); }
		if (Device != NULL) { Device->lpVtbl->Release(Device); }
	}
	if (Devices != NULL) { Devices->lpVtbl->Release(Devices); }
}
// Runs on a thread. Waits for audio data to be ready from the system, then forwards it to the registered callback.
// Loops until state->KeepGoing is cleared (by CloseCNFAWASAPI); on exit it
// stops the audio client, reverts the thread priority, and lowers StreamReady.
void* ProcessEventAudioIn(void* stateObj)
{
	struct CNFADriverWASAPI* state = (struct CNFADriverWASAPI*)stateObj;
	HRESULT ErrorCode;
	UINT32 PacketLength;
	// TODO: Set this based on our device period requested. If we are using 10ms or higher, just request "Audio", not "Pro Audio".
	DWORD TaskIndex = 0;
	state->TaskHandleIn = AvSetMmThreadCharacteristicsW(L"Pro Audio", &TaskIndex);
	if (state->TaskHandleIn == NULL) { WASAPIERROR(E_FAIL, "Failed to request thread priority elevation on input task."); }
	while (state->KeepGoing)
	{
		// Waits up to 500ms to get the next audio buffer from the system.
		// The timeout is used because if no audio sessions are active, WASAPI stops sending buffers after a few that indicate silence.
		// This means that if the client tries to exit, this loop would not complete, and therefore the thread would not exit, until the next buffer is received.
		// This is mostly an issue in loopback mode, where true silence is common, not so much on microphones.
		DWORD WaitResult = WaitForSingleObject(state->EventHandleIn, 500);
		if (WaitResult == WAIT_TIMEOUT) { continue; } // We are in a period of silence. Keep waiting for audio.
		else if (WaitResult != WAIT_OBJECT_0) { WASAPIERROR(E_FAIL, "Something went wrong while waiting for an audio event."); continue; }
		ErrorCode = state->CaptureClient->lpVtbl->GetNextPacketSize(state->CaptureClient, &PacketLength);
		if (FAILED(ErrorCode)) { WASAPIERROR(ErrorCode, "Failed to get audio packet size."); continue; }
		BYTE* DataBuffer;
		UINT32 FramesAvailable;
		DWORD BufferStatus;
		BOOL Released = FALSE; // Tracks whether ReleaseBuffer succeeded for this GetBuffer, so it runs exactly once per acquired buffer.
		ErrorCode = state->CaptureClient->lpVtbl->GetBuffer(state->CaptureClient, &DataBuffer, &FramesAvailable, &BufferStatus, NULL, NULL);
		if (FAILED(ErrorCode)) { WASAPIERROR(ErrorCode, "Failed to get audio buffer."); continue; }
		// "The data in the packet is not correlated with the previous packet's device position; this is possibly due to a stream state transition or timing glitch."
		// There's no real way for us to notify the client about this...
		if ((BufferStatus & AUDCLNT_BUFFERFLAGS_DATA_DISCONTINUITY) == AUDCLNT_BUFFERFLAGS_DATA_DISCONTINUITY)
		{
			WASAPIPRINT("A data discontinuity was detected.");
		}
		if ((BufferStatus & AUDCLNT_BUFFERFLAGS_SILENT) == AUDCLNT_BUFFERFLAGS_SILENT)
		{
			// Silent packet: synthesize zeroed samples for the callback instead of reading DataBuffer.
			UINT32 Length = FramesAvailable * state->MixFormat->nChannels; // Sample count (not frames, not bytes).
			if (Length == 0) { Length = state->MixFormat->nChannels; } // Always hand the callback at least one frame.
			INT16* AudioData = malloc(Length * 2); // NOTE(review): malloc result unchecked — OOM would crash here.
			for (int i = 0; i < Length; i++) { AudioData[i] = 0; }
			ErrorCode = state->CaptureClient->lpVtbl->ReleaseBuffer(state->CaptureClient, FramesAvailable);
			if (FAILED(ErrorCode)) { WASAPIERROR(ErrorCode, "Failed to release audio buffer."); }
			else { Released = TRUE; }
			if (WASAPI_EXTRA_DEBUG) { printf("[WASAPI] SILENCE buffer received. Passing on %d samples.\n", Length); }
			WASAPIState->Callback((struct CNFADriver*)WASAPIState, 0, AudioData, 0, Length / state->MixFormat->nChannels );
			free(AudioData);
		}
		else
		{
			// TODO: This assumes that data is coming in at 32b float format. While this appears to be the format that WASAPI uses internally in all cases I've seen, I don't think it's guaranteed.
			// We should instead read the MixFormat information and properly handle the data in other cases.
			// Ideally, we could request 16b signed PCM data from WASAPI, so we don't even have to do any conversion. But I couldn't get this working yet.
			UINT32 Size = FramesAvailable * state->BytesPerFrame; // Size in bytes
			FLOAT* DataAsFloat = (FLOAT*)DataBuffer; // The raw input data, reinterpreted as floats.
			INT16* AudioData = malloc((FramesAvailable * state->MixFormat->nChannels) * 2); // The data we are passing to the consumer. NOTE(review): malloc unchecked.
			// Scale [-1.0, 1.0] float samples into the full signed 16-bit range.
			for (INT32 i = 0; i < Size / 4; i++) { AudioData[i] = (INT16)(DataAsFloat[i] * 32767.5F); }
			ErrorCode = state->CaptureClient->lpVtbl->ReleaseBuffer(state->CaptureClient, FramesAvailable);
			if (FAILED(ErrorCode)) { WASAPIERROR(ErrorCode, "Failed to release audio buffer."); }
			else { Released = TRUE; }
			if (WASAPI_EXTRA_DEBUG) { printf("[WASAPI] Got %d bytes of audio data in %d frames. Fowarding to %p.\n", Size, FramesAvailable, (void*) WASAPIState->Callback); }
			WASAPIState->Callback((struct CNFADriver*)WASAPIState, 0, AudioData, 0, FramesAvailable );
			free(AudioData);
		}
		if (!Released)
		{
			// Fallback: the buffer must be returned to WASAPI even if the earlier release attempt failed.
			ErrorCode = state->CaptureClient->lpVtbl->ReleaseBuffer(state->CaptureClient, FramesAvailable);
			if (FAILED(ErrorCode)) { WASAPIERROR(ErrorCode, "Failed to release audio buffer."); }
		}
	}
	// Shutdown path: stop the stream and undo the priority elevation.
	ErrorCode = state->Client->lpVtbl->Stop(state->Client);
	if (FAILED(ErrorCode)) { WASAPIERROR(ErrorCode, "Failed to stop audio client."); }
	if(state->TaskHandleIn != NULL) { AvRevertMmThreadCharacteristics(state->TaskHandleIn); }
	state->StreamReady = FALSE;
	return 0;
}
// Begins preparation of the WASAPI driver.
// callback: The user application's function where audio data is placed when received from the system and/or audio data is retrieved from to give to the system.
// sessionName: How your session will appear to the end user if you play audio.
// reqSampleRateIn/Out: Sample rate you'd like to request. Ignored, as this is determined by the system. See note below.
// reqChannelsIn: Input channel count you'd like to request. Ignored, as this is determined by the system. See note below.
// reqChannelsOut: Output channel count you'd like to request. Ignored, as this is determined by the system. See note below.
// sugBufferSize: Buffer size you'd like to request. Ignored, as this is determined by the system. See note below.
// inputDevice: The device you want to receive audio from. Loopback is supported, so this can be either a capture or render device.
//   To get the default render device, specify "defaultRender"
//   To get the default multimedia capture device, specify "defaultCapture"
//   To get the default communications capture device, specify "defaultCaptureComm"
//   A device ID as presented by WASAPI can be specified, regardless of what type it is. If it is invalid, the default capture device is used as fallback.
//   If you do not wish to receive audio, specify null. NOT YET IMPLEMENTED
// outputDevice: The device you want to output audio to. OUTPUT IS NOT IMPLEMENTED.
// Returns the driver state handle, or NULL on allocation failure.
// NOTES:
//   Regarding format requests: Sample rate and channel count is determined by the system settings, and cannot be changed. Resampling/mixing will be required in your application if you cannot accept the current system mode. Make sure to check `WASAPIState` for the current system mode.
//   Note also that both sample rate and channel count can vary between input and output!
//   Currently audio output (playing) is not yet implemented.
void* InitCNFAWASAPIDriver(
	CNFACBType callback, const char *sessionName,
	int reqSampleRateOut, int reqSampleRateIn,
	int reqChannelsOut, int reqChannelsIn, int sugBufferSize,
	const char * outputDevice, const char * inputDevice,
	void * opaque)
{
	struct CNFADriverWASAPI * InitState = malloc(sizeof(struct CNFADriverWASAPI));
	if (InitState == NULL) { return NULL; } // BUGFIX: malloc was unchecked; memset below would deref NULL on OOM.
	memset(InitState, 0, sizeof(*InitState));
	InitState->CloseFn = CloseCNFAWASAPI;
	InitState->StateFn = CNFAStateWASAPI;
	InitState->Callback = callback;
	InitState->Opaque = opaque;
	// TODO: Waiting for CNFA to support directional sample rates.
	InitState->SampleRateIn = reqSampleRateIn; // Will be overridden by the actual system setting.
	InitState->SampleRateOut = reqSampleRateOut; // Will be overridden by the actual system setting.
	InitState->ChannelCountIn = reqChannelsIn; // Will be overridden by the actual system setting.
	InitState->ChannelCountOut = reqChannelsOut; // Will be overridden by the actual system setting.
	InitState->InputDeviceID = inputDevice;
	InitState->OutputDeviceID = outputDevice;
	InitState->SessionName = sessionName;
	WASAPIPRINT("WASAPI Init");
	return StartWASAPIDriver(InitState);
}
REGISTER_CNFA(cnfa_wasapi, 20, "WASAPI", InitCNFAWASAPIDriver);

View file

@ -1,636 +0,0 @@
#ifndef _CNFA_WASAPI_UTILS_H
#define _CNFA_WASAPI_UTILS_H
// Minimal self-contained subset of the Windows COM / WASAPI declarations the
// CNFA WASAPI driver needs, so it can build with compilers/toolchains that
// lack the full Windows SDK headers (notably TCC).
//#include "ole2.h"
#ifndef REFPROPERTYKEY
#define REFPROPERTYKEY const PROPERTYKEY * __MIDL_CONST
#endif //REFPROPERTYKEY
// Necessary definitions
#define _ANONYMOUS_STRUCT
#define BEGIN_INTERFACE
#define END_INTERFACE
// Device state / stream flag constants, mirroring mmdeviceapi.h and audioclient.h.
#define DEVICE_STATE_ACTIVE 0x00000001
#define AUDCLNT_STREAMFLAGS_CROSSPROCESS 0x00010000
#define AUDCLNT_STREAMFLAGS_LOOPBACK 0x00020000
#define AUDCLNT_STREAMFLAGS_EVENTCALLBACK 0x00040000
#define AUDCLNT_STREAMFLAGS_NOPERSIST 0x00080000
#define AUDCLNT_STREAMFLAGS_RATEADJUST 0x00100000
#define AUDCLNT_STREAMFLAGS_PREVENT_LOOPBACK_CAPTURE 0x01000000
#define AUDCLNT_STREAMFLAGS_SRC_DEFAULT_QUALITY 0x08000000
#define AUDCLNT_STREAMFLAGS_AUTOCONVERTPCM 0x80000000
#define AUDCLNT_SESSIONFLAGS_EXPIREWHENUNOWNED 0x10000000
#define AUDCLNT_SESSIONFLAGS_DISPLAY_HIDE 0x20000000
#define AUDCLNT_SESSIONFLAGS_DISPLAY_HIDEWHENEXPIRED 0x40000000
// Flags reported by IAudioCaptureClient::GetBuffer (mirrors audioclient.h).
enum _AUDCLNT_BUFFERFLAGS
{
	AUDCLNT_BUFFERFLAGS_DATA_DISCONTINUITY = 0x1,
	AUDCLNT_BUFFERFLAGS_SILENT = 0x2,
	AUDCLNT_BUFFERFLAGS_TIMESTAMP_ERROR = 0x4
} ;
#ifndef REFIID
#define REFIID const IID * __MIDL_CONST
#endif
#ifndef PropVariantInit
#define PropVariantInit(pvar) memset ( (pvar), 0, sizeof(PROPVARIANT) )
#endif
// Compatibility shims for TCC, which ships without the SAL annotation macros
// and several SDK typedefs. All SAL annotations expand to nothing.
#if defined (__TINYC__)
#define _COM_Outptr_
#define _In_
#define _Out_
#define _Outptr_
#define _In_opt_
#define _Out_opt_
#define __RPC__in
#define __RPC__out
#define interface struct
#define CONST_VTBL
#define _Outptr_result_buffer_(X)
#define _Inexpressible_(X)
#define REFPROPVARIANT const PROPVARIANT * __MIDL_CONST
typedef struct tagPROPVARIANT PROPVARIANT;
typedef struct tWAVEFORMATEX WAVEFORMATEX;
typedef IID GUID;
typedef void* HANDLE;
// COM class-context flags (mirrors wtypesbase.h).
#define CLSCTX_INPROC_SERVER 0x1
#define CLSCTX_INPROC_HANDLER 0x2
#define CLSCTX_LOCAL_SERVER 0x4
#define CLSCTX_REMOTE_SERVER 0x10
#define STGM_READ 0x00000000L
#define CLSCTX_ALL (CLSCTX_INPROC_SERVER| \
CLSCTX_INPROC_HANDLER| \
CLSCTX_LOCAL_SERVER| \
CLSCTX_REMOTE_SERVER)
typedef unsigned short VARTYPE;
typedef struct _tagpropertykey {
	GUID fmtid;
	DWORD pid;
} PROPERTYKEY;
#ifndef __wtypes_h__
typedef struct tagDEC {
	USHORT wReserved;
	BYTE scale;
	BYTE sign;
	ULONG Hi32;
	ULONGLONG Lo64;
} DECIMAL;
// Property variant struct, used for getting the device name info
typedef BYTE PROPVAR_PAD1;
typedef BYTE PROPVAR_PAD2;
typedef ULONG PROPVAR_PAD3;
struct tagPROPVARIANT {
	union {
		struct tag_inner_PROPVARIANT
		{
			VARTYPE vt;
			PROPVAR_PAD1 wReserved1;
			PROPVAR_PAD2 wReserved2;
			PROPVAR_PAD3 wReserved3;
			union
			{
				double dblVal; // Filler for the largest object we need to store
				LPWSTR pwszVal; // This is the only parameter we actually use
			};
		} ;
		DECIMAL decVal;
	};
};
#endif
#define _Inout_updates_(dwCount)
#define FAR
typedef interface IUnknown IUnknown;
typedef IUnknown *LPUNKNOWN;
#endif
// When building without the Windows headers entirely, re-declare the GUID /
// PROPERTYKEY definition macros and the few ole32/avrt entry points we call.
#ifdef NO_WIN_HEADERS
#undef DEFINE_GUID
#define DEFINE_GUID(name, l, w1, w2, b1, b2, b3, b4, b5, b6, b7, b8) \
EXTERN_C const GUID DECLSPEC_SELECTANY name \
= { l, w1, w2, { b1, b2, b3, b4, b5, b6, b7, b8 } }
#undef DEFINE_PROPERTYKEY
#define DEFINE_PROPERTYKEY(name,l,w1,w2,b1,b2,b3,b4,b5,b6,b7,b8,pid) \
EXTERN_C const PROPERTYKEY DECLSPEC_SELECTANY name \
= { { l, w1, w2, { b1, b2, b3, b4, b5, b6, b7, b8 } }, pid }
// stuff to be able to read device names
DEFINE_PROPERTYKEY(PKEY_Device_FriendlyName, 0xa45c254e, 0xdf1c, 0x4efd, 0x80, 0x20, 0x67, 0xd1, 0x46, 0xa8, 0x50, 0xe0, 14);
#ifndef WINOLEAPI
#define WINOLEAPI EXTERN_C DECLSPEC_IMPORT HRESULT STDAPICALLTYPE
#define WINOLEAPI_(type) EXTERN_C DECLSPEC_IMPORT type STDAPICALLTYPE
#endif
// Define necessary functions
// (avrt.dll: MMCSS thread-priority helpers used by the capture thread.)
WINOLEAPI_(HANDLE)
AvSetMmThreadCharacteristicsW(LPCWSTR TaskName, LPDWORD TaskIndex);
WINOLEAPI_(BOOL)
AvRevertMmThreadCharacteristics(HANDLE AvrtHandle);
// (ole32.dll: COM init and object creation.)
WINOLEAPI CoInitialize(LPVOID pvReserved);
WINOLEAPI_(void) CoUninitialize();
WINOLEAPI_(void) CoTaskMemFree(LPVOID pv);
WINOLEAPI CoCreateInstance(
	REFCLSID rclsid,
	LPUNKNOWN pUnkOuter,
	DWORD dwClsContext,
	REFIID riid,
	LPVOID FAR* ppv);
// WINOLEAPI CoCreateInstanceEx(
// REFCLSID Clsid,
// IUnknown *punkOuter,
// DWORD dwClsCtx,
// COSERVERINFO *pServerInfo,
// DWORD dwCount,
// MULTI_QI *pResults );
#endif //NO_WIN_HEADERS
// forward declarations
typedef struct IMMDevice IMMDevice;
typedef struct IMMDeviceCollection IMMDeviceCollection;
typedef struct IMMDeviceEnumerator IMMDeviceEnumerator;
typedef struct IMMNotificationClient IMMNotificationClient;
typedef struct IPropertyStore IPropertyStore;
typedef struct IAudioClient IAudioClient;
typedef struct IAudioCaptureClient IAudioCaptureClient;
// So the linker doesn't complain
// (The actual GUID values are defined in the driver's translation unit.)
extern const IID CLSID_MMDeviceEnumerator;
extern const IID IID_IMMDeviceEnumerator;
extern const IID IID_IAudioClient;
extern const IID CNFA_GUID;
extern const IID IID_IAudioCaptureClient;
// Audio endpoint direction (mirrors mmdeviceapi.h EDataFlow).
typedef enum __MIDL___MIDL_itf_mmdeviceapi_0000_0000_0001
{
	eRender = 0,
	eCapture = ( eRender + 1 ) ,
	eAll = ( eCapture + 1 ) ,
	EDataFlow_enum_count = ( eAll + 1 )
} EDataFlow;
// Default-endpoint role (mirrors mmdeviceapi.h ERole).
typedef enum __MIDL___MIDL_itf_mmdeviceapi_0000_0000_0002
{
	eConsole = 0,
	eMultimedia = ( eConsole + 1 ) ,
	eCommunications = ( eMultimedia + 1 ) ,
	ERole_enum_count = ( eCommunications + 1 )
} ERole;
// ----- mmdeviceapi.h interface mirrors -----
// Hand-written C vtable declarations for the COM interfaces used by the
// driver. Layout (method order) must exactly match the SDK originals, since
// instances are created by the real system implementation.
typedef struct IMMDeviceEnumeratorVtbl
{
	BEGIN_INTERFACE
	HRESULT ( STDMETHODCALLTYPE *QueryInterface )(
		IMMDeviceEnumerator * This,
		/* [in] */ REFIID riid,
		/* [annotation][iid_is][out] */
		_COM_Outptr_ void **ppvObject);
	ULONG ( STDMETHODCALLTYPE *AddRef )(
		IMMDeviceEnumerator * This);
	ULONG ( STDMETHODCALLTYPE *Release )(
		IMMDeviceEnumerator * This);
	/* [helpstring][id] */ HRESULT ( STDMETHODCALLTYPE *EnumAudioEndpoints )(
		IMMDeviceEnumerator * This,
		/* [annotation][in] */
		_In_ EDataFlow dataFlow,
		/* [annotation][in] */
		_In_ DWORD dwStateMask,
		/* [annotation][out] */
		_Out_ IMMDeviceCollection **ppDevices);
	/* [helpstring][id] */ HRESULT ( STDMETHODCALLTYPE *GetDefaultAudioEndpoint )(
		IMMDeviceEnumerator * This,
		/* [annotation][in] */
		_In_ EDataFlow dataFlow,
		/* [annotation][in] */
		_In_ ERole role,
		/* [annotation][out] */
		_Out_ IMMDevice **ppEndpoint);
	/* [helpstring][id] */ HRESULT ( STDMETHODCALLTYPE *GetDevice )(
		IMMDeviceEnumerator * This,
		/* [annotation][in] */
		_In_ LPCWSTR pwstrId,
		/* [annotation][out] */
		_Out_ IMMDevice **ppDevice);
	/* [helpstring][id] */ HRESULT ( STDMETHODCALLTYPE *RegisterEndpointNotificationCallback )(
		IMMDeviceEnumerator * This,
		/* [annotation][in] */
		_In_ IMMNotificationClient *pClient);
	/* [helpstring][id] */ HRESULT ( STDMETHODCALLTYPE *UnregisterEndpointNotificationCallback )(
		IMMDeviceEnumerator * This,
		/* [annotation][in] */
		_In_ IMMNotificationClient *pClient);
	END_INTERFACE
} IMMDeviceEnumeratorVtbl;
interface IMMDeviceEnumerator
{
	CONST_VTBL struct IMMDeviceEnumeratorVtbl *lpVtbl;
};
// Enumerated collection of audio endpoints.
typedef struct IMMDeviceCollectionVtbl
{
	BEGIN_INTERFACE
	HRESULT ( STDMETHODCALLTYPE *QueryInterface )(
		IMMDeviceCollection * This,
		/* [in] */ REFIID riid,
		/* [annotation][iid_is][out] */
		_COM_Outptr_ void **ppvObject);
	ULONG ( STDMETHODCALLTYPE *AddRef )(
		IMMDeviceCollection * This);
	ULONG ( STDMETHODCALLTYPE *Release )(
		IMMDeviceCollection * This);
	/* [helpstring][id] */ HRESULT ( STDMETHODCALLTYPE *GetCount )(
		IMMDeviceCollection * This,
		/* [annotation][out] */
		_Out_ UINT *pcDevices);
	/* [helpstring][id] */ HRESULT ( STDMETHODCALLTYPE *Item )(
		IMMDeviceCollection * This,
		/* [annotation][in] */
		_In_ UINT nDevice,
		/* [annotation][out] */
		_Out_ IMMDevice **ppDevice);
	END_INTERFACE
} IMMDeviceCollectionVtbl;
interface IMMDeviceCollection
{
	CONST_VTBL struct IMMDeviceCollectionVtbl *lpVtbl;
};
// A single audio endpoint device.
typedef struct IMMDeviceVtbl
{
	BEGIN_INTERFACE
	HRESULT ( STDMETHODCALLTYPE *QueryInterface )(
		IMMDevice * This,
		/* [in] */ REFIID riid,
		/* [annotation][iid_is][out] */
		_COM_Outptr_ void **ppvObject);
	ULONG ( STDMETHODCALLTYPE *AddRef )(
		IMMDevice * This);
	ULONG ( STDMETHODCALLTYPE *Release )(
		IMMDevice * This);
	/* [helpstring][id] */ HRESULT ( STDMETHODCALLTYPE *Activate )(
		IMMDevice * This,
		/* [annotation][in] */
		_In_ REFIID iid,
		/* [annotation][in] */
		_In_ DWORD dwClsCtx,
		/* [annotation][unique][in] */
		_In_opt_ PROPVARIANT *pActivationParams,
		/* [annotation][iid_is][out] */
		_Out_ void **ppInterface);
	/* [helpstring][id] */ HRESULT ( STDMETHODCALLTYPE *OpenPropertyStore )(
		IMMDevice * This,
		/* [annotation][in] */
		_In_ DWORD stgmAccess,
		/* [annotation][out] */
		_Out_ IPropertyStore **ppProperties);
	/* [helpstring][id] */ HRESULT ( STDMETHODCALLTYPE *GetId )(
		IMMDevice * This,
		/* [annotation][out] */
		_Outptr_ LPWSTR *ppstrId);
	/* [helpstring][id] */ HRESULT ( STDMETHODCALLTYPE *GetState )(
		IMMDevice * This,
		/* [annotation][out] */
		_Out_ DWORD *pdwState);
	END_INTERFACE
} IMMDeviceVtbl;
interface IMMDevice
{
	CONST_VTBL struct IMMDeviceVtbl *lpVtbl;
};
// Device-change notification sink (declared for vtable completeness).
typedef struct IMMNotificationClientVtbl
{
	BEGIN_INTERFACE
	HRESULT ( STDMETHODCALLTYPE *QueryInterface )(
		IMMNotificationClient * This,
		/* [in] */ REFIID riid,
		/* [annotation][iid_is][out] */
		_COM_Outptr_ void **ppvObject);
	ULONG ( STDMETHODCALLTYPE *AddRef )(
		IMMNotificationClient * This);
	ULONG ( STDMETHODCALLTYPE *Release )(
		IMMNotificationClient * This);
	/* [helpstring][id] */ HRESULT ( STDMETHODCALLTYPE *OnDeviceStateChanged )(
		IMMNotificationClient * This,
		/* [annotation][in] */
		_In_ LPCWSTR pwstrDeviceId,
		/* [annotation][in] */
		_In_ DWORD dwNewState);
	/* [helpstring][id] */ HRESULT ( STDMETHODCALLTYPE *OnDeviceAdded )(
		IMMNotificationClient * This,
		/* [annotation][in] */
		_In_ LPCWSTR pwstrDeviceId);
	/* [helpstring][id] */ HRESULT ( STDMETHODCALLTYPE *OnDeviceRemoved )(
		IMMNotificationClient * This,
		/* [annotation][in] */
		_In_ LPCWSTR pwstrDeviceId);
	/* [helpstring][id] */ HRESULT ( STDMETHODCALLTYPE *OnDefaultDeviceChanged )(
		IMMNotificationClient * This,
		/* [annotation][in] */
		_In_ EDataFlow flow,
		/* [annotation][in] */
		_In_ ERole role,
		/* [annotation][in] */
		_In_ LPCWSTR pwstrDefaultDeviceId);
	/* [helpstring][id] */ HRESULT ( STDMETHODCALLTYPE *OnPropertyValueChanged )(
		IMMNotificationClient * This,
		/* [annotation][in] */
		_In_ LPCWSTR pwstrDeviceId,
		/* [annotation][in] */
		_In_ const PROPERTYKEY key);
	END_INTERFACE
} IMMNotificationClientVtbl;
interface IMMNotificationClient
{
	CONST_VTBL struct IMMNotificationClientVtbl *lpVtbl;
};
// Property store: used to read device properties such as the friendly name.
typedef struct IPropertyStoreVtbl
{
	BEGIN_INTERFACE
	HRESULT ( STDMETHODCALLTYPE *QueryInterface )(
		__RPC__in IPropertyStore * This,
		/* [in] */ __RPC__in REFIID riid,
		/* [annotation][iid_is][out] */
		_COM_Outptr_ void **ppvObject);
	ULONG ( STDMETHODCALLTYPE *AddRef )(
		__RPC__in IPropertyStore * This);
	ULONG ( STDMETHODCALLTYPE *Release )(
		__RPC__in IPropertyStore * This);
	HRESULT ( STDMETHODCALLTYPE *GetCount )(
		__RPC__in IPropertyStore * This,
		/* [out] */ __RPC__out DWORD *cProps);
	HRESULT ( STDMETHODCALLTYPE *GetAt )(
		__RPC__in IPropertyStore * This,
		/* [in] */ DWORD iProp,
		/* [out] */ __RPC__out PROPERTYKEY *pkey);
	HRESULT ( STDMETHODCALLTYPE *GetValue )(
		__RPC__in IPropertyStore * This,
		/* [in] */ __RPC__in REFPROPERTYKEY key,
		/* [out] */ __RPC__out PROPVARIANT *pv);
	HRESULT ( STDMETHODCALLTYPE *SetValue )(
		__RPC__in IPropertyStore * This,
		/* [in] */ __RPC__in REFPROPERTYKEY key,
		/* [in] */ __RPC__in REFPROPVARIANT propvar);
	HRESULT ( STDMETHODCALLTYPE *Commit )(
		__RPC__in IPropertyStore * This);
	END_INTERFACE
} IPropertyStoreVtbl;
interface IPropertyStore
{
	CONST_VTBL struct IPropertyStoreVtbl *lpVtbl;
};
// ----- audioclient.h -----
// Mirrors of the audioclient.h types used by the capture path. As above,
// vtable method order must match the SDK exactly.
typedef enum _AUDCLNT_SHAREMODE
{
	AUDCLNT_SHAREMODE_SHARED,
	AUDCLNT_SHAREMODE_EXCLUSIVE
} AUDCLNT_SHAREMODE;
// Time in 100-nanosecond units, as used throughout WASAPI.
typedef LONGLONG REFERENCE_TIME;
typedef struct IAudioClientVtbl
{
	BEGIN_INTERFACE
	HRESULT ( STDMETHODCALLTYPE *QueryInterface )(
		IAudioClient * This,
		/* [in] */ REFIID riid,
		/* [annotation][iid_is][out] */
		_COM_Outptr_ void **ppvObject);
	ULONG ( STDMETHODCALLTYPE *AddRef )(
		IAudioClient * This);
	ULONG ( STDMETHODCALLTYPE *Release )(
		IAudioClient * This);
	HRESULT ( STDMETHODCALLTYPE *Initialize )(
		IAudioClient * This,
		/* [annotation][in] */
		_In_ AUDCLNT_SHAREMODE ShareMode,
		/* [annotation][in] */
		_In_ DWORD StreamFlags,
		/* [annotation][in] */
		_In_ REFERENCE_TIME hnsBufferDuration,
		/* [annotation][in] */
		_In_ REFERENCE_TIME hnsPeriodicity,
		/* [annotation][in] */
		_In_ const WAVEFORMATEX *pFormat,
		/* [annotation][in] */
		_In_opt_ LPCGUID AudioSessionGuid);
	HRESULT ( STDMETHODCALLTYPE *GetBufferSize )(
		IAudioClient * This,
		/* [annotation][out] */
		_Out_ UINT32 *pNumBufferFrames);
	HRESULT ( STDMETHODCALLTYPE *GetStreamLatency )(
		IAudioClient * This,
		/* [annotation][out] */
		_Out_ REFERENCE_TIME *phnsLatency);
	HRESULT ( STDMETHODCALLTYPE *GetCurrentPadding )(
		IAudioClient * This,
		/* [annotation][out] */
		_Out_ UINT32 *pNumPaddingFrames);
	HRESULT ( STDMETHODCALLTYPE *IsFormatSupported )(
		IAudioClient * This,
		/* [annotation][in] */
		_In_ AUDCLNT_SHAREMODE ShareMode,
		/* [annotation][in] */
		_In_ const WAVEFORMATEX *pFormat,
		/* [unique][annotation][out] */
		_Out_opt_ WAVEFORMATEX **ppClosestMatch);
	HRESULT ( STDMETHODCALLTYPE *GetMixFormat )(
		IAudioClient * This,
		/* [annotation][out] */
		_Out_ WAVEFORMATEX **ppDeviceFormat);
	HRESULT ( STDMETHODCALLTYPE *GetDevicePeriod )(
		IAudioClient * This,
		/* [annotation][out] */
		_Out_opt_ REFERENCE_TIME *phnsDefaultDevicePeriod,
		/* [annotation][out] */
		_Out_opt_ REFERENCE_TIME *phnsMinimumDevicePeriod);
	HRESULT ( STDMETHODCALLTYPE *Start )(
		IAudioClient * This);
	HRESULT ( STDMETHODCALLTYPE *Stop )(
		IAudioClient * This);
	HRESULT ( STDMETHODCALLTYPE *Reset )(
		IAudioClient * This);
	HRESULT ( STDMETHODCALLTYPE *SetEventHandle )(
		IAudioClient * This,
		/* [in] */ HANDLE eventHandle);
	HRESULT ( STDMETHODCALLTYPE *GetService )(
		IAudioClient * This,
		/* [annotation][in] */
		_In_ REFIID riid,
		/* [annotation][iid_is][out] */
		_Out_ void **ppv);
	END_INTERFACE
} IAudioClientVtbl;
interface IAudioClient
{
	CONST_VTBL struct IAudioClientVtbl *lpVtbl;
};
// Capture-side service obtained via IAudioClient::GetService.
typedef struct IAudioCaptureClientVtbl
{
	BEGIN_INTERFACE
	HRESULT ( STDMETHODCALLTYPE *QueryInterface )(
		IAudioCaptureClient * This,
		/* [in] */ REFIID riid,
		/* [annotation][iid_is][out] */
		_COM_Outptr_ void **ppvObject);
	ULONG ( STDMETHODCALLTYPE *AddRef )(
		IAudioCaptureClient * This);
	ULONG ( STDMETHODCALLTYPE *Release )(
		IAudioCaptureClient * This);
	HRESULT ( STDMETHODCALLTYPE *GetBuffer )(
		IAudioCaptureClient * This,
		/* [annotation][out] */
		_Outptr_result_buffer_(_Inexpressible_("*pNumFramesToRead * pFormat->nBlockAlign")) BYTE **ppData,
		/* [annotation][out] */
		_Out_ UINT32 *pNumFramesToRead,
		/* [annotation][out] */
		_Out_ DWORD *pdwFlags,
		/* [annotation][unique][out] */
		_Out_opt_ UINT64 *pu64DevicePosition,
		/* [annotation][unique][out] */
		_Out_opt_ UINT64 *pu64QPCPosition);
	HRESULT ( STDMETHODCALLTYPE *ReleaseBuffer )(
		IAudioCaptureClient * This,
		/* [annotation][in] */
		_In_ UINT32 NumFramesRead);
	HRESULT ( STDMETHODCALLTYPE *GetNextPacketSize )(
		IAudioCaptureClient * This,
		/* [annotation][out] */
		_Out_ UINT32 *pNumFramesInNextPacket);
	END_INTERFACE
} IAudioCaptureClientVtbl;
interface IAudioCaptureClient
{
	CONST_VTBL struct IAudioCaptureClientVtbl *lpVtbl;
};
// Endpoint direction query interface.
typedef interface IMMEndpoint IMMEndpoint;
typedef struct IMMEndpointVtbl
{
	BEGIN_INTERFACE
	HRESULT ( STDMETHODCALLTYPE *QueryInterface )(
		IMMEndpoint * This,
		/* [in] */ REFIID riid,
		/* [annotation][iid_is][out] */
		_COM_Outptr_ void **ppvObject);
	ULONG ( STDMETHODCALLTYPE *AddRef )(
		IMMEndpoint * This);
	ULONG ( STDMETHODCALLTYPE *Release )(
		IMMEndpoint * This);
	/* [helpstring][id] */ HRESULT ( STDMETHODCALLTYPE *GetDataFlow )(
		IMMEndpoint * This,
		/* [annotation][out] */
		_Out_ EDataFlow *pDataFlow);
	END_INTERFACE
} IMMEndpointVtbl;
interface IMMEndpoint
{
	CONST_VTBL struct IMMEndpointVtbl *lpVtbl;
};
// Endpoint state masks (mirrors mmdeviceapi.h).
// NOTE(review): DEVICE_STATE_ACTIVE is also defined earlier in this header
// with the identical value, so the redefinition is benign.
#define DEVICE_STATE_ACTIVE 0x00000001
#define DEVICE_STATE_DISABLED 0x00000002
#define DEVICE_STATE_NOTPRESENT 0x00000004
#define DEVICE_STATE_UNPLUGGED 0x00000008
#define DEVICE_STATEMASK_ALL 0x0000000f
#endif // _CNFA_WASAPI_UTILS_H

View file

@ -1,258 +0,0 @@
//Copyright 2015-2020 <>< Charles Lohr under the ColorChord License, MIT/x11 license or NewBSD Licenses.
#include <windows.h>
#include "CNFA.h"
#include "os_generic.h"
#include <stdio.h>
#include <stdint.h>
#include <mmsystem.h>
#include <stdlib.h>
//Include -lwinmm, or, C:/windows/system32/winmm.dll
#if defined(WINDOWS) || defined(WIN32) || defined(WIN64) \
|| defined(_WIN32) || defined(_WIN64)
#ifndef strdup
#define strdup _strdup
#endif
#endif
#if defined(WIN32) && !defined( TCC )
#pragma comment(lib,"winmm.lib")
#endif
// Number of WAVEHDR buffers kept in flight per direction (triple buffering).
#define BUFFS 3
// Per-instance state for the WinMM (waveIn/waveOut) driver.
struct CNFADriverWin
{
	//Standard header - must remain.
	void (*CloseFn)( void * object );   // CNFAClose entry point.
	int (*StateFn)( void * object );    // CNFAState entry point.
	CNFACBType callback;                // User audio callback.
	short channelsPlay;                 // Playback channel count (0 = no playback).
	short channelsRec;                  // Record channel count (0 = no recording).
	int spsPlay;                        // Playback sample rate.
	int spsRec;                         // Record sample rate.
	void * opaque;                      // User pointer passed through to the callback.
	char * sInputDev;                   // Input device selector (numeric string) or NULL for WAVE_MAPPER.
	char * sOutputDev;                  // Output device selector (numeric string) or NULL for WAVE_MAPPER.
	int buffer;                         // Buffer size in frames per WAVEHDR.
	int isEnding;                       // Set during shutdown so callbacks stop requeueing buffers.
	int GOBUFFRec;                      // Index of the next capture buffer to hand to the callback.
	int GOBUFFPlay;                     // Index of the next playback buffer to refill.
	int recording;                      // 1 once the capture device reported MM_WIM_OPEN.
	int playing;                        // 1 once the playback device reported MM_WOM_OPEN.
	HWAVEIN hMyWaveIn;
	HWAVEOUT hMyWaveOut;
	WAVEHDR WavBuffIn[BUFFS];
	WAVEHDR WavBuffOut[BUFFS];
};
// Single active driver instance; the WinMM callbacks have no user pointer in
// this code path, so they reach the state through this file-scope global.
static struct CNFADriverWin * w;
// Shuts down the WinMM driver: stops and releases the capture and playback
// devices (whichever are open), frees their buffers, then frees the state.
void CloseCNFAWin( void * v )
{
	struct CNFADriverWin * r = (struct CNFADriverWin *)v;
	int i;
	if( r )
	{
		// Keep HANDLEMIC/HANDLESINK from requeueing buffers while we tear down.
		r->isEnding = 1;
		if( r->hMyWaveIn )
		{
			waveInStop(r->hMyWaveIn);
			waveInReset(r->hMyWaveIn);
			for ( i=0;i<BUFFS;i++)
			{
				waveInUnprepareHeader(r->hMyWaveIn,&(r->WavBuffIn[i]),sizeof(WAVEHDR));
				free ((r->WavBuffIn[i]).lpData);
			}
			waveInClose(r->hMyWaveIn);
		}
		if( r->hMyWaveOut )
		{
			waveOutPause(r->hMyWaveOut);
			waveOutReset(r->hMyWaveOut);
			for ( i=0;i<BUFFS;i++)
			{
				// BUGFIX: these are output headers on the output device; the
				// original unprepared them against the *input* device handle.
				waveOutUnprepareHeader(r->hMyWaveOut,&(r->WavBuffOut[i]),sizeof(WAVEHDR));
				free ((r->WavBuffOut[i]).lpData);
			}
			// BUGFIX: the original also re-closed hMyWaveIn here (double close
			// when both directions were open). Only the output device belongs here.
			waveOutClose(r->hMyWaveOut);
		}
		free( r );
	}
}
// Reports driver activity as a bitmask: bit 0 = recording, bit 1 = playing.
int CNFAStateWin( void * v )
{
	struct CNFADriverWin * drv = (struct CNFADriverWin *)v;
	int mask = drv->recording;
	if( drv->playing )
	{
		mask |= 2;
	}
	return mask;
}
// WinMM capture callback. On MM_WIM_DATA, hands the completed capture buffer
// to the user callback (record-only: the play pointer and frame count are 0),
// then immediately requeues the same buffer and advances the ring index.
// Removed: unused locals ctr/ob/cValue/maxWave from the original ('ob' was
// computed as (GOBUFFRec+BUFFS)%BUFFS -- identical to GOBUFFRec -- and never read).
void CALLBACK HANDLEMIC(HWAVEIN hwi, UINT umsg, DWORD dwi, DWORD hdr, DWORD dwparm)
{
	if (w->isEnding) return;
	switch (umsg)
	{
	case MM_WIM_OPEN:
		printf( "Mic Open.\n" );
		w->recording = 1;
		break;
	case MM_WIM_DATA:
		w->callback( (struct CNFADriver*)w, 0, (short*)(w->WavBuffIn[w->GOBUFFRec]).lpData, 0, w->buffer );
		waveInAddBuffer(w->hMyWaveIn,&(w->WavBuffIn[w->GOBUFFRec]),sizeof(WAVEHDR));
		w->GOBUFFRec = ( w->GOBUFFRec + 1 ) % BUFFS;
		break;
	}
}
// WinMM playback callback. On MM_WOM_DONE, asks the user callback to refill
// the just-finished buffer (play-only: record pointer and count are 0), then
// requeues it and advances the ring index.
// NOTE(review): the first parameter is typed HWAVEIN although this is a
// waveOut callback; both are opaque handles so it works, but HWAVEOUT would
// be the accurate type -- kept as-is to preserve the registered signature.
// Removed: unused locals ctr/cValue/maxWave from the original.
void CALLBACK HANDLESINK(HWAVEIN hwi, UINT umsg, DWORD dwi, DWORD hdr, DWORD dwparm)
{
	if (w->isEnding) return;
	switch (umsg)
	{
	case MM_WOM_OPEN:
		printf( "Sink Open.\n" );
		w->playing = 1;
		break;
	case MM_WOM_DONE:
		w->callback( (struct CNFADriver*)w, (short*)(w->WavBuffOut[w->GOBUFFPlay]).lpData, 0, w->buffer, 0 );
		waveOutWrite( w->hMyWaveOut, &(w->WavBuffOut[w->GOBUFFPlay]),sizeof(WAVEHDR) );
		w->GOBUFFPlay = ( w->GOBUFFPlay + 1 ) % BUFFS;
		break;
	}
}
static struct CNFADriverWin * InitWinCNFA( struct CNFADriverWin * r )
{
int i;
WAVEFORMATEX wfmt;
memset( &wfmt, 0, sizeof(wfmt) );
printf ("WFMT Size (debugging temp for TCC): %llu\n", sizeof(wfmt) );
printf( "WFMT: %d %d %d\n", r->channelsRec, r->spsRec, r->spsRec * r->channelsRec );
w = r;
wfmt.wFormatTag = WAVE_FORMAT_PCM;
wfmt.nChannels = r->channelsRec;
wfmt.nAvgBytesPerSec = r->spsRec * r->channelsRec;
wfmt.nBlockAlign = r->channelsRec * 2;
wfmt.nSamplesPerSec = r->spsRec;
wfmt.wBitsPerSample = 16;
wfmt.cbSize = 0;
long dwdeviceR, dwdeviceP;
dwdeviceR = r->sInputDev?atoi(r->sInputDev):WAVE_MAPPER;
dwdeviceP = r->sOutputDev?atoi(r->sOutputDev):WAVE_MAPPER;
if( r->channelsRec )
{
printf( "In Wave Devs: %d; WAVE_MAPPER: %d; Selected Input: %ld\n", waveInGetNumDevs(), WAVE_MAPPER, dwdeviceR );
int p = waveInOpen(&r->hMyWaveIn, dwdeviceR, &wfmt, (intptr_t)(&HANDLEMIC), 0, CALLBACK_FUNCTION);
if( p )
{
fprintf( stderr, "Error performing waveInOpen. Received code: %d\n", p );
}
printf( "waveInOpen: %d\n", p );
for ( i=0;i<BUFFS;i++)
{
memset( &(r->WavBuffIn[i]), 0, sizeof(r->WavBuffIn[i]) );
(r->WavBuffIn[i]).dwBufferLength = r->buffer*2*r->channelsRec;
(r->WavBuffIn[i]).dwLoops = 1;
(r->WavBuffIn[i]).lpData=(char*) malloc(r->buffer*r->channelsRec*2);
printf( "buffer gen size: %d: %p\n", r->buffer*r->channelsRec*2, (r->WavBuffIn[i]).lpData );
p = waveInPrepareHeader(r->hMyWaveIn,&(r->WavBuffIn[i]),sizeof(WAVEHDR));
printf( "WIPr: %d\n", p );
waveInAddBuffer(r->hMyWaveIn,&(r->WavBuffIn[i]),sizeof(WAVEHDR));
printf( "WIAr: %d\n", p );
}
p = waveInStart(r->hMyWaveIn);
if( p )
{
fprintf( stderr, "Error performing waveInStart. Received code %d\n", p );
}
}
wfmt.nChannels = r->channelsPlay;
wfmt.nAvgBytesPerSec = r->spsPlay * r->channelsPlay;
wfmt.nBlockAlign = r->channelsPlay * 2;
wfmt.nSamplesPerSec = r->spsPlay;
if( r->channelsPlay )
{
printf( "Out Wave Devs: %d; WAVE_MAPPER: %d; Selected Input: %ld\n", waveOutGetNumDevs(), WAVE_MAPPER, dwdeviceP );
int p = waveOutOpen( &r->hMyWaveOut, dwdeviceP, &wfmt, (intptr_t)(void*)(&HANDLESINK), (intptr_t)r, CALLBACK_FUNCTION);
if( p )
{
fprintf( stderr, "Error performing waveOutOpen. Received code: %d\n", p );
}
printf( "waveOutOpen: %d\n", p );
for ( i=0;i<BUFFS;i++)
{
memset( &(r->WavBuffOut[i]), 0, sizeof(r->WavBuffOut[i]) );
(r->WavBuffOut[i]).dwBufferLength = r->buffer*2*r->channelsPlay;
(r->WavBuffOut[i]).dwLoops = 1;
int size = r->buffer*r->channelsPlay*2;
char * buf = (r->WavBuffOut[i]).lpData=(char*) malloc(size);
memset( buf, 0, size );
p = waveOutPrepareHeader(r->hMyWaveOut,&(r->WavBuffOut[i]),sizeof(WAVEHDR));
waveOutWrite( r->hMyWaveOut, &(r->WavBuffOut[i]),sizeof(WAVEHDR));
}
}
return r;
}
// WinMM driver entry point: allocates and fills the driver state from the
// requested parameters, then opens the devices via InitWinCNFA.
// Returns the driver handle, or NULL on allocation failure.
void * InitCNFAWin( CNFACBType cb, const char * your_name, int reqSPSPlay, int reqSPSRec, int reqChannelsPlay, int reqChannelsRec, int sugBufferSize, const char * outputSelect, const char * inputSelect, void * opaque )
{
	struct CNFADriverWin * r = (struct CNFADriverWin *)malloc( sizeof( struct CNFADriverWin ) );
	if( !r ) { return 0; } // BUGFIX: malloc was unchecked; memset below would deref NULL on OOM.
	memset( r, 0, sizeof(*r) );
	r->CloseFn = CloseCNFAWin;
	r->StateFn = CNFAStateWin;
	r->callback = cb;
	r->opaque = opaque;
	r->spsPlay = reqSPSPlay;
	r->spsRec = reqSPSRec;
	r->channelsPlay = reqChannelsPlay;
	r->channelsRec = reqChannelsRec;
	r->buffer = sugBufferSize;
	r->sInputDev = inputSelect?strdup(inputSelect):0;
	r->sOutputDev = outputSelect?strdup(outputSelect):0;
	r->recording = 0;
	r->playing = 0;
	r->isEnding = 0;
	r->GOBUFFPlay = 0;
	r->GOBUFFRec = 0;
	return InitWinCNFA(r);
}
// Register this driver with the CNFA core at priority 10 under the name "WIN".
REGISTER_CNFA( WinCNFA, 10, "WIN", InitCNFAWin );

View file

@ -1,21 +0,0 @@
MIT License
Copyright (c) 2020 <>< Charles Lohr
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

View file

@ -1,34 +0,0 @@
# BUGFIX: .PHONY listed "wave_player" but the actual target is "wav_player".
.PHONY: all shared shared-example wav_player clean
all: example wav_player
# os_generic.h comes from the rawdraw project; fetch it if missing.
os_generic.h:
	wget https://raw.githubusercontent.com/cntools/rawdraw/master/os_generic.h
LDFLAGS = -lasound -lpthread
# Enable PulseAudio support automatically when pkgconf can find it.
ifeq "$(shell pkgconf --exists pulse && echo 'found' )" "found"
PULSE ?= "YES"
else
PULSE ?= "NO"
endif
ifeq "$(PULSE)" "YES"
LDFLAGS += -lpulse -DPULSEAUDIO
endif
example : example.c os_generic.h
	$(CC) -o $@ $^ $(LDFLAGS) -lm
wav_player: os_generic.h
	$(MAKE) -C wave_player PULSE=$(PULSE)
shared : os_generic.h
	$(CC) CNFA.c -shared -fpic -o libCNFA.so -DCNFA_IMPLEMENTATION -DBUILD_DLL \
	$(LDFLAGS)
shared-example : example.c shared
	$(CC) -L. -Wl,-rpath=. -o example example.c -DUSE_SHARED -lm -lCNFA
clean :
	rm -rf *.o *~ example libCNFA.so
	$(MAKE) -C wave_player clean

View file

@ -1,75 +0,0 @@
# cnfa
CN's foundational audio drivers
See CNFA.h for more help and use info.
This toolset is 100% based around sound callbacks.
You get three functions:
```C
struct CNFADriver* CNFAInit(const char* driver_name,
const char* your_name,
CNFACBType cb,
int reqSPSPlay,
int reqSPSRec,
int reqChannelsPlay,
int reqChannelsRec,
int sugBufferSize,
const char* outputSelect,
const char* inputSelect,
void* opaque)
// Returns bitmask: 1 if mic recording, 2 if playback running, 3 if both running.
int CNFAState(struct CNFADriver* cnfaobject)
void CNFAClose(struct CNFADriver* cnfaobject)
```
Then it goes and calls a callback function, the `CNFACBType cb` parameter. This can feed you new frames, or you can pass frames back in.
`framesp` is the size of one channel of the output buffer in samples. If there are multiple channels they should be interleaved i.e. L:R:L:R...
`framerp` is the size of one channel of the input buffer in samples. Works the same way as the output buffer.
You can obtain the number of input/output channels from `sd->channelsRec` and `sd->channelsPlay` respectively.
```C
void Callback(struct CNFADriver* sd, short* out, short* in, int framesp, int framesr)
{
int i;
for( i = 0; i < framesr * sd->channelsRec; i++ )
short value = in[i];
for( i = 0; i < framesp * sd->channelsPlay; i++ )
out[i] = 0; //Send output frames.
}
```
There are two examples in this repository, [example.c](example.c) and [wave_player.c](wave_player/wav_player.c). Both of these show examples of using
CNFA to output sound. For use of CNFA for input see [colorchord](https://github.com/cnlohr/colorchord)
### Building .DLL and .SO files
If you would like to use CNFA in a project where using a DLL or SO file is more practical, you can easily build those files. The below steps are for the Clang & GCC compilers; others like TCC should work fine as well, they just have not been tested.
NOTE: In order for functions to be exported, you'll need to make sure `-DBUILD_DLL` is specified!
Parts of CNFA rely on [rawdraw](https://github.com/cntools/rawdraw), so make sure to clone this repo somewhere and insert the path to it in the `[RAWDRAW PATH]` space below:
**Don't forget to install all libraries' headers!** For Linux, at least these packages: `libasound2-dev`, `libpulse-dev`
Windows build:
```PS
& "C:\Program Files\LLVM\bin\clang.exe" CNFA.c -shared -o CNFA.dll -DWINDOWS -DCNFA_IMPLEMENTATION -DBUILD_DLL -D_CRT_SECURE_NO_WARNINGS -I"[RAWDRAW PATH]" -lmmdevapi -lavrt -lole32
```
Linux build:
```Bash
make shared-example
```
This will build the shared library as `libCNFA.so` and will build the example
project to link against it. You can run the example project with `./example`.
The makefile will try and copy the `os_generic.h` header from
[rawdraw](https://github.com/cntools/rawdraw)
automatically using `wget`.
The command to simply build the shared library is:
```Bash
gcc CNFA.c -shared -fpic -o CNFA.so -DCNFA_IMPLEMENTATION -DBUILD_DLL -I"[RAWDRAW PATH]" -lasound -lpulse -lpthread
```

View file

@ -1,65 +0,0 @@
#include <stdio.h>
#include <math.h>
#include <unistd.h> /* for sleep() used in main() */
// If using the shared library, don't define CNFA_IMPLEMENTATION
// (it's already in the library).
#ifndef USE_SHARED
#define CNFA_IMPLEMENTATION
#endif
#include "CNFA.h"
// How long main() sleeps while audio runs, in seconds (effectively "forever").
#define RUNTIME 500000
double omega = 0;        // Running phase of the generated sine tone.
int totalframesr = 0;    // Total frames received from the input device.
int totalframesp = 0;    // Total frames sent to the output device.
// CNFA audio callback: fills the output buffer with a 440 Hz (A4) sine tone
// at 10% volume, duplicated across all playback channels (interleaved).
// Input frames are counted but otherwise ignored.
void Callback( struct CNFADriver * sd, short * out, short * in, int framesp, int framesr )
{
	totalframesr += framesr;
	totalframesp += framesp;
	int nchan = sd->channelsPlay;
	// Per-sample phase increment for a 440 Hz tone at the playback rate.
	double step = ( 3.14159 * 2 * 440. ) / sd->spsPlay;
	for( int frame = 0; frame < framesp; frame++ )
	{
		omega += step;
		// 10% amplitude, converted to a signed 16-bit sample.
		short sample = sin( omega ) * 0.1 * 32767;
		for( int ch = 0; ch < nchan; ch++ )
		{
			out[frame * nchan + ch] = sample;
		}
	}
}
struct CNFADriver * cnfa;

// Entry point: starts a CNFA driver with the tone-generator callback, then
// sleeps while audio runs on the driver's thread, and finally prints frame
// statistics. (sleep() is POSIX; RUNTIME is in seconds.)
int main( int argc, char ** argv )
{
	cnfa = CNFAInit(
		//"PULSE",
		"ALSA", //You can select a playback driver, or use 0 for default.
		//0, //default
		"cnfa_example", Callback,
		48000, //Requested samplerate for playback
		48000, //Requested samplerate for recording
		2, //Number of playback channels.
		2, //Number of record channels.
		1024, //Buffer size in frames.
		// BUGFIX(comments): per the CNFAInit signature, outputSelect comes
		// before inputSelect; the original comments were swapped.
		0, //Could be a string, for the selected output device - but 0 means default.
		0, //Could be a string, for the selected input device - but 0 means default.
		0 // 'opaque' value if the driver wanted it.
	);
	sleep( RUNTIME );
	printf( "Received %d (%d per sec) frames\nSent %d (%d per sec) frames\n", totalframesr, totalframesr/RUNTIME, totalframesp, totalframesp/RUNTIME );
	return 0;
}

View file

@ -1,27 +0,0 @@
.PHONY: clean
C_SRCS = wav_player.c
OUT := wav_player
CFLAGS = -O2 -g
LDFLAGS = -lasound -lpthread -lm
CC ?= gcc -std=c99
# Enable PulseAudio support automatically when pkgconf can find it.
ifeq "$(shell pkgconf --exists pulse && echo 'found' )" "found"
PULSE ?= "YES"
else
PULSE ?= "NO"
endif
ifeq "$(PULSE)" "YES"
LDFLAGS += -lpulse
endif
OBJS := $(C_SRCS:.c=.o)
$(OUT): $(OBJS)
	# BUGFIX: -l libraries must come after the object files that use them,
	# or linkers with --as-needed / strict left-to-right resolution fail.
	$(CC) $(CFLAGS) $(OBJS) $(LDFLAGS) -o $(OUT)
clean:
	rm -f $(OBJS) $(OUT)
.c.o:
	$(CC) $(CFLAGS) -c $< -o $@

View file

@ -1,117 +0,0 @@
/*
 * wavDefs.h Samuel Ellicott - 9-3-13
 * defines basic features of PCM encoded wav files:
 * byte offsets of the RIFF/fmt/data chunk fields, plus the in-memory
 * structures the player fills from them.
 */
#ifndef _WAV_DEFS_H_
#define _WAV_DEFS_H_
#include <stdint.h>
//#define ABSOLUTE 1
#ifdef ABSOLUTE
//absolute offsets PCM audacity files only
// name offset description
//RIFF chunk descriptor
#define CHUNK_ID 0 //start of the riff chunk
#define CHUNK_SIZE 4 //size of the file minus the first 2 entries
#define FORMAT 8 //describes the format aka. WAVE
//fmt sub-chunk descriptor
#define FMT_CHUNK_ID 12 //start of the fmt sub chunk
#define FMT_CHUNK_SIZE 16 //length of the rest of the sub-chunk. for PCM should be 16
#define AUDIO_FORMAT 20 //audio format - for PCM should be 1 (unsigned char)
#define NUM_CHANNELS 22 //the number of channels 1 or 2 (unsigned char)
#define SAMPLE_RATE 24 //sample rate 8000, 44100, or 48000 typical in hz
#define BIT_RATE 28 //bit rate - (SampleRate * NumChannels * BitsPerSample)/8
#define BLOCK_ALIGN 32 //NumChannels * BitsPerSample/8 (unsigned char)
//1 - 8 bit mono, 2 - 8 bit stereo/16 bit mono, 4 - 16 bit stereo
#define BITS_PER_SAMPLE 34 //bits per channel 8 or 16 (unsigned char)
//fmt sub-chunk descriptor
#define CHUNK_ID_1 12 //start of the fmt sub chunk
//NOTE(review): "should be 1" below looks like a copy-paste of the
//AUDIO_FORMAT comment; a PCM fmt chunk size is normally 16 — confirm.
#define CHUNK_SIZE_1 16 //length of the rest of the sub-chunk. for PCM should be 1
//info data sub-chunk descriptor
//if Wav_Data.is_info == 1 than this is info chunk
//if Wav_Data.is_info == 0 than this is data chunk
#define _CHUNK_ID 36 //start of data sub chunk
#define _CHUNK_SIZE 40 //chunk size of sub chunk
#define _CHUNK_DATA 44 //data in chunk
#else
//general chunk descriptor: offsets are relative to the start of each chunk
#define CHUNK_ID 0 //start of the riff chunk
#define CHUNK_SIZE 4 //size of the file minus the first 2 entries
#define FORMAT 8 //describes the format for RIFF and LIST chunks
#define CHUNK_DATA 8 //start of data for other chunks
#define CHUNK_ID_LEN 5 //length of chunk ID string (including null char)
//fmt sub-chunk descriptor
#define FMT_CHUNK_ID 0 //start of the fmt sub chunk
#define FMT_CHUNK_SIZE 4 //length of the rest of the sub-chunk. for PCM should be 16
#define AUDIO_FORMAT 8 //audio format - for PCM should be 1 (unsigned char)
#define NUM_CHANNELS 10 //the number of channels 1 or 2 (unsigned char)
#define SAMPLE_RATE 12 //sample rate 8000, 44100, or 48000 typical in hz
#define BIT_RATE 16 //bit rate - (SampleRate * NumChannels * BitsPerSample)/8
#define BLOCK_ALIGN 20 //NumChannels * BitsPerSample/8 (unsigned char)
//1 - 8 bit mono, 2 - 8 bit stereo/16 bit mono, 4 - 16 bit stereo
#define BITS_PER_SAMPLE 22 //bits per channel 8 or 16 (unsigned char)
#endif //ABSOLUTE
#define MAX_TAG_SIZE 100 //defines the maximum number of characters to be allocated for any info string
/*
 * data on the optional INFO data chunk contains pointers to the following info:
 * title
 * author
 * genre
 * (strings are heap-allocated by loadInfo and released by freeData)
 */
typedef struct WaveInfoChunk{
uint8_t is_info; //1 for info data, 0 for no info data
uint32_t info_offset; //the file offset for the info chunk
uint32_t info_len; //length of the info chunk
//begin heap pointers
char *title;
char *artist;
char *genre;
char *creation_date;
} WaveInfoChunk;
/*
 * data structure containing the data necessary for PCM audio playback,
 * mirroring the fields of the wav "fmt " chunk
 */
typedef struct WaveFmtChunk{
uint32_t fmt_len; //fmt size: 16 for pcm
uint16_t audio_format; //1 = PCM
uint16_t num_channels; //1 for mono, 2 for stereo
uint32_t sample_rate; //44100 (CD), 48000 (DAT)
uint32_t byte_rate; //SampleRate * NumChannels * BitsPerSample/8
uint16_t block_align; //1 - 8 bit mono
//2 - 8 bit stereo/16 bit mono
//4 - 16 bit stereo
uint16_t bits_per_sample; //8 or 16
uint8_t bytes_per_sample; //derived: bits_per_sample / 8
} WaveFmtChunk;
//Bookkeeping for the "data" chunk while streaming samples out of the file.
typedef struct WaveDataChunk{
uint32_t data_offset; //the file offset for data chunk
uint32_t data_size; //data size in bytes
uint32_t current_offset; //current offset in file in bytes
//NOTE(review): 16-bit counters overflow for files with more than
//65535 samples — confirm whether larger files are expected.
uint16_t num_samples; //number of samples
uint16_t samples_left; //number of samples left to read
} WaveDataChunk;
/*
 * struct all the pertinent information for the wav file
 */
typedef struct WaveHeaderChunk{
struct WaveFmtChunk fmt; //the wave format chunk
struct WaveInfoChunk info; //the info metadata chunk
struct WaveDataChunk data; //the sample data chunk
} WaveHeaderChunk;
#endif //_WAV_DEFS_H_

View file

@ -1,389 +0,0 @@
/*
 * Sam Ellicott - 09-06-22
 * CNFA demo wave file player
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include "wavDefs.h"
// wave file player function prototypes
// Parse the RIFF container of `file` into `hdr`; returns 0 on success.
int loadHeader(FILE *file, WaveHeaderChunk *hdr);
// Read the LIST/INFO metadata tags into `hdr`; returns 0 on success.
int loadInfo(FILE *file, WaveHeaderChunk *hdr);
// Print format and metadata information for `file`; returns 0 on success.
int printInfo(FILE *file);
// Read up to `buff_len` samples into `buff`; returns samples read, -1 at EOF.
int readData(FILE *file, WaveHeaderChunk *hdr, void* buff, int buff_len);
// Release the heap strings allocated by loadInfo().
void freeData(WaveHeaderChunk *hdr);
/* ------------------------------------------ Main Application Code ------------------------------*/
// Scratch buffer size (in samples) for the channel up-mix path.
#define BUFF_SIZE 2048
// If using the shared library, don't define CNFA_IMPLEMENTATION
// (it's already in the library).
#ifndef USE_SHARED
#define CNFA_IMPLEMENTATION
#endif
#include "../CNFA.h"
int totalframesr = 0; // total frames recorded so far
int totalframesp = 0; // total frames played so far
FILE* wav_file; // the wave file currently being played
WaveHeaderChunk hdr; // parsed header of wav_file
struct CNFADriver * cnfa; // audio driver handle
short buff[BUFF_SIZE]; // intermediate read buffer for the up-mix path
int is_done; // set by the audio callback at end of file
// CNFA audio callback: streams samples from the open wave file into `out`.
// When the file's channel count matches the output, samples are read
// directly; a file with fewer channels is up-mixed by duplicating each
// sample across all output channels. On end-of-file the is_done flag
// (passed via sd->opaque) is set so main() can shut down.
// `in` (capture data) is ignored.
void Callback( struct CNFADriver * sd, short * out, short * in, int framesp, int framesr )
{
	int* is_done_ptr = (int*) sd->opaque;
	const int output_channels = sd->channelsPlay;
	const int file_channels = hdr.fmt.num_channels;
	const int output_buff_sz = framesp * output_channels;
	int br = 0;

	// if we have already ended the file, then clear the buffer and exit
	if(*is_done_ptr) {
		memset(out, 0, sizeof(short) * output_buff_sz);
		return;
	}

	totalframesr += framesr;
	totalframesp += framesp;

	if (output_channels == file_channels) {
		// channel layouts match: read interleaved samples straight into out
		br = readData(wav_file, &hdr, out, output_buff_sz);
	}
	else if (output_channels > file_channels) {
		// up-mix: read mono frames into the scratch buffer, then fan each
		// sample out across every output channel.
		short *dst = out;
		int frames_remaining = framesp;
		while (frames_remaining > 0) {
			int read_sz = (frames_remaining > BUFF_SIZE) ? BUFF_SIZE : frames_remaining;
			br = readData(wav_file, &hdr, buff, read_sz);
			if (br <= 0)
				break;
			// BUG FIX: copy only the br samples actually read (the original
			// iterated over the full request, copying uninitialized data),
			// and advance dst so successive chunks don't overwrite each
			// other at the start of the output buffer.
			for (int i = 0; i < br; ++i) {
				for (int c = 0; c < output_channels; ++c) {
					dst[output_channels*i + c] = buff[i];
				}
			}
			dst += output_channels * br;
			frames_remaining -= br;
		}
		// zero any tail we could not fill from the file
		if (frames_remaining > 0) {
			memset(dst, 0, sizeof(short) * frames_remaining * output_channels);
		}
	}
	else {
		printf("what are you doing? mono sound output?\n");
		// BUG FIX: emit silence rather than leaving stale garbage in out.
		memset(out, 0, sizeof(short) * output_buff_sz);
	}

	// end of file
	if (br < 0) {
		printf("End of wave file: setting flag\n");
		*is_done_ptr = 1;
	}
}
// Entry point: open the wave file named on the command line, print its
// metadata, then stream it through CNFA until the callback signals EOF.
// Returns 0 on success, 1 on usage / file-open / header errors.
int main (int nargs, char** args) {
	const char* filename;

	// if there is a file given on the command line play it.
	if(nargs >= 2) {
		filename = args[1];
	}
	else {
		printf("\nError, no input file\nUsage %s <wave file>\n", args[0]);
		return 1;
	}

	// BUG FIX: open in binary mode ("rb") — wav is binary data and text
	// mode corrupts it on Windows — and fail cleanly when fopen fails
	// instead of passing NULL into the parser and audio path.
	wav_file = fopen(filename, "rb");
	if (!wav_file) {
		printf("Error: could not open %s\n", filename);
		return 1;
	}

	printInfo(wav_file);
	printf("\n\n");

	printf("loading file\n");
	// BUG FIX: abort if the header cannot be parsed rather than playing
	// an uninitialized hdr.
	if (loadHeader(wav_file, &hdr) != 0) {
		fclose(wav_file);
		return 1;
	}

	printf("playing file\n");
	is_done = 0;
	cnfa = CNFAInit(
		NULL, // String, for the driver "PULSE", "WASAPI" (output only) - NULL means default.
		"cnfa_example", // Name of program to audio driver
		Callback, // CNFA callback function handle
		hdr.fmt.sample_rate, // Requested samplerate for playback
		hdr.fmt.sample_rate, // Requested samplerate for record
		2, // Number of playback channels.
		2, // Number of record channels.
		1024, // Buffer size in frames.
		NULL, // String, for the selected input device - NULL means default.
		NULL, // String, for the selected output device - NULL means default.
		&is_done // pass an integer as an "opaque" object so that CNFA can close
	);

	// Spin a progress glyph once per second until the callback flags EOF.
	int runtime = 0;
	const char* spin_glyph = "-\\|/";
	const char* glyph = spin_glyph;
	while (!is_done){
		sleep(1);
		++runtime;
		printf("\r %c ", *glyph++);
		fflush(stdout);
		if (!*glyph) {
			glyph = spin_glyph;
		}
	}

	CNFAClose(cnfa);
	fclose(wav_file);

	// BUG FIX: guard the per-second averages against division by zero when
	// playback finishes before the first one-second sleep completes.
	if (runtime < 1) {
		runtime = 1;
	}
	printf( "Received %d (%d per sec) frames\nSent %d (%d per sec) frames\n",
			totalframesr, totalframesr/runtime, // recorded samples, recorded samples/sec
			totalframesp, totalframesp/runtime ); // outputted samples, outputted samples/sec
	return 0;
}
/* ------------------------------------------ Wave File Player Code ------------------------------*/
// Print the wave file's format fields, plus the INFO metadata tags when
// present. Parses into a local header and frees any tag strings before
// returning. Returns 0 on success, 1 if the file is not a valid wav.
int printInfo(FILE *file){
	WaveHeaderChunk wav_data;

	if(loadHeader(file, &wav_data)!=0){
		printf("file invalid\n\r");
		return 1;
	}

	//print file data
	printf("\n");
	printf("Audio Format: %i \n", wav_data.fmt.audio_format);
	printf("Channels: %u \n", wav_data.fmt.num_channels);
	// BUG FIX: sample_rate is uint32_t; passing it straight to %lu is a
	// format/argument type mismatch (UB where unsigned long is 64-bit).
	printf("Sample Rate: %lu \n", (unsigned long) wav_data.fmt.sample_rate);
	printf("Block Alignment (bytes): %u \n", wav_data.fmt.block_align);
	printf("Bits-per-Sample: %u \n", wav_data.fmt.bits_per_sample);

	if(loadInfo(file, &wav_data)==0){//if there is data show it
		printf("\n");
		printf("Track name: %s \n\r", wav_data.info.title);
		printf("Artist: %s \n\r", wav_data.info.artist);
		printf("Genre: %s \n\r", wav_data.info.genre);
		printf("Creation date: %s \n\r", wav_data.info.creation_date);
	}

	freeData(&wav_data);
	return 0;
}
/*
 * Parse the RIFF container of a wav file into *hdr.
 * Verifies the RIFF/WAVE magic, then walks the top-level chunks:
 * "fmt " fills hdr->fmt, "data" records the sample region in hdr->data,
 * and a LIST chunk of INFO type records its location for loadInfo().
 * Returns 0 on success, 1 for an unopenable/non-wav file, 2 for NULL hdr.
 */
int loadHeader(FILE *file, WaveHeaderChunk *hdr){
	char chunk_id[CHUNK_ID_LEN];
	uint32_t chunk_offset=0;//keeps track of position in file - referenced to the chunk id
	uint32_t chunk_len=0;//the length of the current chunk
	uint32_t br; //fread item count; NOTE(review): captured but never checked
	//check if the file is valid first
	if(file==NULL){
		printf("Could not open file \n");
		return 1;
	}
	// check that the pointer is valid
	if (!hdr) {
		return 2;
	}
	// clear pointers so freeData() is safe even if no INFO tags are found
	hdr->info.creation_date = NULL;
	hdr->info.genre = NULL;
	hdr->info.artist = NULL;
	hdr->info.title = NULL;
	//look for RIFF/WAVE file header
	fseek(file, CHUNK_ID, SEEK_SET);
	fgets(chunk_id, CHUNK_ID_LEN, file);
	if(strncmp(chunk_id, "RIFF", 4)!=0){
		printf("File is not a wav file\n");
		return 1;
	}
	fseek(file, FORMAT, SEEK_SET);
	fgets(chunk_id, CHUNK_ID_LEN, file);
	if(strncmp(chunk_id, "WAVE", 4)!=0){
		printf("File is not a wav file \n");
		return 1;
	}
	chunk_offset+=CHUNK_DATA+4;//add the file offset of the begining of the first chunk
	//must be WAVE look for chunks
	//NOTE(review): RIFF chunks are word-aligned; an odd chunk_len should be
	//followed by a pad byte which this loop does not skip — confirm inputs.
	do{
		//get the chunk id
		fseek(file, chunk_offset+CHUNK_ID, SEEK_SET);
		fgets(chunk_id, CHUNK_ID_LEN, file);
		//get the chunk length
		fseek(file, chunk_offset+CHUNK_SIZE, SEEK_SET);
		br = fread(&chunk_len, sizeof(uint32_t), 1, file);//get length
		//check for fmt chunk
		if(strncmp(chunk_id, "fmt ", 4)==0){//if format section
			//get the format section of the file
			fseek(file, chunk_offset+AUDIO_FORMAT, SEEK_SET);
			br = fread(&hdr->fmt.audio_format, sizeof(uint16_t), 1, file);
			//number of channels
			fseek(file, chunk_offset+NUM_CHANNELS, SEEK_SET);
			br = fread(&hdr->fmt.num_channels, sizeof(uint16_t), 1, file);
			//sample rate
			fseek(file, chunk_offset+SAMPLE_RATE, SEEK_SET);
			br = fread(&hdr->fmt.sample_rate, sizeof(uint32_t), 1, file);
			//bits per channel tells if mono, stereo, or hifi stereo
			fseek(file, chunk_offset+BLOCK_ALIGN, SEEK_SET);
			br = fread(&hdr->fmt.block_align, sizeof(uint16_t), 1, file);
			//bits per channel 8 or 16
			fseek(file, chunk_offset+BITS_PER_SAMPLE, SEEK_SET);
			br = fread(&hdr->fmt.bits_per_sample, sizeof(uint16_t), 1, file);
			hdr->fmt.bytes_per_sample=hdr->fmt.bits_per_sample/8;
		}
		else if(strncmp(chunk_id, "data", 4)==0){//if chunk is of data type
			// grab data chunk location and size
			hdr->data.data_offset = chunk_offset;
			hdr->data.data_size = chunk_len;
			//NOTE(review): the uint16_t cast truncates for data chunks with
			//more than 65535 samples — confirm expected file sizes.
			hdr->data.num_samples = (uint16_t) chunk_len/hdr->fmt.bytes_per_sample;
			hdr->data.samples_left = hdr->data.num_samples;
			hdr->data.current_offset = chunk_offset+CHUNK_DATA;
		}
		else if(strncmp(chunk_id, "LIST", 4)==0){//if chunk is of LIST type
			//check if it is of info type
			fseek(file, chunk_offset+CHUNK_DATA, SEEK_SET);
			fgets(chunk_id, 5, file);
			if(strncmp(chunk_id, "INFO", 4)==0){
				// grab info location and size
				hdr->info.info_offset = chunk_offset+4;
				hdr->info.info_len = chunk_len;
				hdr->info.is_info = 1;
			}
			else{
				// no file info (artist, genre, etc)
				hdr->info.is_info = 0;
			}
		}
		else if(strncmp(chunk_id, "data", 4)!=0){
			//get length of chunk
			//NOTE(review): this re-reads chunk_len as only 2 bytes, unlike the
			//4-byte read above — looks suspect; confirm intent.
			fseek(file, chunk_offset+CHUNK_SIZE, SEEK_SET);
			br = fread(&chunk_len, 2, 1, file);//get length
		}
		//done reading skip chunk
		chunk_offset=chunk_offset+chunk_len+CHUNK_DATA;
		fseek(file, chunk_offset, SEEK_SET);
	} while(fgets(chunk_id, 5, file)!=NULL);//loop until the end of the file is reached
	return 0;
}
/*
 * reads the info chunk if available and puts data into the WaveHeader
 * structure provided. Each recognized tag (INAM/IART/IGNR/ICRD) is copied
 * into a freshly malloc'd string that the caller must release via freeData().
 * Returns 0 on success, 1 when the file carries no INFO chunk.
 */
int loadInfo(FILE *file, WaveHeaderChunk *hdr){
	char chunk_id[CHUNK_ID_LEN];
	uint32_t chunk_offset=hdr->info.info_offset+CHUNK_DATA;//current file position
	uint32_t chunk_len=0;//the length of the current chunk
	uint32_t tag_len=0; //bytes to copy for the current tag, capped at MAX_TAG_SIZE
	uint32_t br; //fread item count; NOTE(review): captured but never checked
	if(hdr->info.is_info==0){//if no data
		printf("No artist information is available for this wav file \n");
		return 1;//no data
	}
	fseek(file, chunk_offset, SEEK_SET);//go to chunk data
	while(chunk_offset-4 - hdr->info.info_offset < hdr->info.info_len){//while in info chunk
		//get the chunk id
		fseek(file, chunk_offset+CHUNK_ID, SEEK_SET);
		fgets(chunk_id, 5, file);
		//get the chunk length
		fseek(file, chunk_offset+CHUNK_SIZE, SEEK_SET);
		br = fread(&chunk_len, sizeof(uint32_t), 1, file);//get length
		//make sure the length of the tag isn't too big
		if(chunk_len > MAX_TAG_SIZE){//if too big
			tag_len=MAX_TAG_SIZE;
		}
		else{
			tag_len=chunk_len;
		}
		//go to tag information
		//NOTE(review): fgets(s, tag_len, f) reads at most tag_len-1 chars, so
		//a tag exactly chunk_len bytes long loses its final character —
		//confirm whether tag_len+1 was intended here as well as in malloc.
		fseek(file, chunk_offset+CHUNK_DATA, SEEK_SET);
		if(strncmp(chunk_id, "INAM", 4)==0){ // if is name chunk
			hdr->info.title=malloc(tag_len+1); // allocate memory for tag
			fgets(hdr->info.title, tag_len, file); // read tag
		}
		else if(strncmp(chunk_id, "IART", 4)==0){ // if is artist chunk
			hdr->info.artist=malloc(tag_len+1); // allocate memory for tag
			fgets(hdr->info.artist, tag_len, file); // read tag
		}
		else if(strncmp(chunk_id, "IGNR", 4)==0){ // if is genre chunk
			hdr->info.genre=malloc(tag_len+1); // allocate memory for tag
			fgets(hdr->info.genre, tag_len, file); // read tag
		}
		else if(strncmp(chunk_id, "ICRD", 4)==0){ // if is creation date chunk
			hdr->info.creation_date=malloc(tag_len+1); // allocate memory for tag
			fgets(hdr->info.creation_date, tag_len, file); // read tag
		}
		//done reading skip chunk
		chunk_offset=chunk_offset+chunk_len+CHUNK_DATA;
		fseek(file, chunk_offset, SEEK_SET);
	}
	return 0;
}
// Read up to buff_len samples (of hdr->fmt.bytes_per_sample bytes each)
// from the data chunk's current position into buff, advancing the
// bookkeeping in hdr->data. Returns the number of samples read, or -1
// once end-of-file has been reached (or on a NULL file/hdr).
int readData(FILE *file, WaveHeaderChunk *hdr, void* buff, int buff_len){
	// BUG FIX: validate the pointers BEFORE dereferencing hdr (the original
	// computed hdr->fmt.bytes_per_sample — into an unused variable — ahead
	// of its own NULL check).
	if(!hdr) {
		printf("Error: No valid header\n");
		return -1;
	}
	if(!file) {
		printf("Error: No valid file\n");
		return -1;
	}

	fseek(file, hdr->data.current_offset, SEEK_SET);
	// BUG FIX: keep the count in a signed int so the -1 EOF sentinel is
	// well-defined (the original stored it in a uint32_t and relied on an
	// implementation-defined conversion when returning).
	int br = (int) fread(buff, hdr->fmt.bytes_per_sample, buff_len, file);
	int bytes_read = hdr->fmt.bytes_per_sample*br;

	hdr->data.samples_left -= br;
	hdr->data.current_offset += bytes_read;

	if(feof(file)){
		br = -1;
	}
	return br;
}
// Release the heap-allocated INFO tag strings held by hdr and reset the
// pointers so a second call is harmless. free(NULL) is a no-op, so the
// unconditional frees behave exactly like the original guarded version.
void freeData(WaveHeaderChunk *hdr) {
	free(hdr->info.title);
	hdr->info.title = NULL;

	free(hdr->info.artist);
	hdr->info.artist = NULL;

	free(hdr->info.genre);
	hdr->info.genre = NULL;

	free(hdr->info.creation_date);
	hdr->info.creation_date = NULL;
}