Move dead submodules in-tree

Signed-off-by: swurl <swurl@swurl.xyz>
This commit is contained in:
swurl 2025-05-31 02:33:02 -04:00
parent c0cceff365
commit 6c655321e6
No known key found for this signature in database
GPG key ID: A5A7629F109C8FD1
4081 changed files with 1185566 additions and 45 deletions

View file

@ -0,0 +1,13 @@
.gradle
/local.properties
/.idea/workspace.xml
/.idea/libraries
.DS_Store
/build/
.idea/
/app/build/
/app/release/
/app/debug/
/app/app.iml
*.iml
/app/externalNativeBuild/

View file

@ -0,0 +1,7 @@
status: PUBLISHED
technologies: [Android, NDK]
categories: [NDK, C++]
languages: [C++, Java]
solutions: [Mobile]
github: googlesamples/android-ndk
license: apache2

View file

@ -0,0 +1,6 @@
# Oboe Tester
OboeTester is an app that can be used to test many of the features of Oboe, AAudio and OpenSL ES.
It can also be used to measure device latency and glitches.
# [OboeTester Documentation](docs)

View file

@ -0,0 +1,36 @@
# Build script for the OboeTester native library (liboboetester.so).
#
# target_link_options() below requires CMake >= 3.13; the previous minimum
# of 3.4.1 predates that command and would fail at configure time.
cmake_minimum_required(VERSION 3.13)

# NOTE(review): legacy link search path; likely redundant now that Oboe is
# linked as a CMake target below -- confirm before removing.
link_directories(${CMAKE_CURRENT_LIST_DIR}/..)

# Increment this number when adding files to OboeTester => 106
# The change in this file will help Android Studio resync
# and generate new build files that reference the new code.
#
# CONFIGURE_DEPENDS (CMake >= 3.12) makes the build re-check the glob so
# newly added sources are picked up without a manual reconfigure.
file(GLOB_RECURSE app_native_sources CONFIGURE_DEPENDS src/main/cpp/*)

### Name must match loadLibrary() call in MainActivity.java
add_library(oboetester SHARED ${app_native_sources})

# Compiler settings scoped to this target (instead of the global
# CMAKE_CXX_FLAGS variables) so they cannot leak into the Oboe subproject.
target_compile_features(oboetester PRIVATE cxx_std_17)
target_compile_options(oboetester PRIVATE
    -Werror
    -Wall
    -fvisibility=hidden
    $<$<CONFIG:Debug>:-O2>
    $<$<CONFIG:Release>:-O3>
)

### INCLUDE OBOE LIBRARY ###
# Set the path to the Oboe library directory
set(OBOE_DIR ${CMAKE_CURRENT_SOURCE_DIR}/../../..)

# Add the Oboe library as a subproject. Since Oboe is an out-of-tree source
# library we must also specify a binary directory.
add_subdirectory(${OBOE_DIR} ./oboe-bin)

# Specify the path to the Oboe header files and the source,
# scoped to this target only.
target_include_directories(oboetester PRIVATE
    ${OBOE_DIR}/include
    ${OBOE_DIR}/src
)
### END OBOE INCLUDE SECTION ###

# Link against the Android log library, the Oboe target built above,
# and libatomic.
target_link_libraries(oboetester PRIVATE log oboe atomic)

# Support devices with 16 KB memory pages.
target_link_options(oboetester PRIVATE "-Wl,-z,max-page-size=16384")

View file

@ -0,0 +1,44 @@
// Gradle build configuration for the OboeTester Android application.
apply plugin: 'com.android.application'

android {
    compileSdkVersion 34

    defaultConfig {
        applicationId = "com.mobileer.oboetester"
        minSdkVersion 23
        targetSdkVersion 34
        versionCode 95
        versionName "2.7.6"
        testInstrumentationRunner "android.support.test.runner.AndroidJUnitRunner"
        externalNativeBuild {
            cmake {
                // Must match the C++ standard used by the native sources.
                cppFlags "-std=c++17"
                // Build the native library for all common ABIs.
                abiFilters "x86", "x86_64", "armeabi-v7a", "arm64-v8a"
            }
        }
    }

    buildTypes {
        release {
            minifyEnabled false
            proguardFiles getDefaultProguardFile('proguard-android.txt'), 'proguard-rules.pro'
        }
        debug {
            // Allow native (JNI) debugging in debug builds.
            jniDebuggable true
        }
    }

    externalNativeBuild {
        cmake {
            // Build script for the native oboetester library.
            path "CMakeLists.txt"
        }
    }

    namespace 'com.mobileer.oboetester'
}

dependencies {
    implementation fileTree(include: ['*.jar'], dir: 'libs')
    implementation "androidx.core:core-ktx:1.9.0"
    implementation 'androidx.constraintlayout:constraintlayout:2.1.4'
    implementation 'androidx.appcompat:appcompat:1.6.1'
    androidTestImplementation 'androidx.test.ext:junit:1.1.5'
    androidTestImplementation 'androidx.test.espresso:espresso-core:3.5.1'
}

View file

@ -0,0 +1,6 @@
#Thu Apr 11 16:29:30 PDT 2019
# Gradle wrapper configuration: where the wrapper downloads and caches the
# Gradle distribution used to build this project.
distributionBase=GRADLE_USER_HOME
distributionPath=wrapper/dists
zipStoreBase=GRADLE_USER_HOME
zipStorePath=wrapper/dists
# NOTE(review): Gradle 4.10.1 is very old; the settings in build.gradle
# (compileSdk 34, namespace) normally require a much newer Gradle/AGP --
# confirm this wrapper file is actually the one in use.
distributionUrl=https\://services.gradle.org/distributions/gradle-4.10.1-all.zip

View file

@ -0,0 +1,172 @@
#!/usr/bin/env sh

##############################################################################
##
##  Gradle start up script for UN*X
##
##############################################################################

# Attempt to set APP_HOME
# Resolve links: $0 may be a link
PRG="$0"
# Need this for relative symlinks.
while [ -h "$PRG" ] ; do
    ls=`ls -ld "$PRG"`
    link=`expr "$ls" : '.*-> \(.*\)$'`
    if expr "$link" : '/.*' > /dev/null; then
        PRG="$link"
    else
        PRG=`dirname "$PRG"`"/$link"
    fi
done
# APP_HOME is the directory containing this (fully resolved) script.
SAVED="`pwd`"
cd "`dirname \"$PRG\"`/" >/dev/null
APP_HOME="`pwd -P`"
cd "$SAVED" >/dev/null

APP_NAME="Gradle"
APP_BASE_NAME=`basename "$0"`

# Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script.
DEFAULT_JVM_OPTS=""

# Use the maximum available, or set MAX_FD != -1 to use that value.
MAX_FD="maximum"

# Print a warning message to stdout.
warn () {
    echo "$*"
}

# Print a message and abort the script with exit status 1.
die () {
    echo
    echo "$*"
    echo
    exit 1
}

# OS specific support (must be 'true' or 'false').
cygwin=false
msys=false
darwin=false
nonstop=false
case "`uname`" in
  CYGWIN* )
    cygwin=true
    ;;
  Darwin* )
    darwin=true
    ;;
  MINGW* )
    msys=true
    ;;
  NONSTOP* )
    nonstop=true
    ;;
esac

CLASSPATH=$APP_HOME/gradle/wrapper/gradle-wrapper.jar

# Determine the Java command to use to start the JVM.
if [ -n "$JAVA_HOME" ] ; then
    if [ -x "$JAVA_HOME/jre/sh/java" ] ; then
        # IBM's JDK on AIX uses strange locations for the executables
        JAVACMD="$JAVA_HOME/jre/sh/java"
    else
        JAVACMD="$JAVA_HOME/bin/java"
    fi
    if [ ! -x "$JAVACMD" ] ; then
        die "ERROR: JAVA_HOME is set to an invalid directory: $JAVA_HOME
Please set the JAVA_HOME variable in your environment to match the
location of your Java installation."
    fi
else
    JAVACMD="java"
    which java >/dev/null 2>&1 || die "ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH.
Please set the JAVA_HOME variable in your environment to match the
location of your Java installation."
fi

# Increase the maximum file descriptors if we can.
if [ "$cygwin" = "false" -a "$darwin" = "false" -a "$nonstop" = "false" ] ; then
    MAX_FD_LIMIT=`ulimit -H -n`
    if [ $? -eq 0 ] ; then
        if [ "$MAX_FD" = "maximum" -o "$MAX_FD" = "max" ] ; then
            MAX_FD="$MAX_FD_LIMIT"
        fi
        ulimit -n $MAX_FD
        if [ $? -ne 0 ] ; then
            warn "Could not set maximum file descriptor limit: $MAX_FD"
        fi
    else
        warn "Could not query maximum file descriptor limit: $MAX_FD_LIMIT"
    fi
fi

# For Darwin, add options to specify how the application appears in the dock
if $darwin; then
    GRADLE_OPTS="$GRADLE_OPTS \"-Xdock:name=$APP_NAME\" \"-Xdock:icon=$APP_HOME/media/gradle.icns\""
fi

# For Cygwin, switch paths to Windows format before running java
if $cygwin ; then
    APP_HOME=`cygpath --path --mixed "$APP_HOME"`
    CLASSPATH=`cygpath --path --mixed "$CLASSPATH"`
    JAVACMD=`cygpath --unix "$JAVACMD"`

    # We build the pattern for arguments to be converted via cygpath
    ROOTDIRSRAW=`find -L / -maxdepth 1 -mindepth 1 -type d 2>/dev/null`
    SEP=""
    for dir in $ROOTDIRSRAW ; do
        ROOTDIRS="$ROOTDIRS$SEP$dir"
        SEP="|"
    done
    OURCYGPATTERN="(^($ROOTDIRS))"
    # Add a user-defined pattern to the cygpath arguments
    if [ "$GRADLE_CYGPATTERN" != "" ] ; then
        OURCYGPATTERN="$OURCYGPATTERN|($GRADLE_CYGPATTERN)"
    fi
    # Now convert the arguments - kludge to limit ourselves to /bin/sh
    i=0
    for arg in "$@" ; do
        CHECK=`echo "$arg"|egrep -c "$OURCYGPATTERN" -`
        CHECK2=`echo "$arg"|egrep -c "^-"` ### Determine if an option
        if [ $CHECK -ne 0 ] && [ $CHECK2 -eq 0 ] ; then ### Added a condition
            # Argument looks like a path under a root dir: convert it.
            eval `echo args$i`=`cygpath --path --ignore --mixed "$arg"`
        else
            eval `echo args$i`="\"$arg\""
        fi
        i=$((i+1))
    done
    # Rebuild the positional parameters from the converted argN variables
    # (plain /bin/sh has no arrays, hence this fixed-arity case).
    case $i in
        (0) set -- ;;
        (1) set -- "$args0" ;;
        (2) set -- "$args0" "$args1" ;;
        (3) set -- "$args0" "$args1" "$args2" ;;
        (4) set -- "$args0" "$args1" "$args2" "$args3" ;;
        (5) set -- "$args0" "$args1" "$args2" "$args3" "$args4" ;;
        (6) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" ;;
        (7) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" ;;
        (8) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" "$args7" ;;
        (9) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" "$args7" "$args8" ;;
    esac
fi

# Escape application args
# Wraps each argument in single quotes (escaping embedded quotes) so the
# eval below preserves arguments containing spaces.
save () {
    for i do printf %s\\n "$i" | sed "s/'/'\\\\''/g;1s/^/'/;\$s/\$/' \\\\/" ; done
    echo " "
}
APP_ARGS=$(save "$@")

# Collect all arguments for the java command, following the shell quoting and substitution rules
eval set -- $DEFAULT_JVM_OPTS $JAVA_OPTS $GRADLE_OPTS "\"-Dorg.gradle.appname=$APP_BASE_NAME\"" -classpath "\"$CLASSPATH\"" org.gradle.wrapper.GradleWrapperMain "$APP_ARGS"

# by default we should be in the correct project dir, but when run from Finder on Mac, the cwd is wrong
if [ "$(uname)" = "Darwin" ] && [ "$HOME" = "$PWD" ]; then
    cd "$(dirname "$0")"
fi

exec "$JAVACMD" "$@"

View file

@ -0,0 +1,84 @@
@if "%DEBUG%" == "" @echo off
@rem ##########################################################################
@rem
@rem  Gradle startup script for Windows
@rem
@rem ##########################################################################

@rem Set local scope for the variables with windows NT shell
if "%OS%"=="Windows_NT" setlocal

@rem Resolve the directory containing this script.
set DIRNAME=%~dp0
if "%DIRNAME%" == "" set DIRNAME=.
set APP_BASE_NAME=%~n0
set APP_HOME=%DIRNAME%

@rem Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script.
set DEFAULT_JVM_OPTS=

@rem Find java.exe
if defined JAVA_HOME goto findJavaFromJavaHome

@rem No JAVA_HOME: probe for java.exe on the PATH.
set JAVA_EXE=java.exe
%JAVA_EXE% -version >NUL 2>&1
if "%ERRORLEVEL%" == "0" goto init

echo.
echo ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH.
echo.
echo Please set the JAVA_HOME variable in your environment to match the
echo location of your Java installation.

goto fail

:findJavaFromJavaHome
@rem Strip any quotes from JAVA_HOME, then use its bundled java.exe.
set JAVA_HOME=%JAVA_HOME:"=%
set JAVA_EXE=%JAVA_HOME%/bin/java.exe

if exist "%JAVA_EXE%" goto init

echo.
echo ERROR: JAVA_HOME is set to an invalid directory: %JAVA_HOME%
echo.
echo Please set the JAVA_HOME variable in your environment to match the
echo location of your Java installation.

goto fail

:init
@rem Get command-line arguments, handling Windows variants
if not "%OS%" == "Windows_NT" goto win9xME_args

:win9xME_args
@rem Slurp the command line arguments.
set CMD_LINE_ARGS=
set _SKIP=2

:win9xME_args_slurp
if "x%~1" == "x" goto execute

set CMD_LINE_ARGS=%*

:execute
@rem Setup the command line
set CLASSPATH=%APP_HOME%\gradle\wrapper\gradle-wrapper.jar

@rem Execute Gradle
"%JAVA_EXE%" %DEFAULT_JVM_OPTS% %JAVA_OPTS% %GRADLE_OPTS% "-Dorg.gradle.appname=%APP_BASE_NAME%" -classpath "%CLASSPATH%" org.gradle.wrapper.GradleWrapperMain %CMD_LINE_ARGS%

:end
@rem End local scope for the variables with windows NT shell
if "%ERRORLEVEL%"=="0" goto mainEnd

:fail
rem Set variable GRADLE_EXIT_CONSOLE if you need the _script_ return code instead of
rem the _cmd.exe /c_ return code!
if not "" == "%GRADLE_EXIT_CONSOLE%" exit 1
exit /b 1

:mainEnd
if "%OS%"=="Windows_NT" endlocal

:omega

View file

@ -0,0 +1,17 @@
# Add project specific ProGuard rules here.
# By default, the flags in this file are appended to flags specified
# in /Users/gfan/dev/android-sdk/tools/proguard/proguard-android.txt
# You can edit the include path and order by changing the proguardFiles
# directive in build.gradle.
#
# For more details, see
# http://developer.android.com/guide/developing/tools/proguard.html
# Add any project specific keep options here:
# If your project uses WebView with JS, uncomment the following
# and specify the fully qualified class name to the JavaScript interface
# class:
#-keepclassmembers class fqcn.of.javascript.interface.for.webview {
# public *;
#}

View file

@ -0,0 +1,162 @@
<?xml version="1.0" encoding="utf-8"?>
<!-- Manifest for the OboeTester application. -->
<manifest xmlns:android="http://schemas.android.com/apk/res/android">

    <!-- Hardware/software features. Only audio output is required; the rest
         are marked not-required so the app can still be installed on devices
         that lack them (e.g. TVs without microphones or touchscreens). -->
    <uses-feature
        android:name="android.hardware.microphone"
        android:required="false" />
    <uses-feature
        android:name="android.hardware.audio.output"
        android:required="true" />
    <uses-feature
        android:name="android.hardware.touchscreen"
        android:required="false" />
    <uses-feature
        android:name="android.software.midi"
        android:required="false" />
    <uses-feature
        android:name="android.software.leanback"
        android:required="false" />

    <!-- Permissions for recording, audio routing control, and running
         playback/recording in a foreground service. -->
    <uses-permission android:name="android.permission.RECORD_AUDIO" />
    <uses-permission android:name="android.permission.MODIFY_AUDIO_SETTINGS" />
    <uses-permission android:name="android.permission.INTERNET" />
    <uses-permission android:name="android.permission.READ_PHONE_STATE" />
    <uses-permission android:name="android.permission.FOREGROUND_SERVICE" />
    <uses-permission android:name="android.permission.FOREGROUND_SERVICE_MEDIA_PLAYBACK" />
    <uses-permission android:name="android.permission.FOREGROUND_SERVICE_MICROPHONE" />

    <application
        android:icon="@mipmap/ic_launcher"
        android:label="@string/app_name"
        android:supportsRtl="true"
        android:theme="@style/AppTheme"
        android:requestLegacyExternalStorage="true"
        android:banner="@mipmap/ic_launcher">

        <!-- Main launcher activity; LEANBACK_LAUNCHER makes it visible on
             Android TV home screens as well. -->
        <activity
            android:name=".MainActivity"
            android:launchMode="singleTask"
            android:screenOrientation="portrait"
            android:exported="true">
            <intent-filter>
                <action android:name="android.intent.action.MAIN" />
                <category android:name="android.intent.category.LAUNCHER" />
                <category android:name="android.intent.category.LEANBACK_LAUNCHER" />
            </intent-filter>
        </activity>

        <!-- One activity per test screen. -->
        <activity
            android:name=".TestOutputActivity"
            android:label="@string/title_activity_test_output"
            android:screenOrientation="portrait" />
        <activity
            android:name=".TestInputActivity"
            android:label="@string/title_activity_test_input"
            android:screenOrientation="portrait" />
        <activity
            android:name=".TapToToneActivity"
            android:label="@string/title_activity_output_latency"
            android:screenOrientation="portrait" />
        <activity
            android:name=".RecorderActivity"
            android:label="@string/title_activity_recorder"
            android:screenOrientation="portrait" />
        <activity
            android:name=".EchoActivity"
            android:label="@string/title_activity_echo"
            android:screenOrientation="portrait" />
        <activity
            android:name=".RoundTripLatencyActivity"
            android:label="@string/title_activity_rt_latency"
            android:screenOrientation="portrait" />
        <activity
            android:name=".ManualGlitchActivity"
            android:label="@string/title_activity_glitches"
            android:screenOrientation="portrait" />
        <activity
            android:name=".AutomatedGlitchActivity"
            android:label="@string/title_activity_auto_glitches"
            android:screenOrientation="portrait" />
        <activity
            android:name=".TestDisconnectActivity"
            android:label="@string/title_test_disconnect"
            android:screenOrientation="portrait" />
        <activity
            android:name=".DeviceReportActivity"
            android:label="@string/title_report_devices"
            android:screenOrientation="portrait" />
        <activity
            android:name=".TestDataPathsActivity"
            android:label="@string/title_data_paths"
            android:screenOrientation="portrait" />
        <activity
            android:name=".ExtraTestsActivity"
            android:exported="true"
            android:label="@string/title_extra_tests"
            android:screenOrientation="portrait" />
        <activity
            android:name=".ExternalTapToToneActivity"
            android:label="@string/title_external_tap"
            android:exported="true"
            android:screenOrientation="portrait" />
        <activity
            android:name=".TestPlugLatencyActivity"
            android:label="@string/title_plug_latency"
            android:exported="true"
            android:screenOrientation="portrait" />
        <activity
            android:name=".TestErrorCallbackActivity"
            android:label="@string/title_error_callback"
            android:exported="true"
            android:screenOrientation="portrait" />
        <activity
            android:name=".TestRouteDuringCallbackActivity"
            android:label="@string/title_route_during_callback"
            android:exported="true"
            android:screenOrientation="portrait" />
        <activity
            android:name=".DynamicWorkloadActivity"
            android:label="@string/title_dynamic_load"
            android:exported="true"
            android:screenOrientation="portrait" />
        <activity
            android:name=".TestColdStartLatencyActivity"
            android:label="@string/title_cold_start_latency"
            android:exported="true"
            android:screenOrientation="portrait" />
        <activity
            android:name=".TestRapidCycleActivity"
            android:label="@string/title_rapid_cycle"
            android:exported="true"
            android:screenOrientation="portrait" />

        <!-- MIDI device service used by the tap-to-tone test. -->
        <service
            android:name=".MidiTapTester"
            android:permission="android.permission.BIND_MIDI_DEVICE_SERVICE"
            android:exported="true">
            <intent-filter>
                <action android:name="android.media.midi.MidiDeviceService" />
            </intent-filter>
            <meta-data
                android:name="android.media.midi.MidiDeviceService"
                android:resource="@xml/service_device_info" />
        </service>

        <!-- Keeps audio running while the app is in the background. -->
        <service
            android:name=".AudioForegroundService"
            android:foregroundServiceType="mediaPlayback|microphone"
            android:exported="false">
        </service>

        <!-- FileProvider for sharing result files with other apps. -->
        <provider
            android:name="androidx.core.content.FileProvider"
            android:authorities="${applicationId}.provider"
            android:exported="false"
            android:grantUriPermissions="true">
            <meta-data
                android:name="android.support.FILE_PROVIDER_PATHS"
                android:resource="@xml/provider_paths" />
        </provider>
    </application>

</manifest>

View file

@ -0,0 +1,40 @@
/*
* Copyright 2016 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cstring>
#include <sched.h>
#include "common/OboeDebug.h"
#include "oboe/Oboe.h"
#include "AudioStreamGateway.h"
using namespace oboe::flowgraph;
// Audio callback invoked by Oboe whenever the stream needs more data.
// Runs the test hooks inherited from OboeTesterStreamCallback, then pulls
// numFrames of audio from the attached flowgraph sink (if any) into
// audioData. Always asks the stream to keep running.
oboe::DataCallbackResult AudioStreamGateway::onAudioReady(
        oboe::AudioStream *audioStream,
        void *audioData,
        int numFrames) {
    maybeHang(getNanoseconds());  // test hook from base class -- see OboeTesterStreamCallback
    printScheduler();             // test hook from base class -- see OboeTesterStreamCallback
    if (mAudioSink != nullptr) {
        // Pull audio through the flowgraph into the stream's buffer.
        mAudioSink->read(audioData, numFrames);
    }
    return oboe::DataCallbackResult::Continue;
}

View file

@ -0,0 +1,55 @@
/*
* Copyright 2016 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef NATIVEOBOE_AUDIOGRAPHRUNNER_H
#define NATIVEOBOE_AUDIOGRAPHRUNNER_H
#include <unistd.h>
#include <sys/types.h>
#include "flowgraph/FlowGraphNode.h"
#include "oboe/Oboe.h"
#include "OboeTesterStreamCallback.h"
using namespace oboe::flowgraph;
/**
 * Bridge between an audio flowgraph and an audio device.
 * Pass in an AudioSink and then pass
 * this object to the AudioStreamBuilder as a callback.
 */
class AudioStreamGateway : public OboeTesterStreamCallback {
public:
    virtual ~AudioStreamGateway() = default;

    /**
     * Attach the flowgraph sink that will supply audio to the stream.
     * Shared ownership is taken, so the sink stays alive for the callback.
     */
    void setAudioSink(std::shared_ptr<oboe::flowgraph::FlowGraphSink> sink) {
        mAudioSink = sink;
    }

    /**
     * Called by Oboe when the stream is ready to process audio.
     */
    oboe::DataCallbackResult onAudioReady(
            oboe::AudioStream *audioStream,
            void *audioData,
            int numFrames) override;

private:
    // Source of audio for the callback; may be null until setAudioSink().
    std::shared_ptr<oboe::flowgraph::FlowGraphSink> mAudioSink;
};
#endif //NATIVEOBOE_AUDIOGRAPHRUNNER_H

View file

@ -0,0 +1,107 @@
/*
* Copyright 2021 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "FormatConverterBox.h"
// Construct a converter that can translate up to maxSamples samples
// between inputFormat and outputFormat.
//
// Both internal buffers are sized for the widest supported sample type
// (int32_t) so they can hold any format. A single-channel flowgraph
// source/sink pair matching the two formats is created and connected.
// NOTE(review): for compressed formats (MP3, AAC*, OPUS) no source/sink is
// created, leaving mSource/mSink null -- convert() must not be called in
// that configuration.
FormatConverterBox::FormatConverterBox(int32_t maxSamples,
                                       oboe::AudioFormat inputFormat,
                                       oboe::AudioFormat outputFormat) {
    mInputFormat = inputFormat;
    mOutputFormat = outputFormat;

    mMaxSamples = maxSamples;
    mInputBuffer = std::make_unique<uint8_t[]>(maxSamples * sizeof(int32_t));
    mOutputBuffer = std::make_unique<uint8_t[]>(maxSamples * sizeof(int32_t));

    // Pick a flowgraph source node that reads samples in the input format.
    // Float is also used for Invalid/Unspecified.
    mSource.reset();
    switch (mInputFormat) {
        case oboe::AudioFormat::I16:
        case oboe::AudioFormat::IEC61937:
            mSource = std::make_unique<oboe::flowgraph::SourceI16>(1);
            break;
        case oboe::AudioFormat::I24:
            mSource = std::make_unique<oboe::flowgraph::SourceI24>(1);
            break;
        case oboe::AudioFormat::I32:
            mSource = std::make_unique<oboe::flowgraph::SourceI32>(1);
            break;
        case oboe::AudioFormat::Float:
        case oboe::AudioFormat::Invalid:
        case oboe::AudioFormat::Unspecified:
            mSource = std::make_unique<oboe::flowgraph::SourceFloat>(1);
            break;
        case oboe::AudioFormat::MP3:
        case oboe::AudioFormat::AAC_LC:
        case oboe::AudioFormat::AAC_HE_V1:
        case oboe::AudioFormat::AAC_HE_V2:
        case oboe::AudioFormat::AAC_ELD:
        case oboe::AudioFormat::AAC_XHE:
        case oboe::AudioFormat::OPUS:
            // Compressed formats are not convertible; mSource stays null.
            break;
    }

    // Pick a flowgraph sink node that writes samples in the output format.
    mSink.reset();
    switch (mOutputFormat) {
        case oboe::AudioFormat::I16:
        case oboe::AudioFormat::IEC61937:
            mSink = std::make_unique<oboe::flowgraph::SinkI16>(1);
            break;
        case oboe::AudioFormat::I24:
            mSink = std::make_unique<oboe::flowgraph::SinkI24>(1);
            break;
        case oboe::AudioFormat::I32:
            mSink = std::make_unique<oboe::flowgraph::SinkI32>(1);
            break;
        case oboe::AudioFormat::Float:
        case oboe::AudioFormat::Invalid:
        case oboe::AudioFormat::Unspecified:
            mSink = std::make_unique<oboe::flowgraph::SinkFloat>(1);
            break;
        case oboe::AudioFormat::MP3:
        case oboe::AudioFormat::AAC_LC:
        case oboe::AudioFormat::AAC_HE_V1:
        case oboe::AudioFormat::AAC_HE_V2:
        case oboe::AudioFormat::AAC_ELD:
        case oboe::AudioFormat::AAC_XHE:
        case oboe::AudioFormat::OPUS:
            // Compressed formats are not convertible; mSink stays null.
            break;
    }

    // Wire source -> sink so mSink->read() pulls data through the converter.
    if (mSource && mSink) {
        mSource->output.connect(&mSink->input);
        mSink->pullReset();
    }
}
// Convert numSamples from the internal input buffer into the internal
// output buffer. numSamples must not exceed the maxSamples capacity.
int32_t FormatConverterBox::convertInternalBuffers(int32_t numSamples) {
    assert(numSamples <= mMaxSamples);
    return convert(getOutputBuffer(), numSamples, getInputBuffer());
}
// Convert numSamples from an external input buffer into the internal
// output buffer. numSamples must not exceed the maxSamples capacity.
int32_t FormatConverterBox::convertToInternalOutput(int32_t numSamples, const void *inputBuffer) {
    assert(numSamples <= mMaxSamples);
    return convert(getOutputBuffer(), numSamples, inputBuffer);
}
// Convert numSamples from the internal input buffer into an external
// output buffer. numSamples must not exceed the maxSamples capacity.
int32_t FormatConverterBox::convertFromInternalInput(void *outputBuffer, int32_t numSamples) {
    assert(numSamples <= mMaxSamples);
    return convert(outputBuffer, numSamples, getInputBuffer());
}
// Core conversion: feed inputBuffer to the flowgraph source and pull the
// converted samples out through the sink into outputBuffer.
// Returns the number of samples read from the sink.
// NOTE(review): assumes mSource and mSink were created by the constructor
// (i.e. neither format was a compressed format) -- otherwise this
// dereferences null.
int32_t FormatConverterBox::convert(void *outputBuffer, int32_t numSamples, const void *inputBuffer) {
    mSource->setData(inputBuffer, numSamples);
    return mSink->read(outputBuffer, numSamples);
}

View file

@ -0,0 +1,102 @@
/*
* Copyright 2021 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef OBOETESTER_FORMAT_CONVERTER_BOX_H
#define OBOETESTER_FORMAT_CONVERTER_BOX_H
#include <unistd.h>
#include <sys/types.h>
#include "oboe/Oboe.h"
#include "flowgraph/SinkFloat.h"
#include "flowgraph/SinkI16.h"
#include "flowgraph/SinkI24.h"
#include "flowgraph/SinkI32.h"
#include "flowgraph/SourceFloat.h"
#include "flowgraph/SourceI16.h"
#include "flowgraph/SourceI24.h"
#include "flowgraph/SourceI32.h"
/**
 * Use flowgraph modules to convert between the various data formats.
 *
 * Note that this does not do channel conversions.
 */
class FormatConverterBox {
public:
    FormatConverterBox(int32_t maxSamples,
                       oboe::AudioFormat inputFormat,
                       oboe::AudioFormat outputFormat);

    /**
     * @return internal buffer used to store output data
     */
    void *getOutputBuffer() {
        return (void *) mOutputBuffer.get();
    };

    /**
     * @return internal buffer used to store input data
     */
    void *getInputBuffer() {
        return (void *) mInputBuffer.get();
    };

    /** Convert the data from inputFormat to outputFormat
     * using both internal buffers.
     */
    int32_t convertInternalBuffers(int32_t numSamples);

    /**
     * Convert data from external buffer into internal output buffer.
     * @param numSamples number of samples to convert
     * @param inputBuffer source data in inputFormat
     * @return number of samples converted
     */
    int32_t convertToInternalOutput(int32_t numSamples, const void *inputBuffer);

    /**
     * Convert data from internal input buffer into external output buffer.
     * @param outputBuffer destination for data in outputFormat
     * @param numSamples number of samples to convert
     * @return number of samples converted
     */
    int32_t convertFromInternalInput(void *outputBuffer, int32_t numSamples);

    /**
     * Convert data formats between the specified external buffers.
     * @param outputBuffer destination for data in outputFormat
     * @param numSamples number of samples to convert
     * @param inputBuffer source data in inputFormat
     * @return number of samples converted
     */
    int32_t convert(void *outputBuffer, int32_t numSamples, const void *inputBuffer);

private:
    oboe::AudioFormat mInputFormat{oboe::AudioFormat::Invalid};
    oboe::AudioFormat mOutputFormat{oboe::AudioFormat::Invalid};

    int32_t mMaxSamples = 0;  // capacity of each internal buffer, in samples
    // Both buffers are sized maxSamples * sizeof(int32_t) so any format fits.
    std::unique_ptr<uint8_t[]> mInputBuffer;
    std::unique_ptr<uint8_t[]> mOutputBuffer;

    // Flowgraph nodes created by the constructor; null for compressed formats.
    std::unique_ptr<oboe::flowgraph::FlowGraphSourceBuffered> mSource;
    std::unique_ptr<oboe::flowgraph::FlowGraphSink> mSink;
};
#endif //OBOETESTER_FORMAT_CONVERTER_BOX_H

View file

@ -0,0 +1,78 @@
/*
* Copyright 2019 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "common/OboeDebug.h"
#include "FullDuplexAnalyzer.h"
// Prepare the loopback processor for a fresh test run (sample rate and
// internal state), invalidate the previous write/read snapshot, then start
// the underlying full duplex streams.
oboe::Result FullDuplexAnalyzer::start() {
    getLoopbackProcessor()->setSampleRate(getOutputStream()->getSampleRate());
    getLoopbackProcessor()->prepareToTest();
    mWriteReadDeltaValid = false;
    return FullDuplexStreamWithConversion::start();
}
// Process one callback's worth of full duplex audio: snapshot the stream
// frame counters, run input/output through the loopback processor, then
// optionally interleave the selected output and input channels into a
// two-channel recording ([0]=output, [1]=input) for later analysis.
oboe::DataCallbackResult FullDuplexAnalyzer::onBothStreamsReadyFloat(
        const float *inputData,
        int numInputFrames,
        float *outputData,
        int numOutputFrames) {
    int32_t inputStride = getInputStream()->getChannelCount();
    int32_t outputStride = getOutputStream()->getChannelCount();
    auto *inputFloat = static_cast<const float *>(inputData);
    float *outputFloat = outputData;

    // Get atomic snapshot of the relative frame positions so they
    // can be used to calculate timestamp latency.
    int64_t framesRead = getInputStream()->getFramesRead();
    int64_t framesWritten = getOutputStream()->getFramesWritten();
    mWriteReadDelta = framesWritten - framesRead;
    mWriteReadDeltaValid = true;

    // Result deliberately ignored; the processor accumulates its own state.
    (void) getLoopbackProcessor()->process(inputFloat, inputStride, numInputFrames,
                                           outputFloat, outputStride, numOutputFrames);

    // Save data for later analysis or for writing to a WAVE file.
    if (mRecording != nullptr) {
        float buffer[2];  // one recorded frame: [0]=output sample, [1]=input sample
        int numBoth = std::min(numInputFrames, numOutputFrames);
        // Offset to the selected channels that we are analyzing.
        inputFloat += getLoopbackProcessor()->getInputChannel();
        outputFloat += getLoopbackProcessor()->getOutputChannel();
        for (int i = 0; i < numBoth; i++) {
            buffer[0] = *outputFloat;
            outputFloat += outputStride;
            buffer[1] = *inputFloat;
            inputFloat += inputStride;
            mRecording->write(buffer, 1);
        }
        // Handle mismatch in numFrames.
        const float gapMarker = -0.9f; // Recognizable value so we can tell underruns from DSP gaps.
        buffer[0] = gapMarker; // gap in output
        for (int i = numBoth; i < numInputFrames; i++) {
            buffer[1] = *inputFloat;
            inputFloat += inputStride;
            mRecording->write(buffer, 1);
        }
        buffer[1] = gapMarker; // gap in input
        for (int i = numBoth; i < numOutputFrames; i++) {
            buffer[0] = *outputFloat;
            outputFloat += outputStride;
            mRecording->write(buffer, 1);
        }
    }
    return oboe::DataCallbackResult::Continue;
}; // NOTE(review): stray semicolon after function body -- harmless, could be removed.

View file

@ -0,0 +1,74 @@
/*
* Copyright 2019 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef OBOETESTER_FULL_DUPLEX_ANALYZER_H
#define OBOETESTER_FULL_DUPLEX_ANALYZER_H
#include <unistd.h>
#include <sys/types.h>
#include "oboe/Oboe.h"
#include "analyzer/LatencyAnalyzer.h"
#include "FullDuplexStreamWithConversion.h"
#include "MultiChannelRecording.h"
/**
 * Full duplex stream that routes its input and output signals through a
 * LoopbackProcessor and can optionally record both channels for later
 * analysis (see onBothStreamsReadyFloat in the .cpp).
 */
class FullDuplexAnalyzer : public FullDuplexStreamWithConversion {
public:
    // The processor is required and must outlive this object (not owned).
    FullDuplexAnalyzer(LoopbackProcessor *processor)
            : mLoopbackProcessor(processor) {
    }

    /**
     * Called when data is available on both streams.
     * Caller should override this method.
     */
    oboe::DataCallbackResult onBothStreamsReadyFloat(
            const float *inputData,
            int numInputFrames,
            float *outputData,
            int numOutputFrames
    ) override;

    oboe::Result start() override;

    LoopbackProcessor *getLoopbackProcessor() {
        return mLoopbackProcessor;
    }

    /**
     * Attach a recording for capturing the analyzed signals.
     * Caller retains ownership; pass nullptr to disable recording.
     */
    void setRecording(MultiChannelRecording *recording) {
        mRecording = recording;
    }

    /**
     * @return true once the callback has captured at least one
     *         framesWritten/framesRead snapshot since start()
     */
    bool isWriteReadDeltaValid() {
        return mWriteReadDeltaValid;
    }

    /**
     * @return most recent (framesWritten - framesRead) snapshot,
     *         usable for latency calculations
     */
    int64_t getWriteReadDelta() {
        return mWriteReadDelta;
    }

private:
    MultiChannelRecording *mRecording = nullptr;  // optional, not owned
    LoopbackProcessor * const mLoopbackProcessor; // required, not owned
    // Written on the audio callback thread, read elsewhere -- hence atomics.
    std::atomic<bool> mWriteReadDeltaValid{false};
    std::atomic<int64_t> mWriteReadDelta{0};
};
#endif //OBOETESTER_FULL_DUPLEX_ANALYZER_H

View file

@ -0,0 +1,67 @@
/*
* Copyright 2019 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "common/OboeDebug.h"
#include "FullDuplexEcho.h"
// Allocate the delay line (sized for the maximum delay time at the output
// sample rate) and one peak detector per input channel, then start the
// underlying full duplex streams.
oboe::Result FullDuplexEcho::start() {
    int32_t delayFrames = (int32_t) (kMaxDelayTimeSeconds * getOutputStream()->getSampleRate());
    mDelayLine = std::make_unique<InterpolatingDelayLine>(delayFrames);
    // Use peak detector for input streams
    mNumChannels = getInputStream()->getChannelCount();
    mPeakDetectors = std::make_unique<PeakDetector[]>(mNumChannels);
    return FullDuplexStreamWithConversion::start();
}
/**
 * Get the most recent peak level for one input channel.
 *
 * @param index input channel index, 0 <= index < channel count
 * @return peak level, or a negative error code:
 *         -1.0 if called before start() allocated the detectors,
 *         -2.0 if index is out of range
 */
double FullDuplexEcho::getPeakLevel(int index) {
    if (mPeakDetectors == nullptr) {
        // Fixed: the old message referred to a nonexistent setup(); the
        // detectors are actually allocated in start().
        LOGE("%s() called before start()", __func__);
        return -1.0;
    } else if (index < 0 || index >= mNumChannels) {
        LOGE("%s(), index out of range, 0 <= %d < %d", __func__, index, mNumChannels.load());
        return -2.0;
    }
    return mPeakDetectors[index].getLevel();
}
// Echo input to output through the mono delay line and feed every input
// channel through its peak detector. Only the first output channel
// receives the delayed signal; the remaining channels stay zeroed.
oboe::DataCallbackResult FullDuplexEcho::onBothStreamsReadyFloat(
        const float *inputData,
        int numInputFrames,
        float *outputData,
        int numOutputFrames) {
    int32_t framesToEcho = std::min(numInputFrames, numOutputFrames);
    auto *inputFloat = const_cast<float *>(inputData);
    float *outputFloat = outputData;
    // zero out entire output array
    memset(outputFloat, 0, static_cast<size_t>(numOutputFrames)
                           * static_cast<size_t>(getOutputStream()->getBytesPerFrame()));
    int32_t inputStride = getInputStream()->getChannelCount();
    int32_t outputStride = getOutputStream()->getChannelCount();
    // Fractional delay in frames; InterpolatingDelayLine takes a float.
    float delayFrames = mDelayTimeSeconds * getOutputStream()->getSampleRate();
    while (framesToEcho-- > 0) {
        *outputFloat = mDelayLine->process(delayFrames, *inputFloat); // mono delay
        // Track the peak of every input channel, not just the echoed one.
        for (int iChannel = 0; iChannel < inputStride; iChannel++) {
            float sample = * (inputFloat + iChannel);
            mPeakDetectors[iChannel].process(sample);
        }
        inputFloat += inputStride;
        outputFloat += outputStride;
    }
    return oboe::DataCallbackResult::Continue;
}; // NOTE(review): stray semicolon after function body -- harmless, could be removed.

View file

@ -0,0 +1,63 @@
/*
* Copyright 2019 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef OBOETESTER_FULL_DUPLEX_ECHO_H
#define OBOETESTER_FULL_DUPLEX_ECHO_H
#include <unistd.h>
#include <sys/types.h>
#include "oboe/Oboe.h"
#include "analyzer/LatencyAnalyzer.h"
#include "FullDuplexStreamWithConversion.h"
#include "InterpolatingDelayLine.h"
/**
 * Full-duplex stream that echoes the input back to the output through a
 * monophonic delay line, while measuring per-channel input peak levels.
 */
class FullDuplexEcho : public FullDuplexStreamWithConversion {
public:
    FullDuplexEcho() {
        // No input cushion: keep the echo's round-trip latency as low as possible.
        setNumInputBurstsCushion(0);
    }

    /**
     * Called when data is available on both streams.
     * Caller should override this method.
     */
    oboe::DataCallbackResult onBothStreamsReadyFloat(
            const float *inputData,
            int numInputFrames,
            float *outputData,
            int numOutputFrames
            ) override;

    // Allocates the delay line and the peak detectors, then starts the streams.
    oboe::Result start() override;

    // Returns the peak level of one input channel, or a negative error code.
    double getPeakLevel(int index);

    // Set the echo delay; the delay line is sized for kMaxDelayTimeSeconds.
    void setDelayTime(double delayTimeSeconds) {
        mDelayTimeSeconds = delayTimeSeconds;
    }

private:
    std::unique_ptr<InterpolatingDelayLine> mDelayLine;
    // Longest supported delay; also used as the default delay time.
    static constexpr double kMaxDelayTimeSeconds = 4.0;
    double mDelayTimeSeconds = kMaxDelayTimeSeconds;
    // atomic — presumably read from another thread via getPeakLevel(); confirm.
    std::atomic<int32_t> mNumChannels{0};
    std::unique_ptr<PeakDetector[]> mPeakDetectors;
};
#endif //OBOETESTER_FULL_DUPLEX_ECHO_H

View file

@ -0,0 +1,61 @@
/*
* Copyright 2023 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "common/OboeDebug.h"
#include "FullDuplexStreamWithConversion.h"
oboe::Result FullDuplexStreamWithConversion::start() {
    // Allocate converters big enough for the largest possible callback.
    // NOTE(review): both buffers are sized from the OUTPUT stream's capacity;
    // presumably the input stream never delivers more frames than that — confirm.
    const int32_t maxFrames = getOutputStream()->getBufferCapacityInFrames();
    mInputConverter = std::make_unique<FormatConverterBox>(
            maxFrames * getInputStream()->getChannelCount(),
            getInputStream()->getFormat(),
            oboe::AudioFormat::Float);
    mOutputConverter = std::make_unique<FormatConverterBox>(
            maxFrames * getOutputStream()->getChannelCount(),
            oboe::AudioFormat::Float,
            getOutputStream()->getFormat());
    return FullDuplexStream::start();
}
oboe::ResultWithValue<int32_t> FullDuplexStreamWithConversion::readInput(int32_t numFrames) {
    // Non-blocking read into the converter's native-format buffer.
    auto result = getInputStream()->read(mInputConverter->getInputBuffer(),
                                         numFrames,
                                         0 /* timeout */);
    if (result == oboe::Result::OK) {
        // Convert only the samples that were actually read.
        const int32_t numSamples = result.value() * getInputStream()->getChannelCount();
        mInputConverter->convertInternalBuffers(numSamples);
    }
    return result;
}
oboe::DataCallbackResult FullDuplexStreamWithConversion::onBothStreamsReady(
        const void *inputData,
        int numInputFrames,
        void *outputData,
        int numOutputFrames
) {
    // Hand float views of both streams to the subclass, then convert the
    // float output back into the output stream's native format.
    const oboe::DataCallbackResult callbackResult = onBothStreamsReadyFloat(
            static_cast<const float *>(mInputConverter->getOutputBuffer()),
            numInputFrames,
            static_cast<float *>(mOutputConverter->getInputBuffer()),
            numOutputFrames);
    mOutputConverter->convertFromInternalInput(
            outputData,
            numOutputFrames * getOutputStream()->getChannelCount());
    return callbackResult;
}

View file

@ -0,0 +1,61 @@
/*
* Copyright 2023 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef OBOETESTER_FULL_DUPLEX_STREAM_WITH_CONVERSION_H
#define OBOETESTER_FULL_DUPLEX_STREAM_WITH_CONVERSION_H
#include <unistd.h>
#include <sys/types.h>
#include "oboe/Oboe.h"
#include "FormatConverterBox.h"
/**
 * A FullDuplexStream that converts both streams to/from float internally,
 * so subclasses can process all audio in a single format.
 */
class FullDuplexStreamWithConversion : public oboe::FullDuplexStream {
public:
    /**
     * Called when data is available on both streams.
     * Caller must override this method.
     */
    virtual oboe::DataCallbackResult onBothStreamsReadyFloat(
            const float *inputData,
            int numInputFrames,
            float *outputData,
            int numOutputFrames
            ) = 0;

    /**
     * Overrides the default onBothStreamsReady by converting to floats and then calling
     * onBothStreamsReadyFloat().
     */
    oboe::DataCallbackResult onBothStreamsReady(
            const void *inputData,
            int numInputFrames,
            void *outputData,
            int numOutputFrames
            ) override;

    // Reads raw input into the input converter and converts it to float.
    oboe::ResultWithValue<int32_t> readInput(int32_t numFrames) override;

    // Allocates both format converters, then starts the underlying streams.
    virtual oboe::Result start() override;

private:
    std::unique_ptr<FormatConverterBox> mInputConverter;   // native input -> float
    std::unique_ptr<FormatConverterBox> mOutputConverter;  // float -> native output
};
#endif //OBOETESTER_FULL_DUPLEX_STREAM_WITH_CONVERSION_H

View file

@ -0,0 +1,55 @@
/*
* Copyright 2015 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "common/OboeDebug.h"
#include "InputStreamCallbackAnalyzer.h"
double InputStreamCallbackAnalyzer::getPeakLevel(int index) {
    // Errors are reported as negative sentinel values.
    if (mPeakDetectors == nullptr) {
        LOGE("%s() called before setup()", __func__);
        return -1.0;
    }
    if (index < 0 || index >= mNumChannels) {
        LOGE("%s(), index out of range, 0 <= %d < %d", __func__, index, mNumChannels);
        return -2.0;
    }
    return mPeakDetectors[index].getLevel();
}
/**
 * Input callback: convert the incoming audio to float, record it, and update
 * the per-channel peak detectors.
 */
oboe::DataCallbackResult InputStreamCallbackAnalyzer::onAudioReady(
        oboe::AudioStream *audioStream,
        void *audioData,
        int numFrames) {
    int32_t channelCount = audioStream->getChannelCount();
    // Test hooks inherited from OboeTesterStreamCallback.
    maybeHang(getNanoseconds());
    printScheduler();
    // Convert the native-format input to float.
    mInputConverter->convertToInternalOutput(numFrames * channelCount, audioData);
    float *floatData = (float *) mInputConverter->getOutputBuffer();
    if (mRecording != nullptr) {
        mRecording->write(floatData, numFrames);
    }
    // Feed every sample of every channel into its peak detector.
    int32_t sampleIndex = 0;
    for (int iFrame = 0; iFrame < numFrames; iFrame++) {
        for (int iChannel = 0; iChannel < channelCount; iChannel++) {
            float sample = floatData[sampleIndex++];
            mPeakDetectors[iChannel].process(sample);
        }
    }
    // NOTE(review): waits for mMinimumFramesBeforeRead at the END of the
    // callback — presumably intentional for FIFO/glitch testing; confirm.
    audioStream->waitForAvailableFrames(mMinimumFramesBeforeRead, oboe::kNanosPerSecond);
    return oboe::DataCallbackResult::Continue;
}

View file

@ -0,0 +1,85 @@
/*
* Copyright 2016 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef NATIVEOBOE_INPUTSTREAMCALLBACKANALYZER_H
#define NATIVEOBOE_INPUTSTREAMCALLBACKANALYZER_H
#include <unistd.h>
#include <sys/types.h>
// TODO #include "flowgraph/FlowGraph.h"
#include "oboe/Oboe.h"
#include "analyzer/PeakDetector.h"
#include "FormatConverterBox.h"
#include "MultiChannelRecording.h"
#include "OboeTesterStreamCallback.h"
/**
 * Analyzes an input stream: records the incoming audio and measures
 * per-channel peak levels.
 */
class InputStreamCallbackAnalyzer : public OboeTesterStreamCallback {
public:
    // Reset every peak detector and the base-class callback state.
    void reset() {
        for (int iChannel = 0; iChannel < mNumChannels; iChannel++) {
            mPeakDetectors[iChannel].reset();
        }
        OboeTesterStreamCallback::reset();
    }

    /**
     * Allocate the peak detectors and the input format converter.
     *
     * @param maxFramesPerCallback largest callback size to allow for
     * @param channelCount samples per frame
     * @param inputFormat native format of the input stream
     */
    void setup(int32_t maxFramesPerCallback,
               int32_t channelCount,
               oboe::AudioFormat inputFormat) {
        mNumChannels = channelCount;
        mPeakDetectors = std::make_unique<PeakDetector[]>(channelCount);
        int32_t bufferSize = maxFramesPerCallback * channelCount;
        mInputConverter = std::make_unique<FormatConverterBox>(bufferSize,
                                                               inputFormat,
                                                               oboe::AudioFormat::Float);
    }

    /**
     * Called by Oboe when the stream is ready to process audio.
     */
    oboe::DataCallbackResult onAudioReady(
            oboe::AudioStream *audioStream,
            void *audioData,
            int numFrames) override;

    // Recording sink; may be nullptr to disable recording.
    void setRecording(MultiChannelRecording *recording) {
        mRecording = recording;
    }

    // Returns the peak level of one channel, or a negative error code.
    double getPeakLevel(int index);

    // Minimum frames that must be available before onAudioReady() returns.
    void setMinimumFramesBeforeRead(int32_t numFrames) {
        mMinimumFramesBeforeRead = numFrames;
    }

    int32_t getMinimumFramesBeforeRead() {
        return mMinimumFramesBeforeRead;
    }

public:
    int32_t mNumChannels = 0;
    std::unique_ptr<PeakDetector[]> mPeakDetectors;
    MultiChannelRecording *mRecording = nullptr;

private:
    std::unique_ptr<FormatConverterBox> mInputConverter;
    int32_t mMinimumFramesBeforeRead = 0;
};
#endif //NATIVEOBOE_INPUTSTREAMCALLBACKANALYZER_H

View file

@ -0,0 +1,43 @@
/*
* Copyright 2019 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <algorithm>
#include "InterpolatingDelayLine.h"
InterpolatingDelayLine::InterpolatingDelayLine(int32_t delaySize)
        // make_unique value-initializes the array, so the line starts silent.
        : mDelayLine(std::make_unique<float[]>(delaySize))
        , mDelaySize(delaySize) {
}
/**
 * Write one sample into the circular buffer and read back a delayed sample.
 *
 * Fixes: the original wrote the input sample to the same slot twice
 * (via writeAddress AND mDelayLine.get()[mCursor]); a negative delay was not
 * clamped and could produce a read past the end of the buffer; stray ';'
 * after the function body removed.
 *
 * @param delay number of samples to delay the output
 * @param input sample to be written to the delay line
 * @return delayed value
 */
float InterpolatingDelayLine::process(float delay, float input) {
    // Store the incoming sample at the write cursor (once).
    mDelayLine[mCursor] = input;
    // Clamp the requested delay to [0, mDelaySize - 1] frames.
    int32_t delayInt = std::min(mDelaySize - 1, std::max(0, (int32_t) delay));
    int32_t readIndex = mCursor - delayInt;
    if (readIndex < 0) {
        readIndex += mDelaySize; // wrap around the circular buffer
    }
    // TODO interpolate between adjacent samples using the fractional delay
    float output = mDelayLine[readIndex];
    // Advance and wrap the write cursor.
    mCursor++;
    if (mCursor >= mDelaySize) {
        mCursor = 0;
    }
    return output;
}

View file

@ -0,0 +1,45 @@
/*
* Copyright 2019 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef OBOETESTER_INTERPOLATING_DELAY_LINE_H
#define OBOETESTER_INTERPOLATING_DELAY_LINE_H
#include <memory>
#include <unistd.h>
#include <sys/types.h>
/**
 * Monophonic delay line.
 */
class InterpolatingDelayLine {
public:
    // delaySize is the capacity of the delay line in samples.
    explicit InterpolatingDelayLine(int32_t delaySize);

    /**
     * Write one sample and read back a delayed sample.
     *
     * @param delay number of samples to delay the output
     * @param input sample to be written to the delay line
     * @return delayed value
     */
    float process(float delay, float input);

private:
    std::unique_ptr<float[]> mDelayLine;  // circular sample buffer
    int32_t mCursor = 0;                  // next write position
    int32_t mDelaySize = 0;               // capacity in samples
};
#endif //OBOETESTER_INTERPOLATING_DELAY_LINE_H

View file

@ -0,0 +1,162 @@
/*
* Copyright 2015 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef NATIVEOBOE_MULTICHANNEL_RECORDING_H
#define NATIVEOBOE_MULTICHANNEL_RECORDING_H
#include <algorithm>
#include <memory.h>
#include <unistd.h>
#include <sys/types.h>
/**
 * Store multi-channel audio data in float format.
 * The most recent data will be saved.
 * Old data may be overwritten.
 *
 * Note that this is not thread safe. Do not read and write from separate threads.
 *
 * Fixes: read() did not advance the destination pointer between the
 * contiguous copies, so a read that wrapped around the circular buffer
 * overwrote the start of the caller's buffer. The raw new[]/delete[] pair
 * (with no deleted copy operations — double-free if copied) is replaced by
 * std::unique_ptr<float[]>, which also makes the class non-copyable.
 */
class MultiChannelRecording {
public:
    /**
     * @param channelCount samples per frame
     * @param maxFrames capacity of the circular buffer, in frames
     */
    MultiChannelRecording(int32_t channelCount, int32_t maxFrames)
            : mData(std::make_unique<float[]>(channelCount * maxFrames))
            , mChannelCount(channelCount)
            , mMaxFrames(maxFrames) {
    }

    // Move the read cursor back to the oldest frame still stored.
    void rewind() {
        mReadCursorFrames = mWriteCursorFrames - getSizeInFrames();
    }

    // Discard all recorded data.
    void clear() {
        mReadCursorFrames = 0;
        mWriteCursorFrames = 0;
    }

    int32_t getChannelCount() {
        return mChannelCount;
    }

    // Number of valid frames available to read, at most mMaxFrames.
    int32_t getSizeInFrames() {
        return (int32_t) std::min(mWriteCursorFrames, static_cast<int64_t>(mMaxFrames));
    }

    // Position of the read cursor within the circular buffer.
    int32_t getReadIndex() {
        return mReadCursorFrames % mMaxFrames;
    }

    // Position of the write cursor within the circular buffer.
    int32_t getWriteIndex() {
        return mWriteCursorFrames % mMaxFrames;
    }

    /**
     * Write numFrames from the short buffer into the recording.
     * Overwrite old data if necessary.
     * Convert shorts to floats.
     *
     * @param buffer interleaved 16-bit samples
     * @param numFrames
     * @return number of frames actually written.
     */
    int32_t write(int16_t *buffer, int32_t numFrames) {
        int32_t framesLeft = numFrames;
        while (framesLeft > 0) {
            int32_t indexFrame = getWriteIndex();
            // Copy contiguously up to the end of the circular buffer.
            int32_t framesToEndOfBuffer = mMaxFrames - indexFrame;
            int32_t framesNow = std::min(framesLeft, framesToEndOfBuffer);
            int32_t numSamples = framesNow * mChannelCount;
            int32_t sampleIndex = indexFrame * mChannelCount;
            for (int i = 0; i < numSamples; i++) {
                // Scale int16 range into [-1.0, 1.0).
                mData[sampleIndex++] = *buffer++ * (1.0f / 32768);
            }
            mWriteCursorFrames += framesNow;
            framesLeft -= framesNow;
        }
        return numFrames - framesLeft;
    }

    /**
     * Write all numFrames from the float buffer into the recording.
     * Overwrite old data if full.
     * @param buffer interleaved float samples
     * @param numFrames
     * @return number of frames actually written.
     */
    int32_t write(float *buffer, int32_t numFrames) {
        int32_t framesLeft = numFrames;
        while (framesLeft > 0) {
            int32_t indexFrame = getWriteIndex();
            // Copy contiguously up to the end of the circular buffer.
            int32_t framesToEnd = mMaxFrames - indexFrame;
            int32_t framesNow = std::min(framesLeft, framesToEnd);
            int32_t numSamples = framesNow * mChannelCount;
            int32_t sampleIndex = indexFrame * mChannelCount;
            memcpy(&mData[sampleIndex],
                   buffer,
                   (numSamples * sizeof(float)));
            buffer += numSamples;
            mWriteCursorFrames += framesNow;
            framesLeft -= framesNow;
        }
        return numFrames;
    }

    /**
     * Read numFrames from the recording into the buffer, if there is enough data.
     * Start at the cursor position, aligned up to the next frame.
     * @param buffer destination for interleaved float samples
     * @param numFrames
     * @return number of frames actually read.
     */
    int32_t read(float *buffer, int32_t numFrames) {
        int32_t framesRead = 0;
        // Never read more than was requested or than is stored.
        int32_t framesLeft = std::min(numFrames,
                std::min(mMaxFrames, (int32_t)(mWriteCursorFrames - mReadCursorFrames)));
        while (framesLeft > 0) {
            int32_t indexFrame = getReadIndex();
            // Copy contiguously up to the end of the circular buffer.
            int32_t framesToEnd = mMaxFrames - indexFrame;
            int32_t framesNow = std::min(framesLeft, framesToEnd);
            int32_t numSamples = framesNow * mChannelCount;
            int32_t sampleIndex = indexFrame * mChannelCount;
            memcpy(buffer,
                   &mData[sampleIndex],
                   (numSamples * sizeof(float)));
            buffer += numSamples; // FIX: advance destination for wrapped reads
            mReadCursorFrames += framesNow;
            framesLeft -= framesNow;
            framesRead += framesNow;
        }
        return framesRead;
    }

private:
    std::unique_ptr<float[]> mData;  // interleaved circular sample buffer
    int64_t mReadCursorFrames = 0;
    int64_t mWriteCursorFrames = 0; // monotonically increasing
    const int32_t mChannelCount;
    const int32_t mMaxFrames;
};
#endif //NATIVEOBOE_MULTICHANNEL_RECORDING_H

View file

@ -0,0 +1,810 @@
/*
* Copyright 2017 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// Set to 1 for debugging race condition #1180 with mAAudioStream.
// See also AudioStreamAAudio.cpp in Oboe.
// This was left in the code so that we could test the fix again easily in the future.
// We could not trigger the race condition without adding these get calls and the sleeps.
#define DEBUG_CLOSE_RACE 0
#include <fstream>
#include <iostream>
#if DEBUG_CLOSE_RACE
#include <thread>
#endif // DEBUG_CLOSE_RACE
#include <vector>
#include "oboe/AudioClock.h"
#include "util/WaveFileWriter.h"
#include "NativeAudioContext.h"
using namespace oboe;
// Map a Java-side NATIVE_MODE_* constant onto the Oboe AudioApi enum.
// Anything unrecognized (including NATIVE_MODE_UNSPECIFIED) maps to Unspecified.
static oboe::AudioApi convertNativeApiToAudioApi(int nativeApi) {
    if (nativeApi == NATIVE_MODE_AAUDIO) {
        return oboe::AudioApi::AAudio;
    }
    if (nativeApi == NATIVE_MODE_OPENSLES) {
        return oboe::AudioApi::OpenSLES;
    }
    return oboe::AudioApi::Unspecified;
}
// In-memory sink for the WAV writer: accumulates the byte stream in a vector.
class MyOboeOutputStream : public WaveFileOutputStream {
public:
    // Append one byte of WAV data.
    void write(uint8_t b) override {
        mData.push_back(b);
    }
    // Number of bytes accumulated so far.
    int32_t length() {
        return static_cast<int32_t>(mData.size());
    }
    // Raw pointer to the accumulated bytes (valid until the next write()).
    uint8_t *getData() {
        return mData.data();
    }
private:
    std::vector<uint8_t> mData;
};
// Shared configuration for all activities: whether to drive streams with a
// data callback, and the requested frames-per-callback (0 = unspecified).
bool ActivityContext::mUseCallback = true;
int ActivityContext::callbackSize = 0;
std::shared_ptr<oboe::AudioStream> ActivityContext::getOutputStream() {
for (auto entry : mOboeStreams) {
std::shared_ptr<oboe::AudioStream> oboeStream = entry.second;
if (oboeStream->getDirection() == oboe::Direction::Output) {
return oboeStream;
}
}
return nullptr;
}
// Return the first open input stream, or nullptr if none.
std::shared_ptr<oboe::AudioStream> ActivityContext::getInputStream() {
    for (auto &entry : mOboeStreams) {
        std::shared_ptr<oboe::AudioStream> oboeStream = entry.second;
        if (oboeStream != nullptr
                && oboeStream->getDirection() == oboe::Direction::Input) {
            return oboeStream;
        }
    }
    return nullptr;
}
// Release our reference to the stream and remove its slot from the map.
void ActivityContext::freeStreamIndex(int32_t streamIndex) {
    mOboeStreams[streamIndex].reset();
    mOboeStreams.erase(streamIndex);
}
// Stream handles are never reused; simply hand out the next integer.
int32_t ActivityContext::allocateStreamIndex() {
    const int32_t streamIndex = mNextStreamHandle;
    mNextStreamHandle++;
    return streamIndex;
}
// Release every open stream. Returns the result of the LAST release;
// earlier failures are overwritten.
oboe::Result ActivityContext::release() {
    // Stop the blocking I/O thread first so nothing touches the streams.
    stopBlockingIOThread();
    oboe::Result result = oboe::Result::OK;
    for (auto &entry : mOboeStreams) {
        result = entry.second->release();
    }
    return result;
}
void ActivityContext::close(int32_t streamIndex) {
stopBlockingIOThread();
std::shared_ptr<oboe::AudioStream> oboeStream = getStream(streamIndex);
if (oboeStream != nullptr) {
oboeStream->close();
LOGD("ActivityContext::%s() delete stream %d ", __func__, streamIndex);
freeStreamIndex(streamIndex);
}
}
// MMAP is only possible on an open AAudio stream.
bool ActivityContext::isMMapUsed(int32_t streamIndex) {
    std::shared_ptr<oboe::AudioStream> oboeStream = getStream(streamIndex);
    if (oboeStream == nullptr || oboeStream->getAudioApi() != AudioApi::AAudio) {
        return false;
    }
    return AAudioExtensions::getInstance().isMMapUsed(oboeStream.get());
}
// Request pause on every open stream. Returns the result of the LAST
// request; earlier failures are overwritten.
oboe::Result ActivityContext::pause() {
    stopBlockingIOThread();
    oboe::Result result = oboe::Result::OK;
    for (auto &entry : mOboeStreams) {
        result = entry.second->requestPause();
    }
    return result;
}
// Request stop on every open stream. Returns the result of the LAST
// request; earlier failures are overwritten.
oboe::Result ActivityContext::stopAllStreams() {
    stopBlockingIOThread();
    oboe::Result result = oboe::Result::OK;
    for (auto &entry : mOboeStreams) {
        result = entry.second->requestStop();
    }
    return result;
}
// Base hook for configuring the builder before opening. Subclasses override
// to add stream-specific settings; isInput is unused in this base version.
void ActivityContext::configureBuilder(bool isInput, oboe::AudioStreamBuilder &builder) {
    // We needed the proxy because we did not know the channelCount when we setup the Builder.
    if (mUseCallback) {
        builder.setDataCallback(&oboeCallbackProxy);
    }
}
/**
 * Open an audio stream configured from the Java-side parameters.
 *
 * @return a non-negative stream index on success, or a negative
 *         oboe::Result error code on failure.
 */
int ActivityContext::open(jint nativeApi,
                          jint sampleRate,
                          jint channelCount,
                          jint channelMask,
                          jint format,
                          jint sharingMode,
                          jint performanceMode,
                          jint inputPreset,
                          jint usage,
                          jint contentType,
                          jint bufferCapacityInFrames,
                          jint deviceId,
                          jint sessionId,
                          jboolean channelConversionAllowed,
                          jboolean formatConversionAllowed,
                          jint rateConversionQuality,
                          jboolean isMMap,
                          jboolean isInput,
                          jint spatializationBehavior) {
    // Validate and translate the requested native API.
    oboe::AudioApi audioApi = oboe::AudioApi::Unspecified;
    switch (nativeApi) {
        case NATIVE_MODE_UNSPECIFIED:
        case NATIVE_MODE_AAUDIO:
        case NATIVE_MODE_OPENSLES:
            audioApi = convertNativeApiToAudioApi(nativeApi);
            break;
        default:
            return (jint) oboe::Result::ErrorOutOfRange;
    }
    int32_t streamIndex = allocateStreamIndex();
    if (streamIndex < 0) {
        LOGE("ActivityContext::open() stream array full");
        return (jint) oboe::Result::ErrorNoFreeHandles;
    }
    // Sanity-check the channel count before handing it to the builder.
    if (channelCount < 0 || channelCount > 256) {
        LOGE("ActivityContext::open() channels out of range");
        return (jint) oboe::Result::ErrorOutOfRange;
    }
    // Create an audio stream.
    oboe::AudioStreamBuilder builder;
    builder.setChannelCount(channelCount)
            ->setDirection(isInput ? oboe::Direction::Input : oboe::Direction::Output)
            ->setSharingMode((oboe::SharingMode) sharingMode)
            ->setPerformanceMode((oboe::PerformanceMode) performanceMode)
            ->setInputPreset((oboe::InputPreset)inputPreset)
            ->setUsage((oboe::Usage)usage)
            ->setContentType((oboe::ContentType)contentType)
            ->setBufferCapacityInFrames(bufferCapacityInFrames)
            ->setDeviceId(deviceId)
            ->setSessionId((oboe::SessionId) sessionId)
            ->setSampleRate(sampleRate)
            ->setFormat((oboe::AudioFormat) format)
            ->setChannelConversionAllowed(channelConversionAllowed)
            ->setFormatConversionAllowed(formatConversionAllowed)
            ->setSampleRateConversionQuality((oboe::SampleRateConversionQuality) rateConversionQuality)
            ->setSpatializationBehavior((oboe::SpatializationBehavior) spatializationBehavior)
            ;
    if (channelMask != (jint) oboe::ChannelMask::Unspecified) {
        // Set channel mask when it is specified.
        builder.setChannelMask((oboe::ChannelMask) channelMask);
    }
    if (mUseCallback) {
        builder.setFramesPerCallback(callbackSize);
    }
    // Let the subclass add stream-specific settings (e.g. its data callback).
    configureBuilder(isInput, builder);
    builder.setAudioApi(audioApi);
    // Temporarily set the AAudio MMAP policy to disable MMAP or not.
    bool oldMMapEnabled = AAudioExtensions::getInstance().isMMapEnabled();
    AAudioExtensions::getInstance().setMMapEnabled(isMMap);
    // Record time for opening.
    if (isInput) {
        mInputOpenedAt = oboe::AudioClock::getNanoseconds();
    } else {
        mOutputOpenedAt = oboe::AudioClock::getNanoseconds();
    }
    // Open a stream based on the builder settings.
    std::shared_ptr<oboe::AudioStream> oboeStream;
    Result result = builder.openStream(oboeStream);
    // Restore the global MMAP policy regardless of the open result.
    AAudioExtensions::getInstance().setMMapEnabled(oldMMapEnabled);
    if (result != Result::OK) {
        freeStreamIndex(streamIndex);
        streamIndex = -1;
    } else {
        mOboeStreams[streamIndex] = oboeStream; // save shared_ptr
        mChannelCount = oboeStream->getChannelCount(); // FIXME store per stream
        mFramesPerBurst = oboeStream->getFramesPerBurst();
        mSampleRate = oboeStream->getSampleRate();
        createRecording();
        finishOpen(isInput, oboeStream);
    }
    if (!mUseCallback) {
        // Blocking I/O path: allocate the transfer buffer for the data thread.
        int numSamples = getFramesPerBlock() * mChannelCount;
        dataBuffer = std::make_unique<float[]>(numSamples);
    }
    if (result != Result::OK) {
        return (int) result;
    } else {
        configureAfterOpen();
        return streamIndex;
    }
}
/**
 * Start all open streams. When callbacks are disabled, also spawn the thread
 * that performs blocking reads/writes.
 */
oboe::Result ActivityContext::start() {
    oboe::Result result = oboe::Result::OK;
    std::shared_ptr<oboe::AudioStream> inputStream = getInputStream();
    std::shared_ptr<oboe::AudioStream> outputStream = getOutputStream();
    if (inputStream == nullptr && outputStream == nullptr) {
        LOGD("%s() - no streams defined", __func__);
        return oboe::Result::ErrorInvalidState; // not open
    }
    audioStreamGateway.reset();
    result = startStreams();
    if (!mUseCallback && result == oboe::Result::OK) {
        // Instead of using the callback, start a thread that writes the stream.
        threadEnabled.store(true);
        dataThread = new std::thread(threadCallback, this);
    }
#if DEBUG_CLOSE_RACE
    // Debug-only helper for race condition #1180 — hammers getFramesRead()
    // until the stream closes.
    // Also put a sleep for 400 msec in AudioStreamAAudio::updateFramesRead().
    if (outputStream != nullptr) {
        std::thread raceDebugger([outputStream]() {
            while (outputStream->getState() != StreamState::Closed) {
                int64_t framesRead = outputStream->getFramesRead();
                LOGD("raceDebugger, framesRead = %d, state = %d",
                     (int) framesRead, (int) outputStream->getState());
            }
        });
        raceDebugger.detach();
    }
#endif // DEBUG_CLOSE_RACE
    return result;
}
// Request flush on every open stream. Returns the result of the LAST
// request; earlier failures are overwritten.
oboe::Result ActivityContext::flush() {
    oboe::Result result = oboe::Result::OK;
    for (auto &entry : mOboeStreams) {
        result = entry.second->requestFlush();
    }
    return result;
}
/**
 * Save the current recording as a 24-bit WAV file.
 *
 * Fix: replaces the variable-length array `float buffer[channelCount]`
 * (a GCC extension, not standard C++) with std::vector (<vector> is
 * already included by this file).
 *
 * @param filename destination path
 * @return number of bytes written, or a negative error code.
 */
int32_t ActivityContext::saveWaveFile(const char *filename) {
    if (mRecording == nullptr) {
        LOGW("ActivityContext::saveWaveFile(%s) but no recording!", filename);
        return -1;
    }
    if (mRecording->getSizeInFrames() == 0) {
        LOGW("ActivityContext::saveWaveFile(%s) but no frames!", filename);
        return -2;
    }
    MyOboeOutputStream outStream;
    WaveFileWriter writer(&outStream);
    // You must setup the format before the first write().
    writer.setFrameRate(mSampleRate);
    writer.setSamplesPerFrame(mRecording->getChannelCount());
    writer.setBitsPerSample(24);
    writer.setFrameCount(mRecording->getSizeInFrames());
    // One frame of interleaved samples; std::vector instead of a VLA.
    std::vector<float> buffer(mRecording->getChannelCount());
    // Read samples from start to finish, one frame at a time.
    mRecording->rewind();
    for (int32_t frameIndex = 0; frameIndex < mRecording->getSizeInFrames(); frameIndex++) {
        mRecording->read(buffer.data(), 1 /* numFrames */);
        for (int32_t i = 0; i < mRecording->getChannelCount(); i++) {
            writer.write(buffer[i]);
        }
    }
    writer.close();
    // Flush the in-memory WAV image to disk.
    if (outStream.length() > 0) {
        auto myfile = std::ofstream(filename, std::ios::out | std::ios::binary);
        myfile.write((char *) outStream.getData(), outStream.length());
        myfile.close();
    }
    return outStream.length();
}
double ActivityContext::getTimestampLatency(int32_t streamIndex) {
std::shared_ptr<oboe::AudioStream> oboeStream = getStream(streamIndex);
if (oboeStream != nullptr) {
auto result = oboeStream->calculateLatencyMillis();
return (!result) ? -1.0 : result.value();
}
return -1.0;
}
// =================================================================== ActivityTestOutput
// Close the stream, then tear down the signal graph and all sinks so they
// are rebuilt on the next open.
void ActivityTestOutput::close(int32_t streamIndex) {
    ActivityContext::close(streamIndex);
    manyToMulti.reset(nullptr);
    monoToMulti.reset(nullptr);
    mVolumeRamp.reset();
    mSinkFloat.reset();
    mSinkI16.reset();
    mSinkI24.reset();
    mSinkI32.reset();
    mSinkMemoryDirect.reset();
}
/**
 * Connect or disconnect the signal source feeding one channel of the mixer.
 * Which source gets connected depends on the currently selected signal type.
 */
void ActivityTestOutput::setChannelEnabled(int channelIndex, bool enabled) {
    if (manyToMulti == nullptr) {
        return; // graph not built yet
    }
    if (enabled) {
        switch (mSignalType) {
            case SignalType::Sine:
                // Fixed-frequency sine: make sure no sweep drives the frequency.
                sineOscillators[channelIndex].frequency.disconnect();
                sineOscillators[channelIndex].output.connect(manyToMulti->inputs[channelIndex].get());
                break;
            case SignalType::Sawtooth:
                sawtoothOscillators[channelIndex].output.connect(manyToMulti->inputs[channelIndex].get());
                break;
            case SignalType::FreqSweep:
                // Linear sweep shape drives the sine oscillator's frequency.
                mLinearShape.output.connect(&sineOscillators[channelIndex].frequency);
                sineOscillators[channelIndex].output.connect(manyToMulti->inputs[channelIndex].get());
                break;
            case SignalType::PitchSweep:
                // Exponential sweep shape drives the sine oscillator's frequency.
                mExponentialShape.output.connect(&sineOscillators[channelIndex].frequency);
                sineOscillators[channelIndex].output.connect(manyToMulti->inputs[channelIndex].get());
                break;
            case SignalType::WhiteNoise:
                mWhiteNoise.output.connect(manyToMulti->inputs[channelIndex].get());
                break;
            default:
                break;
        }
    } else {
        manyToMulti->inputs[channelIndex]->disconnect();
    }
}
/**
 * Build the signal-generation graph:
 * oscillators -> manyToMulti mixer -> volume ramp -> format-specific sinks.
 */
void ActivityTestOutput::configureAfterOpen() {
    manyToMulti = std::make_unique<ManyToMultiConverter>(mChannelCount);
    std::shared_ptr<oboe::AudioStream> outputStream = getOutputStream();
    // Ramp the volume over kRampMSec to avoid pops on amplitude changes.
    mVolumeRamp = std::make_shared<RampLinear>(mChannelCount);
    mVolumeRamp->setLengthInFrames(kRampMSec * outputStream->getSampleRate() /
                                   MILLISECONDS_PER_SECOND);
    mVolumeRamp->setTarget(mAmplitude);
    // One sink per supported output format.
    mSinkFloat = std::make_shared<SinkFloat>(mChannelCount);
    mSinkI16 = std::make_shared<SinkI16>(mChannelCount);
    mSinkI24 = std::make_shared<SinkI24>(mChannelCount);
    mSinkI32 = std::make_shared<SinkI32>(mChannelCount);
    static constexpr int COMPRESSED_FORMAT_BYTES_PER_FRAME = 1;
    mSinkMemoryDirect = std::make_shared<SinkMemoryDirect>(
            mChannelCount, COMPRESSED_FORMAT_BYTES_PER_FRAME);
    // A slow triangle wave drives both sweep shapes.
    mTriangleOscillator.setSampleRate(outputStream->getSampleRate());
    mTriangleOscillator.frequency.setValue(1.0/kSweepPeriod);
    mTriangleOscillator.amplitude.setValue(1.0);
    mTriangleOscillator.setPhase(-1.0);
    mLinearShape.setMinimum(0.0);
    mLinearShape.setMaximum(outputStream->getSampleRate() * 0.5); // Nyquist
    mExponentialShape.setMinimum(110.0);
    mExponentialShape.setMaximum(outputStream->getSampleRate() * 0.5); // Nyquist
    mTriangleOscillator.output.connect(&(mLinearShape.input));
    mTriangleOscillator.output.connect(&(mExponentialShape.input));
    {
        double frequency = 330.0;
        // Go up by a minor third or a perfect fourth just intoned interval.
        const float interval = (mChannelCount > 8) ? (6.0f / 5.0f) : (4.0f / 3.0f);
        for (int i = 0; i < mChannelCount; i++) {
            sineOscillators[i].setSampleRate(outputStream->getSampleRate());
            sineOscillators[i].frequency.setValue(frequency);
            sineOscillators[i].amplitude.setValue(AMPLITUDE_SINE / mChannelCount);
            sawtoothOscillators[i].setSampleRate(outputStream->getSampleRate());
            sawtoothOscillators[i].frequency.setValue(frequency);
            sawtoothOscillators[i].amplitude.setValue(AMPLITUDE_SAWTOOTH / mChannelCount);
            frequency *= interval; // each wave is at a higher frequency
            setChannelEnabled(i, true);
        }
    }
    mWhiteNoise.amplitude.setValue(0.5);
    // Wire the graph: mixer -> ramp -> every sink.
    manyToMulti->output.connect(&(mVolumeRamp.get()->input));
    mVolumeRamp->output.connect(&(mSinkFloat.get()->input));
    mVolumeRamp->output.connect(&(mSinkI16.get()->input));
    mVolumeRamp->output.connect(&(mSinkI24.get()->input));
    mVolumeRamp->output.connect(&(mSinkI32.get()->input));
    mSinkFloat->pullReset();
    mSinkI16->pullReset();
    mSinkI24->pullReset();
    mSinkI32->pullReset();
    mSinkMemoryDirect->pullReset();
    configureStreamGateway();
}
void ActivityTestOutput::configureStreamGateway() {
std::shared_ptr<oboe::AudioStream> outputStream = getOutputStream();
if (outputStream->getFormat() == oboe::AudioFormat::I16) {
audioStreamGateway.setAudioSink(mSinkI16);
} else if (outputStream->getFormat() == oboe::AudioFormat::I24) {
audioStreamGateway.setAudioSink(mSinkI24);
} else if (outputStream->getFormat() == oboe::AudioFormat::I32) {
audioStreamGateway.setAudioSink(mSinkI32);
} else if (outputStream->getFormat() == oboe::AudioFormat::Float) {
audioStreamGateway.setAudioSink(mSinkFloat);
} else if (outputStream->getFormat() == oboe::AudioFormat::MP3) {
audioStreamGateway.setAudioSink(mSinkMemoryDirect);
}
if (mUseCallback) {
oboeCallbackProxy.setDataCallback(&audioStreamGateway);
}
}
/**
 * Blocking-write loop run on the data thread when callbacks are disabled:
 * pull audio from the gateway, then write it to the output stream.
 */
void ActivityTestOutput::runBlockingIO() {
    int32_t framesPerBlock = getFramesPerBlock();
    oboe::DataCallbackResult callbackResult = oboe::DataCallbackResult::Continue;
    std::shared_ptr<oboe::AudioStream> oboeStream = getOutputStream();
    if (oboeStream == nullptr) {
        LOGE("%s() : no stream found\n", __func__);
        return;
    }
    while (threadEnabled.load()
           && callbackResult == oboe::DataCallbackResult::Continue) {
        // generate output by calling the callback
        callbackResult = audioStreamGateway.onAudioReady(oboeStream.get(),
                                                         dataBuffer.get(),
                                                         framesPerBlock);
        // Blocking write with a one-second timeout.
        auto result = oboeStream->write(dataBuffer.get(),
                                        framesPerBlock,
                                        NANOS_PER_SECOND);
        if (!result) {
            LOGE("%s() returned %s\n", __func__, convertToText(result.error()));
            break;
        }
        // A short write after a full-second timeout is treated as an error.
        int32_t framesWritten = result.value();
        if (framesWritten < framesPerBlock) {
            LOGE("%s() : write() wrote %d of %d\n", __func__, framesWritten, framesPerBlock);
            break;
        }
    }
}
// Rewind every sink so each start begins from a fresh graph state, apply the
// current amplitude to the volume ramp, then start the output stream.
// Returns ErrorNull when no output stream is open (consistent with
// ActivityTestDisconnect::startStreams) instead of dereferencing null.
oboe::Result ActivityTestOutput::startStreams() {
    mSinkFloat->pullReset();
    mSinkI16->pullReset();
    mSinkI24->pullReset();
    mSinkI32->pullReset();
    mSinkMemoryDirect->pullReset();
    if (mVolumeRamp != nullptr) {
        mVolumeRamp->setTarget(mAmplitude);
    }
    std::shared_ptr<oboe::AudioStream> outputStream = getOutputStream();
    if (outputStream == nullptr) {
        return oboe::Result::ErrorNull; // no stream was opened
    }
    return outputStream->start();
}
// Hand an externally supplied memory buffer to the direct-memory sink.
// A no-op when that sink has not been created.
void ActivityTestOutput::setupMemoryBuffer(std::unique_ptr<uint8_t[]> &buffer, int length) {
    if (mSinkMemoryDirect == nullptr) {
        return; // nothing to configure yet
    }
    mSinkMemoryDirect->setupMemoryBuffer(buffer, length);
}
// ======================================================================= ActivityTestInput
// Prepare the input analyzer once the input stream has been opened.
void ActivityTestInput::configureAfterOpen() {
    mInputAnalyzer.reset();
    if (mUseCallback) {
        // In callback mode the analyzer consumes the input data directly.
        oboeCallbackProxy.setDataCallback(&mInputAnalyzer);
    }
    mInputAnalyzer.setRecording(mRecording.get());
}
// Poll the input stream from a dedicated thread instead of a callback:
// wait for enough buffered frames, read one block, and pass it to the
// analyzer, until asked to stop or an error occurs.
void ActivityTestInput::runBlockingIO() {
    int32_t framesPerBlock = getFramesPerBlock();
    oboe::DataCallbackResult callbackResult = oboe::DataCallbackResult::Continue;
    std::shared_ptr<oboe::AudioStream> oboeStream = getInputStream();
    if (oboeStream == nullptr) {
        LOGE("%s() : no stream found\n", __func__);
        return;
    }
    // threadEnabled is cleared by stopBlockingIOThread() to end the loop.
    while (threadEnabled.load()
           && callbackResult == oboe::DataCallbackResult::Continue) {
        // Avoid glitches by waiting until there is extra data in the FIFO.
        auto err = oboeStream->waitForAvailableFrames(mMinimumFramesBeforeRead, kNanosPerSecond);
        if (!err) break;
        // read from input
        auto result = oboeStream->read(dataBuffer.get(),
                                       framesPerBlock,
                                       NANOS_PER_SECOND);
        if (!result) {
            LOGE("%s() : read() returned %s\n", __func__, convertToText(result.error()));
            break;
        }
        int32_t framesRead = result.value();
        if (framesRead < framesPerBlock) { // timeout?
            LOGE("%s() : read() read %d of %d\n", __func__, framesRead, framesPerBlock);
            break;
        }
        // analyze input
        callbackResult = mInputAnalyzer.onAudioReady(oboeStream.get(),
                                                     dataBuffer.get(),
                                                     framesRead);
    }
}
// Stop and tear down the playback stream created by startPlayback().
// Returns the result of requestStop(), or OK when no stream exists.
oboe::Result ActivityRecording::stopPlayback() {
    oboe::Result result = oboe::Result::OK;
    if (playbackStream != nullptr) {
        result = playbackStream->requestStop();
        playbackStream->close();
        // Detach the recording before deleting the stream so the callback
        // cannot reference it afterwards.
        mPlayRecordingCallback.setRecording(nullptr);
        delete playbackStream;
        playbackStream = nullptr;
    }
    return result;
}
// Open a float output stream matching the recording's channel count and
// sample rate, rewind the recording, and start playing it back through
// mPlayRecordingCallback.
oboe::Result ActivityRecording::startPlayback() {
    stop(); // stop any recording or playback already in progress
    oboe::AudioStreamBuilder builder;
    builder.setChannelCount(mChannelCount)
            ->setSampleRate(mSampleRate)
            ->setFormat(oboe::AudioFormat::Float)
            ->setCallback(&mPlayRecordingCallback);
    oboe::Result result = builder.openStream(&playbackStream);
    if (result != oboe::Result::OK) {
        delete playbackStream;
        playbackStream = nullptr;
    } else if (playbackStream != nullptr) {
        if (mRecording != nullptr) {
            mRecording->rewind(); // play from the beginning
            mPlayRecordingCallback.setRecording(mRecording.get());
            result = playbackStream->requestStart();
        }
    }
    return result;
}
// ======================================================================= ActivityTapToTone
// Build the tap-to-tone signal graph: a saw-ping generator fanned out to
// one sink per supported sample format. The wiring order matters: the
// generator feeds the mono-to-multi converter, which feeds every sink.
void ActivityTapToTone::configureAfterOpen() {
    monoToMulti = std::make_unique<MonoToMultiConverter>(mChannelCount);
    mSinkFloat = std::make_shared<SinkFloat>(mChannelCount);
    mSinkI16 = std::make_shared<SinkI16>(mChannelCount);
    mSinkI24 = std::make_shared<SinkI24>(mChannelCount);
    mSinkI32 = std::make_shared<SinkI32>(mChannelCount);
    std::shared_ptr<oboe::AudioStream> outputStream = getOutputStream();
    sawPingGenerator.setSampleRate(outputStream->getSampleRate());
    sawPingGenerator.frequency.setValue(FREQUENCY_SAW_PING);
    sawPingGenerator.amplitude.setValue(AMPLITUDE_SAW_PING);
    sawPingGenerator.output.connect(&(monoToMulti->input));
    monoToMulti->output.connect(&(mSinkFloat.get()->input));
    monoToMulti->output.connect(&(mSinkI16.get()->input));
    monoToMulti->output.connect(&(mSinkI24.get()->input));
    monoToMulti->output.connect(&(mSinkI32.get()->input));
    // Rewind the sinks so the graph starts from a clean state.
    mSinkFloat->pullReset();
    mSinkI16->pullReset();
    mSinkI24->pullReset();
    mSinkI32->pullReset();
    configureStreamGateway();
}
// ======================================================================= ActivityFullDuplex
// For the input side of a duplex pair, match the input buffer capacity to
// the already-open output stream so both FIFOs stay in sync (input runs
// nearly empty while output runs nearly full).
void ActivityFullDuplex::configureBuilder(bool isInput, oboe::AudioStreamBuilder &builder) {
    if (!isInput) {
        return; // only the input builder needs adjusting here
    }
    // Ideally the output streams should be opened first.
    std::shared_ptr<oboe::AudioStream> outputStream = getOutputStream();
    if (outputStream != nullptr) {
        builder.setBufferCapacityInFrames(outputStream->getBufferCapacityInFrames());
    }
}
// ======================================================================= ActivityEcho
// Lazily create the echo engine, then attach it as the output callback.
// Only the output stream uses a callback; the input stream is polled.
void ActivityEcho::configureBuilder(bool isInput, oboe::AudioStreamBuilder &builder) {
    ActivityFullDuplex::configureBuilder(isInput, builder);
    if (!mFullDuplexEcho) {
        mFullDuplexEcho = std::make_unique<FullDuplexEcho>();
    }
    if (isInput) {
        return; // input side: no callback
    }
    builder.setCallback((oboe::AudioStreamCallback *) &oboeCallbackProxy);
    oboeCallbackProxy.setDataCallback(mFullDuplexEcho.get());
}
// Register the freshly opened stream with the echo engine on the
// appropriate side of the duplex pair.
void ActivityEcho::finishOpen(bool isInput, std::shared_ptr<oboe::AudioStream> &oboeStream) {
    if (isInput) {
        mFullDuplexEcho->setSharedInputStream(oboeStream);
        return;
    }
    mFullDuplexEcho->setSharedOutputStream(oboeStream);
}
// ======================================================================= ActivityRoundTripLatency
// Lazily create the duplex analyzer around the latency analyzer, then
// attach it as the output callback. Only output uses a callback; input
// is polled.
void ActivityRoundTripLatency::configureBuilder(bool isInput, oboe::AudioStreamBuilder &builder) {
    ActivityFullDuplex::configureBuilder(isInput, builder);
    if (!mFullDuplexLatency) {
        mFullDuplexLatency = std::make_unique<FullDuplexAnalyzer>(mLatencyAnalyzer.get());
    }
    if (isInput) {
        return; // input side: no callback
    }
    builder.setCallback((oboe::AudioStreamCallback *) &oboeCallbackProxy);
    oboeCallbackProxy.setDataCallback(mFullDuplexLatency.get());
}
// Register the freshly opened stream with the duplex latency analyzer;
// the input side also receives the recording buffer.
void ActivityRoundTripLatency::finishOpen(bool isInput,
                                          std::shared_ptr<oboe::AudioStream> &oboeStream) {
    if (isInput) {
        mFullDuplexLatency->setSharedInputStream(oboeStream);
        mFullDuplexLatency->setRecording(mRecording.get());
        return;
    }
    mFullDuplexLatency->setSharedOutputStream(oboeStream);
}
// The timestamp latency is the difference between the input
// and output times for a specific frame.
// Start with the position and time from an input timestamp.
// Map the input position to the corresponding position in output
// and calculate its time.
// Use the difference between framesWritten and framesRead to
// convert input positions to output positions.
// Measure timestamp-based latency: map an input timestamp's frame position
// into the output stream's numbering (using the write/read delta), predict
// when that frame plays, and return the difference in milliseconds.
// Returns -1.0 when any required timestamp is unavailable.
jdouble ActivityRoundTripLatency::measureTimestampLatency() {
    if (!mFullDuplexLatency->isWriteReadDeltaValid()) return -1.0;

    const int64_t writeReadDelta = mFullDuplexLatency->getWriteReadDelta();
    auto inputResult = mFullDuplexLatency->getInputStream()->getTimestamp(CLOCK_MONOTONIC);
    if (!inputResult) return -1.0;
    auto outputResult = mFullDuplexLatency->getOutputStream()->getTimestamp(CLOCK_MONOTONIC);
    if (!outputResult) return -1.0;

    const int64_t inputPosition = inputResult.value().position;
    const int64_t inputTimeNanos = inputResult.value().timestamp;
    const int64_t outputPosition = outputResult.value().position;
    const int64_t outputTimeNanos = outputResult.value().timestamp;

    // Output frame that corresponds to the input frame.
    const int64_t mappedPosition = inputPosition + writeReadDelta;
    // Predict when that frame will be played by the output stream.
    const int32_t sampleRate = mFullDuplexLatency->getOutputStream()->getSampleRate();
    const int64_t mappedTimeNanos =
            outputTimeNanos + ((mappedPosition - outputPosition) * 1e9) / sampleRate;

    // Time between recording a frame and hearing its echo, in milliseconds.
    return (mappedTimeNanos - inputTimeNanos) * 1.0e-6;
}
// ======================================================================= ActivityGlitches
// Lazily create the duplex analyzer around the glitch analyzer, then
// attach it as the output callback. Only output uses a callback; input
// is polled.
void ActivityGlitches::configureBuilder(bool isInput, oboe::AudioStreamBuilder &builder) {
    ActivityFullDuplex::configureBuilder(isInput, builder);
    if (!mFullDuplexGlitches) {
        mFullDuplexGlitches = std::make_unique<FullDuplexAnalyzer>(&mGlitchAnalyzer);
    }
    if (isInput) {
        return; // input side: no callback
    }
    builder.setCallback((oboe::AudioStreamCallback *) &oboeCallbackProxy);
    oboeCallbackProxy.setDataCallback(mFullDuplexGlitches.get());
}
// Register the freshly opened stream with the glitch analyzer; the input
// side also receives the recording buffer.
void ActivityGlitches::finishOpen(bool isInput, std::shared_ptr<oboe::AudioStream> &oboeStream) {
    if (isInput) {
        mFullDuplexGlitches->setSharedInputStream(oboeStream);
        mFullDuplexGlitches->setRecording(mRecording.get());
        return;
    }
    mFullDuplexGlitches->setSharedOutputStream(oboeStream);
}
// ======================================================================= ActivityDataPath
// Lazily create the duplex analyzer around the data-path analyzer, then
// attach it as the output callback. Only output uses a callback; input
// is polled.
void ActivityDataPath::configureBuilder(bool isInput, oboe::AudioStreamBuilder &builder) {
    ActivityFullDuplex::configureBuilder(isInput, builder);
    if (!mFullDuplexDataPath) {
        mFullDuplexDataPath = std::make_unique<FullDuplexAnalyzer>(&mDataPathAnalyzer);
    }
    if (isInput) {
        return; // input side: no callback
    }
    builder.setCallback((oboe::AudioStreamCallback *) &oboeCallbackProxy);
    oboeCallbackProxy.setDataCallback(mFullDuplexDataPath.get());
}
// Register the freshly opened stream with the data-path analyzer; the
// input side also receives the recording buffer.
void ActivityDataPath::finishOpen(bool isInput, std::shared_ptr<oboe::AudioStream> &oboeStream) {
    if (isInput) {
        mFullDuplexDataPath->setSharedInputStream(oboeStream);
        mFullDuplexDataPath->setRecording(mRecording.get());
        return;
    }
    mFullDuplexDataPath->setSharedOutputStream(oboeStream);
}
// =================================================================== ActivityTestDisconnect
// Close the stream, then release the sink that was feeding it.
void ActivityTestDisconnect::close(int32_t streamIndex) {
    ActivityContext::close(streamIndex);
    mSinkFloat.reset();
}
// Build the signal graph for the disconnect test. For an output stream:
// a 440 Hz sine through a mono-to-multi converter into a float sink.
// For an input-only test there is nothing to generate, so the gateway
// gets a null sink. The gateway always serves as the data callback.
void ActivityTestDisconnect::configureAfterOpen() {
    std::shared_ptr<oboe::AudioStream> outputStream = getOutputStream();
    std::shared_ptr<oboe::AudioStream> inputStream = getInputStream();
    if (outputStream) {
        mSinkFloat = std::make_unique<SinkFloat>(mChannelCount);
        sineOscillator = std::make_unique<SineOscillator>();
        monoToMulti = std::make_unique<MonoToMultiConverter>(mChannelCount);
        sineOscillator->setSampleRate(outputStream->getSampleRate());
        sineOscillator->frequency.setValue(440.0);
        sineOscillator->amplitude.setValue(AMPLITUDE_SINE);
        // Wiring order: oscillator -> converter -> sink.
        sineOscillator->output.connect(&(monoToMulti->input));
        monoToMulti->output.connect(&(mSinkFloat->input));
        mSinkFloat->pullReset();
        audioStreamGateway.setAudioSink(mSinkFloat);
    } else if (inputStream) {
        audioStreamGateway.setAudioSink(nullptr);
    }
    oboeCallbackProxy.setDataCallback(&audioStreamGateway);
}

View file

@ -0,0 +1,836 @@
/*
* Copyright 2015 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef NATIVEOBOE_NATIVEAUDIOCONTEXT_H
#define NATIVEOBOE_NATIVEAUDIOCONTEXT_H
#include <jni.h>
#include <sys/system_properties.h>
#include <thread>
#include <unordered_map>
#include <vector>
#include "common/OboeDebug.h"
#include "oboe/Oboe.h"
#include "aaudio/AAudioExtensions.h"
#include "AudioStreamGateway.h"
#include "SinkMemoryDirect.h"
#include "flowunits/ImpulseOscillator.h"
#include "flowgraph/ManyToMultiConverter.h"
#include "flowgraph/MonoToMultiConverter.h"
#include "flowgraph/RampLinear.h"
#include "flowgraph/SinkFloat.h"
#include "flowgraph/SinkI16.h"
#include "flowgraph/SinkI24.h"
#include "flowgraph/SinkI32.h"
#include "flowunits/ExponentialShape.h"
#include "flowunits/LinearShape.h"
#include "flowunits/SineOscillator.h"
#include "flowunits/SawtoothOscillator.h"
#include "flowunits/TriangleOscillator.h"
#include "flowunits/WhiteNoise.h"
#include "FullDuplexAnalyzer.h"
#include "FullDuplexEcho.h"
#include "analyzer/GlitchAnalyzer.h"
#include "analyzer/DataPathAnalyzer.h"
#include "InputStreamCallbackAnalyzer.h"
#include "MultiChannelRecording.h"
#include "OboeStreamCallbackProxy.h"
#include "OboeTools.h"
#include "PlayRecordingCallback.h"
#include "SawPingGenerator.h"
// These must match order in strings.xml and in StreamConfiguration.java
#define NATIVE_MODE_UNSPECIFIED 0
#define NATIVE_MODE_OPENSLES 1
#define NATIVE_MODE_AAUDIO 2
#define MAX_SINE_OSCILLATORS 16
#define AMPLITUDE_SINE 1.0
#define AMPLITUDE_SAWTOOTH 0.5
#define FREQUENCY_SAW_PING 800.0
#define AMPLITUDE_SAW_PING 0.8
#define AMPLITUDE_IMPULSE 0.7
#define SECONDS_TO_RECORD 10
/**
 * Abstract base class that corresponds to a test at the Java level.
 * Owns the map of open Oboe streams, the shared callback proxy, and the
 * optional blocking-I/O thread that substitutes for a callback.
 */
class ActivityContext {
public:

    ActivityContext() {}
    virtual ~ActivityContext() = default;

    // Look up a stream by the index returned from open(); nullptr if unknown.
    std::shared_ptr<oboe::AudioStream> getStream(int32_t streamIndex) {
        auto it = mOboeStreams.find(streamIndex);
        if (it != mOboeStreams.end()) {
            return it->second;
        } else {
            return nullptr;
        }
    }

    // Subclasses adjust the builder before a stream is opened.
    virtual void configureBuilder(bool isInput, oboe::AudioStreamBuilder &builder);

    /**
     * Open a stream with the given parameters.
     * @param nativeApi
     * @param sampleRate
     * @param channelCount
     * @param channelMask
     * @param format
     * @param sharingMode
     * @param performanceMode
     * @param inputPreset
     * @param deviceId
     * @param sessionId
     * @param framesPerBurst
     * @param channelConversionAllowed
     * @param formatConversionAllowed
     * @param rateConversionQuality
     * @param isMMap
     * @param isInput
     * @param spatializationBehavior
     * @return stream ID
     */
    int open(jint nativeApi,
             jint sampleRate,
             jint channelCount,
             jint channelMask,
             jint format,
             jint sharingMode,
             jint performanceMode,
             jint inputPreset,
             jint usage,
             jint contentType,
             jint bufferCapacityInFrames,
             jint deviceId,
             jint sessionId,
             jboolean channelConversionAllowed,
             jboolean formatConversionAllowed,
             jint rateConversionQuality,
             jboolean isMMap,
             jboolean isInput,
             jint spatializationBehavior);

    oboe::Result release();

    virtual void close(int32_t streamIndex);

    // Hook for subclasses to build their signal graph after open().
    virtual void configureAfterOpen() {}

    oboe::Result start();

    oboe::Result pause();

    oboe::Result flush();

    oboe::Result stopAllStreams();

    virtual oboe::Result stop() {
        return stopAllStreams();
    }

    float getCpuLoad() {
        return oboeCallbackProxy.getCpuLoad();
    }

    float getAndResetMaxCpuLoad() {
        return oboeCallbackProxy.getAndResetMaxCpuLoad();
    }

    uint32_t getAndResetCpuMask() {
        return oboeCallbackProxy.getAndResetCpuMask();
    }

    std::string getCallbackTimeString() {
        return oboeCallbackProxy.getCallbackTimeString();
    }

    void setWorkload(int32_t workload) {
        oboeCallbackProxy.setWorkload(workload);
    }

    void setHearWorkload(bool enabled) {
        oboeCallbackProxy.setHearWorkload(enabled);
    }

    virtual oboe::Result startPlayback() {
        return oboe::Result::OK;
    }

    virtual oboe::Result stopPlayback() {
        return oboe::Result::OK;
    }

    // Body of the thread that replaces the callback in blocking-I/O mode.
    virtual void runBlockingIO() {};

    static void threadCallback(ActivityContext *context) {
        context->runBlockingIO();
    }

    void stopBlockingIOThread() {
        if (dataThread != nullptr) {
            // stop a thread that runs in place of the callback
            threadEnabled.store(false); // ask thread to exit its loop
            dataThread->join();
            dataThread = nullptr;
        }
    }

    virtual double getPeakLevel(int index) {
        return 0.0;
    }

    // Current time from the given clock, in nanoseconds; negative on error.
    static int64_t getNanoseconds(clockid_t clockId = CLOCK_MONOTONIC) {
        struct timespec time;
        int result = clock_gettime(clockId, &time);
        if (result < 0) {
            return result;
        }
        return (time.tv_sec * NANOS_PER_SECOND) + time.tv_nsec;
    }

    // Calculate time between beginning and when frame[0] occurred.
    int32_t calculateColdStartLatencyMillis(int32_t sampleRate,
                                            int64_t beginTimeNanos,
                                            int64_t timeStampPosition,
                                            int64_t timestampNanos) const {
        int64_t elapsedNanos = NANOS_PER_SECOND * (timeStampPosition / (double) sampleRate);
        int64_t timeOfFrameZero = timestampNanos - elapsedNanos;
        int64_t coldStartLatencyNanos = timeOfFrameZero - beginTimeNanos;
        return coldStartLatencyNanos / NANOS_PER_MILLISECOND;
    }

    // Cold-start latency of the input stream in millis, or -1 if unavailable.
    int32_t getColdStartInputMillis() {
        std::shared_ptr<oboe::AudioStream> oboeStream = getInputStream();
        if (oboeStream != nullptr) {
            int64_t framesRead = oboeStream->getFramesRead();
            if (framesRead > 0) {
                // Base latency on the time that frame[0] would have been received by the app.
                int64_t nowNanos = getNanoseconds();
                return calculateColdStartLatencyMillis(oboeStream->getSampleRate(),
                                                       mInputOpenedAt,
                                                       framesRead,
                                                       nowNanos);
            }
        }
        return -1;
    }

    // Cold-start latency of the output stream in millis, or -1 if unavailable.
    int32_t getColdStartOutputMillis() {
        std::shared_ptr<oboe::AudioStream> oboeStream = getOutputStream();
        if (oboeStream != nullptr) {
            auto result = oboeStream->getTimestamp(CLOCK_MONOTONIC);
            if (result) {
                auto frameTimestamp = result.value();
                // Calculate the time that frame[0] would have been played by the speaker.
                int64_t position = frameTimestamp.position;
                int64_t timestampNanos = frameTimestamp.timestamp;
                return calculateColdStartLatencyMillis(oboeStream->getSampleRate(),
                                                       mOutputOpenedAt,
                                                       position,
                                                       timestampNanos);
            }
        }
        return -1;
    }

    /**
     * Trigger a sound or impulse.
     * @param enabled
     */
    virtual void trigger() {}

    bool isMMapUsed(int32_t streamIndex);

    int32_t getFramesPerBlock() {
        return (callbackSize == 0) ? mFramesPerBurst : callbackSize;
    }

    int64_t getCallbackCount() {
        return oboeCallbackProxy.getCallbackCount();
    }

    // Last error reported by the error callback of the output stream
    // (falling back to the input stream), or ErrorNull when neither is open.
    oboe::Result getLastErrorCallbackResult() {
        std::shared_ptr<oboe::AudioStream> stream = getOutputStream();
        if (stream == nullptr) {
            stream = getInputStream();
        }
        // FIXED: the ternary was inverted — it returned ErrorNull for a valid
        // stream and dereferenced a null stream otherwise.
        return stream ? stream->getLastErrorCallbackResult() : oboe::Result::ErrorNull;
    }

    int32_t getFramesPerCallback() {
        return oboeCallbackProxy.getFramesPerCallback();
    }

    virtual void setChannelEnabled(int channelIndex, bool enabled) {}

    virtual void setSignalType(int signalType) {}

    virtual void setAmplitude(float amplitude) {}

    virtual int32_t saveWaveFile(const char *filename);

    virtual void setMinimumFramesBeforeRead(int32_t numFrames) {}

    static bool mUseCallback;
    static int callbackSize;

    double getTimestampLatency(int32_t streamIndex);

    void setCpuAffinityMask(uint32_t mask) {
        oboeCallbackProxy.setCpuAffinityMask(mask);
    }

    void setWorkloadReportingEnabled(bool enabled) {
        oboeCallbackProxy.setWorkloadReportingEnabled(enabled);
    }

    virtual void setupMemoryBuffer([[maybe_unused]] std::unique_ptr<uint8_t[]>& buffer,
                                   [[maybe_unused]] int length) {}

protected:
    std::shared_ptr<oboe::AudioStream> getInputStream();
    std::shared_ptr<oboe::AudioStream> getOutputStream();
    int32_t allocateStreamIndex();
    void freeStreamIndex(int32_t streamIndex);

    virtual void createRecording() {
        mRecording = std::make_unique<MultiChannelRecording>(mChannelCount,
                                                             SECONDS_TO_RECORD * mSampleRate);
    }

    virtual void finishOpen(bool isInput, std::shared_ptr<oboe::AudioStream> &oboeStream) {}

    virtual oboe::Result startStreams() = 0;

    std::unique_ptr<float []> dataBuffer{};

    AudioStreamGateway audioStreamGateway;
    OboeStreamCallbackProxy oboeCallbackProxy;

    std::unique_ptr<MultiChannelRecording> mRecording{};

    int32_t mNextStreamHandle = 0;
    std::unordered_map<int32_t, std::shared_ptr<oboe::AudioStream>> mOboeStreams;
    int32_t mFramesPerBurst = 0; // TODO per stream
    int32_t mChannelCount = 0; // TODO per stream
    int32_t mSampleRate = 0; // TODO per stream

    std::atomic<bool> threadEnabled{false};
    std::thread *dataThread = nullptr; // FIXME never gets deleted

private:
    int64_t mInputOpenedAt = 0;
    int64_t mOutputOpenedAt = 0;
};
/**
 * Test a single input stream by feeding it to an InputStreamCallbackAnalyzer.
 */
class ActivityTestInput : public ActivityContext {
public:

    ActivityTestInput() {}
    virtual ~ActivityTestInput() = default;

    void configureAfterOpen() override;

    // Peak level measured by the analyzer for the given channel.
    double getPeakLevel(int index) override {
        return mInputAnalyzer.getPeakLevel(index);
    }

    void runBlockingIO() override;

    // Require this many frames to be buffered before each blocking read.
    void setMinimumFramesBeforeRead(int32_t numFrames) override {
        mInputAnalyzer.setMinimumFramesBeforeRead(numFrames);
        mMinimumFramesBeforeRead = numFrames;
    }

    int32_t getMinimumFramesBeforeRead() const {
        return mMinimumFramesBeforeRead;
    }

protected:

    // Reset and size the analyzer for the stream, then start the input stream.
    oboe::Result startStreams() override {
        mInputAnalyzer.reset();
        mInputAnalyzer.setup(std::max(getInputStream()->getFramesPerBurst(), callbackSize),
                             getInputStream()->getChannelCount(),
                             getInputStream()->getFormat());
        return getInputStream()->requestStart();
    }

    InputStreamCallbackAnalyzer  mInputAnalyzer;
    int32_t mMinimumFramesBeforeRead = 0;
};
/**
 * Record a configured input stream and play it back some simple way.
 */
class ActivityRecording : public ActivityTestInput {
public:

    ActivityRecording() {}
    virtual ~ActivityRecording() = default;

    // Stop playback first, then the recording streams; report the first failure.
    oboe::Result stop() override {
        oboe::Result resultStopPlayback = stopPlayback();
        oboe::Result resultStopAudio = ActivityContext::stop();
        return (resultStopPlayback != oboe::Result::OK) ? resultStopPlayback : resultStopAudio;
    }

    oboe::Result startPlayback() override;

    oboe::Result stopPlayback() override;

    PlayRecordingCallback   mPlayRecordingCallback;
    // Raw pointer owned by this class; created in startPlayback(), deleted in stopPlayback().
    oboe::AudioStream      *playbackStream = nullptr;

};
/**
 * Test a single output stream driven by a bank of oscillators feeding
 * one sink per supported sample format.
 */
class ActivityTestOutput : public ActivityContext {
public:
    ActivityTestOutput()
            : sineOscillators(MAX_SINE_OSCILLATORS)
            , sawtoothOscillators(MAX_SINE_OSCILLATORS) {}

    virtual ~ActivityTestOutput() = default;

    void close(int32_t streamIndex) override;

    oboe::Result startStreams() override;

    void configureAfterOpen() override;

    virtual void configureStreamGateway();

    void runBlockingIO() override;

    void setChannelEnabled(int channelIndex, bool enabled) override;

    // WARNING - must match order in strings.xml and OboeAudioOutputStream.java
    enum SignalType {
        Sine = 0,
        Sawtooth = 1,
        FreqSweep = 2,
        PitchSweep = 3,
        WhiteNoise = 4
    };

    void setSignalType(int signalType) override {
        mSignalType = (SignalType) signalType;
    }

    // Store the amplitude and apply it immediately if the ramp exists.
    void setAmplitude(float amplitude) override {
        mAmplitude = amplitude;
        if (mVolumeRamp) {
            mVolumeRamp->setTarget(mAmplitude);
        }
    }

    void setupMemoryBuffer(std::unique_ptr<uint8_t[]>& buffer, int length) final;

protected:
    SignalType mSignalType = SignalType::Sine;

    std::vector<SineOscillator> sineOscillators;
    std::vector<SawtoothOscillator> sawtoothOscillators;
    static constexpr float kSweepPeriod = 10.0; // for triangle up and down

    // A triangle LFO is shaped into either a linear or an exponential range for sweep.
    TriangleOscillator mTriangleOscillator;
    LinearShape mLinearShape;
    ExponentialShape mExponentialShape;
    // 'class' keyword disambiguates the type from the SignalType enumerator.
    class WhiteNoise mWhiteNoise;

    static constexpr int kRampMSec = 10; // for volume control
    float mAmplitude = 1.0f;
    std::shared_ptr<RampLinear> mVolumeRamp;

    std::unique_ptr<ManyToMultiConverter> manyToMulti;
    std::unique_ptr<MonoToMultiConverter> monoToMulti;
    std::shared_ptr<oboe::flowgraph::SinkFloat> mSinkFloat;
    std::shared_ptr<oboe::flowgraph::SinkI16> mSinkI16;
    std::shared_ptr<oboe::flowgraph::SinkI24> mSinkI24;
    std::shared_ptr<oboe::flowgraph::SinkI32> mSinkI32;
    std::shared_ptr<SinkMemoryDirect> mSinkMemoryDirect;
};
/**
 * Generate a short beep with a very short attack.
 * This is used by Java to measure output latency.
 */
class ActivityTapToTone : public ActivityTestOutput {
public:
    ActivityTapToTone() {}
    virtual ~ActivityTapToTone() = default;

    void configureAfterOpen() override;

    // Fire the saw ping in response to a tap.
    virtual void trigger() override {
        sawPingGenerator.trigger();
    }

    SawPingGenerator sawPingGenerator;
};
/**
 * Activity that uses synchronized input/output streams.
 * Subclasses supply a FullDuplexAnalyzer that processes both sides.
 */
class ActivityFullDuplex : public ActivityContext {
public:

    void configureBuilder(bool isInput, oboe::AudioStreamBuilder &builder) override;

    // Analyzer state/result codes; -1 means "not applicable" by default.
    virtual int32_t getState() { return -1; }
    virtual int32_t getResult() { return -1; }

    virtual bool isAnalyzerDone() { return false; }

    void setMinimumFramesBeforeRead(int32_t numFrames) override {
        getFullDuplexAnalyzer()->setMinimumFramesBeforeRead(numFrames);
    }

    virtual FullDuplexAnalyzer *getFullDuplexAnalyzer() = 0;

    int32_t getResetCount() {
        return getFullDuplexAnalyzer()->getLoopbackProcessor()->getResetCount();
    }

protected:
    // Duplex recordings hold two channels: one for output, one for input.
    void createRecording() override {
        mRecording = std::make_unique<MultiChannelRecording>(2, // output and input
                                                             SECONDS_TO_RECORD * mSampleRate);
    }
};
/**
 * Echo input to output through a delay line.
 */
class ActivityEcho : public ActivityFullDuplex {
public:

    oboe::Result startStreams() override {
        return mFullDuplexEcho->start();
    }

    void configureBuilder(bool isInput, oboe::AudioStreamBuilder &builder) override;

    // Safe to call before configureBuilder(); silently ignored until then.
    void setDelayTime(double delayTimeSeconds) {
        if (mFullDuplexEcho) {
            mFullDuplexEcho->setDelayTime(delayTimeSeconds);
        }
    }

    double getPeakLevel(int index) override {
        return mFullDuplexEcho->getPeakLevel(index);
    }

    FullDuplexAnalyzer *getFullDuplexAnalyzer() override {
        return (FullDuplexAnalyzer *) mFullDuplexEcho.get();
    }

protected:
    void finishOpen(bool isInput, std::shared_ptr<oboe::AudioStream> &oboeStream) override;

private:
    std::unique_ptr<FullDuplexEcho>   mFullDuplexEcho{};
};
/**
 * Measure Round Trip Latency using a loopback analyzer; analysis is
 * launched on a separate thread once enough data has been captured.
 */
class ActivityRoundTripLatency : public ActivityFullDuplex {
public:
    ActivityRoundTripLatency() {
#define USE_WHITE_NOISE_ANALYZER 1
#if USE_WHITE_NOISE_ANALYZER
        // New analyzer that uses a short pattern of white noise bursts.
        mLatencyAnalyzer = std::make_unique<WhiteNoiseLatencyAnalyzer>();
#else
        // Old analyzer based on encoded random bits.
        mLatencyAnalyzer = std::make_unique<EncodedRandomLatencyAnalyzer>();
#endif
        mLatencyAnalyzer->setup();
    }
    virtual ~ActivityRoundTripLatency() = default;

    oboe::Result startStreams() override {
        mAnalyzerLaunched = false;
        return mFullDuplexLatency->start();
    }

    void configureBuilder(bool isInput, oboe::AudioStreamBuilder &builder) override;

    LatencyAnalyzer *getLatencyAnalyzer() {
        return mLatencyAnalyzer.get();
    }

    int32_t getState() override {
        return getLatencyAnalyzer()->getState();
    }

    int32_t getResult() override {
        return getLatencyAnalyzer()->getState(); // TODO This does not look right.
    }

    // Kick off the background analysis (once) and report completion.
    bool isAnalyzerDone() override {
        if (!mAnalyzerLaunched) {
            mAnalyzerLaunched = launchAnalysisIfReady();
        }
        return mLatencyAnalyzer->isDone();
    }

    FullDuplexAnalyzer *getFullDuplexAnalyzer() override {
        return (FullDuplexAnalyzer *) mFullDuplexLatency.get();
    }

    // Thread entry point for the background analysis.
    static void analyzeData(LatencyAnalyzer *analyzer) {
        analyzer->analyze();
    }

    bool launchAnalysisIfReady() {
        // Are we ready to do the analysis?
        if (mLatencyAnalyzer->hasEnoughData()) {
            // Crunch the numbers on a separate thread.
            std::thread t(analyzeData, mLatencyAnalyzer.get());
            t.detach();
            return true;
        }
        return false;
    }

    jdouble measureTimestampLatency();

protected:
    void finishOpen(bool isInput, std::shared_ptr<oboe::AudioStream> &oboeStream) override;

private:
    std::unique_ptr<FullDuplexAnalyzer>   mFullDuplexLatency{};

    std::unique_ptr<LatencyAnalyzer>  mLatencyAnalyzer;
    bool                              mAnalyzerLaunched = false;
};
/**
 * Measure Glitches in a loopback path using a GlitchAnalyzer.
 */
class ActivityGlitches : public ActivityFullDuplex {
public:

    oboe::Result startStreams() override {
        return mFullDuplexGlitches->start();
    }

    void configureBuilder(bool isInput, oboe::AudioStreamBuilder &builder) override;

    GlitchAnalyzer *getGlitchAnalyzer() {
        return &mGlitchAnalyzer;
    }

    int32_t getState() override {
        return getGlitchAnalyzer()->getState();
    }

    int32_t getResult() override {
        return getGlitchAnalyzer()->getResult();
    }

    bool isAnalyzerDone() override {
        return mGlitchAnalyzer.isDone();
    }

    FullDuplexAnalyzer *getFullDuplexAnalyzer() override {
        return (FullDuplexAnalyzer *) mFullDuplexGlitches.get();
    }

protected:
    void finishOpen(bool isInput, std::shared_ptr<oboe::AudioStream> &oboeStream) override;

private:
    std::unique_ptr<FullDuplexAnalyzer>   mFullDuplexGlitches{};
    GlitchAnalyzer  mGlitchAnalyzer;
};
/**
* Measure Data Path
*/
class ActivityDataPath : public ActivityFullDuplex {
public:
oboe::Result startStreams() override {
return mFullDuplexDataPath->start();
}
void configureBuilder(bool isInput, oboe::AudioStreamBuilder &builder) override;
void configureAfterOpen() override {
// set buffer size
std::shared_ptr<oboe::AudioStream> outputStream = getOutputStream();
int32_t capacityInFrames = outputStream->getBufferCapacityInFrames();
int32_t burstInFrames = outputStream->getFramesPerBurst();
int32_t capacityInBursts = capacityInFrames / burstInFrames;
int32_t sizeInBursts = std::max(2, capacityInBursts / 2);
// Set size of buffer to minimize underruns.
auto result = outputStream->setBufferSizeInFrames(sizeInBursts * burstInFrames);
static_cast<void>(result); // Avoid unused variable.
LOGD("ActivityDataPath: %s() capacity = %d, burst = %d, size = %d",
__func__, capacityInFrames, burstInFrames, result.value());
}
DataPathAnalyzer *getDataPathAnalyzer() {
return &mDataPathAnalyzer;
}
FullDuplexAnalyzer *getFullDuplexAnalyzer() override {
return (FullDuplexAnalyzer *) mFullDuplexDataPath.get();
}
protected:
void finishOpen(bool isInput, std::shared_ptr<oboe::AudioStream> &oboeStream) override;
private:
std::unique_ptr<FullDuplexAnalyzer> mFullDuplexDataPath{};
DataPathAnalyzer mDataPathAnalyzer;
};
/**
 * Test stream disconnection using a single output (sine) or input stream.
 */
class ActivityTestDisconnect : public ActivityContext {
public:
    ActivityTestDisconnect() {}

    virtual ~ActivityTestDisconnect() = default;

    void close(int32_t streamIndex) override;

    // Start whichever stream was opened; ErrorNull when neither exists.
    oboe::Result startStreams() override {
        std::shared_ptr<oboe::AudioStream> outputStream = getOutputStream();
        if (outputStream) {
            return outputStream->start();
        }

        std::shared_ptr<oboe::AudioStream> inputStream = getInputStream();
        if (inputStream) {
            return inputStream->start();
        }
        return oboe::Result::ErrorNull;
    }

    void configureAfterOpen() override;

private:
    std::unique_ptr<SineOscillator>         sineOscillator;
    std::unique_ptr<MonoToMultiConverter>   monoToMulti;
    std::shared_ptr<oboe::flowgraph::SinkFloat>   mSinkFloat;
};
/**
 * Global context for native tests.
 * Switch between various ActivityContexts.
 */
class NativeAudioContext {
public:

    ActivityContext *getCurrentActivity() {
        return currentActivity;
    };

    // Select the activity that subsequent native calls operate on.
    void setActivityType(int activityType) {
        mActivityType = (ActivityType) activityType;
        switch(mActivityType) {
            default:
            case ActivityType::Undefined:
            case ActivityType::TestOutput:
                currentActivity = &mActivityTestOutput;
                break;
            case ActivityType::TestInput:
                currentActivity = &mActivityTestInput;
                break;
            case ActivityType::TapToTone:
                currentActivity = &mActivityTapToTone;
                break;
            case ActivityType::RecordPlay:
                currentActivity = &mActivityRecording;
                break;
            case ActivityType::Echo:
                currentActivity = &mActivityEcho;
                break;
            case ActivityType::RoundTripLatency:
                currentActivity = &mActivityRoundTripLatency;
                break;
            case ActivityType::Glitches:
                currentActivity = &mActivityGlitches;
                break;
            case ActivityType::TestDisconnect:
                currentActivity = &mActivityTestDisconnect;
                break;
            case ActivityType::DataPath:
                currentActivity = &mActivityDataPath;
                break;
        }
    }

    // NOTE(review): parameter is named delayTimeMillis but
    // ActivityEcho::setDelayTime names its parameter delayTimeSeconds —
    // confirm the units expected by the Java caller.
    void setDelayTime(double delayTimeMillis) {
        mActivityEcho.setDelayTime(delayTimeMillis);
    }

    ActivityTestOutput           mActivityTestOutput;
    ActivityTestInput            mActivityTestInput;
    ActivityTapToTone            mActivityTapToTone;
    ActivityRecording            mActivityRecording;
    ActivityEcho                 mActivityEcho;
    ActivityRoundTripLatency     mActivityRoundTripLatency;
    ActivityGlitches             mActivityGlitches;
    ActivityDataPath             mActivityDataPath;
    ActivityTestDisconnect       mActivityTestDisconnect;

private:

    // WARNING - must match definitions in TestAudioActivity.java
    enum ActivityType {
        Undefined = -1,
        TestOutput = 0,
        TestInput = 1,
        TapToTone = 2,
        RecordPlay = 3,
        Echo = 4,
        RoundTripLatency = 5,
        Glitches = 6,
        TestDisconnect = 7,
        DataPath = 8,
    };

    ActivityType mActivityType = ActivityType::Undefined;
    ActivityContext *currentActivity = &mActivityTestOutput;
};
#endif //NATIVEOBOE_NATIVEAUDIOCONTEXT_H

View file

@ -0,0 +1,117 @@
/*
* Copyright 2017 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "common/OboeDebug.h"
#include "OboeStreamCallbackProxy.h"
// Static: when set true (via setCallbackReturnStop), every proxy instance
// returns Stop from its next onAudioReady() call.
bool OboeStreamCallbackProxy::mCallbackReturnStop = false;

/**
 * Audio callback. Runs the optional wrapped data callback plus an
 * adjustable synthesizer workload, and gathers CPU-load and
 * callback-interval statistics along the way.
 */
oboe::DataCallbackResult OboeStreamCallbackProxy::onAudioReady(
        oboe::AudioStream *audioStream,
        void *audioData,
        int numFrames) {
    oboe::DataCallbackResult callbackResult = oboe::DataCallbackResult::Stop;
    int64_t startTimeNanos = getNanoseconds();
    int32_t numWorkloadVoices = mNumWorkloadVoices;

    // Record which CPU this is running on.
    orCurrentCpuMask(sched_getcpu());

    // Tell ADPF in advance what our workload will be.
    if (mWorkloadReportingEnabled) {
        audioStream->reportWorkload(numWorkloadVoices);
    }

    // Change affinity if app requested a change.
    uint32_t mask = mCpuAffinityMask;
    if (mask != mPreviousMask) {
        int err = applyCpuAffinityMask(mask);
        if (err != 0) {
            // NOTE(review): failure is deliberately ignored so the
            // callback keeps running; applyCpuAffinityMask() already logs.
        }
        mPreviousMask = mask;
    }

    mCallbackCount++;
    mFramesPerCallback = numFrames;

    if (mCallbackReturnStop) {
        return oboe::DataCallbackResult::Stop;
    }

    if (mCallback != nullptr) {
        callbackResult = mCallback->onAudioReady(audioStream, audioData, numFrames);
    }

    // Advance the workload's noteOn/noteOff state machine every callback.
    mSynthWorkload.onCallback(numWorkloadVoices);
    if (numWorkloadVoices > 0) {
        // Render into the buffer or discard the synth voices.
        float *buffer = (audioStream->getChannelCount() == 2 && mHearWorkload)
                        ? static_cast<float *>(audioData) : nullptr;
        mSynthWorkload.renderStereo(buffer, numFrames);
    }

    // Measure CPU load.
    int64_t currentTimeNanos = getNanoseconds();
    // Sometimes we get a short callback when doing sample rate conversion.
    // Just ignore those to avoid noise.
    if (numFrames > (getFramesPerCallback() / 2)) {
        int64_t calculationTime = currentTimeNanos - startTimeNanos;
        // Load = fraction of the real-time budget spent inside this callback.
        float currentCpuLoad = calculationTime * 0.000000001f * audioStream->getSampleRate() / numFrames;
        mCpuLoad = (mCpuLoad * 0.95f) + (currentCpuLoad * 0.05f); // simple low pass filter
        mMaxCpuLoad = std::max(currentCpuLoad, mMaxCpuLoad.load());
    }

    // Track callback-to-callback interval statistics, in milliseconds.
    if (mPreviousCallbackTimeNs != 0) {
        mStatistics.add((currentTimeNanos - mPreviousCallbackTimeNs) * kNsToMsScaler);
    }
    mPreviousCallbackTimeNs = currentTimeNanos;
    return callbackResult;
}
/**
 * Apply a CPU affinity mask to the calling thread, or restore the
 * original affinity when mask == 0.
 * @param mask one bit per CPU index, or zero to restore the saved set
 * @return 0 on success or a negative errno
 */
int OboeStreamCallbackProxy::applyCpuAffinityMask(uint32_t mask) {
    int err = 0;
    // Capture the original CPU set once so it can be restored when mask == 0.
    if (!mIsOriginalCpuSetValid) {
        err = sched_getaffinity((pid_t) 0,
                                sizeof(mOriginalCpuSet),
                                &mOriginalCpuSet);
        if (err) {
            LOGE("%s(0x%02X) - sched_getaffinity(), errno = %d\n", __func__, mask, errno);
            return -errno;
        }
        mIsOriginalCpuSetValid = true;
    }

    if (mask) {
        cpu_set_t cpu_set;
        CPU_ZERO(&cpu_set);
        int cpuCount = (int) sysconf(_SC_NPROCESSORS_CONF);
        // The mask only has 32 bits, so clamp the loop on machines with more
        // CPUs. Also shift an unsigned constant: shifting a signed 1 by 31+
        // positions is undefined behavior.
        int bitCount = std::min(cpuCount, 32);
        for (int cpuIndex = 0; cpuIndex < bitCount; cpuIndex++) {
            if (mask & (1u << cpuIndex)) {
                CPU_SET(cpuIndex, &cpu_set);
            }
        }
        err = sched_setaffinity((pid_t) 0, sizeof(cpu_set_t), &cpu_set);
    } else {
        // Restore original mask.
        err = sched_setaffinity((pid_t) 0, sizeof(mOriginalCpuSet), &mOriginalCpuSet);
    }
    if (err) {
        LOGE("%s(0x%02X) - sched_setaffinity(), errno = %d\n", __func__, mask, errno);
        return -errno;
    }
    return 0;
}

View file

@ -0,0 +1,273 @@
/*
* Copyright 2017 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef NATIVEOBOE_OBOESTREAMCALLBACKPROXY_H
#define NATIVEOBOE_OBOESTREAMCALLBACKPROXY_H
#include <unistd.h>
#include <sys/types.h>
#include <sys/sysinfo.h>
#include "oboe/Oboe.h"
#include "synth/Synthesizer.h"
#include "synth/SynthTools.h"
#include "OboeTesterStreamCallback.h"
/**
 * Accumulate min/max/average statistics for a stream of doubles.
 * The first kNumberStatisticsToSkip samples are discarded so startup
 * transients do not pollute the results; non-positive values are ignored.
 */
class DoubleStatistics {
public:
    // Add one sample unless still in the skip phase or it is non-positive.
    void add(double statistic) {
        if (skipCount < kNumberStatisticsToSkip) {
            skipCount++;
        } else {
            if (statistic <= 0.0) return;
            sum = statistic + sum;
            count++;
            minimum = std::min(statistic, minimum.load());
            maximum = std::max(statistic, maximum.load());
        }
    }

    /**
     * @return average of the accepted samples, or 0.0 if none were accepted.
     * (The count == 0 guard prevents a 0.0/0 => NaN result.)
     */
    double getAverage() const {
        return (count == 0) ? 0.0 : sum / count;
    }

    // Format as "min/average/max ms", or "?" if no samples were accepted.
    std::string dump() const {
        if (count == 0) return "?";
        char buff[100];
        snprintf(buff, sizeof(buff), "%3.1f/%3.1f/%3.1f ms", minimum.load(), getAverage(), maximum.load());
        std::string buffAsStr = buff;
        return buffAsStr;
    }

    // Discard all state, including the skip phase.
    void clear() {
        skipCount = 0;
        sum = 0;
        count = 0;
        minimum = DBL_MAX;
        maximum = 0;
    }

private:
    static constexpr double kNumberStatisticsToSkip = 5; // Skip the first 5 frames
    std::atomic<int> skipCount { 0 };
    std::atomic<double> sum { 0 };
    std::atomic<int> count { 0 };
    std::atomic<double> minimum { DBL_MAX };
    std::atomic<double> maximum { 0 };
};
/**
* Manage the synthesizer workload that burdens the CPU.
* Adjust the number of voices according to the requested workload.
* Trigger noteOn and noteOff messages.
*/
class SynthWorkload {
public:
    SynthWorkload() {
        // Configure the synth for its maximum voice count up front.
        mSynth.setup(marksynth::kSynthmarkSampleRate, marksynth::kSynthmarkMaxVoices);
    }

    /**
     * Called once per audio callback.
     * Alternates between notes-on and notes-off phases and restarts the
     * cycle whenever the requested workload changes.
     * @param workload number of voices to play during the "on" phase
     */
    void onCallback(double workload) {
        // If workload changes then restart notes.
        if (workload != mPreviousWorkload) {
            mSynth.allNotesOff();
            mAreNotesOn = false;
            mCountdown = 0; // trigger notes on
            mPreviousWorkload = workload;
        }
        if (mCountdown <= 0) {
            if (mAreNotesOn) {
                mSynth.allNotesOff();
                mAreNotesOn = false;
                mCountdown = mOffFrames;
            } else {
                mSynth.notesOn((int)mPreviousWorkload);
                mAreNotesOn = true;
                mCountdown = mOnFrames;
            }
        }
    }

    /**
     * Render the notes into a stereo buffer.
     * Passing a nullptr will cause the calculated results to be discarded.
     * The workload should be the same.
     * @param buffer a real stereo buffer or nullptr
     * @param numFrames
     */
    void renderStereo(float *buffer, int numFrames) {
        if (buffer == nullptr) {
            // Render in small chunks into the scratch buffer so the CPU
            // work happens even though the audio is discarded.
            int framesLeft = numFrames;
            while (framesLeft > 0) {
                int framesThisTime = std::min(kDummyBufferSizeInFrames, framesLeft);
                // Do the work then throw it away.
                mSynth.renderStereo(&mDummyStereoBuffer[0], framesThisTime);
                framesLeft -= framesThisTime;
            }
        } else {
            mSynth.renderStereo(buffer, numFrames);
        }
        mCountdown -= numFrames;
    }

private:
    marksynth::Synthesizer mSynth;
    // Scratch buffer used when the rendered audio is discarded.
    static constexpr int kDummyBufferSizeInFrames = 32;
    float mDummyStereoBuffer[kDummyBufferSizeInFrames * 2];
    double mPreviousWorkload = 1.0;
    bool mAreNotesOn = false;
    int mCountdown = 0;                    // frames until the next on/off toggle
    int mOnFrames = (int) (0.2 * 48000);   // ~0.2 s note-on burst at 48 kHz
    int mOffFrames = (int) (0.3 * 48000);  // ~0.3 s of silence at 48 kHz
};
class OboeStreamCallbackProxy : public OboeTesterStreamCallback {
public:
    // Install the callback to be wrapped and reset counters/statistics.
    void setDataCallback(oboe::AudioStreamDataCallback *callback) {
        mCallback = callback;
        setCallbackCount(0);
        mStatistics.clear();
        mPreviousMask = 0;
    }

    // When true, every proxy instance returns Stop from its next callback.
    static void setCallbackReturnStop(bool b) {
        mCallbackReturnStop = b;
    }

    int64_t getCallbackCount() {
        return mCallbackCount;
    }

    void setCallbackCount(int64_t count) {
        mCallbackCount = count;
    }

    // Number of frames passed to the most recent callback.
    int32_t getFramesPerCallback() {
        return mFramesPerCallback.load();
    }

    /**
     * Called when the stream is ready to process audio.
     */
    oboe::DataCallbackResult onAudioReady(
            oboe::AudioStream *audioStream,
            void *audioData,
            int numFrames) override;

    /**
     * Specify the amount of artificial workload that will waste CPU cycles
     * and increase the CPU load.
     * @param workload typically ranges from 0 to 400
     */
    void setWorkload(int32_t workload) {
        mNumWorkloadVoices = std::max(0, workload); // negative values clamp to zero
    }

    int32_t getWorkload() const {
        return mNumWorkloadVoices;
    }

    // When enabled, the synth workload is mixed into the output
    // (stereo streams only — see onAudioReady()).
    void setHearWorkload(bool enabled) {
        mHearWorkload = enabled;
    }

    /**
     * This is the callback duration relative to the real-time equivalent.
     * So it may be higher than 1.0.
     * @return low pass filtered value for the fractional CPU load
     */
    float getCpuLoad() const {
        return mCpuLoad;
    }

    /**
     * Calling this will atomically reset the max to zero so only call
     * this from one client.
     *
     * @return last value of the maximum unfiltered CPU load.
     */
    float getAndResetMaxCpuLoad() {
        return mMaxCpuLoad.exchange(0.0f);
    }

    // Formatted min/average/max callback interval, e.g. "1.0/2.0/3.0 ms".
    std::string getCallbackTimeString() const {
        return mStatistics.dump();
    }

    /**
     * @return mask of the CPUs used since the last reset
     */
    uint32_t getAndResetCpuMask() {
        return mCpuMask.exchange(0);
    }

    // Record that a callback ran on the given CPU index.
    void orCurrentCpuMask(int cpuIndex) {
        mCpuMask |= (1 << cpuIndex);
    }

    /**
     * Pin the calling thread to a single CPU.
     * @param cpuIndex
     * @return 0 on success or a negative errno
     */
    int setCpuAffinity(int cpuIndex) {
        cpu_set_t cpu_set;
        CPU_ZERO(&cpu_set);
        CPU_SET(cpuIndex, &cpu_set);
        int err = sched_setaffinity((pid_t) 0, sizeof(cpu_set_t), &cpu_set);
        return err == 0 ? 0 : -errno;
    }

    /**
     * Apply an affinity mask to the calling thread (see .cpp).
     * @param mask bits for each CPU or zero for all
     * @return 0 on success or a negative errno
     */
    int applyCpuAffinityMask(uint32_t mask);

    // Affinity requested by the app; applied during the next callback.
    void setCpuAffinityMask(uint32_t mask) {
        mCpuAffinityMask = mask;
    }

    // When enabled, the expected workload is reported to the stream (ADPF).
    void setWorkloadReportingEnabled(bool enabled) {
        mWorkloadReportingEnabled = enabled;
    }

private:
    static constexpr double kNsToMsScaler = 0.000001;

    std::atomic<float> mCpuLoad{0.0f};     // low-pass filtered CPU load
    std::atomic<float> mMaxCpuLoad{0.0f};  // peak unfiltered CPU load
    int64_t mPreviousCallbackTimeNs = 0;
    DoubleStatistics mStatistics;          // callback-to-callback intervals in ms
    std::atomic<int32_t> mNumWorkloadVoices{0};
    SynthWorkload mSynthWorkload;
    bool mHearWorkload = false;
    bool mWorkloadReportingEnabled = false;

    oboe::AudioStreamDataCallback *mCallback = nullptr;
    static bool mCallbackReturnStop;
    int64_t mCallbackCount = 0;
    std::atomic<int32_t> mFramesPerCallback{0};

    std::atomic<uint32_t> mCpuAffinityMask{0};  // requested mask
    std::atomic<uint32_t> mPreviousMask{0};     // mask most recently applied
    std::atomic<uint32_t> mCpuMask{0};          // CPUs observed since last reset
    cpu_set_t mOriginalCpuSet;                  // affinity saved before first change
    bool mIsOriginalCpuSetValid = false;
};
#endif //NATIVEOBOE_OBOESTREAMCALLBACKPROXY_H

View file

@ -0,0 +1,86 @@
/*
* Copyright 2020 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <sched.h>
#include <cstring>
#include "AudioStreamGateway.h"
#include "common/OboeDebug.h"
#include "oboe/Oboe.h"
#include "OboeStreamCallbackProxy.h"
#include "OboeTesterStreamCallback.h"
#include "OboeTools.h"
#include "synth/IncludeMeOnce.h"
// Requested hang duration for all callbacks; zero disables hanging.
int32_t OboeTesterStreamCallback::mHangTimeMillis = 0;

// Print if scheduler changes.
void OboeTesterStreamCallback::printScheduler() {
#if OBOE_ENABLE_LOGGING
    // Only log when the scheduler differs from the last observed value.
    int scheduler = sched_getscheduler(gettid());
    if (scheduler != mPreviousScheduler) {
        int schedulerType = scheduler & 0xFFFF; // mask off high flags
        LOGD("callback CPU scheduler = 0x%08x = %s",
             scheduler,
             ((schedulerType == SCHED_FIFO) ? "SCHED_FIFO" :
              ((schedulerType == SCHED_OTHER) ? "SCHED_OTHER" :
               ((schedulerType == SCHED_RR) ? "SCHED_RR" : "UNKNOWN")))
        );
        mPreviousScheduler = scheduler;
    }
#endif
}

// Sleep to cause an XRun. Then reschedule.
void OboeTesterStreamCallback::maybeHang(const int64_t startNanos) {
    // Zero means hanging is disabled.
    if (mHangTimeMillis == 0) return;

    if (startNanos > mNextTimeToHang) {
        LOGD("%s() start sleeping", __func__);
        // Take short naps until it is time to wake up.
        int64_t nowNanos = startNanos;
        int64_t wakeupNanos = startNanos + (mHangTimeMillis * NANOS_PER_MILLISECOND);
        while (nowNanos < wakeupNanos && mHangTimeMillis > 0) {
            int32_t sleepTimeMicros = (int32_t) ((wakeupNanos - nowNanos) / 1000);
            if (sleepTimeMicros == 0) break;
            // The usleep() function can fail if it sleeps for more than one second.
            // So sleep for several small intervals.
            // This also allows us to exit the loop if mHangTimeMillis gets set to zero.
            const int32_t maxSleepTimeMicros = 100 * 1000;
            sleepTimeMicros = std::min(maxSleepTimeMicros, sleepTimeMicros);
            usleep(sleepTimeMicros);
            nowNanos = getNanoseconds();
        }

        // Calculate when we hang again.
        // Clamp the interval between 500 ms and max(10 s, 2 * hang time).
        const int32_t minDurationMillis = 500;
        const int32_t maxDurationMillis = std::max(10000, mHangTimeMillis * 2);
        int32_t durationMillis = mHangTimeMillis * 10;
        durationMillis = std::max(minDurationMillis, std::min(maxDurationMillis, durationMillis));
        mNextTimeToHang = startNanos + (durationMillis * NANOS_PER_MILLISECOND);
        LOGD("%s() slept for %d msec, durationMillis = %d", __func__,
             (int)((nowNanos - startNanos) / 1e6L),
             durationMillis);
    }
}
/**
 * Get the current time from the given clock.
 * @param clockId clock to query (CLOCK_MONOTONIC by default, see header)
 * @return time in nanoseconds, or the negative clock_gettime() result on failure
 */
int64_t OboeTesterStreamCallback::getNanoseconds(clockid_t clockId) {
    struct timespec time;
    int result = clock_gettime(clockId, &time);
    if (result < 0) {
        return result;
    }
    // Use pure integer arithmetic. The previous (tv_sec * 1e9) promoted the
    // sum to double, which cannot represent every nanosecond once the value
    // exceeds 2^53 (about 104 days).
    return (time.tv_sec * (int64_t) 1000000000) + time.tv_nsec;
}

View file

@ -0,0 +1,58 @@
/*
* Copyright 2020 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef OBOETESTER_STREAM_CALLBACK_H
#define OBOETESTER_STREAM_CALLBACK_H
#include <unistd.h>
#include <sys/types.h>
#include <sys/sysinfo.h>
#include "flowgraph/FlowGraphNode.h"
#include "oboe/Oboe.h"
#include "synth/Synthesizer.h"
#include "synth/SynthTools.h"
class OboeTesterStreamCallback : public oboe::AudioStreamCallback {
public:
    virtual ~OboeTesterStreamCallback() = default;

    // Call this before starting.
    void reset() {
        mPreviousScheduler = -1; // force the next printScheduler() to log
    }

    // Current time from the given clock, in nanoseconds.
    static int64_t getNanoseconds(clockid_t clockId = CLOCK_MONOTONIC);

    /**
     * Specify a sleep time that will hang the audio periodically.
     * Zero disables hanging.
     * @param hangTimeMillis
     */
    static void setHangTimeMillis(int hangTimeMillis) {
        mHangTimeMillis = hangTimeMillis;
    }

protected:
    // Log the thread's scheduler whenever it changes (see .cpp).
    void printScheduler();

    // Sleep long enough to cause an XRun if a hang has been requested.
    void maybeHang(int64_t nowNanos);

    int mPreviousScheduler = -1;
    static int mHangTimeMillis;   // requested hang duration, 0 = disabled
    int64_t mNextTimeToHang = 0;  // earliest time of the next hang, nanoseconds
};
#endif //OBOETESTER_STREAM_CALLBACK_H

View file

@ -0,0 +1,25 @@
/*
* Copyright 2023 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef OBOETESTER_OBOETOOLS_H
#define OBOETESTER_OBOETOOLS_H

// Time unit conversion constants.
// NANOS_PER_MICROSECOND is cast to int64_t so that products built from these
// macros are evaluated in 64-bit arithmetic and do not overflow int.
#define NANOS_PER_MICROSECOND ((int64_t) 1000)
#define NANOS_PER_MILLISECOND (1000 * NANOS_PER_MICROSECOND)
#define NANOS_PER_SECOND (1000 * NANOS_PER_MILLISECOND)

#define MILLISECONDS_PER_SECOND 1000

#endif //OBOETESTER_OBOETOOLS_H

View file

@ -0,0 +1,33 @@
/*
* Copyright 2018 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "PlayRecordingCallback.h"
/**
* Called when the stream is ready to process audio.
*/
oboe::DataCallbackResult PlayRecordingCallback::onAudioReady(
        oboe::AudioStream *audioStream,
        void *audioData,
        int numFrames) {
    float *floatData = (float *)audioData;
    // Read stored data into the buffer provided.
    int32_t framesRead = mRecording->read(floatData, numFrames);
    // LOGI("%s() framesRead = %d, numFrames = %d", __func__, framesRead, numFrames);
    // Keep playing while the recording still has data;
    // stop the stream once it is exhausted.
    return framesRead > 0
           ? oboe::DataCallbackResult::Continue
           : oboe::DataCallbackResult::Stop;
}

View file

@ -0,0 +1,46 @@
/*
* Copyright 2018 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef NATIVEOBOE_PLAY_RECORDING_CALLBACK_H
#define NATIVEOBOE_PLAY_RECORDING_CALLBACK_H
#include "oboe/Oboe.h"
#include "MultiChannelRecording.h"
/**
 * Callback that plays a previously captured MultiChannelRecording
 * back into an output stream, stopping when it is exhausted.
 */
class PlayRecordingCallback : public oboe::AudioStreamCallback {
public:
    PlayRecordingCallback() {}
    ~PlayRecordingCallback() = default;

    // Set the recording to be played. The recording must outlive this callback.
    void setRecording(MultiChannelRecording *recording) {
        mRecording = recording;
    }

    /**
     * Called when the stream is ready to process audio.
     */
    oboe::DataCallbackResult onAudioReady(
            oboe::AudioStream *audioStream,
            void *audioData,
            int numFrames) override; // 'override' added: this overrides the base virtual

private:
    MultiChannelRecording *mRecording = nullptr;
};
#endif //NATIVEOBOE_PLAYRECORDINGCALLBACK_H

View file

@ -0,0 +1,69 @@
/*
* Copyright 2015 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <unistd.h>
#include "common/OboeDebug.h"
#include "oboe/Definitions.h"
#include "SawPingGenerator.h"
using namespace oboe::flowgraph;
SawPingGenerator::SawPingGenerator()
        : OscillatorBase()
        , mRequestCount(0)
        , mAcknowledgeCount(0)
        , mLevel(0.0f) {
}

SawPingGenerator::~SawPingGenerator() { }

// Cancel any pending trigger requests along with the base node state.
void SawPingGenerator::reset() {
    FlowGraphNode::reset();
    mAcknowledgeCount.store(mRequestCount.load());
}

/**
 * Generate a decaying sawtooth "ping".
 * A trigger() call restarts the ping at full level; the level then
 * decays exponentially toward silence.
 * @param numFrames number of frames to render
 * @return numFrames
 */
int32_t SawPingGenerator::onProcess(int numFrames) {
    const float *frequencies = frequency.getBuffer();
    const float *amplitudes = amplitude.getBuffer();
    float *buffer = output.getBuffer();

    // A pending trigger request restarts the ping at full level.
    if (mRequestCount.load() > mAcknowledgeCount.load()) {
        mPhase = -1.0f;
        mLevel = 1.0;
        mAcknowledgeCount++;
    }

    // Check level to prevent numeric underflow.
    if (mLevel > 0.000001) {
        for (int i = 0; i < numFrames; i++) {
            float sawtooth = incrementPhase(frequencies[i]);
            *buffer++ = (float) (sawtooth * mLevel * amplitudes[i]);
            mLevel *= 0.999; // exponential decay per sample
        }
    } else {
        // Effectively silent; output zeros.
        for (int i = 0; i < numFrames; i++) {
            *buffer++ = 0.0f;
        }
    }
    return numFrames;
}

// Request a new ping; uses an atomic counter read by the audio thread.
void SawPingGenerator::trigger() {
    mRequestCount++;
}

View file

@ -0,0 +1,46 @@
/*
* Copyright 2015 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef NATIVEOBOE_SAWPINGGENERATOR_H
#define NATIVEOBOE_SAWPINGGENERATOR_H
#include <atomic>
#include <unistd.h>
#include <sys/types.h>
#include "flowgraph/FlowGraphNode.h"
#include "flowunits/OscillatorBase.h"
/**
 * Oscillator that emits a decaying sawtooth ping each time trigger() is called.
 */
class SawPingGenerator : public OscillatorBase {
public:
    SawPingGenerator();
    virtual ~SawPingGenerator();

    // Render the ping (or silence) into the output port.
    int32_t onProcess(int numFrames) override;

    // Request a new ping; may be called from a non-audio thread.
    void trigger();

    // Clear any pending trigger requests.
    void reset() override;

private:
    std::atomic<int> mRequestCount; // external thread increments this to request a beep
    std::atomic<int> mAcknowledgeCount; // audio thread sets this to acknowledge
    double mLevel; // current envelope level; decays after each trigger
};
#endif //NATIVEOBOE_SAWPINGGENERATOR_H

View file

@ -0,0 +1,50 @@
/*
* Copyright 2025 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "SinkMemoryDirect.h"
#include "common/OboeDebug.h"
SinkMemoryDirect::SinkMemoryDirect(int channelCount, int bytesPerFrame) :
        oboe::flowgraph::FlowGraphSink(channelCount), mBytesPerFrame(bytesPerFrame) {
}

// Copy `length` bytes from the caller's buffer into an internal cache
// and rewind the read position to the start.
void SinkMemoryDirect::setupMemoryBuffer(std::unique_ptr<uint8_t[]>& buffer, int length) {
    mBuffer = std::make_unique<uint8_t[]>(length);
    memcpy(mBuffer.get(), buffer.get(), length);
    mBufferLength = length;
    mCurPosition = 0;
}

// Reset the node and rewind to the start of the cached data.
void SinkMemoryDirect::reset() {
    oboe::flowgraph::FlowGraphNode::reset();
    mCurPosition = 0;
}
/**
 * Fill `data` with numFrames frames of cached audio, wrapping back to the
 * start of the cache whenever the end is reached.
 * @return numFrames on success, 0 if no cache has been set up
 */
int32_t SinkMemoryDirect::read(void *data, int32_t numFrames) {
    // Guard against an unset or empty cache. Without this the loop below
    // would copy zero bytes per iteration and spin forever.
    if (mBufferLength <= 0) {
        return 0;
    }
    auto uint8Data = static_cast<uint8_t*>(data);
    int bytesLeft = numFrames * mBytesPerFrame;
    while (bytesLeft > 0) {
        int bytesToCopy = std::min(bytesLeft, mBufferLength - mCurPosition);
        memcpy(uint8Data, mBuffer.get() + mCurPosition, bytesToCopy);
        mCurPosition += bytesToCopy;
        if (mCurPosition >= mBufferLength) {
            mCurPosition = 0; // wrap around and keep looping the cache
        }
        bytesLeft -= bytesToCopy;
        uint8Data += bytesToCopy;
    }
    return numFrames;
}

View file

@ -0,0 +1,43 @@
/*
* Copyright 2025 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <memory>
#include "flowgraph/FlowGraphNode.h"
/**
* AudioSink that provides data from a cached memory.
* Data conversion is not allowed when using this sink.
*/
class SinkMemoryDirect : public oboe::flowgraph::FlowGraphSink {
public:
    explicit SinkMemoryDirect(int channelCount, int bytesPerFrame);

    // Copy the given data into an internal cache; read() then loops over it.
    void setupMemoryBuffer(std::unique_ptr<uint8_t[]>& buffer, int length);

    // Rewind to the beginning of the cached data.
    void reset() final;

    // Fill `data` with numFrames frames, wrapping around the cache as needed.
    int32_t read(void* data, int32_t numFrames) final;

private:
    std::unique_ptr<uint8_t[]> mBuffer = nullptr;
    int mBufferLength = 0; // cache size in bytes
    int mCurPosition = 0;  // current read offset in bytes
    const int mBytesPerFrame;
};

View file

@ -0,0 +1,118 @@
/*
* Copyright 2023 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <stdlib.h>
#include <aaudio/AAudioExtensions.h>
#include "common/OboeDebug.h"
#include "oboe/AudioClock.h"
#include "TestColdStartLatency.h"
#include "OboeTools.h"
using namespace oboe;
/**
 * Open a stream and measure how long the open call takes.
 * MMAP is forced on or off for the duration of the open, then restored
 * to its previous state.
 * @return the oboe::Result of the open, as an int32_t
 */
int32_t TestColdStartLatency::open(bool useInput, bool useLowLatency, bool useMmap, bool useExclusive) {
    mDataCallback = std::make_shared<MyDataCallback>();

    // Enable MMAP if needed
    bool wasMMapEnabled = AAudioExtensions::getInstance().isMMapEnabled();
    AAudioExtensions::getInstance().setMMapEnabled(useMmap);

    int64_t beginOpenNanos = AudioClock::getNanoseconds();
    AudioStreamBuilder builder;
    Result result = builder.setFormat(AudioFormat::Float)
            ->setPerformanceMode(useLowLatency ? PerformanceMode::LowLatency :
                                 PerformanceMode::None)
            ->setDirection(useInput ? Direction::Input : Direction::Output)
            ->setChannelCount(kChannelCount)
            ->setDataCallback(mDataCallback)
            ->setSharingMode(useExclusive ? SharingMode::Exclusive : SharingMode::Shared)
            ->openStream(mStream);
    int64_t endOpenNanos = AudioClock::getNanoseconds();
    int64_t actualDurationNanos = endOpenNanos - beginOpenNanos;
    mOpenTimeMicros = actualDurationNanos / NANOS_PER_MICROSECOND;

    // Revert MMAP back to its previous state
    AAudioExtensions::getInstance().setMMapEnabled(wasMMapEnabled);

    mDeviceId = mStream->getDeviceId();
    return (int32_t) result;
}

// Start the stream, recording both the duration of the start call and the
// moment the start began (used later by getColdStartTimeMicros()).
int32_t TestColdStartLatency::start() {
    mBeginStartNanos = AudioClock::getNanoseconds();
    Result result = mStream->requestStart();
    int64_t endStartNanos = AudioClock::getNanoseconds();
    int64_t actualDurationNanos = endStartNanos - mBeginStartNanos;
    mStartTimeMicros = actualDurationNanos / NANOS_PER_MICROSECOND;
    return (int32_t) result;
}

// Stop then close; report the stop error if any, otherwise the close result.
int32_t TestColdStartLatency::close() {
    Result result1 = mStream->requestStop();
    Result result2 = mStream->close();
    return (int32_t)((result1 != Result::OK) ? result1 : result2);
}

/**
 * Estimate cold start latency: the time from the start request until
 * frame zero was (or would be) presented at the endpoint.
 * @return latency in microseconds, or -1 on error
 */
int32_t TestColdStartLatency::getColdStartTimeMicros() {
    int64_t position;
    int64_t timestampNanos;
    if (mStream->getDirection() == Direction::Output) {
        auto result = mStream->getTimestamp(CLOCK_MONOTONIC);
        if (!result) {
            return -1; // ERROR
        }
        auto frameTimestamp = result.value();
        // Calculate the time that frame[0] would have been played by the speaker.
        position = frameTimestamp.position;
        timestampNanos = frameTimestamp.timestamp;
    } else {
        // For input, use the frames read so far at the current time.
        position = mStream->getFramesRead();
        timestampNanos = AudioClock::getNanoseconds();
    }
    double sampleRate = (double) mStream->getSampleRate();
    // Back-calculate the wall-clock time of frame zero.
    int64_t elapsedNanos = NANOS_PER_SECOND * (position / sampleRate);
    int64_t timeOfFrameZero = timestampNanos - elapsedNanos;
    int64_t coldStartLatencyNanos = timeOfFrameZero - mBeginStartNanos;
    return coldStartLatencyNanos / NANOS_PER_MICROSECOND;
}

// Callback that fills output streams with a quiet sine wave.
// (Input streams leave the data untouched.)
DataCallbackResult TestColdStartLatency::MyDataCallback::onAudioReady(
        AudioStream *audioStream,
        void *audioData,
        int32_t numFrames) {
    float *floatData = (float *) audioData;
    const int numSamples = numFrames * kChannelCount;
    if (audioStream->getDirection() == Direction::Output) {
        // Fill mono buffer with a sine wave.
        for (int i = 0; i < numSamples; i++) {
            *floatData++ = sinf(mPhase) * 0.2f;
            // Advance the phase once per frame (after the last channel).
            if ((i % kChannelCount) == (kChannelCount - 1)) {
                mPhase += kPhaseIncrement;
                // Wrap the phase around in a circle.
                if (mPhase >= M_PI) mPhase -= 2 * M_PI;
            }
        }
    }
    return DataCallbackResult::Continue;
}

View file

@ -0,0 +1,76 @@
/*
* Copyright 2023 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef OBOETESTER_TEST_COLD_START_LATENCY_H
#define OBOETESTER_TEST_COLD_START_LATENCY_H
#include "oboe/Oboe.h"
#include <thread>
/**
* Test for getting the cold start latency
*/
class TestColdStartLatency {
public:
    // Open a stream; returns the oboe::Result as an int32_t.
    int32_t open(bool useInput, bool useLowLatency, bool useMmap, bool useExclusive);

    // Start the stream; returns the oboe::Result as an int32_t.
    int32_t start();

    // Stop and close the stream; returns the first failing oboe::Result.
    int32_t close();

    // Time from the start request until frame zero at the endpoint, or -1 on error.
    int32_t getColdStartTimeMicros();

    int32_t getOpenTimeMicros() {
        return (int32_t) (mOpenTimeMicros.load());
    }

    int32_t getStartTimeMicros() {
        return (int32_t) (mStartTimeMicros.load());
    }

    int32_t getDeviceId() {
        return mDeviceId;
    }

protected:
    std::atomic<int64_t> mBeginStartNanos{0};
    std::atomic<double> mOpenTimeMicros{0};
    std::atomic<double> mStartTimeMicros{0};
    // NOTE(review): appears unwritten in the visible code — confirm before relying on it.
    std::atomic<double> mColdStartTimeMicros{0};
    std::atomic<int32_t> mDeviceId{0};

private:
    // Fills output streams with a quiet sine wave.
    class MyDataCallback : public oboe::AudioStreamDataCallback {
    public:
        MyDataCallback() {}

        oboe::DataCallbackResult onAudioReady(
                oboe::AudioStream *audioStream,
                void *audioData,
                int32_t numFrames) override;

    private:
        // For sine generator.
        float mPhase = 0.0f;
        static constexpr float kPhaseIncrement = 2.0f * (float) M_PI * 440.0f / 48000.0f;
    };

    std::shared_ptr<oboe::AudioStream> mStream;
    std::shared_ptr<MyDataCallback> mDataCallback;
    static constexpr int kChannelCount = 1;
};
#endif //OBOETESTER_TEST_COLD_START_LATENCY_H

View file

@ -0,0 +1,75 @@
/*
* Copyright 2022 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <stdlib.h>
#include "common/OboeDebug.h"
#include "TestErrorCallback.h"
using namespace oboe;
// Open a stream, passing the callbacks as shared_ptr so Oboe keeps them
// alive even if this object drops its own references (see issue #1603).
oboe::Result TestErrorCallback::open() {
    mCallbackMagic = 0;
    mDataCallback = std::make_shared<MyDataCallback>();
    mErrorCallback = std::make_shared<MyErrorCallback>(this);
    AudioStreamBuilder builder;
    oboe::Result result = builder.setSharingMode(oboe::SharingMode::Exclusive)
            ->setPerformanceMode(oboe::PerformanceMode::LowLatency)
            ->setFormat(oboe::AudioFormat::Float)
            ->setChannelCount(kChannelCount)
#if 0
            ->setDataCallback(mDataCallback.get())
            ->setErrorCallback(mErrorCallback.get()) // This can lead to a crash or FAIL.
#else
            ->setDataCallback(mDataCallback)
            ->setErrorCallback(mErrorCallback) // shared_ptr avoids a crash
#endif
            ->openStream(mStream);
    return result;
}

oboe::Result TestErrorCallback::start() {
    return mStream->requestStart();
}

oboe::Result TestErrorCallback::stop() {
    return mStream->requestStop();
}

oboe::Result TestErrorCallback::close() {
    return mStream->close();
}

// Convenience entry point: open then start; returns the first error as int.
int TestErrorCallback::test() {
    oboe::Result result = open();
    if (result != oboe::Result::OK) {
        return (int) result;
    }
    return (int) start();
}

DataCallbackResult TestErrorCallback::MyDataCallback::onAudioReady(
        AudioStream *audioStream,
        void *audioData,
        int32_t numFrames) {
    float *output = (float *) audioData;
    // Fill buffer with random numbers to create "white noise".
    int numSamples = numFrames * kChannelCount;
    for (int i = 0; i < numSamples; i++) {
        *output++ = (float)((drand48() - 0.5) * 0.2);
    }
    return oboe::DataCallbackResult::Continue;
}

View file

@ -0,0 +1,114 @@
/*
* Copyright 2022 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef OBOETESTER_TEST_ERROR_CALLBACK_H
#define OBOETESTER_TEST_ERROR_CALLBACK_H
#include "common/OboeDebug.h"
#include "oboe/Oboe.h"
#include <thread>
/**
* This code is an experiment to see if we can cause a crash from the ErrorCallback.
*/
class TestErrorCallback {
public:

    oboe::Result open();
    oboe::Result start();
    oboe::Result stop();
    oboe::Result close();

    // Open and start the stream. Returns 0 (oboe::Result::OK) on success.
    int test();

    // Magic value published by onErrorAfterClose(); read by the Java test code
    // to decide whether the callback object survived long enough.
    int32_t getCallbackMagic() {
        return mCallbackMagic.load();
    }

protected:
    std::atomic<int32_t> mCallbackMagic{0};

private:
    // Drop the callbacks and the stream. Called from a detached thread inside
    // onErrorBeforeClose() to deliberately race with stream teardown.
    void cleanup() {
        mDataCallback.reset();
        mErrorCallback.reset();
        mStream.reset();
    }

    // Renders white noise; implementation in TestErrorCallback.cpp.
    class MyDataCallback : public oboe::AudioStreamDataCallback {
    public:
        oboe::DataCallbackResult onAudioReady(
                oboe::AudioStream *audioStream,
                void *audioData,
                int32_t numFrames) override;
    };

    class MyErrorCallback : public oboe::AudioStreamErrorCallback {
    public:
        MyErrorCallback(TestErrorCallback *parent): mParent(parent) {}

        virtual ~MyErrorCallback() {
            // If the delete occurs before onErrorAfterClose() then this bad magic
            // value will be seen by the Java test code, causing a failure.
            // It is also possible that this code will just cause OboeTester to crash!
            mMagic = 0xdeadbeef;
            LOGE("%s() called", __func__);
        }

        void onErrorBeforeClose(oboe::AudioStream *oboeStream, oboe::Result error) override {
            LOGE("%s() - error = %s, parent = %p",
                 __func__, oboe::convertToText(error), &mParent);
            // Trigger a crash by "deleting" this callback object while in use!
            // Do not try this at home. We are just trying to reproduce the crash
            // reported in #1603.
            std::thread t([this]() {
                this->mParent->cleanup(); // Possibly delete stream and callback objects.
                LOGE("onErrorBeforeClose called cleanup!");
            });
            t.detach();
            // There is a race condition between the deleting thread and this thread.
            // We do not want to add synchronization because the object is getting deleted
            // and cannot be relied on.
            // So we sleep here to give the deleting thread a chance to win the race.
            usleep(10 * 1000);
        }

        void onErrorAfterClose(oboe::AudioStream *oboeStream, oboe::Result error) override {
            // The callback was probably deleted by now.
            LOGE("%s() - error = %s, mMagic = 0x%08X",
                 __func__, oboe::convertToText(error), mMagic.load());
            mParent->mCallbackMagic = mMagic.load();
        }

    private:
        TestErrorCallback *mParent;

        // This must match the value in TestErrorCallbackActivity.java
        static constexpr int32_t kMagicGood = 0x600DCAFE;
        // Overwritten with 0xdeadbeef by the destructor; stays kMagicGood
        // only while this object is alive.
        std::atomic<int32_t> mMagic{kMagicGood};
    };

    std::shared_ptr<oboe::AudioStream> mStream;
    std::shared_ptr<MyDataCallback> mDataCallback;
    std::shared_ptr<MyErrorCallback> mErrorCallback;

    static constexpr int kChannelCount = 2;
};
#endif //OBOETESTER_TEST_ERROR_CALLBACK_H

View file

@ -0,0 +1,97 @@
/*
* Copyright 2023 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <stdlib.h>
#include <aaudio/AAudioExtensions.h>
#include "common/OboeDebug.h"
#include "oboe/AudioClock.h"
#include "TestRapidCycle.h"
using namespace oboe;
// start a thread to cycle through stream tests
int32_t TestRapidCycle::start(bool useOpenSL) {
mThreadEnabled = true;
mCycleCount = 0;
mCycleThread = std::thread([this, useOpenSL]() {
cycleRapidly(useOpenSL);
});
return 0;
}
// Ask the cycling thread to finish and wait for it. Always returns 0.
int32_t TestRapidCycle::stop() {
    mThreadEnabled = false;
    // join() on a thread that was never started (or was already joined)
    // throws std::system_error; guard so stop() is safe to call any time.
    if (mCycleThread.joinable()) {
        mCycleThread.join();
    }
    return 0;
}
// Thread body: keep running cycles until stop() is requested or a cycle fails.
void TestRapidCycle::cycleRapidly(bool useOpenSL) {
    while (mThreadEnabled) {
        if (oneCycle(useOpenSL) != 0) {
            break;
        }
    }
}
int32_t TestRapidCycle::oneCycle(bool useOpenSL) {
mCycleCount++;
mDataCallback = std::make_shared<MyDataCallback>();
AudioStreamBuilder builder;
oboe::Result result = builder.setFormat(oboe::AudioFormat::Float)
->setAudioApi(useOpenSL ? oboe::AudioApi::OpenSLES : oboe::AudioApi::AAudio)
->setPerformanceMode(oboe::PerformanceMode::LowLatency)
->setChannelCount(kChannelCount)
->setDataCallback(mDataCallback)
->setUsage(oboe::Usage::Notification)
->openStream(mStream);
if (result != oboe::Result::OK) {
return (int32_t) result;
}
mStream->setDelayBeforeCloseMillis(0);
result = mStream->requestStart();
if (result != oboe::Result::OK) {
mStream->close();
return (int32_t) result;
}
// Sleep for some random time.
int32_t durationMicros = (int32_t)(drand48() * kMaxSleepMicros);
LOGD("TestRapidCycle::oneCycle() - Sleep for %d micros", durationMicros);
usleep(durationMicros);
LOGD("TestRapidCycle::oneCycle() - Woke up, close stream");
mDataCallback->returnStop = true;
result = mStream->close();
return (int32_t) result;
}
// Callback that sleeps then touches the audio buffer.
// Callback that sleeps briefly then touches the audio buffer.
// Returns Stop once returnStop has been set by oneCycle().
DataCallbackResult TestRapidCycle::MyDataCallback::onAudioReady(
        AudioStream *audioStream,
        void *audioData,
        int32_t numFrames) {
    auto *buffer = static_cast<float *>(audioData);
    const int sampleCount = numFrames * kChannelCount;
    // Fill buffer with white noise.
    for (int n = 0; n < sampleCount; n++) {
        buffer[n] = ((float) drand48() - 0.5f) * 2 * 0.1f;
    }
    usleep(500); // simulate half a millisecond of rendering work
    if (!returnStop) {
        return DataCallbackResult::Continue;
    }
    usleep(20 * 1000);
    return DataCallbackResult::Stop;
}

View file

@ -0,0 +1,67 @@
/*
* Copyright 2023 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef OBOETESTER_TEST_RAPID_CYCLE_H
#define OBOETESTER_TEST_RAPID_CYCLE_H
#include "oboe/Oboe.h"
#include <thread>
/**
 * Try to cause a crash by rapidly and repeatedly opening, starting and
 * closing Oboe streams on a background thread. Each cycle opens a stream,
 * lets it run for a random interval, then closes it.
 */
class TestRapidCycle {
public:
    // Launch a background thread that repeatedly opens, starts and closes
    // streams. Always returns 0.
    int32_t start(bool useOpenSL);

    // Ask the cycling thread to finish and wait for it. Always returns 0.
    int32_t stop();

    // Number of open/close cycles attempted so far.
    int32_t getCycleCount() {
        return mCycleCount.load();
    }

private:
    // Thread body: loop until stop() is called or a cycle fails.
    void cycleRapidly(bool useOpenSL);
    // Open, start, run briefly, then close one stream. Returns 0 on success.
    int32_t oneCycle(bool useOpenSL);

    // Renders white noise; returns Stop once returnStop has been set.
    class MyDataCallback : public oboe::AudioStreamDataCallback {
    public:
        MyDataCallback() {}

        oboe::DataCallbackResult onAudioReady(
                oboe::AudioStream *audioStream,
                void *audioData,
                int32_t numFrames) override;

        // Set by oneCycle() just before closing the stream.
        bool returnStop = false;
    };

    std::shared_ptr<oboe::AudioStream> mStream;
    std::shared_ptr<MyDataCallback> mDataCallback;
    std::atomic<int32_t> mCycleCount{0};
    std::atomic<bool> mThreadEnabled{false};
    std::thread mCycleThread;

    static constexpr int kChannelCount = 1;
    static constexpr int kMaxSleepMicros = 25000;
};
#endif //OBOETESTER_TEST_RAPID_CYCLE_H

View file

@ -0,0 +1,111 @@
/*
* Copyright 2023 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <stdlib.h>
#include <aaudio/AAudioExtensions.h>
#include "common/OboeDebug.h"
#include "oboe/AudioClock.h"
#include "TestRoutingCrash.h"
using namespace oboe;
// Open and start an Oboe stream configured to provoke a routing crash.
int32_t TestRoutingCrash::start(bool useInput) {
    mDataCallback = std::make_shared<MyDataCallback>(this);

    // Disable MMAP because we are trying to crash a Legacy Stream.
    const bool wasMMapEnabled = AAudioExtensions::getInstance().isMMapEnabled();
    AAudioExtensions::getInstance().setMMapEnabled(false);

    AudioStreamBuilder builder;
    oboe::Result result = builder.setFormat(oboe::AudioFormat::Float)
            ->setPerformanceMode(oboe::PerformanceMode::LowLatency)
            ->setDirection(useInput ? oboe::Direction::Input : oboe::Direction::Output)
            ->setChannelCount(kChannelCount)
            ->setDataCallback(mDataCallback)
            // Use VoiceCommunication so we can reroute it by setting SpeakerPhone ON/OFF.
            ->setUsage(oboe::Usage::VoiceCommunication)
            ->openStream(mStream);

    // Restore the global MMAP setting unconditionally. The previous code
    // returned early on an open failure and left MMAP disabled process-wide.
    AAudioExtensions::getInstance().setMMapEnabled(wasMMapEnabled);

    if (result != oboe::Result::OK) {
        return (int32_t) result;
    }
    return (int32_t) mStream->requestStart();
}
// Stop and close the stream. Returns the first failing oboe::Result,
// or OK (0) if both operations succeed.
int32_t TestRoutingCrash::stop() {
    // Guard against stop() being called before start() or after a failed open.
    if (!mStream) return (int32_t) oboe::Result::ErrorNull;
    oboe::Result result1 = mStream->requestStop();
    oboe::Result result2 = mStream->close();
    return (int32_t)((result1 != oboe::Result::OK) ? result1 : result2);
}
// Callback that sleeps then touches the audio buffer.
// Callback that sleeps to simulate a heavy workload, forces a timestamp query,
// then touches the audio buffer. The sleep widens the window in which a
// routing change can invalidate the buffer.
DataCallbackResult TestRoutingCrash::MyDataCallback::onAudioReady(
        AudioStream *audioStream,
        void *audioData,
        int32_t numFrames) {
    float *floatData = (float *) audioData;

    // If I call getTimestamp() here it does NOT crash!

    // Simulate the timing of a heavy workload by sleeping.
    // Otherwise the window for the crash is very narrow.
    const double kDutyCycle = 0.7;
    const double bufferTimeNanos = 1.0e9 * numFrames / (double) audioStream->getSampleRate();
    const int64_t targetDurationNanos = (int64_t) (bufferTimeNanos * kDutyCycle);
    if (targetDurationNanos > 0) {
        AudioClock::sleepForNanos(targetDurationNanos);
    }

    // One-pole IIR filter to smooth the value reported by getSleepTimeMicros().
    const double kFilterCoefficient = 0.95; // low pass IIR filter
    // NOTE(review): nanos -> micros should be * 0.001; this 0.0001 factor
    // under-reports by 10x. Confirm against the Java consumer before changing.
    const double sleepMicros = targetDurationNanos * 0.0001;
    mParent->averageSleepTimeMicros = ((1.0 - kFilterCoefficient) * sleepMicros)
            + (kFilterCoefficient * mParent->averageSleepTimeMicros);

    // If I call getTimestamp() here it crashes.
    audioStream->getTimestamp(CLOCK_MONOTONIC); // Trigger a restoreTrack_l() in framework.

    const int numSamples = numFrames * kChannelCount;
    if (audioStream->getDirection() == oboe::Direction::Input) {
        // Read buffer and write sum of samples to a member variable.
        // We just want to touch the memory and not get optimized away by the compiler.
        float sum = 0.0f;
        for (int i = 0; i < numSamples; i++) {
            sum += *floatData++;
        }
        mInputSum = sum;
    } else {
        // Fill mono buffer with a sine wave.
        // If the routing occurred then the buffer may be dead and
        // we may be writing into unallocated memory.
        for (int i = 0; i < numSamples; i++) {
            *floatData++ = sinf(mPhase) * 0.2f;
            mPhase += kPhaseIncrement;
            // Wrap the phase around in a circle.
            if (mPhase >= M_PI) mPhase -= 2 * M_PI;
        }
    }
    // If I call getTimestamp() here it does NOT crash!
    return oboe::DataCallbackResult::Continue;
}

View file

@ -0,0 +1,67 @@
/*
* Copyright 2023 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef OBOETESTER_TEST_ROUTING_CRASH_H
#define OBOETESTER_TEST_ROUTING_CRASH_H
#include "oboe/Oboe.h"
#include <thread>
/**
* Try to cause a crash by changing routing during a data callback.
* We use Use::VoiceCommunication for the stream and
* setSpeakerPhoneOn(b) to force a routing change.
* This works best when connected to a BT headset.
*/
class TestRoutingCrash {
public:
    // Open and start a VoiceCommunication stream. Returns 0 on success,
    // otherwise a negative oboe::Result.
    int32_t start(bool useInput);

    // Stop and close the stream.
    int32_t stop();

    // Smoothed simulated-workload sleep time measured in the data callback.
    int32_t getSleepTimeMicros() {
        return (int32_t) (averageSleepTimeMicros.load());
    }

protected:
    std::atomic<double> averageSleepTimeMicros{0};

private:
    // Sleeps to simulate load, forces getTimestamp(), then reads or writes
    // the audio buffer depending on stream direction.
    class MyDataCallback : public oboe::AudioStreamDataCallback {
    public:
        MyDataCallback(TestRoutingCrash *parent): mParent(parent) {}

        oboe::DataCallbackResult onAudioReady(
                oboe::AudioStream *audioStream,
                void *audioData,
                int32_t numFrames) override;

    private:
        TestRoutingCrash *mParent;
        // For sine generator.
        float mPhase = 0.0f;
        static constexpr float kPhaseIncrement = 2.0f * (float) M_PI * 440.0f / 48000.0f;
        float mInputSum = 0.0f; // For saving input data sum to prevent over-optimization.
    };

    std::shared_ptr<oboe::AudioStream> mStream;
    std::shared_ptr<MyDataCallback> mDataCallback;

    static constexpr int kChannelCount = 1;
};
#endif //OBOETESTER_TEST_ROUTING_CRASH_H

View file

@ -0,0 +1,240 @@
/*
* Copyright (C) 2020 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef ANALYZER_BASE_SINE_ANALYZER_H
#define ANALYZER_BASE_SINE_ANALYZER_H
#include <algorithm>
#include <cctype>
#include <iomanip>
#include <iostream>
#include "InfiniteRecording.h"
#include "LatencyAnalyzer.h"
/**
* Output a steady sine wave and analyze the return signal.
*
* Use a cosine transform to measure the predicted magnitude and relative phase of the
* looped back sine wave. Then generate a predicted signal and compare with the actual signal.
*/
class BaseSineAnalyzer : public LoopbackProcessor {
public:

    BaseSineAnalyzer()
            : LoopbackProcessor()
            , mInfiniteRecording(64 * 1024) {}

    // Subclasses may disable output, e.g. while idle.
    virtual bool isOutputEnabled() { return true; }

    // Set the measured magnitude and rescale the glitch tolerance to match.
    void setMagnitude(double magnitude) {
        mMagnitude = magnitude;
        mScaledTolerance = mMagnitude * getTolerance();
    }

    /**
     * @return valid phase or kPhaseInvalid=-999
     */
    double getPhaseOffset() {
        ALOGD("%s(), mPhaseOffset = %f\n", __func__, mPhaseOffset);
        return mPhaseOffset;
    }

    double getMagnitude() const {
        return mMagnitude;
    }

    void setNoiseAmplitude(double noiseAmplitude) {
        mNoiseAmplitude = noiseAmplitude;
    }

    double getNoiseAmplitude() const {
        return mNoiseAmplitude;
    }

    double getTolerance() {
        return mTolerance;
    }

    void setTolerance(double tolerance) {
        mTolerance = tolerance;
    }

    // advance and wrap phase into (-PI, PI]
    void incrementInputPhase() {
        mInputPhase += mPhaseIncrement;
        if (mInputPhase > M_PI) {
            mInputPhase -= (2.0 * M_PI);
        }
    }

    // advance and wrap phase into (-PI, PI]
    void incrementOutputPhase() {
        mOutputPhase += mPhaseIncrement;
        if (mOutputPhase > M_PI) {
            mOutputPhase -= (2.0 * M_PI);
        }
    }

    /**
     * Write the reference sine wave (plus optional noise) into one channel,
     * zeros into the others.
     *
     * @param frameData upon return, contains the reference sine wave
     * @param channelCount number of samples in frameData
     */
    result_code processOutputFrame(float *frameData, int channelCount) override {
        float output = 0.0f;
        // Output sine wave so we can measure it.
        if (isOutputEnabled()) {
            float sinOut = sinf(mOutputPhase);
            incrementOutputPhase();
            output = (sinOut * mOutputAmplitude)
                     + (mWhiteNoise.nextRandomDouble() * getNoiseAmplitude());
            // ALOGD("sin(%f) = %f, %f\n", mOutputPhase, sinOut, kPhaseIncrement);
        }
        for (int i = 0; i < channelCount; i++) {
            frameData[i] = (i == getOutputChannel()) ? output : 0.0f;
        }
        return RESULT_OK;
    }

    /**
     * Calculate the magnitude of the component of the input signal
     * that matches the analysis frequency.
     * Also calculate the phase that we can use to create a
     * signal that matches that component.
     * The phase will be between -PI and +PI.
     */
    double calculateMagnitudePhase(double *phasePtr = nullptr) {
        if (mFramesAccumulated == 0) {
            return 0.0;
        }
        double sinMean = mSinAccumulator / mFramesAccumulated;
        double cosMean = mCosAccumulator / mFramesAccumulated;
        double magnitude = 2.0 * sqrt((sinMean * sinMean) + (cosMean * cosMean));
        if (phasePtr != nullptr) {
            double phase;
            if (magnitude < kMinValidMagnitude) {
                // Signal too weak for a meaningful phase; report the error code.
                phase = kPhaseInvalid;
                ALOGD("%s() mag very low! sinMean = %7.5f, cosMean = %7.5f",
                      __func__, sinMean, cosMean);
            } else {
                phase = atan2(cosMean, sinMean);
                if (phase == 0.0) {
                    ALOGD("%s() phase zero! sinMean = %7.5f, cosMean = %7.5f",
                          __func__, sinMean, cosMean);
                }
            }
            *phasePtr = phase;
        }
        return magnitude;
    }

    /**
     * Perform sin/cos analysis on each sample.
     * Measure magnitude and phase on every period.
     * Updates mPhaseOffset
     * @param sample the next input sample
     * @return true if magnitude and phase updated
     */
    bool transformSample(float sample) {
        // Compare incoming signal with the reference input sine wave.
        mSinAccumulator += static_cast<double>(sample) * sinf(mInputPhase);
        mCosAccumulator += static_cast<double>(sample) * cosf(mInputPhase);
        incrementInputPhase();

        mFramesAccumulated++;
        // Must be a multiple of the period or the calculation will not be accurate.
        if (mFramesAccumulated == mSinePeriod) {
            const double coefficient = 0.1;
            double magnitude = calculateMagnitudePhase(&mPhaseOffset);
            ALOGD("%s(), phaseOffset = %f\n", __func__, mPhaseOffset);
            if (mPhaseOffset != kPhaseInvalid) {
                // One pole averaging filter.
                setMagnitude((mMagnitude * (1.0 - coefficient)) + (magnitude * coefficient));
            }
            resetAccumulator();
            return true;
        } else {
            return false;
        }
    }

    // reset the sine wave detector
    virtual void resetAccumulator() {
        mFramesAccumulated = 0;
        mSinAccumulator = 0.0;
        mCosAccumulator = 0.0;
    }

    void reset() override {
        LoopbackProcessor::reset();
        resetAccumulator();
        mMagnitude = 0.0;
    }

    void prepareToTest() override {
        LoopbackProcessor::prepareToTest();
        mSinePeriod = getSampleRate() / kTargetGlitchFrequency;
        mInputPhase = 0.0f;
        mOutputPhase = 0.0f;
        mInverseSinePeriod = 1.0 / mSinePeriod;
        mPhaseIncrement = 2.0 * M_PI * mInverseSinePeriod;
    }

protected:
    // Try to get a prime period so the waveform plot changes every time.
    static constexpr int32_t kTargetGlitchFrequency = 48000 / 113;

    int32_t mSinePeriod = 1; // this will be set before use
    double  mInverseSinePeriod = 1.0;
    double  mPhaseIncrement = 0.0;
    // Use two sine wave phases, input and output.
    // This is because the number of input and output samples may differ
    // in a callback and the output frame count may advance ahead of the input, or visa versa.
    double  mInputPhase = 0.0;
    double  mOutputPhase = 0.0;
    double  mOutputAmplitude = 0.75;
    // This is the phase offset between the mInputPhase sine wave and the recorded
    // signal at the tuned frequency.
    // If this jumps around then we are probably just hearing noise.
    // Noise can cause the magnitude to be high but mPhaseOffset will be pretty random.
    // If we are tracking a sine wave then mPhaseOffset should be consistent.
    double  mPhaseOffset = 0.0;
    // kPhaseInvalid indicates that the phase measurement cannot be used.
    // We were seeing times when a magnitude of zero was causing atan2(s,c) to
    // return a phase of zero, which looked valid to Java. This is a way of passing
    // an error code back to Java as a single value to avoid race conditions.
    static constexpr double kPhaseInvalid = -999.0;
    double  mMagnitude = 0.0;
    static constexpr double kMinValidMagnitude = 2.0 / (1 << 16);
    int32_t mFramesAccumulated = 0;
    double  mSinAccumulator = 0.0;
    double  mCosAccumulator = 0.0;
    double  mScaledTolerance = 0.0;

    InfiniteRecording<float> mInfiniteRecording;

private:
    float   mTolerance = 0.10; // scaled from 0.0 to 1.0
    float   mNoiseAmplitude = 0.00; // Used to experiment with warbling caused by DRC.
    PseudoRandom  mWhiteNoise;
};
#endif //ANALYZER_BASE_SINE_ANALYZER_H

View file

@ -0,0 +1,106 @@
/*
* Copyright (C) 2020 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef ANALYZER_DATA_PATH_ANALYZER_H
#define ANALYZER_DATA_PATH_ANALYZER_H
#include <algorithm>
#include <cctype>
#include <iomanip>
#include <iostream>
#include <math.h>
#include "BaseSineAnalyzer.h"
#include "InfiniteRecording.h"
#include "LatencyAnalyzer.h"
/**
* Output a steady sine wave and analyze the return signal.
*
* Use a cosine transform to measure the predicted magnitude and relative phase of the
* looped back sine wave.
*/
class DataPathAnalyzer : public BaseSineAnalyzer {
public:

    DataPathAnalyzer() : BaseSineAnalyzer() {
        // Add a little bit of noise to reduce blockage by speaker protection and DRC.
        setNoiseAmplitude(0.02);
    }

    // Difference between two phases, wrapped into [-PI, +PI].
    double calculatePhaseError(double p1, double p2) {
        double diff = p1 - p2;
        // Wrap around the circle.
        while (diff > M_PI) {
            diff -= (2 * M_PI);
        }
        while (diff < -M_PI) {
            diff += (2 * M_PI);
        }
        return diff;
    }

    /**
     * Analyze one input sample; update mMaxMagnitude when the phase is stable.
     *
     * @param frameData contains microphone data with sine signal feedback
     * @param channelCount unused; samples are read via getInputChannel()
     */
    result_code processInputFrame(const float *frameData, int /* channelCount */) override {
        result_code result = RESULT_OK;
        float sample = frameData[getInputChannel()];
        mInfiniteRecording.write(sample);

        if (transformSample(sample)) {
            // Analyze magnitude and phase on every period.
            if (mPhaseOffset != kPhaseInvalid) {
                double diff = fabs(calculatePhaseError(mPhaseOffset, mPreviousPhaseOffset));
                if (diff < mPhaseTolerance) {
                    // Stable phase means we are tracking the sine, not noise.
                    mMaxMagnitude = std::max(mMagnitude, mMaxMagnitude);
                }
                mPreviousPhaseOffset = mPhaseOffset;
            }
        }
        return result;
    }

    // Build the text report read by the test harness.
    std::string analyze() override {
        std::stringstream report;
        report << "DataPathAnalyzer ------------------\n";
        report << LOOPBACK_RESULT_TAG "sine.magnitude = " << std::setw(8)
               << mMagnitude << "\n";
        report << LOOPBACK_RESULT_TAG "frames.accumulated = " << std::setw(8)
               << mFramesAccumulated << "\n";
        report << LOOPBACK_RESULT_TAG "sine.period = " << std::setw(8)
               << mSinePeriod << "\n";
        return report.str();
    }

    void reset() override {
        BaseSineAnalyzer::reset();
        mPreviousPhaseOffset = 999.0; // Arbitrary high offset to prevent early lock.
        mMaxMagnitude = 0.0;
    }

    // Largest magnitude observed while the phase was stable.
    double getMaxMagnitude() {
        return mMaxMagnitude;
    }

private:
    double  mPreviousPhaseOffset = 0.0;
    double  mPhaseTolerance = 2 * M_PI / 48;
    double  mMaxMagnitude = 0.0;
};
#endif // ANALYZER_DATA_PATH_ANALYZER_H

View file

@ -0,0 +1,416 @@
/*
* Copyright (C) 2017 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef ANALYZER_GLITCH_ANALYZER_H
#define ANALYZER_GLITCH_ANALYZER_H
#include <algorithm>
#include <cctype>
#include <iomanip>
#include <iostream>
#include "InfiniteRecording.h"
#include "LatencyAnalyzer.h"
#include "BaseSineAnalyzer.h"
#include "PseudoRandom.h"
/**
* Output a steady sine wave and analyze the return signal.
*
* Use a cosine transform to measure the predicted magnitude and relative phase of the
* looped back sine wave. Then generate a predicted signal and compare with the actual signal.
*/
class GlitchAnalyzer : public BaseSineAnalyzer {
public:
GlitchAnalyzer() : BaseSineAnalyzer() {}
int32_t getState() const {
return mState;
}
double getPeakAmplitude() const {
return mPeakFollower.getLevel();
}
double getSineAmplitude() const {
return mMagnitude;
}
int getSinePeriod() const {
return mSinePeriod;
}
int32_t getGlitchCount() const {
return mGlitchCount;
}
int32_t getGlitchLength() const {
return mGlitchLength;
}
int32_t getStateFrameCount(int state) const {
return mStateFrameCounters[state];
}
double getSignalToNoiseDB() {
static const double threshold = 1.0e-14;
if (mState != STATE_LOCKED
|| mMeanSquareSignal < threshold
|| mMeanSquareNoise < threshold) {
return -999.0; // error indicator
} else {
double signalToNoise = mMeanSquareSignal / mMeanSquareNoise; // power ratio
double signalToNoiseDB = 10.0 * log(signalToNoise);
if (signalToNoiseDB < static_cast<float>(MIN_SNR_DB)) {
setResult(ERROR_VOLUME_TOO_LOW);
}
return signalToNoiseDB;
}
}
std::string analyze() override {
std::stringstream report;
report << "GlitchAnalyzer ------------------\n";
report << LOOPBACK_RESULT_TAG "peak.amplitude = " << std::setw(8)
<< getPeakAmplitude() << "\n";
report << LOOPBACK_RESULT_TAG "sine.magnitude = " << std::setw(8)
<< getSineAmplitude() << "\n";
report << LOOPBACK_RESULT_TAG "rms.noise = " << std::setw(8)
<< mMeanSquareNoise << "\n";
report << LOOPBACK_RESULT_TAG "signal.to.noise.db = " << std::setw(8)
<< getSignalToNoiseDB() << "\n";
report << LOOPBACK_RESULT_TAG "frames.accumulated = " << std::setw(8)
<< mFramesAccumulated << "\n";
report << LOOPBACK_RESULT_TAG "sine.period = " << std::setw(8)
<< mSinePeriod << "\n";
report << LOOPBACK_RESULT_TAG "test.state = " << std::setw(8)
<< mState << "\n";
report << LOOPBACK_RESULT_TAG "frame.count = " << std::setw(8)
<< mFrameCounter << "\n";
// Did we ever get a lock?
bool gotLock = (mState == STATE_LOCKED) || (mGlitchCount > 0);
if (!gotLock) {
report << "ERROR - failed to lock on reference sine tone.\n";
setResult(ERROR_NO_LOCK);
} else {
// Only print if meaningful.
report << LOOPBACK_RESULT_TAG "glitch.count = " << std::setw(8)
<< mGlitchCount << "\n";
report << LOOPBACK_RESULT_TAG "max.glitch = " << std::setw(8)
<< mMaxGlitchDelta << "\n";
if (mGlitchCount > 0) {
report << "ERROR - number of glitches > 0\n";
setResult(ERROR_GLITCHES);
}
}
return report.str();
}
void printStatus() override {
ALOGD("st = %d, #gl = %3d,", mState, mGlitchCount);
}
/**
* @param frameData contains microphone data with sine signal feedback
* @param channelCount
*/
result_code processInputFrame(const float *frameData, int /* channelCount */) override {
result_code result = RESULT_OK;
float sample = frameData[getInputChannel()];
// Force a periodic glitch to test the detector!
if (mForceGlitchDurationFrames > 0) {
if (mForceGlitchCounter == 0) {
ALOGE("%s: finish a glitch!!", __func__);
mForceGlitchCounter = kForceGlitchPeriod;
} else if (mForceGlitchCounter <= mForceGlitchDurationFrames) {
// Force an abrupt offset.
sample += (sample > 0.0) ? -kForceGlitchOffset : kForceGlitchOffset;
}
--mForceGlitchCounter;
}
float peak = mPeakFollower.process(sample);
mInfiniteRecording.write(sample);
mStateFrameCounters[mState]++; // count how many frames we are in each state
switch (mState) {
case STATE_IDLE:
mDownCounter--;
if (mDownCounter <= 0) {
mState = STATE_IMMUNE;
mDownCounter = IMMUNE_FRAME_COUNT;
mInputPhase = 0.0; // prevent spike at start
mOutputPhase = 0.0;
resetAccumulator();
}
break;
case STATE_IMMUNE:
mDownCounter--;
if (mDownCounter <= 0) {
mState = STATE_WAITING_FOR_SIGNAL;
}
break;
case STATE_WAITING_FOR_SIGNAL:
if (peak > mThreshold) {
mState = STATE_WAITING_FOR_LOCK;
//ALOGD("%5d: switch to STATE_WAITING_FOR_LOCK", mFrameCounter);
resetAccumulator();
}
break;
case STATE_WAITING_FOR_LOCK:
mSinAccumulator += static_cast<double>(sample) * sinf(mInputPhase);
mCosAccumulator += static_cast<double>(sample) * cosf(mInputPhase);
mFramesAccumulated++;
// Must be a multiple of the period or the calculation will not be accurate.
if (mFramesAccumulated == mSinePeriod * PERIODS_NEEDED_FOR_LOCK) {
double magnitude = calculateMagnitudePhase(&mPhaseOffset);
if (mPhaseOffset != kPhaseInvalid) {
setMagnitude(magnitude);
ALOGD("%s() mag = %f, mPhaseOffset = %f",
__func__, magnitude, mPhaseOffset);
if (mMagnitude > mThreshold) {
if (fabs(mPhaseOffset) < kMaxPhaseError) {
mState = STATE_LOCKED;
mConsecutiveBadFrames = 0;
// ALOGD("%5d: switch to STATE_LOCKED", mFrameCounter);
}
// Adjust mInputPhase to match measured phase
mInputPhase += mPhaseOffset;
}
}
resetAccumulator();
}
incrementInputPhase();
break;
case STATE_LOCKED: {
// Predict next sine value
double predicted = sinf(mInputPhase) * mMagnitude;
double diff = predicted - sample;
double absDiff = fabs(diff);
mMaxGlitchDelta = std::max(mMaxGlitchDelta, absDiff);
if (absDiff > mScaledTolerance) { // bad frame
mConsecutiveBadFrames++;
mConsecutiveGoodFrames = 0;
LOGI("diff glitch frame #%d detected, absDiff = %g > %g",
mConsecutiveBadFrames, absDiff, mScaledTolerance);
if (mConsecutiveBadFrames > 0) {
result = ERROR_GLITCHES;
onGlitchStart();
}
resetAccumulator();
} else { // good frame
mConsecutiveBadFrames = 0;
mConsecutiveGoodFrames++;
mSumSquareSignal += predicted * predicted;
mSumSquareNoise += diff * diff;
// Track incoming signal and slowly adjust magnitude to account
// for drift in the DRC or AGC.
// Must be a multiple of the period or the calculation will not be accurate.
if (transformSample(sample)) {
// Adjust phase to account for sample rate drift.
mInputPhase += mPhaseOffset;
mMeanSquareNoise = mSumSquareNoise * mInverseSinePeriod;
mMeanSquareSignal = mSumSquareSignal * mInverseSinePeriod;
mSumSquareNoise = 0.0;
mSumSquareSignal = 0.0;
if (fabs(mPhaseOffset) > kMaxPhaseError) {
result = ERROR_GLITCHES;
onGlitchStart();
ALOGD("phase glitch detected, phaseOffset = %g", mPhaseOffset);
} else if (mMagnitude < mThreshold) {
result = ERROR_GLITCHES;
onGlitchStart();
ALOGD("magnitude glitch detected, mMagnitude = %g", mMagnitude);
}
}
}
} break;
case STATE_GLITCHING: {
// Predict next sine value
double predicted = sinf(mInputPhase) * mMagnitude;
double diff = predicted - sample;
double absDiff = fabs(diff);
mMaxGlitchDelta = std::max(mMaxGlitchDelta, absDiff);
if (absDiff > mScaledTolerance) { // bad frame
mConsecutiveBadFrames++;
mConsecutiveGoodFrames = 0;
mGlitchLength++;
if (mGlitchLength > maxMeasurableGlitchLength()) {
onGlitchTerminated();
}
} else { // good frame
mConsecutiveBadFrames = 0;
mConsecutiveGoodFrames++;
// If we get a full sine period of good samples in a row then consider the glitch over.
// We don't want to just consider a zero crossing the end of a glitch.
if (mConsecutiveGoodFrames > mSinePeriod) {
onGlitchEnd();
}
}
incrementInputPhase();
} break;
case NUM_STATES: // not a real state
break;
}
mFrameCounter++;
return result;
}
int maxMeasurableGlitchLength() const { return 2 * mSinePeriod; }
bool isOutputEnabled() override { return mState != STATE_IDLE; }
void onGlitchStart() {
mState = STATE_GLITCHING;
mGlitchLength = 1;
mLastGlitchPosition = mInfiniteRecording.getTotalWritten();
ALOGD("%5d: STARTED a glitch # %d, pos = %5d",
mFrameCounter, mGlitchCount, (int)mLastGlitchPosition);
ALOGD("glitch mSinePeriod = %d", mSinePeriod);
}
/**
* Give up waiting for a glitch to end and try to resync.
*/
void onGlitchTerminated() {
mGlitchCount++;
ALOGD("%5d: TERMINATED a glitch # %d, length = %d", mFrameCounter, mGlitchCount, mGlitchLength);
// We don't know how long the glitch really is so set the length to -1.
mGlitchLength = -1;
mState = STATE_WAITING_FOR_LOCK;
resetAccumulator();
}
void onGlitchEnd() {
mGlitchCount++;
ALOGD("%5d: ENDED a glitch # %d, length = %d", mFrameCounter, mGlitchCount, mGlitchLength);
mState = STATE_LOCKED;
resetAccumulator();
}
// reset the sine wave detector
void resetAccumulator() override {
BaseSineAnalyzer::resetAccumulator();
}
void reset() override {
BaseSineAnalyzer::reset();
mState = STATE_IDLE;
mDownCounter = IDLE_FRAME_COUNT;
}
void prepareToTest() override {
BaseSineAnalyzer::prepareToTest();
mGlitchCount = 0;
mGlitchLength = 0;
mMaxGlitchDelta = 0.0;
for (int i = 0; i < NUM_STATES; i++) {
mStateFrameCounters[i] = 0;
}
}
int32_t getLastGlitch(float *buffer, int32_t length) {
const int margin = mSinePeriod;
int32_t numSamples = mInfiniteRecording.readFrom(buffer,
mLastGlitchPosition - margin,
length);
ALOGD("%s: glitch at %d, edge = %7.4f, %7.4f, %7.4f",
__func__, (int)mLastGlitchPosition,
buffer[margin - 1], buffer[margin], buffer[margin+1]);
return numSamples;
}
// Copy the newest `length` samples of the recording into the caller's buffer.
// Returns the number of samples actually delivered.
int32_t getRecentSamples(float *buffer, int32_t length) {
    const int startPosition = mInfiniteRecording.getTotalWritten() - length;
    return mInfiniteRecording.readFrom(buffer, startPosition, length);
}
// Force artificial glitches of the given duration (frames) for debugging
// the detector. A value of 0 disables injection (see mForceGlitchDurationFrames).
void setForcedGlitchDuration(int frames) {
    mForceGlitchDurationFrames = frames;
}
private:
    // These states must match the values in GlitchActivity.java
    enum sine_state_t {
        STATE_IDLE,               // beginning, nothing happening yet
        STATE_IMMUNE,             // ignoring input, waiting for HW to settle
        STATE_WAITING_FOR_SIGNAL, // looking for a loud signal
        STATE_WAITING_FOR_LOCK,   // trying to lock onto the phase of the sine
        STATE_LOCKED,             // locked on the sine wave, looking for glitches
        STATE_GLITCHING,          // locked on the sine wave but glitching
        NUM_STATES
    };

    enum constants {
        // Arbitrary durations, assuming 48000 Hz
        IDLE_FRAME_COUNT = 48 * 100,   // ~100 ms spent in STATE_IDLE
        IMMUNE_FRAME_COUNT = 48 * 100, // ~100 ms ignoring input while HW settles
        PERIODS_NEEDED_FOR_LOCK = 8,   // sine periods to analyze before locking
        MIN_SNR_DB = 65                // minimum signal-to-noise ratio, decibels
    };

    // Maximum phase error (radians) tolerated by the detector.
    static constexpr double kMaxPhaseError = M_PI * 0.05;

    // NOTE(review): appears to be the amplitude-deviation threshold for
    // flagging a glitch — confirm against the processing code above.
    double mThreshold = 0.005;
    int32_t mStateFrameCounters[NUM_STATES]; // frames spent in each state
    sine_state_t mState = STATE_IDLE;
    int64_t mLastGlitchPosition; // absolute position in the infinite recording

    double mMaxGlitchDelta = 0.0;
    int32_t mGlitchCount = 0;
    int32_t mConsecutiveBadFrames = 0;
    int32_t mConsecutiveGoodFrames = 0;
    int32_t mGlitchLength = 0;
    // Countdown used by the timed states; reset() starts it at IDLE_FRAME_COUNT.
    int mDownCounter = IDLE_FRAME_COUNT;
    int32_t mFrameCounter = 0;

    int32_t mForceGlitchDurationFrames = 0; // if > 0 then force a glitch for debugging
    static constexpr int32_t kForceGlitchPeriod = 2 * 48000; // How often we glitch
    static constexpr float kForceGlitchOffset = 0.20f; // amplitude offset of the forced glitch
    int32_t mForceGlitchCounter = kForceGlitchPeriod; // count down and trigger at zero

    // Measure background noise continuously as a deviation from the expected signal.
    double mSumSquareSignal = 0.0;
    double mSumSquareNoise = 0.0;
    double mMeanSquareSignal = 0.0;
    double mMeanSquareNoise = 0.0;

    PeakDetector mPeakFollower;
};
#endif //ANALYZER_GLITCH_ANALYZER_H

View file

@ -0,0 +1,68 @@
/*
* Copyright (C) 2019 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef OBOETESTER_INFINITE_RECORDING_H
#define OBOETESTER_INFINITE_RECORDING_H
#include <algorithm>
#include <atomic>
#include <cstddef>
#include <cstdint>
#include <memory>
#include <unistd.h>
/**
* Record forever. Keep last data.
*/
/**
 * Record forever into a fixed-size circular buffer, keeping only the most
 * recent mMaxSamples samples. Positions are absolute sample indices counted
 * from the start of recording.
 */
template <typename T>
class InfiniteRecording {
public:
    InfiniteRecording(size_t maxSamples)
            : mMaxSamples(maxSamples) {
        mData = std::make_unique<T[]>(mMaxSamples);
    }

    /**
     * Copy up to count samples starting at the given absolute position.
     *
     * The position is clamped into the range of data that is still available:
     * not past the write position, and not older than the oldest sample still
     * held in the circular buffer. (Previously the second clamp was missing,
     * so a position that had already been overwritten returned wrapped,
     * out-of-order data.)
     *
     * @param buffer destination, must hold at least count samples
     * @param position absolute sample index to start reading from
     * @param count maximum number of samples to copy
     * @return number of samples actually copied
     */
    int32_t readFrom(T *buffer, size_t position, size_t count) {
        const size_t maxPosition = mWritten.load();
        // Oldest sample that has not yet been overwritten.
        const size_t minPosition = (maxPosition > mMaxSamples)
                ? (maxPosition - mMaxSamples) : 0;
        position = std::min(position, maxPosition);
        position = std::max(position, minPosition);

        size_t numToRead = std::min(count, mMaxSamples);
        numToRead = std::min(numToRead, maxPosition - position);
        if (numToRead == 0) return 0;

        // The region may wrap past the end of the circular buffer,
        // in which case it is copied in two pieces.
        const size_t offset = position % mMaxSamples;
        const size_t firstReadSize = std::min(numToRead, mMaxSamples - offset); // till end
        std::copy(&mData[offset], &mData[offset + firstReadSize], buffer);
        if (firstReadSize < numToRead) {
            // Second read for the wrapped portion at the start of the buffer.
            std::copy(&mData[0], &mData[numToRead - firstReadSize], &buffer[firstReadSize]);
        }
        return numToRead;
    }

    // Append one sample, overwriting the oldest sample once the buffer is full.
    void write(T sample) {
        const size_t position = mWritten.load();
        const size_t offset = position % mMaxSamples;
        mData[offset] = sample;
        mWritten++;
    }

    // Total number of samples ever written (not limited by the buffer size).
    int64_t getTotalWritten() {
        return mWritten.load();
    }

private:
    std::unique_ptr<T[]> mData;
    std::atomic<size_t>  mWritten{0};
    const size_t         mMaxSamples;
};
#endif //OBOETESTER_INFINITE_RECORDING_H

View file

@ -0,0 +1,862 @@
/*
* Copyright (C) 2017 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Tools for measuring latency and for detecting glitches.
* These classes are pure math and can be used with any audio system.
*/
#ifndef ANALYZER_LATENCY_ANALYZER_H
#define ANALYZER_LATENCY_ANALYZER_H
#include <algorithm>
#include <assert.h>
#include <cctype>
#include <iomanip>
#include <iostream>
#include <math.h>
#include <memory>
#include <sstream>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <vector>
#include "PeakDetector.h"
#include "PseudoRandom.h"
#include "RandomPulseGenerator.h"
// This is used when the code is in not in Android.
#ifndef ALOGD
#define ALOGD LOGD
#define ALOGE LOGE
#define ALOGW LOGW
#endif
#define LOOPBACK_RESULT_TAG "RESULT: "
// Enable or disable the optimized latency calculation.
#define USE_FAST_LATENCY_CALCULATION 1
static constexpr int32_t kDefaultSampleRate = 48000;
static constexpr int32_t kMillisPerSecond = 1000; // by definition
static constexpr int32_t kMaxLatencyMillis = 1000; // arbitrary and generous
/**
 * Result of a latency measurement: latency in frames plus the normalized
 * correlation of the matched pulse.
 */
struct LatencyReport {
    int32_t latencyInFrames = 0;
    double correlation = 0.0;

    // Restore the default (empty) result.
    void reset() {
        *this = LatencyReport{};
    }
};
/**
 * Calculate a normalized cross correlation between two sample windows.
 * @return value between -1.0 and 1.0
 */
static float calculateNormalizedCorrelation(const float *a,
                                            const float *b,
                                            int windowSize,
                                            int stride) {
    float dotProduct = 0.0;
    float energy = 0.0;
    // Accumulate the product and the combined energy of both signals,
    // skipping samples according to stride to reduce the CPU load.
    for (int n = 0; n < windowSize; n += stride) {
        const float sampleA = a[n];
        const float sampleB = b[n];
        dotProduct += sampleA * sampleB;
        energy += (sampleA * sampleA) + (sampleB * sampleB);
    }
    // Avoid dividing by zero when both windows are essentially silent.
    if (energy < 1.0e-9) {
        return 0.0f;
    }
    return 2.0 * dotProduct / energy;
}
/**
 * Calculate the root mean square of an array of samples.
 *
 * @param data sample array; only read when numSamples > 0
 * @param numSamples number of samples to process
 * @return RMS value, or 0.0 for an empty input (previously this divided
 *         by zero and returned NaN)
 */
static double calculateRootMeanSquare(float *data, int32_t numSamples) {
    if (numSamples <= 0) {
        return 0.0; // guard against 0/0 -> NaN
    }
    double sum = 0.0;
    for (int32_t i = 0; i < numSamples; i++) {
        double sample = data[i];
        sum += sample * sample;
    }
    return sqrt(sum / numSamples);
}
/**
 * Monophonic recording with processing.
 * Samples are stored internally as floats.
 */
class AudioRecording
{
public:
    // Allocate storage for up to maxFrames samples and clear the recording.
    void allocate(int maxFrames) {
        mData = std::make_unique<float[]>(maxFrames);
        mMaxFrames = maxFrames;
        mFrameCounter = 0;
    }

    // Append 16-bit samples from channel 0, converted to float.
    // Stops at the end of the buffer; returns the number of frames written.
    int32_t write(const int16_t *inputData, int32_t inputChannelCount, int32_t numFrames) {
        const int32_t framesAvailable = mMaxFrames - mFrameCounter;
        if (numFrames > framesAvailable) {
            numFrames = framesAvailable;
        }
        for (int frame = 0; frame < numFrames; frame++) {
            mData[mFrameCounter++] = inputData[frame * inputChannelCount] * (1.0f / 32768);
        }
        return numFrames;
    }

    // Append float samples from channel 0.
    // Stops at the end of the buffer; returns the number of frames written.
    int32_t write(const float *inputData, int32_t inputChannelCount, int32_t numFrames) {
        const int32_t framesAvailable = mMaxFrames - mFrameCounter;
        if (numFrames > framesAvailable) {
            numFrames = framesAvailable;
        }
        for (int frame = 0; frame < numFrames; frame++) {
            mData[mFrameCounter++] = inputData[frame * inputChannelCount];
        }
        return numFrames;
    }

    // Append a single sample. Returns 1 on success, 0 if the buffer is full.
    int32_t write(float sample) {
        if (mFrameCounter >= mMaxFrames) {
            return 0;
        }
        mData[mFrameCounter++] = sample;
        return 1;
    }

    // Discard the recording but keep the allocated storage.
    void clear() {
        mFrameCounter = 0;
    }

    // Number of samples recorded so far.
    int32_t size() const {
        return mFrameCounter;
    }

    bool isFull() const {
        return mFrameCounter >= mMaxFrames;
    }

    float *getData() const {
        return mData.get();
    }

    void setSampleRate(int32_t sampleRate) {
        mSampleRate = sampleRate;
    }

    int32_t getSampleRate() const {
        return mSampleRate;
    }

    /**
     * Square every sample so they are all positive and the peaks are emphasized.
     */
    void square() {
        float *samples = mData.get();
        for (int i = 0; i < mFrameCounter; i++) {
            samples[i] *= samples[i];
        }
    }

    // Envelope follower that rides over the peak values.
    void detectPeaks(float decay) {
        float envelope = 0.0f;
        float *samples = mData.get();
        for (int i = 0; i < mFrameCounter; i++) {
            envelope *= decay; // exponential decay
            const float magnitude = fabs(samples[i]);
            if (magnitude > envelope) {
                envelope = magnitude; // never fall below the input signal
            }
            samples[i] = envelope; // write the result back into the array
        }
    }

    /**
     * Amplify the signal so that its peak matches the specified target.
     *
     * @param target final max value
     * @return gain applied to the signal
     */
    float normalize(float target) {
        float peak = 1.0e-9f; // floor avoids dividing by zero on silence
        for (int i = 0; i < mFrameCounter; i++) {
            peak = std::max(peak, fabsf(mData[i]));
        }
        const float gain = target / peak;
        for (int i = 0; i < mFrameCounter; i++) {
            mData[i] *= gain;
        }
        return gain;
    }

private:
    std::unique_ptr<float[]> mData;
    int32_t mFrameCounter = 0;
    int32_t mMaxFrames = 0;
    int32_t mSampleRate = kDefaultSampleRate; // common default
};
/**
 * Find latency using cross correlation of the pulse within a window of the
 * recorded audio. The stride is used to skip over samples and reduce the
 * CPU load.
 *
 * @param recorded full recording to search
 * @param recordedOffset first frame of the search window in the recording
 * @param recordedWindowSize number of frames to search
 * @param pulse reference pulse to correlate against
 * @param report [out] receives the latency (frames) and the peak correlation
 * @param stride test only every stride-th offset
 * @return 0 on success, a negative error code otherwise
 */
static int measureLatencyFromPulsePartial(AudioRecording &recorded,
                                          int32_t recordedOffset,
                                          int32_t recordedWindowSize,
                                          AudioRecording &pulse,
                                          LatencyReport *report,
                                          int32_t stride) {
    report->reset();
    // The whole pulse must fit inside the recording at every tested offset.
    if (recordedOffset + recordedWindowSize + pulse.size() > recorded.size()) {
        ALOGE("%s() tried to correlate past end of recording, recordedOffset = %d frames\n",
              __func__, recordedOffset);
        return -3;
    }
    int32_t numCorrelations = recordedWindowSize / stride;
    if (numCorrelations < 10) {
        ALOGE("%s() recording too small = %d frames, numCorrelations = %d\n",
              __func__, recorded.size(), numCorrelations);
        return -1;
    }
    std::unique_ptr<float[]> correlations = std::make_unique<float[]>(numCorrelations);

    // Correlate pulse against the recorded data.
    for (int32_t i = 0; i < numCorrelations; i++) {
        const int32_t index = (i * stride) + recordedOffset;
        float correlation = calculateNormalizedCorrelation(&recorded.getData()[index],
                                                           &pulse.getData()[0],
                                                           pulse.size(),
                                                           stride);
        correlations[i] = correlation;
    }

    // Find the highest peak in the correlation array. The absolute value is
    // used so a phase-inverted loopback path still produces a peak.
    float peakCorrelation = 0.0;
    int32_t peakIndex = -1;
    for (int32_t i = 0; i < numCorrelations; i++) {
        float value = fabsf(correlations[i]);
        if (value > peakCorrelation) {
            peakCorrelation = value;
            peakIndex = i;
        }
    }
    if (peakIndex < 0) {
        ALOGE("%s() no signal for correlation\n", __func__);
        return -2;
    }
#if 0
    // Dump correlation data for charting.
    else {
        const int32_t margin = 50;
        int32_t startIndex = std::max(0, peakIndex - margin);
        int32_t endIndex = std::min(numCorrelations - 1, peakIndex + margin);
        for (int32_t index = startIndex; index < endIndex; index++) {
            ALOGD("Correlation, %d, %f", index, correlations[index]);
        }
    }
#endif

    // Convert the winning correlation index back into a frame position.
    report->latencyInFrames = recordedOffset + (peakIndex * stride);
    report->correlation = peakCorrelation;

    return 0;
}
#if USE_FAST_LATENCY_CALCULATION
/**
 * Two-pass latency search: a coarse pass that skips most of the samples,
 * followed by a fine, stride-1 pass centered on the coarse result.
 */
static int measureLatencyFromPulse(AudioRecording &recorded,
                                   AudioRecording &pulse,
                                   LatencyReport *report) {
    const int32_t coarseStride = 16;
    const int32_t fineWindowSize = coarseStride * 8;
    const int32_t fineStride = 1;

    LatencyReport coarseReport;
    coarseReport.reset();
    // Rough search, skipping over most of the samples.
    const int coarseResult = measureLatencyFromPulsePartial(recorded,
                                                            0, // recordedOffset
                                                            recorded.size() - pulse.size(),
                                                            pulse,
                                                            &coarseReport,
                                                            coarseStride);
    if (coarseResult != 0) {
        return coarseResult;
    }
    // Fine resolution search near the coarse latency estimate.
    const int32_t recordedOffset =
            std::max(0, coarseReport.latencyInFrames - (fineWindowSize / 2));
    return measureLatencyFromPulsePartial(recorded,
                                          recordedOffset,
                                          fineWindowSize,
                                          pulse,
                                          report,
                                          fineStride);
}
#else
// TODO - When we are confident of the new code we can remove this old code.
// Single exhaustive search over the whole recording with stride 1.
static int measureLatencyFromPulse(AudioRecording &recorded,
                                   AudioRecording &pulse,
                                   LatencyReport *report) {
    return measureLatencyFromPulsePartial(recorded,
                                          0,
                                          recorded.size() - pulse.size(),
                                          pulse,
                                          report,
                                          1);
}
#endif
// ====================================================================================
/**
 * Base class for processors that consume a loopback input stream and
 * generate an output stream, one frame at a time.
 */
class LoopbackProcessor {
public:
    virtual ~LoopbackProcessor() = default;

    // Result codes shared by the analyzers.
    enum result_code {
        RESULT_OK = 0,
        ERROR_NOISY = -99,
        ERROR_VOLUME_TOO_LOW,
        ERROR_VOLUME_TOO_HIGH,
        ERROR_CONFIDENCE,
        ERROR_INVALID_STATE,
        ERROR_GLITCHES,
        ERROR_NO_LOCK
    };

    // Called once before a measurement run begins; defaults to a full reset.
    virtual void prepareToTest() {
        reset();
    }

    // Clear the result and count the reset. Subclasses should call through.
    virtual void reset() {
        mResult = 0;
        mResetCount++;
    }

    virtual result_code processInputFrame(const float *frameData, int channelCount) = 0;
    virtual result_code processOutputFrame(float *frameData, int channelCount) = 0;

    // Feed input and output buffers through the per-frame callbacks.
    // While both sides have frames they are processed in lock step;
    // afterwards the longer side is drained on its own.
    void process(const float *inputData, int inputChannelCount, int numInputFrames,
                 float *outputData, int outputChannelCount, int numOutputFrames) {
        const int numPaired = std::min(numInputFrames, numOutputFrames);
        for (int frame = 0; frame < numPaired; frame++) {
            processInputFrame(inputData, inputChannelCount);
            inputData += inputChannelCount;
            processOutputFrame(outputData, outputChannelCount);
            outputData += outputChannelCount;
        }
        // Drain leftover input frames.
        for (int frame = numPaired; frame < numInputFrames; frame++) {
            processInputFrame(inputData, inputChannelCount);
            inputData += inputChannelCount;
        }
        // Drain leftover output frames.
        for (int frame = numPaired; frame < numOutputFrames; frame++) {
            processOutputFrame(outputData, outputChannelCount);
            outputData += outputChannelCount;
        }
    }

    // Produce a human readable summary of the measurement.
    virtual std::string analyze() = 0;

    virtual void printStatus() {};

    int32_t getResult() {
        return mResult;
    }

    void setResult(int32_t result) {
        mResult = result;
    }

    virtual bool isDone() {
        return false;
    }

    // Optional persistence hooks; the defaults report failure.
    virtual int save(const char *fileName) {
        (void) fileName;
        return -1;
    }

    virtual int load(const char *fileName) {
        (void) fileName;
        return -1;
    }

    virtual void setSampleRate(int32_t sampleRate) {
        mSampleRate = sampleRate;
    }

    int32_t getSampleRate() const {
        return mSampleRate;
    }

    int32_t getResetCount() const {
        return mResetCount;
    }

    /** Called when not enough input frames could be read after synchronization.
     */
    virtual void onInsufficientRead() {
        reset();
    }

    /**
     * Some analyzers may only look at one channel.
     * You can optionally specify that channel here.
     *
     * @param inputChannel
     */
    void setInputChannel(int inputChannel) {
        mInputChannel = inputChannel;
    }

    int getInputChannel() const {
        return mInputChannel;
    }

    /**
     * Some analyzers may only generate one channel.
     * You can optionally specify that channel here.
     *
     * @param outputChannel
     */
    void setOutputChannel(int outputChannel) {
        mOutputChannel = outputChannel;
    }

    int getOutputChannel() const {
        return mOutputChannel;
    }

protected:
    int32_t mResetCount = 0;

private:
    int32_t mInputChannel = 0;
    int32_t mOutputChannel = 0;
    int32_t mSampleRate = kDefaultSampleRate;
    int32_t mResult = 0;
};
/**
 * Abstract loopback processor that measures round-trip latency.
 */
class LatencyAnalyzer : public LoopbackProcessor {
public:
    LatencyAnalyzer() : LoopbackProcessor() {}
    virtual ~LatencyAnalyzer() = default;

    /**
     * Call this after the constructor because it calls other virtual methods.
     */
    virtual void setup() = 0;

    virtual int32_t getProgress() const = 0;

    virtual int getState() const = 0;

    // @return latency in frames
    virtual int32_t getMeasuredLatency() const = 0;

    /**
     * Overall confidence in the latency result: the correlation penalized by
     * the background-noise/signal ratio, clipped at zero.
     * @return probability value between 0.0 and 1.0
     */
    double getMeasuredConfidence() const {
        const double signal = getSignalRMS();
        const double noise = getBackgroundRMS();
        // Limit the ratio to 1.0 and prevent division by zero.
        const double noiseSignalRatio = (signal <= noise) ? 1.0 : (noise / signal);
        // A high noise floor with a weak signal must not cause a false match.
        return std::max(0.0, getMeasuredCorrelation() - noiseSignalRatio);
    }

    /**
     * Cross correlation value for the noise pulse against
     * the corresponding position in the normalized recording.
     *
     * @return value between -1.0 and 1.0
     */
    virtual double getMeasuredCorrelation() const = 0;

    virtual double getBackgroundRMS() const = 0;

    virtual double getSignalRMS() const = 0;

    virtual bool hasEnoughData() const = 0;
};
// ====================================================================================
/**
 * Measure latency given a loopback stream data.
 * Use an encoded bit train as the sound source because it
 * has an unambiguous correlation value.
 * Uses a state machine to cycle through various stages:
 * measure background noise, play and record the pulse, then analyze.
 */
class PulseLatencyAnalyzer : public LatencyAnalyzer {
public:
    // Allocate the recording buffer from the sample rate and the maximum
    // latency we expect to measure. Calls virtual methods, so it must be
    // invoked after construction.
    void setup() override {
        int32_t pulseLength = calculatePulseLength();
        int32_t maxLatencyFrames = getSampleRate() * kMaxLatencyMillis / kMillisPerSecond;
        mFramesToRecord = pulseLength + maxLatencyFrames;
        mAudioRecording.allocate(mFramesToRecord);
        mAudioRecording.setSampleRate(getSampleRate());
    }

    int getState() const override {
        return mState;
    }

    // Keep the recording's sample rate in sync with the processor's.
    void setSampleRate(int32_t sampleRate) override {
        LoopbackProcessor::setSampleRate(sampleRate);
        mAudioRecording.setSampleRate(sampleRate);
    }

    // Return to the initial state and regenerate the reference pulse.
    void reset() override {
        LoopbackProcessor::reset();
        mState = STATE_MEASURE_BACKGROUND;
        mDownCounter = (int32_t) (getSampleRate() * kBackgroundMeasurementLengthSeconds);
        mLoopCounter = 0;
        mPulseCursor = 0;
        mBackgroundSumSquare = 0.0f;
        mBackgroundSumCount = 0;
        mBackgroundRMS = 0.0f;
        mSignalRMS = 0.0f;
        generatePulseRecording(calculatePulseLength());
        mAudioRecording.clear();
        mLatencyReport.reset();
    }

    bool hasEnoughData() const override {
        return mAudioRecording.isFull();
    }

    bool isDone() override {
        return mState == STATE_DONE;
    }

    // Progress is reported as the number of frames recorded so far.
    int32_t getProgress() const override {
        return mAudioRecording.size();
    }

    // Run the correlation, format a text report and set the result code.
    // Leaves the state machine in STATE_DONE.
    std::string analyze() override {
        std::stringstream report;
        report << "PulseLatencyAnalyzer ---------------\n";
        report << LOOPBACK_RESULT_TAG "test.state = "
               << std::setw(8) << mState << "\n";
        report << LOOPBACK_RESULT_TAG "test.state.name = "
               << convertStateToText(mState) << "\n";
        report << LOOPBACK_RESULT_TAG "background.rms = "
               << std::setw(8) << mBackgroundRMS << "\n";

        int32_t newResult = RESULT_OK;
        if (mState != STATE_GOT_DATA) {
            report << "WARNING - Bad state. Check volume on device.\n";
            // setResult(ERROR_INVALID_STATE);
        } else {
            // Normalize so the correlation is independent of loopback gain.
            float gain = mAudioRecording.normalize(1.0f);
            measureLatency();
            // Calculate signalRMS even if it is bogus.
            // Also it may be used in the confidence calculation below.
            mSignalRMS = calculateRootMeanSquare(
                    &mAudioRecording.getData()[mLatencyReport.latencyInFrames], mPulse.size())
                         / gain;
            if (getMeasuredConfidence() < getMinimumConfidence()) {
                report << " ERROR - confidence too low!";
                newResult = ERROR_CONFIDENCE;
            }
            double latencyMillis = kMillisPerSecond * (double) mLatencyReport.latencyInFrames
                                   / getSampleRate();
            report << LOOPBACK_RESULT_TAG "latency.frames = " << std::setw(8)
                   << mLatencyReport.latencyInFrames << "\n";
            report << LOOPBACK_RESULT_TAG "latency.msec = " << std::setw(8)
                   << latencyMillis << "\n";
            report << LOOPBACK_RESULT_TAG "latency.confidence = " << std::setw(8)
                   << getMeasuredConfidence() << "\n";
            report << LOOPBACK_RESULT_TAG "latency.correlation = " << std::setw(8)
                   << getMeasuredCorrelation() << "\n";
        }
        mState = STATE_DONE;
        // Do not overwrite an earlier error code.
        if (getResult() == RESULT_OK) {
            setResult(newResult);
        }

        return report.str();
    }

    int32_t getMeasuredLatency() const override {
        return mLatencyReport.latencyInFrames;
    }

    double getMeasuredCorrelation() const override {
        return mLatencyReport.correlation;
    }

    double getBackgroundRMS() const override {
        return mBackgroundRMS;
    }

    double getSignalRMS() const override {
        return mSignalRMS;
    }

    bool isRecordingComplete() {
        return mState == STATE_GOT_DATA;
    }

    void printStatus() override {
        ALOGD("latency: st = %d = %s", mState, convertStateToText(mState));
    }

    // Input side of the state machine: measure the background noise, then
    // record input until the recording buffer is full.
    result_code processInputFrame(const float *frameData, int /* channelCount */) override {
        echo_state nextState = mState;
        mLoopCounter++;
        float input = frameData[0];

        switch (mState) {
            case STATE_MEASURE_BACKGROUND:
                // Measure background RMS on channel 0
                mBackgroundSumSquare += static_cast<double>(input) * input;
                mBackgroundSumCount++;
                mDownCounter--;
                if (mDownCounter <= 0) {
                    mBackgroundRMS = sqrtf(mBackgroundSumSquare / mBackgroundSumCount);
                    nextState = STATE_IN_PULSE;
                    mPulseCursor = 0;
                }
                break;

            case STATE_IN_PULSE:
                // Record input until the mAudioRecording is full.
                mAudioRecording.write(input);
                if (hasEnoughData()) {
                    nextState = STATE_GOT_DATA;
                }
                break;

            case STATE_GOT_DATA:
            case STATE_DONE:
            default:
                break;
        }

        mState = nextState;
        return RESULT_OK;
    }

    // Output side of the state machine: play the pulse once during
    // STATE_IN_PULSE, otherwise emit silence.
    result_code processOutputFrame(float *frameData, int channelCount) override {
        switch (mState) {
            case STATE_IN_PULSE:
                if (mPulseCursor < mPulse.size()) {
                    float pulseSample = mPulse.getData()[mPulseCursor++];
                    for (int i = 0; i < channelCount; i++) {
                        frameData[i] = pulseSample;
                    }
                } else {
                    for (int i = 0; i < channelCount; i++) {
                        frameData[i] = 0;
                    }
                }
                break;

            case STATE_MEASURE_BACKGROUND:
            case STATE_GOT_DATA:
            case STATE_DONE:
            default:
                for (int i = 0; i < channelCount; i++) {
                    frameData[i] = 0.0f; // silence
                }
                break;
        }

        return RESULT_OK;
    }

protected:
    // Length of the reference pulse in frames.
    virtual int32_t calculatePulseLength() const = 0;

    // Fill mPulse with the reference signal to be played.
    virtual void generatePulseRecording(int32_t pulseLength) = 0;

    // Correlate mPulse against mAudioRecording into mLatencyReport.
    virtual void measureLatency() = 0;

    // Minimum confidence required to accept the latency result.
    virtual double getMinimumConfidence() const {
        return 0.5;
    }

    AudioRecording mPulse;
    AudioRecording mAudioRecording; // contains only the input after starting the pulse
    LatencyReport mLatencyReport;

    static constexpr int32_t kPulseLengthMillis = 500;
    float mPulseAmplitude = 0.5f;
    double mBackgroundRMS = 0.0;
    double mSignalRMS = 0.0;

private:
    enum echo_state {
        STATE_MEASURE_BACKGROUND,
        STATE_IN_PULSE,
        STATE_GOT_DATA, // must match RoundTripLatencyActivity.java
        STATE_DONE,
    };

    const char *convertStateToText(echo_state state) {
        switch (state) {
            case STATE_MEASURE_BACKGROUND:
                return "INIT";
            case STATE_IN_PULSE:
                return "PULSE";
            case STATE_GOT_DATA:
                return "GOT_DATA";
            case STATE_DONE:
                return "DONE";
        }
        return "UNKNOWN";
    }

    int32_t mDownCounter = 500; // frames left of background measurement
    int32_t mLoopCounter = 0;   // total frames processed
    echo_state mState = STATE_MEASURE_BACKGROUND;

    static constexpr double kBackgroundMeasurementLengthSeconds = 0.5;

    int32_t mPulseCursor = 0; // next frame of mPulse to play
    double mBackgroundSumSquare = 0.0;
    int32_t mBackgroundSumCount = 0;
    int32_t mFramesToRecord = 0;
};
/**
 * Latency analyzer that plays a series of random bits encoded with the
 * Manchester encoder. Works well for wired loopback but not very well
 * for through-the-air loopback.
 */
class EncodedRandomLatencyAnalyzer : public PulseLatencyAnalyzer {
protected:
    // Round the pulse length down to a whole number of encoded bits.
    int32_t calculatePulseLength() const override {
        const int32_t bitCount = getSampleRate() * kPulseLengthMillis
                                 / (kFramesPerEncodedBit * kMillisPerSecond);
        return bitCount * kFramesPerEncodedBit;
    }

    void generatePulseRecording(int32_t pulseLength) override {
        mPulse.allocate(pulseLength);
        RandomPulseGenerator pulser(kFramesPerEncodedBit);
        for (int32_t frame = 0; frame < pulseLength; frame++) {
            mPulse.write(pulser.nextFloat() * mPulseAmplitude);
        }
    }

    // The bit train correlates sharply, so a lower confidence is acceptable.
    double getMinimumConfidence() const override {
        return 0.2;
    }

    void measureLatency() override {
        measureLatencyFromPulse(mAudioRecording,
                                mPulse,
                                &mLatencyReport);
    }

private:
    static constexpr int32_t kFramesPerEncodedBit = 8; // multiple of 2
};
/**
 * Latency analyzer that sends white noise in a short burst pattern.
 * The reference signal and the recording are both run through an envelope
 * follower, so the correlation sees rectangular blocks instead of
 * fine-grained noise.
 */
class WhiteNoiseLatencyAnalyzer : public PulseLatencyAnalyzer {
protected:
    int32_t calculatePulseLength() const override {
        return getSampleRate() * kPulseLengthMillis / kMillisPerSecond;
    }

    void generatePulseRecording(int32_t pulseLength) override {
        mPulse.allocate(pulseLength);
        // Gate the noise on and off to sharpen the correlation peak.
        // Use more zeros than ones so that the correlation stays below 0.5
        // even when there is a strong background noise.
        int8_t pattern[] = {1, 0, 0,
                            1, 1, 0, 0, 0,
                            1, 1, 1, 0, 0, 0, 0,
                            1, 1, 1, 1, 0, 0, 0, 0, 0
        };
        PseudoRandom random;
        const int32_t numSections = sizeof(pattern);
        const int32_t framesPerSection = pulseLength / numSections;
        for (int32_t section = 0; section < numSections; section++) {
            const bool noiseOn = (pattern[section] != 0);
            for (int32_t frame = 0; frame < framesPerSection; frame++) {
                // Only pull random samples while the noise is gated on.
                mPulse.write(noiseOn
                             ? (float) (random.nextRandomDouble() * mPulseAmplitude)
                             : 0.0f);
            }
        }
        // Pad any frames left over by the integer division with silence.
        for (int32_t frame = framesPerSection * numSections; frame < pulseLength; frame++) {
            mPulse.write(0.0f);
        }
    }

    void measureLatency() override {
        // Smooth out the noise so we see rectangular blocks. This improves
        // immunity against phase cancellation and distortion.
        static constexpr float decay = 0.99f; // just under 1.0, lower numbers decay faster
        mAudioRecording.detectPeaks(decay);
        mPulse.detectPeaks(decay);
        measureLatencyFromPulse(mAudioRecording,
                                mPulse,
                                &mLatencyReport);
    }
};
#endif // ANALYZER_LATENCY_ANALYZER_H

View file

@ -0,0 +1,97 @@
/*
* Copyright 2019 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef ANALYZER_MANCHESTER_ENCODER_H
#define ANALYZER_MANCHESTER_ENCODER_H
#include <cstdint>
/**
 * Encode bytes using the Manchester Coding scheme (IEEE 802.3 compatible).
 *
 * Manchester Code is self clocking: there is a transition in the middle of
 * every bit. A zero is high then low; a one is low then high. This avoids
 * long DC sections that would droop when passed through analog circuits
 * with AC coupling.
 */
class ManchesterEncoder {
public:
    ManchesterEncoder(int samplesPerPulse)
            : mSamplesPerPulse(samplesPerPulse)
            , mSamplesPerPulseHalf(samplesPerPulse / 2)
            , mCursor(samplesPerPulse) {
    }

    virtual ~ManchesterEncoder() = default;

    /**
     * Called whenever the encoder needs another byte of data.
     * @return next byte
     */
    virtual uint8_t onNextByte() = 0;

    /**
     * Generate the next floating point sample of the encoded stream.
     * @return next float, -1.0 or +1.0
     */
    virtual float nextFloat() {
        advanceSample();
        const bool inFirstHalf = mCursor < mSamplesPerPulseHalf;
        if (mCurrentBit) {
            return inFirstHalf ? -1.0f : 1.0f; // one: low then high
        }
        return inFirstHalf ? 1.0f : -1.0f;     // zero: high then low
    }

protected:
    /**
     * Called when a new bit is ready to be encoded.
     * Subclasses can use it to prepare the encoded samples.
     */
    virtual void onNextBit(bool /* current */) {};

    // Advance the sample cursor, pulling the next bit — and the next byte
    // when the current one is exhausted.
    void advanceSample() {
        // Are we ready for a new bit?
        if (++mCursor >= mSamplesPerPulse) {
            mCursor = 0;
            if (mBitsLeft == 0) {
                mCurrentByte = onNextByte();
                mBitsLeft = 8;
            }
            --mBitsLeft;
            // Bits are sent most-significant first.
            mCurrentBit = (mCurrentByte >> mBitsLeft) & 1;
            onNextBit(mCurrentBit);
        }
    }

    bool getCurrentBit() {
        return mCurrentBit;
    }

    const int mSamplesPerPulse;
    const int mSamplesPerPulseHalf;
    int mCursor;
    int mBitsLeft = 0;
    uint8_t mCurrentByte = 0;
    bool mCurrentBit = false;
};
#endif //ANALYZER_MANCHESTER_ENCODER_H

View file

@ -0,0 +1,68 @@
/*
* Copyright 2015 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef ANALYZER_PEAK_DETECTOR_H
#define ANALYZER_PEAK_DETECTOR_H
#include <math.h>
/**
 * Follow the peak envelope of a signal: rise instantly with each peak, then
 * decay exponentially afterwards. The absolute value of the input signal
 * is used.
 */
class PeakDetector {
public:
    // Drop the envelope back to zero.
    void reset() {
        mLevel = 0.0;
    }

    // Feed one sample and return the updated envelope level.
    double process(double input) {
        mLevel *= mDecay; // exponential decay
        const double magnitude = fabs(input);
        if (magnitude > mLevel) {
            mLevel = magnitude; // never fall below the input signal
        }
        return mLevel;
    }

    double getLevel() const {
        return mLevel;
    }

    double getDecay() const {
        return mDecay;
    }

    /**
     * Multiply the level by this amount on every iteration to produce an
     * exponential decay curve. A value just under 1.0 is best, e.g. 0.99.
     * @param decay scale factor applied on each input
     */
    void setDecay(double decay) {
        mDecay = decay;
    }

private:
    static constexpr double kDefaultDecay = 0.99f;
    double mLevel = 0.0;
    double mDecay = kDefaultDecay;
};
#endif //ANALYZER_PEAK_DETECTOR_H

View file

@ -0,0 +1,57 @@
/*
* Copyright (C) 2017 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef ANALYZER_PSEUDORANDOM_H
#define ANALYZER_PSEUDORANDOM_H
#include <cstdint>
/**
 * Deterministic pseudo-random generator based on a 64-bit
 * linear-congruential sequence using the MMIX constants by Donald Knuth.
 */
class PseudoRandom {
public:
    PseudoRandom(int64_t seed = 99887766)
            : mState(seed)
    {}

    /**
     * @return next random double in the range -1.0 to 1.0
     */
    double nextRandomDouble() {
        constexpr double kScale = 0.5 / (((int32_t)1) << 30);
        return nextRandomInteger() * kScale;
    }

    /**
     * Calculate the next random 32 bit number using a linear-congruential
     * method with known real-time performance.
     */
    int32_t nextRandomInteger() {
#if __has_builtin(__builtin_mul_overflow) && __has_builtin(__builtin_add_overflow)
        int64_t product;
        // The overflow builtins make the intentional 64-bit wrap explicit.
        __builtin_mul_overflow(mState, (int64_t)6364136223846793005, &product);
        __builtin_add_overflow(product, (int64_t)1442695040888963407, &mState);
#else
        mState = (mState * (int64_t)6364136223846793005) + (int64_t)1442695040888963407;
#endif
        // The higher bits have a longer sequence, so use them.
        return (int32_t) (mState >> 32);
    }

private:
    int64_t mState;
};
#endif //ANALYZER_PSEUDORANDOM_H

View file

@ -0,0 +1,43 @@
/*
* Copyright 2015 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef ANALYZER_RANDOM_PULSE_GENERATOR_H
#define ANALYZER_RANDOM_PULSE_GENERATOR_H
#include <stdlib.h>
#include "RoundedManchesterEncoder.h"
/**
 * Generate random ones and zeros encoded as Manchester Code per IEEE 802.3.
 */
class RandomPulseGenerator : public RoundedManchesterEncoder {
public:
    RandomPulseGenerator(int samplesPerPulse)
            : RoundedManchesterEncoder(samplesPerPulse) {
    }

    virtual ~RandomPulseGenerator() = default;

    /**
     * Supply the encoder with a fresh random byte.
     * @return random byte
     */
    uint8_t onNextByte() override {
        const int randomValue = rand();
        return static_cast<uint8_t>(randomValue);
    }
};
#endif //ANALYZER_RANDOM_PULSE_GENERATOR_H

View file

@ -0,0 +1,88 @@
/*
* Copyright 2019 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef ANALYZER_ROUNDED_MANCHESTER_ENCODER_H
#define ANALYZER_ROUNDED_MANCHESTER_ENCODER_H
#include <math.h>
#include <memory>
#include <stdlib.h>
#include "ManchesterEncoder.h"
/**
* Encode bytes using Manchester Code.
* Round the edges using a half cosine to reduce ringing caused by a hard edge.
*/
class RoundedManchesterEncoder : public ManchesterEncoder {
public:
    // Pre-compute two one-pulse wave tables for a "zero" bit:
    //   mZeroAfterZero: rounded rise (-1 -> +1), flat +1, rounded fall
    //                   (+1 -> -1), flat -1.
    //   mZeroAfterOne:  flat +1 for the first quarter, then the same
    //                   fall and flat -1 tail.
    // nextFloat() negates the table output when the current bit is a one.
    RoundedManchesterEncoder(int samplesPerPulse)
            : ManchesterEncoder(samplesPerPulse) {
        // Each shape segment covers one quarter of the pulse.
        // NOTE(review): if samplesPerPulse is not a multiple of 4, the last
        // samplesPerPulse % 4 samples stay at the value-initialized 0.0f —
        // confirm callers always pass a multiple of 4.
        int rampSize = samplesPerPulse / 4;
        mZeroAfterZero = std::make_unique<float[]>(samplesPerPulse);
        mZeroAfterOne = std::make_unique<float[]>(samplesPerPulse);
        int sampleIndex = 0;
        // Quarter 1: half-cosine rise for zero-after-zero; flat +1 otherwise.
        for (int rampIndex = 0; rampIndex < rampSize; rampIndex++) {
            float phase = (rampIndex + 1) * M_PI / rampSize;
            float sample = -cosf(phase);
            mZeroAfterZero[sampleIndex] = sample;
            mZeroAfterOne[sampleIndex] = 1.0f;
            sampleIndex++;
        }
        // Quarter 2: both tables hold at +1.
        for (int rampIndex = 0; rampIndex < rampSize; rampIndex++) {
            mZeroAfterZero[sampleIndex] = 1.0f;
            mZeroAfterOne[sampleIndex] = 1.0f;
            sampleIndex++;
        }
        // Quarter 3: both tables fall from +1 to -1 on a half cosine.
        for (int rampIndex = 0; rampIndex < rampSize; rampIndex++) {
            float phase = (rampIndex + 1) * M_PI / rampSize;
            float sample = cosf(phase);
            mZeroAfterZero[sampleIndex] = sample;
            mZeroAfterOne[sampleIndex] = sample;
            sampleIndex++;
        }
        // Quarter 4: both tables hold at -1.
        for (int rampIndex = 0; rampIndex < rampSize; rampIndex++) {
            mZeroAfterZero[sampleIndex] = -1.0f;
            mZeroAfterOne[sampleIndex] = -1.0f;
            sampleIndex++;
        }
    }

    // Select the table to play for the next bit: mZeroAfterOne when the
    // bit changed relative to the previous one, mZeroAfterZero when it
    // repeated.
    void onNextBit(bool current) override {
        // Do we need to use the rounded edge?
        mCurrentSamples = (current ^ mPreviousBit)
                ? mZeroAfterOne.get()
                : mZeroAfterZero.get();
        mPreviousBit = current;
    }

    // Emit the next sample from the selected table, inverted when the
    // current bit (from the base class) is a one.
    float nextFloat() override {
        advanceSample();
        float output = mCurrentSamples[mCursor];
        if (getCurrentBit()) output = -output;
        return output;
    }

private:
    bool mPreviousBit = false;         // last bit passed to onNextBit()
    float *mCurrentSamples = nullptr;  // table selected for the current bit
    std::unique_ptr<float[]> mZeroAfterZero;
    std::unique_ptr<float[]> mZeroAfterOne;
};

View file

@ -0,0 +1,41 @@
/*
* Copyright (C) 2015 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
#ifndef NATIVE_AUDIO_ANDROID_DEBUG_H_H
#define NATIVE_AUDIO_ANDROID_DEBUG_H_H

#include <android/log.h>

// Change this 1 to 0 to strip all logging at compile time.
#if 1
// logcat tag used by every message from these macros.
#define MODULE_NAME "OboeAudio"
// Severity-specific wrappers around __android_log_print().
#define LOGV(...) __android_log_print(ANDROID_LOG_VERBOSE, MODULE_NAME, __VA_ARGS__)
#define LOGD(...) __android_log_print(ANDROID_LOG_DEBUG, MODULE_NAME, __VA_ARGS__)
#define LOGI(...) __android_log_print(ANDROID_LOG_INFO, MODULE_NAME, __VA_ARGS__)
#define LOGW(...) __android_log_print(ANDROID_LOG_WARN, MODULE_NAME, __VA_ARGS__)
#define LOGE(...) __android_log_print(ANDROID_LOG_ERROR, MODULE_NAME, __VA_ARGS__)
#define LOGF(...) __android_log_print(ANDROID_LOG_FATAL, MODULE_NAME, __VA_ARGS__)
#else
// No-op versions used when logging is disabled.
#define LOGV(...)
#define LOGD(...)
#define LOGI(...)
#define LOGW(...)
#define LOGE(...)
#define LOGF(...)
#endif

#endif //NATIVE_AUDIO_ANDROID_DEBUG_H_H

View file

@ -0,0 +1,35 @@
/*
* Copyright 2019 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <math.h>
#include "ExponentialShape.h"
// Construct as a FlowGraphFilter; the argument 1 is forwarded to the
// base class (presumably the port channel count — see FlowGraphFilter).
ExponentialShape::ExponentialShape()
        : FlowGraphFilter(1) {
}
// Map each input phase in [-1, +1] to an exponential sweep between
// mMinimum and mMaximum (mRatio = mMaximum / mMinimum, set by the setters).
//
// @param numFrames number of samples to process
// @return numFrames (all frames consumed)
int32_t ExponentialShape::onProcess(int32_t numFrames) {
    float *inputs = input.getBuffer();
    float *outputs = output.getBuffer();
    for (int i = 0; i < numFrames; i++) {
        // Normalize phase from [-1, +1] to [0, 1]. Use float literals to
        // avoid a double-precision round trip (matches LinearShape::onProcess).
        float normalizedPhase = (inputs[i] * 0.5f) + 0.5f;
        // min * ratio^t sweeps exponentially from min (t = 0) to max (t = 1).
        outputs[i] = mMinimum * powf(mRatio, normalizedPhase);
    }
    return numFrames;
}

View file

@ -0,0 +1,70 @@
/*
* Copyright 2019 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef OBOETESTER_EXPONENTIAL_SHAPE_H
#define OBOETESTER_EXPONENTIAL_SHAPE_H
#include "flowgraph/FlowGraphNode.h"
/**
* Generate an exponential sweep between min and max.
*
* The waveform is not band-limited so it will have aliasing artifacts at higher frequencies.
*/
class ExponentialShape : public oboe::flowgraph::FlowGraphFilter {
public:
    ExponentialShape();

    int32_t onProcess(int32_t numFrames) override;

    // Lower bound of the sweep.
    float getMinimum() const {
        return mMinimum;
    }

    /**
     * Set the lower bound of the sweep.
     * The minimum and maximum should not span zero:
     * both positive or both negative.
     *
     * @param minimum
     */
    void setMinimum(float minimum) {
        mMinimum = minimum;
        mRatio = mMaximum / mMinimum;
    }

    // Upper bound of the sweep.
    float getMaximum() const {
        return mMaximum;
    }

    /**
     * Set the upper bound of the sweep.
     * The minimum and maximum should not span zero:
     * both positive or both negative.
     *
     * @param maximum
     */
    void setMaximum(float maximum) {
        mMaximum = maximum;
        mRatio = mMaximum / mMinimum;
    }

private:
    // mRatio caches mMaximum / mMinimum so onProcess() can compute
    // min * ratio^t per frame.
    float mMinimum = 0.0;
    float mMaximum = 1.0;
    float mRatio = 1.0;
};
#endif //OBOETESTER_EXPONENTIAL_SHAPE_H

View file

@ -0,0 +1,42 @@
/*
* Copyright 2018 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <math.h>
#include <unistd.h>
#include "ImpulseOscillator.h"
// Nothing beyond the OscillatorBase defaults (48000 Hz sample rate,
// zero initial phase).
ImpulseOscillator::ImpulseOscillator()
        : OscillatorBase() {
}
// Emit a single-sample spike at the current amplitude once per period;
// every other sample is zero.
int32_t ImpulseOscillator::onProcess(int32_t numFrames) {
    const float *frequencies = frequency.getBuffer();
    const float *amplitudes = amplitude.getBuffer();
    float *outputBuffer = output.getBuffer();
    for (int frameIndex = 0; frameIndex < numFrames; frameIndex++) {
        mPhase += mFrequencyToPhaseIncrement * frequencies[frameIndex];
        float sample = 0.0f;
        if (mPhase >= 1.0f) {
            // Phase wrapped: fire the impulse and rewind by a full cycle.
            sample = amplitudes[frameIndex];
            mPhase -= 2.0f;
        }
        outputBuffer[frameIndex] = sample;
    }
    return numFrames;
}

View file

@ -0,0 +1,39 @@
/*
* Copyright 2018 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef NATIVEOBOE_IMPULSE_GENERATOR_H
#define NATIVEOBOE_IMPULSE_GENERATOR_H
#include <unistd.h>
#include <sys/types.h>
#include "flowgraph/FlowGraphNode.h"
#include "OscillatorBase.h"
/**
* Generate a raw impulse equal to the amplitude.
* The output baseline is zero.
*
* The waveform is not band-limited so it will have aliasing artifacts at higher frequencies.
*/
class ImpulseOscillator : public OscillatorBase {
public:
    ImpulseOscillator();

    // Writes a one-sample spike of the current amplitude each period;
    // all other output samples are zero.
    int32_t onProcess(int32_t numFrames) override;
};
#endif //NATIVEOBOE_IMPULSE_GENERATOR_H

View file

@ -0,0 +1,36 @@
/*
* Copyright 2019 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "LinearShape.h"
using namespace oboe::flowgraph;
// Construct as a FlowGraphFilter; the argument 1 is forwarded to the
// base class (presumably the port channel count — see FlowGraphFilter).
LinearShape::LinearShape()
        : FlowGraphFilter(1) {
}
// Map each input in [-1, +1] linearly onto [mMinimum, mMaximum].
int32_t LinearShape::onProcess(int numFrames) {
    float *inputs = input.getBuffer();
    float *outputs = output.getBuffer();
    for (int frameIndex = 0; frameIndex < numFrames; frameIndex++) {
        // Normalize from [-1, +1] to [0, 1], then interpolate min..max.
        float t = (inputs[frameIndex] * 0.5f) + 0.5f;
        outputs[frameIndex] = mMinimum + (t * (mMaximum - mMinimum));
    }
    return numFrames;
}

View file

@ -0,0 +1,53 @@
/*
* Copyright 2019 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef OBOETESTER_LINEAR_SHAPE_H
#define OBOETESTER_LINEAR_SHAPE_H
#include "flowgraph/FlowGraphNode.h"
/**
* Convert an input between -1.0 and +1.0 to a linear region between min and max.
*/
class LinearShape : public oboe::flowgraph::FlowGraphFilter {
public:
    LinearShape();

    int32_t onProcess(int numFrames) override;

    // Lower bound of the output range.
    float getMinimum() const {
        return mMinimum;
    }

    void setMinimum(float minimum) {
        mMinimum = minimum;
    }

    // Upper bound of the output range.
    float getMaximum() const {
        return mMaximum;
    }

    void setMaximum(float maximum) {
        mMaximum = maximum;
    }

private:
    float mMinimum = 0.0;
    float mMaximum = 1.0;
};
#endif //OBOETESTER_LINEAR_SHAPE_H

View file

@ -0,0 +1,26 @@
/*
* Copyright 2018 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "OscillatorBase.h"
using namespace oboe::flowgraph;
// Wire up the control ports (one value per frame each) and default the
// sample rate to 48000 Hz; callers can override via setSampleRate().
OscillatorBase::OscillatorBase()
        : frequency(*this, 1)
        , amplitude(*this, 1)
        , output(*this, 1) {
    setSampleRate(48000);
}

View file

@ -0,0 +1,100 @@
/*
* Copyright 2018 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef NATIVEOBOE_OSCILLATORBASE_H
#define NATIVEOBOE_OSCILLATORBASE_H
#include "flowgraph/FlowGraphNode.h"
/**
* Base class for various oscillators.
* The oscillator has a phase that ranges from -1.0 to +1.0.
* That makes it easier to implement simple algebraic waveforms.
*
* Subclasses must implement onProcess().
*
* This module has "frequency" and "amplitude" ports for control.
*/
class OscillatorBase : public oboe::flowgraph::FlowGraphNode {
public:
    OscillatorBase();

    virtual ~OscillatorBase() = default;

    // Cache the rate and the scaler that converts Hz into phase increment
    // per frame (the phase range -1..+1 spans 2.0).
    void setSampleRate(float sampleRate) {
        mSampleRate = sampleRate;
        mFrequencyToPhaseIncrement = 2.0f / sampleRate;
    }

    float getSampleRate() {
        return mSampleRate;
    }

    /**
     * Set the starting phase before the oscillator runs.
     * Mostly useful for LFOs. Calling this while the oscillator is
     * running will cause sharp pops.
     *
     * @param phase between -1.0 and +1.0
     */
    void setPhase(float phase) {
        mPhase = phase;
    }

    float getPhase() {
        return mPhase;
    }

    // Frequency control in Hz.
    oboe::flowgraph::FlowGraphPortFloatInput frequency;

    // Linear amplitude control: 0.0 is silence, 1.0 is a typical full scale.
    oboe::flowgraph::FlowGraphPortFloatInput amplitude;

    oboe::flowgraph::FlowGraphPortFloatOutput output;

protected:
    /**
     * Advance the phase by one frame at the given frequency in Hz,
     * wrapping it back into the range -1.0 to +1.0.
     * Frequency may be positive or negative but should stay within
     * +/- Nyquist (sampleRate / 2).
     *
     * @return the wrapped phase
     */
    float incrementPhase(float frequencyHz) {
        mPhase += frequencyHz * mFrequencyToPhaseIncrement;
        // Undo a single cycle of overflow in either direction.
        if (mPhase >= 1.0f) {
            mPhase -= 2.0f;
        } else if (mPhase < -1.0f) {
            mPhase += 2.0f;
        }
        return mPhase;
    }

    float mPhase = 0.0f;                      // current phase, -1.0 to +1.0
    float mSampleRate = 0.0f;                 // frames per second
    float mFrequencyToPhaseIncrement = 0.0f;  // Hz -> phase increment scaler
};
#endif //NATIVEOBOE_OSCILLATORBASE_H

View file

@ -0,0 +1,39 @@
/*
* Copyright 2015 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <math.h>
#include <unistd.h>
#include "SawtoothOscillator.h"
// Nothing beyond the OscillatorBase defaults (48000 Hz sample rate,
// zero initial phase).
SawtoothOscillator::SawtoothOscillator()
        : OscillatorBase() {
}
int32_t SawtoothOscillator::onProcess(int32_t numFrames) {
    const float *frequencies = frequency.getBuffer();
    const float *amplitudes = amplitude.getBuffer();
    float *outputBuffer = output.getBuffer();
    // The wrapped phase in [-1, +1) already has a sawtooth shape, so it is
    // scaled directly by the amplitude. Not band-limited:
    // WARNING, this aliases audibly at higher frequencies.
    for (int frameIndex = 0; frameIndex < numFrames; frameIndex++) {
        float phase = incrementPhase(frequencies[frameIndex]);
        outputBuffer[frameIndex] = phase * amplitudes[frameIndex];
    }
    return numFrames;
}

View file

@ -0,0 +1,36 @@
/*
* Copyright 2015 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef FLOWGRAPH_SAWTOOTH_OSCILLATOR_H
#define FLOWGRAPH_SAWTOOTH_OSCILLATOR_H
#include <unistd.h>
#include "OscillatorBase.h"
/**
* Oscillator that generates a sawtooth wave at the specified frequency and amplitude.
*
* The waveform is not band-limited so it will have aliasing artifacts at higher frequencies.
*/
class SawtoothOscillator : public OscillatorBase {
public:
    SawtoothOscillator();

    // Outputs the raw wrapped phase scaled by amplitude (not band-limited).
    int32_t onProcess(int32_t numFrames) override;
};
#endif //FLOWGRAPH_SAWTOOTH_OSCILLATOR_H

View file

@ -0,0 +1,42 @@
/*
* Copyright 2015 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <math.h>
#include <unistd.h>
#include "SineOscillator.h"
/*
* This calls sinf() so it is not very efficient.
* A more efficient implementation might use a wave-table or a polynomial.
*/
// Nothing beyond the OscillatorBase defaults (48000 Hz sample rate,
// zero initial phase).
SineOscillator::SineOscillator()
        : OscillatorBase() {
}
int32_t SineOscillator::onProcess(int32_t numFrames) {
    const float *frequencies = frequency.getBuffer();
    const float *amplitudes = amplitude.getBuffer();
    float *outputBuffer = output.getBuffer();
    // Generate the sine wave one frame at a time.
    for (int frameIndex = 0; frameIndex < numFrames; frameIndex++) {
        // The phase in [-1, +1) maps to [-PI, +PI) for sinf().
        float phase = incrementPhase(frequencies[frameIndex]);
        outputBuffer[frameIndex] = sinf(phase * M_PI) * amplitudes[frameIndex];
    }
    return numFrames;
}

View file

@ -0,0 +1,34 @@
/*
* Copyright 2015 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef FLOWGRAPH_SINE_OSCILLATOR_H
#define FLOWGRAPH_SINE_OSCILLATOR_H
#include <unistd.h>
#include "OscillatorBase.h"
/**
* Oscillator that generates a sine wave at the specified frequency and amplitude.
*/
class SineOscillator : public OscillatorBase {
public:
    SineOscillator();

    // Computes sinf() per sample; see the .cpp note about efficiency.
    int32_t onProcess(int32_t numFrames) override;
};
#endif //FLOWGRAPH_SINE_OSCILLATOR_H

View file

@ -0,0 +1,40 @@
/*
* Copyright 2015 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <math.h>
#include <unistd.h>
#include "TriangleOscillator.h"
// Nothing beyond the OscillatorBase defaults (48000 Hz sample rate,
// zero initial phase).
TriangleOscillator::TriangleOscillator()
        : OscillatorBase() {
}
int32_t TriangleOscillator::onProcess(int32_t numFrames) {
    const float *frequencies = frequency.getBuffer();
    const float *amplitudes = amplitude.getBuffer();
    float *outputBuffer = output.getBuffer();
    // Fold the phase ramp into a triangle: rising while the phase is in
    // [-1, 0), falling while it is in [0, +1). Not band-limited:
    // WARNING, this aliases audibly at higher frequencies.
    for (int frameIndex = 0; frameIndex < numFrames; frameIndex++) {
        float phase = incrementPhase(frequencies[frameIndex]);
        float folded;
        if (phase < 0.0f) {
            folded = 0.5f + phase;
        } else {
            folded = 0.5f - phase;
        }
        float triangle = 2.0f * folded;
        outputBuffer[frameIndex] = triangle * amplitudes[frameIndex];
    }
    return numFrames;
}

View file

@ -0,0 +1,39 @@
/*
* Copyright 2019 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef FLOWGRAPH_TRIANGLE_OSCILLATOR_H
#define FLOWGRAPH_TRIANGLE_OSCILLATOR_H
#include <unistd.h>
#include "OscillatorBase.h"
/**
* Oscillator that generates a triangle wave at the specified frequency and amplitude.
*
* The triangle output rises from -1 to +1 when the phase is between -1 and 0.
* The triangle output falls from +1 to -1 when the phase is between 0 and +1.
*
* The waveform is not band-limited so it will have aliasing artifacts at higher frequencies.
*/
class TriangleOscillator : public OscillatorBase {
public:
    TriangleOscillator();

    // Folds the wrapped phase into a triangle wave (not band-limited).
    int32_t onProcess(int32_t numFrames) override;
};
#endif //FLOWGRAPH_TRIANGLE_OSCILLATOR_H

View file

@ -0,0 +1,32 @@
/*
* Copyright 2022 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <math.h>
#include <unistd.h>
#include "WhiteNoise.h"
// Fill the output with uniformly distributed noise scaled per-frame by
// the amplitude port.
int32_t WhiteNoise::onProcess(int32_t numFrames) {
    const float *amplitudes = amplitude.getBuffer();
    float *outputBuffer = output.getBuffer();
    for (int frameIndex = 0; frameIndex < numFrames; frameIndex++) {
        // nextRandomDouble() yields values in -1 to +1.
        float noise = (float) mPseudoRandom.nextRandomDouble();
        outputBuffer[frameIndex] = noise * amplitudes[frameIndex];
    }
    return numFrames;
}

View file

@ -0,0 +1,57 @@
/*
* Copyright 2022 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef FLOWGRAPH_WHITE_NOISE_H
#define FLOWGRAPH_WHITE_NOISE_H
#include <unistd.h>
#include "flowgraph/FlowGraphNode.h"
#include "../analyzer/PseudoRandom.h"
/**
* White noise with equal energy in all frequencies up to the Nyquist.
* This is based on random numbers with a uniform distribution.
*/
class WhiteNoise : public oboe::flowgraph::FlowGraphNode {
public:
    // Create the amplitude input and noise output ports (one value per
    // frame each).
    WhiteNoise()
            : oboe::flowgraph::FlowGraphNode()
            , amplitude(*this, 1)
            , output(*this, 1)
    {
    }

    virtual ~WhiteNoise() = default;

    int32_t onProcess(int32_t numFrames) override;

    /**
     * Control the amplitude of the noise.
     * Silence is 0.0.
     * A typical full amplitude would be 1.0.
     */
    oboe::flowgraph::FlowGraphPortFloatInput amplitude;

    oboe::flowgraph::FlowGraphPortFloatOutput output;

private:
    // Deterministic noise source; see analyzer/PseudoRandom.h.
    PseudoRandom mPseudoRandom;
};
#endif //FLOWGRAPH_WHITE_NOISE_H

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,163 @@
/*
* Copyright (C) 2016 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* This code was translated from the JSyn Java code.
* JSyn is Copyright 2009 Phil Burk, Mobileer Inc
* JSyn is licensed under the Apache License, Version 2.0
*/
#ifndef SYNTHMARK_BIQUAD_FILTER_H
#define SYNTHMARK_BIQUAD_FILTER_H
#include <cstdint>
#include <math.h>
#include "SynthTools.h"
#include "UnitGenerator.h"
namespace marksynth {
// Lower clamps applied to cutoff frequency and Q to keep the
// coefficient math finite. // REVIEW
#define BIQUAD_MIN_FREQ (0.00001f) // REVIEW
#define BIQUAD_MIN_Q (0.00001f) // REVIEW
// When 1, recompute filter coefficients for every sample instead of
// once per generate() block.
#define RECALCULATE_PER_SAMPLE 0
/**
* Time varying lowpass resonant filter.
*/
class BiquadFilter : public UnitGenerator
{
public:
    // Start with unity Q, cleared delay lines, and zeroed coefficients.
    BiquadFilter()
        : mQ(1.0)
    {
        xn1 = xn2 = yn1 = yn2 = (synth_float_t) 0;
        a0 = a1 = a2 = b1 = b2 = (synth_float_t) 0;
    }
    virtual ~BiquadFilter() = default;
    /**
     * Resonance, typically between 1.0 and 10.0.
     * Input will be clipped at BIQUAD_MIN_Q.
     */
    void setQ(synth_float_t q) {
        if( q < BIQUAD_MIN_Q ) {
            q = BIQUAD_MIN_Q;
        }
        mQ = q;
    }
    synth_float_t getQ() {
        return mQ;
    }
    /**
     * Lowpass-filter numSamples of input into the inherited output[]
     * buffer (direct form I biquad).
     *
     * @param input       samples to filter
     * @param frequencies per-sample cutoff values; converted to a rate
     *                    ratio via mSamplePeriod in calculateCoefficients()
     * @param numSamples  number of samples to process
     */
    void generate(synth_float_t *input,
                  synth_float_t *frequencies,
                  int32_t numSamples) {
        synth_float_t xn, yn;
#if RECALCULATE_PER_SAMPLE == 0
        // Cheaper path: treat the cutoff as constant across the block.
        calculateCoefficients(frequencies[0], mQ);
#endif
        for (int i = 0; i < numSamples; i++) {
#if RECALCULATE_PER_SAMPLE == 1
            calculateCoefficients(frequencies[i], mQ);
#endif
            // Generate outputs by filtering inputs.
            xn = input[i];
            synth_float_t finite = (a0 * xn) + (a1 * xn1) + (a2 * xn2);
            // Recursive portion. yn1/yn2 are stored as double.
            // NOTE(review): yn itself is synth_float_t, so the feedback sum
            // is rounded to synth_float_t each sample — confirm this matches
            // the "double precision" intent of the original comment.
            yn = finite - (b1 * yn1) - (b2 * yn2);
            output[i] = (synth_float_t) yn;
            // Delay input and output values.
            xn2 = xn1;
            xn1 = xn;
            yn2 = yn1;
            yn1 = yn;
        }
        // Apply a small bipolar impulse to the delay line to prevent
        // arithmetic underflow (denormals).
        yn1 += (synth_float_t) 1.0E-26;
        yn2 -= (synth_float_t) 1.0E-26;
    }
private:
    synth_float_t mQ;    // resonance, clamped to >= BIQUAD_MIN_Q
    synth_float_t xn1;   // delay lines
    synth_float_t xn2;
    double yn1;
    double yn2;
    synth_float_t a0;    // coefficients
    synth_float_t a1;
    synth_float_t a2;
    synth_float_t b1;
    synth_float_t b2;
    synth_float_t cos_omega;  // intermediates shared with calcCommon()
    synth_float_t sin_omega;
    synth_float_t alpha;
    // Calculate coefficients common to many parametric biquad filters.
    // ratio is the cutoff as a fraction of the sample rate.
    void calcCommon( synth_float_t ratio, synth_float_t Q )
    {
        synth_float_t omega;
        /* Don't let frequency get too close to Nyquist or filter will blow up. */
        if( ratio >= 0.499f ) ratio = 0.499f;
        omega = 2.0f * (synth_float_t)M_PI * ratio;
#if 1
        // This is not significantly faster on Mac or Linux.
        cos_omega = SynthTools::fastCosine(omega);
        sin_omega = SynthTools::fastSine(omega );
#else
        {
            float fsin_omega;
            float fcos_omega;
            sincosf(omega, &fsin_omega, &fcos_omega);
            cos_omega = (synth_float_t) fcos_omega;
            sin_omega = (synth_float_t) fsin_omega;
        }
#endif
        alpha = sin_omega / (2.0f * Q);
    }
    // Lowpass coefficients, clamping the cutoff to BIQUAD_MIN_FREQ.
    void calculateCoefficients( synth_float_t frequency, synth_float_t Q )
    {
        synth_float_t scalar, omc;
        if( frequency < BIQUAD_MIN_FREQ ) frequency = BIQUAD_MIN_FREQ;
        calcCommon( frequency * mSamplePeriod, Q );
        scalar = 1.0f / (1.0f + alpha);
        omc = (1.0f - cos_omega);
        a0 = omc * 0.5f * scalar;
        a1 = omc * scalar;
        a2 = a0;
        b1 = -2.0f * cos_omega * scalar;
        b2 = (1.0f - alpha) * scalar;
    }
};
};
#endif // SYNTHMARK_BIQUAD_FILTER_H

View file

@ -0,0 +1,78 @@
/*
* Copyright (C) 2016 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* This code was translated from the JSyn Java code.
* JSyn is Copyright 2009 Phil Burk, Mobileer Inc
* JSyn is licensed under the Apache License, Version 2.0
*/
#ifndef SYNTHMARK_DIFFERENTIATED_PARABOLA_H
#define SYNTHMARK_DIFFERENTIATED_PARABOLA_H
#include <cstdint>
#include <math.h>
#include "SynthTools.h"
namespace marksynth {
constexpr double kDPWVeryLowFrequency = 2.0 * 0.1 / kSynthmarkSampleRate;
/**
* DPW is a tool for generating band-limited waveforms
* based on a paper by Antti Huovilainen and Vesa Valimaki:
* "New Approaches to Digital Subtractive Synthesis"
*/
class DifferentiatedParabola
{
public:
    DifferentiatedParabola()
        : mZ1(0)
        , mZ2(0) {}

    virtual ~DifferentiatedParabola() = default;

    /**
     * Generate the next reduced-alias sawtooth sample using the
     * Differentiated Parabolic Wave technique: square the phase,
     * differentiate with a delay line, and rescale by the increment.
     *
     * @param phase          current oscillator phase, nominally -1.0 to +1.0
     * @param phaseIncrement per-sample phase increment (may be negative)
     * @return the DPW sawtooth sample
     */
    synth_float_t next(synth_float_t phase, synth_float_t phaseIncrement) {
        synth_float_t dpw;
        // BUG FIX: the original ternary had its branches swapped, which
        // produced a non-positive magnitude; the low-frequency bypass below
        // was then always taken and the DPW smoothing never ran. Take the
        // absolute value of the increment instead.
        synth_float_t positivePhaseIncrement = (phaseIncrement < 0.0)
                ? 0.0 - phaseIncrement
                : phaseIncrement;
        // If the frequency is very low then just use the raw sawtooth.
        // This avoids divide by zero problems and scaling problems.
        if (positivePhaseIncrement < kDPWVeryLowFrequency) {
            dpw = phase;
        } else {
            // Calculate the parabola.
            synth_float_t squared = phase * phase;
            // Differentiate using a delayed value.
            synth_float_t diffed = squared - mZ2;
            // Delay line.
            // TODO - Why Z2? Vesa's paper says use Z1.
            mZ2 = mZ1;
            mZ1 = squared;
            // Scale so the output level is independent of frequency.
            dpw = diffed * 0.25f / positivePhaseIncrement; // TODO extract and optimize
        }
        return dpw;
    }

private:
    synth_float_t mZ1;  // squared phase, delayed one sample
    synth_float_t mZ2;  // squared phase, delayed two samples
};
};
#endif // SYNTHMARK_DIFFERENTIATED_PARABOLA_H

View file

@ -0,0 +1,229 @@
/*
* Copyright (C) 2016 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* This code was translated from the JSyn Java code.
* JSyn is Copyright 2009 Phil Burk, Mobileer Inc
* JSyn is licensed under the Apache License, Version 2.0
*/
#ifndef SYNTHMARK_ENVELOPE_ADSR_H
#define SYNTHMARK_ENVELOPE_ADSR_H
#include <cstdint>
#include <math.h>
#include "SynthTools.h"
#include "UnitGenerator.h"
namespace marksynth {
/**
* Generate a contour that can be used to control amplitude or
* other parameters.
*/
class EnvelopeADSR : public UnitGenerator
{
public:
    // Defaults: fast attack, moderate decay, mid sustain, long release.
    EnvelopeADSR()
        : mAttack(0.05)
        , mDecay(0.6)
        , mSustainLevel(0.4)
        , mRelease(2.5)
    {}

    virtual ~EnvelopeADSR() = default;

// Stage durations shorter than this are treated as instantaneous.
#define MIN_DURATION (1.0 / 100000.0)

    enum State {
        IDLE, ATTACKING, DECAYING, SUSTAINING, RELEASING
    };

    // Gate high starts (or retriggers) the attack; gate low starts the release.
    void setGate(bool gate) {
        triggered = gate;
    }

    // An idle envelope outputs a constant level until the gate goes high.
    bool isIdle() {
        return mState == State::IDLE;
    }

    /**
     * Time in seconds for the falling stage to go from 0 dB to -90 dB. The decay stage will stop at
     * the sustain level. But we calculate the time to fall to -90 dB so that the decay
     * <em>rate</em> will be unaffected by the sustain level.
     */
    void setDecayTime(synth_float_t time) {
        mDecay = time;
    }

    synth_float_t getDecayTime() {
        return mDecay;
    }

    /**
     * Time in seconds for the rising stage of the envelope to go from 0.0 to 1.0. The attack is a
     * linear ramp.
     */
    void setAttackTime(synth_float_t time) {
        mAttack = time;
    }

    synth_float_t getAttackTime() {
        return mAttack;
    }

    // Render numSamples of the contour into the inherited output[] buffer.
    // Each stage runs its own inner loop over the shared sample index i;
    // a state change breaks out and the outer switch re-dispatches so the
    // remaining samples of the block are rendered by the new stage.
    void generate(int32_t numSamples) {
        for (int i = 0; i < numSamples; i++) {
            switch (mState) {
                case IDLE:
                    for (; i < numSamples; i++) {
                        output[i] = mLevel;
                        if (triggered) {
                            startAttack();
                            break;
                        }
                    }
                    break;
                case ATTACKING:
                    for (; i < numSamples; i++) {
                        // Increment first so we can render fast attacks.
                        mLevel += increment;
                        if (mLevel >= 1.0) {
                            mLevel = 1.0;
                            output[i] = mLevel;
                            startDecay();
                            break;
                        } else {
                            output[i] = mLevel;
                            if (!triggered) {
                                startRelease();
                                break;
                            }
                        }
                    }
                    break;
                case DECAYING:
                    for (; i < numSamples; i++) {
                        output[i] = mLevel;
                        mLevel *= mScaler; // exponential decay
                        if (mLevel < kAmplitudeDb96) {
                            // Effectively silent; stop computing.
                            startIdle();
                            break;
                        } else if (!triggered) {
                            startRelease();
                            break;
                        } else if (mLevel < mSustainLevel) {
                            // Clamp to the sustain plateau.
                            mLevel = mSustainLevel;
                            startSustain();
                            break;
                        }
                    }
                    break;
                case SUSTAINING:
                    for (; i < numSamples; i++) {
                        mLevel = mSustainLevel;
                        output[i] = mLevel;
                        if (!triggered) {
                            startRelease();
                            break;
                        }
                    }
                    break;
                case RELEASING:
                    for (; i < numSamples; i++) {
                        output[i] = mLevel;
                        mLevel *= mScaler; // exponential decay
                        if (triggered) {
                            // Retriggered before the release finished.
                            startAttack();
                            break;
                        } else if (mLevel < kAmplitudeDb96) {
                            startIdle();
                            break;
                        }
                    }
                    break;
            }
        }
    }

private:
    void startIdle() {
        mState = State::IDLE;
        mLevel = 0.0;
    }

    void startAttack() {
        if (mAttack < MIN_DURATION) {
            // Negligible attack time: jump straight to full level.
            mLevel = 1.0;
            startDecay();
        } else {
            // Linear ramp from the current level up to 1.0.
            increment = mSamplePeriod / mAttack;
            mState = State::ATTACKING;
        }
    }

    void startDecay() {
        double duration = mDecay;
        if (duration < MIN_DURATION) {
            startSustain();
        } else {
            mScaler = SynthTools::convertTimeToExponentialScaler(duration, mSampleRate);
            mState = State::DECAYING;
        }
    }

    void startSustain() {
        mState = State::SUSTAINING;
    }

    void startRelease() {
        double duration = mRelease;
        if (duration < MIN_DURATION) {
            duration = MIN_DURATION;
        }
        mScaler = SynthTools::convertTimeToExponentialScaler(duration, mSampleRate);
        mState = State::RELEASING;
    }

    synth_float_t mAttack;       // attack time in seconds (see setAttackTime)
    synth_float_t mDecay;        // decay time in seconds (see setDecayTime)
    /**
     * Level for the sustain stage. The envelope will hold here until the input goes to zero or
     * less. This should be set between 0.0 and 1.0.
     */
    synth_float_t mSustainLevel;
    /**
     * Time in seconds to go from 0 dB to -90 dB. This stage is triggered when the input goes to
     * zero or less. The release stage will start from the sustain level. But we calculate the time
     * to fall from full amplitude so that the release <em>rate</em> will be unaffected by the
     * sustain level.
     */
    synth_float_t mRelease;

    State mState = State::IDLE;
    synth_float_t mScaler = 1.0; // per-sample multiplier for the exponential stages
    synth_float_t mLevel = 0.0;  // current envelope output value
    synth_float_t increment = 0; // per-sample delta during the linear attack
    bool triggered = false;      // latest gate value from setGate()
};
};
#endif // SYNTHMARK_ENVELOPE_ADSR_H

View file

@ -0,0 +1,36 @@
/*
* Copyright (C) 2016 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* This code was translated from the JSyn Java code.
* JSyn is Copyright 2009 Phil Burk, Mobileer Inc
* JSyn is licensed under the Apache License, Version 2.0
*/
#ifndef INCLUDE_ME_ONCE_H
#define INCLUDE_ME_ONCE_H
#include "UnitGenerator.h"
#include "PitchToFrequency.h"
namespace marksynth {
// Out-of-line storage for static class members declared in the headers
// above. As the guard name warns, include this from exactly one
// translation unit or the linker will see duplicate definitions.
int32_t UnitGenerator::mSampleRate = kSynthmarkSampleRate;
synth_float_t UnitGenerator::mSamplePeriod = 1.0f / kSynthmarkSampleRate;
PowerOfTwoTable PitchToFrequency::mPowerTable(64);
};
#endif //INCLUDE_ME_ONCE_H

View file

@ -0,0 +1,72 @@
/*
* Copyright (C) 2016 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* This code was translated from the JSyn Java code.
* JSyn is Copyright 2009 Phil Burk, Mobileer Inc
* JSyn is licensed under the Apache License, Version 2.0
*/
#ifndef SYNTHMARK_LOOKUP_TABLE_H
#define SYNTHMARK_LOOKUP_TABLE_H
#include <cstdint>
#include "SynthTools.h"
namespace marksynth {
/**
 * Abstract base for a function table with linear interpolation.
 * Subclasses implement calculate() and must call fillTable() before
 * the first lookup().
 */
class LookupTable {
public:
    LookupTable(int32_t numEntries)
        : mNumEntries(numEntries)
    {}

    virtual ~LookupTable() {
        // FIX: mTable is now initialized to nullptr, so this delete[] is
        // well defined even when fillTable() was never called. Previously
        // the pointer was left indeterminate, making this UB.
        delete[] mTable;
    }

    /** Allocate the table and populate it via calculate(). */
    void fillTable() {
        // FIX: release any previous table so a second call does not leak.
        delete[] mTable;
        // Add 2 guard points for interpolation and roundoff error.
        int tableSize = mNumEntries + 2;
        mTable = new float[tableSize];
        // Fill the table with calculated values
        float scale = 1.0f / mNumEntries;
        for (int i = 0; i < tableSize; i++) {
            float value = calculate(i * scale);
            mTable[i] = value;
        }
    }

    /**
     * Linearly interpolate a value from the table.
     * Requires fillTable() to have been called.
     * @param input normalized between 0.0 and 1.0
     */
    float lookup(float input) {
        float fractionalTableIndex = input * mNumEntries;
        int32_t index = (int) floor(fractionalTableIndex);
        float fraction = fractionalTableIndex - index;
        float baseValue = mTable[index];
        float value = baseValue
                      + (fraction * (mTable[index + 1] - baseValue));
        return value;
    }

    /** Compute the exact value for a normalized input; defined by subclasses. */
    virtual float calculate(float input) = 0;

private:
    int32_t mNumEntries;
    synth_float_t *mTable = nullptr; // owned; allocated by fillTable()
};
};
#endif // SYNTHMARK_LOOKUP_TABLE_H

View file

@ -0,0 +1,104 @@
/*
* Copyright (C) 2016 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* This code was translated from the JSyn Java code.
* JSyn is Copyright 2009 Phil Burk, Mobileer Inc
* JSyn is licensed under the Apache License, Version 2.0
*/
#ifndef SYNTHMARK_PITCH_TO_FREQUENCY_H
#define SYNTHMARK_PITCH_TO_FREQUENCY_H
#include <cstdint>
#include <math.h>
#include "SynthTools.h"
#include "LookupTable.h"
namespace marksynth {
// Semitones in one octave (equal temperament).
constexpr int kSemitonesPerOctave = 12;
// Pitches are in semitones based on the MIDI standard.
constexpr int kPitchMiddleC = 60;
// Frequency in Hz corresponding to MIDI pitch 60.
constexpr double kFrequencyMiddleC = 261.625549;

// Lookup table of 2^x for normalized x in [0.0, 1.0], used to
// interpolate within one octave.
class PowerOfTwoTable : public LookupTable {
public:
    PowerOfTwoTable(int32_t numEntries)
        : LookupTable(numEntries)
    {
        // Safe to fill here: calculate() is defined by this class.
        fillTable();
    }
    virtual ~PowerOfTwoTable() {}
    // Return 2^input.
    virtual float calculate(float input) override {
        return powf(2.0f, input);
    }
};
// Convert fractional MIDI pitch (semitones) to frequency in Hertz,
// using a shared power-of-two table plus a one-entry result cache.
class PitchToFrequency
{
public:
    PitchToFrequency() {}

    virtual ~PitchToFrequency() {
    }

    // Exact (slow) conversion, bypassing the table.
    static double convertPitchToFrequency(double pitch) {
        double exponent = (pitch - kPitchMiddleC) * (1.0 / kSemitonesPerOctave);
        return kFrequencyMiddleC * pow(2.0, exponent);
    }

    // Fast table-based conversion. Caches the last input/output pair,
    // which pays off for slowly-changing pitch (e.g. vibrato blocks).
    synth_float_t lookupPitchToFrequency(synth_float_t pitch) {
        // Only calculate if input changed since last time.
        if (pitch != lastInput) {
            synth_float_t octavePitch = (pitch - kPitchMiddleC) * (1.0 / kSemitonesPerOctave);
            int32_t octaveIndex = (int) floor(octavePitch);
            synth_float_t fractionalOctave = octavePitch - octaveIndex;
            // Do table lookup.
            synth_float_t value = kFrequencyMiddleC * mPowerTable.lookup(fractionalOctave);
            // Adjust for octave by multiplying by a power of 2. Allow for +/- 16 octaves;
            // NOTE(review): pitches more than ~16 octaves from middle C would push
            // the shift count out of range here - confirm callers stay in MIDI range.
            const int32_t octaveOffset = 16;
            synth_float_t octaveScaler = ((synth_float_t)(1 << (octaveIndex + octaveOffset)))
                    * (1.0 / (1 << octaveOffset));
            value *= octaveScaler;
            lastInput = pitch;
            lastOutput = value;
        }
        return lastOutput;
    }

    /**
     * Convert a block of pitches to frequencies.
     * @param pitches an array of fractional MIDI pitches
     */
    void generate(const synth_float_t *pitches, synth_float_t *frequencies, int32_t count) {
        for (int i = 0; i < count; i++) {
            frequencies[i] = lookupPitchToFrequency(pitches[i]);
        }
    }

private:
    // Shared 2^x table; storage is defined out-of-line in one translation unit.
    static PowerOfTwoTable mPowerTable;
    synth_float_t lastInput = kPitchMiddleC;      // cached argument
    synth_float_t lastOutput = kFrequencyMiddleC; // cached result for lastInput
};
};
#endif // SYNTHMARK_PITCH_TO_FREQUENCY_H

View file

@ -0,0 +1,80 @@
/*
* Copyright (C) 2016 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* This code was translated from the JSyn Java code.
* JSyn is Copyright 2009 Phil Burk, Mobileer Inc
* JSyn is licensed under the Apache License, Version 2.0
*/
#ifndef SYNTHMARK_SAWTOOTH_OSCILLATOR_H
#define SYNTHMARK_SAWTOOTH_OSCILLATOR_H
#include <cstdint>
#include <math.h>
#include "SynthTools.h"
#include "UnitGenerator.h"
#include "DifferentiatedParabola.h"
namespace marksynth {
/**
* Simple phasor that can be used to implement other oscillators.
* Note that this is NON-bandlimited and should not be used
* directly as a sound source.
*/
class SawtoothOscillator : public UnitGenerator
{
public:
SawtoothOscillator()
: mPhase(0) {}
virtual ~SawtoothOscillator() = default;
void generate(synth_float_t frequency, int32_t numSamples) {
synth_float_t phase = mPhase;
synth_float_t phaseIncrement = 2.0 * frequency * mSamplePeriod;
for (int i = 0; i < numSamples; i++) {
output[i] = translatePhase(phase, phaseIncrement);
phase += phaseIncrement;
if (phase > 1.0) {
phase -= 2.0;
}
}
mPhase = phase;
}
void generate(synth_float_t *frequencies, int32_t numSamples) {
synth_float_t phase = mPhase;
for (int i = 0; i < numSamples; i++) {
synth_float_t phaseIncrement = 2.0 * frequencies[i] * mSamplePeriod;
output[i] = translatePhase(phase, phaseIncrement);
phase += phaseIncrement;
if (phase > 1.0) {
phase -= 2.0;
}
}
mPhase = phase;
}
virtual synth_float_t translatePhase(synth_float_t phase, synth_float_t phaseIncrement) {
(void) phaseIncrement;
return phase;
}
private:
synth_float_t mPhase; // between -1.0 and +1.0
};
};
#endif // SYNTHMARK_SAWTOOTH_OSCILLATOR_H

View file

@ -0,0 +1,54 @@
/*
* Copyright (C) 2016 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* This code was translated from the JSyn Java code.
* JSyn is Copyright 2009 Phil Burk, Mobileer Inc
* JSyn is licensed under the Apache License, Version 2.0
*/
#ifndef SYNTHMARK_SAWTOOTH_OSCILLATOR_DPW_H
#define SYNTHMARK_SAWTOOTH_OSCILLATOR_DPW_H
#include <cstdint>
#include <math.h>
#include "SynthTools.h"
#include "DifferentiatedParabola.h"
#include "SawtoothOscillator.h"
namespace marksynth {
/**
* Band limited sawtooth oscillator.
* Suitable as a sound source.
*/
class SawtoothOscillatorDPW : public SawtoothOscillator
{
public:
    SawtoothOscillatorDPW()
        : SawtoothOscillator()
        , dpw() {}

    virtual ~SawtoothOscillatorDPW() = default;

    // Shape the raw phasor into a band-limited sawtooth sample using the
    // differentiated-parabola technique.
    virtual inline synth_float_t translatePhase(synth_float_t phase, synth_float_t phaseIncrement) {
        return dpw.next(phase, phaseIncrement);
    }

private:
    DifferentiatedParabola dpw; // holds the differentiator history
};
};
#endif // SYNTHMARK_SAWTOOTH_OSCILLATOR_DPW_H

View file

@ -0,0 +1,137 @@
/*
* Copyright (C) 2016 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef SYNTHMARK_SIMPLE_VOICE_H
#define SYNTHMARK_SIMPLE_VOICE_H
#include <cstdint>
#include <math.h>
#include "SynthTools.h"
#include "VoiceBase.h"
#include "SawtoothOscillator.h"
#include "SawtoothOscillatorDPW.h"
#include "SquareOscillatorDPW.h"
#include "SineOscillator.h"
#include "EnvelopeADSR.h"
#include "PitchToFrequency.h"
#include "BiquadFilter.h"
namespace marksynth {
/**
* Classic subtractive synthesizer voice with
* 2 LFOs, 2 audio oscillators, filter and envelopes.
*/
class SimpleVoice : public VoiceBase
{
public:
    SimpleVoice()
        : VoiceBase()
        , mLfo1()
        , mOsc1()
        , mOsc2()
        , mPitchToFrequency()
        , mFilter()
        , mFilterEnvelope()
        , mAmplitudeEnvelope()
        // The following values are arbitrary but typical values.
        , mDetune(1.0001f) // slight phasing
        , mVibratoDepth(0.03f)
        , mVibratoRate(6.0f)
        , mFilterEnvDepth(3000.0f)
        , mFilterCutoff(400.0f)
    {
        mFilter.setQ(2.0);
        // Randomize attack times to smooth out CPU load for envelope state transitions.
        mFilterEnvelope.setAttackTime(0.05 + (0.2 * SynthTools::nextRandomDouble()));
        mFilterEnvelope.setDecayTime(7.0 + (1.0 * SynthTools::nextRandomDouble()));
        mAmplitudeEnvelope.setAttackTime(0.02 + (0.05 * SynthTools::nextRandomDouble()));
        mAmplitudeEnvelope.setDecayTime(1.0 + (0.2 * SynthTools::nextRandomDouble()));
    }

    virtual ~SimpleVoice() = default;

    // Change the pitch (fractional MIDI semitones) without retriggering.
    void setPitch(synth_float_t pitch) {
        mPitch = pitch;
    }

    // Start a note: latch the pitch and open both envelope gates.
    void noteOn(synth_float_t pitch, synth_float_t velocity) {
        (void) velocity; // TODO use velocity?
        mPitch = pitch;
        mFilterEnvelope.setGate(true);
        mAmplitudeEnvelope.setGate(true);
    }

    // Release both envelopes; the voice fades out on its own.
    void noteOff() {
        mFilterEnvelope.setGate(false);
        mAmplitudeEnvelope.setGate(false);
    }

    // Render one block into the inherited output[]. Note that mBuffer1 and
    // mBuffer2 are deliberately reused (aliased) at several points in the
    // chain; the pointer names track the current meaning of each buffer.
    void generate(int32_t numFrames) {
        assert(numFrames <= kSynthmarkFramesPerRender);
        // LFO #1 - vibrato
        mLfo1.generate(mVibratoRate, numFrames);
        synth_float_t *pitches = mBuffer1;
        SynthTools::scaleOffsetBuffer(mLfo1.output, pitches, numFrames, mVibratoDepth, mPitch);
        synth_float_t *frequencies = mBuffer2;
        mPitchToFrequency.generate(pitches, frequencies, numFrames);
        // OSC #1 - sawtooth
        mOsc1.generate(frequencies, numFrames);
        // OSC #2 - detuned square wave oscillator
        SynthTools::scaleBuffer(frequencies, frequencies, numFrames, mDetune);
        mOsc2.generate(frequencies, numFrames);
        // Mix the two oscillators
        synth_float_t *mixed = frequencies;
        SynthTools::mixBuffers(mOsc1.output, 0.6, mOsc2.output, 0.4, mixed, numFrames);
        // Filter envelope
        mFilterEnvelope.generate(numFrames);
        synth_float_t *cutoffFrequencies = pitches; // reuse unneeded buffer
        SynthTools::scaleOffsetBuffer(mFilterEnvelope.output, cutoffFrequencies, numFrames,
                                      mFilterEnvDepth, mFilterCutoff);
        // Biquad resonant low-pass filter
        mFilter.generate(mixed, cutoffFrequencies, numFrames);
        // Amplitude ADSR
        mAmplitudeEnvelope.generate(numFrames);
        SynthTools::multiplyBuffers(mFilter.output, mAmplitudeEnvelope.output, output, numFrames);
    }

private:
    SineOscillator mLfo1;               // vibrato LFO
    SawtoothOscillatorDPW mOsc1;
    SquareOscillatorDPW mOsc2;
    PitchToFrequency mPitchToFrequency;
    BiquadFilter mFilter;
    EnvelopeADSR mFilterEnvelope;       // modulates filter cutoff
    EnvelopeADSR mAmplitudeEnvelope;    // final output contour
    synth_float_t mDetune;              // frequency scaler
    synth_float_t mVibratoDepth;        // in semitones
    synth_float_t mVibratoRate;         // in Hertz
    synth_float_t mFilterEnvDepth;      // in Hertz
    synth_float_t mFilterCutoff;        // in Hertz
    // Buffers for storing signals that are being passed between units.
    synth_float_t mBuffer1[kSynthmarkFramesPerRender];
    synth_float_t mBuffer2[kSynthmarkFramesPerRender];
};
};
#endif // SYNTHMARK_SIMPLE_VOICE_H

View file

@ -0,0 +1,45 @@
/*
* Copyright (C) 2016 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* This code was translated from the JSyn Java code.
* JSyn is Copyright 2009 Phil Burk, Mobileer Inc
* JSyn is licensed under the Apache License, Version 2.0
*/
#ifndef SYNTHMARK_SINE_OSCILLATOR_H
#define SYNTHMARK_SINE_OSCILLATOR_H
#include <cstdint>
#include <math.h>
#include "SawtoothOscillator.h"
#include "SynthTools.h"
namespace marksynth {
// Sine oscillator built on the base phasor: the -1..+1 phase is scaled
// to -PI..+PI and shaped by a fast Taylor-series sine approximation.
class SineOscillator : public SawtoothOscillator
{
public:
    SineOscillator()
        : SawtoothOscillator() {}

    virtual ~SineOscillator() = default;

    virtual inline synth_float_t translatePhase(synth_float_t phase, synth_float_t phaseIncrement) {
        (void) phaseIncrement;
        return SynthTools::fastSine(phase * M_PI);
    }
};
};
#endif // SYNTHMARK_SINE_OSCILLATOR_H

View file

@ -0,0 +1,74 @@
/*
* Copyright (C) 2016 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* This code was translated from the JSyn Java code.
* JSyn is Copyright 2009 Phil Burk, Mobileer Inc
* JSyn is licensed under the Apache License, Version 2.0
*/
#ifndef SYNTHMARK_SQUARE_OSCILLATOR_DPW_H
#define SYNTHMARK_SQUARE_OSCILLATOR_DPW_H
#include <cstdint>
#include <math.h>
#include "SynthTools.h"
#include "DifferentiatedParabola.h"
#include "SawtoothOscillator.h"
namespace marksynth {
/**
* Square waves contains the odd partials of a fundamental.
* The square wave is generated by combining two sawtooth waveforms
* that are 180 degrees out of phase. This causes the even partials
* to be cancelled out.
*/
class SquareOscillatorDPW : public SawtoothOscillator
{
public:
    SquareOscillatorDPW()
        : SawtoothOscillator()
        , dpw1()
        , dpw2() {}

    virtual ~SquareOscillatorDPW() = default;

    /**
     * Combine two band-limited sawtooths 180 degrees apart to cancel
     * the even partials, yielding a square wave.
     */
    virtual inline synth_float_t translatePhase(synth_float_t phase1,
                                                synth_float_t phaseIncrement) {
        synth_float_t val1 = dpw1.next(phase1, phaseIncrement);
        /* Generate second sawtooth so we can add them together. */
        synth_float_t phase2 = phase1 + 1.0; /* 180 degrees out of phase. */
        if (phase2 >= 1.0)
            phase2 -= 2.0;
        // FIX: the second sawtooth must use its own differentiator state
        // (dpw2). The original called dpw1 here, which corrupted dpw1's
        // history and left dpw2 unused.
        synth_float_t val2 = dpw2.next(phase2, phaseIncrement);
        /*
         * Need to adjust amplitude based on positive phaseInc. little less than half at
         * Nyquist/2.0!
         */
        const synth_float_t STARTAMP = 0.92; // derived empirically
        // FIX: take the absolute value of the increment. The original
        // ternary produced a negative value for the normal positive
        // increment, which *increased* the amplitude toward Nyquist
        // instead of compensating downward.
        synth_float_t positivePhaseIncrement = (phaseIncrement < 0.0)
                ? 0.0 - phaseIncrement
                : phaseIncrement;
        synth_float_t scale = STARTAMP - positivePhaseIncrement;
        return scale * (val1 - val2);
    }

private:
    DifferentiatedParabola dpw1; // state for the first sawtooth
    DifferentiatedParabola dpw2; // state for the phase-shifted sawtooth
};
};
#endif // SYNTHMARK_SQUARE_OSCILLATOR_DPW_H

View file

@ -0,0 +1,173 @@
/*
* Copyright (C) 2016 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef SYNTHMARK_SYNTHTOOLS_H
#define SYNTHMARK_SYNTHTOOLS_H
#include <cmath>
#include <cstdint>
namespace marksynth {
// Sample type used throughout the synthesizer.
typedef float synth_float_t;

// The number of frames that are synthesized at one time.
constexpr int kSynthmarkFramesPerRender = 8;

constexpr int kSynthmarkSampleRate = 48000;
constexpr int kSynthmarkMaxVoices = 1024;

/**
 * A fractional amplitude corresponding to exactly -96 dB.
 * amplitude = pow(10.0, db/20.0)
 */
constexpr double kAmplitudeDb96 = 1.0 / 63095.73444801943;

/** A fraction that is approximately -90.3 dB. Defined as 1 bit of an S16. */
constexpr double kAmplitudeDb90 = 1.0 / (1 << 15);

/**
 * Stateless helpers for block-oriented DSP and fast math.
 * All buffer routines process numSamples contiguous values.
 */
class SynthTools
{
public:
    /** Write a constant value into every element of output. */
    static void fillBuffer(synth_float_t *output,
                           int32_t numSamples,
                           synth_float_t value) {
        for (int i = 0; i < numSamples; i++) {
            *output++ = value;
        }
    }

    /** output[i] = input[i] * multiplier (in-place use is safe). */
    static void scaleBuffer(const synth_float_t *input,
                            synth_float_t *output,
                            int32_t numSamples,
                            synth_float_t multiplier) {
        for (int i = 0; i < numSamples; i++) {
            *output++ = *input++ * multiplier;
        }
    }

    /** output[i] = (input[i] * multiplier) + offset */
    static void scaleOffsetBuffer(const synth_float_t *input,
                                  synth_float_t *output,
                                  int32_t numSamples,
                                  synth_float_t multiplier,
                                  synth_float_t offset) {
        for (int i = 0; i < numSamples; i++) {
            *output++ = (*input++ * multiplier) + offset;
        }
    }

    /** output[i] = (input1[i] * gain1) + (input2[i] * gain2) */
    static void mixBuffers(const synth_float_t *input1,
                           synth_float_t gain1,
                           const synth_float_t *input2,
                           synth_float_t gain2,
                           synth_float_t *output,
                           int32_t numSamples) {
        for (int i = 0; i < numSamples; i++) {
            *output++ = (*input1++ * gain1) + (*input2++ * gain2);
        }
    }

    /** output[i] = input1[i] * input2[i] (element-wise product). */
    static void multiplyBuffers(const synth_float_t *input1,
                                const synth_float_t *input2,
                                synth_float_t *output,
                                int32_t numSamples) {
        for (int i = 0; i < numSamples; i++) {
            // FIX: input2 must advance each sample. The original used
            // "*input2" without incrementing, which multiplied the whole
            // block by input2[0] instead of performing an element-wise
            // product.
            *output++ = *input1++ * *input2++;
        }
    }

    /**
     * Compute a per-sample multiplier m such that
     * m^(duration * sampleRate) == kAmplitudeDb90 (about -90 dB).
     */
    static double convertTimeToExponentialScaler(synth_float_t duration, synth_float_t sampleRate) {
        // Calculate scaler so that scaler^frames = target/source
        synth_float_t numFrames = duration * sampleRate;
        return pow(kAmplitudeDb90, (1.0 / numFrames));
    }

    /**
     * Calculate sine using a Taylor expansion.
     * Code is based on SineOscillator from JSyn.
     *
     * @param phase between -PI and +PI
     */
    static synth_float_t fastSine(synth_float_t phase) {
        // Factorial coefficients.
        const synth_float_t IF3 = 1.0 / (2 * 3);
        const synth_float_t IF5 = IF3 / (4 * 5);
        const synth_float_t IF7 = IF5 / (6 * 7);
        const synth_float_t IF9 = IF7 / (8 * 9);
        const synth_float_t IF11 = IF9 / (10 * 11);
        /* Wrap phase back into region where results are more accurate. */
        synth_float_t x = (phase > M_PI_2) ? M_PI - phase
                : ((phase < -M_PI_2) ? -(M_PI + phase) : phase);
        synth_float_t x2 = (x * x);
        /* Taylor expansion out to x**11/11! factored into multiply-adds */
        return x * (x2 * (x2 * (x2 * (x2 * ((x2 * (-IF11)) + IF9) - IF7) + IF5) - IF3) + 1);
    }

    /**
     * Calculate cosine using a Taylor expansion.
     *
     * @param phase between -PI and +PI
     */
    static synth_float_t fastCosine(synth_float_t phase) {
        // Factorial coefficients.
        const synth_float_t IF2 = 1.0 / (2);
        const synth_float_t IF4 = IF2 / (3 * 4);
        const synth_float_t IF6 = IF4 / (5 * 6);
        const synth_float_t IF8 = IF6 / (7 * 8);
        const synth_float_t IF10 = IF8 / (9 * 10);
        /* Wrap phase back into region where results are more accurate. */
        synth_float_t x = phase;
        if (x < 0.0) {
            x = 0.0 - phase;
        }
        int negate = 1;
        if (x > M_PI_2) {
            // FIX: use the identity cos(x) = -cos(PI - x). The original
            // computed M_PI_2 - x, which evaluates -sin(x) instead
            // (e.g. fastCosine(PI) returned ~0 rather than -1).
            x = M_PI - x;
            negate = -1;
        }
        synth_float_t x2 = (x * x);
        /* Taylor expansion out to x**11/11! factored into multiply-adds */
        synth_float_t cosine =
                1 + (x2 * (x2 * (x2 * (x2 * ((x2 * (-IF10)) + IF8) - IF6) + IF4) - IF2));
        return cosine * negate;
    }

    /**
     * Calculate random 32 bit number using linear-congruential method.
     * Deterministic sequence; not thread-safe (shared static seed).
     */
    static uint32_t nextRandomInteger() {
        static uint64_t seed = 99887766;
        // Use values for 64-bit sequence from MMIX by Donald Knuth.
        seed = (seed * 6364136223846793005L) + 1442695040888963407L;
        return (uint32_t) (seed >> 32); // The higher bits have a longer sequence.
    }

    /**
     * @return a random double between 0.0 and 1.0
     */
    static double nextRandomDouble() {
        const double scaler = 1.0 / (((uint64_t)1) << 32);
        return nextRandomInteger() * scaler;
    }
};
};
#endif // SYNTHMARK_SYNTHTOOLS_H

View file

@ -0,0 +1,135 @@
/*
* Copyright (C) 2016 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef SYNTHMARK_SYNTHESIZER_H
#define SYNTHMARK_SYNTHESIZER_H
#include <cstdint>
#include <math.h>
#include <memory>
#include <string.h>
#include <cassert>
#include "SynthTools.h"
#include "VoiceBase.h"
#include "SimpleVoice.h"
namespace marksynth {
#define SAMPLES_PER_FRAME 2
/**
 * Manage an array of voices.
 * Note that this is not a fully featured general purpose synthesizer.
 * It is designed simply to have a similar CPU load as a common synthesizer.
 */
class Synthesizer
{
public:
    Synthesizer()
        : mMaxVoices(0)
        , mActiveVoiceCount(0)
        , mFrameCounter(0) // FIX: was left uninitialized before "+=" in renderStereo()
        , mVoices(NULL)
    {}

    virtual ~Synthesizer() {
        delete[] mVoices;
    }

    /**
     * Allocate the voice pool and set the shared sample rate.
     * @return 0 on success, -1 on failure
     */
    int32_t setup(int32_t sampleRate, int32_t maxVoices) {
        mMaxVoices = maxVoices;
        UnitGenerator::setSampleRate(sampleRate);
        delete[] mVoices; // FIX: do not leak a previous pool if setup() is called again
        mVoices = new SimpleVoice[mMaxVoices];
        return (mVoices == NULL) ? -1 : 0;
    }

    /** Turn on every voice in the pool. */
    void allNotesOn() {
        notesOn(mMaxVoices);
    }

    /**
     * Start a chord on the first numVoices voices (pitches 60/64/67/69,
     * i.e. C, E, G, A, cycled).
     * @return 0 on success, -1 if numVoices exceeds the pool size
     */
    int32_t notesOn(int32_t numVoices) {
        if (numVoices > mMaxVoices) {
            return -1;
        }
        mActiveVoiceCount = numVoices;
        // Leave some headroom so the resonant filter does not clip.
        mVoiceAmplitude = 0.5f / sqrt(mActiveVoiceCount);
        int pitchIndex = 0;
        synth_float_t pitches[] = {60.0, 64.0, 67.0, 69.0};
        for (int iv = 0; iv < mActiveVoiceCount; iv++) {
            SimpleVoice *voice = &mVoices[iv];
            // Randomize pitches by a few cents to smooth out the CPU load.
            float pitchOffset = 0.03f * (float) SynthTools::nextRandomDouble();
            synth_float_t pitch = pitches[pitchIndex++] + pitchOffset;
            if (pitchIndex > 3) pitchIndex = 0;
            voice->noteOn(pitch, 1.0);
        }
        return 0;
    }

    /** Release every active voice. */
    void allNotesOff() {
        for (int iv = 0; iv < mActiveVoiceCount; iv++) {
            SimpleVoice *voice = &mVoices[iv];
            voice->noteOff();
        }
    }

    /**
     * Mix all active voices as interleaved stereo.
     * @param output buffer holding numFrames * SAMPLES_PER_FRAME floats
     */
    void renderStereo(float *output, int32_t numFrames) {
        int32_t framesLeft = numFrames;
        float *renderBuffer = output;
        // Clear mixing buffer.
        memset(output, 0, numFrames * SAMPLES_PER_FRAME * sizeof(float));
        while (framesLeft > 0) {
            int framesThisTime = std::min(kSynthmarkFramesPerRender, framesLeft);
            for (int iv = 0; iv < mActiveVoiceCount; iv++) {
                SimpleVoice *voice = &mVoices[iv];
                voice->generate(framesThisTime);
                float *mix = renderBuffer;
                synth_float_t leftGain = mVoiceAmplitude;
                synth_float_t rightGain = mVoiceAmplitude;
                if (mActiveVoiceCount > 1) {
                    // Spread the voices across the stereo field.
                    synth_float_t pan = iv / (mActiveVoiceCount - 1.0f);
                    leftGain *= pan;
                    rightGain *= 1.0 - pan;
                }
                // FIX: only mix the frames generated this pass. The original
                // looped to kSynthmarkFramesPerRender, which read stale voice
                // output and wrote past the caller's buffer whenever numFrames
                // was not a multiple of the render block size.
                for (int n = 0; n < framesThisTime; n++) {
                    synth_float_t sample = voice->output[n];
                    *mix++ += (float) (sample * leftGain);
                    *mix++ += (float) (sample * rightGain);
                }
            }
            framesLeft -= framesThisTime;
            mFrameCounter += framesThisTime;
            renderBuffer += framesThisTime * SAMPLES_PER_FRAME;
        }
        assert(framesLeft == 0);
    }

    int32_t getActiveVoiceCount() {
        return mActiveVoiceCount;
    }

private:
    int32_t mMaxVoices;
    int32_t mActiveVoiceCount;
    int64_t mFrameCounter;               // total frames rendered so far
    SimpleVoice *mVoices;                // owned voice pool
    synth_float_t mVoiceAmplitude = 1.0; // per-voice gain including headroom
};
};
#endif // SYNTHMARK_SYNTHESIZER_H

View file

@ -0,0 +1,55 @@
/*
* Copyright (C) 2016 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* This code was translated from the JSyn Java code.
* JSyn is Copyright 2009 Phil Burk, Mobileer Inc
* JSyn is licensed under the Apache License, Version 2.0
*/
#ifndef SYNTHMARK_UNIT_GENERATOR_H
#define SYNTHMARK_UNIT_GENERATOR_H
#include <cstdint>
#include <assert.h>
#include <math.h>
#include "SynthTools.h"
//#include "DifferentiatedParabola.h"
namespace marksynth {
// Base class for all signal-processing units. Provides the shared render
// output buffer and the sample rate common to every generator.
class UnitGenerator
{
public:
    UnitGenerator() {}
    virtual ~UnitGenerator() = default;

    // Set the rate (and cached period) shared by ALL unit generators.
    static void setSampleRate(int32_t sampleRate) {
        assert(sampleRate > 0);
        mSampleRate = sampleRate;
        mSamplePeriod = 1.0f / sampleRate;
    }

    static int32_t getSampleRate() {
        return mSampleRate;
    }

    // Block output buffer filled by each unit's generate() call.
    synth_float_t output[kSynthmarkFramesPerRender];
public:
    // Static storage is defined out-of-line in a single translation unit.
    static int32_t mSampleRate;
    static synth_float_t mSamplePeriod; // 1.0 / mSampleRate
};
}
#endif // SYNTHMARK_UNIT_GENERATOR_H

View file

@ -0,0 +1,58 @@
/*
* Copyright (C) 2016 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef SYNTHMARK_VOICE_BASE_H
#define SYNTHMARK_VOICE_BASE_H
#include <cstdint>
#include "SynthTools.h"
#include "UnitGenerator.h"
namespace marksynth {
/**
* Base class for building synthesizers.
*/
class VoiceBase : public UnitGenerator
{
public:
    VoiceBase()
        : mPitch(60.0) // MIDI Middle C is 60
        , mVelocity(1.0) // normalized
    {
    }

    virtual ~VoiceBase() = default;

    // Change the pitch (fractional MIDI semitones) without retriggering.
    void setPitch(synth_float_t pitch) {
        mPitch = pitch;
    }

    // Latch pitch and velocity for a new note. (Non-virtual; subclasses
    // such as SimpleVoice declare their own noteOn that shadows this.)
    void noteOn(synth_float_t pitch, synth_float_t velocity) {
        mVelocity = velocity;
        mPitch = pitch;
    }

    // No-op in the base class.
    void noteOff() {
    }

    // Render numFrames into the inherited output[] buffer.
    virtual void generate(int32_t numFrames) = 0;

protected:
    synth_float_t mPitch;    // fractional MIDI semitones
    synth_float_t mVelocity; // normalized
};
};
#endif // SYNTHMARK_VOICE_BASE_H

View file

@ -0,0 +1,145 @@
/*
* Copyright 2017 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef OBOETESTER_UNUSED_H
#define OBOETESTER_UNUSED_H
// Store this code for later use.
#if 0
/*
FIR filter designed with
http://t-filter.appspot.com
sampling frequency: 48000 Hz
* 0 Hz - 8000 Hz
gain = 1.2
desired ripple = 5 dB
actual ripple = 5.595266169703693 dB
* 12000 Hz - 20000 Hz
gain = 0
desired attenuation = -40 dB
actual attenuation = -37.58691566571914 dB
*/
#define FILTER_TAP_NUM 11
static const float sFilterTaps8000[FILTER_TAP_NUM] = {
-0.05944219353343189f,
-0.07303434839503208f,
-0.037690487672689066f,
0.1870480506596512f,
0.3910337357836833f,
0.5333672385425637f,
0.3910337357836833f,
0.1870480506596512f,
-0.037690487672689066f,
-0.07303434839503208f,
-0.05944219353343189f
};
/**
 * Simple FIR low-pass filter using the sFilterTaps8000 coefficients.
 * The delay line is kept at twice the tap count and each input is
 * written twice, so the convolution loop never has to wrap an index
 * in the middle of a pass.
 */
class LowPassFilter {
public:
    /*
     * Filter one input sample.
     * @return filtered output
     */
    float filter(float input) {
        float output = 0.0f;
        mX[mCursor] = input;
        // Index backwards over x.
        int xIndex = mCursor + FILTER_TAP_NUM;
        // Write twice so we avoid having to wrap in the middle of the convolution.
        mX[xIndex] = input;
        for (int i = 0; i < FILTER_TAP_NUM; i++) {
            output += sFilterTaps8000[i] * mX[xIndex--];
        }
        // Advance the circular cursor.
        if (++mCursor >= FILTER_TAP_NUM) {
            mCursor = 0;
        }
        return output;
    }

    /**
     * Self-test: feed an impulse at every possible cursor phase and check
     * that the output reproduces the tap coefficients exactly.
     * @return true if PASSED
     */
    bool test() {
        // Measure the impulse of the filter at different phases so we exercise
        // all the wraparound cases in the FIR.
        for (int offset = 0; offset < (FILTER_TAP_NUM * 2); offset++ ) {
            // LOGD("LowPassFilter: cursor = %d\n", mCursor);
            // Offset by one each time.
            if (filter(0.0f) != 0.0f) {
                LOGD("ERROR: filter should return 0.0 before impulse response\n");
                return false;
            }
            // The impulse response of an FIR filter is its coefficients.
            for (int i = 0; i < FILTER_TAP_NUM; i++) {
                float output = filter((i == 0) ? 1.0f : 0.0f); // impulse
                if (output != sFilterTaps8000[i]) {
                    LOGD("ERROR: filter should return impulse response\n");
                    return false;
                }
            }
            // After the impulse has flushed through, output must be silent again.
            for (int i = 0; i < FILTER_TAP_NUM; i++) {
                if (filter(0.0f) != 0.0f) {
                    LOGD("ERROR: filter should return 0.0 after impulse response\n");
                    return false;
                }
            }
        }
        return true;
    }

private:
    float mX[FILTER_TAP_NUM * 2]{}; // twice as big as needed to avoid wrapping
    int32_t mCursor = 0;            // write position in the first half of mX
};
/**
 * Low pass filter the recording using a simple FIR filter.
 * Note that the lowpass filter cutoff tracks the sample rate.
 * That is OK because the impulse width is a fixed number of samples.
 *
 * Filters mData in place over the first mFrameCounter frames.
 */
void lowPassFilter() {
    for (int i = 0; i < mFrameCounter; i++) {
        mData[i] = mLowPassFilter.filter(mData[i]);
    }
}
/**
 * Remove DC offset using a one-pole one-zero IIR filter:
 *
 *     y[n] = x[n] - x[n-1] + R * y[n-1]
 *
 * Processes mData in place over the first mFrameCounter frames.
 */
void dcBlocker() {
    const float R = 0.996; // pole radius; narrow notch at zero Hz
    float previousInput = 0.0;
    float previousOutput = 0.0;
    for (int frame = 0; frame < mFrameCounter; frame++) {
        const float input = mData[frame];
        const float output = input - previousInput + (R * previousOutput);
        mData[frame] = output;
        previousOutput = output;
        previousInput = input;
    }
}
#endif
#endif //OBOETESTER_UNUSED_H

View file

@ -0,0 +1,140 @@
/*
* Copyright 2019 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "WaveFileWriter.h"
// Write one float sample to the stream, lazily emitting the WAV header
// before the first sample. Dispatches on the configured sample width.
//
// Fix: the original definition used a redundant doubled qualifier
// ("WaveFileWriter::WaveFileWriter::write" — legal via the injected
// class name, but misleading noise); simplified to one qualification.
void WaveFileWriter::write(float value) {
    if (!mHeaderWritten) {
        writeHeader();
    }
    if (mBitsPerSample == 24) {
        writePCM24(value);
    } else {
        writePCM16(value);
    }
}
void WaveFileWriter::write(float *buffer, int32_t startSample, int32_t numSamples) {
for (int32_t i = 0; i < numSamples; i++) {
write(buffer[startSample + i]);
}
}
// Write a 32-bit integer to the stream, least-significant byte first.
void WaveFileWriter::writeIntLittle(int32_t n) {
    // Emit the four bytes from low to high; writeByte() keeps only the
    // low 8 bits of each shifted value.
    for (int shift = 0; shift < 32; shift += 8) {
        writeByte(n >> shift);
    }
}
// Write a 16-bit integer to the stream, least-significant byte first.
void WaveFileWriter::writeShortLittle(int16_t n) {
    writeByte(static_cast<uint8_t>(n & 0xFF));        // low byte
    writeByte(static_cast<uint8_t>((n >> 8) & 0xFF)); // high byte
}
// Write the WAV 'fmt ' chunk: format tag, channel count, frame rate,
// byte rate, block alignment and bits per sample, in RIFF order.
void WaveFileWriter::writeFormatChunk() {
    int32_t bytesPerSample = (mBitsPerSample + 7) / 8; // round up to whole bytes
    writeByte('f');
    writeByte('m');
    writeByte('t');
    writeByte(' ');
    writeIntLittle(16); // chunk size
    writeShortLittle(WAVE_FORMAT_PCM);
    writeShortLittle((int16_t) mSamplesPerFrame); // channel count
    writeIntLittle(mFrameRate);
    // bytes/second
    writeIntLittle(mFrameRate * mSamplesPerFrame * bytesPerSample);
    // block align
    writeShortLittle((int16_t) (mSamplesPerFrame * bytesPerSample));
    writeShortLittle((int16_t) mBitsPerSample);
}
// Size of the 'data' chunk payload in bytes.
// Returns INT32_MAX when the frame count is unknown (<= 0), a common
// convention for streamed WAV files; otherwise the product is computed
// in 64 bits and clamped to INT32_MAX to avoid 32-bit overflow.
int32_t WaveFileWriter::getDataSizeInBytes() {
    if (mFrameCount <= 0) return INT32_MAX;
    int64_t dataSize = ((int64_t)mFrameCount) * mSamplesPerFrame * mBitsPerSample / 8;
    return (int32_t)std::min(dataSize, (int64_t)INT32_MAX);
}
void WaveFileWriter::writeDataChunkHeader() {
writeByte('d');
writeByte('a');
writeByte('t');
writeByte('a');
writeIntLittle(getDataSizeInBytes());
}
// Write the complete WAV header: RIFF header, 'fmt ' chunk, then the
// 'data' chunk header. The order is fixed by the RIFF format, so these
// calls must not be reordered. Sets mHeaderWritten so write() emits the
// header only once.
void WaveFileWriter::writeHeader() {
    writeRiffHeader();
    writeFormatChunk();
    writeDataChunkHeader();
    mHeaderWritten = true;
}
// Write lower 8 bits. Upper bits ignored.
// All output funnels through here so mBytesWritten stays accurate.
void WaveFileWriter::writeByte(uint8_t b) {
    mOutputStream->write(b);
    mBytesWritten += 1;
}
// Encode one float sample (nominally -1.0..+1.0) as a rounded, clipped
// 24-bit little-endian PCM value.
void WaveFileWriter::writePCM24(float value) {
    // Offset before casting so that we can avoid using floor().
    // Also round by adding 0.5 so that very small signals go to zero.
    float temp = (PCM24_MAX * value) + 0.5 - PCM24_MIN;
    int32_t sample = ((int) temp) + PCM24_MIN;
    // clip to 24-bit range
    if (sample > PCM24_MAX) {
        sample = PCM24_MAX;
    } else if (sample < PCM24_MIN) {
        sample = PCM24_MIN;
    }
    // encode as little-endian
    writeByte(sample); // little end
    writeByte(sample >> 8); // middle
    writeByte(sample >> 16); // big end
}
// Encode one float sample (nominally -1.0..+1.0) as a rounded, clipped
// 16-bit little-endian PCM value.
void WaveFileWriter::writePCM16(float value) {
    // Offset before casting so that we can avoid using floor().
    // Also round by adding 0.5 so that very small signals go to zero.
    float temp = (INT16_MAX * value) + 0.5 - INT16_MIN;
    int32_t sample = ((int) temp) + INT16_MIN;
    // clip to 16-bit range
    if (sample > INT16_MAX) {
        sample = INT16_MAX;
    } else if (sample < INT16_MIN) {
        sample = INT16_MIN;
    }
    writeByte(sample); // little end
    writeByte(sample >> 8); // big end
}
// Write the 'RIFF' chunk header and the 'WAVE' form type.
// The RIFF size field covers everything after itself; when the data size
// is unknown (or would overflow) we fall back to INT32_MAX.
void WaveFileWriter::writeRiffHeader() {
    writeByte('R');
    writeByte('I');
    writeByte('F');
    writeByte('F');
    // Maximum size is not strictly correct but is commonly used
    // when we do not know the final size.
    const int kExtraHeaderBytes = 36; // header bytes that follow the RIFF size field
    int32_t dataSize = getDataSizeInBytes();
    writeIntLittle((dataSize > (INT32_MAX - kExtraHeaderBytes))
                   ? INT32_MAX
                   : dataSize + kExtraHeaderBytes);
    writeByte('W');
    writeByte('A');
    writeByte('V');
    writeByte('E');
}

View file

@ -0,0 +1,187 @@
/*
* Copyright 2019 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// Based on the WaveFileWriter in Java from the open source JSyn library by Phil Burk
// https://github.com/philburk/jsyn/blob/master/src/main/java/com/jsyn/util/WaveFileWriter.java
#ifndef UTIL_WAVE_FILE_WRITER
#define UTIL_WAVE_FILE_WRITER
#include <cassert>
#include <stdio.h>
#include <algorithm>
/**
 * Abstract byte sink for WaveFileWriter.
 * Implementations receive the WAV file image one byte at a time.
 */
class WaveFileOutputStream {
public:
    virtual ~WaveFileOutputStream() = default;
    // Consume one byte of the WAV stream.
    virtual void write(uint8_t b) = 0;
};
/**
 * Write audio data to a WAV file.
 *
 * All configuration (frame rate, channel count, bits per sample,
 * frame count) must happen before the first write(); the header is
 * emitted lazily on that first write.
 *
 * <pre>
 * <code>
 * WaveFileWriter writer = new WaveFileWriter(waveFileOutputStream);
 * writer.setFrameRate(48000);
 * writer.setBitsPerSample(24);
 * writer.write(floatArray, 0, numSamples);
 * writer.close();
 * </code>
 * </pre>
 *
 */
class WaveFileWriter {
public:
    /**
     * Create an object that will write a WAV file image to the specified stream.
     *
     * @param outputStream stream to receive the bytes; not owned and must
     *        outlive this writer
     */
    WaveFileWriter(WaveFileOutputStream *outputStream) {
        mOutputStream = outputStream;
    }

    /**
     * Set the number of frames per second, also known as "sample rate".
     *
     * If you call this then it must be called before the first write().
     *
     * @param frameRate default is 48000
     */
    void setFrameRate(int32_t frameRate) {
        mFrameRate = frameRate;
    }

    int32_t getFrameRate() const {
        return mFrameRate;
    }

    /**
     * Set the size of one frame.
     * For stereo, set this to 2. Default is mono = 1.
     * Also known as ChannelCount.
     *
     * If you call this then it must be called before the first write().
     *
     * @param samplesPerFrame is 2 for stereo or 1 for mono
     */
    void setSamplesPerFrame(int32_t samplesPerFrame) {
        mSamplesPerFrame = samplesPerFrame;
    }

    /**
     * Sets the number of frames in the file.
     *
     * If you do not know the final number of frames then that is OK.
     * Just do not call this method and the RIFF and DATA chunk sizes
     * will default to INT32_MAX. That is technically invalid WAV format
     * but is common practice.
     *
     * If you call this then it must be called before the first write().
     * @param frameCount number of frames to be written
     */
    void setFrameCount(int32_t frameCount) {
        mFrameCount = frameCount;
    }

    int32_t getSamplesPerFrame() const {
        return mSamplesPerFrame;
    }

    /** Only 16 or 24 bit samples supported at the moment. Default is 16.
     *
     * If you call this then it must be called before the first write().
     * @param bits number of bits in a PCM sample
     */
    void setBitsPerSample(int32_t bits) {
        assert((bits == 16) || (bits == 24));
        mBitsPerSample = bits;
    }

    int32_t getBitsPerSample() const {
        return mBitsPerSample;
    }

    // No-op: the header is written lazily on the first write() and the
    // chunk sizes are decided up front, so there is nothing to flush.
    void close() {
    }

    /** Write single audio data value to the WAV file. */
    void write(float value);

    /**
     * Write a buffer to the WAV file.
     */
    void write(float *buffer, int32_t startSample, int32_t numSamples);

private:
    /**
     * Write a 32 bit integer to the stream in Little Endian format.
     */
    void writeIntLittle(int32_t n);

    /**
     * Write a 16 bit integer to the stream in Little Endian format.
     */
    void writeShortLittle(int16_t n);

    /**
     * Write an 'fmt ' chunk to the WAV file containing the given information.
     */
    void writeFormatChunk();

    /**
     * Write a 'data' chunk header to the WAV file. This should be followed by call to
     * writeShortLittle() to write the data to the chunk.
     */
    void writeDataChunkHeader();

    /**
     * Write a simple WAV header for PCM data.
     */
    void writeHeader();

    // Write lower 8 bits. Upper bits ignored.
    void writeByte(uint8_t b);

    void writePCM24(float value);
    void writePCM16(float value);

    /**
     * Write a 'RIFF' file header and a 'WAVE' ID to the WAV file.
     */
    void writeRiffHeader();

    int32_t getDataSizeInBytes();

    static constexpr int WAVE_FORMAT_PCM = 1;

    WaveFileOutputStream *mOutputStream = nullptr;
    int32_t mFrameRate = 48000;   // frames (sample groups) per second
    int32_t mSamplesPerFrame = 1; // channel count
    int32_t mFrameCount = 0;      // 0 for unknown
    int32_t mBitsPerSample = 16;
    int32_t mBytesWritten = 0;
    bool mHeaderWritten = false;
    static constexpr int32_t PCM24_MIN = -(1 << 23);
    static constexpr int32_t PCM24_MAX = (1 << 23) - 1;
};
#endif /* UTIL_WAVE_FILE_WRITER */

Binary file not shown.

After

Width:  |  Height:  |  Size: 31 KiB

View file

@ -0,0 +1,59 @@
package com.mobileer.audio_device;
/*
* Copyright 2017 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import android.content.Context;
import android.view.LayoutInflater;
import android.view.View;
import android.view.ViewGroup;
import android.widget.ArrayAdapter;
import android.widget.TextView;
import androidx.annotation.NonNull;
import androidx.annotation.Nullable;
import com.mobileer.oboetester.R;
/**
 * Provides views for a list of audio devices. Usually used as an Adapter for a Spinner or ListView.
 */
public class AudioDeviceAdapter extends ArrayAdapter<AudioDeviceListEntry> {

    public AudioDeviceAdapter(Context context) {
        super(context, R.layout.audio_devices);
    }

    @NonNull
    @Override
    public View getView(int position, @Nullable View convertView, @NonNull ViewGroup parent) {
        // A normal item view and a drop-down view look identical for this adapter.
        return getDropDownView(position, convertView, parent);
    }

    @Override
    public View getDropDownView(int position, @Nullable View convertView, @NonNull ViewGroup parent) {
        View itemView = convertView;
        if (itemView == null) {
            // No recycled view available; inflate a fresh row.
            itemView = LayoutInflater.from(parent.getContext())
                    .inflate(R.layout.audio_devices, parent, false);
        }
        AudioDeviceListEntry entry = getItem(position);
        TextView nameView = (TextView) itemView.findViewById(R.id.device_name);
        nameView.setText(entry.getName());
        return itemView;
    }
}

View file

@ -0,0 +1,220 @@
package com.mobileer.audio_device;
/*
* Copyright 2017 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import android.media.AudioDescriptor;
import android.media.AudioDeviceInfo;
import android.media.AudioManager;
import android.media.AudioMixerAttributes;
import android.media.AudioProfile;
import android.os.Build;
import java.util.List;
import java.util.Locale;
/**
 * Converts {@link AudioDeviceInfo} objects into human readable strings.
 */
public class AudioDeviceInfoConverter {

    /**
     * Converts an {@link AudioDeviceInfo} object into a human readable representation
     *
     * @param audioManager used to query supported mixer attributes (API 34+)
     * @param adi The AudioDeviceInfo object to be converted to a String
     * @return String containing all the information from the AudioDeviceInfo object
     */
    public static String toString(AudioManager audioManager, AudioDeviceInfo adi){
        StringBuilder sb = new StringBuilder();
        sb.append("Id: ");
        sb.append(adi.getId());
        sb.append("\nProduct name: ");
        sb.append(adi.getProductName());
        sb.append("\nType: ");
        sb.append(typeToString(adi.getType()));
        sb.append("\nIs source: ");
        sb.append((adi.isSource() ? "Yes" : "No"));
        sb.append("\nIs sink: ");
        sb.append((adi.isSink() ? "Yes" : "No"));
        sb.append("\nChannel counts: ");
        int[] channelCounts = adi.getChannelCounts();
        sb.append(intArrayToString(channelCounts));
        sb.append("\nChannel masks: ");
        int[] channelMasks = adi.getChannelMasks();
        sb.append(intArrayToStringHex(channelMasks));
        sb.append("\nChannel index masks: ");
        int[] channelIndexMasks = adi.getChannelIndexMasks();
        sb.append(intArrayToStringHex(channelIndexMasks));
        sb.append("\nEncodings: ");
        int[] encodings = adi.getEncodings();
        sb.append(intArrayToString(encodings));
        sb.append("\nSample Rates: ");
        int[] sampleRates = adi.getSampleRates();
        sb.append(intArrayToString(sampleRates));
        // Each of the following attributes is gated on the API level that
        // introduced the corresponding AudioDeviceInfo method.
        if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.P) {
            sb.append("\nAddress: ");
            sb.append(adi.getAddress());
        }
        if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.R) {
            sb.append("\nEncapsulation Metadata Types: ");
            int[] encapsulationMetadataTypes = adi.getEncapsulationMetadataTypes();
            sb.append(intArrayToString(encapsulationMetadataTypes));
        }
        if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.R) {
            sb.append("\nEncapsulation Modes: ");
            int[] encapsulationModes = adi.getEncapsulationModes();
            sb.append(intArrayToString(encapsulationModes));
        }
        if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.S) {
            sb.append("\nAudio Descriptors: ");
            List<AudioDescriptor> audioDescriptors = adi.getAudioDescriptors();
            sb.append(audioDescriptors);
        }
        if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.S) {
            sb.append("\nAudio Profiles: ");
            List<AudioProfile> audioProfiles = adi.getAudioProfiles();
            sb.append(audioProfiles);
        }
        if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.UPSIDE_DOWN_CAKE) {
            sb.append("\nSupported Mixer Attributes: ");
            List<AudioMixerAttributes> audioMixerAttributes =
                    audioManager.getSupportedMixerAttributes(adi);
            sb.append(audioMixerAttributes);
        }
        sb.append("\n");
        return sb.toString();
    }

    /**
     * Converts an integer array into a string where each int is separated by a space
     *
     * @param integerArray the integer array to convert to a string
     * @return string containing all the integer values separated by spaces
     */
    private static String intArrayToString(int[] integerArray){
        StringBuilder sb = new StringBuilder();
        for (int i = 0; i < integerArray.length; i++){
            sb.append(integerArray[i]);
            if (i != integerArray.length - 1) sb.append(" ");
        }
        return sb.toString();
    }

    /**
     * Converts an integer array into a hexadecimal string where each int is separated by a space
     *
     * @param integerArray the integer array to convert to a string
     * @return string containing all the integer values separated by spaces
     */
    private static String intArrayToStringHex(int[] integerArray){
        StringBuilder sb = new StringBuilder();
        for (int i = 0; i < integerArray.length; i++){
            sb.append(String.format(Locale.getDefault(), "0x%02X", integerArray[i]));
            if (i != integerArray.length - 1) sb.append(" ");
        }
        return sb.toString();
    }

    /**
     * Converts the value from {@link AudioDeviceInfo#getType()} into a human
     * readable string
     * @param type One of the {@link AudioDeviceInfo}.TYPE_* values
     *             e.g. AudioDeviceInfo.TYPE_BUILT_IN_SPEAKER
     * @return string which describes the type of audio device
     */
    public static String typeToString(int type){
        switch (type) {
            case AudioDeviceInfo.TYPE_AUX_LINE:
                return "auxiliary line-level connectors";
            case AudioDeviceInfo.TYPE_BLE_BROADCAST:
                return "BLE broadcast";
            case AudioDeviceInfo.TYPE_BLE_HEADSET:
                return "BLE headset";
            case AudioDeviceInfo.TYPE_BLE_SPEAKER:
                return "BLE speaker";
            case AudioDeviceInfo.TYPE_BLUETOOTH_A2DP:
                return "Bluetooth A2DP";
            case AudioDeviceInfo.TYPE_BLUETOOTH_SCO:
                return "Bluetooth telephony SCO";
            case AudioDeviceInfo.TYPE_BUILTIN_EARPIECE:
                return "built-in earpiece";
            case AudioDeviceInfo.TYPE_BUILTIN_MIC:
                return "built-in microphone";
            case AudioDeviceInfo.TYPE_BUILTIN_SPEAKER:
                return "built-in speaker";
            case AudioDeviceInfo.TYPE_BUILTIN_SPEAKER_SAFE:
                return "built-in speaker safe";
            case AudioDeviceInfo.TYPE_BUS:
                return "bus";
            case AudioDeviceInfo.TYPE_DOCK:
                return "dock";
            // Numeric literals are used for constants that are hidden or
            // unavailable in the compile SDK.
            case 31: // AudioDeviceInfo.TYPE_DOCK_ANALOG:
                return "dock analog";
            case 28: // AudioDeviceInfo.TYPE_ECHO_REFERENCE:
                return "echo reference";
            case AudioDeviceInfo.TYPE_FM:
                return "FM";
            case AudioDeviceInfo.TYPE_FM_TUNER:
                return "FM tuner";
            case AudioDeviceInfo.TYPE_HDMI:
                return "HDMI";
            case AudioDeviceInfo.TYPE_HEARING_AID:
                return "hearing aid";
            case AudioDeviceInfo.TYPE_HDMI_ARC:
                return "HDMI audio return channel";
            case AudioDeviceInfo.TYPE_HDMI_EARC:
                return "HDMI enhanced ARC";
            case AudioDeviceInfo.TYPE_IP:
                return "IP";
            case AudioDeviceInfo.TYPE_LINE_ANALOG:
                return "line analog";
            case AudioDeviceInfo.TYPE_LINE_DIGITAL:
                return "line digital";
            case AudioDeviceInfo.TYPE_REMOTE_SUBMIX:
                return "remote submix";
            case AudioDeviceInfo.TYPE_TELEPHONY:
                return "telephony";
            case AudioDeviceInfo.TYPE_TV_TUNER:
                return "TV tuner";
            case AudioDeviceInfo.TYPE_USB_ACCESSORY:
                return "USB accessory";
            case AudioDeviceInfo.TYPE_USB_DEVICE:
                return "USB device";
            case AudioDeviceInfo.TYPE_USB_HEADSET:
                return "USB headset";
            case AudioDeviceInfo.TYPE_WIRED_HEADPHONES:
                return "wired headphones";
            case AudioDeviceInfo.TYPE_WIRED_HEADSET:
                return "wired headset";
            // Fall through: unknown and any type added after this was written.
            default:
            case AudioDeviceInfo.TYPE_UNKNOWN:
                return "unknown=" + type;
        }
    }
}

View file

@ -0,0 +1,105 @@
package com.mobileer.audio_device;
/*
* Copyright 2017 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import android.annotation.TargetApi;
import android.media.AudioDeviceInfo;
import android.media.AudioManager;
import java.util.List;
import java.util.Vector;
/**
 * POJO which represents basic information for an audio device.
 *
 * Example: id: 8, deviceName: "built-in speaker"
 */
public class AudioDeviceListEntry {
    private int mId;
    private String mName;
    private AudioDeviceInfo mDeviceInfo;

    public AudioDeviceListEntry(int deviceId, String deviceName) {
        this(deviceId, deviceName, null);
    }

    public AudioDeviceListEntry(int deviceId, String deviceName, AudioDeviceInfo deviceInfo) {
        mId = deviceId;
        mName = deviceName;
        mDeviceInfo = deviceInfo;
    }

    public int getId() {
        return mId;
    }

    public String getName(){
        return mName;
    }

    public AudioDeviceInfo getDeviceInfo() { return mDeviceInfo; }

    public String toString(){
        return getName();
    }

    /** Equality is based on id and name only; the AudioDeviceInfo is ignored. */
    @Override
    public boolean equals(Object o) {
        if (this == o) return true;
        if (o == null || getClass() != o.getClass()) return false;
        AudioDeviceListEntry other = (AudioDeviceListEntry) o;
        if (mId != other.mId) return false;
        return (mName == null) ? (other.mName == null) : mName.equals(other.mName);
    }

    @Override
    public int hashCode() {
        int hash = mId;
        hash = 31 * hash + ((mName == null) ? 0 : mName.hashCode());
        return hash;
    }

    /**
     * Create a list of AudioDeviceListEntry objects from a list of AudioDeviceInfo objects.
     *
     * @param devices A list of {@link AudioDeviceInfo} objects
     * @param directionType Only audio devices with this direction will be included in the list.
     *                      Valid values are GET_DEVICES_ALL, GET_DEVICES_OUTPUTS and
     *                      GET_DEVICES_INPUTS.
     * @return A list of AudioDeviceListEntry objects
     */
    @TargetApi(23)
    static List<AudioDeviceListEntry> createListFrom(AudioDeviceInfo[] devices, int directionType){
        List<AudioDeviceListEntry> entries = new Vector<>();
        for (AudioDeviceInfo device : devices) {
            boolean directionMatches =
                    directionType == AudioManager.GET_DEVICES_ALL
                    || (directionType == AudioManager.GET_DEVICES_OUTPUTS && device.isSink())
                    || (directionType == AudioManager.GET_DEVICES_INPUTS && device.isSource());
            if (directionMatches) {
                String label = device.getId() + ": " + device.getProductName()
                        + " " + AudioDeviceInfoConverter.typeToString(device.getType());
                entries.add(new AudioDeviceListEntry(device.getId(), label, device));
            }
        }
        return entries;
    }
}

View file

@ -0,0 +1,122 @@
package com.mobileer.audio_device;
/*
* Copyright 2017 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import android.annotation.TargetApi;
import android.content.Context;
import android.content.res.Resources.Theme;
import android.media.AudioDeviceCallback;
import android.media.AudioDeviceInfo;
import android.media.AudioManager;
import android.util.AttributeSet;
import androidx.appcompat.widget.AppCompatSpinner;
import com.mobileer.oboetester.R;
import java.util.List;
/**
 * Spinner that lists audio devices for a given direction (inputs, outputs
 * or all). Registers an AudioDeviceCallback so the list tracks device
 * hot-plug events, and always offers an "auto select" default entry.
 *
 * Fix: added the missing {@code @Override} annotation on
 * onAudioDevicesRemoved so the compiler verifies the callback signature.
 */
public class AudioDeviceSpinner extends AppCompatSpinner {

    private static final int AUTO_SELECT_DEVICE_ID = 0;
    private static final String TAG = AudioDeviceSpinner.class.getName();
    private int mDirectionType;
    private AudioDeviceAdapter mDeviceAdapter;
    private AudioManager mAudioManager;
    private Context mContext;

    public AudioDeviceSpinner(Context context){
        super(context);
        setup(context);
    }

    public AudioDeviceSpinner(Context context, int mode){
        super(context, mode);
        setup(context);
    }

    public AudioDeviceSpinner(Context context, AttributeSet attrs){
        super(context, attrs);
        setup(context);
    }

    public AudioDeviceSpinner(Context context, AttributeSet attrs, int defStyleAttr){
        super(context, attrs, defStyleAttr);
        setup(context);
    }

    public AudioDeviceSpinner(Context context, AttributeSet attrs, int defStyleAttr, int mode){
        super(context, attrs, defStyleAttr, mode);
        setup(context);
    }

    public AudioDeviceSpinner(Context context, AttributeSet attrs, int defStyleAttr,
                              int mode, Theme popupTheme){
        super(context, attrs, defStyleAttr, mode, popupTheme);
        setup(context);
    }

    // Shared initialization for all constructors.
    private void setup(Context context){
        mContext = context;
        mAudioManager = (AudioManager) context.getSystemService(Context.AUDIO_SERVICE);
        mDeviceAdapter = new AudioDeviceAdapter(context);
        setAdapter(mDeviceAdapter);
        // Add a default entry to the list and select it
        mDeviceAdapter.add(new AudioDeviceListEntry(AUTO_SELECT_DEVICE_ID,
                mContext.getString(R.string.auto_select)));
        setSelection(0);
    }

    /**
     * Set which devices to list (AudioManager.GET_DEVICES_INPUTS,
     * GET_DEVICES_OUTPUTS or GET_DEVICES_ALL) and start tracking
     * device changes.
     */
    @TargetApi(23)
    public void setDirectionType(int directionType){
        this.mDirectionType = directionType;
        setupAudioDeviceCallback();
    }

    @TargetApi(23)
    private void setupAudioDeviceCallback(){
        // Note that we will immediately receive a call to onDevicesAdded with the list of
        // devices which are currently connected.
        mAudioManager.registerAudioDeviceCallback(new AudioDeviceCallback() {
            @Override
            public void onAudioDevicesAdded(AudioDeviceInfo[] addedDevices) {
                List<AudioDeviceListEntry> deviceList =
                        AudioDeviceListEntry.createListFrom(addedDevices, mDirectionType);
                if (deviceList.size() > 0){
                    // Prevent duplicate entries caused by b/80138804
                    for (AudioDeviceListEntry entry : deviceList){
                        mDeviceAdapter.remove(entry);
                    }
                    mDeviceAdapter.addAll(deviceList);
                }
            }

            @Override // was missing; without it a signature typo would silently no-op
            public void onAudioDevicesRemoved(AudioDeviceInfo[] removedDevices) {
                List<AudioDeviceListEntry> deviceList =
                        AudioDeviceListEntry.createListFrom(removedDevices, mDirectionType);
                for (AudioDeviceListEntry entry : deviceList){
                    mDeviceAdapter.remove(entry);
                }
                // Fall back to the "auto select" entry.
                setSelection(0);
            }
        }, null);
    }
}

View file

@ -0,0 +1,126 @@
package com.mobileer.audio_device;
/*
* Copyright 2022 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import android.annotation.TargetApi;
import android.content.Context;
import android.content.res.Resources.Theme;
import android.media.AudioDeviceCallback;
import android.media.AudioDeviceInfo;
import android.media.AudioManager;
import android.util.AttributeSet;
import androidx.appcompat.widget.AppCompatSpinner;
import com.mobileer.oboetester.R;
import java.util.List;
/**
 * Spinner listing devices usable for communication routing.
 * Entry POS_CLEAR clears the routing; device entries follow from
 * POS_DEVICES. The device list is populated on API 31+ (S) via
 * AudioManager#getAvailableCommunicationDevices; on older releases only
 * the "clear" entry is shown.
 *
 * Fix: added the missing {@code @Override} annotation on
 * onAudioDevicesRemoved so the compiler verifies the callback signature.
 */
public class CommunicationDeviceSpinner extends AppCompatSpinner {
    private static final String TAG = CommunicationDeviceSpinner.class.getName();
    // menu positions
    public static final int POS_CLEAR = 0;
    public static final int POS_DEVICES = 1; // base position for device list
    private AudioDeviceAdapter mDeviceAdapter;
    private AudioManager mAudioManager;
    private Context mContext;
    // Devices backing the spinner entries; null until the first device
    // callback fires on API 31+.
    AudioDeviceInfo[] mCommDeviceArray = null;

    public CommunicationDeviceSpinner(Context context){
        super(context);
        setup(context);
    }

    public CommunicationDeviceSpinner(Context context, int mode){
        super(context, mode);
        setup(context);
    }

    public CommunicationDeviceSpinner(Context context, AttributeSet attrs){
        super(context, attrs);
        setup(context);
    }

    public CommunicationDeviceSpinner(Context context, AttributeSet attrs, int defStyleAttr){
        super(context, attrs, defStyleAttr);
        setup(context);
    }

    public CommunicationDeviceSpinner(Context context, AttributeSet attrs, int defStyleAttr, int mode){
        super(context, attrs, defStyleAttr, mode);
        setup(context);
    }

    public CommunicationDeviceSpinner(Context context, AttributeSet attrs, int defStyleAttr,
                                      int mode, Theme popupTheme){
        super(context, attrs, defStyleAttr, mode, popupTheme);
        setup(context);
    }

    /** @return the communication devices backing the list, or null if none fetched yet */
    public AudioDeviceInfo[] getCommunicationsDevices() {
        return mCommDeviceArray;
    }

    // Shared initialization for all constructors.
    private void setup(Context context){
        mContext = context;
        mAudioManager = (AudioManager) context.getSystemService(Context.AUDIO_SERVICE);
        mDeviceAdapter = new AudioDeviceAdapter(context);
        setAdapter(mDeviceAdapter);
        // Add default entries to the list and select one.
        addDefaultDevicesOptions();
        setSelection(POS_CLEAR);
        setupCommunicationDeviceListener();
    }

    @TargetApi(31)
    private void setupCommunicationDeviceListener(){
        // Note that we will immediately receive a call to onDevicesAdded with the list of
        // devices which are currently connected.
        mAudioManager.registerAudioDeviceCallback(new AudioDeviceCallback() {
            @Override
            public void onAudioDevicesAdded(AudioDeviceInfo[] addedDevices) {
                updateDeviceList();
            }

            @Override // was missing; without it a signature typo would silently no-op
            public void onAudioDevicesRemoved(AudioDeviceInfo[] removedDevices) {
                updateDeviceList();
            }

            // Rebuild the whole list from scratch on every device change.
            private void updateDeviceList() {
                mDeviceAdapter.clear();
                addDefaultDevicesOptions();
                setSelection(POS_CLEAR);
                if (android.os.Build.VERSION.SDK_INT >= android.os.Build.VERSION_CODES.S) {
                    List<AudioDeviceInfo> commDeviceList = mAudioManager.getAvailableCommunicationDevices();
                    mCommDeviceArray = commDeviceList.toArray(new AudioDeviceInfo[0]);
                    // Communications Devices are always OUTPUTS.
                    List<AudioDeviceListEntry> deviceList =
                            AudioDeviceListEntry.createListFrom(
                                    mCommDeviceArray, AudioManager.GET_DEVICES_OUTPUTS);
                    mDeviceAdapter.addAll(deviceList);
                }
            }
        }, null);
    }

    private void addDefaultDevicesOptions() {
        mDeviceAdapter.add(new AudioDeviceListEntry(POS_CLEAR,
                mContext.getString(R.string.clear_comm)));
    }
}

View file

@ -0,0 +1,243 @@
/*
* Copyright (C) 2014 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.mobileer.miditools;
import java.util.SortedMap;
import java.util.TreeMap;
/**
* Store SchedulableEvents in a timestamped buffer.
* Events may be written in any order.
* Events will be read in sorted order.
* Events with the same timestamp will be read in the order they were added.
*
* Only one Thread can write into the buffer.
* And only one Thread can read from the buffer.
*/
public class EventScheduler {
    private static final long NANOS_PER_MILLI = 1000000;

    // Guards mEventBuffer; also used to wake a reader when an earlier
    // event arrives (see add()).
    private final Object lock = new Object();
    // Events keyed by timestamp; TreeMap keeps keys sorted so the
    // earliest event is firstKey().
    private SortedMap<Long, FastEventQueue> mEventBuffer;
    // This does not have to be guarded. It is only set by the writing thread.
    // If the reader sees a null right before being set then that is OK.
    private FastEventQueue mEventPool = null;
    // Cap on recycled events; extras are dropped (see addEventToPool()).
    private static final int MAX_POOL_SIZE = 200;

    public EventScheduler() {
        mEventBuffer = new TreeMap<Long, FastEventQueue>();
    }
    // If we keep at least one node in the list then it can be atomic
    // and non-blocking: one thread only touches mFirst, the other only
    // touches mLast, and the counters are monotonic.
    private class FastEventQueue {
        // One thread takes from the beginning of the list.
        volatile SchedulableEvent mFirst;
        // A second thread returns events to the end of the list.
        volatile SchedulableEvent mLast;
        // Monotonic counters; size() is their difference.
        volatile long mEventsAdded;
        volatile long mEventsRemoved;

        FastEventQueue(SchedulableEvent event) {
            mFirst = event;
            mLast = mFirst;
            mEventsAdded = 1; // Always created with one event added. Never empty.
            mEventsRemoved = 0; // None removed yet.
        }

        // Current number of events in the queue.
        int size() {
            return (int)(mEventsAdded - mEventsRemoved);
        }

        /**
         * Remove and return the event at the head of the list.
         * Do not call this unless there is more than one event
         * in the list.
         * @return first event in the list
         */
        public SchedulableEvent remove() {
            // Take first event.
            mEventsRemoved++;
            SchedulableEvent event = mFirst;
            mFirst = event.mNext;
            return event;
        }

        /**
         * Append an event to the tail of the list.
         * @param event event to append
         */
        public void add(SchedulableEvent event) {
            event.mNext = null;
            mLast.mNext = event;
            mLast = event;
            mEventsAdded++;
        }
    }
    /**
     * Base class for events that can be stored in the EventScheduler.
     * Carries a timestamp plus an intrusive link used by FastEventQueue.
     */
    public static class SchedulableEvent {
        private long mTimestamp;
        // Intrusive next-pointer, managed by FastEventQueue.
        private SchedulableEvent mNext = null;

        /**
         * @param timestamp time at which the event becomes due
         *        (timebase defined by the scheduler's caller)
         */
        public SchedulableEvent(long timestamp) {
            mTimestamp = timestamp;
        }

        /**
         * @return timestamp
         */
        public long getTimestamp() {
            return mTimestamp;
        }

        /**
         * The timestamp should not be modified when the event is in the
         * scheduling buffer.
         */
        public void setTimestamp(long timestamp) {
            mTimestamp = timestamp;
        }
    }
/**
* Get an event from the pool.
* Always leave at least one event in the pool.
* @return event or null
*/
public SchedulableEvent removeEventfromPool() {
SchedulableEvent event = null;
if (mEventPool != null && (mEventPool.size() > 1)) {
event = mEventPool.remove();
}
return event;
}
/**
* Return events to a pool so they can be reused.
*
* @param event
*/
public void addEventToPool(SchedulableEvent event) {
if (mEventPool == null) {
mEventPool = new FastEventQueue(event); // add event to pool
// If we already have enough items in the pool then just
// drop the event. This prevents unbounded memory leaks.
} else if (mEventPool.size() < MAX_POOL_SIZE) {
mEventPool.add(event);
}
}
/**
* Add an event to the scheduler. Events with the same time will be
* processed in order.
*
* @param event
*/
public void add(SchedulableEvent event) {
synchronized (lock) {
FastEventQueue list = mEventBuffer.get(event.getTimestamp());
if (list == null) {
long lowestTime = mEventBuffer.isEmpty() ? Long.MAX_VALUE
: mEventBuffer.firstKey();
list = new FastEventQueue(event);
mEventBuffer.put(event.getTimestamp(), list);
// If the event we added is earlier than the previous earliest
// event then notify any threads waiting for the next event.
if (event.getTimestamp() < lowestTime) {
lock.notify();
}
} else {
list.add(event);
}
}
}
// Caller must synchronize on lock before calling.
private SchedulableEvent removeNextEventLocked(long lowestTime) {
SchedulableEvent event;
FastEventQueue list = mEventBuffer.get(lowestTime);
// Remove list from tree if this is the last node.
if ((list.size() == 1)) {
mEventBuffer.remove(lowestTime);
}
event = list.remove();
return event;
}
/**
* Check to see if any scheduled events are ready to be processed.
*
* @param timestamp
* @return next event or null if none ready
*/
public SchedulableEvent getNextEvent(long time) {
SchedulableEvent event = null;
synchronized (lock) {
if (!mEventBuffer.isEmpty()) {
long lowestTime = mEventBuffer.firstKey();
// Is it time for this list to be processed?
if (lowestTime <= time) {
event = removeNextEventLocked(lowestTime);
}
}
}
// Log.i(TAG, "getNextEvent: event = " + event);
return event;
}
/**
* Return the next available event or wait until there is an event ready to
* be processed. This method assumes that the timestamps are in nanoseconds
* and that the current time is System.nanoTime().
*
* @return event
* @throws InterruptedException
*/
public SchedulableEvent waitNextEvent() throws InterruptedException {
SchedulableEvent event = null;
while (true) {
long millisToWait = Integer.MAX_VALUE;
synchronized (lock) {
if (!mEventBuffer.isEmpty()) {
long now = System.nanoTime();
long lowestTime = mEventBuffer.firstKey();
// Is it time for the earliest list to be processed?
if (lowestTime <= now) {
event = removeNextEventLocked(lowestTime);
break;
} else {
// Figure out how long to sleep until next event.
long nanosToWait = lowestTime - now;
// Add 1 millisecond so we don't wake up before it is
// ready.
millisToWait = 1 + (nanosToWait / NANOS_PER_MILLI);
// Clip 64-bit value to 32-bit max.
if (millisToWait > Integer.MAX_VALUE) {
millisToWait = Integer.MAX_VALUE;
}
}
}
lock.wait((int) millisToWait);
}
}
return event;
}
}

Some files were not shown because too many files have changed in this diff Show more