Sketches for the Pi Pico 2 Audio
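// -------------------------------------------------------------------------
// Sketch 1: record-and-playback test. Records two seconds of audio from an
// analog microphone on A0 at 8 kHz, then plays it back through PWM on GPIO 16.
// -------------------------------------------------------------------------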
#include <PWMAudio.h>
#include <ADCInput.h>
ADCInput mike(A0);       // ADC microphone input on pin A0
int16_t out_raw[16000];  // 16000 samples = 2 seconds of audio at 8 kHz
// The sample pointers
const int16_t *start = (const int16_t *)out_raw;
const int16_t *p = start;
// Create the PWM audio device on GPIO 16. Hook amp/speaker between GPIO 16 and a convenient GND.
PWMAudio pwm(16);
unsigned int count = 0;
void cb() {
  while (pwm.availableForWrite()) {
    pwm.write(*p++);
    count += 2;  // count is in bytes; each int16_t sample is two bytes
    if (count >= sizeof(out_raw)) {
      count = 0;
      p = start;  // wrap back to the start of the buffer
    }
  }
}
void setup() {
  Serial.begin(115200);  // loop() prints status messages over Serial
  mike.begin(8000);      // start the ADC input at 8 kHz
  pwm.onTransmit(cb);    // cb() refills the PWM output from out_raw
  pwm.begin(8000);       // start PWM playback at 8 kHz
}
void loop() {
  Serial.println("Recording");
  // Blocking read of two seconds of audio; scale the ADC samples up toward the int16_t range
  for (uint32_t i = 0; i < 16000; i++) {
    out_raw[i] = mike.read() * 16;
  }
  Serial.println("Playing");
  delay(2000);  // give the PWM callback time to play the buffer back before re-recording
}
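// -------------------------------------------------------------------------
// Sketch 2: Edge Impulse continuous audio inference, adapted to read audio
// from the Pico 2 ADC via ADCInput instead of a PDM microphone.
// -------------------------------------------------------------------------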
/* Edge Impulse ingestion SDK
* Copyright (c) 2022 EdgeImpulse Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
// If your target is limited in memory remove this macro to save 10K RAM
#define EIDSP_QUANTIZE_FILTERBANK 0
#define EI_CLASSIFIER_SLICES_PER_MODEL_WINDOW 2
#define EI_CLASSIFIER_SLICE_SIZE (EI_CLASSIFIER_RAW_SAMPLE_COUNT / EI_CLASSIFIER_SLICES_PER_MODEL_WINDOW)
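// Example (illustrative numbers only -- the real values come from model_metadata.h
// in your deployment): a 1-second model window sampled at 16 kHz gives
// EI_CLASSIFIER_RAW_SAMPLE_COUNT = 16000, so with 2 slices per window each slice
// covers 8000 samples, i.e. 500 ms of audio per call to run_classifier_continuous().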
/*
** NOTE: If you run into a TFLite arena allocation issue.
**
** This may be due to dynamic memory fragmentation.
** Try defining "-DEI_CLASSIFIER_ALLOCATION_STATIC" in boards.local.txt (create
** if it doesn't exist) and copy this file to
** `<ARDUINO_CORE_INSTALL_PATH>/arduino/hardware/<mbed_core>/<core_version>/`.
**
** See
** (https://support.arduino.cc/hc/en-us/articles/360012076960-Where-are-the-installed-cores-located-)
** to find where Arduino installs cores on your machine.
**
** If the problem persists then there's not enough memory for this model and application.
*/
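/*
** For reference, a boards.local.txt entry enabling the flag could look like the
** line below. This is only a sketch: the "rpipico2" board id and the
** build.extra_flags property are assumptions -- check the boards.txt of the core
** you actually have installed for the correct board id and property name.
**
**   rpipico2.build.extra_flags=-DEI_CLASSIFIER_ALLOCATION_STATIC
*/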
/*
** NOTE: If you are seeing Error sample buffer overrun.
**
** Cortex M0+ has no hardware floating point support, therefore DSP
** operations are rather slow. You can try decreasing
** EI_CLASSIFIER_SLICES_PER_MODEL_WINDOW or tweak the MFCC/MFE block parameters
** in Studio and re-deploy your project to get faster processing times.
*/
/* Includes ---------------------------------------------------------------- */
#include <robot-control-english_inferencing.h> // change this to the header generated by your own Edge Impulse deployment!
#include <ADCInput.h>
/** Audio buffers, pointers and selectors */
typedef struct {
    signed short *buffers[2];
    unsigned char buf_select;
    unsigned char buf_ready;
    unsigned int buf_count;
    unsigned int n_samples;
} inference_t;
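// Double ("ping-pong") buffering: the ADC receive callback fills
// buffers[buf_select] while the classifier reads from the other buffer;
// buf_ready flags a full buffer and buf_select is flipped whenever one fills up.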
static inference_t inference;
static volatile bool record_ready = false;
// static signed short *sampleBuffer;
static signed short sampleBuffer[2048]; // retained from the original PDM example; not used with ADCInput
static bool debug_nn = false; // Set this to true to see e.g. features generated from the raw signal
static int print_results = -(EI_CLASSIFIER_SLICES_PER_MODEL_WINDOW);
ADCInput mike(A0);
/**
* @brief Arduino setup function
*/
void setup()
{
    // put your setup code here, to run once:
    Serial.begin(115200);
    // comment out the below line to cancel the wait for USB connection (needed for native USB)
    while (!Serial);
    Serial.println("Edge Impulse Inferencing Demo");

    // summary of inferencing settings (from model_metadata.h)
    ei_printf("Inferencing settings:\n");
    ei_printf("\tInterval: ");
    ei_printf_float((float)EI_CLASSIFIER_INTERVAL_MS);
    ei_printf(" ms.\n");
    ei_printf("\tFrame size: %d\n", EI_CLASSIFIER_DSP_INPUT_FRAME_SIZE);
    ei_printf("\tSample length: %d ms.\n", EI_CLASSIFIER_RAW_SAMPLE_COUNT / 16);
    ei_printf("\tNo. of classes: %d\n", sizeof(ei_classifier_inferencing_categories) /
                                        sizeof(ei_classifier_inferencing_categories[0]));

    run_classifier_init();
    if (microphone_inference_start(EI_CLASSIFIER_SLICE_SIZE) == false) {
        ei_printf("ERR: Could not allocate audio buffer (size %d), this could be due to the window length of your model\r\n", EI_CLASSIFIER_RAW_SAMPLE_COUNT);
        return;
    }
}
/**
* @brief Arduino main function. Runs the inferencing loop.
*/
void loop()
{
    bool m = microphone_inference_record();
    if (!m) {
        ei_printf("ERR: Failed to record audio...\n");
        return;
    }

    signal_t signal;
    signal.total_length = EI_CLASSIFIER_SLICE_SIZE;
    signal.get_data = &microphone_audio_signal_get_data;
    ei_impulse_result_t result = {0};

    EI_IMPULSE_ERROR res = run_classifier_continuous(&signal, &result, debug_nn);
    if (res != EI_IMPULSE_OK) {
        ei_printf("ERR: Failed to run classifier (%d)\n", res);
        return;
    }

    if (++print_results >= (EI_CLASSIFIER_SLICES_PER_MODEL_WINDOW)) {
        // print inference return code
        ei_printf("run_classifier returned: %d\r\n", res);
        print_inference_result(result);
        print_results = 0;
    }
}
/**
* @brief buffer full callback
* Copy audio data to app buffers
*/
static void data_ready_inference_callback(void)
{
    int bytesAvailable = mike.available();

    // read into the sample buffer
    // int bytesRead = PDM.read((char *)&sampleBuffer[0], bytesAvailable);

    if ((inference.buf_ready == 0) && (record_ready == true)) {
        for (int i = 0; i < bytesAvailable >> 1; i++) {
            // scale the ADC sample up toward the int16_t range expected by the DSP code
            inference.buffers[inference.buf_select][inference.buf_count++] = mike.read() * 16;

            if (inference.buf_count >= inference.n_samples) {
                inference.buf_select ^= 1;
                inference.buf_count = 0;
                inference.buf_ready = 1;
                break;
            }
        }
    }
}
/**
* @brief Init inferencing struct and set up / start the ADC input
*
* @param[in] n_samples The n samples
*
* @return True on success, false if the audio buffers could not be allocated
*/
static bool microphone_inference_start(uint32_t n_samples)
{
    inference.buffers[0] = (signed short *)malloc(n_samples * sizeof(signed short));
    if (inference.buffers[0] == NULL) {
        return false;
    }

    inference.buffers[1] = (signed short *)malloc(n_samples * sizeof(signed short));
    if (inference.buffers[1] == NULL) {
        ei_free(inference.buffers[0]);
        return false;
    }

    inference.buf_select = 0;
    inference.buf_count = 0;
    inference.n_samples = n_samples;
    inference.buf_ready = 0;

    mike.setBuffers(2, 2048);
    mike.onReceive(data_ready_inference_callback);
    mike.begin(16000);  // sample the ADC at 16 kHz, the audio frequency the model expects

    record_ready = true;
    return true;
}
/**
* @brief Wait on new data
*
* @return True when finished
*/
static bool microphone_inference_record(void)
{
    bool ret = true;

    if (inference.buf_ready == 1) {
        ei_printf(
            "Error sample buffer overrun. Decrease the number of slices per model window "
            "EI_CLASSIFIER_SLICES_PER_MODEL_WINDOW is currently set to %d\n", EI_CLASSIFIER_SLICES_PER_MODEL_WINDOW);
        ret = false;
    }

    while (inference.buf_ready == 0) {
        delay(1);
    }

    inference.buf_ready = 0;
    return ret;
}
/**
* Get raw audio signal data
*/
static int microphone_audio_signal_get_data(size_t offset, size_t length, float *out_ptr)
{
    // convert from the buffer that is not currently being filled by the ADC callback
    numpy::int16_to_float(&inference.buffers[inference.buf_select ^ 1][offset], out_ptr, length);
    return 0;
}
/**
* @brief Stop the ADC input and release buffers
*/
static void microphone_inference_end(void)
{
    mike.end();
    ei_free(inference.buffers[0]);
    ei_free(inference.buffers[1]);
    record_ready = false;
}
void print_inference_result(ei_impulse_result_t result) {
    // Print how long it took to perform inference
    ei_printf("Timing: DSP %d ms, inference %d ms, anomaly %d ms\r\n",
              result.timing.dsp,
              result.timing.classification,
              result.timing.anomaly);

    ei_printf("Predictions:\r\n");
    for (uint16_t i = 0; i < EI_CLASSIFIER_LABEL_COUNT; i++) {
        ei_printf("  %s: ", ei_classifier_inferencing_categories[i]);
        ei_printf("%.5f\r\n", result.classification[i].value);
    }

    // Print anomaly result (if it exists)
#if EI_CLASSIFIER_HAS_ANOMALY == 1
    ei_printf("Anomaly prediction: %.3f\r\n", result.anomaly);
#endif
}
#if !defined(EI_CLASSIFIER_SENSOR) || EI_CLASSIFIER_SENSOR != EI_CLASSIFIER_SENSOR_MICROPHONE
#error "Invalid model for current sensor."
#endif