
Google Cloud Speech to Text V2 streaming audio feed from a microphone


I'm running a speech-to-text stream on an Android device with microphone input, and it works smoothly in V1.

Here is the V1 tutorial:
https://cloud.google.com/speech-to-text/docs/transcribe-streaming-audio

I used StreamingRecognizeRequest and set a ResponseObserver as the callback, and the final transcripts were returned.
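
For reference, my working V1 setup looked roughly like this (a trimmed-down sketch, not my exact code; the 16 kHz LINEAR16 settings match my microphone format, and SpeechV1Streamer is just a name I use here):

// Minimal V1 helper (com.google.cloud.speech.v1): send the config first, then raw audio chunks.
import com.google.api.gax.rpc.ClientStream;
import com.google.api.gax.rpc.ResponseObserver;
import com.google.cloud.speech.v1.RecognitionConfig;
import com.google.cloud.speech.v1.SpeechClient;
import com.google.cloud.speech.v1.StreamingRecognitionConfig;
import com.google.cloud.speech.v1.StreamingRecognizeRequest;
import com.google.cloud.speech.v1.StreamingRecognizeResponse;
import com.google.protobuf.ByteString;

class SpeechV1Streamer {

    static ClientStream<StreamingRecognizeRequest> start(
            SpeechClient speechClient, ResponseObserver<StreamingRecognizeResponse> observer) {
        RecognitionConfig config = RecognitionConfig.newBuilder()
                .setEncoding(RecognitionConfig.AudioEncoding.LINEAR16) // my mic delivers 16 kHz LINEAR16 PCM
                .setSampleRateHertz(16000)
                .setLanguageCode("en-US")
                .build();
        StreamingRecognitionConfig streamingConfig = StreamingRecognitionConfig.newBuilder()
                .setConfig(config)
                .setInterimResults(true) // also receive partial hypotheses
                .build();

        ClientStream<StreamingRecognizeRequest> stream =
                speechClient.streamingRecognizeCallable().splitCall(observer);
        // First request carries only the configuration
        stream.send(StreamingRecognizeRequest.newBuilder()
                .setStreamingConfig(streamingConfig)
                .build());
        return stream;
    }

    // Called for every microphone buffer
    static void sendAudio(ClientStream<StreamingRecognizeRequest> stream, ByteString audioChunk) {
        stream.send(StreamingRecognizeRequest.newBuilder()
                .setAudioContent(audioChunk)
                .build());
    }
}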

However, when I tried to migrate the code to V2, it did not work properly.
Here is my code (Java):

ResponseObserver<StreamingRecognizeResponse> responseObserver = new ResponseObserver<>() {
    @Override
    public void onStart(StreamController controller) {
        Log.d(TAG, "onStart = " + controller);
    }

    @Override
    public void onResponse(StreamingRecognizeResponse response) {
        Log.d(TAG, "onResponse = ");
    }

    @Override
    public void onComplete() {
        Log.d(TAG, "onComplete = ");
    }

    @Override
    public void onError(Throwable t) {
        Log.d(TAG, "onError = " + t);
    }
};

RecognitionConfig recognitionConfig = RecognitionConfig.newBuilder()
        .addLanguageCodes("en-US")
        .setAutoDecodingConfig(AutoDetectDecodingConfig.newBuilder().build())
        .build();
StreamingRecognitionConfig streamingRecognitionConfig = StreamingRecognitionConfig.newBuilder()
        .setConfig(recognitionConfig)
        .build();
StreamingRecognizeRequest streamingRecognizeRequest = StreamingRecognizeRequest.newBuilder()
        .setStreamingConfig(streamingRecognitionConfig)
        .setRecognizer(recognizer.getName())
        .build();
mClientStream = mSpeechClient.streamingRecognizeCallable().splitCall(responseObserver);
mClientStream.send(streamingRecognizeRequest);

// receive audio buffer continuously
if (mAudioEmitter != null) {
    mAudioEmitter.start((ByteString bytes) -> {
        StreamingRecognizeRequest.Builder sBuilder = StreamingRecognizeRequest.newBuilder()
                .setRecognizerBytes(recognizer.getNameBytes())
                .setAudio(bytes);
        mClientStream.send(sBuilder.build());
    });
}

I realized that one of the differences between V1 and V2 is the Recognizer object, so I set that parameter and made sure it is correct.
But it still doesn't work: onStart() is called, but onResponse() never fires.
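
For reference, this is how I build the recognizer resource name that I pass to setRecognizer() (the IDs are placeholders for my real project and recognizer):

// Placeholder IDs; replace with your own project and recognizer
String projectId = "my-project";
String recognizerId = "my-recognizer";

// Fully qualified resource name expected by setRecognizer(...)
String recognizerName = String.format(
        "projects/%s/locations/global/recognizers/%s", projectId, recognizerId);

As far as I can tell, V2 also accepts the implicit "_" recognizer (projects/PROJECT/locations/global/recognizers/_) when the configuration is sent inline, but I have been testing with an explicitly created recognizer.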

Also, there are no samples for audio input (microphone) in the V2 developer guides; they all cover audio file recognition.

Is there any restriction on V2?

Thanks

4 REPLIES

Hello, have you managed to migrate your code from version 1 to version 2 of the Google Cloud Speech-to-Text API for streaming audio from a microphone? Or have you found any examples that were helpful in this process?

I am also interested in whether streaming speech recognition on an audio stream is possible in V2.

I think this is a common question. I found a similar one on Stack Overflow here.

I am interested too, and I also posted a new topic here in the Google Cloud Community.

Hello, I finally solved this. Of course, this is my own code, so you will probably just need to pick out the relevant parts and adapt them.

package com.example.demo.scheduler;

import com.google.api.gax.rpc.ClientStream;
import com.google.api.gax.rpc.ResponseObserver;
import com.google.api.gax.rpc.StreamController;
import com.google.cloud.speech.v2.*;
import com.google.protobuf.ByteString;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.stereotype.Component;
import org.springframework.web.socket.*;
import org.springframework.web.socket.handler.BinaryWebSocketHandler;

import java.io.*;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

@Component
public class AudioWebSocketHandler extends BinaryWebSocketHandler {

    private static final Logger logger = LoggerFactory.getLogger(AudioWebSocketHandler.class);

    private final String projectId = "[PROJECT_ID]"; // Masked sensitive project ID
    private final String recognizerId = "[RECOGNIZER_ID]"; // Masked sensitive recognizer ID
    private final Map<String, ClientStream<StreamingRecognizeRequest>> clientStreams = new ConcurrentHashMap<>();
    private final Map<String, StringBuilder> transcriptBuilders = new ConcurrentHashMap<>();

    @Override
    public void afterConnectionEstablished(WebSocketSession session) {
        logger.info("WebSocket connection established: {}", session.getId());
        transcriptBuilders.put(session.getId(), new StringBuilder());

        try {
            SpeechClient speechClient = SpeechClient.create();

            // Check recognizer resource (e.g., "projects/<PROJECT>/locations/global/recognizers/<ID>")
            String recognizerName = String.format(
                    "projects/%s/locations/global/recognizers/%s",
                    projectId, recognizerId
            );

            // Configure recognition settings (16kHz LINEAR16, mono)
            RecognitionConfig recognitionConfig = RecognitionConfig.newBuilder()
                    .setExplicitDecodingConfig(
                            ExplicitDecodingConfig.newBuilder()
                                    .setEncoding(ExplicitDecodingConfig.AudioEncoding.LINEAR16)
                                    .setSampleRateHertz(16000)
                                    .setAudioChannelCount(1)
                                    .build()
                    )
                    .addLanguageCodes("ko-KR")
                    .build();

            // StreamingRecognitionConfig
            StreamingRecognitionConfig streamingRecognitionConfig = StreamingRecognitionConfig.newBuilder()
                    .setConfig(recognitionConfig)
                    // Add partial results or Voice Activity Timeout settings if needed (see the sketch after this class)
                    .build();

            // Define response observer
            ResponseObserver<StreamingRecognizeResponse> responseObserver = createResponseObserver(session);

            // Prepare for streaming call
            ClientStream<StreamingRecognizeRequest> clientStream =
                    speechClient.streamingRecognizeCallable().splitCall(responseObserver);
            clientStreams.put(session.getId(), clientStream);

            // First request: Recognizer + StreamingConfig settings
            StreamingRecognizeRequest configRequest = StreamingRecognizeRequest.newBuilder()
                    .setRecognizer(recognizerName)
                    .setStreamingConfig(streamingRecognitionConfig)
                    .build();

            clientStream.send(configRequest);

        } catch (Exception e) {
            logger.error("SpeechClient initialization error: {}", e.getMessage(), e);
            handleError(session, "Error occurred during SpeechClient initialization");
        }
    }

    @Override
    protected void handleBinaryMessage(WebSocketSession session, BinaryMessage message) {
        String sessionId = session.getId();
        byte[] pcmBytes = message.getPayload().array(); // Assumes 16kHz Int16 PCM

        ClientStream<StreamingRecognizeRequest> clientStream = clientStreams.get(sessionId);
        if (clientStream != null) {
            try {
                // Send request for each PCM packet
                StreamingRecognizeRequest audioRequest = StreamingRecognizeRequest.newBuilder()
                        .setAudio(ByteString.copyFrom(pcmBytes))
                        .build();
                clientStream.send(audioRequest);

            } catch (Exception e) {
                logger.error("Error occurred while sending audio data: {}", e.getMessage(), e);
            }
        } else {
            logger.warn("No ClientStream found: {}", sessionId);
        }
    }

    private ResponseObserver<StreamingRecognizeResponse> createResponseObserver(WebSocketSession session) {
        String sessionId = session.getId();

        return new ResponseObserver<StreamingRecognizeResponse>() {
            @Override
            public void onStart(StreamController controller) {
                logger.info("onStart - Recognition started (Session: {})", sessionId);
            }

            @Override
            public void onResponse(StreamingRecognizeResponse response) {
                logger.info("onResponse (Session: {})", sessionId);

                if (response.getResultsList().isEmpty()) {
                    return;
                }
                StreamingRecognitionResult result = response.getResultsList().get(0);
                if (result.getAlternativesList().isEmpty()) {
                    return;
                }
                SpeechRecognitionAlternative alternative = result.getAlternativesList().get(0);
                String transcript = alternative.getTranscript();

                try {
                    if (result.getIsFinal()) {
                        // Final result
                        session.sendMessage(new TextMessage("FINAL: " + transcript));
                        transcriptBuilders.get(sessionId).append(transcript).append("\n");
                        saveTranscriptToFile(sessionId);
                    } else {
                        // Interim result
                        session.sendMessage(new TextMessage("INTERIM: " + transcript));
                    }
                } catch (IOException e) {
                    logger.error("Error while sending WebSocket message: {}", e.getMessage(), e);
                }
            }

            @Override
            public void onComplete() {
                logger.info("onComplete - Recognition complete (Session: {})", sessionId);
            }

            @Override
            public void onError(Throwable t) {
                logger.error("onError - Recognition error (Session: {}): {}", sessionId, t.getMessage());
            }
        };
    }

    private void saveTranscriptToFile(String sessionId) {
        try {
            StringBuilder transcriptBuilder = transcriptBuilders.get(sessionId);
            if (transcriptBuilder == null || transcriptBuilder.length() == 0) {
                return;
            }
            File file = new File("src/main/resources/transcripts/" + sessionId + "_recording.txt");
            if (!file.exists()) {
                file.getParentFile().mkdirs();
                file.createNewFile();
            }
            try (BufferedWriter writer = new BufferedWriter(new FileWriter(file, true))) {
                writer.write(transcriptBuilder.toString());
                writer.flush();
                transcriptBuilder.setLength(0);
            }
        } catch (IOException e) {
            logger.error("Error occurred while saving file: {}", e.getMessage());
        }
    }

    @Override
    public void afterConnectionClosed(WebSocketSession session, CloseStatus status) {
        String sessionId = session.getId();
        logger.info("WebSocket connection closed: {} (Status: {})", sessionId, status);

        ClientStream<StreamingRecognizeRequest> clientStream = clientStreams.remove(sessionId);
        if (clientStream != null) {
            // Close stream
            clientStream.closeSend();
        }
        transcriptBuilders.remove(sessionId);
    }

    @Override
    public void handleTransportError(WebSocketSession session, Throwable exception) {
        logger.error("WebSocket transport error occurred: {} (Session ID: {})", exception.getMessage(), session.getId());
        try {
            session.close(CloseStatus.SERVER_ERROR);
        } catch (IOException e) {
            logger.error("Error while closing WebSocket session: {}", e.getMessage());
        }
    }

    private void handleError(WebSocketSession session, String errorMessage) {
        try {
            session.sendMessage(new TextMessage("ERROR: " + errorMessage));
        } catch (IOException e) {
            logger.error("Error while sending error message: {}", e.getMessage());
        }
    }
}
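
About the "partial results or Voice Activity Timeout" comment in the streaming config above: here is a minimal sketch of how those can be switched on with StreamingRecognitionFeatures (the timeout values are placeholders I have not tuned):

// Optional streaming features: interim results plus voice activity events/timeouts
StreamingRecognitionFeatures streamingFeatures = StreamingRecognitionFeatures.newBuilder()
        .setInterimResults(true)            // deliver non-final hypotheses to onResponse()
        .setEnableVoiceActivityEvents(true) // emit speech activity begin/end events
        .setVoiceActivityTimeout(
                StreamingRecognitionFeatures.VoiceActivityTimeout.newBuilder()
                        .setSpeechStartTimeout(com.google.protobuf.Duration.newBuilder().setSeconds(5).build())
                        .setSpeechEndTimeout(com.google.protobuf.Duration.newBuilder().setSeconds(2).build())
                        .build())
        .build();

StreamingRecognitionConfig streamingRecognitionConfig = StreamingRecognitionConfig.newBuilder()
        .setConfig(recognitionConfig)
        .setStreamingFeatures(streamingFeatures)
        .build();

As far as I can tell, if interim results stay disabled, only final results come back, so the INTERIM branch in onResponse() above would simply never trigger.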