Using currentStep to narrate steps of multiple sequences in Vuforia Studio

Aditya1702
15-Moonstone

Hello,

I want to narrate steps in Vuforia Studio on step change, for a PVZ file that has more than one animation sequence.

I'm using the code below, but since there are two sequences inside the same PVZ file, it reads step 1 of the first sequence and step 1 of the second sequence at the same time.

 

Code:

$scope.$on('stepstarted', function (evt, arg1, arg2, arg3) {
    var parsedArg3 = JSON.parse(arg3);

    if ($scope.view.wdg["model-1"].currentStep == 1) {
        let spokenText = $scope.view.wdg["labelStep"].text;
        let msg = new SpeechSynthesisUtterance(spokenText);
        window.speechSynthesis.speak(msg);
    }
    else if ($scope.view.wdg["model-1"].currentStep == 2) {
        let spokenText = $scope.view.wdg["labelStep"].text;
        let msg = new SpeechSynthesisUtterance(spokenText);
        window.speechSynthesis.speak(msg);
    }
});

I want the narration to read the corresponding step of the first sequence when the model is set to the first sequence, and the corresponding step of the second sequence when it is set to the second sequence.

 

Also, please let me know how we can have the narration in a female voice.

 

Thanks in advance.

 

 

1 REPLY

Hi @Aditya1702 ,

because most of these calls are asynchronous, it is better to use directly the step number that is passed to the 'stepstarted' event:

 

//==================================
$scope.app.playAudio = function (mp3) {
  var audio = new Audio(mp3);
  audio.addEventListener('ended', function () {
    console.warn("mp3=" + mp3 + " finished!");
    console.warn("call the next audio from the list");
  }, false);
  // let wdgName = 'label-3';
  // let volume = parseFloat($scope.view.wdg[wdgName].text);
  // audio.volume = volume;
  audio.volume = 0.76;
  audio.play();
};
//==================================
$scope.$on('stepstarted', function (evt, arg1, arg2, arg3) {
  var parsedArg3 = JSON.parse(arg3);
  // console.warn(evt); console.warn(arg1); console.warn(arg2); console.warn(arg3);

  console.log("stepName=" + parsedArg3.stepName);
  console.log("stepDescription=" + parsedArg3.stepDescription);
  console.log("nextStep=" + parsedArg3.nextStep);

  $scope.stepDescription = parsedArg3.stepDescription;
  console.warn("Event: " + evt.name + " arg3 fields, name: " + parsedArg3.stepName +
               " duration(ms): " + parsedArg3.duration + " total steps: " + parsedArg3.totalSteps +
               " step desc: " + parsedArg3.stepDescription);

  $scope.app.playAudio('app/resources/Uploaded/seq_step_' + parsedArg3.stepNumber + '.mp3');
});
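
If you prefer text-to-speech over pre-recorded MP3 files, the same event data can drive a SpeechSynthesisUtterance directly. Because 'stepstarted' carries the data of the sequence that is actually playing, this also avoids the clash you saw when checking currentStep against both sequences. A minimal sketch, assuming your sequence steps were authored with a description text:

// Sketch: narrate the step description delivered by the 'stepstarted' event.
$scope.$on('stepstarted', function (evt, arg1, arg2, arg3) {
  var parsedArg3 = JSON.parse(arg3);
  // fall back to the step name if no description was authored
  var spokenText = parsedArg3.stepDescription || parsedArg3.stepName;
  var msg = new SpeechSynthesisUtterance(spokenText);
  window.speechSynthesis.cancel(); // stop any narration still playing
  window.speechSynthesis.speak(msg);
});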

 

Regarding the voice type: this is not part of Studio but of the JavaScript speech synthesis library that is available on the HoloLens, so you need to check which languages and voices are offered there, e.g.:

e.g.  https://developer.mozilla.org/en-US/docs/Web/API/SpeechSynthesisVoice

https://developer.mozilla.org/en-US/docs/Web/API/SpeechSynthesisVoice/name

An example from there:

 

for (const voice of voices) {
  const option = document.createElement("option");
  option.textContent = `${voice.name} (${voice.lang})`;

  if (voice.default) {
    option.textContent += " — DEFAULT";
  }

  option.setAttribute("data-lang", voice.lang);
  option.setAttribute("data-name", voice.name);
  voiceSelect.appendChild(option);
}
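
On the female-voice question: the Web Speech API does not expose a gender attribute, so you have to select a concrete voice by name, and the names are platform dependent. A minimal sketch; the candidate names below ('Samantha' on iOS/macOS, 'Microsoft Zira' on Windows) are assumptions you should verify against the list that getVoices() prints on your device:

// Sketch: pick a commonly female-sounding voice by name.
// The candidate names are assumptions - verify them with getVoices() on your platform.
function pickFemaleVoice() {
  var voices = window.speechSynthesis.getVoices();
  var candidates = ["Samantha", "Microsoft Zira", "Google US English"];
  for (var i = 0; i < candidates.length; i++) {
    for (var j = 0; j < voices.length; j++) {
      if (voices[j].name.indexOf(candidates[i]) !== -1) { return voices[j]; }
    }
  }
  return null; // not found: caller keeps the default voice
}

var utter = new SpeechSynthesisUtterance("Step 1 started");
var voice = pickFemaleVoice();
if (voice) { utter.voice = voice; }
window.speechSynthesis.speak(utter);

Note that on some browsers getVoices() returns an empty list until the 'voiceschanged' event has fired, so run this only after that event.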

 

I have never used this on HoloLens 2; I tested it on iOS:

 

$scope.app.speak1 = function (msg_txt) {
  console.log('$scope.app.speak(' + msg_txt + ')');

  try {
    var msg = new SpeechSynthesisUtterance();
    var voices = window.speechSynthesis.getVoices();
    var found = 0;

    for (var i = 0; i < voices.length; i++) {
      // this listing is only for debugging purposes
      console.log("voices[" + i + "] = " + voices[i].name + ' (' + voices[i].lang + ')');

      if (voices[i].default) {
        console.log('the last voice is -- DEFAULT');
      }

      // take the first en-US voice (note: check .lang, not .name)
      if (voices[i].lang.includes('en-US')) { found = i; break; }
    }

    msg.voice = voices[found]; // Note: some voices don't support altering params
    // msg.voiceURI = 'native';
    msg.volume = 1; // 0 to 1
    msg.rate = 1;   // 0.1 to 10
    msg.pitch = 1;  // 0 to 2
    msg.text = msg_txt;
    // msg.lang = 'en-US';

    msg.onend = function (e) {
      console.log('Finished in ' + e.elapsedTime + ' seconds.');
    };
    msg.onerror = function (event) {
      console.log('An error has occurred with the speech synthesis: ' + event.error);
    };

    window.speechSynthesis.speak(msg);

  } catch (e_attempt2) {
    console.error("error on attempt2: " + e_attempt2);
  }
};
/////////
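
A quick usage check, e.g. wired to a button's click handler (the 'labelStep' widget name is taken from your snippet above):

// Example call: narrate the current label text with the selected en-US voice.
$scope.app.speak1($scope.view.wdg["labelStep"].text);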

 

As far as I remember, the voices differ between platforms, so you first need to test with different parameters which ones lead to the best results. The code above was only test code where I wanted to check this functionality.

 
