forked from space10-community/conversational-form
-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathvoice-input-output.html
executable file
·269 lines (231 loc) · 9.93 KB
/
voice-input-output.html
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8">
<meta http-equiv="x-ua-compatible" content="ie=edge">
<!-- fix: removed user-scalable=no / minimal-ui — they disable pinch-zoom (WCAG 1.4.4) -->
<meta name="viewport" content="width=device-width, initial-scale=1">
<title>Conversational Form — voice input &amp; output example</title>
<link rel="stylesheet" href="build/conversational-form-docs.min.css">
<!-- local development build — swap in instead of the CDN build below when working on CF itself -->
<!-- <script id="conversational-form-development" src="../dist/conversational-form.min.js"></script>
<link rel="stylesheet" href="../dist/conversational-form.min.css"> -->
<script src="https://cf-4053.kxcdn.com/conversational-form/0.9.6/conversational-form.min.js" crossorigin></script>
</head>
<body>
<main class="content">
<!-- small-screen menu: hamburger + on/off switch; handlers live in the minified docs
     script (window.conversationalFormExamples), so the inline onclick hooks are kept as-is -->
<menu id="small-screen-menu">
<h2>Conversational Form examples</h2>
<!-- toggles Conversational Form on/off for this example -->
<div class="switch-btn">
<label class="switch">
<input type="checkbox" onclick="window.conversationalFormExamples.toggleConversation(event)">
<div class="slider round"></div>
</label>
</div>
<!-- hamburger icon (three strokes drawn as SVG paths) opens/closes the example menu -->
<div class="hamburger-btn" onclick="window.conversationalFormExamples.toggleMenuState(event)">
<svg viewBox="0 0 29 14" version="1.1" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink">
<g stroke="none" stroke-width="1" fill="none" fill-rule="evenodd" stroke-linecap="square">
<g transform="translate(-325.000000, -87.000000)" stroke="#FFFFFF" stroke-width="2">
<g transform="translate(325.000000, 87.000000)">
<path d="M27.4802431,7 L1.23827993,7"></path>
<path d="M27.4802431,1 L1.23827993,1"></path>
<path d="M27.4802431,13 L1.23827993,13"></path>
</g>
</g>
</g>
</svg>
</div>
</menu>
<!-- desktop on/off switch; data-label / data-label-toggled are read by the docs script -->
<div class="switch-btn" id="cf-toggle-btn" data-label="Enable Conversational Form" data-label-toggled="Disable Conversational Form">
<label class="switch">
<input type="checkbox" onclick="window.conversationalFormExamples.toggleConversation(event)">
<div class="slider round"></div>
</label>
</div>
<!-- NOTE(review): "info" is not a valid ARIA role, but the docs stylesheet appears to
     style section[role=info], so it is kept — confirm against the CSS before removing. -->
<section id="info" role="info">
<div class="close-btn" onclick="window.conversationalFormExamples.toggleMenuState()">
<svg viewBox="0 0 22 22" version="1.1" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink">
<g stroke="none" stroke-width="1" fill="none" fill-rule="evenodd" stroke-linecap="square">
<g transform="translate(-328.000000, -83.000000)" stroke="#FFFFFF" stroke-width="2">
<g id="close" transform="translate(329.000000, 84.000000)">
<path d="M19.6371966,19.2779351 L1.08132646,0.722064927"></path>
<path d="M19.4923318,0.722064927 L0.936461672,19.2779351"></path>
</g>
</g>
</g>
</svg>
</div>
<article>
<h1 id="writer">
Voice Control (2/2)
</h1>
<h2>
<!-- fix: invalid </br> closing tags replaced with <br>; "HTML5s" typo; rel added to _blank links -->
Fill out a form using your voice. This example uses HTML5's <a href="https://developer.mozilla.org/en-US/docs/Web/API/SpeechSynthesisUtterance" target="_blank" rel="noopener noreferrer">SpeechSynthesisUtterance</a> and <a href="https://developer.mozilla.org/en-US/docs/Web/API/SpeechRecognition" target="_blank" rel="noopener noreferrer">SpeechRecognition</a> APIs to have <u>text 2 speech</u> and <u>speech 2 text</u>.
<br><br>
Read WIKI <a href="https://github.com/space10-community/conversational-form/wiki/Voice" target="_blank" rel="noopener noreferrer">here</a>
<br><br>
See a simpler example <a href="voice-input.html" target="_self">here</a>
</h2>
</article>
</section>
<section role="form">
<div class="form-outer">
<!-- Conversational Form will auto-run because of attribute "cf-form" -->
<!-- cf-questions holds the question the robot asks for each field -->
<form id="form">
<input
id="123"
name="123"
type="text"
cf-questions="Hello, please tell me your name?"
/>
<!-- fix: radios share name="color" so they behave as one group (and submit one value)
     as plain HTML; CF groups them via the enclosing fieldset either way -->
<fieldset cf-questions="Choose your favourite color, <span style='background: blue;'>blue</span>, <span style='background: red;'>red</span> or <span style='background: yellow;'>yellow</span>">
<input type="radio" name="color" cf-label="blue" value="blue" id="1" />
<input type="radio" name="color" cf-label="red" value="red" id="2" />
<input type="radio" name="color" cf-label="yellow" value="yellow" id="3" />
</fieldset>
</form>
</div>
</section>
<section id="cf-context" role="cf-context" cf-context>
</section>
</main>
<script>
// Voice example glue code.
//
// Hooks Conversational Form (CF) up to the Web Speech API:
//   * SpeechSynthesisUtterance speaks every robot message (text-to-speech)
//   * SpeechRecognition turns the user's voice into input text (speech-to-text)
//
// Called by build/conversational-form-examples.min.js once the page is ready.
function initExample(){
	var dispatcher = new cf.EventDispatcher(),
		synth = null,
		recognition = null,
		msg = null,
		finalTranscript = "", // fix: was an undeclared implicit global in the original
		microphoneInput = null,
		SpeechSynthesisUtterance = null,
		SpeechRecognition = null;

	// Feature-detect the (possibly vendor-prefixed) APIs. Looking names up on
	// window avoids the ReferenceError the original relied on try/catch to swallow.
	SpeechRecognition = window.SpeechRecognition || window.webkitSpeechRecognition || null;
	if(!SpeechRecognition){
		console.log("Example support range: https://developer.mozilla.org/en-US/docs/Web/API/SpeechRecognition#Browser_compatibility");
	}
	SpeechSynthesisUtterance = window.webkitSpeechSynthesisUtterance ||
		window.mozSpeechSynthesisUtterance ||
		window.msSpeechSynthesisUtterance ||
		window.oSpeechSynthesisUtterance ||
		window.SpeechSynthesisUtterance || null;
	if(!SpeechSynthesisUtterance){
		console.log("Example support range: https://developer.mozilla.org/en-US/docs/Web/API/SpeechSynthesisUtterance#Browser_compatibility")
	}

	// here we use https://developer.mozilla.org/en-US/docs/Web/API/Web_Speech_API
	// you can use what ever API you want, ex.: Google Cloud Speech API -> https://cloud.google.com/speech/
	// here we create our custom input (the microphone button CF renders)
	if(SpeechSynthesisUtterance && SpeechRecognition){
		microphoneInput = {
			init: function() {
				// init is called one time, when the custom input is instantiated.
				// load voices \o/
				synth = window.speechSynthesis;
				msg = new SpeechSynthesisUtterance();
				window.speechSynthesis.onvoiceschanged = function(e) {
					var voices = synth.getVoices();
					msg.voice = voices[0]; // <-- Alex
					msg.lang = msg.voice.lang; // change language here
				};
				synth.getVoices();

				// Deactivate the voice input while the robot speaks, so the microphone
				// does not pick up our own text-to-speech output; reactivate when done.
				msg.onstart = function(event) {
					console.log("voice: deactivate 1")
					conversationalForm.userInput.deactivate();
				}
				msg.onend = function(event) {
					conversationalForm.userInput.reactivate();
				}

				// speak every robot response out loud
				dispatcher.addEventListener(cf.ChatListEvents.CHATLIST_UPDATED, function(event){
					var response = event.detail.currentResponse;
					if(response.isRobotResponse){
						// "strippedSesponse" (sic) is the property name shipped in CF 0.9.6
						// (a typo in the library itself); fall back to the corrected spelling
						// in case a later build renames it. Both are the response text with
						// HTML tags stripped out.
						msg.text = response.strippedSesponse || response.strippedResponse || "";
						window.speechSynthesis.speak(msg);
					}
				}, false);
				// do other init stuff, like connect with external APIs ...
			},
			// set awaiting callback, as we will await the speak in this example
			awaitingCallback: true,
			cancelInput: function() {
				// user (or CF) aborted the current voice input — drop any partial result
				// and detach handlers so the pending recognition cannot resolve/reject.
				console.log("voice: CANCEL")
				finalTranscript = "";
				if(recognition){
					recognition.onend = null;
					recognition.onerror = null;
					recognition.stop();
				}
			},
			input: function(resolve, reject, mediaStream) {
				console.log("voice: INPUT")
				// input is called when user is interacting with the CF input button (UserVoiceInput)
				// connect to Speech API (ex. Google Cloud Speech), Watson (https://github.com/watson-developer-cloud/speech-javascript-sdk) or use Web Speech API (like below), resolve with the text returned..
				// using Promise pattern -> https://developer.mozilla.org/en/docs/Web/JavaScript/Reference/Global_Objects/Promise
				// if API fails use reject(result.toString())
				// if API succedes use resolve(result.toString())
				if(recognition)
					recognition.stop();
				recognition = new SpeechRecognition();
				finalTranscript = "";
				recognition.continuous = false; // react only on single input
				recognition.interimResults = false; // we don't care about interim, only final.
				recognition.onresult = function(event) {
					// concatenate every final result into one transcript
					for (var i = event.resultIndex; i < event.results.length; ++i) {
						if (event.results[i].isFinal) {
							finalTranscript += event.results[i][0].transcript;
						}
					}
				}
				recognition.onerror = function(event) {
					reject(event.error);
				}
				recognition.onend = function(event) {
					// only resolve when we actually heard something; an empty end is ignored
					if(finalTranscript && finalTranscript !== ""){
						resolve(finalTranscript);
					}
				}
				recognition.start();
			}
		}
	}

	// start the conversation; microphoneInput stays null when the Web Speech APIs
	// are missing, in which case CF falls back to its regular text input.
	var conversationalForm = window.cf.ConversationalForm.startTheConversation({
		formEl: document.getElementById("form"),
		context: document.getElementById("cf-context"),
		eventDispatcher: dispatcher,
		// add the custom input (microphone)
		microphoneInput: microphoneInput,
		submitCallback: function(){
			// remove Conversational Form
			console.log("voice: Form submitted...", conversationalForm.getFormData(true));
			alert("You made it! Check console for data")
		}
	});

	if(!SpeechRecognition){
		conversationalForm.addRobotChatResponse("SpeechRecognition not supported, so <strong>no</strong> Microphone here.");
	}
	if(!SpeechSynthesisUtterance){
		conversationalForm.addRobotChatResponse("SpeechSynthesisUtterance not supported, so <strong>no</strong> Microphone here.");
	}
}
</script>
<script src="build/conversational-form-examples.min.js" id="examples-script"></script>
<style>
/* demo-only styles for this example page */
.custom-template{
font-size:12px;
color:red;
}
/* space between the stacked inputs in the fallback (non-conversational) form view */
main.content section[role=form] form > input{
margin-bottom: 10px;
}
main.content section[role=form] form > label{
display: block;
}
/* keep each radio button and its label on one line inside the fieldset */
main.content section[role=form] form > fieldset > label, main.content section[role=form] form > fieldset > input{
display: inline-block !important;
width: auto;
}
</style>
</body>
</html>