<pre class='metadata'>
Title: Web Speech API
Level:
Status: CG-DRAFT
Group: audiocg
TR:
URL: https://webaudio.github.io/web-speech-api/
Repository: WebAudio/web-speech-api
Shortname: speech-api
Editor: Evan Liu, Google
Former Editor: André Natal, Mozilla
Former Editor: Glen Shires, Google
Former Editor: Philip Jägenstedt, Google
Former Editor: Hans Wennborg, Google
!Tests: <a href=https://github.com/web-platform-tests/wpt/tree/master/speech-api>web-platform-tests speech-api/</a> (<a href=https://github.com/web-platform-tests/wpt/labels/speech-api>ongoing work</a>)
Abstract: This specification defines a JavaScript API to enable web developers to incorporate speech recognition and synthesis into their web pages.
Abstract: It enables developers to use scripting to generate text-to-speech output and to use speech recognition as an input for forms, continuous dictation and control.
Abstract: The JavaScript API allows web pages to control activation and timing and to handle results and alternatives.
</pre>
<pre class=biblio>
{
"HTMLSPEECH": {
"authors": [
"Michael Bodell",
"Björn Bringert",
"Robert Brown",
"Daniel C. Burnett",
"Deborah Dahl",
"Dan Druta",
"Patrick Ehlen",
"Charles Hemphill",
"Michael Johnston",
"Olli Pettay",
"Satish Sampath",
"Marc Schröder",
"Glen Shires",
"Raj Tumuluri",
"Milan Young"
],
"href": "https://www.w3.org/2005/Incubator/htmlspeech/XGR-htmlspeech-20111206/",
"title": "HTML Speech Incubator Group Final Report"
}
}
</pre>
<h2 id=introduction>Introduction</h2>
<p><em>This section is non-normative.</em></p>
<p>The Web Speech API aims to enable web developers to provide, in a web browser, speech-input and text-to-speech output features that are typically not available when using standard speech-recognition or screen-reader software.
The API itself is agnostic of the underlying speech recognition and synthesis implementation and can support both server-based and client-based/embedded recognition and synthesis.
The API is designed to enable both brief (one-shot) speech input and continuous speech input.
Speech recognition results are provided to the web page as a list of hypotheses, along with other relevant information for each hypothesis.</p>
<p>This specification is a subset of the API defined in the [[HTMLSPEECH|HTML Speech Incubator Group Final Report]].
That report is entirely informative since it is not a standards track document.
All portions of that report may be considered informative with regards to this document, and provide an informative background to this document.
This specification is a fully-functional subset of that report.
Specifically, this subset excludes the underlying transport protocol, the proposed additions to HTML markup, and it defines a simplified subset of the JavaScript API.
This subset supports the majority of use-cases and sample code in the Incubator Group Final Report.
This subset does not preclude future standardization of additions to the markup, API or underlying transport protocols, and indeed the Incubator Report defines a potential roadmap for such future work.</p>
<h2 id=use_cases>Use Cases</h2>
<p><em>This section is non-normative.</em></p>
<p>This specification supports the following use cases, as defined in [[HTMLSPEECH#use-cases|Section 4 of the Incubator Report]].</p>
<ul>
<li>Voice Web Search</li>
<li>Speech Command Interface</li>
<li>Domain Specific Grammars Contingent on Earlier Inputs</li>
<li>Continuous Recognition of Open Dialog</li>
<li>Domain Specific Grammars Filling Multiple Input Fields</li>
<li>Speech UI present when no visible UI need be present</li>
<li>Voice Activity Detection</li>
<li>Temporal Structure of Synthesis to Provide Visual Feedback</li>
<li>Hello World</li>
<li>Speech Translation</li>
<li>Speech Enabled Email Client</li>
<li>Dialog Systems</li>
<li>Multimodal Interaction</li>
<li>Speech Driving Directions</li>
<li>Multimodal Video Game</li>
<li>Multimodal Search</li>
</ul>
<p>To keep the API to a minimum, this specification does not directly support the following use case.
This does not preclude adding support for this as a future API enhancement, and indeed the Incubator report provides a roadmap for doing so.</p>
<ul>
<li>Rerecognition</li>
</ul>
<p>Note that for many usages and implementations, it is possible to avoid the need for Rerecognition by using a larger grammar, or by combining multiple grammars — both of these techniques are supported in this specification.</p>
<h2 id=security>Security and privacy considerations</h2>
<ol>
<li>User agents must only start speech input sessions with explicit, informed user consent.
User consent can include, for example:
<ul>
<li>User click on a visible speech input element which has an obvious graphical representation showing that it will start speech input.</li>
<li>Accepting a permission prompt shown as the result of a call to <code>SpeechRecognition.start</code>.</li>
<li>Consent previously granted to always allow speech input for this web page.</li>
</ul>
</li>
<li>User agents must give the user an obvious indication when audio is being recorded.
<ul>
<li>In a graphical user agent, this could be a mandatory notification displayed by the user agent as part of its chrome and not accessible by the web page.
This could for example be a pulsating/blinking record icon as part of the browser chrome/address bar, an indication in the status bar, an audible notification, or anything else relevant and accessible to the user.
This UI element must also allow the user to stop recording.<br>
<img src="ui-example.png" alt="Example UI recording notification."></li>
<li>In a speech-only user agent, the indication may for example take the form of the system speaking the label of the speech input element, followed by a short beep.</li>
</ul>
</li>
<li>The user agent may also give the user a longer explanation the first time speech input is used, to let the user know what it is and how they can tune their privacy settings to disable speech recording if required.</li>
<li>User agents must obtain explicit and informed user consent before installing on-device speech recognition languages that differ from the user's preferred language or when the user is not connected to an Ethernet or Wi-Fi network.</li>
</ol>
<h3 id="implementation-considerations">Implementation considerations</h3>
<p><em>This section is non-normative.</em></p>
<ol>
<li>Spoken password inputs can be problematic from a security perspective, but it is up to the user to decide if they want to speak their password.</li>
<li>Speech input could potentially be used to eavesdrop on users.
Malicious webpages could use tricks such as hiding the input element or otherwise making the user believe that it has stopped recording speech while continuing to do so.
They could also potentially style the input element to appear as something else and trick the user into clicking it.
An example of styling the file input element can be seen at <a href="https://www.quirksmode.org/dom/inputfile.html">https://www.quirksmode.org/dom/inputfile.html</a>.
The above recommendations are intended to reduce the risk of such attacks.</li>
</ol>
<h2 id="api_description">API Description</h2>
<p><em>This section is normative.</em></p>
<h3 id="speechreco-section">The SpeechRecognition Interface</h3>
<p>The speech recognition interface is the scripted web API for controlling a given recognition.</p>
The term "final result" indicates a SpeechRecognitionResult in which the final attribute is true.
The term "interim result" indicates a SpeechRecognitionResult in which the final attribute is false.
<pre class="idl">
[Exposed=Window]
interface SpeechRecognition : EventTarget {
constructor();
// recognition parameters
attribute SpeechGrammarList grammars;
attribute DOMString lang;
attribute boolean continuous;
attribute boolean interimResults;
attribute unsigned long maxAlternatives;
attribute SpeechRecognitionMode mode;
// methods to drive the speech interaction
undefined start();
undefined start(MediaStreamTrack audioTrack);
undefined stop();
undefined abort();
boolean onDeviceWebSpeechAvailable(DOMString lang);
boolean installOnDeviceSpeechRecognition(DOMString lang);
// event methods
attribute EventHandler onaudiostart;
attribute EventHandler onsoundstart;
attribute EventHandler onspeechstart;
attribute EventHandler onspeechend;
attribute EventHandler onsoundend;
attribute EventHandler onaudioend;
attribute EventHandler onresult;
attribute EventHandler onnomatch;
attribute EventHandler onerror;
attribute EventHandler onstart;
attribute EventHandler onend;
};
enum SpeechRecognitionErrorCode {
"no-speech",
"aborted",
"audio-capture",
"network",
"not-allowed",
"service-not-allowed",
"bad-grammar",
"language-not-supported"
};
enum SpeechRecognitionMode {
"ondevice-preferred", // On-device speech recognition if available, otherwise use Cloud speech recognition as a fallback.
"ondevice-only", // On-device speech recognition only. Returns an error if on-device speech recognition is not available.
"cloud-only", // Cloud speech recognition only.
};
[Exposed=Window]
interface SpeechRecognitionErrorEvent : Event {
constructor(DOMString type, SpeechRecognitionErrorEventInit eventInitDict);
readonly attribute SpeechRecognitionErrorCode error;
readonly attribute DOMString message;
};
dictionary SpeechRecognitionErrorEventInit : EventInit {
required SpeechRecognitionErrorCode error;
DOMString message = "";
};
// Item in N-best list
[Exposed=Window]
interface SpeechRecognitionAlternative {
readonly attribute DOMString transcript;
readonly attribute float confidence;
};
// A complete one-shot simple response
[Exposed=Window]
interface SpeechRecognitionResult {
readonly attribute unsigned long length;
getter SpeechRecognitionAlternative item(unsigned long index);
readonly attribute boolean isFinal;
};
// A collection of responses (used in continuous mode)
[Exposed=Window]
interface SpeechRecognitionResultList {
readonly attribute unsigned long length;
getter SpeechRecognitionResult item(unsigned long index);
};
// A full response, which could be interim or final, part of a continuous response or not
[Exposed=Window]
interface SpeechRecognitionEvent : Event {
constructor(DOMString type, SpeechRecognitionEventInit eventInitDict);
readonly attribute unsigned long resultIndex;
readonly attribute SpeechRecognitionResultList results;
};
dictionary SpeechRecognitionEventInit : EventInit {
unsigned long resultIndex = 0;
required SpeechRecognitionResultList results;
};
// The object representing a speech grammar
[Exposed=Window]
interface SpeechGrammar {
attribute DOMString src;
attribute float weight;
};
// The object representing a speech grammar collection
[Exposed=Window]
interface SpeechGrammarList {
constructor();
readonly attribute unsigned long length;
getter SpeechGrammar item(unsigned long index);
undefined addFromURI(DOMString src,
optional float weight = 1.0);
undefined addFromString(DOMString string,
optional float weight = 1.0);
};
</pre>
<h4 id="speechreco-attributes">SpeechRecognition Attributes</h4>
<dl>
<dt><dfn attribute for=SpeechRecognition>grammars</dfn> attribute</dt>
<dd>The grammars attribute stores the collection of SpeechGrammar objects which represent the grammars that are active for this recognition.</dd>
<dt><dfn attribute for=SpeechRecognition>lang</dfn> attribute</dt>
<dd>This attribute will set the language of the recognition for the request, using a valid BCP 47 language tag. [[!BCP47]]
If unset, it remains unset when read in script, but will default to the <a spec=html>language</a> of the HTML document root element and associated hierarchy.
This default value is computed and used when the input request opens a connection to the recognition service.</dd>
<dt><dfn attribute for=SpeechRecognition>continuous</dfn> attribute</dt>
<dd>When the continuous attribute is set to false, the user agent must return no more than one final result in response to starting recognition,
for example a single turn pattern of interaction.
When the continuous attribute is set to true, the user agent must return zero or more final results representing multiple consecutive recognitions in response to starting recognition,
for example a dictation.
The default value must be false. Note, this attribute setting does not affect interim results.</dd>
<dt><dfn attribute for=SpeechRecognition>interimResults</dfn> attribute</dt>
<dd>Controls whether interim results are returned.
When set to true, interim results should be returned.
When set to false, interim results must not be returned.
The default value must be false. Note, this attribute setting does not affect final results.</dd>
<dt><dfn attribute for=SpeechRecognition>maxAlternatives</dfn> attribute</dt>
<dd>This attribute will set the maximum number of {{SpeechRecognitionAlternative}}s per result.
The default value is 1.</dd>
<dt><dfn attribute for=SpeechRecognition>mode</dfn> attribute</dt>
<dd>An enum to determine where speech recognition takes place. The default value is "ondevice-preferred".</dd>
</dl>
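<div class="example">
<p>The following non-normative sketch shows how a web application might configure these attributes before starting recognition. The specific values chosen here are illustrative only.</p>
<pre class="lang-javascript">
var recognition = new SpeechRecognition();
recognition.lang = "en-US";               // BCP 47 language tag
recognition.continuous = true;            // return multiple consecutive final results
recognition.interimResults = true;        // also deliver interim (non-final) results
recognition.maxAlternatives = 3;          // up to 3 alternatives per result
recognition.mode = "ondevice-preferred";  // prefer on-device recognition when available
recognition.start();
</pre>
</div>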
<p class=issue>The group has discussed whether WebRTC might be used to specify selection of audio sources and remote recognizers.
See <a href="https://lists.w3.org/Archives/Public/public-speech-api/2012Sep/0072.html">Interacting with WebRTC, the Web Audio API and other external sources</a> thread on [email protected].</p>
<h4 id="speechreco-methods">SpeechRecognition Methods</h4>
<dl>
<dt><dfn method for=SpeechRecognition>start()</dfn> method</dt>
<dd>When the start method is called it represents the moment in time the web application wishes to begin recognition.
When the speech input is streaming live through the input media stream, then this start call represents the moment in time that the service must begin to listen and try to match the grammars associated with this request.
Once the system is successfully listening to the recognition the user agent must raise a start event.
If the start method is called on an already started object (that is, start has previously been called, and no <a event for=SpeechRecognition>error</a> or <a event for=SpeechRecognition>end</a> event has fired on the object), the user agent must throw an "{{InvalidStateError!!exception}}" {{DOMException}} and ignore the call.</dd>
<dt><dfn method for=SpeechRecognition>start({{MediaStreamTrack}} audioTrack)</dfn> method</dt>
<dd>The overloaded start method does the same thing as the parameterless start method except it performs speech recognition on the provided {{MediaStreamTrack}} instead of the input media stream. If the {{MediaStreamTrack/kind}} attribute of the {{MediaStreamTrack}} is not "audio" or the {{MediaStreamTrack/readyState}} attribute is not "live", the user agent must throw an "{{InvalidStateError!!exception}}" {{DOMException}} and ignore the call.</dd>
<dt><dfn method for=SpeechRecognition>stop()</dfn> method</dt>
<dd>The stop method represents an instruction to the recognition service to stop listening to more audio, and to try and return a result using just the audio that it has already received for this recognition.
A typical use of the stop method might be for a web application where the end user is doing the end pointing, similar to a walkie-talkie.
The end user might press and hold the space bar to talk to the system: when the space bar is pressed the start call occurs, and when the space bar is released the stop method is called to ensure that the system is no longer listening to the user.
Once the stop method is called the speech service must not collect additional audio and must not continue to listen to the user.
The speech service must attempt to return a recognition result (or a nomatch) based on the audio that it has already collected for this recognition.
If the stop method is called on an object which is already stopped or being stopped (that is, start was never called on it, the <a event for=SpeechRecognition>end</a> or <a event for=SpeechRecognition>error</a> event has fired on it, or stop was previously called on it), the user agent must ignore the call.</dd>
<dt><dfn method for=SpeechRecognition>abort()</dfn> method</dt>
<dd>The abort method is a request to immediately stop listening and stop recognizing, and to return no information other than an indication that the system is done.
When the abort method is called, the speech service must stop recognizing.
The user agent must raise an <a event for=SpeechRecognition>end</a> event once the speech service is no longer connected.
If the abort method is called on an object which is already stopped or aborting (that is, start was never called on it, the <a event for=SpeechRecognition>end</a> or <a event for=SpeechRecognition>error</a> event has fired on it, or abort was previously called on it), the user agent must ignore the call.</dd>
<dt><dfn method for=SpeechRecognition>onDeviceWebSpeechAvailable({{DOMString}} lang)</dfn> method</dt>
<dd>The onDeviceWebSpeechAvailable method returns a boolean indicating whether on-device speech recognition is available for a given BCP 47 language tag. [[!BCP47]] The method returns true if on-device speech recognition is available for the given BCP 47 language tag and false otherwise.</dd>
<dt><dfn method for=SpeechRecognition>installOnDeviceSpeechRecognition({{DOMString}} lang)</dfn> method</dt>
<dd>The installOnDeviceSpeechRecognition method returns a boolean indicating whether the installation of on-device speech recognition for a given BCP 47 language tag was initiated successfully. [[!BCP47]] Any website can automatically trigger a download of a new language pack if the user is connected to an Ethernet or Wi-Fi network and the language pack matches the user's preferred language. All sites must prompt the user if they wish to trigger a download over a cellular network or to download a language pack that does not match the user's preferred language.</dd>
</dl>
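<div class="example">
<p>A non-normative sketch of driving a recognition session with the methods above, using a push-to-talk pattern. The <code>startFromTrack</code> and <code>cancelRecognition</code> helpers are hypothetical, and obtaining a {{MediaStreamTrack}} via getUserMedia assumes the page can get microphone access.</p>
<pre class="lang-javascript">
var recognition = new SpeechRecognition();

// Push-to-talk: start listening while the space bar is held, stop on release.
document.addEventListener("keydown", function(e) {
  if (e.code === "Space" && !e.repeat) recognition.start();
});
document.addEventListener("keyup", function(e) {
  if (e.code === "Space") recognition.stop();
});

// Alternatively, recognize audio from an explicit MediaStreamTrack
// instead of the default input (hypothetical helper, not called above).
function startFromTrack() {
  navigator.mediaDevices.getUserMedia({ audio: true }).then(function(stream) {
    // The track must be of kind "audio" and in the "live" state.
    recognition.start(stream.getAudioTracks()[0]);
  });
}

// Abandon the session without waiting for a result.
function cancelRecognition() {
  recognition.abort();
}
</pre>
</div>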
<h4 id="speechreco-events">SpeechRecognition Events</h4>
<p>The DOM Level 2 Event Model is used for speech recognition events.
The methods in the EventTarget interface should be used for registering event listeners.
The SpeechRecognition interface also contains convenience attributes for registering a single event handler for each event type.
These events do not bubble and are not cancelable.</p>
<p>For all these events, the timeStamp attribute defined in the DOM Level 2 Event interface must be set to the best possible estimate of when the real-world event which the event object represents occurred.
This timestamp must be represented in the user agent's view of time, even for events that correspond to processing on a different machine, such as a remote recognition service (for example, a <a event for=SpeechRecognition>speechend</a> event produced by a remote speech endpointer).</p>
<p>Unless specified below, the ordering of the different events is undefined.
For example, some implementations may fire <a event for=SpeechRecognition>audioend</a> before <a event for=SpeechRecognition>speechstart</a> or <a event for=SpeechRecognition>speechend</a> if the audio detector is client-side and the speech detector is server-side.</p>
<dl>
<dt><dfn event for=SpeechRecognition>audiostart</dfn> event</dt>
<dd>Fired when the user agent has started to capture audio.</dd>
<dt><dfn event for=SpeechRecognition>soundstart</dfn> event</dt>
<dd>Fired when some sound, possibly speech, has been detected.
This must be fired with low latency, e.g. by using a client-side energy detector.
The <a event for=SpeechRecognition>audiostart</a> event must always have been fired before the soundstart event.</dd>
<dt><dfn event for=SpeechRecognition>speechstart</dfn> event</dt>
<dd>Fired when the speech that will be used for speech recognition has started.
The <a event for=SpeechRecognition>audiostart</a> event must always have been fired before the speechstart event.</dd>
<dt><dfn event for=SpeechRecognition>speechend</dfn> event</dt>
<dd>Fired when the speech that will be used for speech recognition has ended.
The <a event for=SpeechRecognition>speechstart</a> event must always have been fired before speechend.</dd>
<dt><dfn event for=SpeechRecognition>soundend</dfn> event</dt>
<dd>Fired when some sound is no longer detected.
This must be fired with low latency, e.g. by using a client-side energy detector.
The <a event for=SpeechRecognition>soundstart</a> event must always have been fired before soundend.</dd>
<dt><dfn event for=SpeechRecognition>audioend</dfn> event</dt>
<dd>Fired when the user agent has finished capturing audio.
The <a event for=SpeechRecognition>audiostart</a> event must always have been fired before audioend.</dd>
<dt><dfn event for=SpeechRecognition>result</dfn> event</dt>
<dd>Fired when the speech recognizer returns a result.
The event must use the {{SpeechRecognitionEvent}} interface.
The <a event for=SpeechRecognition>audiostart</a> event must always have been fired before the result event.</dd>
<dt><dfn event for=SpeechRecognition>nomatch</dfn> event</dt>
<dd>Fired when the speech recognizer returns a final result with no recognition hypothesis that meets or exceeds the confidence threshold.
The event must use the {{SpeechRecognitionEvent}} interface.
The {{SpeechRecognitionEvent/results}} attribute in the event may contain speech recognition results that are below the confidence threshold or may be null.
The <a event for=SpeechRecognition>audiostart</a> event must always have been fired before the nomatch event.</dd>
<dt><dfn event for=SpeechRecognition>error</dfn> event</dt>
<dd>Fired when a speech recognition error occurs.
The event must use the {{SpeechRecognitionErrorEvent}} interface.</dd>
<dt><dfn event for=SpeechRecognition>start</dfn> event</dt>
<dd>Fired when the recognition service has begun to listen to the audio with the intention of recognizing.</dd>
<dt><dfn event for=SpeechRecognition>end</dfn> event</dt>
<dd>Fired when the service has disconnected.
The event must always be generated when the session ends no matter the reason for the end.</dd>
</dl>
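<div class="example">
<p>A non-normative sketch showing how listeners for these events can be registered, either through <code>addEventListener</code> or through the corresponding event handler attributes.</p>
<pre class="lang-javascript">
var recognition = new SpeechRecognition();

recognition.addEventListener("start", function() {
  console.log("recognition service is listening");
});
recognition.onspeechstart = function() { console.log("speech detected"); };
recognition.onspeechend = function() { console.log("speech ended"); };
recognition.onend = function() { console.log("session ended"); };

recognition.start();
</pre>
</div>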
<h4 id="speechreco-error">SpeechRecognitionErrorEvent</h4>
<p>The {{SpeechRecognitionErrorEvent}} interface is used for the <a event for=SpeechRecognition>error</a> event.</p>
<dl>
<dt><dfn attribute for=SpeechRecognitionErrorEvent>error</dfn> attribute</dt>
<dd>The error attribute is an enumeration indicating what has gone wrong.
The values are:
<dl>
<dt><dfn enum-value for=SpeechRecognitionErrorCode>"no-speech"</dfn></dt>
<dd>No speech was detected.</dd>
<dt><dfn enum-value for=SpeechRecognitionErrorCode>"aborted"</dfn></dt>
<dd>Speech input was aborted somehow, maybe by some user-agent-specific behavior such as UI that lets the user cancel speech input.</dd>
<dt><dfn enum-value for=SpeechRecognitionErrorCode>"audio-capture"</dfn></dt>
<dd>Audio capture failed.</dd>
<dt><dfn enum-value for=SpeechRecognitionErrorCode>"network"</dfn></dt>
<dd>Some network communication that was required to complete the recognition failed.</dd>
<dt><dfn enum-value for=SpeechRecognitionErrorCode>"not-allowed"</dfn></dt>
<dd>The user agent is not allowing any speech input to occur for reasons of security, privacy or user preference.</dd>
<dt><dfn enum-value for=SpeechRecognitionErrorCode>"service-not-allowed"</dfn></dt>
<dd>The user agent is not allowing the speech service requested by the web application to be used (but would allow some speech service), either because the user agent doesn't support the selected one or for reasons of security, privacy or user preference.</dd>
<dt><dfn enum-value for=SpeechRecognitionErrorCode>"bad-grammar"</dfn></dt>
<dd>There was an error in the speech recognition grammar or semantic tags, or the grammar format or semantic tag format is unsupported.</dd>
<dt><dfn enum-value for=SpeechRecognitionErrorCode>"language-not-supported"</dfn></dt>
<dd>The language was not supported.</dd>
</dl>
</dd>
<dt><dfn attribute for=SpeechRecognitionErrorEvent>message</dfn> attribute</dt>
<dd>The message content is implementation specific.
This attribute is primarily intended for debugging and developers should not use it directly in their application user interface.</dd>
</dl>
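<div class="example">
<p>A non-normative sketch of handling the <a event for=SpeechRecognition>error</a> event. The <code>statusElement</code> used for user-visible feedback is a hypothetical page element.</p>
<pre class="lang-javascript">
recognition.onerror = function(event) {
  switch (event.error) {
    case "no-speech":
      statusElement.textContent = "No speech was detected. Please try again.";
      break;
    case "not-allowed":
    case "service-not-allowed":
      statusElement.textContent = "Speech input is not allowed.";
      break;
    default:
      // event.message is implementation specific and intended for debugging only.
      console.error("Recognition error:", event.error, event.message);
  }
};
</pre>
</div>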
<h4 id="speechreco-alternative">SpeechRecognitionAlternative</h4>
<p>The SpeechRecognitionAlternative represents a simple view of the response that gets used in an n-best list.</p>
<dl>
<dt><dfn attribute for=SpeechRecognitionAlternative>transcript</dfn> attribute</dt>
<dd>The transcript string represents the raw words that the user spoke.
For continuous recognition, leading or trailing whitespace MUST be included where necessary such that concatenation of consecutive SpeechRecognitionResults produces a proper transcript of the session.</dd>
<dt><dfn attribute for=SpeechRecognitionAlternative>confidence</dfn> attribute</dt>
<dd>The confidence represents a numeric estimate between 0 and 1 of how confident the recognition system is that the recognition is correct.
A higher number means the system is more confident.
<p class=issue>The group has discussed whether confidence can be specified in a speech-recognition-engine-independent manner and whether confidence threshold and nomatch should be included, because this is not a dialog API.
See <a href="https://lists.w3.org/Archives/Public/public-speech-api/2012Jun/0143.html">Confidence property</a> thread on [email protected].</p></dd>
</dl>
<h4 id="speechreco-result">SpeechRecognitionResult</h4>
<p>The SpeechRecognitionResult object represents a single one-shot recognition match, either as one small part of a continuous recognition or as the complete return result of a non-continuous recognition.</p>
<dl>
<dt><dfn attribute for=SpeechRecognitionResult>length</dfn> attribute</dt>
<dd>The length attribute represents how many n-best alternatives are represented in the item array.</dd>
<dt><dfn method for=SpeechRecognitionResult>item(<var>index</var>)</dfn> getter</dt>
<dd>The item getter returns a SpeechRecognitionAlternative from the index into an array of n-best values.
If index is greater than or equal to length, this returns null.
The user agent must ensure that the length attribute is set to the number of elements in the array.
The user agent must ensure that the n-best list is sorted in non-increasing confidence order (the confidence of each element must be less than or equal to that of the preceding elements).</dd>
<dt><dfn attribute for=SpeechRecognitionResult>isFinal</dfn> attribute</dt>
<dd>The final boolean must be set to true if this is the final time the speech service will return this particular index value.
If the value is false, then this represents an interim result that could still be changed.</dd>
</dl>
<h4 id="speechreco-resultlist">SpeechRecognitionResultList</h4>
<p>The SpeechRecognitionResultList object holds a sequence of recognition results representing the complete return result of a continuous recognition.
For a non-continuous recognition it will hold only a single value.</p>
<dl>
<dt><dfn attribute for=SpeechRecognitionResultList>length</dfn> attribute</dt>
<dd>The length attribute indicates how many results are represented in the item array.</dd>
<dt><dfn method for=SpeechRecognitionResultList>item(<var>index</var>)</dfn> getter</dt>
<dd>The item getter returns a SpeechRecognitionResult from the index into an array of result values.
If index is greater than or equal to length, this returns null.
The user agent must ensure that the length attribute is set to the number of elements in the array.</dd>
</dl>
<h4 id="speechreco-event">SpeechRecognitionEvent</h4>
<p>The SpeechRecognitionEvent is the event that is raised each time there are any changes to interim or final results.</p>
<dl>
<dt><dfn attribute for=SpeechRecognitionEvent>resultIndex</dfn> attribute</dt>
<dd>The resultIndex must be set to the lowest index in the "results" array that has changed.</dd>
<dt><dfn attribute for=SpeechRecognitionEvent>results</dfn> attribute</dt>
<dd>The array of all current recognition results for this session.
Specifically all final results that have been returned, followed by the current best hypothesis for all interim results.
It must consist of zero or more final results followed by zero or more interim results.
On subsequent SpeechRecognitionEvent events, interim results may be overwritten by a newer interim result or by a final result, or may be removed (when at the end of the "results" array and the array length decreases).
Final results must not be overwritten or removed.
All entries for indexes less than resultIndex must be identical to the array that was present when the last SpeechRecognitionEvent was raised.
All array entries (if any) for indexes equal to or greater than resultIndex that were present in the array when the last SpeechRecognitionEvent was raised are removed and overwritten with new results.
The length of the "results" array may increase or decrease, but must not be less than resultIndex.
Note that when resultIndex equals results.length, no new results are returned; this may occur when the array length decreases to remove one or more interim results.</dd>
</dl>
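<div class="example">
<p>A non-normative sketch of consuming the results array: entries before resultIndex are unchanged since the previous event, so only the entries from resultIndex onwards need to be re-examined.</p>
<pre class="lang-javascript">
var finalTranscript = "";

recognition.onresult = function(event) {
  var interimTranscript = "";
  for (var i = event.resultIndex; i < event.results.length; ++i) {
    if (event.results[i].isFinal) {
      // Final results are appended once; they will not be overwritten or removed.
      finalTranscript += event.results[i][0].transcript;
    } else {
      // Interim results may change or disappear in later events.
      interimTranscript += event.results[i][0].transcript;
    }
  }
  console.log("final:", finalTranscript, "interim:", interimTranscript);
};
</pre>
</div>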
<h4 id="speechreco-speechgrammar">SpeechGrammar</h4>
<p>The SpeechGrammar object represents a container for a grammar.</p>
<p class=issue>The group has discussed options for which grammar formats should be supported, how builtin grammar types are specified, and default grammars when not specified.
See <a href="https://lists.w3.org/Archives/Public/public-speech-api/2012Jun/0179.html">Default value of SpeechRecognition.grammars</a> thread on [email protected].</p>
<p>This structure has the following attributes:</p>
<dl>
<dt><dfn attribute for=SpeechGrammar>src</dfn> attribute</dt>
<dd>The required src attribute is the URI for the grammar.
Note some services may support builtin grammars that can be specified using a builtin URI scheme.</dd>
<dt><dfn attribute for=SpeechGrammar>weight</dfn> attribute</dt>
<dd>The optional weight attribute controls the weight that the speech recognition service should use with this grammar.
By default, a grammar has a weight of 1.
Larger weight values positively weight the grammar while smaller weight values make the grammar weighted less strongly.</dd>
</dl>
<h4 id="speechreco-speechgrammarlist">SpeechGrammarList</h4>
<p>The SpeechGrammarList object represents a collection of SpeechGrammar objects.
This structure has the following attributes:</p>
<dl>
<dt><dfn attribute for=SpeechGrammarList>length</dfn> attribute</dt>
<dd>The length attribute represents how many grammars are currently in the array.</dd>
<dt><dfn method for=SpeechGrammarList>item(<var>index</var>)</dfn> getter</dt>
<dd>The item getter returns a SpeechGrammar from the index into an array of grammars.
The user agent must ensure that the length attribute is set to the number of elements in the array.
The user agent must ensure that the index order from smallest to largest matches the order in which grammars were added to the array.</dd>
<dt><dfn method for=SpeechGrammarList>addFromURI(<var>src</var>, <var>weight</var>)</dfn> method</dt>
<dd>This method appends a grammar to the grammars array based on a URI.
The URI for the grammar is specified by the <var>src</var> parameter.
Note, some services may support builtin grammars that can be specified by URI.
The <var>weight</var> parameter represents this grammar's weight relative to the other grammars.</dd>
<dt><dfn method for=SpeechGrammarList>addFromString(<var>string</var>, <var>weight</var>)</dfn> method</dt>
<dd>This method appends a grammar to the grammars array based on text.
The content of the grammar is specified by the <var>string</var> parameter.
This content should be encoded into a data: URI when the SpeechGrammar object is created.
The <var>weight</var> parameter represents this grammar's weight relative to the other grammars.</dd>
</dl>
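<div class="example">
<p>A non-normative sketch of building a grammar list. The grammar URI is a placeholder and <code>inlineGrammarText</code> is a hypothetical string holding grammar source text; which grammar formats and builtin URI schemes are accepted depends on the recognition service.</p>
<pre class="lang-javascript">
var recognition = new SpeechRecognition();
var grammars = new SpeechGrammarList();

// Reference a grammar by URI (placeholder URL), with a higher weight.
grammars.addFromURI("https://example.com/grammars/colors.grxml", 2.0);

// Provide a grammar inline as text; the content is stored as a data: URI.
grammars.addFromString(inlineGrammarText, 1.0);

recognition.grammars = grammars;
recognition.start();
</pre>
</div>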
<h3 id="tts-section">The SpeechSynthesis Interface</h3>
<p>The SpeechSynthesis interface is the scripted web API for controlling a text-to-speech output.</p>
<pre class="idl">
[Exposed=Window]
interface SpeechSynthesis : EventTarget {
readonly attribute boolean pending;
readonly attribute boolean speaking;
readonly attribute boolean paused;
attribute EventHandler onvoiceschanged;
undefined speak(SpeechSynthesisUtterance utterance);
undefined cancel();
undefined pause();
undefined resume();
sequence<SpeechSynthesisVoice> getVoices();
};
partial interface Window {
[SameObject] readonly attribute SpeechSynthesis speechSynthesis;
};
[Exposed=Window]
interface SpeechSynthesisUtterance : EventTarget {
constructor(optional DOMString text);
attribute DOMString text;
attribute DOMString lang;
attribute SpeechSynthesisVoice? voice;
attribute float volume;
attribute float rate;
attribute float pitch;
attribute EventHandler onstart;
attribute EventHandler onend;
attribute EventHandler onerror;
attribute EventHandler onpause;
attribute EventHandler onresume;
attribute EventHandler onmark;
attribute EventHandler onboundary;
};
[Exposed=Window]
interface SpeechSynthesisEvent : Event {
constructor(DOMString type, SpeechSynthesisEventInit eventInitDict);
readonly attribute SpeechSynthesisUtterance utterance;
readonly attribute unsigned long charIndex;
readonly attribute unsigned long charLength;
readonly attribute float elapsedTime;
readonly attribute DOMString name;
};
dictionary SpeechSynthesisEventInit : EventInit {
required SpeechSynthesisUtterance utterance;
unsigned long charIndex = 0;
unsigned long charLength = 0;
float elapsedTime = 0;
DOMString name = "";
};
enum SpeechSynthesisErrorCode {
"canceled",
"interrupted",
"audio-busy",
"audio-hardware",
"network",
"synthesis-unavailable",
"synthesis-failed",
"language-unavailable",
"voice-unavailable",
"text-too-long",
"invalid-argument",
"not-allowed",
};
[Exposed=Window]
interface SpeechSynthesisErrorEvent : SpeechSynthesisEvent {
constructor(DOMString type, SpeechSynthesisErrorEventInit eventInitDict);
readonly attribute SpeechSynthesisErrorCode error;
};
dictionary SpeechSynthesisErrorEventInit : SpeechSynthesisEventInit {
required SpeechSynthesisErrorCode error;
};
[Exposed=Window]
interface SpeechSynthesisVoice {
readonly attribute DOMString voiceURI;
readonly attribute DOMString name;
readonly attribute DOMString lang;
readonly attribute boolean localService;
readonly attribute boolean default;
};
</pre>
<h4 id="tts-attributes">SpeechSynthesis Attributes</h4>
<dl>
<dt><dfn attribute for=SpeechSynthesis>pending</dfn> attribute</dt>
<dd>This attribute is true if the queue for the global SpeechSynthesis instance contains any utterances which have not started speaking.</dd>
<dt><dfn attribute for=SpeechSynthesis>speaking</dfn> attribute</dt>
<dd>This attribute is true if an utterance is being spoken.
Specifically if an utterance has begun being spoken and has not completed being spoken.
This is independent of whether the global SpeechSynthesis instance is in the paused state.</dd>
<dt><dfn attribute for=SpeechSynthesis>paused</dfn> attribute</dt>
<dd>This attribute is true when the global SpeechSynthesis instance is in the paused state.
This state is independent of whether anything is in the queue.
The default state of the global SpeechSynthesis instance for a new window is the non-paused state.</dd>
</dl>
<h4 id="tts-methods">SpeechSynthesis Methods</h4>
<dl>
<dt><dfn method for=SpeechSynthesis>speak(<var>utterance</var>)</dfn> method</dt>
<dd>This method appends the SpeechSynthesisUtterance object <var>utterance</var> to the end of the queue for the global SpeechSynthesis instance.
It does not change the paused state of the SpeechSynthesis instance.
If the SpeechSynthesis instance is paused, it remains paused.
If it is not paused and no other utterances are in the queue, then this utterance is spoken immediately,
else this utterance is queued to begin speaking after the other utterances in the queue have been spoken.
If changes are made to the SpeechSynthesisUtterance object after calling this method and prior to the corresponding <a event for=SpeechSynthesisUtterance>end</a> or <a event for=SpeechSynthesisUtterance>error</a> event,
it is not defined whether those changes will affect what is spoken, and those changes may cause an error to be returned.
The SpeechSynthesis object takes exclusive ownership of the SpeechSynthesisUtterance object.
Passing it as a speak() argument to another SpeechSynthesis object should throw an exception.
(For example, two frames may have the same origin and each will contain a SpeechSynthesis object.)</dd>
<dt><dfn method for=SpeechSynthesis>cancel()</dfn> method</dt>
<dd>This method removes all utterances from the queue.
If an utterance is being spoken, speaking ceases immediately.
This method does not change the paused state of the global SpeechSynthesis instance.</dd>
<dt><dfn method for=SpeechSynthesis>pause()</dfn> method</dt>
<dd>This method puts the global SpeechSynthesis instance into the paused state.
If an utterance was being spoken, it pauses mid-utterance.
(If called when the SpeechSynthesis instance was already in the paused state, it does nothing.)</dd>
<dt><dfn method for=SpeechSynthesis>resume()</dfn> method</dt>
<dd>This method puts the global SpeechSynthesis instance into the non-paused state.
If an utterance was speaking, it continues speaking the utterance at the point at which it was paused, else it begins speaking the next utterance in the queue (if any).
(If called when the SpeechSynthesis instance was already in the non-paused state, it does nothing.)</dd>
<dt><dfn method for=SpeechSynthesis>getVoices()</dfn> method</dt>
<dd>This method returns the available voices.
It is user agent dependent which voices are available.
If there are no voices available, or if the list of available voices is not yet known (for example: server-side synthesis where the list is determined asynchronously),
then this method must return a sequence of length zero.</dd>
</dl>
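<div class="example">
<p>A non-normative sketch of queueing and controlling utterances. The pause, resume and stop buttons are hypothetical page elements.</p>
<pre class="lang-javascript">
var synth = window.speechSynthesis;

synth.speak(new SpeechSynthesisUtterance("First sentence."));
synth.speak(new SpeechSynthesisUtterance("Second sentence.")); // queued behind the first

pauseButton.onclick = function() { synth.pause(); };   // pauses mid-utterance
resumeButton.onclick = function() { synth.resume(); }; // continues from the pause point
stopButton.onclick = function() { synth.cancel(); };   // empties the queue and stops speaking
</pre>
</div>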
<h4 id="tts-events">SpeechSynthesis Events</h4>
<dl>
<dt><dfn event for=SpeechSynthesis>voiceschanged</dfn> event</dt>
<dd>Fired when the set of {{SpeechSynthesisVoice}} objects that the getVoices method will return has changed.
Examples include: server-side synthesis where the list is determined asynchronously, or when client-side voices are installed/uninstalled.</dd>
</dl>
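<div class="example">
<p>A non-normative sketch of reacting to the <a event for=SpeechSynthesis>voiceschanged</a> event. Because the voice list may be determined asynchronously, getVoices() can return an empty list on the first call.</p>
<pre class="lang-javascript">
function logVoices() {
  var voices = speechSynthesis.getVoices();
  console.log(voices.map(function(v) {
    return v.name + " (" + v.lang + ")";
  }).join(", "));
}

logVoices();                                 // may log nothing yet
speechSynthesis.onvoiceschanged = logVoices; // called when the list becomes available or changes
</pre>
</div>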
<h4 id="utterance-attributes">SpeechSynthesisUtterance Attributes</h4>
<dl>
<dt><dfn attribute for=SpeechSynthesisUtterance>text</dfn> attribute</dt>
<dd>This attribute specifies the text to be synthesized and spoken for this utterance.
This may be either plain text or a complete, well-formed SSML document. [[!SSML]]
For speech synthesis engines that do not support SSML, or only support certain tags, the user agent or speech engine must strip away the tags they do not support and speak the text.
There may be a maximum length of the text; for example, it may be limited to 32,767 characters.</dd>
<dt><dfn attribute for=SpeechSynthesisUtterance>lang</dfn> attribute</dt>
<dd>This attribute specifies the language of the speech synthesis for the utterance, using a valid BCP 47 language tag. [[!BCP47]]
If unset, it remains unset when read in script, but will default to the <a spec=html>language</a> of the HTML document root element and associated hierarchy.
This default value is computed and used when the utterance is passed to the speech synthesis service.</dd>
<dt><dfn attribute for=SpeechSynthesisUtterance>voice</dfn> attribute</dt>
<dd>This attribute specifies the speech synthesis voice that the web application wishes to use.
When a {{SpeechSynthesisUtterance}} object is created this attribute must be initialized to null.
If, at the time of the {{speak()}} method call, this attribute has been set to one of the {{SpeechSynthesisVoice}} objects returned by {{getVoices()}}, then the user agent must use that voice.
If this attribute is unset or null at the time of the {{speak()}} method call, then the user agent must use a user agent default voice.
The user agent default voice should support the current language (see {{SpeechSynthesisUtterance/lang}}) and can be a local or remote speech service and can incorporate end user choices via interfaces provided by the user agent such as browser configuration parameters.
</dd>
<dt><dfn attribute for=SpeechSynthesisUtterance>volume</dfn> attribute</dt>
<dd>This attribute specifies the speaking volume for the utterance.
It ranges between 0 and 1 inclusive, with 0 being the lowest volume and 1 the highest volume, with a default of 1.
If SSML is used, this value will be overridden by prosody tags in the markup.</dd>
<dt><dfn attribute for=SpeechSynthesisUtterance>rate</dfn> attribute</dt>
<dd>This attribute specifies the speaking rate for the utterance.
It is relative to the default rate for this voice.
1 is the default rate supported by the speech synthesis engine or specific voice (which should correspond to a normal speaking rate).
2 is twice as fast, and 0.5 is half as fast.
Values below 0.1 or above 10 are strictly disallowed, but speech synthesis engines or specific voices may constrain the minimum and maximum rates further, for example, a particular voice may not actually speak faster than 3 times normal even if you specify a value larger than 3.
If SSML is used, this value will be overridden by prosody tags in the markup.</dd>
<dt><dfn attribute for=SpeechSynthesisUtterance>pitch</dfn> attribute</dt>
<dd>This attribute specifies the speaking pitch for the utterance.
It ranges between 0 and 2 inclusive, with 0 being the lowest pitch and 2 the highest pitch.
1 corresponds to the default pitch of the speech synthesis engine or specific voice.
Speech synthesis engines or voices may constrain the minimum and maximum pitch further.
If SSML is used, this value will be overridden by prosody tags in the markup.</dd>
</dl>
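<div class="example">
<p>A non-normative sketch of configuring an utterance before speaking it. The values are illustrative only.</p>
<pre class="lang-javascript">
var utterance = new SpeechSynthesisUtterance("Hello, world!");
utterance.lang = "en-US"; // BCP 47 language tag
utterance.volume = 0.8;   // 0 to 1, default 1
utterance.rate = 1.25;    // relative to the voice's default rate
utterance.pitch = 1.0;    // 0 to 2, default 1
speechSynthesis.speak(utterance);
</pre>
</div>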
<h4 id="utterance-events">SpeechSynthesisUtterance Events</h4>
<p>Each of these events must use the {{SpeechSynthesisEvent}} interface,
except the error event which must use the {{SpeechSynthesisErrorEvent}} interface.
These events do not bubble and are not cancelable.</p>
<dl>
<dt><dfn event for=SpeechSynthesisUtterance>start</dfn> event</dt>
<dd>Fired when this utterance has begun to be spoken.</dd>
<dt><dfn event for=SpeechSynthesisUtterance>end</dfn> event</dt>
<dd>Fired when this utterance has completed being spoken.
If this event fires, the <a event for=SpeechSynthesisUtterance>error</a> event must not be fired for this utterance.</dd>
<dt><dfn event for=SpeechSynthesisUtterance>error</dfn> event</dt>
<dd>Fired if there was an error that prevented successful speaking of this utterance.
If this event fires, the <a event for=SpeechSynthesisUtterance>end</a> event must not be fired for this utterance.</dd>
<dt><dfn event for=SpeechSynthesisUtterance>pause</dfn> event</dt>
<dd>Fired when and if this utterance is paused mid-utterance.</dd>
<dt><dfn event for=SpeechSynthesisUtterance>resume</dfn> event</dt>
<dd>Fired when and if this utterance is resumed after being paused mid-utterance.
Adding the utterance to the queue while the global SpeechSynthesis instance is in the paused state, and then calling the resume method,
does not cause the resume event to be fired; in this case the utterance's <a event for=SpeechSynthesisUtterance>start</a> event will be fired when the utterance starts.</dd>
<dt><dfn event for=SpeechSynthesisUtterance>mark</dfn> event</dt>
<dd>Fired when the spoken utterance reaches a named "mark" tag in SSML. [[!SSML]]
The user agent must fire this event if the speech synthesis engine provides the event.</dd>
<dt><dfn event for=SpeechSynthesisUtterance>boundary</dfn> event</dt>
<dd>Fired when the spoken utterance reaches a word or sentence boundary.
The user agent must fire this event if the speech synthesis engine provides the event.</dd>
</dl>
<h4 id="speechsynthesisevent-attributes">SpeechSynthesisEvent Attributes</h4>
<dl>
<dt><dfn attribute for=SpeechSynthesisEvent>utterance</dfn> attribute</dt>
<dd>This attribute contains the SpeechSynthesisUtterance that triggered this event.</dd>
<dt><dfn attribute for=SpeechSynthesisEvent>charIndex</dfn> attribute</dt>
<dd>This attribute indicates the zero-based character index into the original utterance string that most closely approximates the current speaking position of the speech engine.
No guarantee is given as to where charIndex will be with respect to word boundaries (such as at the end of the previous word or the beginning of the next word), only that all text before charIndex has already been spoken, and all text after charIndex has not yet been spoken.
The user agent must return this value if the speech synthesis engine supports it, otherwise the user agent must return 0.</dd>
<dt><dfn attribute for=SpeechSynthesisEvent>charLength</dfn> attribute</dt>
<dd>This attribute indicates the length of the text (word or sentence) that will be spoken corresponding to this event.
This attribute is the length, in characters, starting from this event's {{SpeechSynthesisEvent/charIndex}}.
The user agent must return this value if the speech synthesis engine supports it or the user agent can otherwise determine it, otherwise the user agent must return 0.</dd>
<dt><dfn attribute for=SpeechSynthesisEvent>elapsedTime</dfn> attribute</dt>
<dd>This attribute indicates the time, in seconds, at which this event was triggered, relative to when this utterance began to be spoken.
The user agent must return this value if the speech synthesis engine supports it or the user agent can otherwise determine it, otherwise the user agent must return 0.</dd>
<dt><dfn attribute for=SpeechSynthesisEvent>name</dfn> attribute</dt>
<dd>For <a event for=SpeechSynthesisUtterance>mark</a> events, this attribute indicates the name of the marker, as defined in SSML as the name attribute of a mark element. [[!SSML]]
For <a event for=SpeechSynthesisUtterance>boundary</a> events, this attribute indicates the type of boundary that caused the event: "word" or "sentence".
For all other events, this value should return "".</dd>
</dl>
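<div class="example">
<p>A non-normative sketch that uses <a event for=SpeechSynthesisUtterance>boundary</a> events together with charIndex and charLength to log the word currently being spoken. Whether these values are reported depends on the speech synthesis engine.</p>
<pre class="lang-javascript">
var text = "The quick brown fox jumps over the lazy dog.";
var utterance = new SpeechSynthesisUtterance(text);

utterance.onboundary = function(event) {
  if (event.name === "word" && event.charLength > 0) {
    var word = text.substring(event.charIndex, event.charIndex + event.charLength);
    console.log("Speaking \"" + word + "\" at " + event.elapsedTime + " s");
  }
};
speechSynthesis.speak(utterance);
</pre>
</div>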
<h4 id="speechsynthesiserrorevent-attributes">SpeechSynthesisErrorEvent Attributes</h4>
<p>The SpeechSynthesisErrorEvent is the interface used for the SpeechSynthesisUtterance <a event for=SpeechSynthesisUtterance>error</a> event.</p>
<dl>
<dt><dfn attribute for=SpeechSynthesisErrorEvent>error</dfn> attribute</dt>
<dd>The error attribute is an enumeration indicating what has gone wrong.
The values are:
<dl>
<dt><dfn enum-value for=SpeechSynthesisErrorCode>"canceled"</dfn></dt>
<dd>A cancel method call caused the SpeechSynthesisUtterance to be removed from the queue before it had begun being spoken.</dd>
<dt><dfn enum-value for=SpeechSynthesisErrorCode>"interrupted"</dfn></dt>
<dd>A cancel method call caused the SpeechSynthesisUtterance to be interrupted after it has begun being spoken and before it completed.</dd>
<dt><dfn enum-value for=SpeechSynthesisErrorCode>"audio-busy"</dfn></dt>
<dd>The operation cannot be completed at this time because the user agent cannot access the audio output device.
(For example, the user may need to correct this by closing another application.)</dd>
<dt><dfn enum-value for=SpeechSynthesisErrorCode>"audio-hardware"</dfn></dt>
<dd>The operation cannot be completed at this time because the user agent cannot identify an audio output device.
(For example, the user may need to connect a speaker or configure system settings.)</dd>
<dt><dfn enum-value for=SpeechSynthesisErrorCode>"network"</dfn></dt>
<dd>The operation cannot be completed at this time because some required network communication failed.</dd>
<dt><dfn enum-value for=SpeechSynthesisErrorCode>"synthesis-unavailable"</dfn></dt>
<dd>The operation cannot be completed at this time because no synthesis engine is available.
(For example, the user may need to install or configure a synthesis engine.)</dd>
<dt><dfn enum-value for=SpeechSynthesisErrorCode>"synthesis-failed"</dfn></dt>
<dd>The operation failed because the synthesis engine had an error.</dd>
<dt><dfn enum-value for=SpeechSynthesisErrorCode>"language-unavailable"</dfn></dt>
<dd>No appropriate voice is available for the language designated by the SpeechSynthesisUtterance lang attribute.</dd>
<dt><dfn enum-value for=SpeechSynthesisErrorCode>"voice-unavailable"</dfn></dt>
<dd>The voice designated by the SpeechSynthesisUtterance voice attribute is not available.</dd>
<dt><dfn enum-value for=SpeechSynthesisErrorCode>"text-too-long"</dfn></dt>
<dd>The contents of the SpeechSynthesisUtterance text attribute are too long to synthesize.</dd>
<dt><dfn enum-value for=SpeechSynthesisErrorCode>"invalid-argument"</dfn></dt>
<dd>The value of the SpeechSynthesisUtterance rate, pitch or volume attribute is not supported by the synthesizer.</dd>
<dt><dfn enum-value for=SpeechSynthesisErrorCode>"not-allowed"</dfn></dt>
<dd>Synthesis was not allowed to start by the user agent or system in the current context.</dd>
</dl>
</dd>
</dl>
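<div class="example">
<p>A non-normative sketch of handling the utterance <a event for=SpeechSynthesisUtterance>error</a> event, assuming an utterance created as in the earlier sketches.</p>
<pre class="lang-javascript">
utterance.onerror = function(event) {
  switch (event.error) {
    case "canceled":
    case "interrupted":
      // Expected when cancel() is called; usually nothing to report to the user.
      break;
    case "language-unavailable":
    case "voice-unavailable":
      console.warn("Requested language or voice is not available:", event.error);
      break;
    default:
      console.error("Speech synthesis failed:", event.error);
  }
};
</pre>
</div>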
<h4 id="speechsynthesisvoice-attributes">SpeechSynthesisVoice Attributes</h4>
<dl>
<dt><dfn attribute for=SpeechSynthesisVoice>voiceURI</dfn> attribute</dt>
<dd>The voiceURI attribute specifies the speech synthesis voice and the location of the speech synthesis service for this voice.
Note that the voiceURI is a generic URI and can thus point to local or remote services, either through use of a URN with meaning to the user agent or by specifying a URL that the user agent recognizes as a local service.</dd>
<dt><dfn attribute for=SpeechSynthesisVoice>name</dfn> attribute</dt>
<dd>This attribute is a human-readable name that represents the voice.
There is no guarantee that all names returned are unique.</dd>
<dt><dfn attribute for=SpeechSynthesisVoice>lang</dfn> attribute</dt>
<dd>This attribute is a BCP 47 language tag indicating the language of the voice. [[!BCP47]]</dd>
<dt><dfn attribute for=SpeechSynthesisVoice>localService</dfn> attribute</dt>
<dd>This attribute is true for voices supplied by a local speech synthesizer, and is false for voices supplied by a remote speech synthesizer service.
(This may be useful because remote services may imply additional latency, bandwidth or cost, whereas local voices may imply lower quality, however there is no guarantee that any of these implications are true.)</dd>
<dt><dfn attribute for=SpeechSynthesisVoice>default</dfn> attribute</dt>
<dd>This attribute is true for at most one voice per language.
There may be a different default for each language.
It is user agent dependent how default voices are determined.</dd>
</dl>
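<div class="example">
<p>A non-normative sketch of choosing a voice from getVoices(), preferring a local voice for the requested language and otherwise letting the user agent pick its default.</p>
<pre class="lang-javascript">
var utterance = new SpeechSynthesisUtterance("Bonjour !");
utterance.lang = "fr-FR";

var voices = speechSynthesis.getVoices();
utterance.voice =
    voices.find(function(v) { return v.lang === "fr-FR" && v.localService; }) ||
    voices.find(function(v) { return v.lang === "fr-FR"; }) ||
    null; // null lets the user agent use its default voice

speechSynthesis.speak(utterance);
</pre>
</div>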
<h2 id="examples">Examples</h2>
<p><em>This section is non-normative.</em></p>
<h3 id="examples-recognition">Speech Recognition Examples</h3>
<div class="example">
<p>Using speech recognition to fill an input-field and perform a web search.</p>
<pre class="lang-html">
<script type="text/javascript">
var recognition = new SpeechRecognition();
recognition.onresult = function(event) {
if (event.results.length > 0) {
q.value = event.results[0][0].transcript;
q.form.submit();
}
}
</script>
<form action="https://www.example.com/search">
<input type="search" id="q" name="q" size=60>
<input type="button" value="Click to Speak" onclick="recognition.start()">
</form>
</pre>
</div>
<div class="example">
<p>Using speech recognition to fill an options list with alternative speech results.</p>
<pre class="lang-html">
<script type="text/javascript">
var recognition = new SpeechRecognition();
recognition.maxAlternatives = 10;
recognition.onresult = function(event) {
if (event.results.length > 0) {
var result = event.results[0];
for (var i = 0; i < result.length; ++i) {
var text = result[i].transcript;
select.options[i] = new Option(text, text);
}
}
}
function start() {
select.options.length = 0;
recognition.start();
}
</script>
<select id="select"></select>
<button onclick="start()">Click to Speak</button>
</pre>
</div>
<div class="example">
<p>Using continuous speech recognition to fill a textarea.</p>
<pre class="lang-html">
<textarea id="textarea" rows=10 cols=80></textarea>
<button id="button" onclick="toggleStartStop()"></button>
<script type="text/javascript">
var recognizing;
var recognition = new SpeechRecognition();
recognition.continuous = true;
reset();
recognition.onend = reset;
recognition.onresult = function (event) {
for (var i = event.resultIndex; i < event.results.length; ++i) {
if (event.results[i].isFinal) {
textarea.value += event.results[i][0].transcript;
}
}
}
function reset() {
recognizing = false;
button.innerHTML = "Click to Speak";
}
function toggleStartStop() {
if (recognizing) {
recognition.stop();
reset();
} else {
recognition.start();
recognizing = true;
button.innerHTML = "Click to Stop";
}
}
</script>
</pre>
</div>
<div class="example">
<p>Using continuous speech recognition, showing final results in black and interim results in grey.</p>
<pre class="lang-html">
<button id="button" onclick="toggleStartStop()"></button>
<div style="border:dotted;padding:10px">
<span id="final_span"></span>
<span id="interim_span" style="color:grey"></span>
</div>
<script type="text/javascript">
var recognizing;