forked from microsoft/Windows-universal-samples
-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathScenario_PredefinedWebSearchGrammar.xaml.cs
More file actions
343 lines (311 loc) · 15.6 KB
/
Scenario_PredefinedWebSearchGrammar.xaml.cs
File metadata and controls
343 lines (311 loc) · 15.6 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
//*********************************************************
//
// Copyright (c) Microsoft. All rights reserved.
// This code is licensed under the MIT License (MIT).
// THIS CODE IS PROVIDED *AS IS* WITHOUT WARRANTY OF
// ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING ANY
// IMPLIED WARRANTIES OF FITNESS FOR A PARTICULAR
// PURPOSE, MERCHANTABILITY, OR NON-INFRINGEMENT.
//
//*********************************************************
using SDKTemplate;
using System;
using System.Collections.Generic;
using System.Threading.Tasks;
using Windows.Globalization;
using Windows.Media.SpeechRecognition;
using Windows.UI.Core;
using Windows.UI.Xaml;
using Windows.UI.Xaml.Controls;
using Windows.UI.Xaml.Documents;
using Windows.UI.Xaml.Navigation;
using Windows.Foundation;
using Windows.ApplicationModel.Resources.Core;
namespace SDKTemplate
{
public sealed partial class Scenario_PredefinedWebSearchGrammar : Page
{
    /// <summary>
    /// This HResult represents the scenario where a user is prompted to allow in-app speech, but
    /// declines. This should only happen on a Phone device, where speech is enabled for the entire device,
    /// not per-app.
    /// </summary>
    private const uint HResultPrivacyStatementDeclined = 0x80045509;

    // Recognizer instance for the currently selected language; recreated on language change.
    private SpeechRecognizer speechRecognizer;

    // In-flight recognition operation, kept so it can be cancelled when navigating away.
    private IAsyncOperation<SpeechRecognitionResult> recognitionOperation;

    // Resource context/map used to look up language-specific UI strings.
    private ResourceContext speechContext;
    private ResourceMap speechResourceMap;

    public Scenario_PredefinedWebSearchGrammar()
    {
        InitializeComponent();
    }

    /// <summary>
    /// When activating the scenario, ensure we have permission from the user to access their microphone, and
    /// provide an appropriate path for the user to enable access to the microphone if they haven't
    /// given explicit permission for it.
    /// </summary>
    /// <param name="e">The navigation event details</param>
    protected async override void OnNavigatedTo(NavigationEventArgs e)
    {
        bool permissionGained = await AudioCapturePermissions.RequestMicrophonePermission();
        if (permissionGained)
        {
            // Enable the recognition buttons.
            btnRecognizeWithUI.IsEnabled = true;
            btnRecognizeWithoutUI.IsEnabled = true;

            // Initialize resource map to retrieve localized speech strings for the system language.
            Language speechLanguage = SpeechRecognizer.SystemSpeechLanguage;
            speechContext = ResourceContext.GetForCurrentView();
            speechContext.Languages = new string[] { speechLanguage.LanguageTag };

            speechResourceMap = ResourceManager.Current.MainResourceMap.GetSubtree("LocalizationSpeechResources");

            PopulateLanguageDropDown();
            await InitializeRecognizer(SpeechRecognizer.SystemSpeechLanguage);
        }
        else
        {
            // Microphone access denied: surface the reason and disable all recognition controls.
            resultTextBlock.Visibility = Visibility.Visible;
            resultTextBlock.Text = "Permission to access capture resources was not given by the user; please set the application setting in Settings->Privacy->Microphone.";
            btnRecognizeWithUI.IsEnabled = false;
            btnRecognizeWithoutUI.IsEnabled = false;
            cbLanguageSelection.IsEnabled = false;
        }
    }

    /// <summary>
    /// Look up the supported languages for this speech recognition scenario,
    /// that are installed on this machine, and populate a dropdown with a list.
    /// </summary>
    private void PopulateLanguageDropDown()
    {
        Language defaultLanguage = SpeechRecognizer.SystemSpeechLanguage;
        IEnumerable<Language> supportedLanguages = SpeechRecognizer.SupportedTopicLanguages;
        foreach (Language lang in supportedLanguages)
        {
            ComboBoxItem item = new ComboBoxItem();
            item.Tag = lang;
            item.Content = lang.DisplayName;

            cbLanguageSelection.Items.Add(item);

            // Pre-select the system speech language in the dropdown.
            if (lang.LanguageTag == defaultLanguage.LanguageTag)
            {
                item.IsSelected = true;
                cbLanguageSelection.SelectedItem = item;
            }
        }
    }

    /// <summary>
    /// Ensure that we clean up any state tracking event handlers created in OnNavigatedTo to prevent leaks.
    /// </summary>
    /// <param name="e">Details about the navigation event</param>
    protected override void OnNavigatedFrom(NavigationEventArgs e)
    {
        base.OnNavigatedFrom(e);

        if (speechRecognizer != null)
        {
            // If recognition is still in flight, cancel it before disposing the recognizer.
            if (speechRecognizer.State != SpeechRecognizerState.Idle)
            {
                if (recognitionOperation != null)
                {
                    recognitionOperation.Cancel();
                    recognitionOperation = null;
                }
            }

            speechRecognizer.StateChanged -= SpeechRecognizer_StateChanged;

            this.speechRecognizer.Dispose();
            this.speechRecognizer = null;
        }
    }

    /// <summary>
    /// Initialize Speech Recognizer and compile constraints.
    /// </summary>
    /// <param name="recognizerLanguage">Language to use for the speech recognizer</param>
    /// <returns>Awaitable task.</returns>
    private async Task InitializeRecognizer(Language recognizerLanguage)
    {
        if (speechRecognizer != null)
        {
            // cleanup prior to re-initializing this scenario.
            speechRecognizer.StateChanged -= SpeechRecognizer_StateChanged;

            this.speechRecognizer.Dispose();
            this.speechRecognizer = null;
        }

        // Create an instance of SpeechRecognizer.
        speechRecognizer = new SpeechRecognizer(recognizerLanguage);

        // Provide feedback to the user about the state of the recognizer.
        speechRecognizer.StateChanged += SpeechRecognizer_StateChanged;

        // Add a web search topic constraint to the recognizer.
        var webSearchGrammar = new SpeechRecognitionTopicConstraint(SpeechRecognitionScenario.WebSearch, "webSearch");
        speechRecognizer.Constraints.Add(webSearchGrammar);

        // RecognizeWithUIAsync allows developers to customize the prompts.
        speechRecognizer.UIOptions.AudiblePrompt = "Say what you want to search for...";
        speechRecognizer.UIOptions.ExampleText = speechResourceMap.GetValue("WebSearchUIOptionsExampleText", speechContext).ValueAsString;

        // Compile the constraint.
        SpeechRecognitionCompilationResult compilationResult = await speechRecognizer.CompileConstraintsAsync();

        // Check to make sure that the constraints were in a proper format and the recognizer was able to compile it.
        if (compilationResult.Status != SpeechRecognitionResultStatus.Success)
        {
            // Disable the recognition buttons.
            btnRecognizeWithUI.IsEnabled = false;
            btnRecognizeWithoutUI.IsEnabled = false;

            // Let the user know that the grammar didn't compile properly.
            resultTextBlock.Visibility = Visibility.Visible;
            resultTextBlock.Text = "Unable to compile grammar.";
        }
    }

    /// <summary>
    /// Handle SpeechRecognizer state changed events by updating a UI component.
    /// </summary>
    /// <param name="sender">Speech recognizer that generated this status event</param>
    /// <param name="args">The recognizer's status</param>
    private async void SpeechRecognizer_StateChanged(SpeechRecognizer sender, SpeechRecognizerStateChangedEventArgs args)
    {
        // StateChanged fires off the UI thread; marshal the status update back to the dispatcher.
        await Dispatcher.RunAsync(CoreDispatcherPriority.Normal, () =>
        {
            MainPage.Current.NotifyUser("Speech recognizer state: " + args.State.ToString(), NotifyType.StatusMessage);
        });
    }

    /// <summary>
    /// Uses the recognizer constructed earlier to listen for speech from the user before displaying
    /// it back on the screen. Uses the built-in speech recognition UI.
    /// </summary>
    /// <param name="sender">Button that triggered this event</param>
    /// <param name="e">State information about the routed event</param>
    private async void RecognizeWithUIWebSearchGrammar_Click(object sender, RoutedEventArgs e)
    {
        heardYouSayTextBlock.Visibility = resultTextBlock.Visibility = Visibility.Collapsed;
        hlOpenPrivacySettings.Visibility = Visibility.Collapsed;

        // Start recognition.
        try
        {
            recognitionOperation = speechRecognizer.RecognizeWithUIAsync();
            SpeechRecognitionResult speechRecognitionResult = await recognitionOperation;

            // If successful, display the recognition result.
            if (speechRecognitionResult.Status == SpeechRecognitionResultStatus.Success)
            {
                heardYouSayTextBlock.Visibility = resultTextBlock.Visibility = Visibility.Visible;
                resultTextBlock.Text = speechRecognitionResult.Text;
            }
            else
            {
                resultTextBlock.Visibility = Visibility.Visible;
                resultTextBlock.Text = string.Format("Speech Recognition Failed, Status: {0}", speechRecognitionResult.Status.ToString());
            }
        }
        catch (TaskCanceledException exception)
        {
            // TaskCanceledException will be thrown if you exit the scenario while the recognizer is actively
            // processing speech. Since this happens here when we navigate out of the scenario, don't try to
            // show a message dialog for this exception.
            System.Diagnostics.Debug.WriteLine("TaskCanceledException caught while recognition in progress (can be ignored):");
            System.Diagnostics.Debug.WriteLine(exception.ToString());
        }
        catch (Exception exception)
        {
            // Handle the speech privacy policy error.
            if ((uint)exception.HResult == HResultPrivacyStatementDeclined)
            {
                hlOpenPrivacySettings.Visibility = Visibility.Visible;
            }
            else
            {
                var messageDialog = new Windows.UI.Popups.MessageDialog(exception.Message, "Exception");
                await messageDialog.ShowAsync();
            }
        }
    }

    /// <summary>
    /// Uses the recognizer constructed earlier to listen for speech from the user before displaying
    /// it back on the screen. Uses developer-provided UI for user feedback.
    /// </summary>
    /// <param name="sender">Button that triggered this event</param>
    /// <param name="e">State information about the routed event</param>
    private async void RecognizeWithoutUIWebSearchGrammar_Click(object sender, RoutedEventArgs e)
    {
        heardYouSayTextBlock.Visibility = resultTextBlock.Visibility = Visibility.Collapsed;

        // Disable the UI while recognition is occurring, and provide feedback to the user about current state.
        btnRecognizeWithUI.IsEnabled = false;
        btnRecognizeWithoutUI.IsEnabled = false;
        cbLanguageSelection.IsEnabled = false;
        hlOpenPrivacySettings.Visibility = Visibility.Collapsed;
        listenWithoutUIButtonText.Text = " listening for speech...";

        // Start recognition.
        try
        {
            recognitionOperation = speechRecognizer.RecognizeAsync();
            SpeechRecognitionResult speechRecognitionResult = await recognitionOperation;

            // If successful, display the recognition result.
            if (speechRecognitionResult.Status == SpeechRecognitionResultStatus.Success)
            {
                heardYouSayTextBlock.Visibility = resultTextBlock.Visibility = Visibility.Visible;
                resultTextBlock.Text = speechRecognitionResult.Text;
            }
            else
            {
                resultTextBlock.Visibility = Visibility.Visible;
                resultTextBlock.Text = string.Format("Speech Recognition Failed, Status: {0}", speechRecognitionResult.Status.ToString());
            }
        }
        catch (TaskCanceledException exception)
        {
            // TaskCanceledException will be thrown if you exit the scenario while the recognizer is actively
            // processing speech. Since this happens here when we navigate out of the scenario, don't try to
            // show a message dialog for this exception.
            System.Diagnostics.Debug.WriteLine("TaskCanceledException caught while recognition in progress (can be ignored):");
            System.Diagnostics.Debug.WriteLine(exception.ToString());
        }
        catch (Exception exception)
        {
            // Handle the speech privacy policy error.
            if ((uint)exception.HResult == HResultPrivacyStatementDeclined)
            {
                hlOpenPrivacySettings.Visibility = Visibility.Visible;
            }
            else
            {
                var messageDialog = new Windows.UI.Popups.MessageDialog(exception.Message, "Exception");
                await messageDialog.ShowAsync();
            }
        }
        finally
        {
            // Reset UI state in a finally block so the controls are re-enabled even if an
            // unexpected exception escapes the handlers above.
            listenWithoutUIButtonText.Text = " without UI";
            btnRecognizeWithUI.IsEnabled = true;
            cbLanguageSelection.IsEnabled = true;
            btnRecognizeWithoutUI.IsEnabled = true;
        }
    }

    /// <summary>
    /// When a user changes the speech recognition language, trigger re-initialization of the
    /// speech engine with that language, and change any speech-specific UI assets.
    /// </summary>
    /// <param name="sender">Ignored</param>
    /// <param name="e">Ignored</param>
    private async void cbLanguageSelection_SelectionChanged(object sender, SelectionChangedEventArgs e)
    {
        if (speechRecognizer != null)
        {
            ComboBoxItem item = (ComboBoxItem)(cbLanguageSelection.SelectedItem);
            Language newLanguage = (Language)item.Tag;

            // Language is a WinRT runtime class with no value-equality operator, so "!=" on the
            // objects compares references and would report a change even when the same language
            // is re-selected. Compare the BCP-47 tags instead.
            if (speechRecognizer.CurrentLanguage.LanguageTag != newLanguage.LanguageTag)
            {
                // trigger cleanup and re-initialization of speech.
                try
                {
                    speechContext.Languages = new string[] { newLanguage.LanguageTag };

                    await InitializeRecognizer(newLanguage);
                }
                catch (Exception exception)
                {
                    var messageDialog = new Windows.UI.Popups.MessageDialog(exception.Message, "Exception");
                    await messageDialog.ShowAsync();
                }
            }
        }
    }

    /// <summary>
    /// Open the Speech, Inking and Typing page under Settings -> Privacy, enabling a user to accept the
    /// Microsoft Privacy Policy, and enable personalization.
    /// </summary>
    /// <param name="sender">Ignored</param>
    /// <param name="args">Ignored</param>
    private async void openPrivacySettings_Click(Hyperlink sender, HyperlinkClickEventArgs args)
    {
        await Windows.System.Launcher.LaunchUriAsync(new Uri("ms-settings:privacy-speech"));
    }
}
}