Thank you for your reply.
I’m afraid that I didn’t present my problem clearly.
I am trying to recognize English sentences using the SpeechRecognitionEngine class, which is part of SAPI 5.4, on a Chinese Windows 7 machine that has the Microsoft Speech Recognizer 8.0 for Windows (Chinese Simplified - PRC) installed. Using a Grammar object constructed from a Choices object, a SpeechRecognitionEngine that has loaded the grammar can recognize some simple English sentences, for example "How are you", "yes", and "quit". The fragments of code are as follows.
// Shared (in-process) recognizer used by Main and the event handlers below.
private static SpeechRecognizer recognizer;

/// <summary>
/// Builds two small command grammars ("yesno" and "done") from Choices
/// objects and loads them asynchronously into the shared recognizer.
/// </summary>
static void Main(string[] args)
{
    recognizer = new SpeechRecognizer();
    // The generic type arguments were lowercased by HTML rendering in the
    // original post; they must be the real EventArgs type names to compile.
    recognizer.LoadGrammarCompleted += new EventHandler<LoadGrammarCompletedEventArgs>(recognizer_LoadGrammarCompleted);
    recognizer.SpeechRecognized += new EventHandler<SpeechRecognizedEventArgs>(recognizer_SpeechRecognized);
    recognizer.StateChanged += new EventHandler<StateChangedEventArgs>(recognizer_StateChanged);

    // "yes"-style phrases carry the semantic value true.
    Choices yesChoices = new Choices(new string[] { "how are you", "yah", "yup" });
    SemanticResultValue yesValue = new SemanticResultValue(yesChoices, true);
    // "no"-style phrases carry the semantic value false.
    Choices noChoices = new Choices(new string[] { "no", "nope", "nah" });
    SemanticResultValue noValue = new SemanticResultValue(noChoices, false);
    // Whichever alternative matched is surfaced under the semantic key "yesno".
    SemanticResultKey yesnoKey = new SemanticResultKey("yesno", new Choices(new GrammarBuilder[] { yesValue, noValue }));
    Grammar yesnoGrammar = new Grammar(yesnoKey);
    yesnoGrammar.Name = "yesno";

    // Commands that end the session. ("exit" was misspelled "eixt" in the
    // original, so that command could never be recognized.)
    Grammar doneGrammar = new Grammar(new GrammarBuilder(new Choices(new string[] { "done", "exit", "quit", "stop" })));
    doneGrammar.Name = "done";

    recognizer.LoadGrammarAsync(yesnoGrammar);
    recognizer.LoadGrammarAsync(doneGrammar);

    // Keep the process alive while recognition events arrive.
    Console.ReadLine();
}
/// <summary>
/// Fires an emulated recognition whenever the shared recognizer is in any
/// state other than Stopped, so listening kicks off without a spoken trigger.
/// </summary>
static void recognizer_StateChanged(object sender, StateChangedEventArgs e)
{
    try
    {
        // Nothing to do while the recognizer is stopped.
        if (e.RecognizerState == RecognizerState.Stopped)
        {
            return;
        }
        recognizer.EmulateRecognizeAsync("Start Listening");
    }
    catch (Exception ex)
    {
        Console.WriteLine(ex.ToString());
    }
}
/// <summary>
/// Reports whether an asynchronously loaded grammar was accepted, and the
/// error type if loading failed.
/// </summary>
static void recognizer_LoadGrammarCompleted(object sender, LoadGrammarCompletedEventArgs e)
{
    try
    {
        string grammarName = e.Grammar.Name;
        bool grammarLoaded = e.Grammar.Loaded;
        if (e.Error != null)
        {
            // Original used the malformed placeholder "{0]", which throws a
            // FormatException at runtime — exactly when an error is being reported.
            Console.WriteLine("load grammar for {0} failed with {1}", grammarName, e.Error.GetType().Name);
        }
        Console.WriteLine("Grammar {0} {1} loaded", grammarName, grammarLoaded ? "is" : "isn't");
    }
    catch (Exception ex)
    {
        Console.WriteLine(ex.ToString());
    }
}
/// <summary>
/// Dumps full detail about a recognition result: grammar name, text,
/// semantic key/value pairs, a per-word breakdown with audio duration,
/// and the alternate phrases with their confidence scores.
/// </summary>
static void recognizer_SpeechRecognized(object sender, SpeechRecognizedEventArgs e)
{
    Console.WriteLine("Grammar({0}): {1}", e.Result.Grammar.Name, e.Result.Text);
    Console.WriteLine("Speech recognized: " + e.Result.Text);
    Console.WriteLine();
    // Alternates.ToString() only printed the collection's type name;
    // report the number of alternates instead.
    Console.WriteLine("Semantic results:{0},alternates:{1}", e.Result.Text, e.Result.Alternates.Count);
    Console.WriteLine("Grammar({0}), {1}: {2}",
        e.Result.Grammar.Name, e.Result.Audio.Duration, e.Result.Text);
    // The value type argument was lost in the post's HTML; SemanticValue
    // enumerates as KeyValuePair<string, SemanticValue>.
    foreach (KeyValuePair<string, SemanticValue> child in e.Result.Semantics)
    {
        Console.WriteLine(" {0} key: {1}",
            child.Key, child.Value.Value ?? "null");
    }
    Console.WriteLine();
    foreach (RecognizedWordUnit word in e.Result.Words)
    {
        RecognizedAudio audio = e.Result.GetAudioForWordRange(word, word);
        Console.WriteLine(" {0,-10} {1,-10} {2,-10} {3} ({4})",
            word.Text, word.LexicalForm, word.Pronunciation,
            audio.Duration, word.DisplayAttributes);
    }
    foreach (RecognizedPhrase phrase in e.Result.Alternates)
    {
        Console.WriteLine(" alt({0}) {1}", phrase.Confidence, phrase.Text);
    }
}
However, using a Grammar object constructed from an SrgsDocument object that was built from a .grxml file, the SpeechRecognitionEngine that loaded the grammar cannot recognize even simple English sentences and can only detect audio input. The fragments of code are as follows.
// NOTE(review): incomplete fragment — the enclosing method header and the
// opening `try` are omitted from the post, so only the visible statements
// are annotated here.
// Engine is created for the Chinese (zh-CN) recognizer, yet the grammar file
// below contains English phrases — presumably the language mismatch is why
// only SpeechDetected ever fires; verify against the grxml's xml:lang.
SpeechRecognitionEngine recognizer = new SpeechRecognitionEngine ( System.Globalization.CultureInfo.CreateSpecificCulture("zh-CN"));
// Loads the SRGS grammar from a file relative to the working directory.
SrgsDocument srgsdoc = new SrgsDocument("./commongreetingGrammar.grxml");
recognizer.MaxAlternates = 5;
// NOTE(review): the lowercased generic arguments below are an HTML-rendering
// artifact; they must be LoadGrammarCompletedEventArgs, SpeechDetectedEventArgs
// and RecognizeCompletedEventArgs for the code to compile.
recognizer.LoadGrammarCompleted += new EventHandler<loadgrammarcompletedeventargs>(recognizer_LoadGrammarCompleted);
recognizer.SpeechDetected += new EventHandler<speechdetectedeventargs>(recognizer_SpeechDetected);
recognizer.RecognizeCompleted += new EventHandler<recognizecompletedeventargs>(recognizer_RecognizeCompleted);
recognizer.LoadGrammar(new Grammar(srgsdoc));
recognizer.SetInputToDefaultAudioDevice();
// Continuous recognition; results arrive via the handlers registered above.
recognizer.RecognizeAsync (RecognizeMode .Multiple);
}
catch (Exception ex)
{ Console.WriteLine(ex.Message); }
Console.ReadKey();
}
/// <summary>
/// Logged when the engine detects audio input that may be speech.
/// </summary>
static void recognizer_SpeechDetected(object sender, SpeechDetectedEventArgs e)
{
    // "speeching" was a typo in the original message.
    Console.WriteLine("Detect that someone is speaking");
}
/// <summary>
/// Reports the outcome of the asynchronous grammar load, including the
/// failure reason when loading did not succeed.
/// </summary>
static void recognizer_LoadGrammarCompleted(object sender, LoadGrammarCompletedEventArgs e)
{
    if (e.Error == null)
    {
        Console.WriteLine("complete to load grammar ");
    }
    else
    {
        // The original swallowed the error details; include them so grammar
        // problems (bad path, malformed grxml) are actually diagnosable.
        Console.WriteLine("Fail to load grammar: {0}", e.Error.Message);
    }
}
/// <summary>
/// Final RecognizeAsync callback; prints the recognized text when the
/// "step" semantic value equals "A1".
/// </summary>
static void recognizer_RecognizeCompleted(object sender, RecognizeCompletedEventArgs e)
{
    // e.Result is null when recognition failed, was canceled, or timed out,
    // and the "step" key only exists when the tagged phrase matched —
    // guard both to avoid NullReferenceException / KeyNotFoundException.
    if (e.Result == null || !e.Result.Semantics.ContainsKey("step"))
    {
        return;
    }
    if (e.Result.Semantics["step"].Value.ToString() == "A1")
    {
        Console.WriteLine("A start to speak:{0}", e.Result.Text);
    }
}
And the following is the grxml file, named commongreetingGrammar.grxml.
<grammar version="1.0" xml:lang="zh-CN" mode="voice" root="commongreeting"
         xmlns="http://www.w3.org/2001/06/grammar" tag-format="semantics-ms/1.0">
  <!-- NOTE(review): the root element was split by a stray '>' in the post;
       attributes are rejoined here into one well-formed start tag.
       Also note xml:lang is zh-CN while every phrase is English — this
       mismatch is the recognition failure described in the text above. -->
  <rule id="commongreeting">
    <one-of>
      <item>Hi,Jack,How's it going?<tag> $.step="A1";</tag></item>
      <item>I’m fine. I’ve been out of town. I just got back. </item>
      <item>Keeping busy. </item>
      <item>Not too bad,and you? </item>
    </one-of>
  </rule>
</grammar>
Luckily, I found the solution to the problem today.
The problem was that I had not installed the English language pack and had constructed the Grammar object incorrectly, which caused the SpeechRecognitionEngine object to fail to recognize English sentences. The details of the solution are as follows.
1. Download Vistalizator and the English language pack
You can visit the website
http://www.froggie.sk/ to download them.
2. Install the English language pack with Vistalizator
Run Vistalizator as administrator, then click [Add Language] to select the English language pack. After the Windows Language Pack has loaded, select Internal Installation mode to install the language. If installation fails in Internal Installation mode, install the language in Express Installation mode instead.
3. Change the Settings of your computer
Modify the keyboard layout, location standards and formats: Control Panel - Clock, Language, and Regional.
Click this link,
http://www.siqiboke.com/post/153.html you can get the detail how to install language pack.
4. The fragment of code is as follows.
/// <summary>
/// Compiles AirportCodes.grxml into a binary .cfg grammar, loads it into the
/// recognizer, and starts continuous asynchronous recognition on the default
/// audio device.
/// </summary>
static void Main(string[] args)
{
    // NOTE(review): SetInputToDefaultAudioDevice and RecognizeAsync are members
    // of SpeechRecognitionEngine, not the shared SpeechRecognizer — confirm the
    // declared type of the `recognizer` field.
    recognizer = new SpeechRecognizer();
    string grammarPath = @"C:\test\";
    // The using statement guarantees the stream is closed even if Compile
    // throws (the original leaked the FileStream on failure).
    using (FileStream fs = new FileStream(grammarPath + "AirportCodes.cfg", FileMode.Create))
    {
        SrgsGrammarCompiler.Compile(grammarPath + "AirportCodes.grxml", fs);
    }
    // Load the compiled grammar, starting from its "flightBooker" root rule.
    Grammar gr = new Grammar(grammarPath + "AirportCodes.cfg", "flightBooker");
    gr.Name = "Flight Chooser";
    recognizer.LoadGrammarAsync(gr);
    recognizer.SetInputToDefaultAudioDevice();
    // Generic type argument restored (lowercased by HTML in the original post).
    recognizer.SpeechRecognized += new EventHandler<SpeechRecognizedEventArgs>(recognizer_SpeechRecognized);
    recognizer.RecognizeAsync(RecognizeMode.Multiple);
    Console.WriteLine("Starting asynchronous recognition...");
    Console.ReadLine();
}
/// <summary>
/// Prints the recognized flight-booking sentence and its two semantic slots
/// ("LeavingFrom" and "GoingTo", filled by the flightBooker grammar's tags).
/// </summary>
static void recognizer_SpeechRecognized(object sender, SpeechRecognizedEventArgs e)
{
    Console.WriteLine("Speech recognized: " + e.Result.Text);
    Console.WriteLine();
    Console.WriteLine("Semantic results:");
    Console.WriteLine(" The departure city is: " + e.Result.Semantics["LeavingFrom"].Value);
    Console.WriteLine(" The arrival city is: " + e.Result.Semantics["GoingTo"].Value);
}
The existing grxml file is as follows.
<grammar xml:lang="en-US" root="flightBooker"
         tag-format="semantics/1.0" version="1.0"
         xmlns="http://www.w3.org/2001/06/grammar">
  <!-- NOTE(review): the root element was split by a premature '>' in the post;
       the attributes are rejoined here into one well-formed start tag. -->
  <rule id="flightBooker" scope="public">
    <item> I want to fly from </item>
    <ruleref uri="#flightCities" />
    <tag> out.LeavingFrom=rules.latest(); </tag>
    <item> to </item>
    <ruleref uri="#flightCities" />
    <tag> out.GoingTo=rules.latest(); </tag>
  </rule>
  <rule id="flightCities" scope="private">
    <one-of>
      <item> Chicago <tag> out="ORD"; </tag></item>
      <item> Boston <tag> out="BOS"; </tag></item>
      <item> Miami <tag> out="MIA"; </tag></item>
      <item> Dallas <tag> out="DFW"; </tag></item>
    </one-of>
  </rule>
</grammar>
In this case, I can create the grammar from a grxml file conforming to the Speech Recognition Grammar Specification 1.0
(http://www.w3.org/TR/speech-grammar/), and then use the Microsoft English Recognizer v8.0 to recognize English sentences.