/[zanavi_public1]/navit/navit/support/espeak/speak_lib.c
ZANavi

Contents of /navit/navit/support/espeak/speak_lib.c

Parent Directory Parent Directory | Revision Log Revision Log


Revision 2 - (show annotations) (download)
Fri Oct 28 21:19:04 2011 UTC (12 years, 5 months ago) by zoff99
File MIME type: text/plain
File size: 29066 byte(s)
import files
1 /***************************************************************************
2 * Copyright (C) 2005 to 2007 by Jonathan Duddington *
3 * email: jonsd@users.sourceforge.net *
4 * *
5 * This program is free software; you can redistribute it and/or modify *
6 * it under the terms of the GNU General Public License as published by *
7 * the Free Software Foundation; either version 3 of the License, or *
8 * (at your option) any later version. *
9 * *
10 * This program is distributed in the hope that it will be useful, *
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
13 * GNU General Public License for more details. *
14 * *
15 * You should have received a copy of the GNU General Public License *
16 * along with this program; if not, write see: *
17 * <http://www.gnu.org/licenses/>. *
18 ***************************************************************************/
19
20 #include "StdAfx.h"
21
22 #include "stdio.h"
23 #include "ctype.h"
24 #include "string.h"
25 #include "stdlib.h"
26 #include "wchar.h"
27 #include "locale.h"
28 #include <assert.h>
29 #include <time.h>
30
31 #include "speech.h"
32
33 #include <sys/stat.h>
34 #ifdef PLATFORM_WINDOWS
35 #include <windows.h>
36 #else
37 #include <unistd.h>
38 #endif
39
40 #include "speak_lib.h"
41 #include "phoneme.h"
42 #include "synthesize.h"
43 #include "voice.h"
44 #include "translate.h"
45 #include "debug.h"
46
47 #include "fifo.h"
48 #include "event.h"
49 #include "wave.h"
50
// Synthesis output buffer (16-bit samples stored as bytes); allocated in espeak_Initialize().
unsigned char *outbuf=NULL;
extern espeak_VOICE voice_selected;

// Marker events collected for the buffer currently being filled; see MarkerEvent().
espeak_EVENT *event_list=NULL;
int event_list_ix=0;        // number of events currently stored in event_list
int n_event_list;           // allocated capacity of event_list (entries)
long count_samples;         // samples generated so far in the current Synthesize() call
void* my_audio=NULL;        // opaque audio-device handle returned by wave_open()

static unsigned int my_unique_identifier=0;   // uid attached to events of the current utterance
static void* my_user_data=NULL;               // user pointer attached to events
static espeak_AUDIO_OUTPUT my_mode=AUDIO_OUTPUT_SYNCHRONOUS;  // output mode chosen at init
static int synchronous_mode = 1;              // 1 = speak synchronously, 0 = queue via the fifo
t_espeak_callback* synth_callback = NULL;     // application callback receiving audio + events
int (* uri_callback)(int, const char *, const char *) = NULL;   // SSML <audio> URI hook
int (* phoneme_callback)(const char *) = NULL;                  // phoneme trace hook

char path_home[N_PATH_HOME];    // this is the espeak-data directory
70
71 #ifdef USE_ASYNC
72
// Deliver one buffer of synthesized audio plus (at most) one event to the
// output selected by my_mode.  In PLAYBACK mode the audio is written to the
// wave device and the event is queued, retrying while the event buffer is
// full; in RETRIEVAL mode everything goes to the application callback.
// Returns 1 when synthesis must stop (the command fifo has been disabled,
// e.g. by espeak_Cancel), 0 otherwise.
// NOTE(review): the 'outbuf' parameter shadows the file-scope global of the
// same name; only the parameter is used here.
static int dispatch_audio(short* outbuf, int length, espeak_EVENT* event)
{//======================================================================
	ENTER("dispatch_audio");

	int a_wave_can_be_played = fifo_is_command_enabled();

#ifdef DEBUG_ENABLED
	SHOW("*** dispatch_audio > uid=%d, [write=%p (%d bytes)], sample=%d, a_wave_can_be_played = %d\n",
			(event) ? event->unique_identifier : 0, wave_test_get_write_buffer(), 2*length,
			(event) ? event->sample : 0,
			a_wave_can_be_played);
#endif

	switch(my_mode)
	{
	case AUDIO_OUTPUT_PLAYBACK:
	{
		if (outbuf && length && a_wave_can_be_played)
		{
			wave_write (my_audio, (char*)outbuf, 2*length);   // 2 bytes per sample
		}

		while(a_wave_can_be_played) {
			// TBD: some event are filtered here but some insight might be given
			// TBD: in synthesise.cpp for avoiding to create WORDs with size=0.
			// TBD: For example sentence "or ALT)." returns three words
			// "or", "ALT" and "".
			// TBD: the last one has its size=0.
			if (event && (event->type == espeakEVENT_WORD) && (event->length==0))
			{
				break;
			}
			espeak_ERROR a_error = event_declare(event);
			if (a_error != EE_BUFFER_FULL)
			{
				break;
			}
			// event queue full: wait 10 ms and retry (unless cancelled meanwhile)
			SHOW_TIME("dispatch_audio > EE_BUFFER_FULL\n");
			usleep(10000);
			a_wave_can_be_played = fifo_is_command_enabled();
		}
	}
	break;

	case AUDIO_OUTPUT_RETRIEVAL:
		if (synth_callback)
		{
			synth_callback(outbuf, length, event);
		}
		break;

	case AUDIO_OUTPUT_SYNCHRONOUS:
	case AUDIO_OUTPUT_SYNCH_PLAYBACK:
		break;
	}

	if (!a_wave_can_be_played)
	{
		SHOW_TIME("dispatch_audio > synth must be stopped!\n");
	}

	SHOW_TIME("LEAVE dispatch_audio\n");

	return (a_wave_can_be_played==0);   // 1 = stop synthesis
}
138
139
140
// Play one buffer of audio and declare each pending event from the global
// event_list (the 'event' parameter is not read — the global list is walked
// instead).  'the_write_pos' is the wave-device write position captured
// before synthesis started; it is added to each event's sample offset so the
// event fires when its sample is actually played.  The audio itself is only
// written on the first iteration (length is zeroed afterwards).
// Returns non-zero when synthesis must stop.
static int create_events(short* outbuf, int length, espeak_EVENT* event, uint32_t the_write_pos)
{//=====================================================================
	int finished;
	int i=0;

	// The audio data are written to the output device.
	// The list of events in event_list (index: event_list_ix) is read:
	// Each event is declared to the "event" object which stores them internally.
	// The event object is responsible of calling the external callback
	// as soon as the relevant audio sample is played.

	do
	{ // for each event
		espeak_EVENT* event;   // NOTE(review): shadows the unused parameter of the same name
		if (event_list_ix == 0)
		{
			event = NULL;   // no events at all: still dispatch the audio once
		}
		else
		{
			event = event_list + i;
#ifdef DEBUG_ENABLED
			SHOW("Synthesize: event->sample(%d) + %d = %d\n", event->sample, the_write_pos, event->sample + the_write_pos);
#endif
			event->sample += the_write_pos;   // rebase onto the device write position
		}
#ifdef DEBUG_ENABLED
		SHOW("*** Synthesize: i=%d (event_list_ix=%d), length=%d\n",i,event_list_ix,length);
#endif
		finished = dispatch_audio((short *)outbuf, length, event);
		length = 0; // the wave data are played once.
		i++;
	} while((i < event_list_ix) && !finished);
	return finished;
}
176
177
// Queue (or deliver via the callback) the pair of events that signal the end
// of an utterance: MSG_TERMINATED followed by LIST_TERMINATED, both tagged
// with the given uid/user_data.  In PLAYBACK mode the pair is queued on the
// event thread, retrying while its buffer is full; otherwise it is handed to
// the synth callback.  Returns the callback's result (0 when queued).
int sync_espeak_terminated_msg( uint unique_identifier, void* user_data)
{//=====================================================================
	ENTER("sync_espeak_terminated_msg");

	int finished=0;

	memset(event_list, 0, 2*sizeof(espeak_EVENT));

	event_list[0].type = espeakEVENT_MSG_TERMINATED;
	event_list[0].unique_identifier = unique_identifier;
	event_list[0].user_data = user_data;
	event_list[1].type = espeakEVENT_LIST_TERMINATED;
	event_list[1].unique_identifier = unique_identifier;
	event_list[1].user_data = user_data;

	if (my_mode==AUDIO_OUTPUT_PLAYBACK)
	{
		while(1)
		{
			espeak_ERROR a_error = event_declare(event_list);
			if (a_error != EE_BUFFER_FULL)
			{
				break;
			}
			SHOW_TIME("sync_espeak_terminated_msg > EE_BUFFER_FULL\n");
			usleep(10000);   // event queue full: wait 10 ms and retry
		}
	}
	else
	{
		if (synth_callback)
		{
			finished=synth_callback(NULL,0,event_list);
		}
	}
	return finished;
}
215
216 #endif
217
218
// Record the requested output mode and prepare the matching backend:
// PLAYBACK starts the async wave/event machinery (non-blocking calls),
// SYNCH_PLAYBACK opens the sound device directly via the wave generator,
// RETRIEVAL/SYNCHRONOUS hand buffers to the application callback.
static void select_output(espeak_AUDIO_OUTPUT output_type)
{//=======================================================
	my_mode = output_type;
	my_audio = NULL;
	synchronous_mode = 1;
	option_waveout = 1;   // inhibit portaudio callback from wavegen.cpp

	switch(my_mode)
	{
	case AUDIO_OUTPUT_PLAYBACK:
		synchronous_mode = 0;
#ifdef USE_ASYNC
		wave_init();
		wave_set_callback_is_output_enabled( fifo_is_command_enabled);
		my_audio = wave_open("alsa");
		event_init();
#endif
		break;

	case AUDIO_OUTPUT_RETRIEVAL:
		synchronous_mode = 0;
		break;

	case AUDIO_OUTPUT_SYNCHRONOUS:
		break;

	case AUDIO_OUTPUT_SYNCH_PLAYBACK:
		option_waveout = 0;   // let the wave generator drive the sound device itself
		WavegenInitSound();
		break;
	}
}   // end of select_output
251
252
253
254
// Return the size in bytes of 'filename', -2 if it names a directory, or 0
// if it cannot be stat'ed (e.g. it does not exist).  Note the in-band
// sentinels: callers must distinguish 0 (missing/empty) and -2 (directory).
int GetFileLength(const char *filename)
{//====================================
	struct stat statbuf;

	if(stat(filename,&statbuf) != 0)
		return(0);

	// use the standard S_ISDIR macro instead of masking st_mode by hand
	// (this was the intent of the commented-out line in the original)
	if(S_ISDIR(statbuf.st_mode))
		return(-2);   // a directory

	return(statbuf.st_size);
}   // end of GetFileLength
268
269
// Allocate 'size' bytes with malloc().  On failure a diagnostic is printed
// to stderr and NULL is returned — callers are expected to check.
char *Alloc(int size)
{//==================
	char *block = (char *)malloc(size);
	if(block == NULL)
		fprintf(stderr,"Can't allocate memory\n");  // I was told that size+1 fixes a crash on 64-bit systems
	return(block);
}
277
// Release memory obtained from Alloc().  Accepts NULL: free(NULL) is defined
// as a no-op by the C standard, so the former explicit guard was redundant
// and has been removed.
void Free(void *ptr)
{//=================
	free(ptr);
}
283
284
285
286 static void init_path(const char *path)
287 {//====================================
288 #ifdef PLATFORM_WINDOWS
289 HKEY RegKey;
290 unsigned long size;
291 unsigned long var_type;
292 char *env;
293 unsigned char buf[sizeof(path_home)-13];
294
295 if(path != NULL)
296 {
297 sprintf(path_home,"%s/espeak-data",path);
298 return;
299 }
300
301 if((env = getenv("ESPEAK_DATA_PATH")) != NULL)
302 {
303 sprintf(path_home,"%s/espeak-data",env);
304 if(GetFileLength(path_home) == -2)
305 return; // an espeak-data directory exists
306 }
307
308 buf[0] = 0;
309 RegOpenKeyExA(HKEY_LOCAL_MACHINE, "Software\\Microsoft\\Speech\\Voices\\Tokens\\eSpeak", 0, KEY_READ, &RegKey);
310 size = sizeof(buf);
311 var_type = REG_SZ;
312 RegQueryValueExA(RegKey, "path", 0, &var_type, buf, &size);
313
314 sprintf(path_home,"%s\\espeak-data",buf);
315
316 #else
317 char *env;
318
319 if(path != NULL)
320 {
321 snprintf(path_home,sizeof(path_home),"%s/espeak-data",path);
322 return;
323 }
324
325 // check for environment variable
326 if((env = getenv("ESPEAK_DATA_PATH")) != NULL)
327 {
328 snprintf(path_home,sizeof(path_home),"%s/espeak-data",env);
329 if(GetFileLength(path_home) == -2)
330 return; // an espeak-data directory exists
331 }
332
333 snprintf(path_home,sizeof(path_home),"%s/espeak-data",getenv("HOME"));
334 if(access(path_home,R_OK) != 0)
335 {
336 strcpy(path_home,PATH_ESPEAK_DATA);
337 }
338 #endif
339 }
340
// One-time engine setup: load configuration and phoneme data, initialise the
// wave generator at 22050 Hz, reset the selected voice and the speech
// parameter stack to their defaults.  Exits the process when espeak-data
// cannot be loaded at all; only warns on a version mismatch.  Returns 0.
static int initialise(void)
{//========================
	int param;
	int result;

	LoadConfig();
	WavegenInit(22050,0);   // 22050
	if((result = LoadPhData()) != 1)
	{
		if(result == -1)
		{
			fprintf(stderr,"Failed to load espeak-data\n");
			exit(1);
		}
		else
			fprintf(stderr,"Wrong version of espeak-data 0x%x (expects 0x%x) at %s\n",result,version_phdata,path_home);
	}

	memset(&voice_selected,0,sizeof(voice_selected));
	SetVoiceStack(NULL);
	SynthesizeInit();
	InitNamedata();

	// seed the parameter stack with compiled-in defaults
	for(param=0; param<N_SPEECH_PARAM; param++)
		param_stack[0].parameter[param] = param_defaults[param];

	return(0);
}
369
370
// Core synthesis loop: translate 'text' and repeatedly fill outbuf with
// sound, handing each filled buffer (plus its event list) to the output
// selected by my_mode.  'flags' carries the espeakCHARS_* encoding (low 3
// bits) and the espeakSSML/espeakPHONEMES/espeakENDPAUSE options.
// Returns EE_OK, or EE_INTERNAL_ERROR when espeak_Initialize() has not been
// called yet (buffers not allocated).
static espeak_ERROR Synthesize(unsigned int unique_identifier, const void *text, int flags)
{//========================================================================================
	// Fill the buffer with output sound
	int length;
	int finished = 0;
	int count_buffers = 0;
#ifdef USE_ASYNC
	uint32_t a_write_pos=0;
#endif

#ifdef DEBUG_ENABLED
	ENTER("Synthesize");
	if (text)
	{
		SHOW("Synthesize > uid=%d, flags=%d, >>>text=%s<<<\n", unique_identifier, flags, text);
	}
#endif

	if((outbuf==NULL) || (event_list==NULL))
		return(EE_INTERNAL_ERROR);   // espeak_Initialize()  has not been called

	// decode the option flags into the translator's globals
	option_multibyte = flags & 7;
	option_ssml = flags & espeakSSML;
	option_phoneme_input = flags & espeakPHONEMES;
	option_endpause = flags & espeakENDPAUSE;

	count_samples = 0;

#ifdef USE_ASYNC
	if(my_mode == AUDIO_OUTPUT_PLAYBACK)
	{
		// capture the device position so event sample offsets can be rebased
		a_write_pos = wave_get_write_position(my_audio);
	}
#endif

	if(translator == NULL)
	{
		SetVoiceByName("default");
	}

	SpeakNextClause(NULL,text,0);

	if(my_mode == AUDIO_OUTPUT_SYNCH_PLAYBACK)
	{
		// the wave generator plays directly; poll until it reports completion
		for(;;)
		{
#ifdef PLATFORM_WINDOWS
			Sleep(300);   // 0.3s
#else
#ifdef USE_NANOSLEEP
			struct timespec period;
			struct timespec remaining;
			period.tv_sec = 0;
			period.tv_nsec = 300000000;  // 0.3 sec
			nanosleep(&period,&remaining);
#else
			sleep(1);
#endif
#endif
			if(SynthOnTimer() != 0)
				break;
		}
		return(EE_OK);
	}

	for(;;)
	{
#ifdef DEBUG_ENABLED
		SHOW("Synthesize > %s\n","for (next)");
#endif
		out_ptr = outbuf;
		out_end = &outbuf[outbuf_size];
		event_list_ix = 0;
		WavegenFill(0);

		length = (out_ptr - outbuf)/2;   // bytes -> 16-bit sample count
		count_samples += length;
		event_list[event_list_ix].type = espeakEVENT_LIST_TERMINATED; // indicates end of event list
		event_list[event_list_ix].unique_identifier = my_unique_identifier;
		event_list[event_list_ix].user_data = my_user_data;

		count_buffers++;
		if (my_mode==AUDIO_OUTPUT_PLAYBACK)
		{
#ifdef USE_ASYNC
			finished = create_events((short *)outbuf, length, event_list, a_write_pos);
			length = 0; // the wave data are played once.
#endif
		}
		else
		{
			finished = synth_callback((short *)outbuf, length, event_list);
		}
		if(finished)
		{
			SpeakNextClause(NULL,0,2);  // stop
			break;
		}

		if(Generate(phoneme_list,&n_phoneme_list,1)==0)
		{
			if(WcmdqUsed() == 0)
			{
				// don't process the next clause until the previous clause has finished generating speech.
				// This ensures that <audio> tag (which causes end-of-clause) is at a sound buffer boundary

				event_list[0].type = espeakEVENT_LIST_TERMINATED;
				event_list[0].unique_identifier = my_unique_identifier;
				event_list[0].user_data = my_user_data;

				if(SpeakNextClause(NULL,NULL,1)==0)
				{
					// no more clauses: signal end-of-data to the output
#ifdef USE_ASYNC
					if (my_mode==AUDIO_OUTPUT_PLAYBACK)
					{
						dispatch_audio(NULL, 0, NULL); // TBD: test case
					}
					else
					{
						synth_callback(NULL, 0, event_list);  // NULL buffer ptr indicates end of data
					}
#else
					synth_callback(NULL, 0, event_list);  // NULL buffer ptr indicates end of data
#endif
					break;
				}
			}
		}
	}
	return(EE_OK);
}   //  end of Synthesize
502
#ifdef DEBUG_ENABLED
// Debug-only names for espeak_EVENT_TYPE values, indexed by event type;
// used by the SHOW() trace in MarkerEvent().
static const char* label[] = {
	"END_OF_EVENT_LIST",
	"WORD",
	"SENTENCE",
	"MARK",
	"PLAY",
	"END"};
#endif
512
513
// Append a marker event to event_list for the buffer currently being filled.
// 'char_position' packs the text position in its low 24 bits and the item
// length in the high 8 bits.  The audio position (ms) and sample index are
// derived from the samples generated so far plus the fill level of the
// current buffer.  Silently drops the event when the list is nearly full
// (two slots stay reserved for the terminator entries).
void MarkerEvent(int type, unsigned int char_position, int value, unsigned char *out_ptr)
{//======================================================================================
	// type: 1=word, 2=sentence, 3=named mark, 4=play audio, 5=end
	ENTER("MarkerEvent");
	espeak_EVENT *ep;
	double time;

	if((event_list == NULL) || (event_list_ix >= (n_event_list-2)))
		return;

	ep = &event_list[event_list_ix++];
	ep->type = (espeak_EVENT_TYPE)type;
	ep->unique_identifier = my_unique_identifier;
	ep->user_data = my_user_data;
	ep->text_position = char_position & 0xffffff;   // low 24 bits
	ep->length = char_position >> 24;               // high 8 bits

	// sample count -> milliseconds; (out_ptr - out_start)/2 converts the
	// byte fill level of the current buffer into 16-bit samples
	time = ((double)(count_samples + mbrola_delay + (out_ptr - out_start)/2)*1000.0)/samplerate;
	ep->audio_position = (int)(time);
	ep->sample = (count_samples + mbrola_delay + (out_ptr - out_start)/2);

#ifdef DEBUG_ENABLED
	SHOW("MarkerEvent > count_samples=%d, out_ptr=%x, out_start=0x%x\n",count_samples, out_ptr, out_start);
	SHOW("*** MarkerEvent > type=%s, uid=%d, text_pos=%d, length=%d, audio_position=%d, sample=%d\n",
			label[ep->type], ep->unique_identifier, ep->text_position, ep->length,
			ep->audio_position, ep->sample);
#endif

	// MARK and PLAY events carry a name (offset into namedata); others a number
	if((type == espeakEVENT_MARK) || (type == espeakEVENT_PLAY))
		ep->id.name = &namedata[value];
	else
		ep->id.number = value;
}  // end of MarkerEvent
547
548
549
550
// Synchronous entry point for speaking 'text': configure skipping to the
// requested start position (character/word/sentence), synthesize, and (in
// async builds) flush the audio device before returning.
// Returns the status from Synthesize().
espeak_ERROR sync_espeak_Synth(unsigned int unique_identifier, const void *text, size_t size,
		      unsigned int position, espeak_POSITION_TYPE position_type,
		      unsigned int end_position, unsigned int flags, void* user_data)
{//===========================================================================

#ifdef DEBUG_ENABLED
	ENTER("sync_espeak_Synth");
	SHOW("sync_espeak_Synth > position=%d, position_type=%d, end_position=%d, flags=%d, user_data=0x%x, text=%s\n", position, position_type, end_position, flags, user_data, text);
#endif

	espeak_ERROR aStatus;

	InitText(flags);
	my_unique_identifier = unique_identifier;
	my_user_data = user_data;

	// translate the requested start position into the matching skip counter
	switch(position_type)
	{
	case POS_CHARACTER:
		skip_characters = position;
		break;

	case POS_WORD:
		skip_words = position;
		break;

	case POS_SENTENCE:
		skip_sentences = position;
		break;

	}
	if(skip_characters || skip_words || skip_sentences)
		skipping_text = 1;

	end_character_position = end_position;

	aStatus = Synthesize(unique_identifier, text, flags);
#ifdef USE_ASYNC
	wave_flush(my_audio);
#endif

	SHOW_TIME("LEAVE sync_espeak_Synth");
	return aStatus;
}  //  end of sync_espeak_Synth
595
596
597
598
// Synchronous synthesis starting from a named SSML <mark>.  SSML parsing is
// forced on (flags | espeakSSML) so the mark can be located; text before the
// mark is skipped.  Returns the status from Synthesize().
espeak_ERROR sync_espeak_Synth_Mark(unsigned int unique_identifier, const void *text, size_t size,
			   const char *index_mark, unsigned int end_position,
			   unsigned int flags, void* user_data)
{//=========================================================================
	espeak_ERROR aStatus;

	InitText(flags);

	my_unique_identifier = unique_identifier;
	my_user_data = user_data;

	if(index_mark != NULL)
	{
		// remember which mark to skip to; strncpy0 guarantees termination
		strncpy0(skip_marker, index_mark, sizeof(skip_marker));
		skipping_text = 1;
	}

	end_character_position = end_position;


	aStatus = Synthesize(unique_identifier, text, flags | espeakSSML);
	SHOW_TIME("LEAVE sync_espeak_Synth_Mark");

	return (aStatus);
}  // end of sync_espeak_Synth_Mark
624
625
626
// Speak a key name synchronously: a single (possibly multi-byte UTF-8)
// character is spoken via sync_espeak_Char(); anything longer is spoken as a
// plain text string.
void sync_espeak_Key(const char *key)
{//==================================
	// symbolic name, symbolicname_character  - is there a system resource of symbolic names per language?
	int letter;
	int ix;

	ix = utf8_in(&letter,key);   // decode the first character, ix = its byte length
	if(key[ix] == 0)
	{
		// a single character
		sync_espeak_Char(letter);
		return;
	}

	my_unique_identifier = 0;
	my_user_data = NULL;
	Synthesize(0, key,0);   // speak key as a text string
}
645
646
647 void sync_espeak_Char(wchar_t character)
648 {//=====================================
649 // is there a system resource of character names per language?
650 char buf[80];
651 my_unique_identifier = 0;
652 my_user_data = NULL;
653
654 sprintf(buf,"<say-as interpret-as=\"tts:char\">&#%d;</say-as>",character);
655 Synthesize(0, buf,espeakSSML);
656 }
657
658
659
// Copy the caller's punctuation list into the translator's global
// option_punctlist, truncating to N_PUNCTLIST entries and forcing a
// terminating NUL (wcsncpy alone does not guarantee termination).
void sync_espeak_SetPunctuationList(const wchar_t *punctlist)
{//==========================================================
	// Set the list of punctuation which are spoken for "some".
	my_unique_identifier = 0;
	my_user_data = NULL;

	wcsncpy(option_punctlist, punctlist, N_PUNCTLIST);
	option_punctlist[N_PUNCTLIST-1] = 0;   // ensure termination after truncation
}  //  end of sync_espeak_SetPunctuationList
669
670
671
672
673 #pragma GCC visibility push(default)
674
675
// Public API: register the callback that receives synthesized audio buffers
// and their event lists; async builds also forward it to the event thread.
ESPEAK_API void espeak_SetSynthCallback(t_espeak_callback* SynthCallback)
{//======================================================================
	ENTER("espeak_SetSynthCallback");
	synth_callback = SynthCallback;
#ifdef USE_ASYNC
	event_set_callback(synth_callback);
#endif
}
684
// Public API: register the callback invoked for URIs found in SSML input
// (e.g. <audio> elements).
ESPEAK_API void espeak_SetUriCallback(int (* UriCallback)(int, const char*, const char *))
{//=======================================================================================
	ENTER("espeak_SetUriCallback");
	uri_callback = UriCallback;
}
690
691
// Public API: register the callback that receives phoneme trace text.
ESPEAK_API void espeak_SetPhonemeCallback(int (* PhonemeCallback)(const char*))
{//===========================================================================
	phoneme_callback = PhonemeCallback;
}
696
697 ESPEAK_API int espeak_Initialize(espeak_AUDIO_OUTPUT output_type, int buf_length, const char *path, int options)
698 {//=============================================================================================================
699 ENTER("espeak_Initialize");
700 int param;
701
702 // It seems that the wctype functions don't work until the locale has been set
703 // to something other than the default "C". Then, not only Latin1 but also the
704 // other characters give the correct results with iswalpha() etc.
705 #ifdef PLATFORM_RISCOS
706 setlocale(LC_CTYPE,"ISO8859-1");
707 #else
708 if(setlocale(LC_CTYPE,"en_US.UTF-8") == NULL)
709 {
710 if(setlocale(LC_CTYPE,"UTF-8") == NULL)
711 setlocale(LC_CTYPE,"");
712 }
713 #endif
714
715 init_path(path);
716 initialise();
717 select_output(output_type);
718
719 // buflength is in mS, allocate 2 bytes per sample
720 if(buf_length == 0)
721 buf_length = 200;
722 outbuf_size = (buf_length * samplerate)/500;
723 outbuf = (unsigned char*)realloc(outbuf,outbuf_size);
724 if((out_start = outbuf) == NULL)
725 return(EE_INTERNAL_ERROR);
726
727 // allocate space for event list. Allow 200 events per second.
728 // Add a constant to allow for very small buf_length
729 n_event_list = (buf_length*200)/1000 + 20;
730 if((event_list = (espeak_EVENT *)realloc(event_list,sizeof(espeak_EVENT) * n_event_list)) == NULL)
731 return(EE_INTERNAL_ERROR);
732
733 option_phonemes = 0;
734 option_phoneme_events = (options & 1);
735
736 SetVoiceByName("default");
737
738 for(param=0; param<N_SPEECH_PARAM; param++)
739 param_stack[0].parameter[param] = param_defaults[param];
740
741 SetParameter(espeakRATE,170,0);
742 SetParameter(espeakVOLUME,100,0);
743 SetParameter(espeakCAPITALS,option_capitals,0);
744 SetParameter(espeakPUNCTUATION,option_punctuation,0);
745 SetParameter(espeakWORDGAP,0,0);
746 DoVoiceChange(voice);
747
748 #ifdef USE_ASYNC
749 fifo_init();
750 #endif
751
752 return(samplerate);
753 }
754
755
756
// Public API: speak 'text'.  In synchronous mode this blocks inside
// sync_espeak_Synth(); otherwise (async builds) a text command plus a
// "terminated msg" command sharing the same uid are queued on the fifo as a
// single transaction.  *unique_identifier receives the utterance uid (0 in
// synchronous mode).  Returns EE_OK on success, an error code otherwise.
ESPEAK_API espeak_ERROR espeak_Synth(const void *text, size_t size,
				     unsigned int position,
				     espeak_POSITION_TYPE position_type,
				     unsigned int end_position, unsigned int flags,
				     unsigned int* unique_identifier, void* user_data)
{//=====================================================================================
#ifdef DEBUG_ENABLED
	ENTER("espeak_Synth");
	SHOW("espeak_Synth > position=%d, position_type=%d, end_position=%d, flags=%d, user_data=0x%x, text=%s\n", position, position_type, end_position, flags, user_data, text);
#endif

	espeak_ERROR a_error=EE_INTERNAL_ERROR;
	static unsigned int temp_identifier;

	if (unique_identifier == NULL)
	{
		unique_identifier = &temp_identifier;   // caller doesn't want the uid back
	}
	*unique_identifier = 0;

	if(synchronous_mode)
	{
		return(sync_espeak_Synth(0,text,size,position,position_type,end_position,flags,user_data));
	}

#ifdef USE_ASYNC
	// Create the text command
	t_espeak_command* c1 = create_espeak_text(text, size, position, position_type, end_position, flags, user_data);

	// Retrieve the unique identifier
	*unique_identifier = c1->u.my_text.unique_identifier;

	// Create the "terminated msg" command (same uid)
	t_espeak_command* c2 = create_espeak_terminated_msg(*unique_identifier, user_data);

	// Try to add these 2 commands (single transaction)
	if (c1 && c2)
	{
		a_error = fifo_add_commands(c1, c2);
		if (a_error != EE_OK)
		{
			delete_espeak_command(c1);
			delete_espeak_command(c2);
			c1=c2=NULL;
		}
	}
	else
	{
		// one allocation failed: discard both (delete accepts NULL)
		delete_espeak_command(c1);
		delete_espeak_command(c2);
	}

#endif
	return a_error;
}  //  end of espeak_Synth
812
813
814
// Public API: speak 'text' starting from the named SSML <mark>.  Mirrors
// espeak_Synth(): synchronous mode blocks in sync_espeak_Synth_Mark();
// async builds queue a mark command plus a "terminated msg" command with the
// same uid as a single fifo transaction.
ESPEAK_API espeak_ERROR espeak_Synth_Mark(const void *text, size_t size,
					  const char *index_mark,
					  unsigned int end_position,
					  unsigned int flags,
					  unsigned int* unique_identifier,
					  void* user_data)
{//=========================================================================
#ifdef DEBUG_ENABLED
	ENTER("espeak_Synth_Mark");
	SHOW("espeak_Synth_Mark > index_mark=%s, end_position=%d, flags=%d, text=%s\n", index_mark, end_position, flags, text);
#endif

	espeak_ERROR a_error=EE_OK;
	static unsigned int temp_identifier;

	if (unique_identifier == NULL)
	{
		unique_identifier = &temp_identifier;   // caller doesn't want the uid back
	}
	*unique_identifier = 0;

	if(synchronous_mode)
	{
		return(sync_espeak_Synth_Mark(0,text,size,index_mark,end_position,flags,user_data));
	}

#ifdef USE_ASYNC
	// Create the mark command
	t_espeak_command* c1 = create_espeak_mark(text, size, index_mark, end_position,
						flags, user_data);

	// Retrieve the unique identifier
	*unique_identifier = c1->u.my_mark.unique_identifier;

	// Create the "terminated msg" command (same uid)
	t_espeak_command* c2 = create_espeak_terminated_msg(*unique_identifier, user_data);

	// Try to add these 2 commands (single transaction)
	if (c1 && c2)
	{
		a_error = fifo_add_commands(c1, c2);
		if (a_error != EE_OK)
		{
			delete_espeak_command(c1);
			delete_espeak_command(c2);
			c1=c2=NULL;
		}
	}
	else
	{
		// one allocation failed: discard both (delete accepts NULL)
		delete_espeak_command(c1);
		delete_espeak_command(c2);
	}

#endif
	return a_error;
}  //  end of espeak_Synth_Mark
872
873
874
// Public API: speak the name of a keyboard key.  Synchronous mode speaks it
// immediately; async builds queue a key command on the fifo.
ESPEAK_API espeak_ERROR espeak_Key(const char *key)
{//================================================
	ENTER("espeak_Key");
	// symbolic name, symbolicname_character  - is there a system resource of symbolicnames per language

	espeak_ERROR a_error = EE_OK;

	if(synchronous_mode)
	{
		sync_espeak_Key(key);
		return(EE_OK);
	}

#ifdef USE_ASYNC
	t_espeak_command* c = create_espeak_key( key, NULL);
	a_error = fifo_add_command(c);
	if (a_error != EE_OK)
	{
		delete_espeak_command(c);   // not queued: avoid leaking the command
	}

#endif
	return a_error;
}
899
900
// Public API: speak a single character.  Synchronous mode speaks it
// immediately; async builds queue a char command on the fifo.
ESPEAK_API espeak_ERROR espeak_Char(wchar_t character)
{//===========================================
	ENTER("espeak_Char");
	// is there a system resource of character names per language?

#ifdef USE_ASYNC
	espeak_ERROR a_error;

	if(synchronous_mode)
	{
		sync_espeak_Char(character);
		return(EE_OK);
	}

	t_espeak_command* c = create_espeak_char( character, NULL);
	a_error = fifo_add_command(c);
	if (a_error != EE_OK)
	{
		delete_espeak_command(c);   // not queued: avoid leaking the command
	}
	return a_error;
#else
	sync_espeak_Char(character);
	return(EE_OK);
#endif
}
927
928
// Public API: select a voice by name.  Voice changes are applied directly —
// the queued variant below is disabled via '#ifdef deleted'.
ESPEAK_API espeak_ERROR espeak_SetVoiceByName(const char *name)
{//============================================================
	ENTER("espeak_SetVoiceByName");

//#ifdef USE_ASYNC
	// I don't think there's a need to queue change voice requests
#ifdef deleted
	espeak_ERROR a_error;

	if(synchronous_mode)
	{
		return(SetVoiceByName(name));
	}

	t_espeak_command* c = create_espeak_voice_name(name);
	a_error = fifo_add_command(c);
	if (a_error != EE_OK)
	{
		delete_espeak_command(c);
	}
	return a_error;
#else
	return(SetVoiceByName(name));
#endif
}  //  end of espeak_SetVoiceByName
954
955
956
// Public API: select a voice matching the given property filter.  Like
// espeak_SetVoiceByName(), the change is applied directly — the queued
// variant below is disabled via '#ifdef deleted'.
ESPEAK_API espeak_ERROR espeak_SetVoiceByProperties(espeak_VOICE *voice_selector)
{//==============================================================================
	ENTER("espeak_SetVoiceByProperties");

//#ifdef USE_ASYNC
#ifdef deleted
	espeak_ERROR a_error;

	if(synchronous_mode)
	{
		return(SetVoiceByProperties(voice_selector));
	}

	t_espeak_command* c = create_espeak_voice_spec( voice_selector);
	a_error = fifo_add_command(c);
	if (a_error != EE_OK)
	{
		delete_espeak_command(c);
	}
	return a_error;
#else
	return(SetVoiceByProperties(voice_selector));
#endif
}  //  end of espeak_SetVoiceByProperties
981
982
983 ESPEAK_API int espeak_GetParameter(espeak_PARAMETER parameter, int current)
984 {//========================================================================
985 ENTER("espeak_GetParameter");
986 // current: 0=default value, 1=current value
987 if(current)
988 {
989 return(param_stack[0].parameter[parameter]);
990 }
991 else
992 {
993 return(param_defaults[parameter]);
994 }
995 } // end of espeak_GetParameter
996
997
// Public API: set a speech parameter ('relative' non-zero adjusts from the
// current value).  Synchronous mode applies it at once; async builds queue a
// parameter command so the change takes effect in order with queued speech.
ESPEAK_API espeak_ERROR espeak_SetParameter(espeak_PARAMETER parameter, int value, int relative)
{//=============================================================================================
	ENTER("espeak_SetParameter");

#ifdef USE_ASYNC
	espeak_ERROR a_error;

	if(synchronous_mode)
	{
		SetParameter(parameter,value,relative);
		return(EE_OK);
	}

	t_espeak_command* c = create_espeak_parameter(parameter, value, relative);

	a_error = fifo_add_command(c);
	if (a_error != EE_OK)
	{
		delete_espeak_command(c);   // not queued: avoid leaking the command
	}
	return a_error;
#else
	SetParameter(parameter,value,relative);
	return(EE_OK);
#endif
}
1024
1025
// Public API: set which punctuation characters are spoken.  Synchronous mode
// applies the list at once; async builds queue it on the fifo so it takes
// effect in order with queued speech.
ESPEAK_API espeak_ERROR espeak_SetPunctuationList(const wchar_t *punctlist)
{//================================================================
	ENTER("espeak_SetPunctuationList");
	// Set the list of punctuation which are spoken for "some".

#ifdef USE_ASYNC
	espeak_ERROR a_error;

	if(synchronous_mode)
	{
		sync_espeak_SetPunctuationList(punctlist);
		return(EE_OK);
	}

	t_espeak_command* c = create_espeak_punctuation_list( punctlist);
	a_error = fifo_add_command(c);
	if (a_error != EE_OK)
	{
		delete_espeak_command(c);   // not queued: avoid leaking the command
	}
	return a_error;
#else
	sync_espeak_SetPunctuationList(punctlist);
	return(EE_OK);
#endif
}  //  end of espeak_SetPunctuationList
1052
1053
1054 ESPEAK_API void espeak_SetPhonemeTrace(int value, FILE *stream)
1055 {//============================================================
1056 ENTER("espeak_SetPhonemes");
1057 /* Controls the output of phoneme symbols for the text
1058 value=0 No phoneme output (default)
1059 value=1 Output the translated phoneme symbols for the text
1060 value=2 as (1), but also output a trace of how the translation was done (matching rules and list entries)
1061 */
1062 option_phonemes = value;
1063 f_trans = stream;
1064 if(stream == NULL)
1065 f_trans = stderr;
1066
1067 } // end of espeak_SetPhonemes
1068
1069
// Public API: compile the pronunciation dictionary for the current voice's
// language, reading source files from 'path' and writing messages to 'log'.
ESPEAK_API void espeak_CompileDictionary(const char *path, FILE *log, int flags)
{//=============================================================================
	ENTER("espeak_CompileDictionary");
	CompileDictionary(path, dictionary_name, log, NULL, flags);
}  //  end of espeak_CompileDirectory
1075
1076
// Public API: stop synthesis and discard everything queued.  Async builds
// flush the command fifo and pending events and close the playback device;
// non-async builds only reset the echo parameter.  Always returns EE_OK.
ESPEAK_API espeak_ERROR espeak_Cancel(void)
{//===============================
#ifdef USE_ASYNC
	ENTER("espeak_Cancel");
	fifo_stop();
	event_clear_all();

	if(my_mode == AUDIO_OUTPUT_PLAYBACK)
	{
		wave_close(my_audio);
	}
	SHOW_TIME("espeak_Cancel > LEAVE");
#endif
	embedded_value[EMBED_T] = 0;    // reset echo for pronunciation announcements
	return EE_OK;
}  //  end of espeak_Cancel
1093
1094
// Public API: return 1 while audio is playing or commands are queued, else 0.
// Always 0 in non-async builds, where synthesis calls block until complete.
ESPEAK_API int espeak_IsPlaying(void)
{//==================================
//	ENTER("espeak_IsPlaying");
#ifdef USE_ASYNC
	if((my_mode == AUDIO_OUTPUT_PLAYBACK) && wave_is_busy(my_audio))
		return(1);

	return(fifo_is_busy());
#else
	return(0);
#endif
}  //  end of espeak_IsPlaying
1107
1108
// Public API: block until all queued speech has finished playing.
// A no-op in non-async builds.  Always returns EE_OK.
ESPEAK_API espeak_ERROR espeak_Synchronize(void)
{//=============================================
#ifdef USE_ASYNC
	SHOW_TIME("espeak_Synchronize > ENTER");
	while (espeak_IsPlaying())
	{
		usleep(20000);   // poll every 20 ms
	}
#endif
	SHOW_TIME("espeak_Synchronize > LEAVE");
	return EE_OK;
}  //  end of espeak_Synchronize
1121
1122
extern void FreePhData(void);

// Public API: shut the engine down — stop and tear down the async machinery,
// close the audio device, and release the buffers allocated by
// espeak_Initialize().  Always returns EE_OK.
ESPEAK_API espeak_ERROR espeak_Terminate(void)
{//===========================================
	ENTER("espeak_Terminate");
#ifdef USE_ASYNC
	fifo_stop();
	fifo_terminate();
	event_terminate();

	if(my_mode == AUDIO_OUTPUT_PLAYBACK)
	{
		wave_close(my_audio);
		wave_terminate();
	}

#endif
	Free(event_list);
	event_list = NULL;   // clear so a later call cannot double-free
	Free(outbuf);
	outbuf = NULL;
	FreePhData();

	return EE_OK;
}  //  end of espeak_Terminate
1148
// Public API: return the library version string (static storage; callers
// must not free or modify it).
ESPEAK_API const char *espeak_Info(void)
{//=======================================
	return(version_string);
}
1153
1154 #pragma GCC visibility pop
1155
1156

   
Visit the ZANavi Wiki