The following sample code is from
https://github.com/daitan-innovation/cnn-audio-denoiser
Here it is:
from mozilla_common_voice import MozillaCommonVoiceDataset
from Urban_sound_8K import UrbanSound8K
from dataset import Dataset
import warnings
warnings.filterwarnings(action='ignore')
mozilla_basepath=r'C:\Users\username\cn-audio-denoiser-master\data_processing\mozilla_common_voice'
urbansound_basepath=r'C:\Users\username\cn-audio-denoiser-master\data_processing\UrbanSound8K'
mcv=MozillaCommonVoiceDataset(mozilla_basepath,val_dataset_size=1000)
clean_train_filenames, clean_val_filenames = mcv.get_train_val_filenames()
Using "Go to Definition" on mcv.get_train_val_filenames() in the code above leads into the mozilla_common_voice.py file, which contains the following:
import os

import numpy as np
import pandas as pd

np.random.seed(999)


class MozillaCommonVoiceDataset:
    def __init__(self, basepath, *, val_dataset_size):
        self.basepath = basepath
        self.val_dataset_size = val_dataset_size

    def _get_common_voice_filenames(self, dataframe_name='train.tsv'):
        # read the Common Voice metadata TSV and collect the clip filenames
        mozilla_metadata = pd.read_csv(os.path.join(self.basepath, dataframe_name), sep='\t')
        clean_files = mozilla_metadata['path'].values
        np.random.shuffle(clean_files)
        print("Total number of training examples:", len(clean_files))
        return clean_files

    def get_train_val_filenames(self):
        clean_files = self._get_common_voice_filenames(dataframe_name='train.tsv')
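The quoted file stops at that point. Judging from the "# resolve full path" comment visible at line 23 of the traceback below, the method presumably goes on to turn each filename into a full path and to split off val_dataset_size files for validation. A rough sketch of that continuation, under those assumptions (the clips/ subfolder name and the split logic are my guesses, not the repository's exact code):

        # hypothetical continuation: resolve full paths, then split off the validation set
        clean_files = [os.path.join(self.basepath, 'clips', f) for f in clean_files]
        clean_val_filenames = clean_files[:self.val_dataset_size]
        clean_train_filenames = clean_files[self.val_dataset_size:]
        return clean_train_filenames, clean_val_filenames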
When I run the program, it progresses until line 21 and then line 14 of mozilla_common_voice.py (the lines flagged in the traceback below), where it fails.
The following error is raised:
------------------------------------------------------------------------------------------------
FileNotFoundError Traceback (most recent call last)
<ipython-input-10-70173126a5a3> in <module>
     11
     12 mcv = MozillaCommonVoiceDataset(mozilla_basepath, val_dataset_size=1000)
---> 13 clean_train_filenames, clean_val_filenames = mcv.get_train_val_filenames()
     14
     15 us8K = UrbanSound8K(urbansound_basepath, val_dataset_size=200)

~\mozilla_common_voice.py in get_train_val_filenames(self)
     19
     20     def get_train_val_filenames(self):
---> 21         clean_files = self._get_common_voice_filenames(dataframe_name='train.tsv')
     22
     23         # resolve full path

~\mozilla_common_voice.py in _get_common_voice_filenames(self, dataframe_name)
     12
     13     def _get_common_voice_filenames(self, dataframe_name='train.tsv'):
---> 14         mozilla_metadata = pd.read_csv(os.path.join(self.basepath, dataframe_name), sep='\t')
     15         clean_files = mozilla_metadata['path'].values
     16         np.random.shuffle(clean_files)

~\AppData\Roaming\Python\Python38\site-packages\pandas\io\parsers.py in read_csv(filepath_or_buffer, sep, delimiter, header, names, index_col, usecols, squeeze, prefix, mangle_dupe_cols, dtype, engine, converters, true_values, false_values, skipinitialspace, skiprows, skipfooter, nrows, na_values, keep_default_na, na_filter, verbose, skip_blank_lines, parse_dates, infer_datetime_format, keep_date_col, date_parser, dayfirst, cache_dates, iterator, chunksize, compression, thousands, decimal, lineterminator, quotechar, quoting, doublequote, escapechar, comment, encoding, dialect, error_bad_lines, warn_bad_lines, delim_whitespace, low_memory, memory_map, float_precision)
    684         )
    685
--> 686         return _read(filepath_or_buffer, kwds)
    687
    688

~\AppData\Roaming\Python\Python38\site-packages\pandas\io\parsers.py in _read(filepath_or_buffer, kwds)
    450
    451     # Create the parser.
--> 452     parser = TextFileReader(fp_or_buf, **kwds)
    453
    454     if chunksize or iterator:

~\AppData\Roaming\Python\Python38\site-packages\pandas\io\parsers.py in __init__(self, f, engine, **kwds)
    934             self.options["has_index_names"] = kwds["has_index_names"]
    935
--> 936         self._make_engine(self.engine)
    937
    938     def close(self):

~\AppData\Roaming\Python\Python38\site-packages\pandas\io\parsers.py in _make_engine(self, engine)
   1166     def _make_engine(self, engine="c"):
   1167         if engine == "c":
-> 1168             self._engine = CParserWrapper(self.f, **self.options)
   1169         else:
   1170             if engine == "python":

~\AppData\Roaming\Python\Python38\site-packages\pandas\io\parsers.py in __init__(self, src, **kwds)
   1996         kwds["usecols"] = self.usecols
   1997
-> 1998         self._reader = parsers.TextReader(src, **kwds)
   1999         self.unnamed_cols = self._reader.unnamed_cols
   2000

pandas\_libs\parsers.pyx in pandas._libs.parsers.TextReader.__cinit__()

pandas\_libs\parsers.pyx in pandas._libs.parsers.TextReader._setup_parser_source()

FileNotFoundError: [Errno 2] No such file or directory: 'C:\\Users\\Username\\cn-audio-denoiser-master\\data_processing\\mozilla_common_voice\\train.tsv'
The above error appears.
What I do not understand is this: mozilla_common_voice is a .py file, so why does the error complain about a train.tsv file under the mozilla_common_voice path, as if a .tsv file existed inside the .py file?
Please explain this mystery and tell me how to resolve the FileNotFoundError.
The program is long and I may not have pasted enough of the source code, so I will add more details in the comments as needed.
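For reference, the file that pd.read_csv actually tries to open is built with os.path.join(self.basepath, dataframe_name), so it is looked up on disk under the basepath directory, not inside the .py file. A minimal diagnostic sketch (not part of the repository code; it reuses the mozilla_basepath variable from the snippet above) to confirm which path is being opened:

import os

expected = os.path.join(mozilla_basepath, 'train.tsv')
print(expected)                  # the exact file pd.read_csv tries to open
print(os.path.exists(expected))  # prints False when the FileNotFoundError occurs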
python python3 anaconda anaconda3
The repository's README.md has the following wording:

Dataset
Part of the database used to train the original system is now available to download.
The zip file contains 1 training file (that is 10% of the data used to train the system), a validation file, and two audio files (not included in the training files) used to validate the model.

The train.tsv in question is probably in this ZIP file. However, the download link is broken at this time (a dead link).
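If the zip mentioned in the README is unavailable, one possible workaround (my suggestion, not something the repository documents) is to download a Common Voice release directly from Mozilla (https://commonvoice.mozilla.org/), which ships a train.tsv together with a clips/ folder, extract it, and point mozilla_basepath at the extracted directory. A small sanity check before re-running the notebook, assuming that layout:

import os

import pandas as pd

# replace with the folder that actually contains train.tsv and clips/ after extraction
mozilla_basepath = r'C:\Users\username\cn-audio-denoiser-master\data_processing\mozilla_common_voice'

tsv_path = os.path.join(mozilla_basepath, 'train.tsv')
assert os.path.exists(tsv_path), f'train.tsv not found at {tsv_path}'

# Common Voice metadata is tab-separated; the "path" column holds the clip filenames
metadata = pd.read_csv(tsv_path, sep='\t')
print(metadata.columns.tolist())
print(metadata['path'].head())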