FileNotFoundError: [Errno 2] No such file or directory: 'in_out_put/live_data.csv' — how do I fix this?

Asked 2 years ago, Updated 2 years ago, 274 views

I was working on an AI program in Jupyter Notebook under Anaconda.
When I ran the following code, I got an error and I am having trouble.
I am a beginner with no programming experience.
Please let me know.
Thank you for your cooperation.

Using learned AI, predict whether it is time to buy (up) or sell (down) from the current market price

# 9. Import the required libraries
print("[Start Import Library]")
import numpy as np
import pandas as pd  # original had a typo: "import pandas aspd"
import tensorflow as tf
# from tensorflow.keras.models import load_model, Model
from keras.models import load_model, Model
print("[Library Import Complete]")

# [Adjustment required] Number of candles (legs) used for prediction (the most recent 24)
m_recent = 24

# Prepare the most recent price-movement data
print("[Last price movement data read start]")  # show that loading has started
# [Adjustment required] Read the most recent price data. This raises
# FileNotFoundError if 'in_out_put/live_data.csv' does not exist relative
# to the notebook's current working directory.
data = pd.read_csv("in_out_put/live_data.csv", encoding="shift-jis")
data = data.loc[:, ["Low", "Close", "High"]]  # keep only the "Low", "Close", "High" columns
X = []  # container for the model input
print("[Last price movement data read completed]")  # show that loading has finished

print("[Recent price movement data construction start]")  # start building the AI input
# Append the last m_recent rows of "Low", "Close", "High" to X.
# (The original line had unbalanced brackets; iloc[-m_recent:] selects exactly
# the last 24 rows, avoiding the off-by-one of an inclusive .loc label slice.)
X.append(data.iloc[-m_recent:][["Low", "Close", "High"]])
X_live = np.array(X)  # numpy array of shape (1, m_recent, 3)
print(X_live)  # show X_live
print("[Last price movement data construction completed]")  # AI input ready

# Load the trained AI
print("[Start loading learned AI]")  # show that model loading has started
model = load_model('in_out_put/model_epochs_10.h5')  # [Adjustment required] saved model path
print("[Learned AI Load Completed]")  # model loaded

# Run a prediction on the most recent data
print("[Start prediction using the most recent price movement data]")
predicted = model.predict(X_live)  # X_live is the model input
print("[Prediction completed using the latest price movement data]")

# Show the prediction result
print("================================================")  # decorative separator
if predicted[0, 0] >= 0.5:  # probability of rising is 50% or higher
    # (original had a NameError: "predicated" instead of "predicted")
    # {:4g} formats the certainty to 4 significant digits
    print("Predicted results:", '{:4g}'.format(predicted[0, 0] * 100), "% chance of rising")
else:  # otherwise the model is at least 50% sure the price will drop
    print("Predicted results:", '{:4g}'.format(predicted[0, 1] * 100), "% chance to drop")
print("================================================")  # decorative separator

Output

[Start importing libraries]
Library Import Complete
[Recent price movement data read]
---------------------------------------------------------------------------
FileNotFoundError Traceback (most recent call last)
<ipython-input-2-80fce524d42d>in<module>
     12# 直 Preparation of the most recent price movement data
     13 print("[Last price movement data read start]US>")# Display load start
--->14 data=pd.read_csv("in_out_put/live_data.csv", encoding="shift-jis")#[Adjustment Required] Read the most recent price movement data
     15 data=data.loc[:,["Low", "Close", "High"]]# Extract all rows (:) and only three columns are "Low", "Close", and "High"
     16 X = [ ] # First, prepare an empty X box

~\anaconda3\envs\ai_aki_maki\lib\site-packages\pandas\io\parsers.py in read_csv(filepath_or_buffer, sep, delimiter, header, names, index_col, usecols, squeeze, prefix, mangle_dupe_cols, dtype, engine, converters, true_values, false_values, skipinitialspace, skiprows, skipfooter, nrows, na_values, keep_default_na, na_filter, verbose, skip_blank_lines, parse_dates, infer_datetimage_format, keep_date_col, date_parser, dayfirst, cache_dates, iterator, chunksize, compression, thousands, critical, lineterminator, quote, double quote, escapecar, comment, encoding, error_body_loads, word_loads, word_loads, word_loads, word
    686     )
    687 
-->688 return_read (filepath_or_buffer, kwds)
    689 
    690 

~\anaconda3\envs\ai_aki_maki\lib\site-packages\pandas\io\parsers.py in_read (filepath_or_buffer, kwds)
    452 
    453#Create the parser.
-->454 parser=TextFileReader(fp_or_buf,**kwds)
    455 
    456 if chunksize oriterator:

~\anaconda3\envs\ai_aki_maki\lib\site-packages\pandas\io\parsers.py in __init__(self, f, engine, **kwds)
    946self.options["has_index_names"] = kwds["has_index_names"]
    947 
-->948self._make_engine(self.engine)
    949 
    950 def close (self):

~\anaconda3\envs\ai_aki_maki\lib\site-packages\pandas\io\parsers.py in_make_engine(self,engine)
   1178 def_make_engine(self, engine="c"):
   1179 if engine=="c":
->1180self._engine=CParserWrapper (self.f,**self.options)
   1181 else:
   1182 if engine=="python":

~\anaconda3\envs\ai_aki_maki\lib\site-packages\pandas\io\parsers.py in_init__(self, src, **kwds)
   1991 if kwds.get ("compression") is None and encoding:
   1992 if isinstance(src, str):
- > 1993 src = open(src, "rb")
   1994 self.handles.append(src)
   1995 

FileNotFoundError: [Errno2] No such file or directory: 'in_out_put/live_data.csv'
'''

python

2022-09-30 22:01

1 Answers

If you don't know where the file is, you can check it as follows.

 from pathlib import Path

# US>Search below the current directory (probably will take some time)
cur=Path('.')
for fin cur.glob('**/live_data.csv'):
    print(f)

Or you can follow from below the home directory

 from pathlib import Path

# Search below the home directory (it should take time)
home=Path.home()
for fin home.glob('**/live_data.csv'):
    print(f)

Example of handling the first file you find when you know the directory location

# NOTE(review): assumes `cur` (a pathlib.Path for the current directory) was
# defined by the earlier snippet — confirm before running this one standalone.
# Replace 'some directory path' with the real location of the folder.
folder = cur / 'some directory path' / 'in_out_put'

# Unpack the single match; this raises ValueError if there is not exactly
# one *.json file in the folder.
fname, = folder.glob('*.json')
import pandas as pd  # original had a typo: "import pandas aspd"
df = pd.read_json(fname, orient='index')

If you write "in_out_put/live_data.csv" as in the code in the question, it is treated as a path relative to the current directory (the first code in this answer searches below the current directory).

If the file is on the Desktop, you can write it as follows
(the code is deliberately verbose for clarity; parts of it can be omitted):

 from pathlib import Path
desktop=Path.home()/'Desktop'
print(f'path:{desktop}\texist? {desktop.exists()}')

fname = desktop / 'in_out_put' / 'live_data.csv'
print(f'path:{fname}\texist?{fname.exists()}')

iffname.exists():
    df=pd.read_csv(fname, encoding="shift-jis")
    display(df)


2022-09-30 22:01

If you have any answers or tips


© 2024 OneMinuteCode. All rights reserved.