Saturday, August 10, 2019

Project: Data Analysis and Visualizations and Predicting Future Energy Consumption using LSTM Predicting Values 2 month Later Accurately RNN

Energy

Hourly Energy Consumption

Step 1:

Import Library

In [146]:
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import pprint
%matplotlib inline
In [81]:
df = pd.read_csv("AEP_hourly.csv")
print("="*50)
print("First Five Rows ","\n")
print(df.head(2),"\n")

print("="*50)
print("Information About Dataset","\n")
print(df.info(),"\n")

print("="*50)
print("Describe the Dataset ","\n")
print(df.describe(),"\n")

print("="*50)
print("Null Values t ","\n")
print(df.isnull().sum(),"\n")
==================================================
First Five Rows  

              Datetime   AEP_MW
0  2004-12-31 01:00:00  13478.0
1  2004-12-31 02:00:00  12865.0 

==================================================
Information About Dataset 

<class 'pandas.core.frame.DataFrame'>
RangeIndex: 121273 entries, 0 to 121272
Data columns (total 2 columns):
Datetime    121273 non-null object
AEP_MW      121273 non-null float64
dtypes: float64(1), object(1)
memory usage: 1.9+ MB
None 

==================================================
Describe the Dataset  

              AEP_MW
count  121273.000000
mean    15499.513717
std      2591.399065
min      9581.000000
25%     13630.000000
50%     15310.000000
75%     17200.000000
max     25695.000000 

==================================================
Null Values t  

Datetime    0
AEP_MW      0
dtype: int64 

Step 2:

Reformat the Date Time Columns

In [82]:
# Extract all Data Like Year MOnth Day Time etc
dataset = df
dataset["Month"] = pd.to_datetime(df["Datetime"]).dt.month
dataset["Year"] = pd.to_datetime(df["Datetime"]).dt.year
dataset["Date"] = pd.to_datetime(df["Datetime"]).dt.date
dataset["Time"] = pd.to_datetime(df["Datetime"]).dt.time
dataset["Week"] = pd.to_datetime(df["Datetime"]).dt.week
dataset["Day"] = pd.to_datetime(df["Datetime"]).dt.day_name()
dataset = df.set_index("Datetime")
dataset.index = pd.to_datetime(dataset.index)
dataset.head(1)
Out[82]:
AEP_MW Month Year Date Time Week Day
Datetime
2004-12-31 01:00:00 13478.0 12 2004 2004-12-31 01:00:00 53 Friday

Step 3:

In [96]:
# How many Unique Year do we Have in Dataset 
print(df.Year.unique(),"\n")
print("Total Number of Unique Year", df.Year.nunique(), "\n")
[2004 2005 2006 2007 2008 2009 2010 2011 2012 2013 2014 2015 2016 2017
 2018] 

Total Number of Unique Year 15 

Lets us see the energy consumption Each Year

In [304]:
from matplotlib import style

fig = plt.figure()
ax1 = plt.subplot2grid((1,1), (0,0))

style.use('ggplot')

sns.lineplot(x=dataset["Year"], y=dataset["AEP_MW"], data=df)
sns.set(rc={'figure.figsize':(15,6)})

plt.title("Energy consumptionnin Year 2004")
plt.xlabel("Date")
plt.ylabel("Energy in MW")
plt.grid(True)
plt.legend()

for label in ax1.xaxis.get_ticklabels():
    label.set_rotation(90)


plt.title("Energy Consumption According to Year")
No handles with labels found to put in legend.
Out[304]:
Text(0.5, 1.0, 'Energy Consumption According to Year')
In [339]:
from matplotlib import style


fig = plt.figure()

ax1= fig.add_subplot(311)
ax2= fig.add_subplot(312)
ax3= fig.add_subplot(313)


style.use('ggplot')

y_2004 = dataset["2004"]["AEP_MW"].to_list()
x_2004 = dataset["2004"]["Date"].to_list()
ax1.plot(x_2004,y_2004, color="green", linewidth=1.7)


y_2005 = dataset["2005"]["AEP_MW"].to_list()
x_2005 = dataset["2005"]["Date"].to_list()
ax2.plot(x_2005, y_2005, color="green", linewidth=1)


y_2006 = dataset["2006"]["AEP_MW"].to_list()
x_2006 = dataset["2006"]["Date"].to_list()
ax3.plot(x_2006, y_2006, color="green", linewidth=1)


plt.rcParams["figure.figsize"] = (18,8)
plt.title("Energy consumptionnin")
plt.xlabel("Date")
plt.ylabel("Energy in MW")
plt.grid(True, alpha=1)
plt.legend()

for label in ax1.xaxis.get_ticklabels():
    label.set_rotation(90)
No handles with labels found to put in legend.

Energy Distribution

In [341]:
sns.distplot(dataset["AEP_MW"])
plt.title("Ennergy Distribution")
Out[341]:
Text(0.5, 1.0, 'Ennergy Distribution')

Energy with Respect to Time

In [356]:
fig = plt.figure()
ax1= fig.add_subplot(111)

sns.lineplot(x=dataset["Time"],y=dataset["AEP_MW"], data=df)
plt.title("Energy Consumption vs Time ")
plt.xlabel("Time")
plt.grid(True, alpha=1)
plt.legend()

for label in ax1.xaxis.get_ticklabels():
    label.set_rotation(90)
No handles with labels found to put in legend.

Resampleing Data

In [415]:
NewDataSet = dataset.resample('D').mean()
In [416]:
print("Old Dataset ",dataset.shape )
print("New  Dataset ",NewDataSet.shape )
Old Dataset  (121273, 7)
New  Dataset  (5055, 4)
In [417]:
TestData = NewDataSet.tail(100)

Training_Set = NewDataSet.iloc[:,0:1]

Training_Set = Training_Set[:-60]
In [418]:
print("Training Set Shape ", Training_Set.shape)
print("Test Set Shape ", TestData.shape)
Training Set Shape  (4995, 1)
Test Set Shape  (100, 4)
In [419]:
Training_Set = Training_Set.values
sc = MinMaxScaler(feature_range=(0, 1))
Train = sc.fit_transform(Training_Set)
In [420]:
X_Train = []
Y_Train = []

# Range should be fromm 60 Values to END 
for i in range(60, Train.shape[0]):
    
    # X_Train 0-59 
    X_Train.append(Train[i-60:i])
    
    # Y Would be 60 th Value based on past 60 Values 
    Y_Train.append(Train[i])

# Convert into Numpy Array
X_Train = np.array(X_Train)
Y_Train = np.array(Y_Train)

print(X_Train.shape)
print(Y_Train.shape)
(4935, 60, 1)
(4935, 1)
In [421]:
# Shape should be Number of [Datapoints , Steps , 1 )
# we convert into 3-d Vector or #rd Dimesnsion
X_Train = np.reshape(X_Train, newshape=(X_Train.shape[0], X_Train.shape[1], 1))
X_Train.shape
Out[421]:
(4935, 60, 1)

Model

In [422]:
regressor = Sequential()

# Adding the first LSTM layer and some Dropout regularisation
regressor.add(LSTM(units = 50, return_sequences = True, input_shape = (X_Train.shape[1], 1)))
regressor.add(Dropout(0.2))

# Adding a second LSTM layer and some Dropout regularisation
regressor.add(LSTM(units = 50, return_sequences = True))
regressor.add(Dropout(0.2))

# Adding a third LSTM layer and some Dropout regularisation
regressor.add(LSTM(units = 50, return_sequences = True))
regressor.add(Dropout(0.2))

# Adding a fourth LSTM layer and some Dropout regularisation
regressor.add(LSTM(units = 50))
regressor.add(Dropout(0.2))

# Adding the output layer
regressor.add(Dense(units = 1))

# Compiling the RNN
regressor.compile(optimizer = 'adam', loss = 'mean_squared_error')
In [423]:
regressor.fit(X_Train, Y_Train, epochs = 50, batch_size = 32)
Epoch 1/50
4935/4935 [==============================] - 33s 7ms/step - loss: 0.0237
Epoch 2/50
4935/4935 [==============================] - 33s 7ms/step - loss: 0.0183
Epoch 3/50
4935/4935 [==============================] - 34s 7ms/step - loss: 0.0173
Epoch 4/50
4935/4935 [==============================] - 34s 7ms/step - loss: 0.0164
Epoch 5/50
4935/4935 [==============================] - 35s 7ms/step - loss: 0.0157
Epoch 6/50
4935/4935 [==============================] - 34s 7ms/step - loss: 0.0160
Epoch 7/50
4935/4935 [==============================] - 34s 7ms/step - loss: 0.0151
Epoch 8/50
4935/4935 [==============================] - 35s 7ms/step - loss: 0.0125
Epoch 9/50
4935/4935 [==============================] - 34s 7ms/step - loss: 0.0099
Epoch 10/50
4935/4935 [==============================] - 34s 7ms/step - loss: 0.0089
Epoch 11/50
4935/4935 [==============================] - 34s 7ms/step - loss: 0.0085
Epoch 12/50
4935/4935 [==============================] - 34s 7ms/step - loss: 0.0083
Epoch 13/50
4935/4935 [==============================] - 34s 7ms/step - loss: 0.0078
Epoch 14/50
4935/4935 [==============================] - 34s 7ms/step - loss: 0.0079
Epoch 15/50
4935/4935 [==============================] - 34s 7ms/step - loss: 0.0073
Epoch 16/50
4935/4935 [==============================] - 32s 7ms/step - loss: 0.0075
Epoch 17/50
4935/4935 [==============================] - 32s 7ms/step - loss: 0.0072
Epoch 18/50
4935/4935 [==============================] - 32s 7ms/step - loss: 0.0070
Epoch 19/50
4935/4935 [==============================] - 32s 7ms/step - loss: 0.0066
Epoch 20/50
4935/4935 [==============================] - 32s 7ms/step - loss: 0.0063
Epoch 21/50
4935/4935 [==============================] - 32s 7ms/step - loss: 0.0061
Epoch 22/50
4935/4935 [==============================] - 32s 6ms/step - loss: 0.0058
Epoch 23/50
4935/4935 [==============================] - 32s 7ms/step - loss: 0.0056
Epoch 24/50
4935/4935 [==============================] - 32s 7ms/step - loss: 0.0055
Epoch 25/50
4935/4935 [==============================] - 32s 7ms/step - loss: 0.0053
Epoch 26/50
4935/4935 [==============================] - 32s 7ms/step - loss: 0.0054
Epoch 27/50
4935/4935 [==============================] - 32s 7ms/step - loss: 0.0053
Epoch 28/50
4935/4935 [==============================] - 32s 7ms/step - loss: 0.0051
Epoch 29/50
4935/4935 [==============================] - 32s 7ms/step - loss: 0.0050
Epoch 30/50
4935/4935 [==============================] - 32s 7ms/step - loss: 0.0051
Epoch 31/50
4935/4935 [==============================] - 32s 6ms/step - loss: 0.0050
Epoch 32/50
4935/4935 [==============================] - 32s 7ms/step - loss: 0.0049
Epoch 33/50
4935/4935 [==============================] - 32s 7ms/step - loss: 0.0048
Epoch 34/50
4935/4935 [==============================] - 32s 7ms/step - loss: 0.0048
Epoch 35/50
4935/4935 [==============================] - 2283s 463ms/step - loss: 0.0048
Epoch 36/50
4935/4935 [==============================] - 3475s 704ms/step - loss: 0.0047
Epoch 37/50
4935/4935 [==============================] - 32s 6ms/step - loss: 0.0047
Epoch 38/50
4935/4935 [==============================] - 29s 6ms/step - loss: 0.0047
Epoch 39/50
4935/4935 [==============================] - 30s 6ms/step - loss: 0.0046
Epoch 40/50
4935/4935 [==============================] - 31s 6ms/step - loss: 0.0046
Epoch 41/50
4935/4935 [==============================] - 33s 7ms/step - loss: 0.0045
Epoch 42/50
4935/4935 [==============================] - 37s 7ms/step - loss: 0.0045
Epoch 43/50
4935/4935 [==============================] - 38s 8ms/step - loss: 0.0047
Epoch 44/50
4935/4935 [==============================] - 36s 7ms/step - loss: 0.0045
Epoch 45/50
4935/4935 [==============================] - 35s 7ms/step - loss: 0.0044
Epoch 46/50
4935/4935 [==============================] - 38s 8ms/step - loss: 0.0044
Epoch 47/50
4935/4935 [==============================] - 34s 7ms/step - loss: 0.0043
Epoch 48/50
4935/4935 [==============================] - 42s 9ms/step - loss: 0.0043
Epoch 49/50
4935/4935 [==============================] - 37s 7ms/step - loss: 0.0044
Epoch 50/50
4935/4935 [==============================] - 37s 8ms/step - loss: 0.0044
Out[423]:
<keras.callbacks.History at 0x1a36d8f898>

Test Data

In [462]:
TestData.head(2)
Out[462]:
AEP_MW Month Year Week
Datetime
2018-04-26 13157.791667 4 2018 17
2018-04-27 12964.000000 4 2018 17
In [463]:
TestData.shape
Out[463]:
(100, 4)
In [464]:
NewDataSet.shape
Out[464]:
(5055, 4)
In [465]:
Df_Total = pd.concat((NewDataSet[["AEP_MW"]], TestData[["AEP_MW"]]), axis=0)
In [466]:
Df_Total.shape
Out[466]:
(5155, 1)
In [467]:
inputs = Df_Total[len(Df_Total) - len(TestData) - 60:].values
inputs.shape
Out[467]:
(160, 1)
In [468]:
inputs = Df_Total[len(Df_Total) - len(TestData) - 60:].values

# We need to Reshape
inputs = inputs.reshape(-1,1)

# Normalize the Dataset
inputs = sc.transform(inputs)

X_test = []
for i in range(60, 160):
    X_test.append(inputs[i-60:i])
    
# Convert into Numpy Array
X_test = np.array(X_test)

# Reshape before Passing to Network
X_test = np.reshape(X_test, (X_test.shape[0], X_test.shape[1], 1))

# Pass to Model 
predicted_stock_price = regressor.predict(X_test)

# Do inverse Transformation to get Values 
predicted_stock_price = sc.inverse_transform(predicted_stock_price)
In [469]:
True_MegaWatt = TestData["AEP_MW"].to_list()
Predicted_MegaWatt  = predicted_stock_price
dates = TestData.index.to_list()
In [471]:
Machine_Df = pd.DataFrame(data={
    "Date":dates,
    "TrueMegaWatt": True_MegaWatt,
    "PredictedMeagWatt":[x[0] for x in Predicted_MegaWatt ]
})

Future Predicted

In [474]:
Machine_Df
Out[474]:
Date TrueMegaWatt PredictedMeagWatt
0 2018-04-26 13157.791667 13671.706055
1 2018-04-27 12964.000000 12991.945312
2 2018-04-28 12237.583333 14521.591797
3 2018-04-29 12156.791667 13211.944336
4 2018-04-30 13443.500000 12788.455078
5 2018-05-01 13251.875000 13789.046875
6 2018-05-02 13641.166667 12804.154297
7 2018-05-03 14217.250000 12709.704102
8 2018-05-04 13725.625000 14261.728516
9 2018-05-05 11902.166667 14472.195312
10 2018-05-06 11680.083333 12677.794922
11 2018-05-07 12972.500000 12127.531250
12 2018-05-08 13295.083333 12887.196289
13 2018-05-09 13688.750000 12743.552734
14 2018-05-10 13993.250000 12747.035156
15 2018-05-11 13525.166667 13814.033203
16 2018-05-12 12942.916667 13970.200195
17 2018-05-13 12832.541667 13168.587891
18 2018-05-14 15004.750000 12955.161133
19 2018-05-15 15171.791667 15169.067383
20 2018-05-16 13925.416667 14419.253906
21 2018-05-17 14465.666667 12913.649414
22 2018-05-18 13684.333333 14998.011719
23 2018-05-19 13044.166667 14174.238281
24 2018-05-20 13169.125000 13413.721680
25 2018-05-21 14728.666667 13382.070312
26 2018-05-22 14857.125000 14739.416992
27 2018-05-23 14489.583333 14121.821289
28 2018-05-24 14656.250000 13763.244141
29 2018-05-25 15137.125000 15047.317383
... ... ... ...
70 2018-07-05 17609.000000 17120.591797
71 2018-07-06 15742.916667 17615.269531
72 2018-07-07 13610.333333 14689.130859
73 2018-07-08 13768.708333 13816.837891
74 2018-07-09 16427.333333 15385.699219
75 2018-07-10 17489.333333 16932.236328
76 2018-07-11 16714.125000 17681.707031
77 2018-07-12 16330.833333 16694.558594
78 2018-07-13 16911.291667 15885.130859
79 2018-07-14 16488.375000 16239.578125
80 2018-07-15 16296.208333 16572.927734
81 2018-07-16 17400.041667 17885.480469
82 2018-07-17 17311.125000 17595.656250
83 2018-07-18 15814.041667 17368.632812
84 2018-07-19 15889.916667 15917.466797
85 2018-07-20 15332.500000 15957.360352
86 2018-07-21 13795.250000 14366.544922
87 2018-07-22 13479.333333 13657.029297
88 2018-07-23 15410.083333 15275.373047
89 2018-07-24 15890.541667 15779.814453
90 2018-07-25 16503.333333 16030.302734
91 2018-07-26 16474.250000 16809.560547
92 2018-07-27 15816.625000 16138.321289
93 2018-07-28 14113.083333 14586.478516
94 2018-07-29 13658.000000 13875.068359
95 2018-07-30 15368.083333 15294.772461
96 2018-07-31 15180.291667 15672.427734
97 2018-08-01 15151.166667 15329.677734
98 2018-08-02 15687.666667 15497.061523
99 2018-08-03 14809.000000 15975.358398

100 rows × 3 columns

In [476]:
True_MegaWatt = TestData["AEP_MW"].to_list()
Predicted_MegaWatt  = [x[0] for x in Predicted_MegaWatt ]
dates = TestData.index.to_list()
In [487]:
fig = plt.figure()

ax1= fig.add_subplot(111)

x = dates
y = True_MegaWatt

y1 = Predicted_MegaWatt

plt.plot(x,y, color="green")
plt.plot(x,y1, color="red")
# beautify the x-labels
plt.gcf().autofmt_xdate()
plt.xlabel('Dates')
plt.ylabel("Power in MW")
plt.title("Machine Learned the Pattern Predicting Future Values ")
plt.legend()
No handles with labels found to put in legend.
Out[487]:
<matplotlib.legend.Legend at 0x1a4984b780>

35 comments:

  1. This comment has been removed by the author.

    ReplyDelete
    Replies
    1. Pythonist: Project: Data Analysis And Visualizations And Predicting Future Energy Consumption Using Lstm Predicting Values 2 Month Later Accurately Rnn >>>>> Download Now

      >>>>> Download Full

      Pythonist: Project: Data Analysis And Visualizations And Predicting Future Energy Consumption Using Lstm Predicting Values 2 Month Later Accurately Rnn >>>>> Download LINK

      >>>>> Download Now

      Pythonist: Project: Data Analysis And Visualizations And Predicting Future Energy Consumption Using Lstm Predicting Values 2 Month Later Accurately Rnn >>>>> Download Full

      >>>>> Download LINK R9

      Delete
  2. This comment has been removed by the author.

    ReplyDelete
  3. Hii Soumil, I am following your tutorial and facing some issue with Prediction value , I am very new to this. I completely stuck with this problem so need your guidance over this. It would be very helpful if you enlighten me.

    Please look into this link :https://stackoverflow.com/questions/59775085/getting-constant-prediction-values-using-lstm-keras-syntax

    ReplyDelete
  4. If You are going to use Google Colab: Instead of Jupyter notebbok. For last part of code named as "MODEL"

    You need to import theese libraries/moduls,: ;)

    from tensorflow import keras
    from tensorflow.keras import layers
    from keras.layers.core import Dense, Activation, Dropout
    from keras.layers.recurrent import LSTM
    from keras.models import Sequential

    ReplyDelete
  5. this problem aries while running this code in google lab please help

    -------------------------------
    TypeError Traceback (most recent call last)
    in ()
    2 ax1= fig.add_subplot(111)
    3
    ----> 4 sns.lineplot(x=dataset["Time"],y=dataset["AEP_MW"], data=df)
    5 plt.title("Energy Consumption vs Time ")
    6 plt.xlabel("Time")

    8 frames
    /usr/local/lib/python3.6/dist-packages/numpy/core/_asarray.py in asarray(a, dtype, order)
    83
    84 """
    ---> 85 return array(a, dtype, copy=False, order=order)
    86
    87

    TypeError: float() argument must be a string or a number, not 'datetime.time

    ReplyDelete
    Replies
    1. hi, do you have the solution for this? can you pls email me if you have the solution
      ainafaqihah7@gmail.com

      Delete
    2. This comment has been removed by the author.

      Delete
    3. replace x=dataset["Time"] with x=dataset["Time"].astype(str)

      Delete
    4. i have same error....

      how can i solve this?

      Delete
  6. Hi, would you mind updating this code as it doesn't seem to work anymore!

    ReplyDelete
    Replies
    1. Hi, i hope you are safe
      this is the entire project
      https://github.com/drwiiche/electricity-consumption/blob/master/electricity-consumption-project.ipynb

      Delete
    2. Apologies for the late reply, the losses that I'm getting are Nans. Any fix for this ?

      Delete
  7. Replies
    1. you can get it here :
      https://github.com/drwiiche/electricity-consumption

      Delete
  8. Thanks for the blog loaded with so many information. Stopping by your blog helped me to get what I was looking for. hemp oil wellness

    ReplyDelete
  9. Hi where did u Predict Values of 2 month???

    ReplyDelete
  10. Thanks for providing recent updates regarding the concern, I look forward to read more. aktieanalys

    ReplyDelete
  11. Hi, I am a python beginner, how could I determine rms and mae comparing true data and predicted data with using Sklearn?


    Seems the value is not correct if I input like this comparing y and y1 in the model:
    from sklearn.metrics import mean_squared_error
    from sklearn.metrics import mean_absolute_error

    rmse = sqrt(mean_squared_error(y, y1))
    mae = mean_absolute_error(y, y1)

    ReplyDelete
  12. Plus Xnergy Edge - AIOT/Energy IOT for smart energy IOT, provides ecosystem that turns building energy insights into savings & business intelligence building ems

    ReplyDelete
  13. Hi,
    Thank you for a good description. I managed to run the code and overcome all error. Now I would like to predict future value and not substract "60 days data" from my excel file. Where do i change in the code in order to tell the machine to predict the energy consumption in the coming 60 days (ex 2021-06-07)?

    ReplyDelete
    Replies
    1. hey i am new to python and ml and i am not getting this code ...what actually does this code do ? I thought this is going to predict future energy consumption

      Delete
  14. This comment has been removed by the author.

    ReplyDelete
  15. Positive site, where did u come up with the information on this posting?I have read a few of the articles on your website now, and I really like your style. Thanks a million and please keep up the effective work. amazon product research tool free

    ReplyDelete
  16. This comment has been removed by the author.

    ReplyDelete
  17. I have been impressed after read this because of some quality work and informative thoughts. I just want to say thanks for the writer and wish you all the best for coming! Your exuberance is refreshing. The Best Remote Team Management Tool

    ReplyDelete
  18. Can you please provide dataset of this problem

    ReplyDelete
  19. Hi
    First of all thanks for the presentation regarding time series prediction analysis. By the way, I am a Civil Engineer. I prefer to forecast events regarding to hydrology based on CTS-LSMT. Can you please help me to build up coding for that?

    ReplyDelete
  20. Getting following error
    Cannot convert a symbolic Tensor (lstm_2/strided_slice:0) to a numpy array. This error may indicate that you're trying to pass a Tensor to a NumPy call, which is not supported
    Please help

    ReplyDelete
  21. how can you write a Python program that analyzes the consumption for a month from CSV file, number of days in the month, average consumption per hour,time of the highest consumption.(link adress for the CSV file)
    https://cdn.fbsbx.com/v/t59.2708-21/246526620_199119609030507_8304275238008157976_n.csv/meteringvalues-mp-xxxxx-consumption-202012.csv?_nc_cat=106&ccb=1-5&_nc_sid=0cab14&_nc_ohc=_XCmvNzqBBsAX9rtULi&_nc_ht=cdn.fbsbx.com&oh=b9bf11f8afd69c6d4bb34c5a3c8bc46c&oe=61733F1E&dl=1

    ReplyDelete
  22. hello , please you can give me your private contact i need your help

    ReplyDelete
  23. Pythonist: Project: Data Analysis And Visualizations And Predicting Future Energy Consumption Using Lstm Predicting Values 2 Month Later Accurately Rnn >>>>> Download Now

    >>>>> Download Full

    Pythonist: Project: Data Analysis And Visualizations And Predicting Future Energy Consumption Using Lstm Predicting Values 2 Month Later Accurately Rnn >>>>> Download LINK

    >>>>> Download Now

    Pythonist: Project: Data Analysis And Visualizations And Predicting Future Energy Consumption Using Lstm Predicting Values 2 Month Later Accurately Rnn >>>>> Download Full

    >>>>> Download LINK

    ReplyDelete
  24. Hello Soumil, I wanted to know how did you manage to get the data set of power consumption of the company. Basically, I am also working on the same project just don't know how to or from where I can get tha data set. Urgently required please help.

    ReplyDelete
  25. I have problem in Energy with Respect to Time part ---------------------------------------------------------------------------
    TypeError Traceback (most recent call last)
    /usr/local/lib/python3.9/dist-packages/pandas/_libs/lib.pyx in pandas._libs.lib.maybe_convert_numeric()

    TypeError: Invalid object type

    During handling of the above exception, another exception occurred:

    TypeError Traceback (most recent call last)
    6 frames
    in
    2 ax1= fig.add_subplot(111)
    3
    ----> 4 sns.lineplot(x=dataset["Time"],y=dataset["AEP_MW"], data=df)
    5 plt.title("Energy Consumption vs Time ")
    6 plt.xlabel("Time")

    /usr/local/lib/python3.9/dist-packages/seaborn/relational.py in lineplot(data, x, y, hue, size, style, units, palette, hue_order, hue_norm, sizes, size_order, size_norm, dashes, markers, style_order, estimator, errorbar, n_boot, seed, orient, sort, err_style, err_kws, legend, ci, ax, **kwargs)
    643 kwargs["color"] = _default_color(ax.plot, hue, color, kwargs)
    644
    --> 645 p.plot(ax, kwargs)
    646 return ax
    647

    /usr/local/lib/python3.9/dist-packages/seaborn/relational.py in plot(self, ax, kws)
    421 # Loop over the semantic subsets and add to the plot
    422 grouping_vars = "hue", "size", "style"
    --> 423 for sub_vars, sub_data in self.iter_data(grouping_vars, from_comp_data=True):
    424
    425 if self.sort:

    /usr/local/lib/python3.9/dist-packages/seaborn/_oldcore.py in iter_data(self, grouping_vars, reverse, from_comp_data, by_facet, allow_empty, dropna)
    1026
    1027 if from_comp_data:
    -> 1028 data = self.comp_data
    1029 else:
    1030 data = self.plot_data

    /usr/local/lib/python3.9/dist-packages/seaborn/_oldcore.py in comp_data(self)
    1124 # supporting `order` in categorical plots is tricky
    1125 orig = orig[orig.isin(self.var_levels[var])]
    -> 1126 comp = pd.to_numeric(converter.convert_units(orig))
    1127 if converter.get_scale() == "log":
    1128 comp = np.log10(comp)

    /usr/local/lib/python3.9/dist-packages/pandas/core/tools/numeric.py in to_numeric(arg, errors, downcast)
    182 coerce_numeric = errors not in ("ignore", "raise")
    183 try:
    --> 184 values, _ = lib.maybe_convert_numeric(
    185 values, set(), coerce_numeric=coerce_numeric
    186 )

    /usr/local/lib/python3.9/dist-packages/pandas/_libs/lib.pyx in pandas._libs.lib.maybe_convert_numeric()

    TypeError: Invalid object type at position 0

    ReplyDelete
  26. can someone help me 1 Training_Set = Training_Set.values
    2 sc = MinMaxScaler(feature_range=(0, 1))
    3 Train = sc.fit_transform(Training_Set)

    AttributeError: 'numpy.ndarray' object has no attribute 'values'

    ReplyDelete

How to Use Publish-Audit-Merge Workflow in Apache Iceberg: A Beginner’s Guide

publish How to Use Publish-Audit-Merge Workflow in Apache Iceberg: A Beginner’s Guide ¶ In [24]: from ...