Skip to content

time dependent ODE with exogenous input #11

@mariaade26

Description

@mariaade26

Hello,
I am working on solving an inverse problem using a Physics-Informed Neural Network (PINN) to model the time evolution of a single-zone building's temperature. The ODE considers thermal power, outdoor temperature, and solar gains as inputs. My objective is to determine the parameters of the underlying ODE using available data (both inputs and target values).
I have followed your tutorials, but I am struggling to obtain a reasonable solution. The parameter estimates do not make sense, and the predicted solution does not approximate the expected behavior well.

Could you help me understand where I might be going wrong? Any guidance would be greatly appreciated.


import torch
import torch.autograd as autograd         # computation graph
from torch import Tensor                  # tensor node in the computation graph
import torch.nn as nn                     # neural networks
import torch.optim as optim               # optimizers e.g. gradient descent, ADAM, etc.

import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
from mpl_toolkits.axes_grid1 import make_axes_locatable
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.ticker
from sklearn.model_selection import train_test_split

import numpy as np
import time
import pandas as pd
import scipy.io

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
print(device)

# DATA: indoor temperature (target), outdoor temperature, HVAC thermal power
# and solar gains (exogenous inputs), read from local CSV exports.
data =pd.read_csv('/home/adelaide/Scrivania/SImulazioni shoebox/Params/Copenaghen/Tinterna_2023_param.csv')['Tint']
Text = pd.read_csv('/home/adelaide/Scrivania/SImulazioni shoebox/Params/Copenaghen/Testerna_2023_param.csv')['Tout']
phi = pd.read_csv('/home/adelaide/Scrivania/SImulazioni shoebox/Params/Copenaghen/phi_hc_2023_param.csv')['phi']
phisun =pd.read_csv('/home/adelaide/Scrivania/SImulazioni shoebox/Params/Copenaghen/phisun_2023_param.csv')['phisun']
# Synthetic time grid: 430 samples over 386100 s (~15-min steps)
# — presumably matching the CSV sampling rate; TODO confirm.
t =  np.linspace(0,386100, 430)

#t = data['t']                                
usol = data                    

# Truncate all series to the length of the time grid.
U_true = np.array(usol)
t_true = np.array(t)
T_true = np.array(Text[0:len(t)])
phi_true = np.array(phi[0:len(t)])
phisun_true = np.array(phisun[0:len(t)])
total_points=len(t)
#N_f = 700
N_f = 300
# Random subsample of N_f collocation/training points (without replacement).
id_f = np.random.choice(total_points, N_f, replace=False)

U_train_Nu= U_true[id_f]
t_train_Nu = t_true[id_f]
Text_train_Nu = T_true[id_f]
phi_train_Nu =phi_true[id_f]
phisun_train_Nu = phisun_true[id_f]

# Standardise the inputs (zero mean, unit std) on the *subsample* statistics.
# NOTE(review): the target U_train_Nu is NOT standardised, so the data loss
# and the ODE residual operate on very different scales — verify this is
# intentional; it may explain the implausible parameter estimates.
# NOTE(review): t_train_Nu is scaled with the subsample's mean/std while
# t_true (below) uses the full grid's — train and test time axes end up on
# slightly different scales; confirm this is intended.
Text_train_Nu = (Text_train_Nu - Text_train_Nu.mean())/Text_train_Nu.std()
phi_train_Nu = (phi_train_Nu - phi_train_Nu.mean())/phi_train_Nu.std()
phisun_train_Nu = (phisun_train_Nu -phisun_train_Nu.mean())/phisun_train_Nu.std()
t_train_Nu = (t_train_Nu-t_train_Nu.mean())/t_train_Nu.std()
t_true = (t_true - t_true.mean())/t_true.std()
print(t_train_Nu)


# Reshape everything to column vectors (N, 1) for the network.
t_true = t_true.reshape(-1,1)
t_train_Nu = t_train_Nu.reshape(-1, 1) 
U_train_Nu = U_train_Nu.reshape(-1,1)
Text_train_Nu = Text_train_Nu.reshape(-1,1)
phi_train_Nu = phi_train_Nu.reshape(-1,1)
phisun_train_Nu = phisun_train_Nu.reshape(-1,1)


t_train_Nu = torch.from_numpy(t_train_Nu).float().to(device)
U_train_Nu = torch.from_numpy(U_train_Nu).float().to(device)
Text_train_Nu = torch.from_numpy(Text_train_Nu).float().to(device)
phi_train_Nu = torch.from_numpy(phi_train_Nu).float().to(device)
phisun_train_Nu = torch.from_numpy(phisun_train_Nu).float().to(device)

# NOTE(review): U_true stays 1-D here while the network output is (N, 1);
# comparing them in MSELoss broadcasts to (N, N) — see FCN.test.
U_true = torch.from_numpy(U_true).float().to(device)
T_true = torch.from_numpy(T_true).float().to(device)
# Zero target for the ODE residual loss.
f_hat = torch.zeros(U_train_Nu.shape[0],1).to(device)


class DNN(nn.Module):
    """Fully-connected feed-forward network with tanh hidden activations.

    ``layers`` lists the width of every layer, e.g. ``[1, 128, 128, 1]``.
    Weights are Xavier-normal initialised, biases start at zero.  The last
    layer is linear (no activation), as usual for regression.
    """

    def __init__(self, layers):
        super().__init__()
        self.activation = nn.Tanh()
        self.linears = nn.ModuleList(
            [nn.Linear(layers[i], layers[i + 1]) for i in range(len(layers) - 1)]
        )
        for linear in self.linears:
            nn.init.xavier_normal_(linear.weight.data, gain=1.0)
            nn.init.zeros_(linear.bias.data)

    def forward(self, x):
        """Return the network output for ``x`` (numpy array or tensor)."""
        if not torch.is_tensor(x):
            x = torch.from_numpy(x)
        # Follow the module's own device instead of a script-level global,
        # so the network works wherever it has been moved with .to(...).
        a = x.float().to(self.linears[0].weight.device)
        # BUG FIX: the original looped over the *global* ``layers`` list
        # (``for i in range(len(layers)-2)``), which breaks any instance
        # built with a different architecture. Iterate this instance's
        # own hidden layers instead.
        for linear in self.linears[:-1]:
            a = self.activation(linear(a))
        return self.linears[-1](a)
    

# Initial guesses for the unknown ODE parameters (inverse problem).
# Presumably R [K/W], C [J/K] and a solar-aperture factor A — TODO confirm units.
lambda1 = 5.021417e-03 
lambda2 = 4.225671e+01
lambda3 = 8.677529e+00


class FCN():
    """Physics-informed model for the single-zone building inverse problem.

    Wraps a :class:`DNN` mapping (standardised) time -> indoor temperature
    and learns three scalar ODE parameters alongside the network weights by
    minimising a data loss plus the residual of

        dT/dt = (Text - T)/(lambda1*lambda2) + phisun*lambda3/lambda1
                + phi/lambda2

    NOTE(review): like the original script, this class reads several
    module-level globals (Text_train_Nu, phi_train_Nu, phisun_train_Nu,
    f_hat, t_true, U_true, device, optimizer).
    """

    def __init__(self, layers):
        self.loss_function = nn.MSELoss(reduction='mean')
        # Iteration counter for periodic logging from closure().
        self.iter = 0
        # Trainable ODE parameters (inverse problem), seeded from the
        # module-level initial guesses.
        self.lambda1 = nn.Parameter(torch.tensor([lambda1], requires_grad=True).float().to(device))
        self.lambda2 = nn.Parameter(torch.tensor([lambda2], requires_grad=True).float().to(device))
        self.lambda3 = nn.Parameter(torch.tensor([lambda3], requires_grad=True).float().to(device))
        self.dnn = DNN(layers).to(device)
        # Register the lambdas on the DNN so that dnn.parameters() yields
        # them together with the network weights (used by the optimizer).
        self.dnn.register_parameter('lambda1', self.lambda1)
        self.dnn.register_parameter('lambda2', self.lambda2)
        self.dnn.register_parameter('lambda3', self.lambda3)

    def loss_data(self, x, y):
        """MSE between the network prediction at x and the measured data y."""
        return self.loss_function(self.dnn(x), y)

    def loss_PDE(self, t_train_Nu, Text_train_Nu, phi_train_Nu, phisun_train_Nu):
        """MSE of the ODE residual at the (standardised) collocation times."""
        lambda1 = self.lambda1  # R
        lambda2 = self.lambda2  # C
        lambda3 = self.lambda3  # A

        g = t_train_Nu.clone()
        g.requires_grad = True
        u = self.dnn(g)

        # dT/dt via autograd (vector-Jacobian product with a ones vector).
        u_t = autograd.grad(
            u, g, torch.ones([t_train_Nu.shape[0], 1]).to(device),
            retain_graph=True, create_graph=True)[0]

        # NOTE(review): the derivative is taken w.r.t. *standardised* time,
        # so the fitted lambdas absorb the time-rescaling factor — confirm
        # before interpreting them as physical R, C, A.
        f = (u_t
             - 1 / (lambda1 * lambda2) * (Text_train_Nu - u)
             - phisun_train_Nu * (lambda3 / lambda1)
             - phi_train_Nu / lambda2)
        return self.loss_function(f, f_hat)

    def loss(self, x, y):
        """Total loss: data misfit + physics residual (equal weights)."""
        loss_u = self.loss_data(x, y)
        loss_f = self.loss_PDE(x, Text_train_Nu, phi_train_Nu, phisun_train_Nu)
        return loss_u + loss_f

    def closure(self):
        """Callable for closure-based optimizers (e.g. L-BFGS)."""
        optimizer.zero_grad()
        loss = self.loss(t_train_Nu, U_train_Nu)
        loss.backward()
        self.iter += 1

        if self.iter % 100 == 0:
            # BUG FIX: test() requires the evaluation grid (the original
            # called PINN.test() with no argument), and the original print
            # closed its %-tuple one value early, raising "not enough
            # arguments for format string".
            error_vec, _ = self.test(t_true)
            print(
                'Epoch: %.1f ,Relative Error(Test): %.5f , 𝜆_PINN = [%.5f,  %.5f, %.5f]' %
                (self.iter, error_vec.cpu().detach().numpy(),
                 self.lambda1.item(), self.lambda2.item(), self.lambda3.item()))
        return loss

    def test(self, t_true):
        """Evaluate on t_true; return (MSE vs U_true, prediction as numpy)."""
        u_pred = self.dnn(t_true)
        error_fun = torch.nn.MSELoss()
        # BUG FIX: U_true is 1-D while u_pred is (N, 1); comparing them
        # directly broadcasts both to (N, N) and produces a meaningless
        # error. Reshape the target to a column vector first.
        error_vec = error_fun(u_pred, U_true.view(-1, 1))
        print(error_vec)
        u_pred = u_pred.cpu().detach().numpy()
        return error_vec, u_pred
    

# Architecture: scalar (standardised) time in -> indoor temperature out,
# four hidden layers of 128 units.
layers = np.array([1,128,128,128, 128, 1]) #PER PHI
PINN = FCN(layers)
steps=60000

# The lambdas were registered on PINN.dnn, so this parameter list already
# includes them alongside the network weights.
params = list(PINN.dnn.parameters())
# BUG FIX: a stray, unused ``lr = 1e-1`` used to sit above this line and
# suggest a different learning rate than the one actually used; the
# optimizer runs with 1e-3, stated explicitly here. (An unused
# ``epoch_counter`` was removed for the same reason.)
optimizer = torch.optim.Adam(params, lr=1e-3)


# Per-epoch history: [loss, lambda1, lambda2, lambda3].
loss_vec = []
for epochs in range(steps):
    optimizer.zero_grad()  # Clear gradients
    loss = PINN.loss(t_train_Nu, U_train_Nu)  # Compute the loss
    loss.backward()  # Backpropagate the gradients
    optimizer.step()  # Update weights using Adam
    loss_vec.append([loss.item(), PINN.lambda1.item(), PINN.lambda2.item(), PINN.lambda3.item()])

    if epochs % 100 == 0:
        error_vec, u_pred = PINN.test(t_true)
        print(f'Epoch: {epochs+1:.1f}, LOSS: {loss.item():.5f}, Relative Error(Test): {error_vec.cpu().detach().numpy():.5f}, 𝜆_PINN = [{PINN.lambda1.item():.5f}, {PINN.lambda2.item():.5f}, {PINN.lambda3.item():.5f}]')


# Final prediction on the full (standardised) time grid.
_,u_pred = PINN.test(t_true)


Metadata

Assignees

No one assigned

    Labels

    No labels

    Projects

    No projects

    Milestone

    No milestone

    Relationships

    None yet

    Development

    No branches or pull requests

    Issue actions