
Thursday, October 3, 2024

Waves, Sand and Brain: Data-Less Physics Informed Neural Network (Includes PyTorch Code)

      After successfully solving the steady-state version (read more), here is code to solve one of the transient versions, the wave 🌊 equation, using a PINN 🧠. The other transient version has a first time derivative of the quantity T. This 👽 technology works without any data 📚 requirements, just like discrete methods such as the finite element or finite difference methods. The current code implements Mur (first-order absorbing) boundary conditions, which can be easily modified. Fig. 1 shows the results from the code with the current boundary conditions. As always, the PINN uses the strong form of the equation and does not require a mesh 😯.
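      For reference, the equation being solved is the 2D wave equation, T_tt = c^2 (T_xx + T_yy), and the Mur conditions, written as residuals the way the code below implements them, are T_x - c T_t = 0 at x = 0, T_x + c T_t = 0 at x = L, T_y - c T_t = 0 at y = 0 and T_y + c T_t = 0 at y = D (the textbook first-order Mur form uses 1 / c instead of c; with c = 1, as in this code, the two coincide).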


Fig. 1, The animation

     Cite as: Fahad Butt (2024). WAVE-PINN (https://whenrobotslove.blogspot.com/2024/10/waves-sand-and-brain-data-less-physics.html), Blogger. Retrieved Month Date, Year

Code

#Copyright <2024> <FAHAD BUTT>

#Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the “Software”), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:

#The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.

#THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.


#%% import necessary libraries

import os

import numpy as np

import random as rn

import matplotlib.pyplot as plt

import torch

import torch.nn as nn

import torch.nn.init as init

import torch.optim as optim

import glob

# import torch_directml # enable for AMD and Intel gpu

device = torch.device("cpu") # cuda for CUDA gpu, cpu for cpu, mps for Apple GPU, torch_directml.device() for any other gpu
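
# a minimal sketch for automatic device selection, assuming a standard PyTorch build:
# device = torch.device("cuda" if torch.cuda.is_available() else "cpu") # prefer CUDA gpu, otherwise fall back to cpu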


#%% 2D wave equation and boundary conditions

def wave_equation(we):

    we.requires_grad_(True) # track gradients with respect to inputs (x, y, t)

    

    out = model(we) # output from model

    

    T = out[:, 0] # transport

    

    dT = torch.autograd.grad(T, we, torch.ones_like(T), create_graph = True)[0] # dT/dwe

    

    T_x = dT[:, 0] # dT/dx

    T_y = dT[:, 1] # dT/dy

    T_t = dT[:, 2] # dT/dt

    

    T_xx = torch.autograd.grad(T_x, we, torch.ones_like(T_x), create_graph = True)[0][:, 0] # d2T/dx2

    T_yy = torch.autograd.grad(T_y, we, torch.ones_like(T_y), create_graph = True)[0][:, 1] # d2T/dy2

    T_tt = torch.autograd.grad(T_t, we, torch.ones_like(T_t), create_graph = True)[0][:, 2] # d2T/dt2

    

    wave = T_tt - (c**2 * (T_xx + T_yy)) # wave equation residual


    # apply boundary conditions (first-order Mur, outgoing waves on all four walls)

    T_left_bc = T_x[we[:, 0] == 0] - (c * T_t[we[:, 0] == 0]) # x = 0

    T_right_bc = T_x[we[:, 0] == L] + (c * T_t[we[:, 0] == L]) # x = L

    T_top_bc = T_y[we[:, 1] == D] + (c * T_t[we[:, 1] == D]) # y = D (sign chosen to match the outward normal, as at x = L)

    T_bottom_bc = T_y[we[:, 1] == 0] - (c * T_t[we[:, 1] == 0]) # y = 0 (sign chosen to match the outward normal, as at x = 0)

    

    T_center = T[(we[:, 0] == L / 2) & (we[:, 1] == D / 2)] - torch.sin(we[(we[:, 0] == L / 2) & (we[:, 1] == D / 2), 2]) # ripple at center


    # apply initial conditions

    T_ic_I = T[we[:, 2] == 0] # t = 0

    T_ic_II = T_t[we[:, 2] == 0] # t = 0

    

    return wave, \

        T_left_bc, T_right_bc, T_top_bc, T_bottom_bc, \

            T_ic_I, T_ic_II, \

                T_center

            

#%% define loss function

def loss_fn(we):

    

    global wave_loss, T_bc_loss, T_ic_loss, mse_loss, bc_loss

    

    we = we.clone().detach().requires_grad_(True).to(device) # clone input, enable gradient tracking and move to "device"

    

    wave, \

        T_left_bc, T_right_bc, T_top_bc, T_bottom_bc, \

            T_ic_I, T_ic_II, \

                T_center = wave_equation(we) # compute residuals for wave equations with specified boundary and initial conditions

    

    wave_loss = nn.MSELoss()(wave, torch.zeros_like(wave)) # wave equation loss

    

    T_bc_loss = nn.MSELoss()(T_left_bc, torch.zeros_like(T_left_bc)) + nn.MSELoss()(T_right_bc, torch.zeros_like(T_right_bc)) \

        + nn.MSELoss()(T_top_bc, torch.zeros_like(T_top_bc)) + nn.MSELoss()(T_bottom_bc, torch.zeros_like(T_bottom_bc)) # boundary condition loss

            

    T_ic_loss = nn.MSELoss()(T_ic_I, torch.zeros_like(T_ic_I)) + nn.MSELoss()(T_ic_II, torch.zeros_like(T_ic_II)) # initial condition loss

    

    T_center_loss = nn.MSELoss()(T_center, torch.zeros_like(T_center)) # pulse loss

    

    bc_loss = T_bc_loss / 480 # boundary condition loss, normalized by the number of boundary points (4 x 120)

    

    mse_loss = (wave_loss + T_center_loss) / 1056 # equation loss (interior), normalized by 1055 interior points + 1 center point

    

    ic_loss = T_ic_loss / 33 # initial condition loss, normalized by the number of time steps

    

    total_loss = mse_loss + bc_loss + ic_loss # total loss

            

    return total_loss
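
# note: the three loss terms above are summed with equal weight; a common PINN variation is to weight them
# if one term dominates. a minimal sketch (w_pde, w_bc and w_ic are hypothetical tuning knobs, not used above):
# w_pde, w_bc, w_ic = 1.0, 10.0, 10.0
# total_loss = w_pde * mse_loss + w_bc * bc_loss + w_ic * ic_loss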


#%% save and resume training

def save_checkpoint(epoch, model, optimizer): # save check point

    checkpoint = {

        'epoch': epoch,

        'model_state_dict': model.state_dict(),

        'optimizer_state_dict': optimizer.state_dict()}

    checkpoint_path = os.path.join(checkpoint_dir, f"checkpoint_epoch_{epoch}.pt")

    torch.save(checkpoint, checkpoint_path)

    print(f"Checkpoint saved at epoch {epoch}")


def load_checkpoint(model, optimizer): # load check point

    latest_checkpoint = max(glob.glob(os.path.join(checkpoint_dir, 'checkpoint_epoch_*.pt')), key=os.path.getctime)

    checkpoint = torch.load(latest_checkpoint)

    epoch = checkpoint['epoch']

    model.load_state_dict(checkpoint['model_state_dict'])

    optimizer.load_state_dict(checkpoint['optimizer_state_dict'])

    print(f"Checkpoint loaded from epoch {epoch}")

    return model, optimizer, epoch


#%% show model summary

def count_parameters(model):

    return sum(p1.numel() for p1 in model.parameters() if p1.requires_grad) # show trainable parameters


#%% set same seeds for initialization

np.random.seed(20) # same random initializations every time numpy

rn.seed(20) # same random initializations every time for python 

torch.manual_seed(20) # set seed for reproducibility
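
# optional: for stricter (and slower) run-to-run repeatability, especially on gpu, one can also use:
# torch.use_deterministic_algorithms(True) # error out on nondeterministic ops
# torch.backends.cudnn.benchmark = False # avoid autotuned, nondeterministic kernels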


#%% generate sample points (grid)

c = 1 # wave speed

length_square = 1 # reference length

L = 1 * length_square # Length of plate

D = 1 * length_square # Width of plate

TT = 2 * np.pi  # total time [s]


num_training_points = 120 # per length


# generate points for boundaries

x_l = np.random.uniform(0, 0, num_training_points) # left boundary

y_l = np.random.uniform(0, D, num_training_points)


x_r = np.random.uniform(L, L, num_training_points) # right boundary

y_r = np.random.uniform(0, D, num_training_points)


x_b = np.random.uniform(0, L, num_training_points) # bottom boundary

y_b = np.random.uniform(0, 0, num_training_points)


x_t = np.random.uniform(0, L, num_training_points) # top boundary

y_t = np.random.uniform(D, D, num_training_points)


x_m = np.random.uniform(0, L, 1055)

y_m = np.random.uniform(0, D, 1055)


x = np.concatenate([x_l, x_r, x_b, x_t, x_m]) # x co-ordinates

y = np.concatenate([y_l, y_r, y_b, y_t, y_m]) # y co-ordinates


x_center = L / 2 # center point

y_center = D / 2


# add center point to arrays

x = np.append(x, x_center)

y = np.append(y, y_center)


t = np.linspace(0, TT, 33) # time


x_repeated = np.repeat(x, len(t)) # create pairs

y_repeated = np.repeat(y, len(t))

t_tiled = np.tile(t, len(x))
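
# the repeat / tile pair builds the cartesian product of the spatial points and the time steps
assert len(x_repeated) == len(y_repeated) == len(t_tiled) == len(x) * len(t) # every (x, y) is paired with every t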


#%% display training (collocation) points

plt.figure(dpi = 300)

plt.scatter(x, y, s = 1, c = 'k', alpha = 1) # scatter plot

plt.xlim(0, L)

plt.ylim(0, D)

plt.gca().set_aspect('equal', adjustable = 'box')

plt.show()


#%% create neural network model

h = 50 # number of neurons per hidden layer

model = nn.Sequential(

    nn.Linear(3, h), # 3 inputs for x, y and t

    nn.Tanh(), # hidden layer

    nn.Linear(h, h),

    nn.Tanh(), # hidden layer

    nn.Linear(h, h),

    nn.Tanh(), # hidden layer

    nn.Linear(h, h),

    nn.Tanh(), # hidden layer

    nn.Linear(h, h),

    nn.Tanh(), # hidden layer

    nn.Linear(h, 1)).to(device) # 1 output for T


for m in model.modules():

    if isinstance(m, nn.Linear):

        init.xavier_normal_(m.weight) # weights initialization

        init.constant_(m.bias, 0.0) # bias initialization


print(model) # show model details

num_trainable_params = count_parameters(model) # show trainable parameters

print("Number of trainable parameters:", num_trainable_params)


#%% prepare input data

train_points = np.vstack((x_repeated, y_repeated, t_tiled)).T  # Stack x, y, t together

train_points = torch.tensor(train_points, dtype = torch.float32).to(device) # convert to tensor and move training points to GPU / CPU


#%% train using Adam

resume_training = False # True if resuming training

start_epoch = 1

optimizer = optim.Adam(model.parameters(), lr = 1e-4) # select optimizer and parameters


checkpoint_dir = './pytorch_checkpoints' # directory to save checkpoints

os.makedirs(checkpoint_dir, exist_ok = True) # create a directory to save checkpoints

if resume_training:

    model, optimizer, start_epoch = load_checkpoint(model, optimizer)

    start_epoch += 1 # start from next epoch

    

for epoch in range(20001):

    optimizer.zero_grad() # clear gradients of all optimized variables

    loss_value = loss_fn(train_points) # compute prediction by passing inputs to model

    loss_value.backward() # compute gradient of loss with respect to model parameters

    optimizer.step() # perform parameter update

    if epoch % 100 == 0:

        print(f"{epoch} {loss_value.item()}")


save_checkpoint(epoch, model, optimizer) # save model details

print(f"{epoch} {loss_value.item()}") # show loss values

print("Final Loss Values:")

print("wave_loss:", wave_loss.item())

print("T_bc_loss:", T_bc_loss.item())

print("T_ic_loss:", T_ic_loss.item())

print("mse_loss:", mse_loss.item())

print("bc_loss:", bc_loss.item())
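

#%% optional: fine-tune with L-BFGS (a common follow-up to Adam in the PINN literature, sketched here as an assumption, not part of the original run)
# lbfgs = optim.LBFGS(model.parameters(), lr = 1.0, max_iter = 500, line_search_fn = 'strong_wolfe')

# def closure(): # L-BFGS re-evaluates the loss several times per step
#     lbfgs.zero_grad()
#     loss = loss_fn(train_points)
#     loss.backward()
#     return loss

# lbfgs.step(closure)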


#%% prediction

target_time_step = 0 * np.pi / 2 # time at which the solution is evaluated [s]

num_testing_points = 250 # per length


# Generate points for boundaries

x_l_te = np.random.uniform(0, 0, num_testing_points) # left boundary

y_l_te = np.random.uniform(0, D, num_testing_points)


x_r_te = np.random.uniform(L, L, num_testing_points) # right boundary

y_r_te = np.random.uniform(0, D, num_testing_points)


x_b_te = np.random.uniform(0, L, num_testing_points) # bottom boundary

y_b_te = np.random.uniform(0, 0, num_testing_points)


x_t_te = np.random.uniform(0, L, num_testing_points) # top boundary

y_t_te = np.random.uniform(D, D, num_testing_points)


x_m_te = np.random.uniform(0, L, 62500) # mid field

y_m_te = np.random.uniform(0, D, 62500)


x_te = np.concatenate([x_l_te, x_r_te, x_b_te, x_t_te, x_m_te]) # x co-ordinates

y_te = np.concatenate([y_l_te, y_r_te, y_b_te, y_t_te, y_m_te]) # y co-ordinates


test_points = np.vstack((x_te, y_te)).T # stack x, y together


test_points_t = np.column_stack([test_points, np.full_like(test_points[:, 0], target_time_step)]) # generate test points for the target time step

test_points_t = torch.tensor(test_points_t, dtype=torch.float32).to(device) # convert to tensor and move training points to GPU / CPU


predict = model(test_points_t).detach().cpu().numpy() # predict and move back to CPU


T = predict[:, 0] # predicted field


# plotting

plt.figure(dpi = 300)

sc = plt.scatter(x_te, y_te, c = T, cmap = 'jet', s = 1, edgecolor = 'none', alpha = 1)

plt.colorbar(orientation = 'vertical')

plt.gca().set_aspect('equal', adjustable = 'box')

plt.xticks([0, L])

plt.yticks([0, D])

plt.xlabel('x [m]')

plt.ylabel('y [m]')

plt.axis('off')

plt.show()
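

#%% optional: render animation frames (a minimal sketch of how an animation like Fig. 1 can be made; the frame_*.png names are an assumption)
# for i, t_frame in enumerate(np.linspace(0, TT, 33)): # one frame per training time step
#     pts = np.column_stack([test_points, np.full_like(test_points[:, 0], t_frame)]) # same spatial points, new time
#     pts = torch.tensor(pts, dtype = torch.float32).to(device)
#     T_frame = model(pts).detach().cpu().numpy()[:, 0] # predicted field at this time
#     plt.figure(dpi = 150)
#     plt.scatter(x_te, y_te, c = T_frame, cmap = 'jet', s = 1, vmin = -2, vmax = 2) # fixed color scale across frames (range assumed)
#     plt.gca().set_aspect('equal', adjustable = 'box')
#     plt.axis('off')
#     plt.savefig(f"frame_{i:02d}.png", bbox_inches = 'tight')
#     plt.close()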

     The results of Code 02 (listed below) show that the neural network handles complex / mixed boundary and initial conditions with ease. The results are shown in Fig. 2.


Fig. 2, Complex boundary condition results

     Code 02

def wave_equation(we):
    we.requires_grad_(True) # track gradients with respect to inputs (x, y, t)
    
    out = model(we) # output from model
    
    T = out[:, 0] # transport
    
    dT = torch.autograd.grad(T, we, torch.ones_like(T), create_graph = True)[0] # dT/dwe
    
    T_x = dT[:, 0] # dT/dx
    T_y = dT[:, 1] # dT/dy
    T_t = dT[:, 2] # dT/dt
    
    T_xx = torch.autograd.grad(T_x, we, torch.ones_like(T_x), create_graph = True)[0][:, 0] # d2T/dx2
    T_yy = torch.autograd.grad(T_y, we, torch.ones_like(T_y), create_graph = True)[0][:, 1] # d2T/dy2
    T_tt = torch.autograd.grad(T_t, we, torch.ones_like(T_t), create_graph = True)[0][:, 2] # d2T/dt2
    
    wave = T_tt - (c**2 * (T_xx + T_yy)) # wave equation residual

    # apply boundary conditions
    T_left_bc = T[we[:, 0] == 0] - (2 * torch.sin(1.26 * we[(we[:, 0] == 0), 2])) # x = 0
    
    T_right_bc = T[we[:, 0] == L] - (2 * torch.sin(1.26 * we[(we[:, 0] == L), 2])) # x = L
    
    T_top_bc = T_y[we[:, 1] == D] # y = D
    T_bottom_bc = T[we[:, 1] == 0] # y = 0
    
    # apply initial conditions
    T_ic_I = T[we[:, 2] == 0] # t = 0
    T_ic_II = T_t[we[:, 2] == 0] # t = 0
    
    return wave, \
        T_left_bc, T_right_bc, T_top_bc, T_bottom_bc, \
            T_ic_I, T_ic_II
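
     Since this version drops the center pulse term, the loss function must be adjusted to match the new return signature. A minimal sketch of the changed lines (the normalization counts are kept from the original script):

wave, \
    T_left_bc, T_right_bc, T_top_bc, T_bottom_bc, \
        T_ic_I, T_ic_II = wave_equation(we) # no T_center term in this version

mse_loss = wave_loss / 1056 # equation loss (interior), center pulse term removed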

     Thank you for reading! 
