Solving PDEs with ModelingToolkit and NeuralPDE

Based on the NeuralPDE tutorial Solving Poisson PDE Systems (https://docs.sciml.ai/NeuralPDE/stable/tutorials/pdesystem/). We solve the two-dimensional Poisson equation

\[ \partial^{2}_{x}u(x,y) + \partial^{2}_{y}u(x,y) = -\sin (\pi x) \sin (\pi y) \]

with boundary conditions

\[\begin{split} \begin{align} u(0, y) &= 0 \\ u(1, y) &= 0 \\ u(x, 0) &= 0 \\ u(x, 1) &= 0 \\ \end{align} \end{split}\]

where

\(x ∈ [0, 1], y ∈ [0, 1]\)
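For reference, this problem has a closed-form solution, which is used later to measure the error of the neural network prediction:

\[ u(x, y) = \frac{\sin (\pi x) \sin (\pi y)}{2 \pi^{2}} \]

Substituting it into the equation gives \(\partial^{2}_{x}u + \partial^{2}_{y}u = -2\pi^{2} u = -\sin (\pi x) \sin (\pi y)\), and \(u\) vanishes on all four boundaries.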

using NeuralPDE
using Lux
using Optimization
using OptimizationOptimJL
using ModelingToolkit
using ModelingToolkit: Interval
using LineSearches
using Plots
@parameters x y
@variables u(..)
Dxx = Differential(x)^2
Dyy = Differential(y)^2
Differential(y) ∘ Differential(y)
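As a quick sanity check (a sketch, not part of the original tutorial; it assumes Symbolics, which ships as a dependency of ModelingToolkit), the composed operator can be applied to a test function and expanded symbolically:

using Symbolics
# Applying Dxx to sin(πx) and expanding should give -π² sin(πx).
Symbolics.expand_derivatives(Dxx(sin(pi * x)))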

2D PDE

eq  = Dxx(u(x, y)) + Dyy(u(x, y)) ~ -sinpi(x) * sinpi(y)
\[ \begin{equation} \frac{\partial^{2}}{\partial y^{2}} u\left( x, y \right) + \frac{\partial^{2}}{\partial x^{2}} u\left( x, y \right) = - \sin\left( \pi x \right) \sin\left( \pi y \right) \end{equation} \]

Boundary conditions

bcs = [
    u(0, y) ~ 0.0,
    u(1, y) ~ 0.0,
    u(x, 0) ~ 0.0,
    u(x, 1) ~ 0.0
]
\[\begin{split} \begin{align} u\left( 0, y \right) &= 0 \\ u\left( 1, y \right) &= 0 \\ u\left( x, 0 \right) &= 0 \\ u\left( x, 1 \right) &= 0 \end{align} \end{split}\]

Spatial domains

domains = [
    x ∈ Interval(0.0, 1.0),
    y ∈ Interval(0.0, 1.0)
]
2-element Vector{Symbolics.VarDomainPairing}:
 Symbolics.VarDomainPairing(x, 0.0 .. 1.0)
 Symbolics.VarDomainPairing(y, 0.0 .. 1.0)

Build a neural network for the PDE solver: 2 inputs (x and y), two hidden layers of 16 neurons each, and a single output u(x, y).

dim = 2
chain = Lux.Chain(Dense(dim, 16, Lux.σ), Dense(16, 16, Lux.σ), Dense(16, 1))
Chain(
    layer_1 = Dense(2 => 16, σ),        # 48 parameters
    layer_2 = Dense(16 => 16, σ),       # 272 parameters
    layer_3 = Dense(16 => 1),           # 17 parameters
)         # Total: 337 parameters,
          #        plus 0 states.
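To inspect the network outside of NeuralPDE, its parameters can be initialized by hand with Lux (a minimal sketch, assuming the Random standard library):

using Random
# Initialize parameters and states manually, then count the parameters;
# parameterlength should report 337, matching the summary above.
ps, st = Lux.setup(Random.default_rng(), chain)
Lux.parameterlength(chain)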

The discretization uses PhysicsInformedNN() (a physics-informed neural network, PINN) with the QuadratureTraining strategy.

dx = 0.05  # not needed by QuadratureTraining; used below to build the plotting grid
discretization = PhysicsInformedNN(chain, QuadratureTraining(; batch = 200, abstol = 1e-6, reltol = 1e-6))
NeuralPDE.PhysicsInformedNN{Lux.Chain{@NamedTuple{layer_1::Lux.Dense{typeof(NNlib.σ), Int64, Int64, Nothing, Nothing, Static.True}, layer_2::Lux.Dense{typeof(NNlib.σ), Int64, Int64, Nothing, Nothing, Static.True}, layer_3::Lux.Dense{typeof(identity), Int64, Int64, Nothing, Nothing, Static.True}}, Nothing}, NeuralPDE.QuadratureTraining{Float64, Integrals.CubatureJLh}, Nothing, NeuralPDE.Phi{Lux.StatefulLuxLayer{Static.True, Lux.Chain{@NamedTuple{layer_1::Lux.Dense{typeof(NNlib.σ), Int64, Int64, Nothing, Nothing, Static.True}, layer_2::Lux.Dense{typeof(NNlib.σ), Int64, Int64, Nothing, Nothing, Static.True}, layer_3::Lux.Dense{typeof(identity), Int64, Int64, Nothing, Nothing, Static.True}}, Nothing}, Nothing, @NamedTuple{layer_1::@NamedTuple{}, layer_2::@NamedTuple{}, layer_3::@NamedTuple{}}}}, typeof(NeuralPDE.numeric_derivative), Bool, Nothing, Nothing, Nothing, Base.RefValue{Int64}, Base.Pairs{Symbol, Union{}, Tuple{}, @NamedTuple{}}}(Lux.Chain{@NamedTuple{layer_1::Lux.Dense{typeof(NNlib.σ), Int64, Int64, Nothing, Nothing, Static.True}, layer_2::Lux.Dense{typeof(NNlib.σ), Int64, Int64, Nothing, Nothing, Static.True}, layer_3::Lux.Dense{typeof(identity), Int64, Int64, Nothing, Nothing, Static.True}}, Nothing}((layer_1 = Dense(2 => 16, σ), layer_2 = Dense(16 => 16, σ), layer_3 = Dense(16 => 1)), nothing), NeuralPDE.QuadratureTraining{Float64, Integrals.CubatureJLh}(Integrals.CubatureJLh(0), 1.0e-6, 1.0e-6, 1000, 200), nothing, NeuralPDE.Phi{Lux.StatefulLuxLayer{Static.True, Lux.Chain{@NamedTuple{layer_1::Lux.Dense{typeof(NNlib.σ), Int64, Int64, Nothing, Nothing, Static.True}, layer_2::Lux.Dense{typeof(NNlib.σ), Int64, Int64, Nothing, Nothing, Static.True}, layer_3::Lux.Dense{typeof(identity), Int64, Int64, Nothing, Nothing, Static.True}}, Nothing}, Nothing, @NamedTuple{layer_1::@NamedTuple{}, layer_2::@NamedTuple{}, layer_3::@NamedTuple{}}}}(Lux.StatefulLuxLayer{Static.True, Lux.Chain{@NamedTuple{layer_1::Lux.Dense{typeof(NNlib.σ), Int64, Int64, Nothing, Nothing, Static.True}, layer_2::Lux.Dense{typeof(NNlib.σ), Int64, Int64, Nothing, Nothing, Static.True}, layer_3::Lux.Dense{typeof(identity), Int64, Int64, Nothing, Nothing, Static.True}}, Nothing}, Nothing, @NamedTuple{layer_1::@NamedTuple{}, layer_2::@NamedTuple{}, layer_3::@NamedTuple{}}}(Lux.Chain{@NamedTuple{layer_1::Lux.Dense{typeof(NNlib.σ), Int64, Int64, Nothing, Nothing, Static.True}, layer_2::Lux.Dense{typeof(NNlib.σ), Int64, Int64, Nothing, Nothing, Static.True}, layer_3::Lux.Dense{typeof(identity), Int64, Int64, Nothing, Nothing, Static.True}}, Nothing}((layer_1 = Dense(2 => 16, σ), layer_2 = Dense(16 => 16, σ), layer_3 = Dense(16 => 1)), nothing), nothing, (layer_1 = NamedTuple(), layer_2 = NamedTuple(), layer_3 = NamedTuple()), nothing, static(true))), NeuralPDE.numeric_derivative, false, nothing, nothing, nothing, NeuralPDE.LogOptions(50), Base.RefValue{Int64}(1), true, false, Base.Pairs{Symbol, Union{}, Tuple{}, @NamedTuple{}}())
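QuadratureTraining adaptively integrates the residual loss to the requested tolerances. A simpler alternative is GridTraining, which evaluates the loss on a fixed grid (a sketch; not used in the rest of this notebook):

# Train on a fixed dx-spaced grid instead of adaptive quadrature;
# simpler, but typically less accurate for the same budget.
discretization_grid = PhysicsInformedNN(chain, GridTraining(dx))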

Build the PDE system and discretize it.

@named pde_system = PDESystem(eq, bcs, domains, [x, y], [u(x, y)])
prob = discretize(pde_system, discretization)
OptimizationProblem. In-place: true
u0: ComponentVector{Float64}(layer_1 = (weight = [-1.1222529411315918 0.6395617723464966; -0.6218140721321106 -0.720457136631012; … ; -0.07807270437479019 0.16371439397335052; -0.8029779195785522 -0.49542102217674255], bias = [0.3474361002445221, -0.31184300780296326, 0.6202742457389832, -0.09670552611351013, 0.13229617476463318, 0.6966440081596375, 0.1997760683298111, 0.2694702446460724, 0.6044501066207886, -0.6209971308708191, -0.6564324498176575, 0.21250762045383453, -0.1416701376438141, 0.0229464303702116, 0.5081490278244019, 0.5159335732460022]), layer_2 = (weight = [-0.2989715337753296 0.12227608263492584 … 0.21597962081432343 0.4293718934059143; -0.3991950750350952 -0.3122188150882721 … 0.3270843029022217 -0.1880023181438446; … ; -0.23482050001621246 -0.24017439782619476 … 0.07585633546113968 -0.35413554310798645; -0.3130508065223694 -0.08321768790483475 … -0.025621261447668076 -0.1287911832332611], bias = [-0.11655455827713013, -0.10586532950401306, -0.07215151190757751, 0.032992780208587646, 0.1305292844772339, 0.22078007459640503, 0.2402491271495819, 0.02768397331237793, 0.09719166159629822, -0.01827406883239746, -0.038592904806137085, 0.1359422206878662, 0.17371279001235962, 0.09279778599739075, -0.007151186466217041, 0.24503540992736816]), layer_3 = (weight = [-0.04771347716450691 0.05554518476128578 … 0.3934426009654999 -0.2592220604419708], bias = [0.23722699284553528]))
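To see what discretize generates, NeuralPDE also provides symbolic_discretize, which returns the underlying PINN representation, including the loss functions built from the PDE and boundary conditions (a sketch; the exact fields may vary across NeuralPDE versions):

# Inspect the symbolic PINN representation instead of the final problem.
sym_prob = symbolic_discretize(pde_system, discretization)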

Define a callback function to record the loss at each iteration.

lossrecord = Float64[]
callback = function (p, l)
    push!(lossrecord, l)
    return false
end
#1 (generic function with 1 method)
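A variant that also prints progress every 100 iterations (a sketch; same signature as above):

callback_verbose = function (p, l)
    push!(lossrecord, l)
    # Report the loss periodically so long runs are easier to monitor.
    if length(lossrecord) % 100 == 0
        println("Iteration ", length(lossrecord), ": loss = ", l)
    end
    return false
end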

Solve the problem. It may take a long time.

opt = OptimizationOptimJL.LBFGS(linesearch = LineSearches.BackTracking())
res = Optimization.solve(prob, opt, callback = callback, maxiters=1000)
retcode: Success
u: ComponentVector{Float64}(layer_1 = (weight = [-1.3640311695416425 -0.37895227862306463; -0.30870507347662324 -0.41556745032719833; … ; -0.8690549577234251 0.22888509596938794; -0.13494517899438566 -0.5150159905214853], bias = [-0.9070423707500228, 0.053399699746117385, 1.8938470896426478, 0.18073391384072504, 0.17965764164317066, 1.0204666001608644, -1.3945084318838585, 0.32597258742867896, 0.9889321409162648, -0.8484169688401932, -2.020255978993131, 0.5510438454528116, -0.20209779982748088, 0.25747631425574846, 0.7227480363738052, 0.9527660121702912]), layer_2 = (weight = [-0.31382512041539806 0.10091542462717763 … 0.23134558432456714 0.5266301063086443; -0.33896658340906677 -0.29497686059560935 … 0.2691490410088436 -0.07230019019724832; … ; 0.4184539560344784 -0.17821172260905002 … -0.27905256572294024 -1.0336477284033123; -0.7130210566291567 -0.3910612447757921 … 0.07977863129518038 0.1000330150535036], bias = [-0.1335714097607903, -0.20271058342637235, 0.18454203972602423, 0.122365666961942, -0.1310495787749589, 0.932546921676121, 0.38858785084725056, -0.13774925257588053, 0.024197616217348956, 0.24647101982313904, -0.5285098579834663, 0.2235448796862309, -0.011793281042292108, 0.2870048545437379, -0.18762190226726655, 0.5179011212934544]), layer_3 = (weight = [-0.19422726743583155 -0.07595594127135329 … 1.4206066597749838 -1.2153454682159515], bias = [0.7338440375826932]))
plot(lossrecord, xlabel="Iters", yscale=:log10, ylabel="Loss", lab=false)
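If the loss curve suggests the optimizer has not converged, training can be continued from the current minimizer (a sketch, assuming SciMLBase's remake, which Optimization re-exports):

# Warm-start a second run from the trained parameters.
prob2 = remake(prob, u0 = res.u)
res2 = Optimization.solve(prob2, opt, callback = callback, maxiters = 500)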

Plot the predicted solution of the PDE and compare it with the analytic solution to examine the pointwise absolute error.

xs, ys = [infimum(d.domain):dx/10:supremum(d.domain) for d in domains]
analytic_sol_func(x,y) = (sinpi(x)*sinpi(y))/(2pi^2)

phi = discretization.phi
u_predict = reshape([first(phi([x, y], res.u)) for x in xs for y in ys], (length(xs), length(ys)))
u_real = reshape([analytic_sol_func(x, y) for x in xs for y in ys], (length(xs), length(ys)))
diff_u = abs.(u_predict .- u_real)
201×201 Matrix{Float64}:
 0.000519919  0.000512311  0.000504585  …  9.44203e-5   4.36244e-5
 0.000473602  0.000467937  0.000462102     2.77798e-5   2.39177e-5
 0.000428008  0.000424239  0.000420251     3.64589e-5   8.90025e-5
 0.000383142  0.000381224  0.000379037     9.83308e-5   0.000151666
 0.000339009  0.000338897  0.000338467     0.000157871  0.000211943
 0.000295616  0.000297264  0.000298548  …  0.000215113  0.000269869
 0.00025297   0.000256333  0.000259286     0.000270092  0.00032548
 0.000211077  0.00021611   0.000220688     0.000322842  0.000378811
 0.000169945  0.000176603  0.000182762     0.000373397  0.000429895
 0.000129582  0.000137819  0.000145517     0.000421791  0.000478769
 ⋮                                      ⋱               ⋮
 0.00117564   0.00122471   0.00127098      0.000606014  0.000637812
 0.00130792   0.0013555    0.00140024      0.000639956  0.000673023
 0.00144317   0.00148919   0.00153236      0.000673731  0.000708069
 0.00158135   0.00162578   0.00166733   …  0.000707297  0.000742907
 0.00172245   0.00176524   0.00180512      0.000740613  0.000777495
 0.00186645   0.00190756   0.00194572      0.000773637  0.000811789
 0.00201334   0.00205271   0.00208912      0.000806325  0.000845745
 0.00216309   0.00220068   0.00223528      0.000838632  0.000879319
 0.00231567   0.00235144   0.00238418   …  0.000870516  0.000912464
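A scalar summary of the accuracy (a sketch, using the LinearAlgebra standard library):

using LinearAlgebra
maximum(diff_u)                               # maximum pointwise error
norm(u_predict .- u_real) / norm(u_real)      # relative L2 error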
p1 = plot(xs, ys, u_real, linetype=:contourf, title = "analytic");
p2 = plot(xs, ys, u_predict, linetype=:contourf, title = "predicted");
p3 = plot(xs, ys, diff_u, linetype=:contourf, title = "error");
plot(p1, p2, p3)

This notebook was generated using Literate.jl.