Solving PDEs with ModelingToolkit and NeuralPDE
Adapted from the NeuralPDE tutorial Solving Poisson PDE Systems (https://docs.sciml.ai/NeuralPDE/stable/tutorials/pdesystem/). We solve the two-dimensional Poisson equation
\[
\partial^{2}_{x}u(x,y) + \partial^{2}_{y}u(x,y) = -\sin (\pi x) \sin (\pi y)
\]
with boundary conditions
\[
\begin{aligned}
u(0, y) &= 0 \\
u(1, y) &= 0 \\
u(x, 0) &= 0 \\
u(x, 1) &= 0
\end{aligned}
\]
where
\(x \in [0, 1]\) and \(y \in [0, 1]\).
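This problem has a closed-form solution, which the comparison at the end of this notebook uses as ground truth:
\[
u(x, y) = \frac{\sin(\pi x) \sin(\pi y)}{2 \pi^{2}}
\]
Each second derivative contributes a factor of \(-\pi^{2}\), so the left-hand side equals \(-2 \pi^{2} u(x, y) = -\sin(\pi x) \sin(\pi y)\), and the sine factors vanish on all four boundaries.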
using NeuralPDE
using Lux
using Optimization
using OptimizationOptimJL
using ModelingToolkit
using ModelingToolkit: Interval, infimum, supremum
using LineSearches
using Plots
@parameters x y
@variables u(..)
Dxx = Differential(x)^2
Dyy = Differential(y)^2
Differential(y) ∘ Differential(y)
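The output confirms that Differential(y)^2 is the composition Differential(y) ∘ Differential(y). As an optional sanity check (not part of the original tutorial), expand_derivatives applies such an operator to a concrete expression:
expand_derivatives(Dxx(x^2))  # second derivative of x^2; evaluates to 2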
The 2D PDE:
eq = Dxx(u(x, y)) + Dyy(u(x, y)) ~ -sinpi(x) * sinpi(y)
\[
\frac{\partial^{2} u\left( x, y \right)}{\partial y^{2}} + \frac{\partial^{2} u\left( x, y \right)}{\partial x^{2}} = -\sin\left( \pi x \right) \sin\left( \pi y \right)
\]
Boundary conditions
bcs = [
u(0, y) ~ 0.0,
u(1, y) ~ 0.0,
u(x, 0) ~ 0.0,
u(x, 1) ~ 0.0
]
Spatial domains:
domains = [
x ∈ Interval(0.0, 1.0),
y ∈ Interval(0.0, 1.0)
]
2-element Vector{Symbolics.VarDomainPairing}:
Symbolics.VarDomainPairing(x, 0.0 .. 1.0)
Symbolics.VarDomainPairing(y, 0.0 .. 1.0)
Build a neural network for the PDE solver:
Input: 2 dimensions (x and y).
Hidden layers: two layers of 16 neurons each, with sigmoid (σ) activation.
Output: a single value, u(x, y).
dim = 2
chain = Lux.Chain(Dense(dim, 16, Lux.σ), Dense(16, 16, Lux.σ), Dense(16, 1))
Chain(
layer_1 = Dense(2 => 16, σ), # 48 parameters
layer_2 = Dense(16 => 16, σ), # 272 parameters
layer_3 = Dense(16 => 1), # 17 parameters
) # Total: 337 parameters,
# plus 0 states.
The discretization method uses PhysicsInformedNN() (a physics-informed neural network, PINN) with a quadrature-based training strategy. The step dx below is used only for the evaluation grid when plotting.
dx = 0.05
discretization = PhysicsInformedNN(chain, QuadratureTraining(; batch = 200, abstol = 1e-6, reltol = 1e-6))
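QuadratureTraining estimates the loss integrals adaptively. For a simpler sampling scheme, NeuralPDE also ships a grid-based strategy; a minimal sketch reusing the dx defined above:
# Alternative strategy: evaluate PDE and boundary residuals on a fixed grid.
discretization_grid = PhysicsInformedNN(chain, GridTraining(dx))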
Build the PDE system and discretize it.
@named pde_system = PDESystem(eq, bcs, domains, [x, y], [u(x, y)])
prob = discretize(pde_system, discretization)
OptimizationProblem. In-place: true
u0: ComponentVector{Float64} (randomly initialized network parameters; full printout omitted)
Callback function to record the loss
lossrecord = Float64[]
callback = function (p, l)
push!(lossrecord, l)
return false
end
#1 (generic function with 1 method)
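A variant that also prints progress could look like this (a sketch, not part of the original tutorial):
callback_verbose = function (p, l)
    push!(lossrecord, l)
    # Report every 50th loss value to monitor convergence.
    length(lossrecord) % 50 == 0 && println("Iteration $(length(lossrecord)): loss = $l")
    return false
end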
Solve the problem. It may take a long time.
opt = OptimizationOptimJL.LBFGS(linesearch = LineSearches.BackTracking())
res = Optimization.solve(prob, opt, callback = callback, maxiters=1000)
retcode: Success
u: ComponentVector{Float64} (optimized network parameters; full printout omitted)
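If the loss curve has not flattened out, a common follow-up (a sketch; remake rebuilds the problem with new initial values) is to warm-restart the optimization from the parameters found so far:
# Continue training from the current optimum for another 500 iterations.
prob2 = remake(prob, u0 = res.u)
res = Optimization.solve(prob2, opt, callback = callback, maxiters = 500)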
plot(lossrecord, xlabel="Iters", yscale=:log10, ylabel="Loss", lab=false)
Plot the predicted solution of the PDE and compare it with the analytical solution to see the absolute error.
xs, ys = [infimum(d.domain):dx/10:supremum(d.domain) for d in domains]
analytic_sol_func(x, y) = (sinpi(x) * sinpi(y)) / (2pi^2)
phi = discretization.phi
u_predict = reshape([first(phi([x, y], res.u)) for x in xs for y in ys], (length(xs), length(ys)))
u_real = reshape([analytic_sol_func(x, y) for x in xs for y in ys], (length(xs), length(ys)))
diff_u = abs.(u_predict .- u_real)
201×201 Matrix{Float64} of pointwise absolute errors (entries shown range from about 2e-5 to 1.5e-2; full printout omitted)
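Scalar summaries complement the contour plots that follow; a small optional addition:
# Worst-case and relative L2 error over the evaluation grid.
println("max abs error: ", maximum(diff_u))
println("relative L2 error: ", sqrt(sum(abs2, diff_u) / sum(abs2, u_real)))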
p1 = plot(xs, ys, u_real, linetype=:contourf, title = "analytic");
p2 = plot(xs, ys, u_predict, linetype=:contourf, title = "predicted");
p3 = plot(xs, ys, diff_u, linetype=:contourf, title = "error");
plot(p1, p2, p3)
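To keep the comparison figure, savefig from Plots.jl writes the current plot to disk (the filename is just an example):
savefig("poisson_pinn_comparison.png")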
This notebook was generated using Literate.jl.