12.1. Chorin splitting#

We apply a splitting method to solve the Navier-Stokes equations. The main idea is to first solve the momentum equation

\[ \frac{\partial u^*}{\partial t} + \operatorname{div}( u^* \otimes u^*) - \nu \Delta u^* = f, \]

with the same boundary conditions for \(u^*\) as before but ignoring the divergence constraint. In a second step we project the solution

\[ u = P(u^*), \]

such that \(\operatorname{div} P(u^*) = 0\). The motivation for this splitting is that we can use “simpler” solvers designed for \(H^1\)-like problems (i.e. without constraints). For the projection we aim to solve the pressure Poisson problem (with appropriate boundary conditions)

\[ -\Delta p = \operatorname{div} u^*, \]

with boundary conditions

\[\begin{align*} \nabla p \cdot n &= 0 \quad \text{on} \quad \Gamma_{wall} \cup \Gamma_{inlet} \cup \Gamma_{slip}, \\ p &= 0 \quad \text{on} \quad \partial \Omega \setminus \{\Gamma_{wall} \cup \Gamma_{inlet} \cup \Gamma_{slip}\}. \end{align*}\]

Setting \(u = \nabla p + u^*\) we have \(\operatorname{div} u = \Delta p + \operatorname{div} u^* = 0\) and on \(\Gamma_{wall} \cup \Gamma_{inlet} \cup \Gamma_{slip}\) we have \(u \cdot n = \nabla p \cdot n + u^* \cdot n = u^* \cdot n\).

Again we see that we can use “classical” (parallel-) solvers designed for \(H^1\)-problems to solve the pressure Poisson problem.

While splitting methods are very common in CFD, we will see below that an \(H(div)\)-conforming discretization is particularly well suited for this approach.
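For a single time step of size \(\tau\), the splitting used below thus reads, schematically (convection will be treated explicitly and diffusion implicitly, see the following sections):

\[\begin{align*} \frac{u^* - u^n}{\tau} + \operatorname{div}(u^n \otimes u^n) - \nu \Delta u^* &= f, \\ -\Delta p &= \operatorname{div} u^*, \\ u^{n+1} &= u^* + \nabla p. \end{align*}\]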

We aim to solve the flow in a channel around a spherical object (take a closer look… does it look familiar? :eyes:)

from commonCFD import *
from ipyparallel import Cluster
MPI_RANKS = min(4,MPI_RANKS) # increase if the problem size increases (and you have more resources)
c = await Cluster(engines="mpi").start_and_connect(n=MPI_RANKS, activate=True)
Starting 4 engines with <class 'ipyparallel.cluster.launcher.MPIEngineSetLauncher'>
%%px 
from commonCFD import *
from mpi4py import MPI
from ngsolve import *
from netgen.occ import *
from ngsolve.krylovspace import CGSolver
from ngsolve.krylovspace import MinResSolver, GMResSolver, BramblePasciakCG
ngsglobals.numthreads=1

import ngsolve.ngs2petsc as n2p
import petsc4py.PETSc as psc

box = Box( (-2,-1.5,-1.5), (6,1.5,1.5))
box.faces.name = "wall"
box.faces.Min(X).name = "inlet"
box.faces.Max(X).name = "outlet"
ngsolve = Sphere( (0,0,0), 0.8) - Sphere( (0,0,0), 0.5)  - Cylinder((-1,0.5,0.5),X, r=0.4, h=2) - Cylinder((-0.5,-1,-0.5),Y, r=0.4, h=2) - Cylinder((0.5,-0.5,-1),Z, r=0.4, h=2)
ngsolve.faces.col = (1,0,0)
ngsolve.faces.name = "wall"
shape = box-ngsolve
ngmesh = OCCGeometry(shape).GenerateMesh(maxh=0.2,comm=MPI.COMM_WORLD)

from ngsolve.webgui import Draw

mesh = Mesh(ngmesh)
order = 1
mesh.Curve(order);


nu = 0.1
dt = 0.01
if (MPI.COMM_WORLD.rank != 0):
    Draw(mesh);
print(MPI.COMM_WORLD.rank, mesh.ne)
[stdout:0] 0 0
[output:1]
[output:3]
[output:2]
[stdout:3] 3 12427
[stdout:2] 2 11598
[stdout:1] 1 12072

12.1.1. Stokes solution as initial condition#

%%px 
VT = HDiv(mesh,order=order, dirichlet="wall|inlet")
Vhat = TangentialFacetFESpace(mesh,order=order, dirichlet="wall|inlet")
Q = L2(mesh,order=order-1)

if mesh.comm.rank==0:
    print ("Solving initial Stokes problem:")
    print ("ndof = ", VT.ndofglobal,'+',Vhat.ndofglobal,'+',Q.ndofglobal,'=',
           VT.ndofglobal + Vhat.ndofglobal + Q.ndofglobal)

V = VT * Vhat

(u, uhat), (v,vhat) = V.TnT()
p,q = Q.TnT()

n = specialcf.normal(mesh.dim)
h = specialcf.mesh_size

def tang(u):
    return u - (u*n)*n

elint = True

gamma = 10

bfa = BilinearForm(V, condense = elint, store_inner = elint)
bfa += (nu*InnerProduct(Grad(u), Grad(v))) * dx()
bfa += nu * Grad(u)*n * tang(vhat - v) * dx(element_boundary = True)
bfa += nu * Grad(v) * n *  tang(uhat-u) * dx(element_boundary = True)
bfa += nu  *gamma*order*(order+1)/h * InnerProduct ( tang(vhat-v),  tang(uhat-u) ) * dx(element_boundary = True)
bfa.Assemble()
A = bfa.mat

bfb = BilinearForm(div(u)*q*dx).Assemble()
B = bfb.mat

F = LinearForm(V).Assemble()
G = LinearForm(Q).Assemble()

rhs_vec = BlockVector([F.vec, G.vec])

Vc = VectorH1(mesh, order = 1, dirichlet = "inlet|wall")
V1c = H1(mesh, order = 1, dirichlet = "inlet|wall")

u1c, v1c = V1c.TnT()
bfa1c = BilinearForm(nu * InnerProduct(Grad(u1c),Grad(v1c))*dx)

hata1c = Preconditioner(bfa1c, "gamg")
bfa1c.Assemble()

hatAcinv = sum( [Ei@hata1c@Ei.T for Ei in Vc.embeddings])

emb1 = ConvertOperator(spacea = Vc, spaceb = VT, localop = True, parmat = False, range_dofs = VT.FreeDofs(elint))
tc1 = V.Embedding(0).local_mat
emb2 = ConvertOperator(spacea = Vc, spaceb = Vhat, localop = True, parmat = False, range_dofs = Vhat.FreeDofs(elint))
tc2 = V.Embedding(1).local_mat

embA = tc1 @ emb1 + tc2 @ emb2

sm_blocks = list()


for facet in mesh.facets:
    block = list( dof for dof in V.GetDofNrs(facet) if dof>=0 and V.FreeDofs(elint)[dof])
    if len(block):
        sm_blocks.append(block)
        
if not elint:
    for elnr in range(mesh.ne):
        block = list( dof for dof in V.GetDofNrs(NodeId(ELEMENT, elnr)) if dof>=0 and V.FreeDofs(elint)[dof])
        if len(block):
            sm_blocks.append(block)

bsmoother = bfa.mat.local_mat.CreateBlockSmoother(blocks = sm_blocks, parallel=True)

hatAinv = embA @ hatAcinv @ embA.T + bsmoother 

if elint:
    Ahex, Ahext, Aiii, Aii  = bfa.harmonic_extension.local_mat, bfa.harmonic_extension_trans.local_mat, bfa.inner_solve.local_mat, bfa.inner_matrix.local_mat
    Id = IdentityMatrix(A.height)

    Ihex = ParallelMatrix(Id + Ahex, row_pardofs = A.row_pardofs, col_pardofs = A.row_pardofs, op = ParallelMatrix.C2C)
    Ihext = ParallelMatrix(Id + Ahext, row_pardofs = A.row_pardofs, col_pardofs = A.row_pardofs, op = ParallelMatrix.D2D)
    Isolve = ParallelMatrix(Aiii, row_pardofs = A.row_pardofs, col_pardofs = A.row_pardofs, op = ParallelMatrix.D2C)

    hatAAinv = ( Ihex @ hatAinv @ Ihext ) + Isolve

    AA = (Id - Ahext) @ (A.local_mat + Aii) @ (Id - Ahex)
    AA = ParallelMatrix(AA, row_pardofs = A.row_pardofs, col_pardofs = A.col_pardofs, op = ParallelMatrix.C2D)
else:
    hatAAinv = hatAinv
    AA = A

K = BlockMatrix([[AA,B.T], [B, None]])

hatS = BilinearForm(1/nu * p*q*dx).Assemble()
hatSinv = hatS.mat.Inverse(Q.FreeDofs()) 

hatK_block_inv = BlockMatrix([[hatAAinv, None],[None, hatSinv]])

gfu = GridFunction(V)
gfp = GridFunction(Q)

uin = 4/9*CF(((z+1.5)*(z-1.5)*(y+1.5)*(y-1.5),0,0))

gfu.components[0].Set(uin, definedon = mesh.Boundaries("inlet"))

sol_vec = BlockVector([gfu.vec, gfp.vec])

rhs_vec.data = -K* sol_vec


from ngsolve import BaseMatrix

class BSPC(BaseMatrix):
    def __init__(self, M, Ahat, Shat):
        super(BSPC, self).__init__()
        self.M = M
        self.A, self.B, self.BT = M[0,0], M[1,0], M[0,1]
        self.Ahat = Ahat
        self.Shat = Shat
        self.mBTSg2 = self.A.CreateColVector()
        self.g2 = self.Shat.CreateColVector()
        self.xtemp = self.M.CreateColVector()
    def IsComplex(self):
        return False
    def Height(self):
        return self.M.height
    def Width(self):
        return self.M.width
    def CreateColVector(self):
        return self.M.CreateColVector()
    def CreateRowVector(self):
        return self.M.CreateRowVector()
    def Mult(self, b, x):
        # inexact block-LU application: Ahat ~ A^{-1}, Shat ~ inverse of the
        # pressure Schur complement preconditioner
        f, g = b[0], b[1]
        x[0].data = self.Ahat * f                # velocity: x0 ~ A^{-1} f
        self.g2.data = g - self.B * x[0]         # pressure residual: g - B x0
        x[1].data = - self.Shat * self.g2        # pressure update
        self.mBTSg2.data = self.BT * x[1]
        x[0].data -= self.Ahat * self.mBTSg2     # velocity correction with B^T x1
    def MultTrans(self, b, x):
        self.Mult(b, x)
    def MultAdd(self, scal, b, x):
        self.Mult(b, self.xtemp)
        x.data += scal * self.xtemp
    def MultTransAdd(self, scal, b, x):
        self.MultAdd(scal, b, x)
        
hatK_sz_inv = BSPC(M = K, Ahat = hatAAinv, Shat = hatSinv)

solver = GMResSolver(mat = K, pre = hatK_sz_inv , maxiter=500, \
                    printrates='\r' if mesh.comm.rank==0 else False, tol=1e-5)
sol_vec.data += solver * rhs_vec
[stdout:0] Solving initial Stokes problem:
ndof =  227622 + 455244 + 36097 = 718963
GMRes converged in 162 iterations to residual 0.0002982304119306732

12.1.2. Interlude: Write VTK Output#

(optional; depending on VTKOUT in commonCFD.py): Each processor writes its own file and the metadata is written to a single file.

The VTK output can then be inspected

  • with ParaView

  • or through itkwidgets, pyvista, or similar tools in a Jupyter notebook.

In vtk.ipynb we show how to do this with pyvista; a short sketch is also given below.

Note that writing VTK output only makes sense for small problems if you want to inspect it through pyvista or itkwidgets.

%%px
if VTKOUT: VTKOutput(ma=mesh,coefs=[gfu.components[0],gfp],names=["velocity","pressure"], 
                     floatsize="single", filename="parstokes",subdivision=0).Do()

Alternatively we can gather all the data on one processor and use our conventional means (webgui) for visualization:

from ngsolve import *
from ngsolve.webgui import Draw, FieldLines
gfu = c[:]["gfu"]
N=100
fieldlines = FieldLines(gfu[0].components[0], gfu[0].space.mesh.Boundaries('inlet'), num_lines=N, length=3)

Draw(gfu[0].components[0],objects=[fieldlines], settings={"Objects": {"Surface": True, "Wireframe":False}},
     euler_angles = (16.748369506964842,-25.39835395236453,0.3657242596990491), #
     clipping={"function": False, "x":0, "y":0, "z":-1, "dist": 1});

12.1.3. Solving the momentum equation#

As in the previous section, we are going to use an operator splitting method to solve the momentum equation (without the divergence constraint!)

\[\begin{split} \underbrace{\left( \begin{pmatrix} M & 0 \\ 0 & 0 \end{pmatrix} + \tau A\right)}_{= M^*} \begin{pmatrix} u \\ \hat u \end{pmatrix}^{n+1} = M^* \begin{pmatrix} u \\ \hat u \end{pmatrix}^{n} + \tau \left(f - A \begin{pmatrix} u \\ \hat u \end{pmatrix}^{n} - C(u^n)u^n \right). \end{split}\]

Note that the pressure is not included in the system and that we have replaced \(K\) by \(A\).

We aim to solve this system with a CGSolver (\(M^*\) is now SPD). As preconditioner we use an overlapping domain-decomposition method. The main idea is that for a small viscosity \(\nu\) and a small time step \(\tau\) we have \(M^* \simeq M\), so a block solver, where each block consists of the restriction of \(M^*\) to an element \(T\), yields an appropriate preconditioner. Moreover, since this preconditioner acts only on local dofs, it is well suited for an MPI-parallel implementation.

There are various ways to set up this solver in NGSolve (e.g. you could use the block-Jacobi smoother from before). We are going to make use of the BDDC preconditioner framework. Note that the term BDDC might be misleading here, since we will not make use of the “constraint” part of BDDC. In NGSolve the preconditioner is built based on the COUPLING_TYPE of the dofs: a WIREBASKET_DOF (i.e. a dof shared by several elements) remains in the globally coupled system, while all dofs of type INTERFACE_DOF or LOCAL_DOF are treated locally.

Thus, to achieve the desired behaviour we simply re-mark all WIREBASKET_DOFs (which are located at interfaces/facets) as INTERFACE_DOFs.

%%px
for i in range(V.ndof):
    if V.CouplingType(i) == COUPLING_TYPE.WIREBASKET_DOF:
        V.SetCouplingType(i, COUPLING_TYPE.INTERFACE_DOF)
%%px
res = gfu.vec.CreateVector()
temp = gfu.vec.CreateVector()

mstar = BilinearForm(V, condense=True)
mstar += dt * nu*InnerProduct(Grad(u), Grad(v)) * dx()
mstar += dt * nu * Grad(u)*n * tang(vhat - v) * dx(element_boundary = True)
mstar += dt * nu * Grad(v) * n *  tang(uhat-u) * dx(element_boundary = True)
mstar += dt * nu  *gamma*order*(order+1)/h * InnerProduct ( tang(vhat-v),  tang(uhat-u) ) * dx(element_boundary = True)
mstar += InnerProduct(u,v)*dx

pre = Preconditioner(mstar, type="bddc")
mstar.Assemble()

inv1 = CGSolver(mstar.mat, pre=pre, tol=1e-4, printrates=False)

MShex, MShext, MSiii  = mstar.harmonic_extension.local_mat, mstar.harmonic_extension_trans.local_mat, mstar.inner_solve.local_mat
IdMS = IdentityMatrix(mstar.mat.height)


IMShex = ParallelMatrix(IdMS + MShex, row_pardofs = mstar.mat.row_pardofs, col_pardofs = mstar.mat.row_pardofs, op = ParallelMatrix.C2C)
IMShext = ParallelMatrix(IdMS + MShext, row_pardofs = mstar.mat.row_pardofs, col_pardofs = mstar.mat.row_pardofs, op = ParallelMatrix.D2D)
IMSsolve = ParallelMatrix(MSiii, row_pardofs = mstar.mat.row_pardofs, col_pardofs = mstar.mat.row_pardofs, op = ParallelMatrix.D2C)

inv = IMShex @ inv1 @ IMShext + IMSsolve

12.1.4. Explicit DG convection operator#

Below you find the convection operator formulated in a VectorL2 space, where its application is much cheaper than a formulation in the HDiv space. The main idea follows the construction discussed in the NGSolve documentation. On top we use a geometry-free implementation via the piola=True flag.

%%px
realcompile = True
VL2 = VectorL2(mesh, order=order, piola=True)
ul2,vl2 = VL2.TnT()
conv_l2 = BilinearForm(VL2, nonassemble=True)
conv_l2 += -InnerProduct(grad(vl2)*ul2, ul2).Compile(realcompile=realcompile, wait=True) * dx(bonus_intorder=2)
conv_l2 += (IfPos(ul2 * n, ul2*n*ul2*vl2, ul2*n*ul2.Other()*vl2)).Compile(realcompile=realcompile, wait=True) * dx(element_boundary=True, bonus_intorder=2)
V_convertl2 = comp.ConvertOperator(spacea = VT, spaceb = VL2, localop = True, parmat = False,
                                    bonus_intorder_ab = 2, range_dofs = VL2.FreeDofs(), geom_free = True)
        
convertl2 = V_convertl2 @ V.Restriction(0)
conv_operator = convertl2.T @ conv_l2.mat @ convertl2

12.1.5. Solving the pressure Poisson problem#

The final ingredient needed is the projection of the intermediate velocity \(u^*\) such that it is divergence-free. For this we aim to solve the pressure Poisson problem in a mixed form

\[\begin{align*} \tilde u &= -\nabla p, \\ \operatorname{div} \tilde u &= \operatorname{div} u^*. \end{align*}\]

Fortunately, since we are already using compatible finite element spaces we can directly formulate the corresponding weak formulation: find \(\tilde u_h \in V^T_h\) and \(p_h \in Q_h\) such that

\[\begin{align*} \int_\Omega -\tilde u_h \cdot v_h + \int_\Omega \operatorname{div} v_h \, p_h &= 0 &\quad \forall v_h \in V_h^T,\\ \int_\Omega \operatorname{div} \tilde u_h \, q_h &= \int_\Omega \operatorname{div} u^*_h \, q_h &\quad \forall q_h \in Q_h. \end{align*}\]

Due to the compatibility condition \(\operatorname{div} V_h^T = Q_h\), we have \(\operatorname{div} \tilde u_h = \operatorname{div} u^*_h\) (exactly), and thus setting \(u_h = P(u_h^*) = u_h^* - \tilde u_h\) gives our final solution. Since it is not feasible to solve the mixed problem efficiently in this form, we use a hybridized version instead (see also the NGSolve documentation). Thus we solve the system: find \(\tilde u_h \in V^{T,-}_h\), \(\hat p_h \in \hat Q_h\) and \(p_h \in Q_h\) such that

\[\begin{align*} \int_\Omega -\tilde u_h \cdot v_h + \sum_T \int_T \operatorname{div} v_h \, p_h - \int_{\partial T} v_h \cdot n \hat p_h &= 0 &\quad \forall v_h \in V_h^T,\\ \sum_T \int_T \operatorname{div} \tilde u_h \, q_h - \int_{\partial T} \tilde u_h \cdot n \hat q_h&= \int_\Omega \operatorname{div} u^*_h \, q_h &\quad \forall (q_h, \hat q_h) \in Q_h \times \hat Q_h, \end{align*}\]

where \(V_h^{T,-} = [\mathbb{P}^k]^d\) is the “broken” (i.e. discontinuous) BDM space, and \(\hat Q_h = \{\hat q_h \in L^2(\mathcal{F}_h): \hat q_h|_{E} \in \mathbb{P}^k(E) ~\forall E \in \mathcal{F}_h\}\). Note that \(\hat Q_h\) is the “normal-trace space” of \(V_h^T\), in the sense that \(v_h \cdot n \in \hat Q_h\) for all \(v_h \in V_h^T\). As a consequence, testing the second equation with \(q_h = 0\) and \(\hat q_h = [\![ \tilde u_h \cdot n]\!]\) (the jump of the normal components of \(\tilde u_h\)), we obtain

\[ \| [\![ \tilde u_h \cdot n]\!] \|_{L^2(\mathcal{F}_h)}^2 = \sum_E \int_E [\![ \tilde u_h \cdot n]\!]^2 = \sum_T \int_{\partial T} \tilde u_h \cdot n [\![ \tilde u_h \cdot n]\!] = 0, \]

thus \(\tilde u_h \in V_h^T\) (it is normal-continuous!). The crucial advantage is that we can apply static condensation for the hybridized variable. The remaining Schur complement matrix for the facet degrees of freedom in Qhat can be interpreted as an HDG \(H^1\)-like diffusion matrix, for which we can use PETSc's gamg in combination with a CGSolver.

Note

Although natural and essential boundary conditions are interchanged when the pressure Poisson problem is formulated in mixed form, the hybridization again enforces the Dirichlet values for \(p_h\) in an essential manner (in the space Qhat) and implements the Neumann conditions naturally.

%%px
V2 = HDiv(mesh, order=order, discontinuous=True)
Qhat = FacetFESpace(mesh, order=order, dirichlet='outlet')
Xproj = V2*Q*Qhat
(u,p,phat),(v,q,qhat) = Xproj.TnT()
aproj = BilinearForm(Xproj, condense=True)
aproj += (-u*v+ div(u)*q + div(v)*p) * dx + (u*n*qhat+v*n*phat) * dx(element_boundary = True)
# cproj = Preconditioner(aproj, "bddc")
cproj = Preconditioner(aproj, "gamg")
aproj.Assemble()
gfu_proj = GridFunction(Xproj)
pressure = -gfu_proj.components[1]

invproj1 = CGSolver(aproj.mat, maxiter=10000, tol=1E-14, pre=cproj, printrates=False)

APhex, APhext, APiii  = aproj.harmonic_extension.local_mat, aproj.harmonic_extension_trans.local_mat, aproj.inner_solve.local_mat
IdP = IdentityMatrix(aproj.mat.height)

IPhex = ParallelMatrix(IdP + APhex, row_pardofs = aproj.mat.row_pardofs, col_pardofs = aproj.mat.row_pardofs, op = ParallelMatrix.C2C)
IPhext = ParallelMatrix(IdP + APhext, row_pardofs = aproj.mat.row_pardofs, col_pardofs = aproj.mat.row_pardofs, op = ParallelMatrix.D2D)
IPsolve = ParallelMatrix(APiii, row_pardofs = aproj.mat.row_pardofs, col_pardofs = aproj.mat.row_pardofs, op = ParallelMatrix.D2C)

invproj = IPhex @ invproj1 @ IPhext + IPsolve

To assemble the right-hand side for the mixed problem we compute the finite element matrix of the bilinear form \(b(u_h, q_h) = \int_\Omega \operatorname{div} u_h \, q_h\) for \(q_h \in Q_h\) and \(u_h \in V_h^T\) (i.e. in the normal-continuous space!). Note that q is still a TestFunction of the product space Xproj. Thus, the matrix has dimension “ndofs of Xproj” times “ndofs of VT”. Since our solution from the first step lives in V = VT * Vhat, we can then compute the right-hand side simply via bproj.mat * gfu.vec[V.Range(0)].

%%px
bproj = BilinearForm(div(VT.TrialFunction())*q*dx).Assemble()
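As a quick sanity check of these dimensions (a sketch that only inspects the local, per-rank matrices):

%%px
# sketch: on each rank the local matrix of bproj maps local VT coefficients
# to local Xproj coefficients, i.e. it is "ndofs of Xproj" x "ndofs of VT"
print(mesh.comm.rank, bproj.mat.local_mat.height == Xproj.ndof,
      bproj.mat.local_mat.width == VT.ndof)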

Since the GridFunction of the velocity solution \(\tilde u_h\) is an element of the broken space \(V_h^{T,-}\) (V2), we still need to “transform” it to a GridFunction (or an associated coefficient vector) on VT.

Note that we have defined V2 as V2 = HDiv(..., discontinuous = True) rather than using a VectorL2(...) space. The difference is that the flag discontinuous=True simply “breaks” the (normal-) continuous basis functions associated with facets into their parts on the adjacent elements. This is crucial since it allows us to define a transfer operation for the embedding \(V_h^T \subset V_h^{T,-}\) by copying/averaging the (two) coefficients of the broken space into the coefficient of the continuous space. This can be achieved using a PermutationMatrix.

%%px
ind = VT.ndof * [0]
for el in mesh.Elements(VOL):
    dofs1 = VT.GetDofNrs(el)
    dofs2 = V2.GetDofNrs(el)
    for d1,d2 in zip(dofs1,dofs2):
        ind[d1] = d2

mapV = PermutationMatrix(Xproj.ndof, ind)
%%px
time = 0
while time < 100*dt:
    # solve the momentum equation without divergence constraint
    res.data = -conv_operator * gfu.vec - AA * gfu.vec
    temp.data = inv * res
    
    # solve the pressure Poisson problem
    gfu_proj.vec.data = invproj @ bproj.mat * temp[V.Range(0)]
    
    # map the correcting velocity to VT and update the final solution 
    temp[V.Range(0)].data -= mapV * gfu_proj.vec
    gfu.vec.data += dt * temp
    time += dt

    if MPI.COMM_WORLD.rank==0:
        print("t: ", time, end = "\r")
[stdout:0] t:  1.0000000000000007
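As a sanity check (a sketch), we can verify that the resulting velocity is discretely divergence-free (up to the solver tolerances):

%%px
# sketch: measure the L2 norm of div(u); it should be close to zero
err_div = Integrate(div(gfu.components[0])**2, mesh)
if mesh.comm.rank == 0:
    print("L2 norm of div(u):", err_div**0.5)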
from ngsolve import *
from ngsolve.webgui import Draw, FieldLines
gfu = c[:]["gfu"]
N=100
fieldlines = FieldLines(gfu[0].components[0], gfu[0].space.mesh.Boundaries('inlet'), num_lines=N, length=3)

Draw(gfu[0].components[0],objects=[fieldlines], settings={"Objects": {"Surface": True, "Wireframe":False}},
     euler_angles = (16.748369506964842,-25.39835395236453,0.3657242596990491), #
     clipping={"function": False, "x":0, "y":0, "z":-1, "dist": 1});
c.shutdown(hub=True)