From 06d90f6856fe1e6196f652409c16f84887ad95cd Mon Sep 17 00:00:00 2001
From: Anastasia Alexadrova
Date: Fri, 28 Mar 2025 17:25:41 +0100
Subject: [PATCH] K8SPXC-1453 Added a section on how to use dataSource

new file:   docs/data-source.md
modified:   mkdocs-base.yml
---
 __pycache__/main.cpython-311.pyc | Bin 1705 -> 0 bytes
 docs/data-source.md              |  31 +++++++++++++++++++++++++++++++
 mkdocs-base.yml                  |   1 +
 3 files changed, 32 insertions(+)
 delete mode 100644 __pycache__/main.cpython-311.pyc
 create mode 100644 docs/data-source.md

diff --git a/__pycache__/main.cpython-311.pyc b/__pycache__/main.cpython-311.pyc
deleted file mode 100644
index 7ca444f13de0ee776cf1836f2afdf89010b29c18..0000000000000000000000000000000000000000
GIT binary patch

diff --git a/docs/data-source.md b/docs/data-source.md
new file mode 100644
index 00000000..aedf2740
--- /dev/null
+++ b/docs/data-source.md
@@ -0,0 +1,31 @@
+# Clone a cluster with the same data set
+
+A good practice is to test new functionality or an upgraded version of the database in a testing or staging environment. As a developer, you want the staging database cluster to already contain the production data so that your applications can start working with it immediately.
+
+The [`dataSource` :octicons-link-external-16:](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#volume-cloning) functionality lets you do just that. Instead of creating an empty PVC for a new cluster, you can clone an existing one. This enables you to spin up a new cluster with the data already in place almost instantly, which is especially beneficial if you use CI/CD.
+
+For example, you have the production Percona XtraDB Cluster `cluster1`. To test a new feature in your app, you need a staging cluster `cluster2` with the data set from `cluster1`.
+
+To create it, prepare the `cluster2-cr.yaml` Custom Resource manifest. You can use the existing [`deploy/cr.yaml`](https://raw.githubusercontent.com/percona/percona-xtradb-cluster-operator/{{release}}/deploy/cr.yaml) as a starting point. Specify the PVC from `cluster1` as the `dataSource`:
+
+```yaml
+pxc:
+  volumeSpec:
+    persistentVolumeClaim:
+      dataSource:
+        name: cluster1-pvc
+        kind: PersistentVolumeClaim
+```
+
+This configuration instructs the Operator to create a direct clone of the PVC from `cluster1`. If you have a snapshot of the PVC, you can use it as the data source for your staging cluster instead (see the example snapshot manifest at the end of this page). Here is how to define it:
+
+```yaml
+persistentVolumeClaim:
+  dataSource:
+    name: cluster1-pvc-snapshot1
+    kind: VolumeSnapshot
+    apiGroup: snapshot.storage.k8s.io
+```
+
+To create the staging database cluster, apply the `cluster2-cr.yaml` manifest, for example with `kubectl apply -f cluster2-cr.yaml`.
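+
+The snapshot-based example above assumes that a `VolumeSnapshot` named `cluster1-pvc-snapshot1` already exists for the `cluster1-pvc` volume. If you still need to create one, a minimal manifest might look like the following sketch; the `volumeSnapshotClassName` value here is an assumption and depends on the CSI driver configured in your environment:
+
+```yaml
+# Hypothetical snapshot of the cluster1 PVC; adjust the name and the
+# snapshot class to match your environment and CSI driver.
+apiVersion: snapshot.storage.k8s.io/v1
+kind: VolumeSnapshot
+metadata:
+  name: cluster1-pvc-snapshot1
+spec:
+  volumeSnapshotClassName: csi-snapclass
+  source:
+    persistentVolumeClaimName: cluster1-pvc
+```
+
+Once the snapshot reports `readyToUse: true` in its status, you can reference it from `cluster2-cr.yaml` as shown above.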
diff --git a/mkdocs-base.yml b/mkdocs-base.yml
index d96dece0..50e8c3cb 100644
--- a/mkdocs-base.yml
+++ b/mkdocs-base.yml
@@ -216,6 +216,7 @@ nav:
       - "Add sidecar containers": sidecar.md
      - "Restart or pause the cluster": pause.md
      - "Crash recovery": recovery.md
+     - data-source.md
  - Troubleshooting:
      - "Initial troubleshooting": debug.md