From 2e8d5063f7a68e45f25cd9aaf3e5075a6f250c30 Mon Sep 17 00:00:00 2001 From: Rob Winch Date: Thu, 20 Jul 2023 17:07:16 -0500 Subject: [PATCH] Migrate documentation to Antora Issue #4422 --- .github/workflows/deploy-docs.yml | 30 + .gitignore | 5 + README.md | 5 +- pom.xml | 4 +- spring-batch-docs/antora-playbook.yml | 40 + spring-batch-docs/antora.yml | 11 + .../ROOT/assets}/images/1-1-step.png | Bin .../images/BatchExecutionEnvironments.bmp | Bin .../assets}/images/ExecutionEnvironment.png | Bin .../ROOT/assets}/images/PipeAndFilter.jpg | Bin .../ROOT/assets}/images/PipeAndFilter.png | Bin .../ROOT/assets}/images/RepeatTemplate.png | Bin .../assets}/images/RuntimeDependencies.png | Bin .../ROOT/assets}/images/application-tier.png | Bin ...riented-processing-with-item-processor.png | Bin .../images/chunk-oriented-processing.png | Bin .../assets}/images/composite-transformer.png | Bin .../ROOT/assets}/images/conditional-flow.png | Bin .../ROOT/assets}/images/cursorExample.png | Bin .../assets}/images/domain-classdiagram.jpg | Bin .../assets}/images/drivingQueryExample.png | Bin .../ROOT/assets}/images/drivingQueryJob.png | Bin .../ROOT/assets}/images/errorOnFlush.png | Bin .../ROOT/assets}/images/errorOnWrite.png | Bin .../images/execution-environment-config.jpg | Bin .../images/flat-file-input-source-design.gif | Bin .../images/flat-file-input-source-design.jpg | Bin .../images/flatfile-input-source-diagram.jpg | Bin .../handling-informational-messages.png | Bin .../ROOT/assets}/images/io-design.jpg | Bin .../images/item-oriented-processing.png | Bin .../assets}/images/item-reader-design.jpg | Bin .../assets}/images/item-stream-adapter.jpg | Bin .../ROOT/assets}/images/jmx-job.jpg | Bin .../ROOT/assets}/images/jmx.jpg | Bin .../ROOT/assets}/images/job-heirarchy.png | Bin .../images/job-launcher-sequence-async.png | Bin .../images/job-launcher-sequence-sync.png | Bin .../images/job-repository-advanced.png | Bin .../ROOT/assets}/images/job-repository.png | Bin .../images/job-stereotypes-parameters.png | Bin .../assets}/images/jobHeirarchyWithSteps.png | Bin .../assets}/images/launch-batch-job-svg.svg | 0 .../ROOT/assets}/images/launch-batch-job.png | Bin .../assets}/images/launch-from-request.png | Bin .../ROOT/assets}/images/meta-data-erd.png | Bin .../ROOT/assets}/images/nfljob-config.jpg | Bin .../ROOT/assets}/images/nfljob.jpg | Bin .../ROOT/assets}/images/oxm-fragments.png | Bin .../ROOT/assets}/images/partitioned.png | Bin .../assets}/images/partitioning-overview.png | Bin .../ROOT/assets}/images/partitioning-spi.png | Bin .../assets}/images/remote-chunking-config.png | Bin .../assets}/images/remote-chunking-sbi.png | Bin .../ROOT/assets}/images/remote-chunking.png | Bin ...remote-partitioning-aggregation-config.png | Bin .../remote-partitioning-polling-config.png | Bin .../assets}/images/remote-partitioning.png | Bin .../images/repository-classdiagram.jpg | Bin .../ROOT/assets}/images/run-tier.png | Bin .../assets}/images/s1-job-configuration.jpg | Bin .../ROOT/assets}/images/sequential-flow.png | Bin .../images/simple-batch-execution-env.jpg | Bin .../simple-tasklet-job-configuration.jpg | Bin .../simplified-chunk-oriented-processing.png | Bin .../images/spring-batch-football-graph.jpg | Bin .../assets}/images/spring-batch-layers.png | Bin .../images/spring-batch-reference-model.png | Bin .../ROOT/assets}/images/spring-batch.png | Bin .../ROOT/assets}/images/step.png | Bin .../ROOT/assets}/images/xmlinput.png | Bin spring-batch-docs/modules/ROOT/nav.adoc | 62 + 
.../ROOT/pages}/appendix.adoc | 10 +- .../ROOT/pages}/attributes.adoc | 0 .../ROOT/pages}/common-patterns.adoc | 424 ++- .../ROOT/pages}/domain.adoc | 137 +- .../ROOT/pages}/footer/index-footer.adoc | 0 .../ROOT/pages}/glossary.adoc | 6 +- .../ROOT/pages/header/index-header.adoc | 3 + .../modules/ROOT/pages/index.adoc | 46 + spring-batch-docs/modules/ROOT/pages/job.adoc | 22 + .../ROOT/pages/job/advanced-meta-data.adoc | 556 +++ .../ROOT/pages/job/configuring-launcher.adoc | 120 + .../pages/job/configuring-repository.adoc | 262 ++ .../modules/ROOT/pages/job/configuring.adoc | 316 ++ .../modules/ROOT/pages/job/java-config.adoc | 105 + .../modules/ROOT/pages/job/running.adoc | 281 ++ .../ROOT/pages}/monitoring-and-metrics.adoc | 23 +- .../ROOT/pages}/processor.adoc | 151 +- .../pages/readers-and-writers/custom.adoc | 188 + .../pages/readers-and-writers/database.adoc | 747 ++++ .../delegate-pattern-registering.adoc | 89 + .../pages/readers-and-writers/flat-files.adoc | 11 + .../flat-files/field-set.adoc | 30 + .../flat-files/file-item-reader.adoc | 660 ++++ .../flat-files/file-item-writer.adoc | 445 +++ .../item-reader-writer-implementations.adoc | 313 ++ .../readers-and-writers/item-reader.adoc | 48 + .../readers-and-writers/item-stream.adoc | 38 + .../readers-and-writers/item-writer.adoc | 30 + .../json-reading-writing.adoc | 88 + .../readers-and-writers/multi-file-input.adoc | 60 + .../process-indicator.adoc | 75 + .../reusing-existing-services.adoc | 112 + .../xml-reading-writing.adoc | 373 ++ .../modules/ROOT/pages/readersAndWriters.adoc | 13 + .../ROOT/pages}/repeat.adoc | 80 +- .../ROOT/pages}/retry.adoc | 8 +- .../ROOT/pages}/scalability.adoc | 295 +- .../ROOT/pages}/schema-appendix.adoc | 44 +- .../pages}/spring-batch-architecture.adoc | 11 +- .../ROOT/pages/spring-batch-integration.adoc | 42 + ...tributes-of-the-job-launching-gateway.adoc | 38 + .../launching-jobs-through-messages.adoc | 256 ++ .../namespace-support.adoc | 57 + .../sub-elements.adoc} | 1033 ++---- .../ROOT/pages}/spring-batch-intro.adoc | 35 +- .../modules/ROOT/pages/step.adoc | 21 + .../pages/step/chunk-oriented-processing.adoc | 60 + .../commit-interval.adoc | 62 + .../configuring-skip.adoc | 142 + .../configuring.adoc | 94 + .../controlling-rollback.adoc | 103 + .../inheriting-from-parent.adoc | 108 + .../intercepting-execution.adoc | 266 ++ .../registering-item-streams.adoc | 92 + .../chunk-oriented-processing/restart.adoc | 247 ++ .../retry-logic.adoc | 58 + .../transaction-attributes.adoc | 57 + .../ROOT/pages/step/controlling-flow.adoc | 828 +++++ .../modules/ROOT/pages/step/late-binding.adoc | 405 +++ .../modules/ROOT/pages/step/tasklet.adoc | 212 ++ .../ROOT/pages}/testing.adoc | 74 +- .../modules/ROOT/pages/tracing.adoc | 9 + .../ROOT/pages}/transaction-appendix.adoc | 30 +- .../ROOT/pages}/whatsnew.adoc | 29 +- spring-batch-docs/pom.xml | 108 +- .../resources/antora-resources/antora.yml | 8 + .../main/asciidoc/header/index-header.adoc | 1 - .../src/main/asciidoc/index-single.adoc | 49 - .../src/main/asciidoc/index.adoc | 52 - spring-batch-docs/src/main/asciidoc/job.adoc | 1676 --------- .../src/main/asciidoc/js/DocumentToggle.js | 76 - .../src/main/asciidoc/js/Redirect.js | 62 - .../src/main/asciidoc/js/jquery-3.2.1.min.js | 4 - .../src/main/asciidoc/js/js.cookie.js | 165 - .../src/main/asciidoc/readersAndWriters.adoc | 3073 ----------------- spring-batch-docs/src/main/asciidoc/step.adoc | 2512 -------------- .../src/main/asciidoc/toggle.adoc | 15 - 149 files changed, 9447 insertions(+), 9089 deletions(-) 
create mode 100644 .github/workflows/deploy-docs.yml create mode 100644 spring-batch-docs/antora-playbook.yml create mode 100644 spring-batch-docs/antora.yml rename spring-batch-docs/{src/main/asciidoc => modules/ROOT/assets}/images/1-1-step.png (100%) rename spring-batch-docs/{src/main/asciidoc => modules/ROOT/assets}/images/BatchExecutionEnvironments.bmp (100%) rename spring-batch-docs/{src/main/asciidoc => modules/ROOT/assets}/images/ExecutionEnvironment.png (100%) rename spring-batch-docs/{src/main/asciidoc => modules/ROOT/assets}/images/PipeAndFilter.jpg (100%) rename spring-batch-docs/{src/main/asciidoc => modules/ROOT/assets}/images/PipeAndFilter.png (100%) rename spring-batch-docs/{src/main/asciidoc => modules/ROOT/assets}/images/RepeatTemplate.png (100%) rename spring-batch-docs/{src/main/asciidoc => modules/ROOT/assets}/images/RuntimeDependencies.png (100%) rename spring-batch-docs/{src/main/asciidoc => modules/ROOT/assets}/images/application-tier.png (100%) rename spring-batch-docs/{src/main/asciidoc => modules/ROOT/assets}/images/chunk-oriented-processing-with-item-processor.png (100%) rename spring-batch-docs/{src/main/asciidoc => modules/ROOT/assets}/images/chunk-oriented-processing.png (100%) rename spring-batch-docs/{src/main/asciidoc => modules/ROOT/assets}/images/composite-transformer.png (100%) rename spring-batch-docs/{src/main/asciidoc => modules/ROOT/assets}/images/conditional-flow.png (100%) rename spring-batch-docs/{src/main/asciidoc => modules/ROOT/assets}/images/cursorExample.png (100%) rename spring-batch-docs/{src/main/asciidoc => modules/ROOT/assets}/images/domain-classdiagram.jpg (100%) rename spring-batch-docs/{src/main/asciidoc => modules/ROOT/assets}/images/drivingQueryExample.png (100%) rename spring-batch-docs/{src/main/asciidoc => modules/ROOT/assets}/images/drivingQueryJob.png (100%) rename spring-batch-docs/{src/main/asciidoc => modules/ROOT/assets}/images/errorOnFlush.png (100%) rename spring-batch-docs/{src/main/asciidoc => modules/ROOT/assets}/images/errorOnWrite.png (100%) rename spring-batch-docs/{src/main/asciidoc => modules/ROOT/assets}/images/execution-environment-config.jpg (100%) rename spring-batch-docs/{src/main/asciidoc => modules/ROOT/assets}/images/flat-file-input-source-design.gif (100%) rename spring-batch-docs/{src/main/asciidoc => modules/ROOT/assets}/images/flat-file-input-source-design.jpg (100%) rename spring-batch-docs/{src/main/asciidoc => modules/ROOT/assets}/images/flatfile-input-source-diagram.jpg (100%) rename spring-batch-docs/{src/main/asciidoc => modules/ROOT/assets}/images/handling-informational-messages.png (100%) rename spring-batch-docs/{src/main/asciidoc => modules/ROOT/assets}/images/io-design.jpg (100%) rename spring-batch-docs/{src/main/asciidoc => modules/ROOT/assets}/images/item-oriented-processing.png (100%) rename spring-batch-docs/{src/main/asciidoc => modules/ROOT/assets}/images/item-reader-design.jpg (100%) rename spring-batch-docs/{src/main/asciidoc => modules/ROOT/assets}/images/item-stream-adapter.jpg (100%) rename spring-batch-docs/{src/main/asciidoc => modules/ROOT/assets}/images/jmx-job.jpg (100%) rename spring-batch-docs/{src/main/asciidoc => modules/ROOT/assets}/images/jmx.jpg (100%) rename spring-batch-docs/{src/main/asciidoc => modules/ROOT/assets}/images/job-heirarchy.png (100%) rename spring-batch-docs/{src/main/asciidoc => modules/ROOT/assets}/images/job-launcher-sequence-async.png (100%) rename spring-batch-docs/{src/main/asciidoc => modules/ROOT/assets}/images/job-launcher-sequence-sync.png 
(100%) rename spring-batch-docs/{src/main/asciidoc => modules/ROOT/assets}/images/job-repository-advanced.png (100%) rename spring-batch-docs/{src/main/asciidoc => modules/ROOT/assets}/images/job-repository.png (100%) rename spring-batch-docs/{src/main/asciidoc => modules/ROOT/assets}/images/job-stereotypes-parameters.png (100%) rename spring-batch-docs/{src/main/asciidoc => modules/ROOT/assets}/images/jobHeirarchyWithSteps.png (100%) rename spring-batch-docs/{src/main/asciidoc => modules/ROOT/assets}/images/launch-batch-job-svg.svg (100%) rename spring-batch-docs/{src/main/asciidoc => modules/ROOT/assets}/images/launch-batch-job.png (100%) rename spring-batch-docs/{src/main/asciidoc => modules/ROOT/assets}/images/launch-from-request.png (100%) rename spring-batch-docs/{src/main/asciidoc => modules/ROOT/assets}/images/meta-data-erd.png (100%) rename spring-batch-docs/{src/main/asciidoc => modules/ROOT/assets}/images/nfljob-config.jpg (100%) rename spring-batch-docs/{src/main/asciidoc => modules/ROOT/assets}/images/nfljob.jpg (100%) rename spring-batch-docs/{src/main/asciidoc => modules/ROOT/assets}/images/oxm-fragments.png (100%) rename spring-batch-docs/{src/main/asciidoc => modules/ROOT/assets}/images/partitioned.png (100%) rename spring-batch-docs/{src/main/asciidoc => modules/ROOT/assets}/images/partitioning-overview.png (100%) rename spring-batch-docs/{src/main/asciidoc => modules/ROOT/assets}/images/partitioning-spi.png (100%) rename spring-batch-docs/{src/main/asciidoc => modules/ROOT/assets}/images/remote-chunking-config.png (100%) rename spring-batch-docs/{src/main/asciidoc => modules/ROOT/assets}/images/remote-chunking-sbi.png (100%) rename spring-batch-docs/{src/main/asciidoc => modules/ROOT/assets}/images/remote-chunking.png (100%) rename spring-batch-docs/{src/main/asciidoc => modules/ROOT/assets}/images/remote-partitioning-aggregation-config.png (100%) rename spring-batch-docs/{src/main/asciidoc => modules/ROOT/assets}/images/remote-partitioning-polling-config.png (100%) rename spring-batch-docs/{src/main/asciidoc => modules/ROOT/assets}/images/remote-partitioning.png (100%) rename spring-batch-docs/{src/main/asciidoc => modules/ROOT/assets}/images/repository-classdiagram.jpg (100%) rename spring-batch-docs/{src/main/asciidoc => modules/ROOT/assets}/images/run-tier.png (100%) rename spring-batch-docs/{src/main/asciidoc => modules/ROOT/assets}/images/s1-job-configuration.jpg (100%) rename spring-batch-docs/{src/main/asciidoc => modules/ROOT/assets}/images/sequential-flow.png (100%) rename spring-batch-docs/{src/main/asciidoc => modules/ROOT/assets}/images/simple-batch-execution-env.jpg (100%) rename spring-batch-docs/{src/main/asciidoc => modules/ROOT/assets}/images/simple-tasklet-job-configuration.jpg (100%) rename spring-batch-docs/{src/main/asciidoc => modules/ROOT/assets}/images/simplified-chunk-oriented-processing.png (100%) rename spring-batch-docs/{src/main/asciidoc => modules/ROOT/assets}/images/spring-batch-football-graph.jpg (100%) rename spring-batch-docs/{src/main/asciidoc => modules/ROOT/assets}/images/spring-batch-layers.png (100%) rename spring-batch-docs/{src/main/asciidoc => modules/ROOT/assets}/images/spring-batch-reference-model.png (100%) rename spring-batch-docs/{src/main/asciidoc => modules/ROOT/assets}/images/spring-batch.png (100%) rename spring-batch-docs/{src/main/asciidoc => modules/ROOT/assets}/images/step.png (100%) rename spring-batch-docs/{src/main/asciidoc => modules/ROOT/assets}/images/xmlinput.png (100%) create mode 100644 
spring-batch-docs/modules/ROOT/nav.adoc rename spring-batch-docs/{src/main/asciidoc => modules/ROOT/pages}/appendix.adoc (98%) rename spring-batch-docs/{src/main/asciidoc => modules/ROOT/pages}/attributes.adoc (100%) rename spring-batch-docs/{src/main/asciidoc => modules/ROOT/pages}/common-patterns.adoc (94%) rename spring-batch-docs/{src/main/asciidoc => modules/ROOT/pages}/domain.adoc (90%) rename spring-batch-docs/{src/main/asciidoc => modules/ROOT/pages}/footer/index-footer.adoc (100%) rename spring-batch-docs/{src/main/asciidoc => modules/ROOT/pages}/glossary.adoc (98%) create mode 100644 spring-batch-docs/modules/ROOT/pages/header/index-header.adoc create mode 100644 spring-batch-docs/modules/ROOT/pages/index.adoc create mode 100644 spring-batch-docs/modules/ROOT/pages/job.adoc create mode 100644 spring-batch-docs/modules/ROOT/pages/job/advanced-meta-data.adoc create mode 100644 spring-batch-docs/modules/ROOT/pages/job/configuring-launcher.adoc create mode 100644 spring-batch-docs/modules/ROOT/pages/job/configuring-repository.adoc create mode 100644 spring-batch-docs/modules/ROOT/pages/job/configuring.adoc create mode 100644 spring-batch-docs/modules/ROOT/pages/job/java-config.adoc create mode 100644 spring-batch-docs/modules/ROOT/pages/job/running.adoc rename spring-batch-docs/{src/main/asciidoc => modules/ROOT/pages}/monitoring-and-metrics.adoc (82%) rename spring-batch-docs/{src/main/asciidoc => modules/ROOT/pages}/processor.adoc (94%) create mode 100644 spring-batch-docs/modules/ROOT/pages/readers-and-writers/custom.adoc create mode 100644 spring-batch-docs/modules/ROOT/pages/readers-and-writers/database.adoc create mode 100644 spring-batch-docs/modules/ROOT/pages/readers-and-writers/delegate-pattern-registering.adoc create mode 100644 spring-batch-docs/modules/ROOT/pages/readers-and-writers/flat-files.adoc create mode 100644 spring-batch-docs/modules/ROOT/pages/readers-and-writers/flat-files/field-set.adoc create mode 100644 spring-batch-docs/modules/ROOT/pages/readers-and-writers/flat-files/file-item-reader.adoc create mode 100644 spring-batch-docs/modules/ROOT/pages/readers-and-writers/flat-files/file-item-writer.adoc create mode 100644 spring-batch-docs/modules/ROOT/pages/readers-and-writers/item-reader-writer-implementations.adoc create mode 100644 spring-batch-docs/modules/ROOT/pages/readers-and-writers/item-reader.adoc create mode 100644 spring-batch-docs/modules/ROOT/pages/readers-and-writers/item-stream.adoc create mode 100644 spring-batch-docs/modules/ROOT/pages/readers-and-writers/item-writer.adoc create mode 100644 spring-batch-docs/modules/ROOT/pages/readers-and-writers/json-reading-writing.adoc create mode 100644 spring-batch-docs/modules/ROOT/pages/readers-and-writers/multi-file-input.adoc create mode 100644 spring-batch-docs/modules/ROOT/pages/readers-and-writers/process-indicator.adoc create mode 100644 spring-batch-docs/modules/ROOT/pages/readers-and-writers/reusing-existing-services.adoc create mode 100644 spring-batch-docs/modules/ROOT/pages/readers-and-writers/xml-reading-writing.adoc create mode 100644 spring-batch-docs/modules/ROOT/pages/readersAndWriters.adoc rename spring-batch-docs/{src/main/asciidoc => modules/ROOT/pages}/repeat.adoc (95%) rename spring-batch-docs/{src/main/asciidoc => modules/ROOT/pages}/retry.adoc (92%) rename spring-batch-docs/{src/main/asciidoc => modules/ROOT/pages}/scalability.adoc (91%) rename spring-batch-docs/{src/main/asciidoc => modules/ROOT/pages}/schema-appendix.adoc (95%) rename spring-batch-docs/{src/main/asciidoc => 
modules/ROOT/pages}/spring-batch-architecture.adoc (98%) create mode 100644 spring-batch-docs/modules/ROOT/pages/spring-batch-integration.adoc create mode 100644 spring-batch-docs/modules/ROOT/pages/spring-batch-integration/available-attributes-of-the-job-launching-gateway.adoc create mode 100644 spring-batch-docs/modules/ROOT/pages/spring-batch-integration/launching-jobs-through-messages.adoc create mode 100644 spring-batch-docs/modules/ROOT/pages/spring-batch-integration/namespace-support.adoc rename spring-batch-docs/{src/main/asciidoc/spring-batch-integration.adoc => modules/ROOT/pages/spring-batch-integration/sub-elements.adoc} (64%) rename spring-batch-docs/{src/main/asciidoc => modules/ROOT/pages}/spring-batch-intro.adoc (88%) create mode 100644 spring-batch-docs/modules/ROOT/pages/step.adoc create mode 100644 spring-batch-docs/modules/ROOT/pages/step/chunk-oriented-processing.adoc create mode 100644 spring-batch-docs/modules/ROOT/pages/step/chunk-oriented-processing/commit-interval.adoc create mode 100644 spring-batch-docs/modules/ROOT/pages/step/chunk-oriented-processing/configuring-skip.adoc create mode 100644 spring-batch-docs/modules/ROOT/pages/step/chunk-oriented-processing/configuring.adoc create mode 100644 spring-batch-docs/modules/ROOT/pages/step/chunk-oriented-processing/controlling-rollback.adoc create mode 100644 spring-batch-docs/modules/ROOT/pages/step/chunk-oriented-processing/inheriting-from-parent.adoc create mode 100644 spring-batch-docs/modules/ROOT/pages/step/chunk-oriented-processing/intercepting-execution.adoc create mode 100644 spring-batch-docs/modules/ROOT/pages/step/chunk-oriented-processing/registering-item-streams.adoc create mode 100644 spring-batch-docs/modules/ROOT/pages/step/chunk-oriented-processing/restart.adoc create mode 100644 spring-batch-docs/modules/ROOT/pages/step/chunk-oriented-processing/retry-logic.adoc create mode 100644 spring-batch-docs/modules/ROOT/pages/step/chunk-oriented-processing/transaction-attributes.adoc create mode 100644 spring-batch-docs/modules/ROOT/pages/step/controlling-flow.adoc create mode 100644 spring-batch-docs/modules/ROOT/pages/step/late-binding.adoc create mode 100644 spring-batch-docs/modules/ROOT/pages/step/tasklet.adoc rename spring-batch-docs/{src/main/asciidoc => modules/ROOT/pages}/testing.adoc (95%) create mode 100644 spring-batch-docs/modules/ROOT/pages/tracing.adoc rename spring-batch-docs/{src/main/asciidoc => modules/ROOT/pages}/transaction-appendix.adoc (93%) rename spring-batch-docs/{src/main/asciidoc => modules/ROOT/pages}/whatsnew.adoc (84%) create mode 100644 spring-batch-docs/src/main/antora/resources/antora-resources/antora.yml delete mode 100644 spring-batch-docs/src/main/asciidoc/header/index-header.adoc delete mode 100644 spring-batch-docs/src/main/asciidoc/index-single.adoc delete mode 100644 spring-batch-docs/src/main/asciidoc/index.adoc delete mode 100644 spring-batch-docs/src/main/asciidoc/job.adoc delete mode 100644 spring-batch-docs/src/main/asciidoc/js/DocumentToggle.js delete mode 100644 spring-batch-docs/src/main/asciidoc/js/Redirect.js delete mode 100644 spring-batch-docs/src/main/asciidoc/js/jquery-3.2.1.min.js delete mode 100644 spring-batch-docs/src/main/asciidoc/js/js.cookie.js delete mode 100644 spring-batch-docs/src/main/asciidoc/readersAndWriters.adoc delete mode 100644 spring-batch-docs/src/main/asciidoc/step.adoc delete mode 100644 spring-batch-docs/src/main/asciidoc/toggle.adoc diff --git a/.github/workflows/deploy-docs.yml b/.github/workflows/deploy-docs.yml new file mode 
100644 index 0000000000..4af2314b75 --- /dev/null +++ b/.github/workflows/deploy-docs.yml @@ -0,0 +1,30 @@ +name: Deploy Docs +on: + push: + branches-ignore: [ gh-pages ] + tags: '**' + repository_dispatch: + types: request-build-reference # legacy + workflow_dispatch: +permissions: + actions: write +jobs: + build: + runs-on: ubuntu-latest + if: github.repository_owner == 'spring-projects' + steps: + - name: Checkout + uses: actions/checkout@v3 + with: + ref: docs-build + fetch-depth: 1 + - name: Dispatch (partial build) + if: github.ref_type == 'branch' + env: + GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} + run: gh workflow run deploy-docs.yml -r $(git rev-parse --abbrev-ref HEAD) -f build-refname=${{ github.ref_name }} + - name: Dispatch (full build) + if: github.ref_type == 'tag' + env: + GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} + run: gh workflow run deploy-docs.yml -r $(git rev-parse --abbrev-ref HEAD) diff --git a/.gitignore b/.gitignore index 300d387baf..e5ff39fa6a 100644 --- a/.gitignore +++ b/.gitignore @@ -25,3 +25,8 @@ out /.gradletasknamecache **/*.flattened-pom.xml + +node +node_modules +package-lock.json +package.json diff --git a/README.md b/README.md index f597c5dd30..29c7ffe157 100644 --- a/README.md +++ b/README.md @@ -60,12 +60,11 @@ Please note that some integration tests are based on Docker, so please make sure To generate the reference documentation, run the following commands: ``` -$ ./mvnw javadoc:aggregate $ cd spring-batch-docs -$ ../mvnw site +$ ../mvnw antora:antora ``` -The reference documentation can be found in `spring-batch-docs/target`. +The reference documentation can be found in `spring-batch-docs/target/antora/site`. ## Using Docker diff --git a/pom.xml b/pom.xml index 815e3388f4..b47e1c0281 100644 --- a/pom.xml +++ b/pom.xml @@ -136,9 +136,7 @@ 3.0.19 - 1.6.2 - 1.5.1 - 0.0.6 + 0.0.3 3.11.0 diff --git a/spring-batch-docs/antora-playbook.yml b/spring-batch-docs/antora-playbook.yml new file mode 100644 index 0000000000..b46171c98f --- /dev/null +++ b/spring-batch-docs/antora-playbook.yml @@ -0,0 +1,40 @@ +# PACKAGES antora@3.2.0-alpha.2 @antora/atlas-extension:1.0.0-alpha.1 @antora/collector-extension@1.0.0-alpha.3 @springio/antora-extensions@1.1.0-alpha.2 @asciidoctor/tabs@1.0.0-alpha.12 @opendevise/antora-release-line-extension@1.0.0-alpha.2 +# +# The purpose of this Antora playbook is to build the docs in the current branch. +antora: + extensions: + - '@springio/antora-extensions/partial-build-extension' + - require: '@springio/antora-extensions/inject-collector-cache-config-extension' + - '@antora/collector-extension' + - '@antora/atlas-extension' + - require: '@springio/antora-extensions/root-component-extension' + root_component_name: 'batch' +site: + title: Spring Batch Reference + url: https://docs.spring.io/spring-batch/reference +content: + sources: + - url: ..
+ branches: HEAD + start_path: spring-batch-docs + worktrees: true +asciidoc: + attributes: + page-pagination: '' + hide-uri-scheme: '@' + tabs-sync-option: '@' + chomp: 'all' + extensions: + - '@asciidoctor/tabs' + - '@springio/asciidoctor-extensions' + sourcemap: true +urls: + latest_version_segment: '' +runtime: + log: + failure_level: warn + format: pretty +ui: + bundle: + url: https://github.com/spring-io/antora-ui-spring/releases/download/v0.3.3/ui-bundle.zip + snapshot: true \ No newline at end of file diff --git a/spring-batch-docs/antora.yml b/spring-batch-docs/antora.yml new file mode 100644 index 0000000000..a3808cf652 --- /dev/null +++ b/spring-batch-docs/antora.yml @@ -0,0 +1,11 @@ +name: batch +version: true +title: Spring Batch Documentation +nav: + - modules/ROOT/nav.adoc +ext: + collector: + run: + command: mvn process-resources -pl spring-batch-docs -am + scan: + dir: ./target/classes/antora-resources diff --git a/spring-batch-docs/src/main/asciidoc/images/1-1-step.png b/spring-batch-docs/modules/ROOT/assets/images/1-1-step.png similarity index 100% rename from spring-batch-docs/src/main/asciidoc/images/1-1-step.png rename to spring-batch-docs/modules/ROOT/assets/images/1-1-step.png diff --git a/spring-batch-docs/src/main/asciidoc/images/BatchExecutionEnvironments.bmp b/spring-batch-docs/modules/ROOT/assets/images/BatchExecutionEnvironments.bmp similarity index 100% rename from spring-batch-docs/src/main/asciidoc/images/BatchExecutionEnvironments.bmp rename to spring-batch-docs/modules/ROOT/assets/images/BatchExecutionEnvironments.bmp diff --git a/spring-batch-docs/src/main/asciidoc/images/ExecutionEnvironment.png b/spring-batch-docs/modules/ROOT/assets/images/ExecutionEnvironment.png similarity index 100% rename from spring-batch-docs/src/main/asciidoc/images/ExecutionEnvironment.png rename to spring-batch-docs/modules/ROOT/assets/images/ExecutionEnvironment.png diff --git a/spring-batch-docs/src/main/asciidoc/images/PipeAndFilter.jpg b/spring-batch-docs/modules/ROOT/assets/images/PipeAndFilter.jpg similarity index 100% rename from spring-batch-docs/src/main/asciidoc/images/PipeAndFilter.jpg rename to spring-batch-docs/modules/ROOT/assets/images/PipeAndFilter.jpg diff --git a/spring-batch-docs/src/main/asciidoc/images/PipeAndFilter.png b/spring-batch-docs/modules/ROOT/assets/images/PipeAndFilter.png similarity index 100% rename from spring-batch-docs/src/main/asciidoc/images/PipeAndFilter.png rename to spring-batch-docs/modules/ROOT/assets/images/PipeAndFilter.png diff --git a/spring-batch-docs/src/main/asciidoc/images/RepeatTemplate.png b/spring-batch-docs/modules/ROOT/assets/images/RepeatTemplate.png similarity index 100% rename from spring-batch-docs/src/main/asciidoc/images/RepeatTemplate.png rename to spring-batch-docs/modules/ROOT/assets/images/RepeatTemplate.png diff --git a/spring-batch-docs/src/main/asciidoc/images/RuntimeDependencies.png b/spring-batch-docs/modules/ROOT/assets/images/RuntimeDependencies.png similarity index 100% rename from spring-batch-docs/src/main/asciidoc/images/RuntimeDependencies.png rename to spring-batch-docs/modules/ROOT/assets/images/RuntimeDependencies.png diff --git a/spring-batch-docs/src/main/asciidoc/images/application-tier.png b/spring-batch-docs/modules/ROOT/assets/images/application-tier.png similarity index 100% rename from spring-batch-docs/src/main/asciidoc/images/application-tier.png rename to spring-batch-docs/modules/ROOT/assets/images/application-tier.png diff --git 
a/spring-batch-docs/src/main/asciidoc/images/chunk-oriented-processing-with-item-processor.png b/spring-batch-docs/modules/ROOT/assets/images/chunk-oriented-processing-with-item-processor.png similarity index 100% rename from spring-batch-docs/src/main/asciidoc/images/chunk-oriented-processing-with-item-processor.png rename to spring-batch-docs/modules/ROOT/assets/images/chunk-oriented-processing-with-item-processor.png diff --git a/spring-batch-docs/src/main/asciidoc/images/chunk-oriented-processing.png b/spring-batch-docs/modules/ROOT/assets/images/chunk-oriented-processing.png similarity index 100% rename from spring-batch-docs/src/main/asciidoc/images/chunk-oriented-processing.png rename to spring-batch-docs/modules/ROOT/assets/images/chunk-oriented-processing.png diff --git a/spring-batch-docs/src/main/asciidoc/images/composite-transformer.png b/spring-batch-docs/modules/ROOT/assets/images/composite-transformer.png similarity index 100% rename from spring-batch-docs/src/main/asciidoc/images/composite-transformer.png rename to spring-batch-docs/modules/ROOT/assets/images/composite-transformer.png diff --git a/spring-batch-docs/src/main/asciidoc/images/conditional-flow.png b/spring-batch-docs/modules/ROOT/assets/images/conditional-flow.png similarity index 100% rename from spring-batch-docs/src/main/asciidoc/images/conditional-flow.png rename to spring-batch-docs/modules/ROOT/assets/images/conditional-flow.png diff --git a/spring-batch-docs/src/main/asciidoc/images/cursorExample.png b/spring-batch-docs/modules/ROOT/assets/images/cursorExample.png similarity index 100% rename from spring-batch-docs/src/main/asciidoc/images/cursorExample.png rename to spring-batch-docs/modules/ROOT/assets/images/cursorExample.png diff --git a/spring-batch-docs/src/main/asciidoc/images/domain-classdiagram.jpg b/spring-batch-docs/modules/ROOT/assets/images/domain-classdiagram.jpg similarity index 100% rename from spring-batch-docs/src/main/asciidoc/images/domain-classdiagram.jpg rename to spring-batch-docs/modules/ROOT/assets/images/domain-classdiagram.jpg diff --git a/spring-batch-docs/src/main/asciidoc/images/drivingQueryExample.png b/spring-batch-docs/modules/ROOT/assets/images/drivingQueryExample.png similarity index 100% rename from spring-batch-docs/src/main/asciidoc/images/drivingQueryExample.png rename to spring-batch-docs/modules/ROOT/assets/images/drivingQueryExample.png diff --git a/spring-batch-docs/src/main/asciidoc/images/drivingQueryJob.png b/spring-batch-docs/modules/ROOT/assets/images/drivingQueryJob.png similarity index 100% rename from spring-batch-docs/src/main/asciidoc/images/drivingQueryJob.png rename to spring-batch-docs/modules/ROOT/assets/images/drivingQueryJob.png diff --git a/spring-batch-docs/src/main/asciidoc/images/errorOnFlush.png b/spring-batch-docs/modules/ROOT/assets/images/errorOnFlush.png similarity index 100% rename from spring-batch-docs/src/main/asciidoc/images/errorOnFlush.png rename to spring-batch-docs/modules/ROOT/assets/images/errorOnFlush.png diff --git a/spring-batch-docs/src/main/asciidoc/images/errorOnWrite.png b/spring-batch-docs/modules/ROOT/assets/images/errorOnWrite.png similarity index 100% rename from spring-batch-docs/src/main/asciidoc/images/errorOnWrite.png rename to spring-batch-docs/modules/ROOT/assets/images/errorOnWrite.png diff --git a/spring-batch-docs/src/main/asciidoc/images/execution-environment-config.jpg b/spring-batch-docs/modules/ROOT/assets/images/execution-environment-config.jpg similarity index 100% rename from 
spring-batch-docs/src/main/asciidoc/images/execution-environment-config.jpg rename to spring-batch-docs/modules/ROOT/assets/images/execution-environment-config.jpg diff --git a/spring-batch-docs/src/main/asciidoc/images/flat-file-input-source-design.gif b/spring-batch-docs/modules/ROOT/assets/images/flat-file-input-source-design.gif similarity index 100% rename from spring-batch-docs/src/main/asciidoc/images/flat-file-input-source-design.gif rename to spring-batch-docs/modules/ROOT/assets/images/flat-file-input-source-design.gif diff --git a/spring-batch-docs/src/main/asciidoc/images/flat-file-input-source-design.jpg b/spring-batch-docs/modules/ROOT/assets/images/flat-file-input-source-design.jpg similarity index 100% rename from spring-batch-docs/src/main/asciidoc/images/flat-file-input-source-design.jpg rename to spring-batch-docs/modules/ROOT/assets/images/flat-file-input-source-design.jpg diff --git a/spring-batch-docs/src/main/asciidoc/images/flatfile-input-source-diagram.jpg b/spring-batch-docs/modules/ROOT/assets/images/flatfile-input-source-diagram.jpg similarity index 100% rename from spring-batch-docs/src/main/asciidoc/images/flatfile-input-source-diagram.jpg rename to spring-batch-docs/modules/ROOT/assets/images/flatfile-input-source-diagram.jpg diff --git a/spring-batch-docs/src/main/asciidoc/images/handling-informational-messages.png b/spring-batch-docs/modules/ROOT/assets/images/handling-informational-messages.png similarity index 100% rename from spring-batch-docs/src/main/asciidoc/images/handling-informational-messages.png rename to spring-batch-docs/modules/ROOT/assets/images/handling-informational-messages.png diff --git a/spring-batch-docs/src/main/asciidoc/images/io-design.jpg b/spring-batch-docs/modules/ROOT/assets/images/io-design.jpg similarity index 100% rename from spring-batch-docs/src/main/asciidoc/images/io-design.jpg rename to spring-batch-docs/modules/ROOT/assets/images/io-design.jpg diff --git a/spring-batch-docs/src/main/asciidoc/images/item-oriented-processing.png b/spring-batch-docs/modules/ROOT/assets/images/item-oriented-processing.png similarity index 100% rename from spring-batch-docs/src/main/asciidoc/images/item-oriented-processing.png rename to spring-batch-docs/modules/ROOT/assets/images/item-oriented-processing.png diff --git a/spring-batch-docs/src/main/asciidoc/images/item-reader-design.jpg b/spring-batch-docs/modules/ROOT/assets/images/item-reader-design.jpg similarity index 100% rename from spring-batch-docs/src/main/asciidoc/images/item-reader-design.jpg rename to spring-batch-docs/modules/ROOT/assets/images/item-reader-design.jpg diff --git a/spring-batch-docs/src/main/asciidoc/images/item-stream-adapter.jpg b/spring-batch-docs/modules/ROOT/assets/images/item-stream-adapter.jpg similarity index 100% rename from spring-batch-docs/src/main/asciidoc/images/item-stream-adapter.jpg rename to spring-batch-docs/modules/ROOT/assets/images/item-stream-adapter.jpg diff --git a/spring-batch-docs/src/main/asciidoc/images/jmx-job.jpg b/spring-batch-docs/modules/ROOT/assets/images/jmx-job.jpg similarity index 100% rename from spring-batch-docs/src/main/asciidoc/images/jmx-job.jpg rename to spring-batch-docs/modules/ROOT/assets/images/jmx-job.jpg diff --git a/spring-batch-docs/src/main/asciidoc/images/jmx.jpg b/spring-batch-docs/modules/ROOT/assets/images/jmx.jpg similarity index 100% rename from spring-batch-docs/src/main/asciidoc/images/jmx.jpg rename to spring-batch-docs/modules/ROOT/assets/images/jmx.jpg diff --git 
a/spring-batch-docs/src/main/asciidoc/images/job-heirarchy.png b/spring-batch-docs/modules/ROOT/assets/images/job-heirarchy.png similarity index 100% rename from spring-batch-docs/src/main/asciidoc/images/job-heirarchy.png rename to spring-batch-docs/modules/ROOT/assets/images/job-heirarchy.png diff --git a/spring-batch-docs/src/main/asciidoc/images/job-launcher-sequence-async.png b/spring-batch-docs/modules/ROOT/assets/images/job-launcher-sequence-async.png similarity index 100% rename from spring-batch-docs/src/main/asciidoc/images/job-launcher-sequence-async.png rename to spring-batch-docs/modules/ROOT/assets/images/job-launcher-sequence-async.png diff --git a/spring-batch-docs/src/main/asciidoc/images/job-launcher-sequence-sync.png b/spring-batch-docs/modules/ROOT/assets/images/job-launcher-sequence-sync.png similarity index 100% rename from spring-batch-docs/src/main/asciidoc/images/job-launcher-sequence-sync.png rename to spring-batch-docs/modules/ROOT/assets/images/job-launcher-sequence-sync.png diff --git a/spring-batch-docs/src/main/asciidoc/images/job-repository-advanced.png b/spring-batch-docs/modules/ROOT/assets/images/job-repository-advanced.png similarity index 100% rename from spring-batch-docs/src/main/asciidoc/images/job-repository-advanced.png rename to spring-batch-docs/modules/ROOT/assets/images/job-repository-advanced.png diff --git a/spring-batch-docs/src/main/asciidoc/images/job-repository.png b/spring-batch-docs/modules/ROOT/assets/images/job-repository.png similarity index 100% rename from spring-batch-docs/src/main/asciidoc/images/job-repository.png rename to spring-batch-docs/modules/ROOT/assets/images/job-repository.png diff --git a/spring-batch-docs/src/main/asciidoc/images/job-stereotypes-parameters.png b/spring-batch-docs/modules/ROOT/assets/images/job-stereotypes-parameters.png similarity index 100% rename from spring-batch-docs/src/main/asciidoc/images/job-stereotypes-parameters.png rename to spring-batch-docs/modules/ROOT/assets/images/job-stereotypes-parameters.png diff --git a/spring-batch-docs/src/main/asciidoc/images/jobHeirarchyWithSteps.png b/spring-batch-docs/modules/ROOT/assets/images/jobHeirarchyWithSteps.png similarity index 100% rename from spring-batch-docs/src/main/asciidoc/images/jobHeirarchyWithSteps.png rename to spring-batch-docs/modules/ROOT/assets/images/jobHeirarchyWithSteps.png diff --git a/spring-batch-docs/src/main/asciidoc/images/launch-batch-job-svg.svg b/spring-batch-docs/modules/ROOT/assets/images/launch-batch-job-svg.svg similarity index 100% rename from spring-batch-docs/src/main/asciidoc/images/launch-batch-job-svg.svg rename to spring-batch-docs/modules/ROOT/assets/images/launch-batch-job-svg.svg diff --git a/spring-batch-docs/src/main/asciidoc/images/launch-batch-job.png b/spring-batch-docs/modules/ROOT/assets/images/launch-batch-job.png similarity index 100% rename from spring-batch-docs/src/main/asciidoc/images/launch-batch-job.png rename to spring-batch-docs/modules/ROOT/assets/images/launch-batch-job.png diff --git a/spring-batch-docs/src/main/asciidoc/images/launch-from-request.png b/spring-batch-docs/modules/ROOT/assets/images/launch-from-request.png similarity index 100% rename from spring-batch-docs/src/main/asciidoc/images/launch-from-request.png rename to spring-batch-docs/modules/ROOT/assets/images/launch-from-request.png diff --git a/spring-batch-docs/src/main/asciidoc/images/meta-data-erd.png b/spring-batch-docs/modules/ROOT/assets/images/meta-data-erd.png similarity index 100% rename from 
spring-batch-docs/src/main/asciidoc/images/meta-data-erd.png rename to spring-batch-docs/modules/ROOT/assets/images/meta-data-erd.png diff --git a/spring-batch-docs/src/main/asciidoc/images/nfljob-config.jpg b/spring-batch-docs/modules/ROOT/assets/images/nfljob-config.jpg similarity index 100% rename from spring-batch-docs/src/main/asciidoc/images/nfljob-config.jpg rename to spring-batch-docs/modules/ROOT/assets/images/nfljob-config.jpg diff --git a/spring-batch-docs/src/main/asciidoc/images/nfljob.jpg b/spring-batch-docs/modules/ROOT/assets/images/nfljob.jpg similarity index 100% rename from spring-batch-docs/src/main/asciidoc/images/nfljob.jpg rename to spring-batch-docs/modules/ROOT/assets/images/nfljob.jpg diff --git a/spring-batch-docs/src/main/asciidoc/images/oxm-fragments.png b/spring-batch-docs/modules/ROOT/assets/images/oxm-fragments.png similarity index 100% rename from spring-batch-docs/src/main/asciidoc/images/oxm-fragments.png rename to spring-batch-docs/modules/ROOT/assets/images/oxm-fragments.png diff --git a/spring-batch-docs/src/main/asciidoc/images/partitioned.png b/spring-batch-docs/modules/ROOT/assets/images/partitioned.png similarity index 100% rename from spring-batch-docs/src/main/asciidoc/images/partitioned.png rename to spring-batch-docs/modules/ROOT/assets/images/partitioned.png diff --git a/spring-batch-docs/src/main/asciidoc/images/partitioning-overview.png b/spring-batch-docs/modules/ROOT/assets/images/partitioning-overview.png similarity index 100% rename from spring-batch-docs/src/main/asciidoc/images/partitioning-overview.png rename to spring-batch-docs/modules/ROOT/assets/images/partitioning-overview.png diff --git a/spring-batch-docs/src/main/asciidoc/images/partitioning-spi.png b/spring-batch-docs/modules/ROOT/assets/images/partitioning-spi.png similarity index 100% rename from spring-batch-docs/src/main/asciidoc/images/partitioning-spi.png rename to spring-batch-docs/modules/ROOT/assets/images/partitioning-spi.png diff --git a/spring-batch-docs/src/main/asciidoc/images/remote-chunking-config.png b/spring-batch-docs/modules/ROOT/assets/images/remote-chunking-config.png similarity index 100% rename from spring-batch-docs/src/main/asciidoc/images/remote-chunking-config.png rename to spring-batch-docs/modules/ROOT/assets/images/remote-chunking-config.png diff --git a/spring-batch-docs/src/main/asciidoc/images/remote-chunking-sbi.png b/spring-batch-docs/modules/ROOT/assets/images/remote-chunking-sbi.png similarity index 100% rename from spring-batch-docs/src/main/asciidoc/images/remote-chunking-sbi.png rename to spring-batch-docs/modules/ROOT/assets/images/remote-chunking-sbi.png diff --git a/spring-batch-docs/src/main/asciidoc/images/remote-chunking.png b/spring-batch-docs/modules/ROOT/assets/images/remote-chunking.png similarity index 100% rename from spring-batch-docs/src/main/asciidoc/images/remote-chunking.png rename to spring-batch-docs/modules/ROOT/assets/images/remote-chunking.png diff --git a/spring-batch-docs/src/main/asciidoc/images/remote-partitioning-aggregation-config.png b/spring-batch-docs/modules/ROOT/assets/images/remote-partitioning-aggregation-config.png similarity index 100% rename from spring-batch-docs/src/main/asciidoc/images/remote-partitioning-aggregation-config.png rename to spring-batch-docs/modules/ROOT/assets/images/remote-partitioning-aggregation-config.png diff --git a/spring-batch-docs/src/main/asciidoc/images/remote-partitioning-polling-config.png 
b/spring-batch-docs/modules/ROOT/assets/images/remote-partitioning-polling-config.png similarity index 100% rename from spring-batch-docs/src/main/asciidoc/images/remote-partitioning-polling-config.png rename to spring-batch-docs/modules/ROOT/assets/images/remote-partitioning-polling-config.png diff --git a/spring-batch-docs/src/main/asciidoc/images/remote-partitioning.png b/spring-batch-docs/modules/ROOT/assets/images/remote-partitioning.png similarity index 100% rename from spring-batch-docs/src/main/asciidoc/images/remote-partitioning.png rename to spring-batch-docs/modules/ROOT/assets/images/remote-partitioning.png diff --git a/spring-batch-docs/src/main/asciidoc/images/repository-classdiagram.jpg b/spring-batch-docs/modules/ROOT/assets/images/repository-classdiagram.jpg similarity index 100% rename from spring-batch-docs/src/main/asciidoc/images/repository-classdiagram.jpg rename to spring-batch-docs/modules/ROOT/assets/images/repository-classdiagram.jpg diff --git a/spring-batch-docs/src/main/asciidoc/images/run-tier.png b/spring-batch-docs/modules/ROOT/assets/images/run-tier.png similarity index 100% rename from spring-batch-docs/src/main/asciidoc/images/run-tier.png rename to spring-batch-docs/modules/ROOT/assets/images/run-tier.png diff --git a/spring-batch-docs/src/main/asciidoc/images/s1-job-configuration.jpg b/spring-batch-docs/modules/ROOT/assets/images/s1-job-configuration.jpg similarity index 100% rename from spring-batch-docs/src/main/asciidoc/images/s1-job-configuration.jpg rename to spring-batch-docs/modules/ROOT/assets/images/s1-job-configuration.jpg diff --git a/spring-batch-docs/src/main/asciidoc/images/sequential-flow.png b/spring-batch-docs/modules/ROOT/assets/images/sequential-flow.png similarity index 100% rename from spring-batch-docs/src/main/asciidoc/images/sequential-flow.png rename to spring-batch-docs/modules/ROOT/assets/images/sequential-flow.png diff --git a/spring-batch-docs/src/main/asciidoc/images/simple-batch-execution-env.jpg b/spring-batch-docs/modules/ROOT/assets/images/simple-batch-execution-env.jpg similarity index 100% rename from spring-batch-docs/src/main/asciidoc/images/simple-batch-execution-env.jpg rename to spring-batch-docs/modules/ROOT/assets/images/simple-batch-execution-env.jpg diff --git a/spring-batch-docs/src/main/asciidoc/images/simple-tasklet-job-configuration.jpg b/spring-batch-docs/modules/ROOT/assets/images/simple-tasklet-job-configuration.jpg similarity index 100% rename from spring-batch-docs/src/main/asciidoc/images/simple-tasklet-job-configuration.jpg rename to spring-batch-docs/modules/ROOT/assets/images/simple-tasklet-job-configuration.jpg diff --git a/spring-batch-docs/src/main/asciidoc/images/simplified-chunk-oriented-processing.png b/spring-batch-docs/modules/ROOT/assets/images/simplified-chunk-oriented-processing.png similarity index 100% rename from spring-batch-docs/src/main/asciidoc/images/simplified-chunk-oriented-processing.png rename to spring-batch-docs/modules/ROOT/assets/images/simplified-chunk-oriented-processing.png diff --git a/spring-batch-docs/src/main/asciidoc/images/spring-batch-football-graph.jpg b/spring-batch-docs/modules/ROOT/assets/images/spring-batch-football-graph.jpg similarity index 100% rename from spring-batch-docs/src/main/asciidoc/images/spring-batch-football-graph.jpg rename to spring-batch-docs/modules/ROOT/assets/images/spring-batch-football-graph.jpg diff --git a/spring-batch-docs/src/main/asciidoc/images/spring-batch-layers.png 
b/spring-batch-docs/modules/ROOT/assets/images/spring-batch-layers.png similarity index 100% rename from spring-batch-docs/src/main/asciidoc/images/spring-batch-layers.png rename to spring-batch-docs/modules/ROOT/assets/images/spring-batch-layers.png diff --git a/spring-batch-docs/src/main/asciidoc/images/spring-batch-reference-model.png b/spring-batch-docs/modules/ROOT/assets/images/spring-batch-reference-model.png similarity index 100% rename from spring-batch-docs/src/main/asciidoc/images/spring-batch-reference-model.png rename to spring-batch-docs/modules/ROOT/assets/images/spring-batch-reference-model.png diff --git a/spring-batch-docs/src/main/asciidoc/images/spring-batch.png b/spring-batch-docs/modules/ROOT/assets/images/spring-batch.png similarity index 100% rename from spring-batch-docs/src/main/asciidoc/images/spring-batch.png rename to spring-batch-docs/modules/ROOT/assets/images/spring-batch.png diff --git a/spring-batch-docs/src/main/asciidoc/images/step.png b/spring-batch-docs/modules/ROOT/assets/images/step.png similarity index 100% rename from spring-batch-docs/src/main/asciidoc/images/step.png rename to spring-batch-docs/modules/ROOT/assets/images/step.png diff --git a/spring-batch-docs/src/main/asciidoc/images/xmlinput.png b/spring-batch-docs/modules/ROOT/assets/images/xmlinput.png similarity index 100% rename from spring-batch-docs/src/main/asciidoc/images/xmlinput.png rename to spring-batch-docs/modules/ROOT/assets/images/xmlinput.png diff --git a/spring-batch-docs/modules/ROOT/nav.adoc b/spring-batch-docs/modules/ROOT/nav.adoc new file mode 100644 index 0000000000..a9a29913e0 --- /dev/null +++ b/spring-batch-docs/modules/ROOT/nav.adoc @@ -0,0 +1,62 @@ +* xref:index.adoc[] +* xref:spring-batch-intro.adoc[] +* xref:spring-batch-architecture.adoc[] +* xref:whatsnew.adoc[] +* xref:domain.adoc[] +* xref:job.adoc[] +** xref:job/configuring.adoc[] +** xref:job/java-config.adoc[] +** xref:job/configuring-repository.adoc[] +** xref:job/configuring-launcher.adoc[] +** xref:job/running.adoc[] +** xref:job/advanced-meta-data.adoc[] +* xref:step.adoc[] +** xref:step/chunk-oriented-processing.adoc[] +*** xref:step/chunk-oriented-processing/configuring.adoc[] +*** xref:step/chunk-oriented-processing/inheriting-from-parent.adoc[] +*** xref:step/chunk-oriented-processing/commit-interval.adoc[] +*** xref:step/chunk-oriented-processing/restart.adoc[] +*** xref:step/chunk-oriented-processing/configuring-skip.adoc[] +*** xref:step/chunk-oriented-processing/retry-logic.adoc[] +*** xref:step/chunk-oriented-processing/controlling-rollback.adoc[] +*** xref:step/chunk-oriented-processing/transaction-attributes.adoc[] +*** xref:step/chunk-oriented-processing/registering-item-streams.adoc[] +*** xref:step/chunk-oriented-processing/intercepting-execution.adoc[] +** xref:step/tasklet.adoc[] +** xref:step/controlling-flow.adoc[] +** xref:step/late-binding.adoc[] +* xref:readersAndWriters.adoc[] +** xref:readers-and-writers/item-reader.adoc[] +** xref:readers-and-writers/item-writer.adoc[] +** xref:readers-and-writers/item-stream.adoc[] +** xref:readers-and-writers/delegate-pattern-registering.adoc[] +** xref:readers-and-writers/flat-files.adoc[] +*** xref:readers-and-writers/flat-files/field-set.adoc[] +*** xref:readers-and-writers/flat-files/file-item-reader.adoc[] +*** xref:readers-and-writers/flat-files/file-item-writer.adoc[] +** xref:readers-and-writers/xml-reading-writing.adoc[] +** xref:readers-and-writers/json-reading-writing.adoc[] +** xref:readers-and-writers/multi-file-input.adoc[] +** 
xref:readers-and-writers/database.adoc[] +** xref:readers-and-writers/reusing-existing-services.adoc[] +** xref:readers-and-writers/process-indicator.adoc[] +** xref:readers-and-writers/custom.adoc[] +** xref:readers-and-writers/item-reader-writer-implementations.adoc[] +* xref:processor.adoc[] +* xref:scalability.adoc[] +* xref:repeat.adoc[] +* xref:retry.adoc[] +* xref:testing.adoc[] +* xref:common-patterns.adoc[] +* xref:spring-batch-integration.adoc[] +** xref:spring-batch-integration/namespace-support.adoc[] +** xref:spring-batch-integration/launching-jobs-through-messages.adoc[] +** xref:spring-batch-integration/available-attributes-of-the-job-launching-gateway.adoc[] +** xref:spring-batch-integration/sub-elements.adoc[] +* xref:monitoring-and-metrics.adoc[] +* xref:tracing.adoc[] +* Appendices +** xref:appendix.adoc[] +** xref:schema-appendix.adoc[] +** xref:transaction-appendix.adoc[] +** xref:glossary.adoc[] diff --git a/spring-batch-docs/src/main/asciidoc/appendix.adoc b/spring-batch-docs/modules/ROOT/pages/appendix.adoc similarity index 98% rename from spring-batch-docs/src/main/asciidoc/appendix.adoc rename to spring-batch-docs/modules/ROOT/pages/appendix.adoc index 79cf854bd9..12de632126 100644 --- a/spring-batch-docs/src/main/asciidoc/appendix.adoc +++ b/spring-batch-docs/modules/ROOT/pages/appendix.adoc @@ -1,14 +1,12 @@ -:toc: left -:toclevels: 4 [[listOfReadersAndWriters]] -include::attributes.adoc[] [appendix] -== List of ItemReaders and ItemWriters +[[list-of-itemreaders-and-itemwriters]] += List of ItemReaders and ItemWriters [[itemReadersAppendix]] -=== Item Readers +== Item Readers .Available Item Readers [options="header"] @@ -77,7 +75,7 @@ This reader stores message offsets in the execution context to support restart c [[itemWritersAppendix]] -=== Item Writers +== Item Writers .Available Item Writers [options="header"] diff --git a/spring-batch-docs/src/main/asciidoc/attributes.adoc b/spring-batch-docs/modules/ROOT/pages/attributes.adoc similarity index 100% rename from spring-batch-docs/src/main/asciidoc/attributes.adoc rename to spring-batch-docs/modules/ROOT/pages/attributes.adoc diff --git a/spring-batch-docs/src/main/asciidoc/common-patterns.adoc b/spring-batch-docs/modules/ROOT/pages/common-patterns.adoc similarity index 94% rename from spring-batch-docs/src/main/asciidoc/common-patterns.adoc rename to spring-batch-docs/modules/ROOT/pages/common-patterns.adoc index f82484fdd6..130eb779c6 100644 --- a/spring-batch-docs/src/main/asciidoc/common-patterns.adoc +++ b/spring-batch-docs/modules/ROOT/pages/common-patterns.adoc @@ -1,15 +1,8 @@ -:toc: left -:toclevels: 4 [[commonPatterns]] -include::attributes.adoc[] - -== Common Batch Patterns - -ifndef::onlyonetoggle[] -include::toggle.adoc[] -endif::onlyonetoggle[] +[[common-batch-patterns]] += Common Batch Patterns Some batch jobs can be assembled purely from off-the-shelf components in Spring Batch. For instance, the `ItemReader` and `ItemWriter` implementations can be configured to @@ -25,7 +18,7 @@ These examples primarily feature the listener interfaces. It should be noted tha `ItemReader` or `ItemWriter` can implement a listener interface as well, if appropriate. [[loggingItemProcessingAndFailures]] -=== Logging Item Processing and Failures +== Logging Item Processing and Failures A common use case is the need for special handling of errors in a step, item by item, perhaps logging to a special channel or inserting a record into a database. 
A @@ -52,11 +45,31 @@ public class ItemFailureLoggerListener extends ItemListenerSupport { Having implemented this listener, it must be registered with a step. -[role="xmlContent"] -The following example shows how to register a listener with a step in XML: +[tabs] +==== +Java:: ++ +The following example shows how to register a listener with a step in Java: ++ +.Java Configuration +[source, java] +---- +@Bean +public Step simpleStep(JobRepository jobRepository) { + return new StepBuilder("simpleStep", jobRepository) + ... + .listener(new ItemFailureLoggerListener()) + .build(); +} +---- + +XML:: ++ +The following example shows how to register a listener with a step in XML: ++ .XML Configuration -[source, xml, role="xmlContent"] +[source, xml] ---- ... @@ -68,20 +81,8 @@ The following example shows how to register a listener with a step in XML: ---- -[role="javaContent"] -The following example shows how to register a listener with a step Java: +==== -.Java Configuration -[source, java, role="javaContent"] ----- -@Bean -public Step simpleStep(JobRepository jobRepository) { - return new StepBuilder("simpleStep", jobRepository) - ... - .listener(new ItemFailureLoggerListener()) - .build(); -} ----- IMPORTANT: if your listener does anything in an `onError()` method, it must be inside a transaction that is going to be rolled back. If you need to use a transactional @@ -90,7 +91,7 @@ transaction to that method (see Spring Core Reference Guide for details), and gi propagation attribute a value of `REQUIRES_NEW`. [[stoppingAJobManuallyForBusinessReasons]] -=== Stopping a Job Manually for Business Reasons +== Stopping a Job Manually for Business Reasons Spring Batch provides a `stop()` method through the `JobOperator` interface, but this is really for use by the operator rather than the application programmer. Sometimes, it is @@ -141,11 +142,32 @@ of the `CompletionPolicy` strategy that signals a complete batch when the item t processed is `null`. A more sophisticated completion policy could be implemented and injected into the `Step` through the `SimpleStepFactoryBean`. -[role="xmlContent"] -The following example shows how to inject a completion policy into a step in XML: +[tabs] +==== +Java:: ++ +The following example shows how to inject a completion policy into a step in Java: ++ +.Java Configuration +[source, java] +---- +@Bean +public Step simpleStep(JobRepository jobRepository, PlatformTransactionManager transactionManager) { + return new StepBuilder("simpleStep", jobRepository) + .chunk(new SpecialCompletionPolicy(), transactionManager) + .reader(reader()) + .writer(writer()) + .build(); +} +---- + +XML:: ++ +The following example shows how to inject a completion policy into a step in XML: ++ .XML Configuration -[source, xml, role="xmlContent"] +[source, xml] ---- @@ -157,21 +179,8 @@ The following example shows how to inject a completion policy into a step in XML ---- -[role="javaContent"] -The following example shows how to inject a completion policy into a step in Java: +==== -.Java Configuration -[source, java, role="javaContent"] ----- -@Bean -public Step simpleStep(JobRepository jobRepository, PlatformTransactionManager transactionManager) { - return new StepBuilder("simpleStep", jobRepository) - .chunk(new SpecialCompletionPolicy(), transactionManager) - .reader(reader()) - .writer(writer()) - .build(); -} ----- An alternative is to set a flag in the `StepExecution`, which is checked by the `Step` implementations in the framework in between item processing.
To implement this @@ -204,7 +213,7 @@ When the flag is set, the default behavior is for the step to throw a so this is always an abnormal ending to a job. [[addingAFooterRecord]] -=== Adding a Footer Record +== Adding a Footer Record Often, when writing to flat files, a "`footer`" record must be appended to the end of the file, after all processing has be completed. This can be achieved using the @@ -212,27 +221,16 @@ file, after all processing has be completed. This can be achieved using the (and its counterpart, the `FlatFileHeaderCallback`) are optional properties of the `FlatFileItemWriter` and can be added to an item writer. -[role="xmlContent"] -The following example shows how to use the `FlatFileHeaderCallback` and the -`FlatFileFooterCallback` in XML: -.XML Configuration -[source, xml, role="xmlContent"] ----- - - - - - - ----- - -[role="javaContent"] +[tabs] +==== +Java:: ++ The following example shows how to use the `FlatFileHeaderCallback` and the `FlatFileFooterCallback` in Java: - ++ .Java Configuration -[source, java, role="javaContent"] +[source, java] ---- @Bean public FlatFileItemWriter itemWriter(Resource outputResource) { @@ -246,6 +244,26 @@ public FlatFileItemWriter itemWriter(Resource outputResource) { } ---- +XML:: ++ +The following example shows how to use the `FlatFileHeaderCallback` and the +`FlatFileFooterCallback` in XML: ++ +.XML Configuration +[source, xml] +---- + + + + + + +---- + +==== + + + The footer callback interface has just one method that is called when the footer must be written, as shown in the following interface definition: @@ -259,7 +277,7 @@ public interface FlatFileFooterCallback { ---- [[writingASummaryFooter]] -==== Writing a Summary Footer +=== Writing a Summary Footer A common requirement involving footer records is to aggregate information during the output process and to append this information to the end of the file. This footer often @@ -311,28 +329,15 @@ In order for the `writeFooter` method to be called, the `TradeItemWriter` (which implements `FlatFileFooterCallback`) must be wired into the `FlatFileItemWriter` as the `footerCallback`. -[role="xmlContent"] -The following example shows how to wire the `TradeItemWriter` in XML: -.XML Configuration -[source, xml, role="xmlContent"] ----- - - - - - - - - - ----- - -[role="javaContent"] +[tabs] +==== +Java:: ++ The following example shows how to wire the `TradeItemWriter` in Java: - ++ .Java Configuration -[source, java, role="javaContent"] +[source, java] ---- @Bean public TradeItemWriter tradeItemWriter() { @@ -354,6 +359,29 @@ public FlatFileItemWriter flatFileItemWriter(Resource outputResource) { } ---- +XML:: ++ +The following example shows how to wire the `TradeItemWriter` in XML: ++ +.XML Configuration +[source, xml] +---- + + + + + + + + + +---- + +==== + + + + The way that the `TradeItemWriter` has been written so far functions correctly only if the `Step` is not restartable. This is because the class is stateful (since it stores the `totalAmount`), but the `totalAmount` is not persisted to the database. Therefore, it @@ -381,7 +409,7 @@ starting point for processing, allowing the `TradeItemWriter` to pick up on rest it left off the previous time the `Step` was run. [[drivingQueryBasedItemReaders]] -=== Driving Query Based ItemReaders +== Driving Query Based ItemReaders In the link:readersAndWriters.html[chapter on readers and writers], database input using paging was discussed. 
Many database vendors, such as DB2, have extremely pessimistic @@ -393,7 +421,7 @@ by iterating over keys, rather than the entire object that needs to be returned, following image illustrates: .Driving Query Job -image::{batch-asciidoc}images/drivingQueryExample.png[Driving Query Job, scaledwidth="60%"] +image::drivingQueryExample.png[Driving Query Job, scaledwidth="60%"] As you can see, the example shown in the preceding image uses the same 'FOO' table as was used in the cursor-based example. However, rather than selecting the entire row, only the @@ -402,14 +430,14 @@ from `read`, an `Integer` is returned. This number can then be used to query for 'details', which is a complete `Foo` object, as shown in the following image: .Driving Query Example -image::{batch-asciidoc}images/drivingQueryJob.png[Driving Query Example, scaledwidth="60%"] +image::drivingQueryJob.png[Driving Query Example, scaledwidth="60%"] An `ItemProcessor` should be used to transform the key obtained from the driving query into a full `Foo` object. An existing DAO can be used to query for the full object based on the key. [[multiLineRecords]] -=== Multi-Line Records +== Multi-Line Records While it is usually the case with flat files that each record is confined to a single line, it is common that a file might have records spanning multiple lines with multiple @@ -434,32 +462,15 @@ there are, the `ItemReader` must be careful to always read an entire record. In do this, a custom `ItemReader` should be implemented as a wrapper for the `FlatFileItemReader`. -[role="xmlContent"] -The following example shows how to implement a custom `ItemReader` in XML: -.XML Configuration -[source, xml, role="xmlContent"] ----- - - - - - - - - - - - - - ----- - -[role="javaContent"] +[tabs] +==== +Java:: ++ The following example shows how to implement a custom `ItemReader` in Java: - ++ .Java Configuration -[source, java, role="javaContent"] +[source, java] ---- @Bean public MultiLineTradeItemReader itemReader() { @@ -482,6 +493,33 @@ public FlatFileItemReader flatFileItemReader() { } ---- +XML:: ++ +The following example shows how to implement a custom `ItemReader` in XML: ++ +.XML Configuration +[source, xml] +---- + + + + + + + + + + + + + +---- + +==== + + + + To ensure that each line is tokenized properly, which is especially important for fixed-length input, the `PatternMatchingCompositeLineTokenizer` can be used on the delegate `FlatFileItemReader`. See @@ -490,29 +528,15 @@ Writers chapter] for more details. The delegate reader then uses a `PassThroughFieldSetMapper` to deliver a `FieldSet` for each line back to the wrapping `ItemReader`. -[role="xmlContent"] -The following example shows how to ensure that each line is properly tokenized in XML: - -.XML Content -[source, xml, role="xmlContent"] ----- - - - - - - - - - - ----- -[role="javaContent"] +[tabs] +==== +Java:: ++ The following example shows how to ensure that each line is properly tokenized in Java: - ++ .Java Content -[source, java, role="javaContent"] +[source, java] ---- @Bean public PatternMatchingCompositeLineTokenizer orderFileTokenizer() { @@ -532,6 +556,29 @@ public PatternMatchingCompositeLineTokenizer orderFileTokenizer() { } ---- +XML:: ++ +The following example shows how to ensure that each line is properly tokenized in XML: ++ +.XML Content +[source, xml] +---- + + + + + + + + + + +---- +==== + + + + This wrapper has to be able to recognize the end of a record so that it can continually call `read()` on its delegate until the end is reached. 
For each line that is read, the wrapper should build up the item to be returned. Once the footer is reached, the item can @@ -572,7 +619,7 @@ public Trade read() throws Exception { ---- [[executingSystemCommands]] -=== Executing System Commands +== Executing System Commands Many batch jobs require that an external command be called from within the batch job. Such a process could be kicked off separately by the scheduler, but the advantage of @@ -582,24 +629,15 @@ need to be split up into multiple jobs as well. Because the need is so common, Spring Batch provides a `Tasklet` implementation for calling system commands. -[role="xmlContent"] -The following example shows how to call an external command in XML: -.XML Configuration -[source, xml, role="xmlContent"] ----- - - - - - ----- - -[role="javaContent"] +[tabs] +==== +Java:: ++ The following example shows how to call an external command in Java: - ++ .Java Configuration -[source, java, role="javaContent"] +[source, java] ---- @Bean public SystemCommandTasklet tasklet() { @@ -612,8 +650,27 @@ public SystemCommandTasklet tasklet() { } ---- +XML:: ++ +The following example shows how to call an external command in XML: ++ +.XML Configuration +[source, xml] +---- + + + + + +---- +==== + + + + + [[handlingStepCompletionWhenNoInputIsFound]] -=== Handling Step Completion When No Input is Found +== Handling Step Completion When No Input is Found In many batch scenarios, finding no rows in a database or file to process is not exceptional. The `Step` is simply considered to have found no work and completes with 0 @@ -647,7 +704,7 @@ is the case, an exit code `FAILED` is returned, indicating that the `Step` shoul Otherwise, `null` is returned, which does not affect the status of the `Step`. [[passingDataToFutureSteps]] -=== Passing Data to Future Steps +== Passing Data to Future Steps It is often useful to pass information from one step to another. This can be done through the `ExecutionContext`. The catch is that there are two `ExecutionContexts`: one at the @@ -688,41 +745,15 @@ also, optionally, be configured with a list of exit code patterns for which the should occur (`COMPLETED` is the default). As with all listeners, it must be registered on the `Step`. -[role="xmlContent"] -The following example shows how to promote a step to the `Job` `ExecutionContext` in XML: - -.XML Configuration -[source, xml, role="xmlContent"] ----- - - - - - - - - - - - - ... - - - - - - - someKey - - - ----- -[role="xmlContent"] +[tabs] +==== +Java:: ++ The following example shows how to promote a step to the `Job` `ExecutionContext` in Java: - ++ .Java Configuration -[source, java, role="javaContent"] +[source, java] ---- @Bean public Job job1(JobRepository jobRepository) { @@ -752,6 +783,41 @@ public ExecutionContextPromotionListener promotionListener() { } ---- +XML:: ++ +The following example shows how to promote a step to the `Job` `ExecutionContext` in XML: ++ +.XML Configuration +[source, xml] +---- + + + + + + + + + + + + ... 
+ + + + + + + someKey + + + +---- + +==== + + + Finally, the saved values must be retrieved from the `Job` `ExecutionContext`, as shown in the following example: diff --git a/spring-batch-docs/src/main/asciidoc/domain.adoc b/spring-batch-docs/modules/ROOT/pages/domain.adoc similarity index 90% rename from spring-batch-docs/src/main/asciidoc/domain.adoc rename to spring-batch-docs/modules/ROOT/pages/domain.adoc index c544b10539..e999c42469 100644 --- a/spring-batch-docs/src/main/asciidoc/domain.adoc +++ b/spring-batch-docs/modules/ROOT/pages/domain.adoc @@ -1,13 +1,7 @@ -:toc: left -:toclevels: 4 [[domainLanguageOfBatch]] -== The Domain Language of Batch += The Domain Language of Batch -include::attributes.adoc[] -ifndef::onlyonetoggle[] -include::toggle.adoc[] -endif::onlyonetoggle[] To any experienced batch architect, the overall concepts of batch processing used in Spring Batch should be familiar and comfortable. There are "`Jobs`" and "`Steps`" and @@ -33,7 +27,7 @@ creation of simple to complex batch applications, with the infrastructure and ex to address very complex processing needs. .Batch Stereotypes -image::{batch-asciidoc}images/spring-batch-reference-model.png[Figure 2.1: Batch Stereotypes, scaledwidth="60%"] +image::spring-batch-reference-model.png[Figure 2.1: Batch Stereotypes, scaledwidth="60%"] The preceding diagram highlights the key concepts that make up the domain language of Spring Batch. A `Job` has one to many steps, each of which has exactly one `ItemReader`, @@ -41,7 +35,8 @@ one `ItemProcessor`, and one `ItemWriter`. A job needs to be launched (with `JobLauncher`), and metadata about the currently running process needs to be stored (in `JobRepository`). -=== Job +[[job]] +== Job This section describes stereotypes relating to the concept of a batch job. A `Job` is an entity that encapsulates an entire batch process. As is common with other Spring @@ -50,7 +45,7 @@ configuration. This configuration may be referred to as the "`job configuration` `Job` is only the top of an overall hierarchy, as shown in the following diagram: .Job Hierarchy -image::{batch-asciidoc}images/job-heirarchy.png[Job Hierarchy, scaledwidth="60%"] +image::job-heirarchy.png[Job Hierarchy, scaledwidth="60%"] In Spring Batch, a `Job` is simply a container for `Step` instances. It combines multiple steps that logically belong together in a flow and allows for configuration of properties @@ -60,15 +55,18 @@ global to all steps, such as restartability. The job configuration contains: * Definition and ordering of `Step` instances. * Whether or not the job is restartable. -ifdef::backend-spring-html[] -[role="javaContent"] + +[tabs] +==== +Java:: ++ For those who use Java configuration, Spring Batch provides a default implementation of the `Job` interface in the form of the `SimpleJob` class, which creates some standard functionality on top of `Job`. When using Java-based configuration, a collection of builders is made available for the instantiation of a `Job`, as the following example shows: - -[source, java, role="javaContent"] ++ +[source, java] ---- @Bean public Job footballJob(JobRepository jobRepository) { @@ -80,14 +78,15 @@ public Job footballJob(JobRepository jobRepository) { } ---- -[role="xmlContent"] +XML:: ++ For those who use XML configuration, Spring Batch provides a default implementation of the `Job` interface in the form of the `SimpleJob` class, which creates some standard functionality on top of `Job`. However, the batch namespace abstracts away the need to instantiate it directly. 
Instead, you can use the `` element, as the following example shows: - -[source, xml, role="xmlContent"] ++ +[source, xml] ---- @@ -95,41 +94,15 @@ following example shows: ---- -endif::backend-spring-html[] -ifdef::backend-pdf[] -Spring Batch provides a default implementation of the `Job` interface in the form of the -`SimpleJob` class, which creates some standard functionality on top of `Job`. When using -Java-based configuration, a collection of builders are made available for the -instantiation of a `Job`, as the following example shows: +==== + -[source, java] ----- -@Bean -public Job footballJob(JobRepository jobRepository) { - return new JobBuilder("footballJob", jobRepository) - .start(playerLoad()) - .next(gameLoad()) - .next(playerSummarization()) - .build(); -} ----- -However, when using XML configuration, the batch namespace abstracts away the need to -instantiate it directly. Instead, you can use the `` element, as the following -example shows: -[source, xml] ----- - - - - - ----- -endif::backend-pdf[] -==== JobInstance +[[jobinstance]] +=== JobInstance A `JobInstance` refers to the concept of a logical job run. Consider a batch job that should be run once at the end of the day, such as the `EndOfDay` `Job` from the preceding @@ -156,7 +129,7 @@ beginning,`" and using an existing instance generally means "`start from where y off`". [[jobParameters]] -==== JobParameters +=== JobParameters Having discussed `JobInstance` and how it differs from `Job`, the natural question to ask is: "`How is one `JobInstance` distinguished from another?`" The answer is: @@ -165,7 +138,7 @@ job. They can be used for identification or even as reference data during the ru following image shows: .Job Parameters -image::{batch-asciidoc}images/job-stereotypes-parameters.png[Job Parameters, scaledwidth="60%"] +image::job-stereotypes-parameters.png[Job Parameters, scaledwidth="60%"] In the preceding example, where there are two instances, one for January 1st and another for January 2nd, there is really only one `Job`, but it has two `JobParameter` objects: @@ -178,7 +151,8 @@ NOTE: Not all job parameters are required to contribute to the identification of `JobInstance`. By default, they do so. However, the framework also allows the submission of a `Job` with parameters that do not contribute to the identity of a `JobInstance`. -==== JobExecution +[[jobexecution]] +=== JobExecution A `JobExecution` refers to the technical concept of a single attempt to run a Job. An execution may end in failure or success, but the `JobInstance` corresponding to a given @@ -344,7 +318,8 @@ in both the `JobInstance` and `JobParameters` tables and two extra entries in th NOTE: Column names may have been abbreviated or removed for the sake of clarity and formatting. -=== Step +[[step]] +== Step A `Step` is a domain object that encapsulates an independent, sequential phase of a batch job. Therefore, every `Job` is composed entirely of one or more steps. A `Step` contains @@ -358,9 +333,10 @@ with a `Job`, a `Step` has an individual `StepExecution` that correlates with a `JobExecution`, as the following image shows: .Job Hierarchy With Steps -image::{batch-asciidoc}images/jobHeirarchyWithSteps.png[Figure 2.1: Job Hierarchy With Steps, scaledwidth="60%"] +image::jobHeirarchyWithSteps.png[Figure 2.1: Job Hierarchy With Steps, scaledwidth="60%"] -==== StepExecution +[[stepexecution]] +=== StepExecution A `StepExecution` represents a single attempt to execute a `Step`. 
A new `StepExecution` is created each time a `Step` is run, similar to `JobExecution`. However, if a step fails @@ -427,7 +403,8 @@ back. |The number of times `write` has failed, resulting in a skipped item. |=== -=== ExecutionContext +[[executioncontext]] +== ExecutionContext An `ExecutionContext` represents a collection of key/value pairs that are persisted and controlled by the framework to give developers a place to store persistent @@ -557,7 +534,8 @@ As noted in the comment, `ecStep` does not equal `ecJob`. They are two different `ExecutionContexts`. The one scoped to the `Step` is saved at every commit point in the `Step`, whereas the one scoped to the Job is saved in between every `Step` execution. -=== JobRepository +[[jobrepository]] +== JobRepository `JobRepository` is the persistence mechanism for all of the stereotypes mentioned earlier. It provides CRUD operations for `JobLauncher`, `Job`, and `Step` implementations. When a @@ -565,20 +543,28 @@ It provides CRUD operations for `JobLauncher`, `Job`, and `Step` implementations the course of execution, `StepExecution` and `JobExecution` implementations are persisted by passing them to the repository. -[role="xmlContent"] + +[tabs] +==== +Java:: ++ +When using Java configuration, the `@EnableBatchProcessing` annotation provides a +`JobRepository` as one of the components that is automatically configured. + +XML:: ++ The Spring Batch XML namespace provides support for configuring a `JobRepository` instance with the `` tag, as the following example shows: - -[source, xml, role="xmlContent"] ++ +[source, xml] ---- ---- +==== -[role="javaContent"] -When using Java configuration, the `@EnableBatchProcessing` annotation provides a -`JobRepository` as one of the components that is automatically configured. -=== JobLauncher +[[joblauncher]] +== JobLauncher `JobLauncher` represents a simple interface for launching a `Job` with a given set of `JobParameters`, as the following example shows: @@ -596,23 +582,26 @@ public JobExecution run(Job job, JobParameters jobParameters) It is expected that implementations obtain a valid `JobExecution` from the `JobRepository` and execute the `Job`. -=== ItemReader +[[itemreader]] +== ItemReader `ItemReader` is an abstraction that represents the retrieval of input for a `Step`, one item at a time. When the `ItemReader` has exhausted the items it can provide, it indicates this by returning `null`. You can find more details about the `ItemReader` interface and its various implementations in -<>. +xref:readersAndWriters.adoc[Readers And Writers]. -=== ItemWriter +[[itemwriter]] +== ItemWriter `ItemWriter` is an abstraction that represents the output of a `Step`, one batch or chunk of items at a time. Generally, an `ItemWriter` has no knowledge of the input it should receive next and knows only the item that was passed in its current invocation. You can find more details about the `ItemWriter` interface and its various implementations in -<>. +xref:readersAndWriters.adoc[Readers And Writers]. -=== ItemProcessor +[[itemprocessor]] +== ItemProcessor `ItemProcessor` is an abstraction that represents the business processing of an item. While the `ItemReader` reads one item, and the `ItemWriter` writes one item, the @@ -620,16 +609,18 @@ While the `ItemReader` reads one item, and the `ItemWriter` writes one item, the If, while processing the item, it is determined that the item is not valid, returning `null` indicates that the item should not be written out. 
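
As a concrete (if hedged) sketch of that contract, the following processor validates and
transforms an item. The `Foo` and `Bar` types and the validation rule are invented for
illustration:

[source, java]
----
public class FooProcessor implements ItemProcessor<Foo, Bar> {

	@Override
	public Bar process(Foo foo) {
		if (!foo.isValid()) {
			// returning null filters the item: it is never handed to the ItemWriter
			return null;
		}
		// otherwise, transform the input type into the output type
		return new Bar(foo);
	}
}
----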
You can find more details about the `ItemProcessor` interface in -<>. +xref:readersAndWriters.adoc[Readers And Writers]. [role="xmlContent"] -=== Batch Namespace +[[batch-namespace]] +== Batch Namespace Many of the domain concepts listed previously need to be configured in a Spring `ApplicationContext`. While there are implementations of the interfaces above that you can use in a standard bean definition, a namespace has been provided for ease of configuration, as the following example shows: + [source, xml, role="xmlContent"] ---- >. You can find more information on configuring a `Step` in -<>. +information on configuring a Job in xref:job.adoc[Configuring and Running a Job] +. You can find more information on configuring a `Step` in +xref:step.adoc[Configuring a Step]. + diff --git a/spring-batch-docs/src/main/asciidoc/footer/index-footer.adoc b/spring-batch-docs/modules/ROOT/pages/footer/index-footer.adoc similarity index 100% rename from spring-batch-docs/src/main/asciidoc/footer/index-footer.adoc rename to spring-batch-docs/modules/ROOT/pages/footer/index-footer.adoc diff --git a/spring-batch-docs/src/main/asciidoc/glossary.adoc b/spring-batch-docs/modules/ROOT/pages/glossary.adoc similarity index 98% rename from spring-batch-docs/src/main/asciidoc/glossary.adoc rename to spring-batch-docs/modules/ROOT/pages/glossary.adoc index 4bedab1c43..884d8c2da8 100644 --- a/spring-batch-docs/src/main/asciidoc/glossary.adoc +++ b/spring-batch-docs/modules/ROOT/pages/glossary.adoc @@ -1,9 +1,11 @@ [[glossary]] [appendix] -== Glossary +[[glossary]] += Glossary [glossary] -=== Spring Batch Glossary +[[spring-batch-glossary]] +== Spring Batch Glossary Batch:: An accumulation of business transactions over time. diff --git a/spring-batch-docs/modules/ROOT/pages/header/index-header.adoc b/spring-batch-docs/modules/ROOT/pages/header/index-header.adoc new file mode 100644 index 0000000000..3838025307 --- /dev/null +++ b/spring-batch-docs/modules/ROOT/pages/header/index-header.adoc @@ -0,0 +1,3 @@ +[[spring-batch-reference-documentation]] += Spring Batch - Reference Documentation +:page-section-summary-toc: 1 diff --git a/spring-batch-docs/modules/ROOT/pages/index.adoc b/spring-batch-docs/modules/ROOT/pages/index.adoc new file mode 100644 index 0000000000..1c541e3c2c --- /dev/null +++ b/spring-batch-docs/modules/ROOT/pages/index.adoc @@ -0,0 +1,46 @@ += Overview + +// ====================================================================================== + +The reference documentation is divided into several sections: + +[horizontal] +xref:spring-batch-intro.adoc[Spring Batch Introduction] :: Background, usage + scenarios, and general guidelines. +xref:spring-batch-architecture.adoc[Spring Batch Architecture] :: Spring Batch +architecture, general batch principles, batch processing strategies. +xref:whatsnew.adoc[What's new in Spring Batch 5.1] :: New features introduced in version 5.1. +xref:domain.adoc[The Domain Language of Batch] :: Core concepts and abstractions +of the Batch domain language. +xref:job.adoc[Configuring and Running a Job] :: Job configuration, execution, and +administration. +xref:step.adoc[Configuring a Step] :: Step configuration, different types of steps, and +controlling step flow. +xref:readersAndWriters.adoc[Item reading and writing] :: `ItemReader` +and `ItemWriter` interfaces and how to use them. +xref:processor.adoc[Item processing] :: `ItemProcessor` interface and how to use it. 
+xref:scalability.adoc#scalability[Scaling and Parallel Processing] :: Multi-threaded steps,
+parallel steps, remote chunking, and partitioning.
+xref:repeat.adoc[Repeat] :: Completion policies and exception handling of repetitive actions.
+xref:retry.adoc[Retry] :: Retry and backoff policies of retryable operations.
+xref:testing.adoc[Unit Testing] :: Job and Step testing facilities and APIs.
+xref:common-patterns.adoc#commonPatterns[Common Patterns] :: Common batch processing patterns
+and guidelines.
+xref:spring-batch-integration.adoc[Spring Batch Integration] :: Integration
+between Spring Batch and Spring Integration projects.
+xref:monitoring-and-metrics.adoc[Monitoring and metrics] :: Batch jobs
+monitoring and metrics.
+xref:tracing.adoc[Tracing] :: Tracing with Micrometer.
+
+The following appendices are available:
+
+[horizontal]
+xref:appendix.adoc#listOfReadersAndWriters[List of ItemReaders and ItemWriters] :: List of
+all provided item readers and writers.
+xref:schema-appendix.adoc#metaDataSchema[Meta-Data Schema] :: Core tables used by the Batch
+domain model.
+xref:transaction-appendix.adoc#transactions[Batch Processing and Transactions] :: Transaction
+boundaries, propagation, and isolation levels used in Spring Batch.
+xref:glossary.adoc[Glossary] :: Glossary of common terms, concepts, and vocabulary of
+the Batch domain.
+
diff --git a/spring-batch-docs/modules/ROOT/pages/job.adoc b/spring-batch-docs/modules/ROOT/pages/job.adoc
new file mode 100644
index 0000000000..31c9408033
--- /dev/null
+++ b/spring-batch-docs/modules/ROOT/pages/job.adoc
@@ -0,0 +1,19 @@
+
+[[configureJob]]
+= Configuring and Running a Job
+:page-section-summary-toc: 1
+
+In the xref:domain.adoc[domain section], the overall
+architecture design was discussed, using the following diagram as a
+guide:
+
+.Batch Stereotypes
+image::spring-batch-reference-model.png[Figure 2.1: Batch Stereotypes, scaledwidth="60%"]
+
+While the `Job` object may seem like a simple
+container for steps, you must be aware of many configuration options.
+Furthermore, you must consider many options for
+how a `Job` can be run and how its metadata can be
+stored during that run. This chapter explains the various configuration
+options and runtime concerns of a `Job`.
+
diff --git a/spring-batch-docs/modules/ROOT/pages/job/advanced-meta-data.adoc b/spring-batch-docs/modules/ROOT/pages/job/advanced-meta-data.adoc
new file mode 100644
index 0000000000..6919c2f34d
--- /dev/null
+++ b/spring-batch-docs/modules/ROOT/pages/job/advanced-meta-data.adoc
@@ -0,0 +1,556 @@
+[[advancedMetaData]]
+= Advanced Metadata Usage
+
+So far, both the `JobLauncher` and `JobRepository` interfaces have been
+discussed. Together, they represent the simple launching of a job and basic
+CRUD operations of batch domain objects:
+
+.Job Repository
+image::job-repository.png[Job Repository, scaledwidth="60%"]
+
+A `JobLauncher` uses the
+`JobRepository` to create new
+`JobExecution` objects and run them.
+`Job` and `Step` implementations
+later use the same `JobRepository` for basic updates
+of the same executions during the running of a `Job`.
+The basic operations suffice for simple scenarios.
However, in a large batch +environment with hundreds of batch jobs and complex scheduling +requirements, more advanced access to the metadata is required: + +.Advanced Job Repository Access +image::job-repository-advanced.png[Job Repository Advanced, scaledwidth="80%"] + +The `JobExplorer` and +`JobOperator` interfaces, which are discussed +in the coming sections, add additional functionality for querying and controlling the metadata. + +[[queryingRepository]] +== Querying the Repository + +The most basic need before any advanced features is the ability to +query the repository for existing executions. This functionality is +provided by the `JobExplorer` interface: + +[source, java] +---- +public interface JobExplorer { + + List getJobInstances(String jobName, int start, int count); + + JobExecution getJobExecution(Long executionId); + + StepExecution getStepExecution(Long jobExecutionId, Long stepExecutionId); + + JobInstance getJobInstance(Long instanceId); + + List getJobExecutions(JobInstance jobInstance); + + Set findRunningJobExecutions(String jobName); +} +---- + +As is evident from its method signatures, `JobExplorer` is a read-only version of +the `JobRepository`, and, like the `JobRepository`, it can be easily configured by using a +factory bean. + + +[tabs] +==== +Java:: ++ +The following example shows how to configure a `JobExplorer` in Java: ++ +.Java Configuration +[source, java] +---- +... +// This would reside in your DefaultBatchConfiguration extension +@Bean +public JobExplorer jobExplorer() throws Exception { + JobExplorerFactoryBean factoryBean = new JobExplorerFactoryBean(); + factoryBean.setDataSource(this.dataSource); + return factoryBean.getObject(); +} +... +---- + +XML:: ++ +The following example shows how to configure a `JobExplorer` in XML: ++ +.XML Configuration +[source, xml] +---- + +---- + +==== + + + +xref:job/configuring-repository.adoc#repositoryTablePrefix[Earlier in this chapter], we noted that you can modify the table prefix +of the `JobRepository` to allow for different versions or schemas. Because +the `JobExplorer` works with the same tables, it also needs the ability to set a prefix. + + +[tabs] +==== +Java:: ++ +The following example shows how to set the table prefix for a `JobExplorer` in Java: ++ +.Java Configuration +[source, java] +---- +... +// This would reside in your DefaultBatchConfiguration extension +@Bean +public JobExplorer jobExplorer() throws Exception { + JobExplorerFactoryBean factoryBean = new JobExplorerFactoryBean(); + factoryBean.setDataSource(this.dataSource); + factoryBean.setTablePrefix("SYSTEM."); + return factoryBean.getObject(); +} +... +---- + +XML:: ++ +The following example shows how to set the table prefix for a `JobExplorer` in XML: ++ +.XML Configuration +[source, xml] +---- + +---- + +==== + + + +[[jobregistry]] +== JobRegistry + +A `JobRegistry` (and its parent interface, `JobLocator`) is not mandatory, but it can be +useful if you want to keep track of which jobs are available in the context. It is also +useful for collecting jobs centrally in an application context when they have been created +elsewhere (for example, in child contexts). You can also use custom `JobRegistry` implementations +to manipulate the names and other properties of the jobs that are registered. +There is only one implementation provided by the framework and this is based on a simple +map from job name to job instance. + +[tabs] +==== +Java:: ++ +When using `@EnableBatchProcessing`, a `JobRegistry` is provided for you. 
+The following example shows how to configure your own `JobRegistry`: ++ +[source, java] +---- +... +// This is already provided via the @EnableBatchProcessing but can be customized via +// overriding the bean in the DefaultBatchConfiguration +@Override +@Bean +public JobRegistry jobRegistry() throws Exception { + return new MapJobRegistry(); +} +... +---- + +XML:: ++ +The following example shows how to include a `JobRegistry` for a job defined in XML: ++ +[source, xml] +---- + +---- + +==== + +You can populate a `JobRegistry` in either of two ways: by using +a bean post processor or by using a registrar lifecycle component. The coming +sections describe these two mechanisms. + +[[jobregistrybeanpostprocessor]] +=== JobRegistryBeanPostProcessor + +This is a bean post-processor that can register all jobs as they are created. + +[tabs] +==== +Java:: ++ +The following example shows how to include the `JobRegistryBeanPostProcessor` for a job +defined in Java: ++ +.Java Configuration +[source, java] +---- +@Bean +public JobRegistryBeanPostProcessor jobRegistryBeanPostProcessor(JobRegistry jobRegistry) { + JobRegistryBeanPostProcessor postProcessor = new JobRegistryBeanPostProcessor(); + postProcessor.setJobRegistry(jobRegistry); + return postProcessor; +} +---- + +XML:: ++ +The following example shows how to include the `JobRegistryBeanPostProcessor` for a job +defined in XML: ++ +.XML Configuration +[source, xml] +---- + + + +---- + +==== + + + +Although it is not strictly necessary, the post-processor in the +example has been given an `id` so that it can be included in child +contexts (for example, as a parent bean definition) and cause all jobs created +there to also be registered automatically. + +[[automaticjobregistrar]] +=== AutomaticJobRegistrar + +This is a lifecycle component that creates child contexts and registers jobs from those +contexts as they are created. One advantage of doing this is that, while the job names in +the child contexts still have to be globally unique in the registry, their dependencies +can have "`natural`" names. So, for example, you can create a set of XML configuration files +that each have only one Job but that all have different definitions of an `ItemReader` with the +same bean name, such as `reader`. If all those files were imported into the same context, +the reader definitions would clash and override one another, but, with the automatic +registrar, this is avoided. This makes it easier to integrate jobs that have been contributed from +separate modules of an application. + +[tabs] +==== +Java:: ++ +The following example shows how to include the `AutomaticJobRegistrar` for a job defined +in Java: ++ +.Java Configuration +[source, java] +---- +@Bean +public AutomaticJobRegistrar registrar() { + + AutomaticJobRegistrar registrar = new AutomaticJobRegistrar(); + registrar.setJobLoader(jobLoader()); + registrar.setApplicationContextFactories(applicationContextFactories()); + registrar.afterPropertiesSet(); + return registrar; + +} +---- + +XML:: ++ +The following example shows how to include the `AutomaticJobRegistrar` for a job defined +in XML: ++ +.XML Configuration +[source, xml] +---- + + + + + + + + + + + + +---- + +==== + + + +The registrar has two mandatory properties: an array of +`ApplicationContextFactory` (created from a +convenient factory bean in the preceding example) and a +`JobLoader`. The `JobLoader` +is responsible for managing the lifecycle of the child contexts and +registering jobs in the `JobRegistry`. 
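
The `jobLoader()` and `applicationContextFactories()` bean methods referenced in the
preceding example are not shown in the original sample. Assuming the `DefaultJobLoader`
and a resource-based context factory are used (recent versions ship
`GenericApplicationContextFactory` for this purpose), the wiring might look like the
following sketch; the XML file locations are invented:

[source, java]
----
@Bean
public JobLoader jobLoader(JobRegistry jobRegistry) {
	// DefaultJobLoader creates the child contexts and registers their jobs
	return new DefaultJobLoader(jobRegistry);
}

@Bean
public ApplicationContextFactory[] applicationContextFactories() {
	// one factory per child context; each file may define its own "reader" bean
	return new ApplicationContextFactory[] {
			new GenericApplicationContextFactory(new ClassPathResource("jobs/job1.xml")),
			new GenericApplicationContextFactory(new ClassPathResource("jobs/job2.xml"))
	};
}
----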
+ +The `ApplicationContextFactory` is +responsible for creating the child context. The most common usage +is (as in the preceding example) to use a +`ClassPathXmlApplicationContextFactory`. One of +the features of this factory is that, by default, it copies some of the +configuration down from the parent context to the child. So, for +instance, you need not redefine the +`PropertyPlaceholderConfigurer` or AOP +configuration in the child, provided it should be the same as the +parent. + +You can use `AutomaticJobRegistrar` in +conjunction with a `JobRegistryBeanPostProcessor` +(as long as you also use `DefaultJobLoader`). +For instance, this might be desirable if there are jobs +defined in the main parent context as well as in the child +locations. + +[[JobOperator]] +== JobOperator + +As previously discussed, the `JobRepository` +provides CRUD operations on the meta-data, and the +`JobExplorer` provides read-only operations on the +metadata. However, those operations are most useful when used together +to perform common monitoring tasks such as stopping, restarting, or +summarizing a Job, as is commonly done by batch operators. Spring Batch +provides these types of operations in the +`JobOperator` interface: + +[source, java] +---- +public interface JobOperator { + + List getExecutions(long instanceId) throws NoSuchJobInstanceException; + + List getJobInstances(String jobName, int start, int count) + throws NoSuchJobException; + + Set getRunningExecutions(String jobName) throws NoSuchJobException; + + String getParameters(long executionId) throws NoSuchJobExecutionException; + + Long start(String jobName, String parameters) + throws NoSuchJobException, JobInstanceAlreadyExistsException; + + Long restart(long executionId) + throws JobInstanceAlreadyCompleteException, NoSuchJobExecutionException, + NoSuchJobException, JobRestartException; + + Long startNextInstance(String jobName) + throws NoSuchJobException, JobParametersNotFoundException, JobRestartException, + JobExecutionAlreadyRunningException, JobInstanceAlreadyCompleteException; + + boolean stop(long executionId) + throws NoSuchJobExecutionException, JobExecutionNotRunningException; + + String getSummary(long executionId) throws NoSuchJobExecutionException; + + Map getStepExecutionSummaries(long executionId) + throws NoSuchJobExecutionException; + + Set getJobNames(); + +} +---- + +The preceding operations represent methods from many different interfaces, such as +`JobLauncher`, `JobRepository`, `JobExplorer`, and `JobRegistry`. For this reason, the +provided implementation of `JobOperator` (`SimpleJobOperator`) has many dependencies. + + +[tabs] +==== +Java:: ++ +The following example shows a typical bean definition for `SimpleJobOperator` in Java: ++ +[source, java] +---- + /** + * All injected dependencies for this bean are provided by the @EnableBatchProcessing + * infrastructure out of the box. 
+ */ + @Bean + public SimpleJobOperator jobOperator(JobExplorer jobExplorer, + JobRepository jobRepository, + JobRegistry jobRegistry, + JobLauncher jobLauncher) { + + SimpleJobOperator jobOperator = new SimpleJobOperator(); + jobOperator.setJobExplorer(jobExplorer); + jobOperator.setJobRepository(jobRepository); + jobOperator.setJobRegistry(jobRegistry); + jobOperator.setJobLauncher(jobLauncher); + + return jobOperator; + } +---- + +XML:: ++ +The following example shows a typical bean definition for `SimpleJobOperator` in XML: ++ +[source, xml] +---- + + + + + + + + + + +---- + +==== + + +As of version 5.0, the `@EnableBatchProcessing` annotation automatically registers a job operator bean +in the application context. + +NOTE: If you set the table prefix on the job repository, do not forget to set it on the job explorer as well. + +[[JobParametersIncrementer]] +== JobParametersIncrementer + +Most of the methods on `JobOperator` are +self-explanatory, and you can find more detailed explanations in the +https://docs.spring.io/spring-batch/docs/current/api/org/springframework/batch/core/launch/JobOperator.html[Javadoc of the interface]. However, the +`startNextInstance` method is worth noting. This +method always starts a new instance of a `Job`. +This can be extremely useful if there are serious issues in a +`JobExecution` and the `Job` +needs to be started over again from the beginning. Unlike +`JobLauncher` (which requires a new +`JobParameters` object that triggers a new +`JobInstance`), if the parameters are different from +any previous set of parameters, the +`startNextInstance` method uses the +`JobParametersIncrementer` tied to the +`Job` to force the `Job` to a +new instance: + +[source, java] +---- +public interface JobParametersIncrementer { + + JobParameters getNext(JobParameters parameters); + +} +---- + +The contract of `JobParametersIncrementer` is +that, given a xref:domain.adoc#jobParameters[JobParameters] +object, it returns the "`next`" `JobParameters` +object by incrementing any necessary values it may contain. This +strategy is useful because the framework has no way of knowing what +changes to the `JobParameters` make it the "`next`" +instance. For example, if the only value in +`JobParameters` is a date and the next instance +should be created, should that value be incremented by one day or one +week (if the job is weekly, for instance)? The same can be said for any +numerical values that help to identify the `Job`, +as the following example shows: + +[source, java] +---- +public class SampleIncrementer implements JobParametersIncrementer { + + public JobParameters getNext(JobParameters parameters) { + if (parameters==null || parameters.isEmpty()) { + return new JobParametersBuilder().addLong("run.id", 1L).toJobParameters(); + } + long id = parameters.getLong("run.id",1L) + 1; + return new JobParametersBuilder().addLong("run.id", id).toJobParameters(); + } +} +---- + +In this example, the value with a key of `run.id` is used to +discriminate between `JobInstances`. If the +`JobParameters` passed in is null, it can be +assumed that the `Job` has never been run before +and, thus, its initial state can be returned. However, if not, the old +value is obtained, incremented by one, and returned. 
+ + +[tabs] +==== +Java:: ++ +For jobs defined in Java, you can associate an incrementer with a `Job` through the +`incrementer` method provided in the builders, as follows: ++ +[source, java] +---- +@Bean +public Job footballJob(JobRepository jobRepository) { + return new JobBuilder("footballJob", jobRepository) + .incrementer(sampleIncrementer()) + ... + .build(); +} +---- + +XML:: ++ +For jobs defined in XML, you can associate an incrementer with a `Job` through the +`incrementer` attribute in the namespace, as follows: ++ +[source, xml] +---- + + ... + +---- +==== + +[[stoppingAJob]] +== Stopping a Job + +One of the most common use cases of +`JobOperator` is gracefully stopping a +Job: + +[source, java] +---- +Set executions = jobOperator.getRunningExecutions("sampleJob"); +jobOperator.stop(executions.iterator().next()); +---- + +The shutdown is not immediate, since there is no way to force +immediate shutdown, especially if the execution is currently in +developer code that the framework has no control over, such as a +business service. However, as soon as control is returned back to the +framework, it sets the status of the current +`StepExecution` to +`BatchStatus.STOPPED`, saves it, and does the same +for the `JobExecution` before finishing. + +[[aborting-a-job]] +== Aborting a Job + +A job execution that is `FAILED` can be +restarted (if the `Job` is restartable). A job execution whose status is +`ABANDONED` cannot be restarted by the framework. +The `ABANDONED` status is also used in step +executions to mark them as skippable in a restarted job execution. If a +job is running and encounters a step that has been marked +`ABANDONED` in the previous failed job execution, it +moves on to the next step (as determined by the job flow definition +and the step execution exit status). + +If the process died (`kill -9` or server +failure), the job is, of course, not running, but the `JobRepository` has +no way of knowing because no one told it before the process died. You +have to tell it manually that you know that the execution either failed +or should be considered aborted (change its status to +`FAILED` or `ABANDONED`). This is +a business decision, and there is no way to automate it. Change the +status to `FAILED` only if it is restartable and you know that the restart data is valid. diff --git a/spring-batch-docs/modules/ROOT/pages/job/configuring-launcher.adoc b/spring-batch-docs/modules/ROOT/pages/job/configuring-launcher.adoc new file mode 100644 index 0000000000..828f393d24 --- /dev/null +++ b/spring-batch-docs/modules/ROOT/pages/job/configuring-launcher.adoc @@ -0,0 +1,120 @@ +[[configuringJobLauncher]] += Configuring a JobLauncher + + +[tabs] +==== +Java:: ++ +When you use `@EnableBatchProcessing`, a `JobRegistry` is provided for you. +This section describes how to configure your own. + +XML:: ++ +// FIXME what is the XML equivalent? +==== + + +The most basic implementation of the `JobLauncher` interface is the `TaskExecutorJobLauncher`. +Its only required dependency is a `JobRepository` (needed to obtain an execution). + + +[tabs] +==== +Java:: ++ +The following example shows a `TaskExecutorJobLauncher` in Java: ++ +.Java Configuration +[source, java] +---- +... +@Bean +public JobLauncher jobLauncher() throws Exception { + TaskExecutorJobLauncher jobLauncher = new TaskExecutorJobLauncher(); + jobLauncher.setJobRepository(jobRepository); + jobLauncher.afterPropertiesSet(); + return jobLauncher; +} +... 
+---- + +XML:: ++ +The following example shows a `TaskExecutorJobLauncher` in XML: ++ +.XML Configuration +[source, xml] +---- + + + +---- + +==== + + +Once a xref:domain.adoc[JobExecution] is obtained, it is passed to the +execute method of `Job`, ultimately returning the `JobExecution` to the caller, as +the following image shows: + +.Job Launcher Sequence +image::job-launcher-sequence-sync.png[Job Launcher Sequence, scaledwidth="60%"] + +The sequence is straightforward and works well when launched from a scheduler. However, +issues arise when trying to launch from an HTTP request. In this scenario, the launching +needs to be done asynchronously so that the `TaskExecutorJobLauncher` returns immediately to its +caller. This is because it is not good practice to keep an HTTP request open for the +amount of time needed by long running processes (such as batch jobs). The following image shows +an example sequence: + +.Asynchronous Job Launcher Sequence +image::job-launcher-sequence-async.png[Async Job Launcher Sequence, scaledwidth="60%"] + +You can configure the `TaskExecutorJobLauncher` to allow for this scenario by configuring a +`TaskExecutor`. + +[tabs] +==== +Java:: ++ +The following Java example configures a `TaskExecutorJobLauncher` to return immediately: ++ +.Java Configuration +[source, java] +---- +@Bean +public JobLauncher jobLauncher() { + TaskExecutorJobLauncher jobLauncher = new TaskExecutorJobLauncher(); + jobLauncher.setJobRepository(jobRepository()); + jobLauncher.setTaskExecutor(new SimpleAsyncTaskExecutor()); + jobLauncher.afterPropertiesSet(); + return jobLauncher; +} +---- + +XML:: ++ +The following XML example configures a `TaskExecutorJobLauncher` to return immediately: ++ +.XML Configuration +[source, xml] +---- + + + + + + +---- + +==== + + + +You can use any implementation of the spring `TaskExecutor` +interface to control how jobs are asynchronously +executed. + diff --git a/spring-batch-docs/modules/ROOT/pages/job/configuring-repository.adoc b/spring-batch-docs/modules/ROOT/pages/job/configuring-repository.adoc new file mode 100644 index 0000000000..29fe9eff44 --- /dev/null +++ b/spring-batch-docs/modules/ROOT/pages/job/configuring-repository.adoc @@ -0,0 +1,262 @@ +[[configuringJobRepository]] += Configuring a JobRepository + +As described earlier, the xref:job.adoc[`JobRepository`] is used for basic CRUD operations of the various persisted +domain objects within Spring Batch, such as `JobExecution` and `StepExecution`. +It is required by many of the major framework features, such as the `JobLauncher`, +`Job`, and `Step`. + + +// FIXME: This did not quite convert properly +[tabs] +==== +Java:: ++ +When using `@EnableBatchProcessing`, a `JobRepository` is provided for you. +This section describes how to configure your own. ++ +Other than the `dataSource` and the `transactionManager`, none of the configuration options listed earlier are required. +If they are not set, the defaults shown earlier +are used. The +max `varchar` length defaults to `2500`, which is the +length of the long `VARCHAR` columns in the +xref:schema-appendix.adoc#metaDataSchemaOverview[sample schema scripts] + + +XML:: ++ +The batch namespace abstracts away many of the implementation details of the +`JobRepository` implementations and their collaborators. However, there are still a few +configuration options available, as the following example shows: ++ +.XML Configuration +[source, xml] +---- + +---- ++ +Other than the `id`, none of the configuration options listed earlier are required. 
If they are +not set, the defaults shown earlier are used. +The `max-varchar-length` defaults to `2500`, which is the length of the long +`VARCHAR` columns in the xref:schema-appendix.adoc#metaDataSchemaOverview[sample schema scripts] +. + +==== + + +[[txConfigForJobRepository]] +== Transaction Configuration for the JobRepository + +If the namespace or the provided `FactoryBean` is used, transactional advice is +automatically created around the repository. This is to ensure that the batch metadata, +including state that is necessary for restarts after a failure, is persisted correctly. +The behavior of the framework is not well defined if the repository methods are not +transactional. The isolation level in the `create*` method attributes is specified +separately to ensure that, when jobs are launched, if two processes try to launch +the same job at the same time, only one succeeds. The default isolation level for that +method is `SERIALIZABLE`, which is quite aggressive. `READ_COMMITTED` usually works equally +well. `READ_UNCOMMITTED` is fine if two processes are not likely to collide in this +way. However, since a call to the `create*` method is quite short, it is unlikely that +`SERIALIZED` causes problems, as long as the database platform supports it. However, you +can override this setting. + + +[tabs] +==== +Java:: ++ +The following example shows how to override the isolation level in Java: ++ +.Java Configuration +[source, java] +---- +@Configuration +@EnableBatchProcessing(isolationLevelForCreate = "ISOLATION_REPEATABLE_READ") +public class MyJobConfiguration { + + // job definition + +} +---- + +XML:: ++ +The following example shows how to override the isolation level in XML: ++ +.XML Configuration +[source, xml] +---- + +---- +==== + + +If the namespace is not used, you must also configure the +transactional behavior of the repository by using AOP. + +[tabs] +==== +Java:: ++ +The following example shows how to configure the transactional behavior of the repository +in Java: ++ +.Java Configuration +[source, java] +---- +@Bean +public TransactionProxyFactoryBean baseProxy() { + TransactionProxyFactoryBean transactionProxyFactoryBean = new TransactionProxyFactoryBean(); + Properties transactionAttributes = new Properties(); + transactionAttributes.setProperty("*", "PROPAGATION_REQUIRED"); + transactionProxyFactoryBean.setTransactionAttributes(transactionAttributes); + transactionProxyFactoryBean.setTarget(jobRepository()); + transactionProxyFactoryBean.setTransactionManager(transactionManager()); + return transactionProxyFactoryBean; +} +---- + +XML:: ++ +The following example shows how to configure the transactional behavior of the repository +in XML: ++ +.XML Configuration +[source, xml] +---- + + + + + + + + + + +---- ++ +You can use the preceding fragment nearly as is, with almost no changes. Remember also to +include the appropriate namespace declarations and to make sure `spring-tx` and `spring-aop` +(or the whole of Spring) are on the classpath. +==== + + + + +[[repositoryTablePrefix]] +== Changing the Table Prefix + +Another modifiable property of the `JobRepository` is the table prefix of the meta-data +tables. By default, they are all prefaced with `BATCH_`. `BATCH_JOB_EXECUTION` and +`BATCH_STEP_EXECUTION` are two examples. However, there are potential reasons to modify this +prefix. If the schema names need to be prepended to the table names or if more than one +set of metadata tables is needed within the same schema, the table prefix needs to +be changed. 
+ + +[tabs] +==== +Java:: ++ +The following example shows how to change the table prefix in Java: ++ +.Java Configuration +[source, java] +---- +@Configuration +@EnableBatchProcessing(tablePrefix = "SYSTEM.TEST_") +public class MyJobConfiguration { + + // job definition + +} +---- + +XML:: ++ +The following example shows how to change the table prefix in XML: ++ +.XML Configuration +[source, xml] +---- + +---- + +==== + + + + + +Given the preceding changes, every query to the metadata tables is prefixed with +`SYSTEM.TEST_`. `BATCH_JOB_EXECUTION` is referred to as `SYSTEM.TEST_JOB_EXECUTION`. + +NOTE: Only the table prefix is configurable. The table and column names are not. + +[[nonStandardDatabaseTypesInRepository]] +== Non-standard Database Types in a Repository + +If you use a database platform that is not in the list of supported platforms, you +may be able to use one of the supported types, if the SQL variant is close enough. To do +this, you can use the raw `JobRepositoryFactoryBean` instead of the namespace shortcut and +use it to set the database type to the closest match. + +[tabs] +==== +Java:: ++ +The following example shows how to use `JobRepositoryFactoryBean` to set the database type +to the closest match in Java: ++ +.Java Configuration +[source, java] +---- +@Bean +public JobRepository jobRepository() throws Exception { + JobRepositoryFactoryBean factory = new JobRepositoryFactoryBean(); + factory.setDataSource(dataSource); + factory.setDatabaseType("db2"); + factory.setTransactionManager(transactionManager); + return factory.getObject(); +} +---- + +XML:: ++ +The following example shows how to use `JobRepositoryFactoryBean` to set the database type +to the closest match in XML: ++ +.XML Configuration +[source, xml] +---- + + + + +---- + +==== + + +If the database type is not specified, the `JobRepositoryFactoryBean` tries to +auto-detect the database type from the `DataSource`. +The major differences between platforms are +mainly accounted for by the strategy for incrementing primary keys, so +it is often necessary to override the +`incrementerFactory` as well (by using one of the standard +implementations from the Spring Framework). + +If even that does not work or if you are not using an RDBMS, the +only option may be to implement the various `Dao` +interfaces that the `SimpleJobRepository` depends +on and wire one up manually in the normal Spring way. + diff --git a/spring-batch-docs/modules/ROOT/pages/job/configuring.adoc b/spring-batch-docs/modules/ROOT/pages/job/configuring.adoc new file mode 100644 index 0000000000..57f9bbb48c --- /dev/null +++ b/spring-batch-docs/modules/ROOT/pages/job/configuring.adoc @@ -0,0 +1,316 @@ +[[configuringAJob]] += Configuring a Job + +There are multiple implementations of the xref:job.adoc[`Job`] interface. However, +these implementations are abstracted behind either the provided builders (for Java configuration) or the XML +namespace (for XML-based configuration). The following example shows both Java and XML configuration: + +[tabs] +==== +Java:: ++ +[source, java] +---- +@Bean +public Job footballJob(JobRepository jobRepository) { + return new JobBuilder("footballJob", jobRepository) + .start(playerLoad()) + .next(gameLoad()) + .next(playerSummarization()) + .build(); +} +---- ++ +A `Job` (and, typically, any `Step` within it) requires a `JobRepository`. The +configuration of the `JobRepository` is handled through the xref:job/java-config.adoc[`Java Configuration`]. 
++ +The preceding example illustrates a `Job` that consists of three `Step` instances. The job related +builders can also contain other elements that help with parallelization (`Split`), +declarative flow control (`Decision`), and externalization of flow definitions (`Flow`). + +XML:: ++ +There are multiple implementations of the xref:job.adoc[`Job`] +interface. However, the namespace abstracts away the differences in configuration. It has +only three required dependencies: a name, `JobRepository` , and a list of `Step` instances. +The following example creates a `footballJob`: ++ +[source, xml] +---- + + + + + +---- ++ +The preceding examples uses a parent bean definition to create the steps. +See the section on xref:step.adoc[step configuration] +for more options when declaring specific step details inline. The XML namespace +defaults to referencing a repository with an `id` of `jobRepository`, which +is a sensible default. However, you can explicitly override this default: ++ +[source, xml] +---- + + + + + +---- ++ +In addition to steps, a job configuration can contain other elements +that help with parallelization (``), +declarative flow control (``), and +externalization of flow definitions +(``). + +==== + +[[restartability]] +== Restartability + +One key issue when executing a batch job concerns the behavior of a `Job` when it is +restarted. The launching of a `Job` is considered to be a "`restart`" if a `JobExecution` +already exists for the particular `JobInstance`. Ideally, all jobs should be able to start +up where they left off, but there are scenarios where this is not possible. +_In this scenario, it is entirely up to the developer to ensure that a new `JobInstance` is created._ +However, Spring Batch does provide some help. If a `Job` should never be +restarted but should always be run as part of a new `JobInstance`, you can set the +restartable property to `false`. + +[tabs] +==== +Java:: ++ +The following example shows how to set the `restartable` field to `false` in Java: ++ +.Java Configuration +[source, java] +---- +@Bean +public Job footballJob(JobRepository jobRepository) { + return new JobBuilder("footballJob", jobRepository) + .preventRestart() + ... + .build(); +} +---- + +XML:: ++ +The following example shows how to set the `restartable` field to `false` in XML: ++ +.XML Configuration +[source, xml] +---- + + ... + +---- +==== + +To phrase it another way, setting `restartable` to `false` means "`this +`Job` does not support being started again`". Restarting a `Job` that is not +restartable causes a `JobRestartException` to +be thrown. +The following Junit code causes the exception to be thrown: + +[source, java] +---- +Job job = new SimpleJob(); +job.setRestartable(false); + +JobParameters jobParameters = new JobParameters(); + +JobExecution firstExecution = jobRepository.createJobExecution(job, jobParameters); +jobRepository.saveOrUpdate(firstExecution); + +try { + jobRepository.createJobExecution(job, jobParameters); + fail(); +} +catch (JobRestartException e) { + // expected +} +---- + +The first attempt to create a +`JobExecution` for a non-restartable +job causes no issues. However, the second +attempt throws a `JobRestartException`. + +[[interceptingJobExecution]] +== Intercepting Job Execution + +During the course of the execution of a +`Job`, it may be useful to be notified of various +events in its lifecycle so that custom code can be run. 
+`SimpleJob` allows for this by calling a +`JobListener` at the appropriate time: + +[source, java] +---- +public interface JobExecutionListener { + + void beforeJob(JobExecution jobExecution); + + void afterJob(JobExecution jobExecution); +} +---- + +You can add `JobListeners` to a `SimpleJob` by setting listeners on the job. + + +[tabs] +==== +Java:: ++ +The following example shows how to add a listener method to a Java job definition: ++ +.Java Configuration +[source, java] +---- +@Bean +public Job footballJob(JobRepository jobRepository) { + return new JobBuilder("footballJob", jobRepository) + .listener(sampleListener()) + ... + .build(); +} +---- + +XML:: ++ +The following example shows how to add a listener element to an XML job definition: ++ +.XML Configuration +[source, xml] +---- + + + + + + + + +---- +==== + +Note that the `afterJob` method is called regardless of the success or +failure of the `Job`. If you need to determine success or failure, you can get that information +from the `JobExecution`: + +[source, java] +---- +public void afterJob(JobExecution jobExecution){ + if (jobExecution.getStatus() == BatchStatus.COMPLETED ) { + //job success + } + else if (jobExecution.getStatus() == BatchStatus.FAILED) { + //job failure + } +} +---- + +The annotations corresponding to this interface are: + +* `@BeforeJob` +* `@AfterJob` + +[[inheritingFromAParentJob]] +[role="xmlContent"] +[[inheriting-from-a-parent-job]] +== Inheriting from a Parent Job + +ifdef::backend-pdf[] +This section applies only to XML based configuration, as Java configuration provides better +reuse capabilities. +endif::backend-pdf[] + +[role="xmlContent"] +If a group of Jobs share similar but not +identical configurations, it may help to define a "`parent`" +`Job` from which the concrete +`Job` instances can inherit properties. Similar to class +inheritance in Java, a "`child`" `Job` combines +its elements and attributes with the parent's. + +[role="xmlContent"] +In the following example, `baseJob` is an abstract +`Job` definition that defines only a list of +listeners. The `Job` (`job1`) is a concrete +definition that inherits the list of listeners from `baseJob` and merges +it with its own list of listeners to produce a +`Job` with two listeners and one +`Step` (`step1`). + +[source, xml] +---- + + + + + + + + + + + + + +---- + +[role="xmlContent"] +See the section on <> +for more detailed information. + +[[jobparametersvalidator]] +== JobParametersValidator + +A job declared in the XML namespace or using any subclass of +`AbstractJob` can optionally declare a validator for the job parameters at +runtime. This is useful when, for instance, you need to assert that a job +is started with all its mandatory parameters. There is a +`DefaultJobParametersValidator` that you can use to constrain combinations +of simple mandatory and optional parameters. For more complex +constraints, you can implement the interface yourself. + + +[tabs] +==== +Java:: ++ +The configuration of a validator is supported through the Java builders: ++ +[source, java] +---- +@Bean +public Job job1(JobRepository jobRepository) { + return new JobBuilder("job1", jobRepository) + .validator(parametersValidator()) + ... 
+                     .build();
+}
+----
+
+XML::
++
+The configuration of a validator is supported through the XML namespace through a child
+element of the job, as the following example shows:
++
+[source, xml]
+----
+<job id="job1" parent="baseJob3">
+    <step id="step1" parent="standaloneStep"/>
+    <validator ref="parametersValidator"/>
+</job>
+----
++
+You can specify the validator as a reference (as shown earlier) or as a nested bean
+definition in the `beans` namespace.
+
+====
+
diff --git a/spring-batch-docs/modules/ROOT/pages/job/java-config.adoc b/spring-batch-docs/modules/ROOT/pages/job/java-config.adoc
new file mode 100644
index 0000000000..3cbcb727cc
--- /dev/null
+++ b/spring-batch-docs/modules/ROOT/pages/job/java-config.adoc
@@ -0,0 +1,105 @@
+[[javaConfig]]
+= Java Configuration
+
+Spring 3 brought the ability to configure applications with Java instead of XML. As of
+Spring Batch 2.2.0, you can configure batch jobs by using the same Java configuration.
+There are three components for the Java-based configuration: the `@EnableBatchProcessing`
+annotation and two builders.
+
+The `@EnableBatchProcessing` annotation works similarly to the other `@Enable*` annotations in the
+Spring family. In this case, `@EnableBatchProcessing` provides a base configuration for
+building batch jobs. Within this base configuration, instances of `StepScope` and `JobScope` are
+created, in addition to a number of beans being made available to be autowired:
+
+* `JobRepository`: a bean named `jobRepository`
+* `JobLauncher`: a bean named `jobLauncher`
+* `JobRegistry`: a bean named `jobRegistry`
+* `JobExplorer`: a bean named `jobExplorer`
+* `JobOperator`: a bean named `jobOperator`
+
+The default implementation provides the beans mentioned in the preceding list and requires a `DataSource`
+and a `PlatformTransactionManager` to be provided as beans within the context. The data source and transaction
+manager are used by the `JobRepository` and `JobExplorer` instances. By default, the data source named `dataSource`
+and the transaction manager named `transactionManager` are used. You can customize any of these beans by using
+the attributes of the `@EnableBatchProcessing` annotation. The following example shows how to provide a
+custom data source and transaction manager:
+
+[source, java]
+----
+@Configuration
+@EnableBatchProcessing(dataSourceRef = "batchDataSource", transactionManagerRef = "batchTransactionManager")
+public class MyJobConfiguration {
+
+    @Bean
+    public DataSource batchDataSource() {
+        return new EmbeddedDatabaseBuilder().setType(EmbeddedDatabaseType.HSQL)
+                .addScript("/org/springframework/batch/core/schema-hsqldb.sql")
+                .generateUniqueName(true).build();
+    }
+
+    @Bean
+    public JdbcTransactionManager batchTransactionManager(DataSource dataSource) {
+        return new JdbcTransactionManager(dataSource);
+    }
+
+    @Bean
+    public Job job(JobRepository jobRepository) {
+        return new JobBuilder("myJob", jobRepository)
+                //define job flow as needed
+                .build();
+    }
+
+}
+----
+
+NOTE: Only one configuration class needs to have the `@EnableBatchProcessing` annotation. Once
+you have a class annotated with it, you have all of the configuration described earlier.
+
+Starting from v5.0, an alternative, programmatic way of configuring base infrastructure beans
+is provided through the `DefaultBatchConfiguration` class. This class provides the same beans
+provided by `@EnableBatchProcessing` and can be used as a base class to configure batch jobs.
+The following snippet is a typical example of how to use it:
+
+[source, java]
+----
+@Configuration
+class MyJobConfiguration extends DefaultBatchConfiguration {
+
+    @Bean
+    public Job job(JobRepository jobRepository) {
+        return new JobBuilder("job", jobRepository)
+                // define job flow as needed
+                .build();
+    }
+
+}
+----
+
+The data source and transaction manager are resolved from the application context
+and set on the job repository and job explorer. You can customize the configuration
+of any infrastructure bean by overriding the corresponding getter. The following example
+shows, for instance, how to customize the character encoding:
+
+[source, java]
+----
+@Configuration
+class MyJobConfiguration extends DefaultBatchConfiguration {
+
+    @Bean
+    public Job job(JobRepository jobRepository) {
+        return new JobBuilder("job", jobRepository)
+                // define job flow as needed
+                .build();
+    }
+
+    @Override
+    protected Charset getCharset() {
+        return StandardCharsets.ISO_8859_1;
+    }
+}
+----
+
+NOTE: `@EnableBatchProcessing` should *not* be used with `DefaultBatchConfiguration`. You should
+either use the declarative way of configuring Spring Batch through `@EnableBatchProcessing`,
+or use the programmatic way of extending `DefaultBatchConfiguration`, but not both ways at
+the same time.
+
diff --git a/spring-batch-docs/modules/ROOT/pages/job/running.adoc b/spring-batch-docs/modules/ROOT/pages/job/running.adoc
new file mode 100644
index 0000000000..80114898fa
--- /dev/null
+++ b/spring-batch-docs/modules/ROOT/pages/job/running.adoc
@@ -0,0 +1,281 @@
+[[runningAJob]]
+= Running a Job
+
+At a minimum, launching a batch job requires two things: the
+`Job` to be launched and a
+`JobLauncher`. Both can be contained within the same
+context or different contexts. For example, if you launch jobs from the
+command line, a new JVM is instantiated for each `Job`. Thus, every
+job has its own `JobLauncher`. However, if
+you run from within a web container that is within the scope of an
+`HttpRequest`, there is usually one
+`JobLauncher` (configured for asynchronous job
+launching) that multiple requests invoke to launch their jobs.
+
+[[runningJobsFromCommandLine]]
+== Running Jobs from the Command Line
+
+If you want to run your jobs from an enterprise
+scheduler, the command line is the primary interface. This is because
+most schedulers (with the exception of Quartz, unless using
+`NativeJob`) work directly with operating system
+processes, primarily kicked off with shell scripts. There are many ways
+to launch a Java process besides a shell script, including other scripting
+languages (such as Perl or Ruby) and build tools (such as Ant or Maven).
+However, because most people are familiar with shell scripts, this example
+focuses on them.
+
+[[commandLineJobRunner]]
+=== The CommandLineJobRunner
+
+Because the script launching the job must kick off a Java
+Virtual Machine, there needs to be a class with a `main` method to act
+as the primary entry point. Spring Batch provides an implementation
+that serves this purpose:
+`CommandLineJobRunner`. Note
+that this is just one way to bootstrap your application. There are
+many ways to launch a Java process, and this class should in no way be
+viewed as definitive. The `CommandLineJobRunner`
+performs four tasks:
+
+* Load the appropriate `ApplicationContext`.
+* Parse command line arguments into `JobParameters`.
+* Locate the appropriate job based on arguments.
+* Use the `JobLauncher` provided in the application context to launch the job.
+
+All of these tasks are accomplished with only the arguments passed in.
+The following table describes the required arguments:
+
+.CommandLineJobRunner arguments
+|===============
+|`jobPath`|The location of the XML file that is used to
+create an `ApplicationContext`. This file
+should contain everything needed to run the complete
+`Job`.
+|`jobName`|The name of the job to be run.
+|===============
+
+These arguments must be passed in, with the path first and the name second. All arguments
+after these are considered to be job parameters, are turned into a `JobParameters` object,
+and must be in the format of `name=value`.
+
+
+[tabs]
+====
+Java::
++
+The following example shows a date passed as a job parameter to a job defined in Java:
++
+[source]
+----
+bash$ java CommandLineJobRunner io.spring.EndOfDayJobConfiguration endOfDay schedule.date=2007-05-05,java.time.LocalDate
+----
++
+[source, java]
+----
+public class EndOfDayJobConfiguration {
+
+    @Bean
+    public Job endOfDay(JobRepository jobRepository, Step step1) {
+        return new JobBuilder("endOfDay", jobRepository)
+                .start(step1)
+                .build();
+    }
+
+    @Bean
+    public Step step1(JobRepository jobRepository, PlatformTransactionManager transactionManager) {
+        return new StepBuilder("step1", jobRepository)
+                .tasklet((contribution, chunkContext) -> null, transactionManager)
+                .build();
+    }
+}
+----
+
+XML::
++
+In most cases, you would want to use a manifest to declare your `main` class in a jar. However,
+for simplicity, the class was used directly. This example uses the `EndOfDay`
+example from the xref:domain.adoc[The Domain Language of Batch]. The first
+argument is `endOfDayJob.xml`, which is the Spring ApplicationContext that contains the
+`Job`. The second argument, `endOfDay`, represents the job name. The final argument,
+`schedule.date=2007-05-05,java.time.LocalDate`, is converted into a `JobParameter` object of type
+`java.time.LocalDate`.
++
+The following example shows a sample configuration for `endOfDay` in XML:
++
+[source, xml]
+----
+<job id="endOfDay">
+    <step id="step1" parent="simpleStep" />
+</job>
+
+<!-- Launcher details removed for clarity -->
+<beans:bean id="jobLauncher"
+            class="org.springframework.batch.core.launch.support.TaskExecutorJobLauncher"/>
+----
+
+====
+
+
+
+The preceding example is overly simplistic, since there are many more requirements to
+run a batch job in Spring Batch in general, but it serves to show the two main
+requirements of the `CommandLineJobRunner`: `Job` and `JobLauncher`.
+
+
+
+[[exitCodes]]
+=== Exit Codes
+
+When launching a batch job from the command line, an enterprise
+scheduler is often used. Most schedulers are fairly dumb and work only
+at the process level. This means that they only know about some
+operating system process (such as a shell script that they invoke).
+In this scenario, the only way to communicate back to the scheduler
+about the success or failure of a job is through return codes. A
+return code is a number that is returned to a scheduler by the process
+to indicate the result of the run. In the simplest case, 0 is
+success and 1 is failure. However, there may be more complex
+scenarios, such as "`If job A returns 4, kick off job B, and, if it returns 5, kick
+off job C.`" This type of behavior is configured at the scheduler level,
+but it is important that a processing framework such as Spring Batch
+provide a way to return a numeric representation of the exit code
+for a particular batch job. In Spring Batch, this is encapsulated
+within an `ExitStatus`, which is covered in more
+detail in Chapter 5. For the purposes of discussing exit codes, the
+only important thing to know is that an
+`ExitStatus` has an exit code property that is
+set by the framework (or the developer) and is returned as part of the
+`JobExecution` returned from the
+`JobLauncher`. The
+`CommandLineJobRunner` converts this string value
+to a number by using the `ExitCodeMapper`
+interface:
+
+[source, java]
+----
+public interface ExitCodeMapper {
+
+    public int intValue(String exitCode);
+
+}
+----
+
+The essential contract of an
+`ExitCodeMapper` is that, given a string exit
+code, a number representation will be returned.
The default +implementation used by the job runner is the `SimpleJvmExitCodeMapper` +that returns 0 for completion, 1 for generic errors, and 2 for any job +runner errors such as not being able to find a +`Job` in the provided context. If anything more +complex than the three values above is needed, a custom +implementation of the `ExitCodeMapper` interface +must be supplied. Because the +`CommandLineJobRunner` is the class that creates +an `ApplicationContext` and, thus, cannot be +'wired together', any values that need to be overwritten must be +autowired. This means that if an implementation of +`ExitCodeMapper` is found within the `BeanFactory`, +it is injected into the runner after the context is created. All +that needs to be done to provide your own +`ExitCodeMapper` is to declare the implementation +as a root level bean and ensure that it is part of the +`ApplicationContext` that is loaded by the +runner. + +[[runningJobsFromWebContainer]] +== Running Jobs from within a Web Container + +Historically, offline processing (such as batch jobs) has been +launched from the command-line, as described earlier. However, there are +many cases where launching from an `HttpRequest` is +a better option. Many such use cases include reporting, ad-hoc job +running, and web application support. Because a batch job (by definition) +is long running, the most important concern is to launch the +job asynchronously: + +.Asynchronous Job Launcher Sequence From Web Container +image::launch-from-request.png[Async Job Launcher Sequence from web container, scaledwidth="60%"] + +The controller in this case is a Spring MVC controller. See the +Spring Framework Reference Guide for more about https://docs.spring.io/spring/docs/current/spring-framework-reference/web.html#mvc[Spring MVC]. +The controller launches a `Job` by using a +`JobLauncher` that has been configured to launch +xref:job/running.adoc#runningJobsFromWebContainer[asynchronously], which +immediately returns a `JobExecution`. The +`Job` is likely still running. However, this +nonblocking behavior lets the controller return immediately, which +is required when handling an `HttpRequest`. The following listing +shows an example: + +[source, java] +---- +@Controller +public class JobLauncherController { + + @Autowired + JobLauncher jobLauncher; + + @Autowired + Job job; + + @RequestMapping("/jobLauncher.html") + public void handle() throws Exception{ + jobLauncher.run(job, new JobParameters()); + } +} +---- + diff --git a/spring-batch-docs/src/main/asciidoc/monitoring-and-metrics.adoc b/spring-batch-docs/modules/ROOT/pages/monitoring-and-metrics.adoc similarity index 82% rename from spring-batch-docs/src/main/asciidoc/monitoring-and-metrics.adoc rename to spring-batch-docs/modules/ROOT/pages/monitoring-and-metrics.adoc index e583bba098..0d22ebcabb 100644 --- a/spring-batch-docs/src/main/asciidoc/monitoring-and-metrics.adoc +++ b/spring-batch-docs/modules/ROOT/pages/monitoring-and-metrics.adoc @@ -1,17 +1,14 @@ -:toc: left -:toclevels: 4 [[monitoring-and-metrics]] -== Monitoring and metrics += Monitoring and metrics -include::attributes.adoc[] Since version 4.2, Spring Batch provides support for batch monitoring and metrics based on link:$$https://micrometer.io/$$[Micrometer]. This section describes which metrics are provided out-of-the-box and how to contribute custom metrics. [[built-in-metrics]] -=== Built-in metrics +== Built-in metrics Metrics collection does not require any specific configuration. 
 All metrics provided by the framework are registered in
@@ -32,7 +29,7 @@ under the `spring.batch` prefix. The following table explains all the metrics in
 
 NOTE: The `status` tag can be either `SUCCESS` or `FAILURE`.
 
 [[custom-metrics]]
-=== Custom metrics
+== Custom metrics
 
 If you want to use your own metrics in your custom components, we recommend using
 Micrometer APIs directly. The following is an example of how to time a `Tasklet`:
@@ -70,7 +67,7 @@ public class MyTimedTasklet implements Tasklet {
 ----
 
 [[disabling-metrics]]
-=== Disabling Metrics
+== Disabling Metrics
 
 Metrics collection is a concern similar to logging. Disabling logs is typically
 done by configuring the logging library, and this is no different for metrics.
@@ -85,14 +82,4 @@ Metrics.globalRegistry.config().meterFilter(MeterFilter.denyNameStartsWith("spri
 ----
 
 See Micrometer's link:$$http://micrometer.io/docs/concepts#_meter_filters$$[reference documentation]
-for more details.
-
-[[tracing]]
-== Tracing
-
-As of version 5, Spring Batch provides tracing through Micrometer's `Observation` API. By default, tracing is enabled
-when using `@EnableBatchProcessing`. Spring Batch will create a trace for each job execution and a span for each
-step execution.
-
-If you do not use `EnableBatchProcessing`, you need to register a `BatchObservabilityBeanPostProcessor` in your
-application context, which will automatically setup Micrometer's observability in your jobs and steps beans.
+for more details.
\ No newline at end of file
diff --git a/spring-batch-docs/src/main/asciidoc/processor.adoc b/spring-batch-docs/modules/ROOT/pages/processor.adoc
similarity index 94%
rename from spring-batch-docs/src/main/asciidoc/processor.adoc
rename to spring-batch-docs/modules/ROOT/pages/processor.adoc
index 2ab0d39e7d..cbf6cb9d66 100644
--- a/spring-batch-docs/src/main/asciidoc/processor.adoc
+++ b/spring-batch-docs/modules/ROOT/pages/processor.adoc
@@ -1,15 +1,8 @@
-:toc: left
-:toclevels: 4
 [[itemProcessor]]
-== Item processing
+= Item processing
 
-include::attributes.adoc[]
-ifndef::onlyonetoggle[]
-include::toggle.adoc[]
-endif::onlyonetoggle[]
-
-The <<readersAndWriters.adoc#readersAndWriters,ItemReader and ItemWriter interfaces>> are both very useful for their specific
+The xref:readersAndWriters.adoc[ItemReader and ItemWriter interfaces] are both very useful for their specific
 tasks, but what if you want to insert business logic before writing? One option for both
 reading and writing is to use the composite pattern: Create an `ItemWriter` that contains
 another `ItemWriter` or an `ItemReader` that contains another `ItemReader`. The following
@@ -90,21 +83,13 @@ objects, throwing an exception if any other type is provided. Similarly, the
 `FooProcessor` throws an exception if anything but a `Foo` is provided. The
 `FooProcessor` can then be injected into a `Step`, as the following example shows:
 
-.XML Configuration
-[source, xml, role="xmlContent"]
-----
-<job id="ioSampleJob">
-    <step name="step1">
-        <tasklet>
-            <chunk reader="fooReader" processor="fooProcessor" writer="fooWriter" commit-interval="2"/>
-        </tasklet>
-    </step>
-</job>
-----
-
+[tabs]
+====
+Java::
++
 .Java Configuration
-[source, java, role="javaContent"]
+[source, java]
 ----
 @Bean
 public Job ioSampleJob(JobRepository jobRepository) {
@@ -124,11 +109,28 @@ public Step step1(JobRepository jobRepository, PlatformTransactionManager transa
 }
 ----
 
+XML::
++
+.XML Configuration
+[source, xml]
+----
+<job id="ioSampleJob">
+    <step name="step1">
+        <tasklet>
+            <chunk reader="fooReader" processor="fooProcessor" writer="fooWriter" commit-interval="2"/>
+        </tasklet>
+    </step>
+</job>
+----
+
+====
+
 A difference between `ItemProcessor` and `ItemReader` or `ItemWriter` is that an
 `ItemProcessor` is optional for a `Step`.
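+
+Because `ItemProcessor` declares a single `process` method, a simple transformation can
+also be supplied inline as a lambda instead of a dedicated class. The following is a
+minimal sketch (not part of the original sample), assuming the `Foo` and `Bar` types from
+the earlier example and a `Bar` constructor that accepts a `Foo`:
+
+[source, java]
+----
+@Bean
+public ItemProcessor<Foo, Bar> fooProcessor() {
+    // Transform each Foo read from the input into the Bar that is written out.
+    return foo -> new Bar(foo);
+}
+----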
 [[chainingItemProcessors]]
-=== Chaining ItemProcessors
+== Chaining ItemProcessors
 
 Performing a single transformation is useful in many scenarios, but what if you want to
 "`chain`" together multiple `ItemProcessor` implementations? You can do so by using
@@ -185,31 +187,13 @@ compositeProcessor.setDelegates(itemProcessors);
 Just as with the previous example, you can configure the composite processor into the
 `Step`:
 
-.XML Configuration
-[source, xml, role="xmlContent"]
-----
-<job id="ioSampleJob">
-    <step name="step1">
-        <tasklet>
-            <chunk reader="fooReader" processor="compositeItemProcessor" writer="foobarWriter" commit-interval="2"/>
-        </tasklet>
-    </step>
-</job>
-
-<bean id="compositeItemProcessor"
-      class="org.springframework.batch.item.support.CompositeItemProcessor">
-    <property name="delegates">
-        <list>
-            <bean class="..FooProcessor" />
-            <bean class="..BarProcessor" />
-        </list>
-    </property>
-</bean>
-----
-
+[tabs]
+====
+Java::
++
 .Java Configuration
-[source, java, role="javaContent"]
+[source, java]
 ----
 @Bean
 public Job ioSampleJob(JobRepository jobRepository) {
@@ -242,8 +226,37 @@ public CompositeItemProcessor compositeProcessor() {
 }
 ----
 
+XML::
++
+.XML Configuration
+[source, xml]
+----
+<job id="ioSampleJob">
+    <step name="step1">
+        <tasklet>
+            <chunk reader="fooReader" processor="compositeItemProcessor" writer="foobarWriter" commit-interval="2"/>
+        </tasklet>
+    </step>
+</job>
+
+<bean id="compositeItemProcessor"
+      class="org.springframework.batch.item.support.CompositeItemProcessor">
+    <property name="delegates">
+        <list>
+            <bean class="..FooProcessor" />
+            <bean class="..BarProcessor" />
+        </list>
+    </property>
+</bean>
+----
+
+====
+
+
 [[filteringRecords]]
-=== Filtering Records
+== Filtering Records
 
 One typical use for an item processor is to filter out records before they are passed to
 the `ItemWriter`. Filtering is an action distinct from skipping. Skipping indicates that
@@ -263,9 +276,9 @@ the `ItemWriter`. An exception thrown from the `ItemProcessor` results in a skip.
 
 [[validatingInput]]
-=== Validating Input
+== Validating Input
 
-The <<readersAndWriters.adoc#readersAndWriters,ItemReaders and ItemWriters>> chapter discusses multiple approaches to parsing input.
+The xref:readersAndWriters.adoc[ItemReaders and ItemWriters] chapter discusses multiple approaches to parsing input.
 Each major implementation throws an exception if it is not "`well formed.`" The
 `FixedLengthTokenizer` throws an exception if a range of data is missing. Similarly,
 attempting to access an index in a `RowMapper` or `FieldSetMapper` that does not exist or
@@ -291,22 +304,13 @@ The contract is that the `validate` method throws an exception if the object is
 and returns normally if it is valid. Spring Batch provides a `ValidatingItemProcessor`, as
 the following bean definition shows:
 
-.XML Configuration
-[source, xml, role="xmlContent"]
-----
-<bean class="org.springframework.batch.item.validator.ValidatingItemProcessor">
-    <property name="validator" ref="validator"/>
-</bean>
-
-<bean id="validator" class="org.springframework.batch.item.validator.SpringValidator">
-    <property name="validator">
-        <bean class="org.springframework.batch.sample.domain.trade.internal.validator.TradeValidator"/>
-    </property>
-</bean>
-----
-
+[tabs]
+====
+Java::
++
 .Java Configuration
-[source, java, role="javaContent"]
+[source, java]
 ----
 @Bean
 public ValidatingItemProcessor itemProcessor() {
@@ -327,6 +331,25 @@ public SpringValidator validator() {
 }
 ----
 
+XML::
++
+.XML Configuration
+[source, xml]
+----
+<bean class="org.springframework.batch.item.validator.ValidatingItemProcessor">
+    <property name="validator" ref="validator"/>
+</bean>
+
+<bean id="validator" class="org.springframework.batch.item.validator.SpringValidator">
+    <property name="validator">
+        <bean class="org.springframework.batch.sample.domain.trade.internal.validator.TradeValidator"/>
+    </property>
+</bean>
+----
+
+====
+
+
 You can also use the `BeanValidatingItemProcessor` to validate items annotated with the
 Bean Validation API (JSR-303) annotations. For example, consider the following type `Person`:
@@ -367,7 +390,7 @@ public BeanValidatingItemProcessor beanValidatingItemProcessor() throws
 ----
 
 [[faultTolerant]]
-=== Fault Tolerance
+== Fault Tolerance
 
 When a chunk is rolled back, items that have been cached during reading may be
 reprocessed. If a step is configured to be fault-tolerant (typically by using skip or
diff --git a/spring-batch-docs/modules/ROOT/pages/readers-and-writers/custom.adoc b/spring-batch-docs/modules/ROOT/pages/readers-and-writers/custom.adoc
new file mode 100644
index 0000000000..a20d66fcfc
--- /dev/null
+++ b/spring-batch-docs/modules/ROOT/pages/readers-and-writers/custom.adoc
@@ -0,0 +1,188 @@
+[[customReadersWriters]]
+= Creating Custom ItemReaders and ItemWriters
+
+So far, this chapter has discussed the basic contracts of reading and writing in Spring
+Batch and some common implementations for doing so. However, these are all fairly
+generic, and there are many potential scenarios that may not be covered by out-of-the-box
+implementations. This section shows, by using a simple example, how to create a custom
+`ItemReader` and `ItemWriter` implementation and implement their contracts correctly. The
+`ItemReader` also implements `ItemStream`, in order to illustrate how to make a reader or
+writer restartable.
+
+[[customReader]]
+== Custom `ItemReader` Example
+
+For the purpose of this example, we create a simple `ItemReader` implementation that
+reads from a provided list. We start by implementing the most basic contract of
+`ItemReader`, the `read` method, as shown in the following code:
+
+[source, java]
+----
+public class CustomItemReader<T> implements ItemReader<T> {
+
+    List<T> items;
+
+    public CustomItemReader(List<T> items) {
+        this.items = items;
+    }
+
+    public T read() throws Exception, UnexpectedInputException,
+            NonTransientResourceException, ParseException {
+
+        if (!items.isEmpty()) {
+            return items.remove(0);
+        }
+        return null;
+    }
+}
+----
+
+The preceding class takes a list of items and returns them one at a time, removing each
+from the list. When the list is empty, it returns `null`, thus satisfying the most basic
+requirements of an `ItemReader`, as illustrated in the following test code:
+
+[source, java]
+----
+List<String> items = new ArrayList<>();
+items.add("1");
+items.add("2");
+items.add("3");
+
+ItemReader<String> itemReader = new CustomItemReader<>(items);
+assertEquals("1", itemReader.read());
+assertEquals("2", itemReader.read());
+assertEquals("3", itemReader.read());
+assertNull(itemReader.read());
+----
+
+[[restartableReader]]
+=== Making the `ItemReader` Restartable
+
+The final challenge is to make the `ItemReader` restartable. Currently, if processing is
+interrupted and begins again, the `ItemReader` must start at the beginning. This is
+actually valid in many scenarios, but it is sometimes preferable that a batch job
+restarts where it left off. The key discriminant is often whether the reader is stateful
+or stateless. A stateless reader does not need to worry about restartability, but a
+stateful one has to try to reconstitute its last known state on restart. For this reason,
+we recommend that you keep custom readers stateless if possible, so you need not worry
+about restartability.
+
+If you do need to store state, then the `ItemStream` interface should be used:
+
+[source, java]
+----
+public class CustomItemReader<T> implements ItemReader<T>, ItemStream {
+
+    List<T> items;
+    int currentIndex = 0;
+    private static final String CURRENT_INDEX = "current.index";
+
+    public CustomItemReader(List<T> items) {
+        this.items = items;
+    }
+
+    public T read() throws Exception, UnexpectedInputException,
+            ParseException, NonTransientResourceException {
+
+        if (currentIndex < items.size()) {
+            return items.get(currentIndex++);
+        }
+
+        return null;
+    }
+
+    public void open(ExecutionContext executionContext) throws ItemStreamException {
+        if (executionContext.containsKey(CURRENT_INDEX)) {
+            currentIndex = (int) executionContext.getLong(CURRENT_INDEX);
+        }
+        else {
+            currentIndex = 0;
+        }
+    }
+
+    public void update(ExecutionContext executionContext) throws ItemStreamException {
+        executionContext.putLong(CURRENT_INDEX, currentIndex);
+    }
+
+    public void close() throws ItemStreamException {}
+}
+----
+
+On each call to the `ItemStream` `update` method, the current index of the `ItemReader`
+is stored in the provided `ExecutionContext` with a key of 'current.index'. When the
+`ItemStream` `open` method is called, the `ExecutionContext` is checked to see if it
+contains an entry with that key. If the key is found, then the current index is moved to
+that location. This is a fairly trivial example, but it still meets the general contract:
+
+[source, java]
+----
+ExecutionContext executionContext = new ExecutionContext();
+((ItemStream)itemReader).open(executionContext);
+assertEquals("1", itemReader.read());
+((ItemStream)itemReader).update(executionContext);
+
+List<String> items = new ArrayList<>();
+items.add("1");
+items.add("2");
+items.add("3");
+itemReader = new CustomItemReader<>(items);
+
+((ItemStream)itemReader).open(executionContext);
+assertEquals("2", itemReader.read());
+----
+
+Most `ItemReaders` have much more sophisticated restart logic. The
+`JdbcCursorItemReader`, for example, stores the row ID of the last processed row in the
+cursor.
+
+It is also worth noting that the key used within the `ExecutionContext` should not be
+trivial. That is because the same `ExecutionContext` is used for all `ItemStreams` within
+a `Step`. In most cases, simply prepending the key with the class name should be enough
+to guarantee uniqueness. However, in the rare cases where two of the same type of
+`ItemStream` are used in the same step (which can happen if two files are needed for
+output), a more unique name is needed. For this reason, many of the Spring Batch
+`ItemReader` and `ItemWriter` implementations have a `setName()` property that lets this
+key name be overridden.
+
+[[customWriter]]
+== Custom `ItemWriter` Example
+
+Implementing a Custom `ItemWriter` is similar in many ways to the `ItemReader` example
+above but differs in enough ways as to warrant its own example. However, adding
+restartability is essentially the same, so it is not covered in this example. As with the
+`ItemReader` example, a `List` is used in order to keep the example as simple as
+possible:
+
+[source, java]
+----
+public class CustomItemWriter<T> implements ItemWriter<T> {
+
+    List<T> output = TransactionAwareProxyFactory.createTransactionalList();
+
+    public void write(Chunk<? extends T> items) throws Exception {
+        output.addAll(items.getItems());
+    }
+
+    public List<T> getOutput() {
+        return output;
+    }
+}
+----
+
+[[restartableWriter]]
+=== Making the `ItemWriter` Restartable
+
+To make the `ItemWriter` restartable, we would follow the same process as for the
+`ItemReader`, adding and implementing the `ItemStream` interface to synchronize the
+execution context. In the example, we might have to count the number of items processed
+and add that as a footer record. If we needed to do that, we could implement
+`ItemStream` in our `ItemWriter` so that the counter was reconstituted from the execution
+context if the stream was re-opened.
+
+In many realistic cases, custom `ItemWriters` also delegate to another writer that itself
+is restartable (for example, when writing to a file), or else it writes to a
+transactional resource and so does not need to be restartable, because it is stateless.
+When you have a stateful writer you should probably be sure to implement `ItemStream` as
+well as `ItemWriter`. Remember also that the client of the writer needs to be aware of
+the `ItemStream`, so you may need to register it as a stream in the configuration.
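+
+The following is a minimal sketch of that approach, assuming the footer-count idea
+described above (the key name and the list-based output are illustrative, not a
+prescribed implementation):
+
+[source, java]
+----
+public class CustomItemWriter<T> implements ItemWriter<T>, ItemStream {
+
+    private static final String WRITE_COUNT = "custom.writer.write.count";
+
+    private List<T> output = TransactionAwareProxyFactory.createTransactionalList();
+    private long writeCount = 0;
+
+    public void write(Chunk<? extends T> items) throws Exception {
+        output.addAll(items.getItems());
+        writeCount += items.size();
+    }
+
+    public void open(ExecutionContext executionContext) throws ItemStreamException {
+        // Reconstitute the counter when the stream is re-opened on a restart.
+        if (executionContext.containsKey(WRITE_COUNT)) {
+            writeCount = executionContext.getLong(WRITE_COUNT);
+        }
+    }
+
+    public void update(ExecutionContext executionContext) throws ItemStreamException {
+        executionContext.putLong(WRITE_COUNT, writeCount);
+    }
+
+    public void close() throws ItemStreamException {
+        // A real writer might emit a footer record containing writeCount here.
+    }
+}
+----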
+ diff --git a/spring-batch-docs/modules/ROOT/pages/readers-and-writers/database.adoc b/spring-batch-docs/modules/ROOT/pages/readers-and-writers/database.adoc new file mode 100644 index 0000000000..004ced5373 --- /dev/null +++ b/spring-batch-docs/modules/ROOT/pages/readers-and-writers/database.adoc @@ -0,0 +1,747 @@ +[[database]] += Database + +Like most enterprise application styles, a database is the central storage mechanism for +batch. However, batch differs from other application styles due to the sheer size of the +datasets with which the system must work. If a SQL statement returns 1 million rows, the +result set probably holds all returned results in memory until all rows have been read. +Spring Batch provides two types of solutions for this problem: + +* xref:readers-and-writers/database.adoc#cursorBasedItemReaders[Cursor-based `ItemReader` Implementations] +* xref:readers-and-writers/database.adoc#pagingItemReaders[Paging `ItemReader` Implementations] + +[[cursorBasedItemReaders]] +== Cursor-based `ItemReader` Implementations + +Using a database cursor is generally the default approach of most batch developers, +because it is the database's solution to the problem of 'streaming' relational data. The +Java `ResultSet` class is essentially an object oriented mechanism for manipulating a +cursor. A `ResultSet` maintains a cursor to the current row of data. Calling `next` on a +`ResultSet` moves this cursor to the next row. The Spring Batch cursor-based `ItemReader` +implementation opens a cursor on initialization and moves the cursor forward one row for +every call to `read`, returning a mapped object that can be used for processing. The +`close` method is then called to ensure all resources are freed up. The Spring core +`JdbcTemplate` gets around this problem by using the callback pattern to completely map +all rows in a `ResultSet` and close before returning control back to the method caller. +However, in batch, this must wait until the step is complete. The following image shows a +generic diagram of how a cursor-based `ItemReader` works. Note that, while the example +uses SQL (because SQL is so widely known), any technology could implement the basic +approach. + +.Cursor Example +image::cursorExample.png[Cursor Example, scaledwidth="60%"] + +This example illustrates the basic pattern. Given a 'FOO' table, which has three columns: +`ID`, `NAME`, and `BAR`, select all rows with an ID greater than 1 but less than 7. This +puts the beginning of the cursor (row 1) on ID 2. The result of this row should be a +completely mapped `Foo` object. Calling `read()` again moves the cursor to the next row, +which is the `Foo` with an ID of 3. The results of these reads are written out after each +`read`, allowing the objects to be garbage collected (assuming no instance variables are +maintaining references to them). + +[[JdbcCursorItemReader]] +=== `JdbcCursorItemReader` + +`JdbcCursorItemReader` is the JDBC implementation of the cursor-based technique. It works +directly with a `ResultSet` and requires an SQL statement to run against a connection +obtained from a `DataSource`. 
The following database schema is used as an example:
+
+[source, sql]
+----
+CREATE TABLE CUSTOMER (
+   ID BIGINT IDENTITY PRIMARY KEY,
+   NAME VARCHAR(45),
+   CREDIT FLOAT
+);
+----
+
+Many people prefer to use a domain object for each row, so the following example uses an
+implementation of the `RowMapper` interface to map a `CustomerCredit` object:
+
+[source, java]
+----
+public class CustomerCreditRowMapper implements RowMapper<CustomerCredit> {
+
+    public static final String ID_COLUMN = "id";
+    public static final String NAME_COLUMN = "name";
+    public static final String CREDIT_COLUMN = "credit";
+
+    public CustomerCredit mapRow(ResultSet rs, int rowNum) throws SQLException {
+        CustomerCredit customerCredit = new CustomerCredit();
+
+        customerCredit.setId(rs.getInt(ID_COLUMN));
+        customerCredit.setName(rs.getString(NAME_COLUMN));
+        customerCredit.setCredit(rs.getBigDecimal(CREDIT_COLUMN));
+
+        return customerCredit;
+    }
+}
+----
+
+Because `JdbcCursorItemReader` shares key interfaces with `JdbcTemplate`, it is useful to
+see an example of how to read in this data with `JdbcTemplate`, in order to contrast it
+with the `ItemReader`. For the purposes of this example, assume there are 1,000 rows in
+the `CUSTOMER` table. The first example uses `JdbcTemplate`:
+
+[source, java]
+----
+//For simplicity's sake, assume a dataSource has already been obtained
+JdbcTemplate jdbcTemplate = new JdbcTemplate(dataSource);
+List<CustomerCredit> customerCredits = jdbcTemplate.query("SELECT ID, NAME, CREDIT from CUSTOMER",
+                                                          new CustomerCreditRowMapper());
+----
+
+After running the preceding code snippet, the `customerCredits` list contains 1,000
+`CustomerCredit` objects. In the query method, a connection is obtained from the
+`DataSource`, the provided SQL is run against it, and the `mapRow` method is called for
+each row in the `ResultSet`. Contrast this with the approach of the
+`JdbcCursorItemReader`, shown in the following example:
+
+[source, java]
+----
+JdbcCursorItemReader<CustomerCredit> itemReader = new JdbcCursorItemReader<>();
+itemReader.setDataSource(dataSource);
+itemReader.setSql("SELECT ID, NAME, CREDIT from CUSTOMER");
+itemReader.setRowMapper(new CustomerCreditRowMapper());
+int counter = 0;
+ExecutionContext executionContext = new ExecutionContext();
+itemReader.open(executionContext);
+CustomerCredit customerCredit = itemReader.read();
+while (customerCredit != null) {
+    counter++;
+    customerCredit = itemReader.read();
+}
+itemReader.close();
+----
+
+After running the preceding code snippet, the counter equals 1,000. If the code above had
+put the returned `customerCredit` into a list, the result would have been exactly the
+same as with the `JdbcTemplate` example. However, the big advantage of the `ItemReader`
+is that it allows items to be 'streamed'. The `read` method can be called once, the item
+can be written out by an `ItemWriter`, and then the next item can be obtained with
+`read`. This allows item reading and writing to be done in 'chunks' and committed
+periodically, which is the essence of high performance batch processing. Furthermore, it
+is easily configured for injection into a Spring Batch `Step`.
+
+
+[tabs]
+====
+Java::
++
+The following example shows how to inject an `ItemReader` into a `Step` in Java:
++
+.Java Configuration
+[source, java]
+----
+@Bean
+public JdbcCursorItemReader<CustomerCredit> itemReader() {
+    return new JdbcCursorItemReaderBuilder<CustomerCredit>()
+            .dataSource(this.dataSource)
+            .name("creditReader")
+            .sql("select ID, NAME, CREDIT from CUSTOMER")
+            .rowMapper(new CustomerCreditRowMapper())
+            .build();
+}
+----
+
+XML::
++
+The following example shows how to inject an `ItemReader` into a `Step` in XML:
++
+.XML Configuration
+[source, xml]
+----
+<bean id="itemReader" class="org.spr...JdbcCursorItemReader">
+    <property name="dataSource" ref="dataSource"/>
+    <property name="sql" value="select ID, NAME, CREDIT from CUSTOMER"/>
+    <property name="rowMapper">
+        <bean class="org.springframework.batch.sample.domain.CustomerCreditRowMapper"/>
+    </property>
+</bean>
+----
+
+====
+
+
+
+
+[[JdbcCursorItemReaderProperties]]
+==== Additional Properties
+
+Because there are so many varying options for opening a cursor in Java, there are many
+properties on the `JdbcCursorItemReader` that can be set, as described in the following
+table:
+
+.JdbcCursorItemReader Properties
+
+|===============
+|ignoreWarnings|Determines whether or not SQLWarnings are logged or cause an exception.
+The default is `true` (meaning that warnings are logged).
+|fetchSize|Gives the JDBC driver a hint as to the number of rows that should be fetched
+from the database when more rows are needed by the `ResultSet` object used by the
+`ItemReader`. By default, no hint is given.
+|maxRows|Sets the limit for the maximum number of rows the underlying `ResultSet` can
+hold at any one time.
+|queryTimeout|Sets the number of seconds the driver waits for a `Statement` object to
+run. If the limit is exceeded, a `DataAccessException` is thrown. (Consult your driver
+vendor documentation for details).
+|verifyCursorPosition|Because the same `ResultSet` held by the `ItemReader` is passed to
+the `RowMapper`, it is possible for users to call `ResultSet.next()` themselves, which
+could cause issues with the reader's internal count. Setting this value to `true` causes
+an exception to be thrown if the cursor position is not the same after the `RowMapper`
+call as it was before.
+|saveState|Indicates whether or not the reader's state should be saved in the
+`ExecutionContext` provided by `ItemStream#update(ExecutionContext)`. The default is
+`true`.
+|driverSupportsAbsolute|Indicates whether the JDBC driver supports
+setting the absolute row on a `ResultSet`. It is recommended that this is set to `true`
+for JDBC drivers that support `ResultSet.absolute()`, as it may improve performance,
+especially if a step fails while working with a large data set. Defaults to `false`.
+|setUseSharedExtendedConnection| Indicates whether the connection
+used for the cursor should be used by all other processing, thus sharing the same
+transaction. If this is set to `false`, then the cursor is opened with its own connection
+and does not participate in any transactions started for the rest of the step processing.
+If you set this flag to `true` then you must wrap the DataSource in an
+`ExtendedConnectionDataSourceProxy` to prevent the connection from being closed and
+released after each commit. When you set this option to `true`, the statement used to
+open the cursor is created with both 'READ_ONLY' and 'HOLD_CURSORS_OVER_COMMIT' options.
+This allows holding the cursor open over transaction start and commits performed in the
+step processing. To use this feature, you need a database that supports this and a JDBC
+driver supporting JDBC 3.0 or later. Defaults to `false`.
+|===============
+
+[[HibernateCursorItemReader]]
+=== `HibernateCursorItemReader`
+
+Just as normal Spring users make important decisions about whether or not to use ORM
+solutions, which affect whether or not they use a `JdbcTemplate` or a
+`HibernateTemplate`, Spring Batch users have the same options.
+`HibernateCursorItemReader` is the Hibernate implementation of the cursor technique.
+Hibernate's usage in batch has been fairly controversial. This has largely been because
+Hibernate was originally developed to support online application styles. However, that
+does not mean it cannot be used for batch processing. The easiest approach for solving
+this problem is to use a `StatelessSession` rather than a standard session. This removes
+all of the caching and dirty checking Hibernate employs that can cause issues in a
+batch scenario. For more information on the differences between stateless and normal
+Hibernate sessions, refer to the documentation of your specific Hibernate release. The
+`HibernateCursorItemReader` lets you declare an HQL statement and pass in a
+`SessionFactory`, which will pass back one item per call to read in the same basic
+fashion as the `JdbcCursorItemReader`. The following example configuration uses the same
+'customer credit' example as the JDBC reader:
+
+[source, java]
+----
+HibernateCursorItemReader<CustomerCredit> itemReader = new HibernateCursorItemReader<>();
+itemReader.setQueryString("from CustomerCredit");
+//For simplicity's sake, assume sessionFactory has already been obtained.
+itemReader.setSessionFactory(sessionFactory);
+itemReader.setUseStatelessSession(true);
+int counter = 0;
+ExecutionContext executionContext = new ExecutionContext();
+itemReader.open(executionContext);
+CustomerCredit customerCredit = itemReader.read();
+while (customerCredit != null) {
+    counter++;
+    customerCredit = itemReader.read();
+}
+itemReader.close();
+----
+
+This configured `ItemReader` returns `CustomerCredit` objects in the exact same manner
+as described by the `JdbcCursorItemReader`, assuming Hibernate mapping files have been
+created correctly for the `Customer` table. The 'useStatelessSession' property defaults
+to `true` but has been added here to draw attention to the ability to switch it on or off.
+It is also worth noting that the fetch size of the underlying cursor can be set with the
+`setFetchSize` property. As with `JdbcCursorItemReader`, configuration is
+straightforward.
+
+
+[tabs]
+====
+Java::
++
+The following example shows how to inject a Hibernate `ItemReader` in Java:
++
+.Java Configuration
+[source, java]
+----
+@Bean
+public HibernateCursorItemReader<CustomerCredit> itemReader(SessionFactory sessionFactory) {
+    return new HibernateCursorItemReaderBuilder<CustomerCredit>()
+            .name("creditReader")
+            .sessionFactory(sessionFactory)
+            .queryString("from CustomerCredit")
+            .build();
+}
+----
+
+XML::
++
+The following example shows how to inject a Hibernate `ItemReader` in XML:
++
+.XML Configuration
+[source, xml]
+----
+<bean id="itemReader"
+      class="org.springframework.batch.item.database.HibernateCursorItemReader">
+    <property name="sessionFactory" ref="sessionFactory" />
+    <property name="queryString" value="from CustomerCredit" />
+</bean>
+----
+
+====
+
+
+
+
+[[StoredProcedureItemReader]]
+=== `StoredProcedureItemReader`
+
+Sometimes it is necessary to obtain the cursor data by using a stored procedure. The
+`StoredProcedureItemReader` works like the `JdbcCursorItemReader`, except that, instead
+of running a query to obtain a cursor, it runs a stored procedure that returns a cursor.
+The stored procedure can return the cursor in three different ways:
+
+
+* As a returned `ResultSet` (used by SQL Server, Sybase, DB2, Derby, and MySQL).
+* As a ref-cursor returned as an out parameter (used by Oracle and PostgreSQL).
+* As the return value of a stored function call.
+
+
+[tabs]
+====
+Java::
++
+The following Java example configuration uses the same 'customer credit' example as
+earlier examples:
++
+.Java Configuration
+[source, java]
+----
+@Bean
+public StoredProcedureItemReader reader(DataSource dataSource) {
+    StoredProcedureItemReader reader = new StoredProcedureItemReader();
+
+    reader.setDataSource(dataSource);
+    reader.setProcedureName("sp_customer_credit");
+    reader.setRowMapper(new CustomerCreditRowMapper());
+
+    return reader;
+}
+----
+//TODO: Fix the above config to use a builder once we have one for it.
+
+XML::
++
+The following XML example configuration uses the same 'customer credit' example as earlier
+examples:
++
+.XML Configuration
+[source, xml]
+----
+<bean id="reader" class="o.s.batch.item.database.StoredProcedureItemReader">
+    <property name="dataSource" ref="dataSource"/>
+    <property name="procedureName" value="sp_customer_credit"/>
+    <property name="rowMapper">
+        <bean class="org.springframework.batch.sample.domain.CustomerCreditRowMapper"/>
+    </property>
+</bean>
+----
+====
+
+
+
+The preceding example relies on the stored procedure to provide a `ResultSet` as a
+returned result (option 1 from earlier).
+
+If the stored procedure returned a `ref-cursor` (option 2), then we would need to provide
+the position of the out parameter that is the returned `ref-cursor`.
+
+[tabs]
+====
+Java::
++
+The following example shows how to work with the first parameter being a ref-cursor in
+Java:
++
+.Java Configuration
+[source, java]
+----
+@Bean
+public StoredProcedureItemReader reader(DataSource dataSource) {
+    StoredProcedureItemReader reader = new StoredProcedureItemReader();
+
+    reader.setDataSource(dataSource);
+    reader.setProcedureName("sp_customer_credit");
+    reader.setRowMapper(new CustomerCreditRowMapper());
+    reader.setRefCursorPosition(1);
+
+    return reader;
+}
+----
+
+XML::
++
+The following example shows how to work with the first parameter being a ref-cursor in
+XML:
++
+.XML Configuration
+[source, xml]
+----
+<bean id="reader" class="o.s.batch.item.database.StoredProcedureItemReader">
+    <property name="dataSource" ref="dataSource"/>
+    <property name="procedureName" value="sp_customer_credit"/>
+    <property name="refCursorPosition" value="1"/>
+    <property name="rowMapper">
+        <bean class="org.springframework.batch.sample.domain.CustomerCreditRowMapper"/>
+    </property>
+</bean>
+----
+====
+
+
+
+If the cursor was returned from a stored function (option 3), we would need to set the
+property "[maroon]#function#" to `true`. It defaults to `false`.
+
+
+[tabs]
+====
+Java::
++
+The following example shows how to set the property to `true` in Java:
++
+.Java Configuration
+[source, java]
+----
+@Bean
+public StoredProcedureItemReader reader(DataSource dataSource) {
+    StoredProcedureItemReader reader = new StoredProcedureItemReader();
+
+    reader.setDataSource(dataSource);
+    reader.setProcedureName("sp_customer_credit");
+    reader.setRowMapper(new CustomerCreditRowMapper());
+    reader.setFunction(true);
+
+    return reader;
+}
+----
+
+XML::
++
+The following example shows how to set the property to `true` in XML:
++
+.XML Configuration
+[source, xml]
+----
+<bean id="reader" class="o.s.batch.item.database.StoredProcedureItemReader">
+    <property name="dataSource" ref="dataSource"/>
+    <property name="procedureName" value="sp_customer_credit"/>
+    <property name="function" value="true"/>
+    <property name="rowMapper">
+        <bean class="org.springframework.batch.sample.domain.CustomerCreditRowMapper"/>
+    </property>
+</bean>
+----
+====
+
+
+
+In all of these cases, we need to define a `RowMapper` as well as a `DataSource` and the
+actual procedure name.
+
+If the stored procedure or function takes in parameters, they must be declared and
+set by using the `parameters` property. The following example, for Oracle, declares three
+parameters. The first one is the `out` parameter that returns the ref-cursor, and the
+second and third are `in` parameters that take values of type `INTEGER`.
+
+
+[tabs]
+====
+Java::
++
+The following example shows how to work with parameters in Java:
++
+.Java Configuration
+[source, java]
+----
+@Bean
+public StoredProcedureItemReader reader(DataSource dataSource) {
+    List<SqlParameter> parameters = new ArrayList<>();
+    parameters.add(new SqlOutParameter("newId", OracleTypes.CURSOR));
+    parameters.add(new SqlParameter("amount", Types.INTEGER));
+    parameters.add(new SqlParameter("custId", Types.INTEGER));
+
+    StoredProcedureItemReader reader = new StoredProcedureItemReader();
+
+    reader.setDataSource(dataSource);
+    reader.setProcedureName("spring.cursor_func");
+    reader.setParameters(parameters);
+    reader.setRefCursorPosition(1);
+    reader.setRowMapper(rowMapper());
+    reader.setPreparedStatementSetter(parameterSetter());
+
+    return reader;
+}
+----
+
+XML::
++
+The following example shows how to work with parameters in XML:
++
+.XML Configuration
+[source, xml]
+----
+<bean id="reader" class="o.s.batch.item.database.StoredProcedureItemReader">
+    <property name="dataSource" ref="dataSource"/>
+    <property name="procedureName" value="spring.cursor_func"/>
+    <property name="parameters">
+        <list>
+            <bean class="org.springframework.jdbc.core.SqlOutParameter">
+                <constructor-arg index="0" value="newid"/>
+                <constructor-arg index="1">
+                    <util:constant static-field="oracle.jdbc.OracleTypes.CURSOR"/>
+                </constructor-arg>
+            </bean>
+            <bean class="org.springframework.jdbc.core.SqlParameter">
+                <constructor-arg index="0" value="amount"/>
+                <constructor-arg index="1">
+                    <util:constant static-field="java.sql.Types.INTEGER"/>
+                </constructor-arg>
+            </bean>
+            <bean class="org.springframework.jdbc.core.SqlParameter">
+                <constructor-arg index="0" value="custid"/>
+                <constructor-arg index="1">
+                    <util:constant static-field="java.sql.Types.INTEGER"/>
+                </constructor-arg>
+            </bean>
+        </list>
+    </property>
+    <property name="refCursorPosition" value="1"/>
+    <property name="rowMapper" ref="rowMapper"/>
+    <property name="preparedStatementSetter" ref="parameterSetter"/>
+</bean>
+----
+
+====
+
+
+
+In addition to the parameter declarations, we need to specify a `PreparedStatementSetter`
+implementation that sets the parameter values for the call. This works the same as for
+the `JdbcCursorItemReader` above. All the additional properties listed in
+xref:readers-and-writers/database.adoc#JdbcCursorItemReaderProperties[Additional Properties] apply to the `StoredProcedureItemReader` as well.
+
+[[pagingItemReaders]]
+== Paging `ItemReader` Implementations
+
+An alternative to using a database cursor is running multiple queries where each query
+fetches a portion of the results. We refer to this portion as a page. Each query must
+specify the starting row number and the number of rows that we want returned in the page.
+
+[[JdbcPagingItemReader]]
+=== `JdbcPagingItemReader`
+
+One implementation of a paging `ItemReader` is the `JdbcPagingItemReader`. The
+`JdbcPagingItemReader` needs a `PagingQueryProvider` responsible for providing the SQL
+queries used to retrieve the rows making up a page. Since each database has its own
+strategy for providing paging support, we need to use a different `PagingQueryProvider`
+for each supported database type. There is also the `SqlPagingQueryProviderFactoryBean`
+that auto-detects the database being used and determines the appropriate
+`PagingQueryProvider` implementation. This simplifies the configuration and is the
+recommended best practice.
+
+The `SqlPagingQueryProviderFactoryBean` requires that you specify a `select` clause and a
+`from` clause. You can also provide an optional `where` clause. These clauses and the
+required `sortKey` are used to build an SQL statement.
+
+NOTE: It is important to have a unique key constraint on the `sortKey` to guarantee that
+ no data is lost between executions.
+
+After the reader has been opened, it passes back one item per call to `read` in the same
+basic fashion as any other `ItemReader`. The paging happens behind the scenes when
+additional rows are needed.
+
+
+[tabs]
+====
+Java::
++
+The following Java example configuration uses a similar 'customer credit' example as the
+cursor-based `ItemReaders` shown previously:
++
+.Java Configuration
+[source, java]
+----
+@Bean
+public JdbcPagingItemReader<CustomerCredit> itemReader(DataSource dataSource, PagingQueryProvider queryProvider) {
+    Map<String, Object> parameterValues = new HashMap<>();
+    parameterValues.put("status", "NEW");
+
+    return new JdbcPagingItemReaderBuilder<CustomerCredit>()
+            .name("creditReader")
+            .dataSource(dataSource)
+            .queryProvider(queryProvider)
+            .parameterValues(parameterValues)
+            .rowMapper(customerCreditMapper())
+            .pageSize(1000)
+            .build();
+}
+
+@Bean
+public SqlPagingQueryProviderFactoryBean queryProvider() {
+    SqlPagingQueryProviderFactoryBean provider = new SqlPagingQueryProviderFactoryBean();
+
+    provider.setSelectClause("select id, name, credit");
+    provider.setFromClause("from customer");
+    provider.setWhereClause("where status=:status");
+    provider.setSortKey("id");
+
+    return provider;
+}
+----
+
+XML::
++
+The following XML example configuration uses a similar 'customer credit' example as the
+cursor-based `ItemReaders` shown previously:
++
+.XML Configuration
+[source, xml]
+----
+<bean id="itemReader" class="org.spr...JdbcPagingItemReader">
+    <property name="dataSource" ref="dataSource"/>
+    <property name="queryProvider">
+        <bean class="org.spr...SqlPagingQueryProviderFactoryBean">
+            <property name="selectClause" value="select id, name, credit"/>
+            <property name="fromClause" value="from customer"/>
+            <property name="whereClause" value="where status=:status"/>
+            <property name="sortKey" value="id"/>
+        </bean>
+    </property>
+    <property name="parameterValues">
+        <map>
+            <entry key="status" value="NEW"/>
+        </map>
+    </property>
+    <property name="pageSize" value="1000"/>
+    <property name="rowMapper" ref="customerMapper"/>
+</bean>
+----
+
+====
+
+
+
+This configured `ItemReader` returns `CustomerCredit` objects using the `RowMapper`,
+which must be specified. The 'pageSize' property determines the number of entities read
+from the database for each query run.
+
+The 'parameterValues' property can be used to specify a `Map` of parameter values for the
+query. If you use named parameters in the `where` clause, the key for each entry should
+match the name of the named parameter. If you use a traditional '?' placeholder, then the
+key for each entry should be the number of the placeholder, starting with 1.
+
+[[JpaPagingItemReader]]
+=== `JpaPagingItemReader`
+
+Another implementation of a paging `ItemReader` is the `JpaPagingItemReader`. JPA does
+not have a concept similar to the Hibernate `StatelessSession`, so we have to use other
+features provided by the JPA specification. Since JPA supports paging, this is a natural
+choice when it comes to using JPA for batch processing. After each page is read, the
+entities become detached and the persistence context is cleared, to allow the entities to
+be garbage collected once the page is processed.
+
+The `JpaPagingItemReader` lets you declare a JPQL statement and pass in an
+`EntityManagerFactory`. It then passes back one item per call to read in the same basic
+fashion as any other `ItemReader`. The paging happens behind the scenes when additional
+entities are needed.
+
+[tabs]
+====
+Java::
++
+The following Java example configuration uses the same 'customer credit' example as the
+JDBC reader shown previously:
++
+.Java Configuration
+[source, java]
+----
+@Bean
+public JpaPagingItemReader<CustomerCredit> itemReader() {
+    return new JpaPagingItemReaderBuilder<CustomerCredit>()
+            .name("creditReader")
+            .entityManagerFactory(entityManagerFactory())
+            .queryString("select c from CustomerCredit c")
+            .pageSize(1000)
+            .build();
+}
+----
+
+XML::
++
+The following XML example configuration uses the same 'customer credit' example as the
+JDBC reader shown previously:
++
+.XML Configuration
+[source, xml]
+----
+<bean id="itemReader" class="org.spr...JpaPagingItemReader">
+    <property name="entityManagerFactory" ref="entityManagerFactory"/>
+    <property name="queryString" value="select c from CustomerCredit c"/>
+    <property name="pageSize" value="1000"/>
+</bean>
+----
+
+====
+
+
+
+This configured `ItemReader` returns `CustomerCredit` objects in the exact same manner as
+described for the `JdbcPagingItemReader` above, assuming the `CustomerCredit` object has the
The 'pageSize' property determines the +number of entities read from the database for each query execution. + +[[databaseItemWriters]] +== Database ItemWriters + +While both flat files and XML files have a specific `ItemWriter` instance, there is no exact equivalent +in the database world. This is because transactions provide all the needed functionality. +`ItemWriter` implementations are necessary for files because they must act as if they're transactional, +keeping track of written items and flushing or clearing at the appropriate times. +Databases have no need for this functionality, since the write is already contained in a +transaction. Users can create their own DAOs that implement the `ItemWriter` interface or +use one from a custom `ItemWriter` that's written for generic processing concerns. Either +way, they should work without any issues. One thing to look out for is the performance +and error handling capabilities that are provided by batching the outputs. This is most +common when using hibernate as an `ItemWriter` but could have the same issues when using +JDBC batch mode. Batching database output does not have any inherent flaws, assuming we +are careful to flush and there are no errors in the data. However, any errors while +writing can cause confusion, because there is no way to know which individual item caused +an exception or even if any individual item was responsible, as illustrated in the +following image: + +.Error On Flush +image::errorOnFlush.png[Error On Flush, scaledwidth="60%"] + +If items are buffered before being written, any errors are not thrown until the buffer is +flushed just before a commit. For example, assume that 20 items are written per chunk, +and the 15th item throws a `DataIntegrityViolationException`. As far as the `Step` +is concerned, all 20 item are written successfully, since there is no way to know that an +error occurs until they are actually written. Once `Session#flush()` is called, the +buffer is emptied and the exception is hit. At this point, there is nothing the `Step` +can do. The transaction must be rolled back. Normally, this exception might cause the +item to be skipped (depending upon the skip/retry policies), and then it is not written +again. However, in the batched scenario, there is no way to know which item caused the +issue. The whole buffer was being written when the failure happened. The only way to +solve this issue is to flush after each item, as shown in the following image: + +.Error On Write +image::errorOnWrite.png[Error On Write, scaledwidth="60%"] + +This is a common use case, especially when using Hibernate, and the simple guideline for +implementations of `ItemWriter` is to flush on each call to `write()`. Doing so allows +for items to be skipped reliably, with Spring Batch internally taking care of the +granularity of the calls to `ItemWriter` after an error. + diff --git a/spring-batch-docs/modules/ROOT/pages/readers-and-writers/delegate-pattern-registering.adoc b/spring-batch-docs/modules/ROOT/pages/readers-and-writers/delegate-pattern-registering.adoc new file mode 100644 index 0000000000..ea69b8af81 --- /dev/null +++ b/spring-batch-docs/modules/ROOT/pages/readers-and-writers/delegate-pattern-registering.adoc @@ -0,0 +1,89 @@ +[[delegatePatternAndRegistering]] += The Delegate Pattern and Registering with the Step + +Note that the `CompositeItemWriter` is an example of the delegation pattern, which is +common in Spring Batch. 
+such as `StepListener`. If they do and if they are being used in conjunction with Spring
+Batch Core as part of a `Step` in a `Job`, then they almost certainly need to be
+registered manually with the `Step`. A reader, writer, or processor that is directly
+wired into the `Step` gets registered automatically if it implements `ItemStream` or a
+`StepListener` interface. However, because the delegates are not known to the `Step`,
+they need to be injected as listeners or streams (or both if appropriate).
+
+
+[tabs]
+====
+Java::
++
+The following example shows how to inject a delegate as a stream in Java:
++
+.Java Configuration
+[source, java]
+----
+@Bean
+public Job ioSampleJob(JobRepository jobRepository) {
+    return new JobBuilder("ioSampleJob", jobRepository)
+                .start(step1())
+                .build();
+}
+
+@Bean
+public Step step1(JobRepository jobRepository, PlatformTransactionManager transactionManager) {
+    return new StepBuilder("step1", jobRepository)
+                .chunk(2, transactionManager)
+                .reader(fooReader())
+                .processor(fooProcessor())
+                .writer(compositeItemWriter())
+                .stream(barWriter())
+                .build();
+}
+
+@Bean
+public CustomCompositeItemWriter compositeItemWriter() {
+
+    CustomCompositeItemWriter writer = new CustomCompositeItemWriter();
+
+    writer.setDelegate(barWriter());
+
+    return writer;
+}
+
+@Bean
+public BarWriter barWriter() {
+    return new BarWriter();
+}
+----
+
+XML::
++
+The following example shows how to inject a delegate as a stream in XML:
++
+.XML Configuration
+[source, xml]
+----
+<job id="ioSampleJob">
+    <step name="step1">
+        <tasklet>
+            <chunk reader="fooReader" processor="fooProcessor" writer="compositeItemWriter" commit-interval="2">
+                <streams>
+                    <stream ref="barWriter" />
+                </streams>
+            </chunk>
+        </tasklet>
+    </step>
+</job>
+
+<bean id="compositeItemWriter" class="...CustomCompositeItemWriter">
+    <property name="delegate" ref="barWriter" />
+</bean>
+
+<bean id="barWriter" class="...BarWriter" />
+----
+
+====
+
+
+
+
diff --git a/spring-batch-docs/modules/ROOT/pages/readers-and-writers/flat-files.adoc b/spring-batch-docs/modules/ROOT/pages/readers-and-writers/flat-files.adoc
new file mode 100644
index 0000000000..8e97d04a9d
--- /dev/null
+++ b/spring-batch-docs/modules/ROOT/pages/readers-and-writers/flat-files.adoc
@@ -0,0 +1,11 @@
+[[flatFiles]]
+= Flat Files
+:page-section-summary-toc: 1
+
+One of the most common mechanisms for interchanging bulk data has always been the flat
+file. Unlike XML, which has an agreed-upon standard for defining how it is structured
+(XSD), anyone reading a flat file must understand ahead of time exactly how the file is
+structured. In general, all flat files fall into two types: delimited and fixed length.
+Delimited files are those in which fields are separated by a delimiter, such as a comma.
+Fixed-length files have fields that are a set length.
+
diff --git a/spring-batch-docs/modules/ROOT/pages/readers-and-writers/flat-files/field-set.adoc b/spring-batch-docs/modules/ROOT/pages/readers-and-writers/flat-files/field-set.adoc
new file mode 100644
index 0000000000..f0f70d8676
--- /dev/null
+++ b/spring-batch-docs/modules/ROOT/pages/readers-and-writers/flat-files/field-set.adoc
@@ -0,0 +1,30 @@
+[[fieldSet]]
+= The `FieldSet`
+
+When working with flat files in Spring Batch, regardless of whether it is for input or
+output, one of the most important classes is the `FieldSet`. Many architectures and
+libraries contain abstractions for helping you read in from a file, but they usually
+return a `String` or an array of `String` objects. This really only gets you halfway
+there. A `FieldSet` is Spring Batch's abstraction for enabling the binding of fields from
+a file resource. It allows developers to work with file input in much the same way as
+they would work with database input. A `FieldSet` is conceptually similar to a JDBC
+`ResultSet`.
+A `FieldSet` requires only one argument: a `String` array of tokens.
+Optionally, you can also configure the names of the fields so that the fields may be
+accessed either by index or name, as patterned after `ResultSet`, as shown in the following
+example:
+
+[source, java]
+----
+String[] tokens = new String[]{"foo", "1", "true"};
+FieldSet fs = new DefaultFieldSet(tokens);
+String name = fs.readString(0);
+int value = fs.readInt(1);
+boolean booleanValue = fs.readBoolean(2);
+----
+
+There are many more options on the `FieldSet` interface, such as `Date`, `long`,
+`BigDecimal`, and so on. The biggest advantage of the `FieldSet` is that it provides
+consistent parsing of flat file input. Rather than each batch job parsing differently in
+potentially unexpected ways, it can be consistent, both when handling errors caused by a
+format exception and when doing simple data conversions.
+
diff --git a/spring-batch-docs/modules/ROOT/pages/readers-and-writers/flat-files/file-item-reader.adoc b/spring-batch-docs/modules/ROOT/pages/readers-and-writers/flat-files/file-item-reader.adoc
new file mode 100644
index 0000000000..d504af010e
--- /dev/null
+++ b/spring-batch-docs/modules/ROOT/pages/readers-and-writers/flat-files/file-item-reader.adoc
@@ -0,0 +1,660 @@
+[[flatFileItemReader]]
+= `FlatFileItemReader`
+
+A flat file is any type of file that contains at most two-dimensional (tabular) data.
+Reading flat files in the Spring Batch framework is facilitated by the
+`FlatFileItemReader` class, which provides basic functionality for reading and parsing flat
+files. The two most important required dependencies of `FlatFileItemReader` are
+`Resource` and `LineMapper`. The `LineMapper` interface is explored more in the next
+sections. The resource property represents a Spring Core `Resource`. Documentation
+explaining how to create beans of this type can be found in
+link:$$https://docs.spring.io/spring/docs/current/spring-framework-reference/core.html#resources$$[Spring
+Framework, Chapter 5. Resources]. Therefore, this guide does not go into the details of
+creating `Resource` objects beyond showing the following simple example:
+
+[source, java]
+----
+Resource resource = new FileSystemResource("resources/trades.csv");
+----
+
+In complex batch environments, the directory structures are often managed by the Enterprise Application Integration (EAI)
+infrastructure, where drop zones for external interfaces are established for moving files
+from FTP locations to batch processing locations and vice versa. File moving utilities
+are beyond the scope of the Spring Batch architecture, but it is not unusual for batch
+job streams to include file moving utilities as steps in the job stream. The batch
+architecture only needs to know how to locate the files to be processed. Spring Batch
+begins the process of feeding the data into the pipe from this starting point. However,
+link:$$https://projects.spring.io/spring-integration/$$[Spring Integration] provides many
+of these types of services.
+
+The other properties in `FlatFileItemReader` let you further specify how your data is
+interpreted, as described in the following table:
+
+.`FlatFileItemReader` Properties
+[options="header"]
+|===============
+|Property|Type|Description
+|comments|String[]|Specifies line prefixes that indicate comment rows.
+|encoding|String|Specifies what text encoding to use. The default value is `UTF-8`.
+|lineMapper|`LineMapper`|Converts a `String` to an `Object` representing the item.
+
+|linesToSkip|int|Number of lines to ignore at the top of the file.
+|recordSeparatorPolicy|RecordSeparatorPolicy|Determines where lines end and can, for
+example, continue over a line ending that occurs inside a quoted string.
+|resource|`Resource`|The resource from which to read.
+|skippedLinesCallback|LineCallbackHandler|Interface that passes the raw line content of
+the lines in the file to be skipped. If `linesToSkip` is set to 2, then this interface is
+called twice.
+|strict|boolean|In strict mode, the reader throws an exception when `open(ExecutionContext)`
+is called if the input resource does not exist. Otherwise, it logs the problem and continues.
+|===============
+
+[[lineMapper]]
+== `LineMapper`
+
+As with `RowMapper`, which takes a low-level construct such as `ResultSet` and returns
+an `Object`, flat file processing requires the same construct to convert a `String` line
+into an `Object`, as shown in the following interface definition:
+
+[source, java]
+----
+public interface LineMapper<T> {
+
+	T mapLine(String line, int lineNumber) throws Exception;
+
+}
+----
+
+The basic contract is that, given the current line and the line number with which it is
+associated, the mapper should return a resulting domain object. This is similar to
+`RowMapper`, in that each line is associated with its line number, just as each row in a
+`ResultSet` is tied to its row number. This allows the line number to be tied to the
+resulting domain object for identity comparison or for more informative logging. However,
+unlike `RowMapper`, the `LineMapper` is given a raw line which, as discussed above, only
+gets you halfway there. The line must be tokenized into a `FieldSet`, which can then be
+mapped to an object, as described later in this document.
+
+[[lineTokenizer]]
+== `LineTokenizer`
+
+An abstraction for turning a line of input into a `FieldSet` is necessary because there
+can be many formats of flat file data that need to be converted to a `FieldSet`. In
+Spring Batch, this interface is the `LineTokenizer`:
+
+[source, java]
+----
+public interface LineTokenizer {
+
+	FieldSet tokenize(String line);
+
+}
+----
+
+The contract of a `LineTokenizer` is such that, given a line of input (in theory the
+`String` could encompass more than one line), a `FieldSet` representing the line is
+returned. This `FieldSet` can then be passed to a `FieldSetMapper`. Spring Batch contains
+the following `LineTokenizer` implementations:
+
+* `DelimitedLineTokenizer`: Used for files where fields in a record are separated by a
+delimiter. The most common delimiter is a comma, but pipes or semicolons are often used
+as well.
+* `FixedLengthTokenizer`: Used for files where fields in a record are each a "fixed
+width". The width of each field must be defined for each record type.
+* `PatternMatchingCompositeLineTokenizer`: Determines which `LineTokenizer` among a list of
+tokenizers should be used on a particular line by checking against a pattern.
+
+[[fieldSetMapper]]
+== `FieldSetMapper`
+
+The `FieldSetMapper` interface defines a single method, `mapFieldSet`, which takes a
+`FieldSet` object and maps its contents to an object. This object may be a custom DTO, a
+domain object, or an array, depending on the needs of the job.
+The `FieldSetMapper` is used in conjunction with the `LineTokenizer` to translate a line
+of data from a resource into an object of the desired type, as shown in the following
+interface definition:
+
+[source, java]
+----
+public interface FieldSetMapper<T> {
+
+	T mapFieldSet(FieldSet fieldSet) throws BindException;
+
+}
+----
+
+The pattern used is the same as the `RowMapper` used by `JdbcTemplate`.
+
+[[defaultLineMapper]]
+== `DefaultLineMapper`
+
+Now that the basic interfaces for reading in flat files have been defined, it becomes
+clear that three basic steps are required:
+
+. Read one line from the file.
+. Pass the `String` line into the `LineTokenizer#tokenize()` method to retrieve a
+`FieldSet`.
+. Pass the `FieldSet` returned from tokenizing to a `FieldSetMapper`, returning the
+result from the `ItemReader#read()` method.
+
+The two interfaces described above represent two separate tasks: converting a line into a
+`FieldSet` and mapping a `FieldSet` to a domain object. Because the input of a
+`LineTokenizer` matches the input of the `LineMapper` (a line), and the output of a
+`FieldSetMapper` matches the output of the `LineMapper`, a default implementation that
+uses both a `LineTokenizer` and a `FieldSetMapper` is provided. The `DefaultLineMapper`,
+shown in the following class definition, represents the behavior most users need:
+
+[source, java]
+----
+
+public class DefaultLineMapper<T> implements LineMapper<T>, InitializingBean {
+
+	private LineTokenizer tokenizer;
+
+	private FieldSetMapper<T> fieldSetMapper;
+
+	public T mapLine(String line, int lineNumber) throws Exception {
+		return fieldSetMapper.mapFieldSet(tokenizer.tokenize(line));
+	}
+
+	public void setLineTokenizer(LineTokenizer tokenizer) {
+		this.tokenizer = tokenizer;
+	}
+
+	public void setFieldSetMapper(FieldSetMapper<T> fieldSetMapper) {
+		this.fieldSetMapper = fieldSetMapper;
+	}
+}
+----
+
+The above functionality is provided in a default implementation, rather than being built
+into the reader itself (as was done in previous versions of the framework), to allow users
+greater flexibility in controlling the parsing process, especially if access to the raw
+line is needed.
+
+[[simpleDelimitedFileReadingExample]]
+== Simple Delimited File Reading Example
+
+The following example illustrates how to read a flat file with an actual domain scenario.
+This particular batch job reads in football players from the following file:
+
+----
+ID,lastName,firstName,position,birthYear,debutYear
+"AbduKa00,Abdul-Jabbar,Karim,rb,1974,1996",
+"AbduRa00,Abdullah,Rabih,rb,1975,1999",
+"AberWa00,Abercrombie,Walter,rb,1959,1982",
+"AbraDa00,Abramowicz,Danny,wr,1945,1967",
+"AdamBo00,Adams,Bob,te,1946,1969",
+"AdamCh00,Adams,Charlie,wr,1979,2003"
+----
+
+The contents of this file are mapped to the following
+`Player` domain object:
+
+[source, java]
+----
+public class Player implements Serializable {
+
+	private String ID;
+	private String lastName;
+	private String firstName;
+	private String position;
+	private int birthYear;
+	private int debutYear;
+
+	public String toString() {
+		return "PLAYER:ID=" + ID + ",Last Name=" + lastName +
+			",First Name=" + firstName + ",Position=" + position +
+			",Birth Year=" + birthYear + ",DebutYear=" +
+			debutYear;
+	}
+
+	// setters and getters...
+
+}
+----
+
+To map a `FieldSet` into a `Player` object, a `FieldSetMapper` that returns players needs
+to be defined, as shown in the following example:
+
+[source, java]
+----
+protected static class PlayerFieldSetMapper implements FieldSetMapper<Player> {
+	public Player mapFieldSet(FieldSet fieldSet) {
+		Player player = new Player();
+
+		player.setID(fieldSet.readString(0));
+		player.setLastName(fieldSet.readString(1));
+		player.setFirstName(fieldSet.readString(2));
+		player.setPosition(fieldSet.readString(3));
+		player.setBirthYear(fieldSet.readInt(4));
+		player.setDebutYear(fieldSet.readInt(5));
+
+		return player;
+	}
+}
+----
+
+The file can then be read by correctly constructing a `FlatFileItemReader` and calling
+`read`, as shown in the following example:
+
+[source, java]
+----
+FlatFileItemReader<Player> itemReader = new FlatFileItemReader<>();
+itemReader.setResource(new FileSystemResource("resources/players.csv"));
+DefaultLineMapper<Player> lineMapper = new DefaultLineMapper<>();
+//DelimitedLineTokenizer defaults to comma as its delimiter
+lineMapper.setLineTokenizer(new DelimitedLineTokenizer());
+lineMapper.setFieldSetMapper(new PlayerFieldSetMapper());
+itemReader.setLineMapper(lineMapper);
+itemReader.open(new ExecutionContext());
+Player player = itemReader.read();
+----
+
+Each call to `read` returns a new `Player` object from each line in the file. When the
+end of the file is reached, `null` is returned.
+
+[[mappingFieldsByName]]
+== Mapping Fields by Name
+
+There is one additional piece of functionality, allowed by both
+`DelimitedLineTokenizer` and `FixedLengthTokenizer`, that is similar in function to a
+JDBC `ResultSet`. The names of the fields can be injected into either of these
+`LineTokenizer` implementations to increase the readability of the mapping function.
+First, the column names of all fields in the flat file are injected into the tokenizer,
+as shown in the following example:
+
+[source, java]
+----
+tokenizer.setNames(new String[] {"ID", "lastName", "firstName", "position", "birthYear", "debutYear"});
+----
+
+A `FieldSetMapper` can use this information as follows:
+
+
+[source, java]
+----
+public class PlayerMapper implements FieldSetMapper<Player> {
+	public Player mapFieldSet(FieldSet fs) {
+
+		if (fs == null) {
+			return null;
+		}
+
+		Player player = new Player();
+		player.setID(fs.readString("ID"));
+		player.setLastName(fs.readString("lastName"));
+		player.setFirstName(fs.readString("firstName"));
+		player.setPosition(fs.readString("position"));
+		player.setDebutYear(fs.readInt("debutYear"));
+		player.setBirthYear(fs.readInt("birthYear"));
+
+		return player;
+	}
+}
+----
+
+[[beanWrapperFieldSetMapper]]
+== Automapping FieldSets to Domain Objects
+
+For many, having to write a specific `FieldSetMapper` is equally as cumbersome as writing
+a specific `RowMapper` for a `JdbcTemplate`. Spring Batch makes this easier by providing
+a `FieldSetMapper` that automatically maps fields by matching a field name with a setter
+on the object using the JavaBean specification.
+
+
+[tabs]
+====
+Java::
++
+Again using the football example, the `BeanWrapperFieldSetMapper` configuration looks like
+the following snippet in Java:
++
+.Java Configuration
+[source, java]
+----
+@Bean
+public FieldSetMapper fieldSetMapper() {
+	BeanWrapperFieldSetMapper fieldSetMapper = new BeanWrapperFieldSetMapper();
+
+	fieldSetMapper.setPrototypeBeanName("player");
+
+	return fieldSetMapper;
+}
+
+@Bean
+@Scope("prototype")
+public Player player() {
+	return new Player();
+}
+----
+
+XML::
++
+Again using the football example, the `BeanWrapperFieldSetMapper` configuration looks like
+the following snippet in XML:
++
+.XML Configuration
+[source, xml]
+----
+<bean id="fieldSetMapper"
+      class="org.springframework.batch.item.file.mapping.BeanWrapperFieldSetMapper">
+    <property name="prototypeBeanName" value="player" />
+</bean>
+
+<bean id="player"
+      class="org.springframework.batch.sample.domain.Player"
+      scope="prototype" />
+----
+
+====
+
+
+
+For each entry in the `FieldSet`, the mapper looks for a corresponding setter on a new
+instance of the `Player` object (for this reason, prototype scope is required) in the
+same way the Spring container looks for setters matching a property name. Each available
+field in the `FieldSet` is mapped, and the resultant `Player` object is returned, with no
+code required.
+
+[[fixedLengthFileFormats]]
+== Fixed Length File Formats
+
+So far, only delimited files have been discussed in much detail. However, they represent
+only half of the file reading picture. Many organizations that use flat files use fixed
+length formats. An example fixed length file follows:
+
+----
+UK21341EAH4121131.11customer1
+UK21341EAH4221232.11customer2
+UK21341EAH4321333.11customer3
+UK21341EAH4421434.11customer4
+UK21341EAH4521535.11customer5
+----
+
+While this looks like one large field, it actually represents 4 distinct fields:
+
+. ISIN: Unique identifier for the item being ordered - 12 characters long.
+. Quantity: Number of the item being ordered - 3 characters long.
+. Price: Price of the item - 5 characters long.
+. Customer: ID of the customer ordering the item - 9 characters long.
+
+When configuring the `FixedLengthTokenizer`, each of these lengths must be provided
+in the form of ranges.
+
+
+[tabs]
+=====
+Java::
++
+The following example shows how to define ranges for the `FixedLengthTokenizer` in
+Java:
++
+.Java Configuration
+[source, java]
+----
+@Bean
+public FixedLengthTokenizer fixedLengthTokenizer() {
+	FixedLengthTokenizer tokenizer = new FixedLengthTokenizer();
+
+	tokenizer.setNames("ISIN", "Quantity", "Price", "Customer");
+	tokenizer.setColumns(new Range(1, 12),
+						new Range(13, 15),
+						new Range(16, 20),
+						new Range(21, 29));
+
+	return tokenizer;
+}
+----
+
+
+XML::
++
+The following example shows how to define ranges for the `FixedLengthTokenizer` in
+XML:
++
+.XML Configuration
+[source, xml]
+----
+<bean id="fixedLengthLineTokenizer"
+      class="org.springframework.batch.item.file.transform.FixedLengthTokenizer">
+    <property name="names" value="ISIN,Quantity,Price,Customer" />
+    <property name="columns" value="1-12, 13-15, 16-20, 21-29" />
+</bean>
+----
++
+[NOTE]
+====
+Supporting the preceding syntax for ranges requires that a specialized property editor,
+`RangeArrayPropertyEditor`, be configured in the `ApplicationContext`. However, this bean
+is automatically declared in an `ApplicationContext` where the batch namespace is used.
+====
+
+=====
+
+
+Because the `FixedLengthTokenizer` uses the same `LineTokenizer` interface as
+discussed above, it returns the same `FieldSet` as if a delimiter had been used. This
+lets the same approaches be used in handling its output, such as using the
+`BeanWrapperFieldSetMapper`.
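+
+To make the ranges concrete, the following snippet is a minimal sketch (reusing the
+`fixedLengthTokenizer()` bean from the preceding Java example and the first record of the
+sample file) of how the configured ranges carve a record into named fields:
+
+[source, java]
+----
+FieldSet fieldSet = fixedLengthTokenizer().tokenize("UK21341EAH4121131.11customer1");
+
+String isin = fieldSet.readString("ISIN");           // "UK21341EAH41" (columns 1-12)
+int quantity = fieldSet.readInt("Quantity");         // 211 (columns 13-15)
+BigDecimal price = fieldSet.readBigDecimal("Price"); // 31.11 (columns 16-20)
+String customer = fieldSet.readString("Customer");   // "customer1" (columns 21-29)
+----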
+
+[[prefixMatchingLineMapper]]
+== Multiple Record Types within a Single File
+
+All of the file reading examples up to this point have made a key assumption for
+simplicity's sake: all of the records in a file have the same format. However, this may
+not always be the case. It is very common that a file might have records with different
+formats that need to be tokenized differently and mapped to different objects. The
+following excerpt from a file illustrates this:
+
+----
+USER;Smith;Peter;;T;20014539;F
+LINEA;1044391041ABC037.49G201XX1383.12H
+LINEB;2134776319DEF422.99M005LI
+----
+
+In this file we have three types of records, "USER", "LINEA", and "LINEB". A "USER" line
+corresponds to a `User` object. "LINEA" and "LINEB" both correspond to `Line` objects,
+though a "LINEA" has more information than a "LINEB".
+
+The `ItemReader` reads each line individually, but we must specify different
+`LineTokenizer` and `FieldSetMapper` objects so that the `ItemWriter` receives the
+correct items. The `PatternMatchingCompositeLineMapper` makes this easy by allowing maps
+of patterns to `LineTokenizers` and patterns to `FieldSetMappers` to be configured.
+
+
+[tabs]
+====
+Java::
++
+.Java Configuration
+[source, java]
+----
+@Bean
+public PatternMatchingCompositeLineMapper orderFileLineMapper() {
+	PatternMatchingCompositeLineMapper lineMapper =
+		new PatternMatchingCompositeLineMapper();
+
+	Map<String, LineTokenizer> tokenizers = new HashMap<>(3);
+	tokenizers.put("USER*", userTokenizer());
+	tokenizers.put("LINEA*", lineATokenizer());
+	tokenizers.put("LINEB*", lineBTokenizer());
+
+	lineMapper.setTokenizers(tokenizers);
+
+	Map<String, FieldSetMapper> mappers = new HashMap<>(2);
+	mappers.put("USER*", userFieldSetMapper());
+	mappers.put("LINE*", lineFieldSetMapper());
+
+	lineMapper.setFieldSetMappers(mappers);
+
+	return lineMapper;
+}
+----
+
+XML::
++
+The following example shows how to configure the `PatternMatchingCompositeLineMapper` in
+XML:
++
+.XML Configuration
+[source, xml]
+----
+<bean id="orderFileLineMapper"
+      class="org.spr...PatternMatchingCompositeLineMapper">
+    <property name="tokenizers">
+        <map>
+            <entry key="USER*" value-ref="userTokenizer" />
+            <entry key="LINEA*" value-ref="lineATokenizer" />
+            <entry key="LINEB*" value-ref="lineBTokenizer" />
+        </map>
+    </property>
+    <property name="fieldSetMappers">
+        <map>
+            <entry key="USER*" value-ref="userFieldSetMapper" />
+            <entry key="LINE*" value-ref="lineFieldSetMapper" />
+        </map>
+    </property>
+</bean>
+----
+
+====
+
+
+
+In this example, "LINEA" and "LINEB" have separate `LineTokenizer` instances, but they both use
+the same `FieldSetMapper`.
+
+The `PatternMatchingCompositeLineMapper` uses the `PatternMatcher#match` method
+in order to select the correct delegate for each line. The `PatternMatcher` allows for
+two wildcard characters with special meaning: the question mark ("?") matches exactly one
+character, while the asterisk ("\*") matches zero or more characters. Note that, in the
+preceding configuration, all patterns end with an asterisk, making them effectively
+prefixes to lines. The `PatternMatcher` always matches the most specific pattern
+possible, regardless of the order in the configuration. So if "LINE*" and "LINEA*" were
+both listed as patterns, "LINEA" would match pattern "LINEA*", while "LINEB" would match
+pattern "LINE*". Additionally, a single asterisk ("*") can serve as a default by matching
+any line not matched by any other pattern.
+
+
+[tabs]
+====
+Java::
++
+The following example shows how to match a line not matched by any other pattern in Java:
++
+.Java Configuration
+[source, java]
+----
+...
+tokenizers.put("*", defaultLineTokenizer());
+...
+----
+
+XML::
++
+The following example shows how to match a line not matched by any other pattern in XML:
++
+.XML Configuration
+[source, xml]
+----
+<entry key="*" value-ref="defaultLineTokenizer" />
+----
+
+====
+
+
+
+There is also a `PatternMatchingCompositeLineTokenizer` that can be used for tokenization
+alone.
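+
+The most-specific-match rule can also be exercised in isolation with `PatternMatcher`
+itself (`org.springframework.batch.support.PatternMatcher`). The following snippet is a
+minimal sketch, with plain strings standing in for the tokenizer delegates:
+
+[source, java]
+----
+Map<String, String> delegates = new HashMap<>();
+delegates.put("LINEA*", "tokenizer for LINEA records");
+delegates.put("LINE*", "tokenizer for other LINE records");
+
+PatternMatcher<String> matcher = new PatternMatcher<>(delegates);
+
+// Both patterns match a "LINEA" line, but the most specific one wins.
+String forLineA = matcher.match("LINEA;1044391041ABC037.49G201XX1383.12H"); // LINEA* entry
+String forLineB = matcher.match("LINEB;2134776319DEF422.99M005LI");         // LINE* entry
+----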
+
+It is also common for a flat file to contain records that each span multiple lines. To
+handle this situation, a more complex strategy is required. A demonstration of this
+common pattern can be found in the `multiLineRecords` sample.
+
+[[exceptionHandlingInFlatFiles]]
+== Exception Handling in Flat Files
+
+There are many scenarios when tokenizing a line may cause exceptions to be thrown. Many
+flat files are imperfect and contain incorrectly formatted records. Many users choose to
+skip these erroneous lines while logging the issue, the original line, and the line
+number. These logs can later be inspected manually or by another batch job. For this
+reason, Spring Batch provides a hierarchy of exceptions for handling parse exceptions:
+`FlatFileParseException` and `FlatFileFormatException`. `FlatFileParseException` is
+thrown by the `FlatFileItemReader` when any errors are encountered while trying to read a
+file. `FlatFileFormatException` is thrown by implementations of the `LineTokenizer`
+interface and indicates a more specific error encountered while tokenizing.
+
+[[incorrectTokenCountException]]
+=== `IncorrectTokenCountException`
+
+Both `DelimitedLineTokenizer` and `FixedLengthTokenizer` have the ability to specify
+column names that can be used for creating a `FieldSet`. However, if the number of column
+names does not match the number of columns found while tokenizing a line, the `FieldSet`
+cannot be created, and an `IncorrectTokenCountException` is thrown, which contains the
+number of tokens encountered and the number expected, as shown in the following example:
+
+[source, java]
+----
+tokenizer.setNames(new String[] {"A", "B", "C", "D"});
+
+try {
+	tokenizer.tokenize("a,b,c");
+}
+catch (IncorrectTokenCountException e) {
+	assertEquals(4, e.getExpectedCount());
+	assertEquals(3, e.getActualCount());
+}
+----
+
+Because the tokenizer was configured with 4 column names but only 3 tokens were found in
+the file, an `IncorrectTokenCountException` was thrown.
+
+[[incorrectLineLengthException]]
+=== `IncorrectLineLengthException`
+
+Files formatted in a fixed-length format have additional requirements when parsing
+because, unlike a delimited format, each column must strictly adhere to its predefined
+width. If the total length of the line does not equal the end of the widest column range
+(the expected line length), an exception is thrown, as shown in the following example:
+
+[source, java]
+----
+tokenizer.setColumns(new Range[] { new Range(1, 5),
+					new Range(6, 10),
+					new Range(11, 15) });
+try {
+	tokenizer.tokenize("12345");
+	fail("Expected IncorrectLineLengthException");
+}
+catch (IncorrectLineLengthException ex) {
+	assertEquals(15, ex.getExpectedLength());
+	assertEquals(5, ex.getActualLength());
+}
+----
+
+The configured ranges for the tokenizer above are: 1-5, 6-10, and 11-15. Consequently,
+the expected line length is 15. However, in the preceding example, a line of length 5
+was passed in, causing an `IncorrectLineLengthException` to be thrown. Throwing an
+exception here rather than only mapping the first column allows the processing of the
+line to fail earlier and with more information than it would contain if it failed while
+trying to read in column 2 in a `FieldSetMapper`. However, there are scenarios where the
+length of the line is not always constant.
+For this reason, validation of line length can
+be turned off via the `strict` property, as shown in the following example:
+
+[source, java]
+----
+tokenizer.setColumns(new Range[] { new Range(1, 5), new Range(6, 10) });
+tokenizer.setStrict(false);
+FieldSet tokens = tokenizer.tokenize("12345");
+assertEquals("12345", tokens.readString(0));
+assertEquals("", tokens.readString(1));
+----
+
+The preceding example is almost identical to the one before it, except that
+`tokenizer.setStrict(false)` was called. This setting tells the tokenizer not to enforce
+line lengths when tokenizing the line. A `FieldSet` is now correctly created and
+returned. However, it contains only empty tokens for the remaining values.
+
diff --git a/spring-batch-docs/modules/ROOT/pages/readers-and-writers/flat-files/file-item-writer.adoc b/spring-batch-docs/modules/ROOT/pages/readers-and-writers/flat-files/file-item-writer.adoc
new file mode 100644
index 0000000000..6cafa863c7
--- /dev/null
+++ b/spring-batch-docs/modules/ROOT/pages/readers-and-writers/flat-files/file-item-writer.adoc
@@ -0,0 +1,445 @@
+[[flatFileItemWriter]]
+= `FlatFileItemWriter`
+
+Writing out to flat files has the same problems and issues that reading in from a file
+must overcome. A step must be able to write either delimited or fixed length formats in a
+transactional manner.
+
+[[lineAggregator]]
+== `LineAggregator`
+
+Just as the `LineTokenizer` interface is necessary to turn a line of input into a
+`FieldSet`, file writing must have a way to aggregate multiple fields into a single
+string for writing to a file. In Spring Batch, this is the `LineAggregator`, shown in the
+following interface definition:
+
+[source, java]
+----
+public interface LineAggregator<T> {
+
+	public String aggregate(T item);
+
+}
+----
+
+The `LineAggregator` is the logical opposite of `LineTokenizer`. `LineTokenizer` takes a
+`String` and returns a `FieldSet`, whereas `LineAggregator` takes an `item` and returns a
+`String`.
+
+[[PassThroughLineAggregator]]
+=== `PassThroughLineAggregator`
+
+The most basic implementation of the `LineAggregator` interface is the
+`PassThroughLineAggregator`, which assumes that the object is already a string or that
+its string representation is acceptable for writing, as shown in the following code:
+
+[source, java]
+----
+public class PassThroughLineAggregator<T> implements LineAggregator<T> {
+
+	public String aggregate(T item) {
+		return item.toString();
+	}
+}
+----
+
+The preceding implementation is useful if direct control of creating the string is
+required but the advantages of a `FlatFileItemWriter`, such as transaction and restart
+support, are necessary.
+
+[[SimplifiedFileWritingExample]]
+== Simplified File Writing Example
+
+Now that the `LineAggregator` interface and its most basic implementation,
+`PassThroughLineAggregator`, have been defined, the basic flow of writing can be
+explained:
+
+. The object to be written is passed to the `LineAggregator` in order to obtain a
+`String`.
+. The returned `String` is written to the configured file.
+
+The following excerpt from the `FlatFileItemWriter` expresses this in code:
+
+[source, java]
+----
+public void write(T item) throws Exception {
+	write(lineAggregator.aggregate(item) + LINE_SEPARATOR);
+}
+----
+
+
+[tabs]
+====
+Java::
++
+In Java, a simple example of configuration might look like the following:
++
+.Java Configuration
+[source, java]
+----
+@Bean
+public FlatFileItemWriter<Foo> itemWriter() {
+	return new FlatFileItemWriterBuilder<Foo>()
+				.name("itemWriter")
+				.resource(new FileSystemResource("target/test-outputs/output.txt"))
+				.lineAggregator(new PassThroughLineAggregator<>())
+				.build();
+}
+----
+
+XML::
++
+In XML, a simple example of configuration might look like the following:
++
+.XML Configuration
+[source, xml]
+----
+<bean id="itemWriter" class="org.spr...FlatFileItemWriter">
+    <property name="resource" value="file:target/test-outputs/output.txt" />
+    <property name="lineAggregator">
+        <bean class="org.spr...PassThroughLineAggregator"/>
+    </property>
+</bean>
+----
+
+====
+
+
+
+[[FieldExtractor]]
+== `FieldExtractor`
+
+The preceding example may be useful for the most basic uses of writing to a file.
+However, most users of the `FlatFileItemWriter` have a domain object that needs to be
+written out and, thus, must be converted into a line. In file reading, the following was
+required:
+
+. Read one line from the file.
+. Pass the line into the `LineTokenizer#tokenize()` method, in order to retrieve a
+`FieldSet`.
+. Pass the `FieldSet` returned from tokenizing to a `FieldSetMapper`, returning the
+result from the `ItemReader#read()` method.
+
+File writing has similar but inverse steps:
+
+. Pass the item to be written to the writer.
+. Convert the fields on the item into an array.
+. Aggregate the resulting array into a line.
+
+Because there is no way for the framework to know which fields from the object need to
+be written out, a `FieldExtractor` must be written to accomplish the task of turning the
+item into an array, as shown in the following interface definition:
+
+[source, java]
+----
+public interface FieldExtractor<T> {
+
+	Object[] extract(T item);
+
+}
+----
+
+Implementations of the `FieldExtractor` interface should create an array from the fields
+of the provided object, which can then be written out with a delimiter between the
+elements or as part of a fixed-width line.
+
+[[PassThroughFieldExtractor]]
+=== `PassThroughFieldExtractor`
+
+There are many cases where a collection, such as an array, `Collection`, or `FieldSet`,
+needs to be written out. "Extracting" an array from one of these collection types is very
+straightforward. To do so, convert the collection to an array. Therefore, the
+`PassThroughFieldExtractor` should be used in this scenario. It should be noted that, if
+the object passed in is not a type of collection, then the `PassThroughFieldExtractor`
+returns an array containing solely the item to be extracted.
+
+[[BeanWrapperFieldExtractor]]
+=== `BeanWrapperFieldExtractor`
+
+As with the `BeanWrapperFieldSetMapper` described in the file reading section, it is
+often preferable to configure how to convert a domain object to an object array, rather
+than writing the conversion yourself.
+The `BeanWrapperFieldExtractor` provides this
+functionality, as shown in the following example:
+
+[source, java]
+----
+BeanWrapperFieldExtractor<Name> extractor = new BeanWrapperFieldExtractor<>();
+extractor.setNames(new String[] { "first", "last", "born" });
+
+String first = "Alan";
+String last = "Turing";
+int born = 1912;
+
+Name n = new Name(first, last, born);
+Object[] values = extractor.extract(n);
+
+assertEquals(first, values[0]);
+assertEquals(last, values[1]);
+assertEquals(born, values[2]);
+----
+
+This extractor implementation has only one required property: the names of the fields to
+map. Just as the `BeanWrapperFieldSetMapper` needs field names to map fields on the
+`FieldSet` to setters on the provided object, the `BeanWrapperFieldExtractor` needs names
+to map to getters for creating an object array. It is worth noting that the order of the
+names determines the order of the fields within the array.
+
+[[delimitedFileWritingExample]]
+== Delimited File Writing Example
+
+The most basic flat file format is one in which all fields are separated by a delimiter.
+This can be accomplished using a `DelimitedLineAggregator`. The following example writes
+out a simple domain object that represents a credit to a customer account:
+
+[source, java]
+----
+public class CustomerCredit {
+
+	private int id;
+	private String name;
+	private BigDecimal credit;
+
+	//getters and setters removed for clarity
+}
+----
+
+Because a domain object is being used, an implementation of the `FieldExtractor`
+interface must be provided, along with the delimiter to use.
+
+
+[tabs]
+====
+Java::
++
+The following example shows how to use the `FieldExtractor` with a delimiter in Java:
++
+.Java Configuration
+[source, java]
+----
+@Bean
+public FlatFileItemWriter<CustomerCredit> itemWriter(Resource outputResource) throws Exception {
+	BeanWrapperFieldExtractor<CustomerCredit> fieldExtractor = new BeanWrapperFieldExtractor<>();
+	fieldExtractor.setNames(new String[] {"name", "credit"});
+	fieldExtractor.afterPropertiesSet();
+
+	DelimitedLineAggregator<CustomerCredit> lineAggregator = new DelimitedLineAggregator<>();
+	lineAggregator.setDelimiter(",");
+	lineAggregator.setFieldExtractor(fieldExtractor);
+
+	return new FlatFileItemWriterBuilder<CustomerCredit>()
+				.name("customerCreditWriter")
+				.resource(outputResource)
+				.lineAggregator(lineAggregator)
+				.build();
+}
+----
+
+XML::
++
+The following example shows how to use the `FieldExtractor` with a delimiter in XML:
++
+.XML Configuration
+[source, xml]
+----
+<bean id="itemWriter" class="org.springframework.batch.item.file.FlatFileItemWriter">
+    <property name="resource" ref="outputResource" />
+    <property name="lineAggregator">
+        <bean class="org.spr...DelimitedLineAggregator">
+            <property name="delimiter" value=","/>
+            <property name="fieldExtractor">
+                <bean class="org.spr...BeanWrapperFieldExtractor">
+                    <property name="names" value="name,credit"/>
+                </bean>
+            </property>
+        </bean>
+    </property>
+</bean>
+----
+
+====
+
+
+
+In the previous example, the `BeanWrapperFieldExtractor` described earlier in this
+chapter is used to turn the name and credit fields within `CustomerCredit` into an object
+array, which is then written out with commas between each field.
+
+
+[tabs]
+====
+Java::
++
+// FIXME: in the existing docs this is displayed for XML too but there is no config below it
+It is also possible to use the `FlatFileItemWriterBuilder.DelimitedBuilder` to
+automatically create the `BeanWrapperFieldExtractor` and `DelimitedLineAggregator`
+as shown in the following example:
++
+.Java Configuration
+[source, java]
+----
+@Bean
+public FlatFileItemWriter<CustomerCredit> itemWriter(Resource outputResource) throws Exception {
+	return new FlatFileItemWriterBuilder<CustomerCredit>()
+				.name("customerCreditWriter")
+				.resource(outputResource)
+				.delimited()
+				.delimiter("|")
+				.names(new String[] {"name", "credit"})
+				.build();
+}
+----
+
+XML::
++
+// FIXME: what is the XML config
++
+There is no XML equivalent of using `FlatFileItemWriterBuilder`.
+
+====
+
+
+[[fixedWidthFileWritingExample]]
+== Fixed Width File Writing Example
+
+Delimited is not the only type of flat file format. Many prefer to use a set width for
+each column to delineate between fields, which is usually referred to as 'fixed width'.
+Spring Batch supports this in file writing with the `FormatterLineAggregator`.
+
+
+[tabs]
+====
+Java::
++
+Using the same `CustomerCredit` domain object described above, it can be configured as
+follows in Java:
++
+.Java Configuration
+[source, java]
+----
+@Bean
+public FlatFileItemWriter<CustomerCredit> itemWriter(Resource outputResource) throws Exception {
+	BeanWrapperFieldExtractor<CustomerCredit> fieldExtractor = new BeanWrapperFieldExtractor<>();
+	fieldExtractor.setNames(new String[] {"name", "credit"});
+	fieldExtractor.afterPropertiesSet();
+
+	FormatterLineAggregator<CustomerCredit> lineAggregator = new FormatterLineAggregator<>();
+	lineAggregator.setFormat("%-9s%-2.0f");
+	lineAggregator.setFieldExtractor(fieldExtractor);
+
+	return new FlatFileItemWriterBuilder<CustomerCredit>()
+				.name("customerCreditWriter")
+				.resource(outputResource)
+				.lineAggregator(lineAggregator)
+				.build();
+}
+----
+
+XML::
++
+Using the same `CustomerCredit` domain object described above, it can be configured as
+follows in XML:
++
+.XML Configuration
+[source, xml]
+----
+<bean id="itemWriter" class="org.springframework.batch.item.file.FlatFileItemWriter">
+    <property name="resource" ref="outputResource" />
+    <property name="lineAggregator">
+        <bean class="org.spr...FormatterLineAggregator">
+            <property name="fieldExtractor">
+                <bean class="org.spr...BeanWrapperFieldExtractor">
+                    <property name="names" value="name,credit" />
+                </bean>
+            </property>
+            <property name="format" value="%-9s%-2.0f" />
+        </bean>
+    </property>
+</bean>
+----
+
+====
+
+Most of the preceding example should look familiar. However, the value of the format
+property is new.
+
+
+[tabs]
+====
+Java::
++
+The following example shows the format property in Java:
++
+[source, java]
+----
+...
+FormatterLineAggregator<CustomerCredit> lineAggregator = new FormatterLineAggregator<>();
+lineAggregator.setFormat("%-9s%-2.0f");
+...
+----
+
+XML::
++
+The following example shows the format property in XML:
++
+[source, xml]
+----
+<property name="format" value="%-9s%-2.0f" />
+----
+
+====
+
+
+
+The underlying implementation is built using the `java.util.Formatter` added in Java 5.
+The Java `Formatter` is based on the `printf` functionality of the C programming
+language. Most details on how to configure a formatter can be found in
+the Javadoc of link:$$https://docs.oracle.com/javase/8/docs/api/java/util/Formatter.html$$[Formatter].
+
+
+[tabs]
+====
+Java::
++
+It is also possible to use the `FlatFileItemWriterBuilder.FormattedBuilder` to
+automatically create the `BeanWrapperFieldExtractor` and `FormatterLineAggregator`,
+as shown in the following example:
++
+.Java Configuration
+[source, java]
+----
+@Bean
+public FlatFileItemWriter<CustomerCredit> itemWriter(Resource outputResource) throws Exception {
+	return new FlatFileItemWriterBuilder<CustomerCredit>()
+				.name("customerCreditWriter")
+				.resource(outputResource)
+				.formatted()
+				.format("%-9s%-2.0f")
+				.names(new String[] {"name", "credit"})
+				.build();
+}
+----
+
+XML::
++
+// FIXME: What is the XML equivalent
+
+====
+
+
+[[handlingFileCreation]]
+== Handling File Creation
+
+`FlatFileItemReader` has a very simple relationship with file resources. When the reader
+is initialized, it opens the file (if it exists), and throws an exception if it does not.
+File writing isn't quite so simple. At first glance, it seems like a similar
+straightforward contract should exist for `FlatFileItemWriter`: If the file already
+exists, throw an exception, and, if it does not, create it and start writing. However,
+potentially restarting a `Job` can cause issues. In normal restart scenarios, the
+contract is reversed: If the file exists, start writing to it from the last known good
+position, and, if it does not, throw an exception. However, what happens if the file name
+for this job is always the same?
+In this case, you would want to delete the file if it
+exists, unless it's a restart. Because of this possibility, the `FlatFileItemWriter`
+contains the `shouldDeleteIfExists` property. Setting this property to `true` causes an
+existing file with the same name to be deleted when the writer is opened.
+
diff --git a/spring-batch-docs/modules/ROOT/pages/readers-and-writers/item-reader-writer-implementations.adoc b/spring-batch-docs/modules/ROOT/pages/readers-and-writers/item-reader-writer-implementations.adoc
new file mode 100644
index 0000000000..4b1a0d31eb
--- /dev/null
+++ b/spring-batch-docs/modules/ROOT/pages/readers-and-writers/item-reader-writer-implementations.adoc
@@ -0,0 +1,313 @@
+[[itemReaderAndWriterImplementations]]
+= Item Reader and Writer Implementations
+
+In this section, we will introduce you to readers and writers that have not already been
+discussed in the previous sections.
+
+[[decorators]]
+== Decorators
+
+In some cases, a user needs specialized behavior to be appended to a pre-existing
+`ItemReader`. Spring Batch offers some out-of-the-box decorators that can add
+additional behavior to your `ItemReader` and `ItemWriter` implementations.
+
+Spring Batch includes the following decorators:
+
+* xref:readers-and-writers/item-reader-writer-implementations.adoc#synchronizedItemStreamReader[`SynchronizedItemStreamReader`]
+* xref:readers-and-writers/item-reader-writer-implementations.adoc#singleItemPeekableItemReader[`SingleItemPeekableItemReader`]
+* xref:readers-and-writers/item-reader-writer-implementations.adoc#synchronizedItemStreamWriter[`SynchronizedItemStreamWriter`]
+* xref:readers-and-writers/item-reader-writer-implementations.adoc#multiResourceItemWriter[`MultiResourceItemWriter`]
+* xref:readers-and-writers/item-reader-writer-implementations.adoc#classifierCompositeItemWriter[`ClassifierCompositeItemWriter`]
+* xref:readers-and-writers/item-reader-writer-implementations.adoc#classifierCompositeItemProcessor[`ClassifierCompositeItemProcessor`]
+
+[[synchronizedItemStreamReader]]
+=== `SynchronizedItemStreamReader`
+When using an `ItemReader` that is not thread safe, Spring Batch offers the
+`SynchronizedItemStreamReader` decorator, which can be used to make the `ItemReader`
+thread safe. Spring Batch provides a `SynchronizedItemStreamReaderBuilder` to construct
+an instance of the `SynchronizedItemStreamReader`.
+
+For example, the `FlatFileItemReader` is *not* thread-safe and cannot be used in
+a multi-threaded step. This reader can be decorated with a `SynchronizedItemStreamReader`
+in order to use it safely in a multi-threaded step. Here is an example of how to decorate
+such a reader:
+
+[source, java]
+----
+@Bean
+public SynchronizedItemStreamReader itemReader() {
+	FlatFileItemReader flatFileItemReader = new FlatFileItemReaderBuilder()
+			// set reader properties
+			.build();
+
+	return new SynchronizedItemStreamReaderBuilder()
+			.delegate(flatFileItemReader)
+			.build();
+}
+----
+
+[[singleItemPeekableItemReader]]
+=== `SingleItemPeekableItemReader`
+Spring Batch includes a decorator that adds a peek method to an `ItemReader`. This peek
+method lets the user peek one item ahead. Repeated calls to `peek` return the same
+item, which is also the next item returned from the `read` method. Spring Batch provides a
+`SingleItemPeekableItemReaderBuilder` to construct an instance of the
+`SingleItemPeekableItemReader`.
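+
+For example, the following sketch (assuming an existing `ItemReader<Foo>` delegate bean
+named `fooReader`, where `Foo` is a hypothetical item type) wraps a delegate so that
+callers can inspect the next item without consuming it:
+
+[source, java]
+----
+@Bean
+public SingleItemPeekableItemReader<Foo> peekableItemReader(ItemReader<Foo> fooReader) {
+	return new SingleItemPeekableItemReaderBuilder<Foo>()
+			.delegate(fooReader)
+			.build();
+}
+----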
+
+NOTE: SingleItemPeekableItemReader's peek method is not thread-safe, because it would not
+be possible to honor the peek in multiple threads. Only one of the threads that peeked
+would get that item in the next call to read.
+
+[[synchronizedItemStreamWriter]]
+=== `SynchronizedItemStreamWriter`
+When using an `ItemWriter` that is not thread safe, Spring Batch offers the
+`SynchronizedItemStreamWriter` decorator, which can be used to make the `ItemWriter`
+thread safe. Spring Batch provides a `SynchronizedItemStreamWriterBuilder` to construct
+an instance of the `SynchronizedItemStreamWriter`.
+
+For example, the `FlatFileItemWriter` is *not* thread-safe and cannot be used in
+a multi-threaded step. This writer can be decorated with a `SynchronizedItemStreamWriter`
+in order to use it safely in a multi-threaded step. Here is an example of how to decorate
+such a writer:
+
+[source, java]
+----
+@Bean
+public SynchronizedItemStreamWriter itemWriter() {
+	FlatFileItemWriter flatFileItemWriter = new FlatFileItemWriterBuilder()
+			// set writer properties
+			.build();
+
+	return new SynchronizedItemStreamWriterBuilder()
+			.delegate(flatFileItemWriter)
+			.build();
+}
+----
+
+[[multiResourceItemWriter]]
+=== `MultiResourceItemWriter`
+The `MultiResourceItemWriter` wraps a `ResourceAwareItemWriterItemStream` and creates a new
+output resource when the count of items written in the current resource exceeds the
+`itemCountLimitPerResource`. Spring Batch provides a `MultiResourceItemWriterBuilder` to
+construct an instance of the `MultiResourceItemWriter`.
+
+[[classifierCompositeItemWriter]]
+=== `ClassifierCompositeItemWriter`
+The `ClassifierCompositeItemWriter` calls one of a collection of `ItemWriter`
+implementations for each item, based on a router pattern implemented through the provided
+`Classifier`. The implementation is thread-safe if all delegates are thread-safe. Spring
+Batch provides a `ClassifierCompositeItemWriterBuilder` to construct an instance of the
+`ClassifierCompositeItemWriter`.
+
+[[classifierCompositeItemProcessor]]
+=== `ClassifierCompositeItemProcessor`
+The `ClassifierCompositeItemProcessor` is an `ItemProcessor` that calls one of a
+collection of `ItemProcessor` implementations, based on a router pattern implemented
+through the provided `Classifier`. Spring Batch provides a
+`ClassifierCompositeItemProcessorBuilder` to construct an instance of the
+`ClassifierCompositeItemProcessor`.
+
+[[messagingReadersAndWriters]]
+== Messaging Readers And Writers
+Spring Batch offers the following readers and writers for commonly used messaging systems:
+
+* xref:readers-and-writers/item-reader-writer-implementations.adoc#amqpItemReader[`AmqpItemReader`]
+* xref:readers-and-writers/item-reader-writer-implementations.adoc#amqpItemWriter[`AmqpItemWriter`]
+* xref:readers-and-writers/item-reader-writer-implementations.adoc#jmsItemReader[`JmsItemReader`]
+* xref:readers-and-writers/item-reader-writer-implementations.adoc#jmsItemWriter[`JmsItemWriter`]
+* xref:readers-and-writers/item-reader-writer-implementations.adoc#kafkaItemReader[`KafkaItemReader`]
+* xref:readers-and-writers/item-reader-writer-implementations.adoc#kafkaItemWriter[`KafkaItemWriter`]
+
+[[amqpItemReader]]
+=== `AmqpItemReader`
+The `AmqpItemReader` is an `ItemReader` that uses an `AmqpTemplate` to receive or convert
+messages from an exchange. Spring Batch provides an `AmqpItemReaderBuilder` to construct
+an instance of the `AmqpItemReader`.
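+
+For example, the following sketch (assuming an `AmqpTemplate` bean and a hypothetical
+`Trade` item type) shows how the builder might be used:
+
+[source, java]
+----
+@Bean
+public AmqpItemReader<Trade> tradeAmqpItemReader(AmqpTemplate amqpTemplate) {
+	return new AmqpItemReaderBuilder<Trade>()
+			.amqpTemplate(amqpTemplate)
+			.itemType(Trade.class)
+			.build();
+}
+----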
+
+[[amqpItemWriter]]
+=== `AmqpItemWriter`
+The `AmqpItemWriter` is an `ItemWriter` that uses an `AmqpTemplate` to send messages to
+an AMQP exchange. Messages are sent to the nameless exchange if the name is not specified
+in the provided `AmqpTemplate`. Spring Batch provides an `AmqpItemWriterBuilder` to
+construct an instance of the `AmqpItemWriter`.
+
+[[jmsItemReader]]
+=== `JmsItemReader`
+The `JmsItemReader` is an `ItemReader` for JMS that uses a `JmsTemplate`. The template
+should have a default destination, which is used to provide items for the `read()`
+method. Spring Batch provides a `JmsItemReaderBuilder` to construct an instance of the
+`JmsItemReader`.
+
+[[jmsItemWriter]]
+=== `JmsItemWriter`
+The `JmsItemWriter` is an `ItemWriter` for JMS that uses a `JmsTemplate`. The template
+should have a default destination, which is used to send items in `write(Chunk)`. Spring
+Batch provides a `JmsItemWriterBuilder` to construct an instance of the `JmsItemWriter`.
+
+[[kafkaItemReader]]
+=== `KafkaItemReader`
+The `KafkaItemReader` is an `ItemReader` for an Apache Kafka topic. It can be configured
+to read messages from multiple partitions of the same topic. It stores message offsets
+in the execution context to support restart capabilities. Spring Batch provides a
+`KafkaItemReaderBuilder` to construct an instance of the `KafkaItemReader`.
+
+[[kafkaItemWriter]]
+=== `KafkaItemWriter`
+The `KafkaItemWriter` is an `ItemWriter` for Apache Kafka that uses a `KafkaTemplate` to
+send events to a default topic. Spring Batch provides a `KafkaItemWriterBuilder` to
+construct an instance of the `KafkaItemWriter`.
+
+[[databaseReaders]]
+== Database Readers
+Spring Batch offers the following database readers:
+
+* xref:readers-and-writers/item-reader-writer-implementations.adoc#Neo4jItemReader[`Neo4jItemReader`]
+* xref:readers-and-writers/item-reader-writer-implementations.adoc#mongoItemReader[`MongoItemReader`]
+* xref:readers-and-writers/item-reader-writer-implementations.adoc#hibernateCursorItemReader[`HibernateCursorItemReader`]
+* xref:readers-and-writers/item-reader-writer-implementations.adoc#hibernatePagingItemReader[`HibernatePagingItemReader`]
+* xref:readers-and-writers/item-reader-writer-implementations.adoc#repositoryItemReader[`RepositoryItemReader`]
+
+[[Neo4jItemReader]]
+=== `Neo4jItemReader`
+The `Neo4jItemReader` is an `ItemReader` that reads objects from the graph database Neo4j
+by using a paging technique. Spring Batch provides a `Neo4jItemReaderBuilder` to
+construct an instance of the `Neo4jItemReader`.
+
+[[mongoItemReader]]
+=== `MongoItemReader`
+The `MongoItemReader` is an `ItemReader` that reads documents from MongoDB by using a
+paging technique. Spring Batch provides a `MongoItemReaderBuilder` to construct an
+instance of the `MongoItemReader`.
+
+[[hibernateCursorItemReader]]
+=== `HibernateCursorItemReader`
+The `HibernateCursorItemReader` is an `ItemStreamReader` for reading database records
+built on top of Hibernate. When initialized, it executes the HQL query and then iterates
+over the result set as the `read()` method is called, successively returning an object
+corresponding to the current row. Spring Batch provides a
+`HibernateCursorItemReaderBuilder` to construct an instance of the
+`HibernateCursorItemReader`.
+
+[[hibernatePagingItemReader]]
+=== `HibernatePagingItemReader`
+The `HibernatePagingItemReader` is an `ItemReader` for reading database records built on
+top of Hibernate and reading only up to a fixed number of items at a time.
+Spring Batch
+provides a `HibernatePagingItemReaderBuilder` to construct an instance of the
+`HibernatePagingItemReader`.
+
+[[repositoryItemReader]]
+=== `RepositoryItemReader`
+The `RepositoryItemReader` is an `ItemReader` that reads records by using a
+`PagingAndSortingRepository`. Spring Batch provides a `RepositoryItemReaderBuilder` to
+construct an instance of the `RepositoryItemReader`.
+
+[[databaseWriters]]
+== Database Writers
+Spring Batch offers the following database writers:
+
+* xref:readers-and-writers/item-reader-writer-implementations.adoc#neo4jItemWriter[`Neo4jItemWriter`]
+* xref:readers-and-writers/item-reader-writer-implementations.adoc#mongoItemWriter[`MongoItemWriter`]
+* xref:readers-and-writers/item-reader-writer-implementations.adoc#repositoryItemWriter[`RepositoryItemWriter`]
+* xref:readers-and-writers/item-reader-writer-implementations.adoc#hibernateItemWriter[`HibernateItemWriter`]
+* xref:readers-and-writers/item-reader-writer-implementations.adoc#jdbcBatchItemWriter[`JdbcBatchItemWriter`]
+* xref:readers-and-writers/item-reader-writer-implementations.adoc#jpaItemWriter[`JpaItemWriter`]
+
+[[neo4jItemWriter]]
+=== `Neo4jItemWriter`
+The `Neo4jItemWriter` is an `ItemWriter` implementation that writes to a Neo4j database.
+Spring Batch provides a `Neo4jItemWriterBuilder` to construct an instance of the
+`Neo4jItemWriter`.
+
+[[mongoItemWriter]]
+=== `MongoItemWriter`
+The `MongoItemWriter` is an `ItemWriter` implementation that writes to a MongoDB store
+using an implementation of Spring Data's `MongoOperations`. Spring Batch provides a
+`MongoItemWriterBuilder` to construct an instance of the `MongoItemWriter`.
+
+[[repositoryItemWriter]]
+=== `RepositoryItemWriter`
+The `RepositoryItemWriter` is an `ItemWriter` wrapper for a `CrudRepository` from Spring
+Data. Spring Batch provides a `RepositoryItemWriterBuilder` to construct an instance of
+the `RepositoryItemWriter`.
+
+[[hibernateItemWriter]]
+=== `HibernateItemWriter`
+The `HibernateItemWriter` is an `ItemWriter` that uses a Hibernate session to save or
+update entities that are not part of the current Hibernate session. Spring Batch provides
+a `HibernateItemWriterBuilder` to construct an instance of the `HibernateItemWriter`.
+
+[[jdbcBatchItemWriter]]
+=== `JdbcBatchItemWriter`
+The `JdbcBatchItemWriter` is an `ItemWriter` that uses the batching features from
+`NamedParameterJdbcTemplate` to execute a batch of statements for all items provided.
+Spring Batch provides a `JdbcBatchItemWriterBuilder` to construct an instance of the
+`JdbcBatchItemWriter`.
+
+[[jpaItemWriter]]
+=== `JpaItemWriter`
+The `JpaItemWriter` is an `ItemWriter` that uses a JPA `EntityManagerFactory` to merge
+any entities that are not part of the persistence context. Spring Batch provides a
+`JpaItemWriterBuilder` to construct an instance of the `JpaItemWriter`.
+
+[[specializedReaders]]
+== Specialized Readers
+Spring Batch offers the following specialized readers:
+
+* xref:readers-and-writers/item-reader-writer-implementations.adoc#ldifReader[`LdifReader`]
+* xref:readers-and-writers/item-reader-writer-implementations.adoc#mappingLdifReader[`MappingLdifReader`]
+* xref:readers-and-writers/item-reader-writer-implementations.adoc#avroItemReader[`AvroItemReader`]
+
+[[ldifReader]]
+=== `LdifReader`
+The `LdifReader` reads LDIF (LDAP Data Interchange Format) records from a `Resource`,
+parses them, and returns an `LdapAttributes` object for each `read` executed.
+Spring Batch
+provides a `LdifReaderBuilder` to construct an instance of the `LdifReader`.
+
+
+[[mappingLdifReader]]
+=== `MappingLdifReader`
+The `MappingLdifReader` reads LDIF (LDAP Data Interchange Format) records from a
+`Resource`, parses them, and then maps each LDIF record to a POJO (Plain Old Java Object).
+Each read returns a POJO. Spring Batch provides a `MappingLdifReaderBuilder` to construct
+an instance of the `MappingLdifReader`.
+
+[[avroItemReader]]
+=== `AvroItemReader`
+The `AvroItemReader` reads serialized Avro data from a `Resource`.
+Each read returns an instance of the type specified by a Java class or Avro schema.
+The reader can optionally be configured for input that does or does not embed an Avro schema.
+Spring Batch provides an `AvroItemReaderBuilder` to construct an instance of the `AvroItemReader`.
+
+[[specializedWriters]]
+== Specialized Writers
+Spring Batch offers the following specialized writers:
+
+* xref:readers-and-writers/item-reader-writer-implementations.adoc#simpleMailMessageItemWriter[`SimpleMailMessageItemWriter`]
+* xref:readers-and-writers/item-reader-writer-implementations.adoc#avroItemWriter[`AvroItemWriter`]
+
+[[simpleMailMessageItemWriter]]
+=== `SimpleMailMessageItemWriter`
+The `SimpleMailMessageItemWriter` is an `ItemWriter` that can send mail messages. It
+delegates the actual sending of messages to an instance of `MailSender`. Spring Batch
+provides a `SimpleMailMessageItemWriterBuilder` to construct an instance of the
+`SimpleMailMessageItemWriter`.
+
+[[avroItemWriter]]
+=== `AvroItemWriter`
+The `AvroItemWriter` serializes Java objects to a `WritableResource` according to the given type or schema.
+The writer can optionally be configured to embed an Avro schema in the output or not.
+Spring Batch provides an `AvroItemWriterBuilder` to construct an instance of the `AvroItemWriter`.
+
+
+[[specializedProcessors]]
+== Specialized Processors
+Spring Batch offers the following specialized processors:
+
+* xref:readers-and-writers/item-reader-writer-implementations.adoc#scriptItemProcessor[`ScriptItemProcessor`]
+
+[[scriptItemProcessor]]
+=== `ScriptItemProcessor`
+The `ScriptItemProcessor` is an `ItemProcessor` that passes the current item to the
+provided script, and the result of the script is returned by the processor. Spring
+Batch provides a `ScriptItemProcessorBuilder` to construct an instance of the
+`ScriptItemProcessor`.
diff --git a/spring-batch-docs/modules/ROOT/pages/readers-and-writers/item-reader.adoc b/spring-batch-docs/modules/ROOT/pages/readers-and-writers/item-reader.adoc
new file mode 100644
index 0000000000..f653ea7639
--- /dev/null
+++ b/spring-batch-docs/modules/ROOT/pages/readers-and-writers/item-reader.adoc
@@ -0,0 +1,48 @@
+[[itemReader]]
+= `ItemReader`
+
+Although a simple concept, an `ItemReader` is the means for providing data from many
+different types of input. The most general examples include:
+
+* Flat File: Flat-file item readers read lines of data from a flat file that typically
+describes records with fields of data defined by fixed positions in the file or delimited
+by some special character (such as a comma).
+
+* XML: XML `ItemReaders` process XML independently of technologies used for parsing,
+mapping, and validating objects. Input data can be validated against an XSD schema.
+
+* Database: A database resource is accessed to return result sets, which can be mapped to
+objects for processing.
+The default SQL `ItemReader` implementations invoke a `RowMapper`
+to return objects, keep track of the current row if restart is required, store basic
+statistics, and provide some transaction enhancements that are explained later.
+
+There are many more possibilities, but we focus on the basic ones for this chapter. A
+complete list of all available `ItemReader` implementations can be found in
+xref:appendix.adoc#listOfReadersAndWriters[Appendix A].
+
+`ItemReader` is a basic interface for generic
+input operations, as shown in the following interface definition:
+
+[source, java]
+----
+public interface ItemReader<T> {
+
+	T read() throws Exception, UnexpectedInputException, ParseException, NonTransientResourceException;
+
+}
+----
+
+The `read` method defines the most essential contract of the `ItemReader`. Calling it
+returns one item or `null` if no more items are left. An item might represent a line in a
+file, a row in a database, or an element in an XML file. It is generally expected that
+these are mapped to a usable domain object (such as `Trade`, `Foo`, or others), but there
+is no requirement in the contract to do so.
+
+It is expected that implementations of the `ItemReader` interface are forward-only.
+However, if the underlying resource is transactional (such as a JMS queue), then calling
+`read` may return the same logical item on subsequent calls in a rollback scenario. It is
+also worth noting that a lack of items to process by an `ItemReader` does not cause an
+exception to be thrown. For example, a database `ItemReader` that is configured with a
+query that returns 0 results returns `null` on the first invocation of `read`.
+
diff --git a/spring-batch-docs/modules/ROOT/pages/readers-and-writers/item-stream.adoc b/spring-batch-docs/modules/ROOT/pages/readers-and-writers/item-stream.adoc
new file mode 100644
index 0000000000..edc6b6ef7b
--- /dev/null
+++ b/spring-batch-docs/modules/ROOT/pages/readers-and-writers/item-stream.adoc
@@ -0,0 +1,38 @@
+[[itemStream]]
+= `ItemStream`
+
+Both `ItemReaders` and `ItemWriters` serve their individual purposes well, but there is a
+common concern among both of them that necessitates another interface. In general, as
+part of the scope of a batch job, readers and writers need to be opened and closed, and
+they require a mechanism for persisting state. The `ItemStream` interface serves that
+purpose, as shown in the following example:
+
+[source, java]
+----
+public interface ItemStream {
+
+	void open(ExecutionContext executionContext) throws ItemStreamException;
+
+	void update(ExecutionContext executionContext) throws ItemStreamException;
+
+	void close() throws ItemStreamException;
+}
+----
+
+Before describing each method, we should mention the `ExecutionContext`. Clients of an
+`ItemReader` that also implement `ItemStream` should call `open` before any calls to
+`read`, in order to open any resources such as files or to obtain connections. A similar
+restriction applies to an `ItemWriter` that implements `ItemStream`. As mentioned
+earlier, if expected data is found in the `ExecutionContext`, it may be used to start
+the `ItemReader` or `ItemWriter` at a location other than its initial state. Conversely,
+`close` is called to ensure that any resources allocated during `open` are released safely.
+`update` is called primarily to ensure that any state currently being held is loaded into
+the provided `ExecutionContext`. This method is called before committing, to ensure that
+the current state is persisted in the database before the commit.
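+
+To make the contract concrete, the following sketch shows how a stateful reader typically
+uses the three methods: it restores its position in `open`, records progress in `update`,
+and releases resources in `close`. The class, the item list, and the key name are
+hypothetical and chosen only for illustration:
+
+[source, java]
+----
+public class CountingItemReader implements ItemStreamReader<String> {
+
+	private static final String CURRENT_INDEX_KEY = "current.index";
+
+	private final List<String> items = List.of("a", "b", "c");
+
+	private int currentIndex = 0;
+
+	@Override
+	public void open(ExecutionContext executionContext) throws ItemStreamException {
+		// On a restart, resume from the position saved by update().
+		if (executionContext.containsKey(CURRENT_INDEX_KEY)) {
+			this.currentIndex = executionContext.getInt(CURRENT_INDEX_KEY);
+		}
+	}
+
+	@Override
+	public void update(ExecutionContext executionContext) throws ItemStreamException {
+		// Called before each commit so that the position survives a failure.
+		executionContext.putInt(CURRENT_INDEX_KEY, this.currentIndex);
+	}
+
+	@Override
+	public void close() throws ItemStreamException {
+		// Release any resources acquired in open() (none in this sketch).
+	}
+
+	@Override
+	public String read() {
+		return (this.currentIndex < this.items.size()) ? this.items.get(this.currentIndex++) : null;
+	}
+}
+----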
+
+In the special case where the client of an `ItemStream` is a `Step` (from the Spring
+Batch Core), an `ExecutionContext` is created for each `StepExecution` to allow users to
+store the state of a particular execution, with the expectation that it is returned if
+the same `JobInstance` is started again. For those familiar with Quartz, the semantics
+are very similar to a Quartz `JobDataMap`.
+
diff --git a/spring-batch-docs/modules/ROOT/pages/readers-and-writers/item-writer.adoc b/spring-batch-docs/modules/ROOT/pages/readers-and-writers/item-writer.adoc
new file mode 100644
index 0000000000..1fd6a9023b
--- /dev/null
+++ b/spring-batch-docs/modules/ROOT/pages/readers-and-writers/item-writer.adoc
@@ -0,0 +1,30 @@
+[[itemWriter]]
+= `ItemWriter`
+
+`ItemWriter` is similar in functionality to an `ItemReader` but with inverse operations.
+Resources still need to be located, opened, and closed, but they differ in that an
+`ItemWriter` writes out, rather than reading in. In the case of databases or queues,
+these operations may be inserts, updates, or sends. The format of the serialization of
+the output is specific to each batch job.
+
+As with `ItemReader`,
+`ItemWriter` is a fairly generic interface, as shown in the following interface definition:
+
+[source, java]
+----
+public interface ItemWriter<T> {
+
+    void write(Chunk<? extends T> items) throws Exception;
+
+}
+----
+
+As with `read` on `ItemReader`, `write` provides the basic contract of `ItemWriter`. It
+attempts to write out the list of items passed in, as long as it is open. Because it is
+generally expected that items are 'batched' together into a chunk and then output, the
+interface accepts a chunk of items, rather than an item by itself. After writing out the
+chunk, any flushing that may be necessary can be performed before returning from the write
+method. For example, if writing to a Hibernate DAO, multiple calls to write can be made,
+one for each item. The writer can then call `flush` on the Hibernate session before
+returning.
+
diff --git a/spring-batch-docs/modules/ROOT/pages/readers-and-writers/json-reading-writing.adoc b/spring-batch-docs/modules/ROOT/pages/readers-and-writers/json-reading-writing.adoc
new file mode 100644
index 0000000000..b13f6c553d
--- /dev/null
+++ b/spring-batch-docs/modules/ROOT/pages/readers-and-writers/json-reading-writing.adoc
@@ -0,0 +1,88 @@
+[[jsonReadingWriting]]
+= JSON Item Readers and Writers
+
+Spring Batch provides support for reading and writing JSON resources in the following format:
+
+[source, json]
+----
+[
+  {
+    "isin": "123",
+    "quantity": 1,
+    "price": 1.2,
+    "customer": "foo"
+  },
+  {
+    "isin": "456",
+    "quantity": 2,
+    "price": 1.4,
+    "customer": "bar"
+  }
+]
+----
+
+It is assumed that the JSON resource is an array of JSON objects corresponding to
+individual items. Spring Batch is not tied to any particular JSON library.
+
+[[JsonItemReader]]
+== `JsonItemReader`
+
+The `JsonItemReader` delegates JSON parsing and binding to implementations of the
+`org.springframework.batch.item.json.JsonObjectReader` interface. This interface
+is intended to be implemented by using a streaming API to read JSON objects
+in chunks.
Two implementations are currently provided:
+
+* link:$$https://github.com/FasterXML/jackson$$[Jackson] through the `org.springframework.batch.item.json.JacksonJsonObjectReader`
+* link:$$https://github.com/google/gson$$[Gson] through the `org.springframework.batch.item.json.GsonJsonObjectReader`
+
+To be able to process JSON records, the following is needed:
+
+* `Resource`: A Spring `Resource` that represents the JSON file to read.
+* `JsonObjectReader`: A JSON object reader to parse and bind JSON objects to items.
+
+The following example shows how to define a `JsonItemReader` that works with the
+previous JSON resource `org/springframework/batch/item/json/trades.json` and a
+`JsonObjectReader` based on Jackson:
+
+[source, java]
+----
+@Bean
+public JsonItemReader<Trade> jsonItemReader() {
+   return new JsonItemReaderBuilder<Trade>()
+                 .jsonObjectReader(new JacksonJsonObjectReader<>(Trade.class))
+                 .resource(new ClassPathResource("trades.json"))
+                 .name("tradeJsonItemReader")
+                 .build();
+}
+----
+
+[[jsonfileitemwriter]]
+== `JsonFileItemWriter`
+
+The `JsonFileItemWriter` delegates the marshalling of items to the
+`org.springframework.batch.item.json.JsonObjectMarshaller` interface. The contract
+of this interface is to take an object and marshall it to a JSON `String`.
+Two implementations are currently provided:
+
+* link:$$https://github.com/FasterXML/jackson$$[Jackson] through the `org.springframework.batch.item.json.JacksonJsonObjectMarshaller`
+* link:$$https://github.com/google/gson$$[Gson] through the `org.springframework.batch.item.json.GsonJsonObjectMarshaller`
+
+To be able to write JSON records, the following is needed:
+
+* `Resource`: A Spring `Resource` that represents the JSON file to write.
+* `JsonObjectMarshaller`: A JSON object marshaller to marshall objects to the JSON format.
+
+The following example shows how to define a `JsonFileItemWriter`:
+
+[source, java]
+----
+@Bean
+public JsonFileItemWriter<Trade> jsonFileItemWriter() {
+   return new JsonFileItemWriterBuilder<Trade>()
+                 .jsonObjectMarshaller(new JacksonJsonObjectMarshaller<>())
+                 .resource(new ClassPathResource("trades.json"))
+                 .name("tradeJsonFileItemWriter")
+                 .build();
+}
+----
+
diff --git a/spring-batch-docs/modules/ROOT/pages/readers-and-writers/multi-file-input.adoc b/spring-batch-docs/modules/ROOT/pages/readers-and-writers/multi-file-input.adoc
new file mode 100644
index 0000000000..08307e720d
--- /dev/null
+++ b/spring-batch-docs/modules/ROOT/pages/readers-and-writers/multi-file-input.adoc
@@ -0,0 +1,60 @@
+[[multiFileInput]]
+= Multi-File Input
+
+It is a common requirement to process multiple files within a single `Step`. Assuming the
+files all have the same formatting, the `MultiResourceItemReader` supports this type of
+input for both XML and flat file processing. Consider the following files in a directory:
+
+----
+file-1.txt  file-2.txt  ignored.txt
+----
+
+`file-1.txt` and `file-2.txt` are formatted the same and, for business reasons, should be
+processed together. The `MultiResourceItemReader` can be used to read in both files by
+using wildcards.
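+
+The Java configuration shown below wires the reader to a `resources()` bean. As a
+minimal sketch (the `classpath:data/input` location is an assumption for illustration),
+such a helper might resolve the wildcard as follows:
+
+[source, java]
+----
+// Hypothetical helper: resolves file-1.txt and file-2.txt (but not ignored.txt),
+// assuming the files are available on the classpath
+@Bean
+public Resource[] resources() throws IOException {
+    return new PathMatchingResourcePatternResolver()
+            .getResources("classpath:data/input/file-*.txt");
+}
+----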
+
+
+[tabs]
+====
+Java::
++
+The following example shows how to read files with wildcards in Java:
++
+.Java Configuration
+[source, java]
+----
+@Bean
+public MultiResourceItemReader multiResourceReader() {
+	return new MultiResourceItemReaderBuilder()
+					.delegate(flatFileItemReader())
+					.resources(resources())
+					.build();
+}
+----
+
+XML::
++
+The following example shows how to read files with wildcards in XML:
++
+.XML Configuration
+[source, xml]
+----
+<bean id="multiResourceReader" class="org.springframework.batch.item.file.MultiResourceItemReader">
+    <property name="resources" value="classpath:data/input/file-*.txt" />
+    <property name="delegate" ref="flatFileItemReader" />
+</bean>
+----
+
+====
+
+
+
+The referenced delegate is a simple `FlatFileItemReader`. The above configuration reads
+input from both files, handling rollback and restart scenarios. It should be noted that,
+as with any `ItemReader`, adding extra input (in this case, a file) could cause potential
+issues when restarting. It is recommended that batch jobs work with their own individual
+directories until completed successfully.
+
+NOTE: Input resources are ordered by using `MultiResourceItemReader#setComparator(Comparator)`
+ to make sure resource ordering is preserved between job runs in restart scenarios.
+
diff --git a/spring-batch-docs/modules/ROOT/pages/readers-and-writers/process-indicator.adoc b/spring-batch-docs/modules/ROOT/pages/readers-and-writers/process-indicator.adoc
new file mode 100644
index 0000000000..2c3ae044c0
--- /dev/null
+++ b/spring-batch-docs/modules/ROOT/pages/readers-and-writers/process-indicator.adoc
@@ -0,0 +1,75 @@
+[[process-indicator]]
+= Preventing State Persistence
+
+By default, all of the `ItemReader` and `ItemWriter` implementations store their current
+state in the `ExecutionContext` before it is committed. However, this may not always be
+the desired behavior. For example, many developers choose to make their database readers
+'rerunnable' by using a process indicator. An extra column is added to the input data to
+indicate whether or not it has been processed. When a particular record is being read (or
+written), the processed flag is flipped from `false` to `true`. The SQL statement can then
+contain an extra statement in the `where` clause, such as `where PROCESSED_IND = false`,
+thereby ensuring that only unprocessed records are returned in the case of a restart. In
+this scenario, it is preferable not to store any state, such as the current row number,
+since it is irrelevant upon restart. For this reason, all readers and writers include the
+`saveState` property.
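+
+To make the pattern concrete before the configuration examples that follow, here is a
+minimal, hypothetical sketch of the write side (the `games` table, the `PROCESSED_IND`
+column, and the `PlayerSummary` getters are assumptions for illustration): each written
+record has its indicator flipped so that a restarted job selects only unprocessed rows.
+
+[source, java]
+----
+import org.springframework.batch.item.Chunk;
+import org.springframework.batch.item.ItemWriter;
+import org.springframework.jdbc.core.JdbcTemplate;
+
+public class ProcessedFlagWriter implements ItemWriter<PlayerSummary> {
+
+    private final JdbcTemplate jdbcTemplate;
+
+    public ProcessedFlagWriter(JdbcTemplate jdbcTemplate) {
+        this.jdbcTemplate = jdbcTemplate;
+    }
+
+    @Override
+    public void write(Chunk<? extends PlayerSummary> items) {
+        for (PlayerSummary item : items) {
+            // Flip the process indicator so a restart skips this record
+            jdbcTemplate.update(
+                "update games set PROCESSED_IND = true "
+                    + "where player_id = ? and year_no = ?",
+                item.getPlayerId(), item.getYearNo());
+        }
+    }
+}
+----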
+
+
+[tabs]
+====
+Java::
++
+The following bean definition shows how to prevent state persistence in Java:
++
+.Java Configuration
+[source, java]
+----
+@Bean
+public JdbcCursorItemReader<PlayerSummary> playerSummarizationSource(DataSource dataSource) {
+	return new JdbcCursorItemReaderBuilder<PlayerSummary>()
+				.dataSource(dataSource)
+				.rowMapper(new PlayerSummaryMapper())
+				.saveState(false)
+				.sql("SELECT games.player_id, games.year_no, SUM(COMPLETES),"
+						+ " SUM(ATTEMPTS), SUM(PASSING_YARDS), SUM(PASSING_TD),"
+						+ " SUM(INTERCEPTIONS), SUM(RUSHES), SUM(RUSH_YARDS),"
+						+ " SUM(RECEPTIONS), SUM(RECEPTIONS_YARDS), SUM(TOTAL_TD)"
+						+ " from games, players where players.player_id ="
+						+ " games.player_id group by games.player_id, games.year_no")
+				.build();
+}
+----
+
+XML::
++
+The following bean definition shows how to prevent state persistence in XML:
++
+.XML Configuration
+[source, xml]
+----
+<bean id="playerSummarizationSource"
+      class="org.spr...JdbcCursorItemReader">
+    <property name="dataSource" ref="dataSource" />
+    <property name="rowMapper">
+        <bean class="org.spr...PlayerSummaryMapper" />
+    </property>
+    <property name="saveState" value="false" />
+    <property name="sql">
+        <value>
+            SELECT games.player_id, games.year_no, SUM(COMPLETES),
+            SUM(ATTEMPTS), SUM(PASSING_YARDS), SUM(PASSING_TD),
+            SUM(INTERCEPTIONS), SUM(RUSHES), SUM(RUSH_YARDS),
+            SUM(RECEPTIONS), SUM(RECEPTIONS_YARDS), SUM(TOTAL_TD)
+            from games, players where players.player_id =
+            games.player_id group by games.player_id, games.year_no
+        </value>
+    </property>
+</bean>
+----
+
+====
+
+
+
+The `ItemReader` configured above does not make any entries in the `ExecutionContext` for
+any executions in which it participates.
+
diff --git a/spring-batch-docs/modules/ROOT/pages/readers-and-writers/reusing-existing-services.adoc b/spring-batch-docs/modules/ROOT/pages/readers-and-writers/reusing-existing-services.adoc
new file mode 100644
index 0000000000..d441bbd993
--- /dev/null
+++ b/spring-batch-docs/modules/ROOT/pages/readers-and-writers/reusing-existing-services.adoc
@@ -0,0 +1,112 @@
+[[reusingExistingServices]]
+= Reusing Existing Services
+
+Batch systems are often used in conjunction with other application styles. The most
+common is an online system, but batch processing may also support integration or even a
+thick-client application by moving the bulk data that each application style uses. For this
+reason, it is common for many users to want to reuse existing DAOs or other services within
+their batch jobs. The Spring container itself makes this fairly easy by allowing any
+necessary class to be injected. However, there may be cases where the existing service
+needs to act as an `ItemReader` or `ItemWriter`, either to satisfy the dependency of
+another Spring Batch class or because it truly is the main `ItemReader` for a step. It is
+fairly trivial to write an adapter class for each service that needs wrapping, but
+because it is such a common concern, Spring Batch provides implementations:
+`ItemReaderAdapter` and `ItemWriterAdapter`. Both classes follow the standard Spring
+pattern of invoking a delegate and are fairly simple to set up.
+
+
+[tabs]
+====
+Java::
++
+The following Java example uses the `ItemReaderAdapter`:
++
+.Java Configuration
+[source, java]
+----
+@Bean
+public ItemReaderAdapter itemReader() {
+	ItemReaderAdapter reader = new ItemReaderAdapter();
+
+	reader.setTargetObject(fooService());
+	reader.setTargetMethod("generateFoo");
+
+	return reader;
+}
+
+@Bean
+public FooService fooService() {
+	return new FooService();
+}
+----
+
+XML::
++
+The following XML example uses the `ItemReaderAdapter`:
++
+.XML Configuration
+[source, xml]
+----
+<bean id="itemReader" class="org.springframework.batch.item.adapter.ItemReaderAdapter">
+    <property name="targetObject" ref="fooService" />
+    <property name="targetMethod" value="generateFoo" />
+</bean>
+
+<bean id="fooService" class="org.springframework.batch.item.sample.FooService" />
+----
+
+====
+
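+
+For illustration, a minimal sketch of what the hypothetical `FooService` referenced
+above might look like follows (the `Foo` type and the in-memory source are assumptions);
+note how `generateFoo()` returns `null` once it runs out of items:
+
+[source, java]
+----
+import java.util.Iterator;
+import java.util.List;
+
+public class FooService {
+
+    private final Iterator<Foo> source =
+            List.of(new Foo("first"), new Foo("second")).iterator();
+
+    // Read-side target method for ItemReaderAdapter: returns null when
+    // exhausted, mirroring the ItemReader.read() contract described below
+    public Foo generateFoo() {
+        return source.hasNext() ? source.next() : null;
+    }
+
+    // Write-side target method for ItemWriterAdapter: invoked once per item
+    public void processFoo(Foo foo) {
+        System.out.println("processing " + foo);
+    }
+}
+----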
Otherwise, it returns an +`Object`. Anything else prevents the framework from knowing when processing should end, +either causing an infinite loop or incorrect failure, depending upon the implementation +of the `ItemWriter`. + + +[tabs] +==== +Java:: ++ +The following Java example uses the `ItemWriterAdapter`: ++ +.Java Configuration +[source, java] +---- +@Bean +public ItemWriterAdapter itemWriter() { + ItemWriterAdapter writer = new ItemWriterAdapter(); + + writer.setTargetObject(fooService()); + writer.setTargetMethod("processFoo"); + + return writer; +} + +@Bean +public FooService fooService() { + return new FooService(); +} +---- + +XML:: ++ +The following XML example uses the `ItemWriterAdapter`: ++ +.XML Configuration +[source, xml] +---- + + + + + + +---- + +==== + + + diff --git a/spring-batch-docs/modules/ROOT/pages/readers-and-writers/xml-reading-writing.adoc b/spring-batch-docs/modules/ROOT/pages/readers-and-writers/xml-reading-writing.adoc new file mode 100644 index 0000000000..8cf484015b --- /dev/null +++ b/spring-batch-docs/modules/ROOT/pages/readers-and-writers/xml-reading-writing.adoc @@ -0,0 +1,373 @@ +[[xmlReadingWriting]] += XML Item Readers and Writers + +Spring Batch provides transactional infrastructure for both reading XML records and +mapping them to Java objects as well as writing Java objects as XML records. + +[NOTE] +.Constraints on streaming XML +==== +The StAX API is used for I/O, as other standard XML parsing APIs do not fit batch +processing requirements (DOM loads the whole input into memory at once and SAX controls +the parsing process by allowing the user to provide only callbacks). +==== + +We need to consider how XML input and output works in Spring Batch. First, there are a +few concepts that vary from file reading and writing but are common across Spring Batch +XML processing. With XML processing, instead of lines of records (`FieldSet` instances) that need +to be tokenized, it is assumed an XML resource is a collection of 'fragments' +corresponding to individual records, as shown in the following image: + +.XML Input +image::xmlinput.png[XML Input, scaledwidth="60%"] + +The 'trade' tag is defined as the 'root element' in the scenario above. Everything +between '<trade>' and '</trade>' is considered one 'fragment'. Spring Batch +uses Object/XML Mapping (OXM) to bind fragments to objects. However, Spring Batch is not +tied to any particular XML binding technology. Typical use is to delegate to +link:$$https://docs.spring.io/spring/docs/current/spring-framework-reference/data-access.html#oxm$$[Spring OXM], which +provides uniform abstraction for the most popular OXM technologies. The dependency on +Spring OXM is optional and you can choose to implement Spring Batch specific interfaces +if desired. The relationship to the technologies that OXM supports is shown in the +following image: + +.OXM Binding +image::oxm-fragments.png[OXM Binding, scaledwidth="60%"] + +With an introduction to OXM and how one can use XML fragments to represent records, we +can now more closely examine readers and writers. + +[[StaxEventItemReader]] +== `StaxEventItemReader` + +The `StaxEventItemReader` configuration provides a typical setup for the processing of +records from an XML input stream. 
First, consider the following set of XML records that
+the `StaxEventItemReader` can process:
+
+[source, xml]
+----
+<?xml version="1.0" encoding="UTF-8"?>
+<records>
+    <trade xmlns="https://springframework.org/batch/sample/io/oxm/domain">
+        <isin>XYZ0001</isin>
+        <quantity>5</quantity>
+        <price>11.39</price>
+        <customer>Customer1</customer>
+    </trade>
+    <trade xmlns="https://springframework.org/batch/sample/io/oxm/domain">
+        <isin>XYZ0002</isin>
+        <quantity>2</quantity>
+        <price>72.99</price>
+        <customer>Customer2c</customer>
+    </trade>
+    <trade xmlns="https://springframework.org/batch/sample/io/oxm/domain">
+        <isin>XYZ0003</isin>
+        <quantity>9</quantity>
+        <price>99.99</price>
+        <customer>Customer3</customer>
+    </trade>
+</records>
+----
+
+To be able to process the XML records, the following is needed:
+
+* Root Element Name: The name of the root element of the fragment that constitutes the
+object to be mapped. The example configuration demonstrates this with the value of `trade`.
+* Resource: A Spring `Resource` that represents the file to read.
+* `Unmarshaller`: An unmarshalling facility provided by Spring OXM for mapping the XML
+fragment to an object.
+
+
+[tabs]
+====
+Java::
++
+The following example shows how to define a `StaxEventItemReader` that works with a root
+element named `trade`, a resource of `data/iosample/input/input.xml`, and an unmarshaller
+called `tradeMarshaller` in Java:
++
+.Java Configuration
+[source, java]
+----
+@Bean
+public StaxEventItemReader<Trade> itemReader() {
+	return new StaxEventItemReaderBuilder<Trade>()
+			.name("itemReader")
+			.resource(new FileSystemResource("org/springframework/batch/item/xml/domain/trades.xml"))
+			.addFragmentRootElements("trade")
+			.unmarshaller(tradeMarshaller())
+			.build();
+}
+----
+
+XML::
++
+The following example shows how to define a `StaxEventItemReader` that works with a root
+element named `trade`, a resource of `data/iosample/input/input.xml`, and an unmarshaller
+called `tradeMarshaller` in XML:
++
+.XML Configuration
+[source, xml]
+----
+<bean id="itemReader" class="org.spr...StaxEventItemReader">
+    <property name="fragmentRootElementName" value="trade" />
+    <property name="resource" value="data/iosample/input/input.xml" />
+    <property name="unmarshaller" ref="tradeMarshaller" />
+</bean>
+----
+
+====
+
+
+
+Note that, in this example, we have chosen to use an `XStreamMarshaller`, which accepts
+an alias passed in as a map with the first key and value being the name of the fragment
+(that is, a root element) and the object type to bind. Then, similar to a `FieldSet`, the
+names of the other elements that map to fields within the object type are described as
+key/value pairs in the map. In the configuration file, we can use a Spring configuration
+utility to describe the required alias.
+
+
+[tabs]
+====
+Java::
++
+The following example shows how to describe the alias in Java:
++
+.Java Configuration
+[source, java]
+----
+@Bean
+public XStreamMarshaller tradeMarshaller() {
+	Map<String, Class<?>> aliases = new HashMap<>();
+	aliases.put("trade", Trade.class);
+	aliases.put("price", BigDecimal.class);
+	aliases.put("isin", String.class);
+	aliases.put("customer", String.class);
+	aliases.put("quantity", Long.class);
+
+	XStreamMarshaller marshaller = new XStreamMarshaller();
+
+	marshaller.setAliases(aliases);
+
+	return marshaller;
+}
+----
+
+XML::
++
+The following example shows how to describe the alias in XML:
++
+.XML Configuration
+[source, xml]
+----
+<bean id="tradeMarshaller"
+      class="org.springframework.oxm.xstream.XStreamMarshaller">
+    <property name="aliases">
+        <util:map id="aliases">
+            <entry key="trade"
+                   value="org.springframework.batch.sample.domain.trade.Trade" />
+            <entry key="price" value="java.math.BigDecimal" />
+            <entry key="isin" value="java.lang.String" />
+            <entry key="customer" value="java.lang.String" />
+            <entry key="quantity" value="java.lang.Long" />
+        </util:map>
+    </property>
+</bean>
+----
+
+====
+
+
+
+On input, the reader reads the XML resource until it recognizes that a new fragment is
+about to start. By default, the reader matches the element name to recognize that a new
+fragment is about to start. The reader creates a standalone XML document from the
+fragment and passes the document to a deserializer (typically a wrapper around a Spring
+OXM `Unmarshaller`) to map the XML to a Java object.
+
+In summary, this procedure is analogous to the following Java code, which uses the
+injection provided by the Spring configuration:
+
+[source, java]
+----
+StaxEventItemReader<Trade> xmlStaxEventItemReader = new StaxEventItemReader<>();
+Resource resource = new ByteArrayResource(xmlResource.getBytes());
+
+Map<String, String> aliases = new HashMap<>();
+aliases.put("trade","org.springframework.batch.sample.domain.trade.Trade");
+aliases.put("price","java.math.BigDecimal");
+aliases.put("customer","java.lang.String");
+aliases.put("isin","java.lang.String");
+aliases.put("quantity","java.lang.Long");
+XStreamMarshaller unmarshaller = new XStreamMarshaller();
+unmarshaller.setAliases(aliases);
+xmlStaxEventItemReader.setUnmarshaller(unmarshaller);
+xmlStaxEventItemReader.setResource(resource);
+xmlStaxEventItemReader.setFragmentRootElementName("trade");
+xmlStaxEventItemReader.open(new ExecutionContext());
+
+boolean hasNext = true;
+
+Trade trade = null;
+
+while (hasNext) {
+    trade = xmlStaxEventItemReader.read();
+    if (trade == null) {
+        hasNext = false;
+    }
+    else {
+        System.out.println(trade);
+    }
+}
+----
+
+[[StaxEventItemWriter]]
+== `StaxEventItemWriter`
+
+Output works symmetrically to input. The `StaxEventItemWriter` needs a `Resource`, a
+marshaller, and a `rootTagName`. A Java object is passed to a marshaller (typically a
+standard Spring OXM `Marshaller`), which writes to a `Resource` by using a custom event
+writer that filters the `StartDocument` and `EndDocument` events produced for each
+fragment by the OXM tools.
+// TODO How does `MarshallingEventWriterSerializer` get involved? Because there's a
+// property whose name is `marshaller`?
+
+
+[tabs]
+====
+Java::
++
+The following Java example uses the `MarshallingEventWriterSerializer`:
++
+.Java Configuration
+[source, java]
+----
+@Bean
+public StaxEventItemWriter<Trade> itemWriter(Resource outputResource) {
+	return new StaxEventItemWriterBuilder<Trade>()
+			.name("tradesWriter")
+			.marshaller(tradeMarshaller())
+			.resource(outputResource)
+			.rootTagName("trade")
+			.overwriteOutput(true)
+			.build();
+}
+----
+
+XML::
++
+The following XML example uses the `MarshallingEventWriterSerializer`:
++
+.XML Configuration
+[source, xml]
+----
+<bean id="itemWriter" class="org.spr...StaxEventItemWriter">
+    <property name="resource" ref="outputResource" />
+    <property name="marshaller" ref="tradeMarshaller" />
+    <property name="rootTagName" value="trade" />
+    <property name="overwriteOutput" value="true" />
+</bean>
+----
+
+====
+
+
+The preceding configuration sets up the three required properties and sets the optional
+`overwriteOutput=true` attribute, mentioned earlier in this chapter, for specifying whether
+an existing file can be overwritten.
+
+
+[tabs]
+====
+Java::
++
+The following Java example uses the same marshaller as the one used in the reading example
+shown earlier in the chapter:
++
+.Java Configuration
+[source, java]
+----
+@Bean
+public XStreamMarshaller customerCreditMarshaller() {
+	XStreamMarshaller marshaller = new XStreamMarshaller();
+
+	Map<String, Class<?>> aliases = new HashMap<>();
+	aliases.put("trade", Trade.class);
+	aliases.put("price", BigDecimal.class);
+	aliases.put("isin", String.class);
+	aliases.put("customer", String.class);
+	aliases.put("quantity", Long.class);
+
+	marshaller.setAliases(aliases);
+
+	return marshaller;
+}
+----
+
+XML::
++
+The following XML example uses the same marshaller as the one used in the reading example
+shown earlier in the chapter:
++
+.XML Configuration
+[source, xml]
+----
+<bean id="customerCreditMarshaller"
+      class="org.springframework.oxm.xstream.XStreamMarshaller">
+    <property name="aliases">
+        <util:map id="aliases">
+            <entry key="trade"
+                   value="org.springframework.batch.sample.domain.trade.Trade" />
+            <entry key="price" value="java.math.BigDecimal" />
+            <entry key="isin" value="java.lang.String" />
+            <entry key="customer" value="java.lang.String" />
+            <entry key="quantity" value="java.lang.Long" />
+        </util:map>
+    </property>
+</bean>
+----
+
+====
+
+
+
+To summarize with a Java example, the following code illustrates all of the points
+discussed, demonstrating the programmatic setup of the required properties:
+
+[source, java]
+----
+FileSystemResource resource = new FileSystemResource("data/outputFile.xml");
+
+Map<String, String> aliases = new HashMap<>();
+aliases.put("trade","org.springframework.batch.sample.domain.trade.Trade");
+aliases.put("price","java.math.BigDecimal");
+aliases.put("customer","java.lang.String");
+aliases.put("isin","java.lang.String");
+aliases.put("quantity","java.lang.Long");
+XStreamMarshaller marshaller = new XStreamMarshaller();
+marshaller.setAliases(aliases);
+
+StaxEventItemWriter<Trade> staxItemWriter =
+	new StaxEventItemWriterBuilder<Trade>()
+				.name("tradesWriter")
+				.marshaller(marshaller)
+				.resource(resource)
+				.rootTagName("trade")
+				.overwriteOutput(true)
+				.build();
+
+staxItemWriter.afterPropertiesSet();
+
+ExecutionContext executionContext = new ExecutionContext();
+staxItemWriter.open(executionContext);
+Trade trade = new Trade();
+trade.setPrice(new BigDecimal("11.39"));
+trade.setIsin("XYZ0001");
+trade.setQuantity(5L);
+trade.setCustomer("Customer1");
+staxItemWriter.write(new Chunk<>(trade));
+----
+
diff --git a/spring-batch-docs/modules/ROOT/pages/readersAndWriters.adoc b/spring-batch-docs/modules/ROOT/pages/readersAndWriters.adoc
new file mode 100644
index 0000000000..796390d623
--- /dev/null
+++ b/spring-batch-docs/modules/ROOT/pages/readersAndWriters.adoc
@@ -0,0 +1,13 @@
+
+[[readersAndWriters]]
+= ItemReaders and ItemWriters
+:page-section-summary-toc: 1
+
+ifndef::onlyonetoggle[]
+endif::onlyonetoggle[]
+
+All batch processing can be described in its most simple form as reading in large amounts
+of data, performing some type of calculation or transformation, and writing the result
+out. Spring Batch provides three key interfaces to help perform bulk reading and writing:
+`ItemReader`, `ItemProcessor`, and `ItemWriter`.
+
diff --git a/spring-batch-docs/src/main/asciidoc/repeat.adoc b/spring-batch-docs/modules/ROOT/pages/repeat.adoc
similarity index 95%
rename from spring-batch-docs/src/main/asciidoc/repeat.adoc
rename to spring-batch-docs/modules/ROOT/pages/repeat.adoc
index 8759d351a4..4836d338b2 100644
--- a/spring-batch-docs/src/main/asciidoc/repeat.adoc
+++ b/spring-batch-docs/modules/ROOT/pages/repeat.adoc
@@ -1,18 +1,8 @@
-:toc: left
-:toclevels: 4
-
 [[repeat]]
+= Repeat
-== Repeat
-
-include::attributes.adoc[]
-ifndef::onlyonetoggle[]
-include::toggle.adoc[]
-endif::onlyonetoggle[]
-
-[[repeatTemplate]]
-
-=== RepeatTemplate
+[[repeattemplate]]
+== RepeatTemplate
 
 Batch processing is about repetitive actions, either as a simple optimization or as part
of a job.
To strategize and generalize the repetition and to provide what amounts to an @@ -74,9 +64,8 @@ considerations intrinsic to the work being done in the callback. Others are effe infinite loops (as far as the callback is concerned), and the completion decision is delegated to an external policy, as in the case shown in the preceding example. -[[repeatContext]] - -==== RepeatContext +[[repeatcontext]] +=== RepeatContext The method parameter for the `RepeatCallback` is a `RepeatContext`. Many callbacks ignore the context. However, if necessary, you can use it as an attribute bag to store transient @@ -89,7 +78,7 @@ calls to `iterate`. This is the case, for instance, if you want to count the num occurrences of an event in the iteration and remember it across subsequent calls. [[repeatStatus]] -==== RepeatStatus +=== RepeatStatus `RepeatStatus` is an enumeration used by Spring Batch to indicate whether processing has finished. It has two possible `RepeatStatus` values: @@ -109,7 +98,7 @@ continuable flag. In other words, if either status is `FINISHED`, the result is `FINISHED`. [[completionPolicies]] -=== Completion Policies +== Completion Policies Inside a `RepeatTemplate`, the termination of the loop in the `iterate` method is determined by a `CompletionPolicy`, which is also a factory for the `RepeatContext`. The @@ -128,7 +117,7 @@ decisions. For example, a batch processing window that prevents batch jobs from once the online systems are in use would require a custom policy. [[repeatExceptionHandling]] -=== Exception Handling +== Exception Handling If there is an exception thrown inside a `RepeatCallback`, the `RepeatTemplate` consults an `ExceptionHandler`, which can decide whether or not to re-throw the exception. @@ -160,7 +149,7 @@ current `RepeatContext`. When set to `true`, the limit is kept across sibling co a nested iteration (such as a set of chunks inside a step). [[repeatListeners]] -=== Listeners +== Listeners Often, it is useful to be able to receive additional callbacks for cross-cutting concerns across a number of different iterations. For this purpose, Spring Batch provides the @@ -189,7 +178,7 @@ order. In this case, `open` and `before` are called in the same order while `aft `onError`, and `close` are called in reverse order. [[repeatParallelProcessing]] -=== Parallel Processing +== Parallel Processing Implementations of `RepeatOperations` are not restricted to executing the callback sequentially. It is quite important that some implementations are able to execute their @@ -200,7 +189,7 @@ of executing the whole iteration in the same thread (the same as a normal `RepeatTemplate`). [[declarativeIteration]] -=== Declarative Iteration +== Declarative Iteration Sometimes, there is some business processing that you know you want to repeat every time it happens. The classic example of this is the optimization of a message pipeline. @@ -210,31 +199,17 @@ interceptor that wraps a method call in a `RepeatOperations` object for this purpose. The `RepeatOperationsInterceptor` executes the intercepted method and repeats according to the `CompletionPolicy` in the provided `RepeatTemplate`. 
-[role="xmlContent"] -The following example shows declarative iteration that uses the Spring AOP namespace to -repeat a service call to a method called `processMessage` (for more detail on how to -configure AOP interceptors, see the -<>): - -[source, xml, role="xmlContent"] ----- - - - - - - ----- -[role="javaContent"] +[tabs] +==== +Java:: ++ The following example uses Java configuration to repeat a service call to a method called `processMessage` (for more detail on how to configure AOP interceptors, see the <>): - -[source, java, role="javaContent"] ++ +[source, java] ---- @Bean public MyService myService() { @@ -254,6 +229,27 @@ public MyService myService() { } ---- +XML:: ++ +The following example shows declarative iteration that uses the Spring AOP namespace to +repeat a service call to a method called `processMessage` (for more detail on how to +configure AOP interceptors, see the +<>): ++ +[source, xml] +---- + + + + + + +---- +==== + + The preceding example uses a default `RepeatTemplate` inside the interceptor. To change the policies, listeners, and other details, you can inject an instance of `RepeatTemplate` into the interceptor. diff --git a/spring-batch-docs/src/main/asciidoc/retry.adoc b/spring-batch-docs/modules/ROOT/pages/retry.adoc similarity index 92% rename from spring-batch-docs/src/main/asciidoc/retry.adoc rename to spring-batch-docs/modules/ROOT/pages/retry.adoc index bf0f68eb35..69bd983e08 100644 --- a/spring-batch-docs/src/main/asciidoc/retry.adoc +++ b/spring-batch-docs/modules/ROOT/pages/retry.adoc @@ -1,10 +1,10 @@ -:toc: left -:toclevels: 4 [[retry]] -== Retry -include::attributes.adoc[] +[[retry]] += Retry +:page-section-summary-toc: 1 + To make processing more robust and less prone to failure, it sometimes helps to automatically retry a failed operation in case it might succeed on a subsequent attempt. diff --git a/spring-batch-docs/src/main/asciidoc/scalability.adoc b/spring-batch-docs/modules/ROOT/pages/scalability.adoc similarity index 91% rename from spring-batch-docs/src/main/asciidoc/scalability.adoc rename to spring-batch-docs/modules/ROOT/pages/scalability.adoc index 4a387f8e5b..2acb263f8a 100644 --- a/spring-batch-docs/src/main/asciidoc/scalability.adoc +++ b/spring-batch-docs/modules/ROOT/pages/scalability.adoc @@ -1,14 +1,8 @@ -:toc: left -:toclevels: 4 [[scalability]] -== Scaling and Parallel Processing - -include::attributes.adoc[] -ifndef::onlyonetoggle[] -include::toggle.adoc[] -endif::onlyonetoggle[] +[[scaling-and-parallel-processing]] += Scaling and Parallel Processing Many batch processing problems can be solved with single-threaded, single-process jobs, so it is always a good idea to properly check if that meets your needs before thinking @@ -34,27 +28,21 @@ These break down into categories as well, as follows: First, we review the single-process options. Then we review the multi-process options. [[multithreadedStep]] -=== Multi-threaded Step +== Multi-threaded Step The simplest way to start parallel processing is to add a `TaskExecutor` to your Step configuration. -[role="xmlContent"] -For example, you might add an attribute TO the `tasklet`, as follows: -[source, xml, role="xmlContent"] ----- - - ... 
-</step>
-----
-
-[role="javaContent"]
+[tabs]
+====
+Java::
++
 When using Java configuration, you can add a `TaskExecutor` to the step,
 as the following example shows:
-
++
 .Java Configuration
-[source, java, role="javaContent"]
+[source, java]
 ----
 @Bean
 public TaskExecutor taskExecutor() {
     return new SimpleAsyncTaskExecutor("spring_batch");
 }
 
 @Bean
 public Step sampleStep(TaskExecutor taskExecutor, JobRepository jobRepository, PlatformTransactionManager transactionManager) {
 	return new StepBuilder("sampleStep", jobRepository)
 				.<String, String>chunk(10, transactionManager)
 				.reader(itemReader())
 				.writer(itemWriter())
 				.taskExecutor(taskExecutor)
 				.build();
 }
 ----
 
+XML::
++
+For example, you might add an attribute to the `tasklet`, as follows:
++
+[source, xml]
+----
+<step id="loading">
+    <tasklet task-executor="taskExecutor">...</tasklet>
+</step>
+----
+
+====
+
+
 In this example, the `taskExecutor` is a reference to another bean definition that
 implements the `TaskExecutor` interface.
 https://docs.spring.io/spring/docs/current/javadoc-api/org/springframework/core/task/TaskExecutor.html[`TaskExecutor`]
 is a standard Spring interface. In
 addition to any limits placed by the task executor (such as whether it is backed by a
 thread pool), the tasklet configuration has a throttle limit (default: 4). You may need
 to increase this limit to ensure that a thread pool is fully used.
 
-[role="xmlContent"]
-For example, you might increase the throttle-limit, as follows:
-[source, xml, role="xmlContent"]
-----
-<step id="loading">
-    <tasklet task-executor="taskExecutor" throttle-limit="20">...</tasklet>
-</step>
-----
-
-[role="javaContent"]
+[tabs]
+====
+Java::
++
 When using Java configuration, the builders provide access to the throttle limit,
 as follows:
-
++
 .Java Configuration
-[source, java, role="javaContent"]
+[source, java]
 ----
 @Bean
 public Step sampleStep(TaskExecutor taskExecutor, JobRepository jobRepository, PlatformTransactionManager transactionManager) {
 	return new StepBuilder("sampleStep", jobRepository)
 				.<String, String>chunk(10, transactionManager)
 				.reader(itemReader())
 				.writer(itemWriter())
 				.taskExecutor(taskExecutor)
 				.throttleLimit(20)
 				.build();
 }
 ----
 
+XML::
++
+For example, you might increase the throttle-limit, as follows:
++
+[source, xml]
+----
+<step id="loading">
+    <tasklet task-executor="taskExecutor" throttle-limit="20">...</tasklet>
+</step>
+----
+
+====
+
+
+
 Note also that there may be limits placed on concurrency by any pooled resources used in
 your step, such as a `DataSource`. Be sure to make the pool in those resources at least
 as large as the desired number of concurrent threads in the step.
 
@@ … @@ possible to work with stateless or thread safe readers and writers, and there is
 (called `parallelJob`) in the
 https://github.com/spring-projects/spring-batch/tree/main/spring-batch-samples[Spring
 Batch Samples] that shows the use of a process indicator (see
-<<process-indicator>>) to keep track
+xref:readers-and-writers/process-indicator.adoc[Preventing State Persistence]) to keep track
 of items that have been processed in a database input table.
 
 Spring Batch provides some implementations of `ItemWriter` and `ItemReader`. Usually,
@@ … @@ processing and writing is the most expensive part of the chunk, your step may st
 complete much more quickly than it would in a single-threaded configuration.
 
 [[scalabilityParallelSteps]]
-=== Parallel Steps
+== Parallel Steps
 
 As long as the application logic that needs to be parallelized can be split into distinct
 responsibilities and assigned to individual steps, it can be parallelized in a single
 process. Parallel Step execution is easy to configure and use.
-[role="xmlContent"] -For example, executing steps `(step1,step2)` in parallel with `step3` is straightforward, -as follows: -[source, xml, role="xmlContent"] ----- - - - - - - - - - - - - - - ----- - -[role="javaContent"] +[tabs] +==== +Java:: ++ When using Java configuration, executing steps `(step1,step2)` in parallel with `step3` is straightforward, as follows: - ++ .Java Configuration -[source, java, role="javaContent"] +[source, java] ---- @Bean public Job job(JobRepository jobRepository) { @@ -216,23 +210,51 @@ public TaskExecutor taskExecutor() { } ---- +XML:: ++ +For example, executing steps `(step1,step2)` in parallel with `step3` is straightforward, +as follows: ++ +[source, xml] +---- + + + + + + + + + + + + + + +---- + +==== + + + + The configurable task executor is used to specify which `TaskExecutor` implementation should execute the individual flows. The default is `SyncTaskExecutor`, but an asynchronous `TaskExecutor` is required to run the steps in parallel. Note that the job ensures that every flow in the split completes before aggregating the exit statuses and transitioning. -See the section on <> for more detail. +See the section on xref:step/controlling-flow.adoc#split-flows[Split Flows] for more detail. [[remoteChunking]] -=== Remote Chunking +== Remote Chunking In remote chunking, the `Step` processing is split across multiple processes, communicating with each other through some middleware. The following image shows the pattern: .Remote Chunking -image::{batch-asciidoc}images/remote-chunking.png[Remote Chunking, scaledwidth="60%"] +image::remote-chunking.png[Remote Chunking, scaledwidth="60%"] The manager component is a single process, and the workers are multiple remote processes. This pattern works best if the manager is not a bottleneck, so the processing must be more @@ -254,11 +276,11 @@ message. JMS is the obvious candidate, but other options (such as JavaSpaces) ex the grid computing and shared memory product space. See the section on -<> +xref:spring-batch-integration/sub-elements.adoc#remote-chunking[Spring Batch Integration - Remote Chunking] for more detail. [[partitioning]] -=== Partitioning +== Partitioning Spring Batch also provides an SPI for partitioning a `Step` execution and executing it remotely. In this case, the remote participants are `Step` instances that could just as @@ -266,7 +288,7 @@ easily have been configured and used for local processing. The following image s pattern: .Partitioning -image::{batch-asciidoc}images/partitioning-overview.png[Partitioning Overview, scaledwidth="60%"] +image::partitioning-overview.png[Partitioning Overview, scaledwidth="60%"] The `Job` runs on the left-hand side as a sequence of `Step` instances, and one of the `Step` instances is labeled as a manager. The workers in this picture are all identical @@ -283,36 +305,22 @@ environment. The strategy interfaces are `PartitionHandler` and `StepExecutionSp and the following sequence diagram shows their role: .Partitioning SPI -image::{batch-asciidoc}images/partitioning-spi.png[Partitioning SPI, scaledwidth="60%"] +image::partitioning-spi.png[Partitioning SPI, scaledwidth="60%"] The `Step` on the right in this case is the "`remote`" worker, so, potentially, there are many objects and or processes playing this role, and the `PartitionStep` is shown driving the execution. 
-[role="xmlContent"] -The following example shows the `PartitionStep` configuration when using XML -configuration: - -[source, xml, role="xmlContent"] ----- - - - - - ----- - -[role="xmlContent"] -Similar to the multi-threaded step's `throttle-limit` attribute, the `grid-size` -attribute prevents the task executor from being saturated with requests from a single -step. -[role="javaContent"] +[tabs] +==== +Java:: ++ The following example shows the `PartitionStep` configuration when using Java configuration: - ++ .Java Configuration -[source, java, role="javaContent"] +[source, java] ---- @Bean public Step step1Manager() { @@ -324,12 +332,32 @@ public Step step1Manager() { .build(); } ---- - -[role="javaContent"] ++ Similar to the multi-threaded step's `throttleLimit` method, the `gridSize` method prevents the task executor from being saturated with requests from a single step. +XML:: ++ +The following example shows the `PartitionStep` configuration when using XML +configuration: ++ +[source, xml] +---- + + + + + +---- ++ +Similar to the multi-threaded step's `throttle-limit` attribute, the `grid-size` +attribute prevents the task executor from being saturated with requests from a single +step. + +==== + + The unit test suite for https://github.com/spring-projects/spring-batch/tree/main/spring-batch-samples/src/main/resources/jobs[Spring Batch Samples] (see `partition*Job.xml` configuration) has a simple example that you can copy and extend. @@ -340,7 +368,7 @@ use an alias for the step (by specifying the `name` attribute instead of the `id attribute). [[partitionHandler]] -==== PartitionHandler +=== PartitionHandler `PartitionHandler` is the component that knows about the fabric of the remoting or grid environment. It is able to send `StepExecution` requests to the remote `Step` @@ -362,29 +390,17 @@ executes `Step` instances locally in separate threads of execution, using the `TaskExecutor` strategy from Spring. The implementation is called `TaskExecutorPartitionHandler`. -[role="xmlContent"] -The `TaskExecutorPartitionHandler` is the default for a step configured with the XML -namespace shown previously. You can also configure it explicitly, as follows: -[source, xml, role="xmlContent"] ----- - - - - - - - - ----- - -[role="javaContent"] +[tabs] +==== +Java:: ++ You can explicitly configure the `TaskExecutorPartitionHandler` with Java configuration, as follows: - ++ .Java Configuration -[source, java, role="javaContent"] +[source, java] ---- @Bean public Step step1Manager() { @@ -404,6 +420,27 @@ public PartitionHandler partitionHandler() { } ---- +XML:: ++ +The `TaskExecutorPartitionHandler` is the default for a step configured with the XML +namespace shown previously. You can also configure it explicitly, as follows: ++ +[source, xml] +---- + + + + + + + + + +---- +==== + + + The `gridSize` attribute determines the number of separate step executions to create, so it can be matched to the size of the thread pool in the `TaskExecutor`. Alternatively, it can be set to be larger than the number of threads available, which makes the blocks of @@ -415,7 +452,7 @@ systems. It can also be used for remote execution by providing a `Step` implemen that is a proxy for a remote invocation (such as using Spring Remoting). [[partitioner]] -==== Partitioner +=== Partitioner The `Partitioner` has a simpler responsibility: to generate execution contexts as input parameters for new step executions only (no need to worry about restarts). It has a @@ -450,12 +487,12 @@ this can be a useful optimization. 
The names provided by the `PartitionNameProvi match those provided by the `Partitioner`. [[bindingInputDataToSteps]] -==== Binding Input Data to Steps +=== Binding Input Data to Steps It is very efficient for the steps that are executed by the `PartitionHandler` to have identical configuration and for their input parameters to be bound at runtime from the `ExecutionContext`. This is easy to do with the StepScope feature of Spring Batch -(covered in more detail in the section on <>). For +(covered in more detail in the section on xref:step/late-binding.adoc[Late Binding]). For example, if the `Partitioner` creates `ExecutionContext` instances with an attribute key called `fileName`, pointing to a different file (or directory) for each step invocation, the `Partitioner` output might resemble the content of the following table: @@ -470,23 +507,15 @@ the `Partitioner` output might resemble the content of the following table: Then the file name can be bound to a step by using late binding to the execution context. -[role="xmlContent"] -The following example shows how to define late binding in XML: - -.XML Configuration -[source, xml, role="xmlContent"] ----- - - - ----- -[role="xmlContent"] +[tabs] +==== +Java:: ++ The following example shows how to define late binding in Java: - ++ .Java Configuration -[source, java, role="javaContent"] +[source, java] ---- @Bean public MultiResourceItemReader itemReader( @@ -498,3 +527,19 @@ public MultiResourceItemReader itemReader( .build(); } ---- + +XML:: ++ +The following example shows how to define late binding in XML: ++ +.XML Configuration +[source, xml] +---- + + + +---- + +==== + diff --git a/spring-batch-docs/src/main/asciidoc/schema-appendix.adoc b/spring-batch-docs/modules/ROOT/pages/schema-appendix.adoc similarity index 95% rename from spring-batch-docs/src/main/asciidoc/schema-appendix.adoc rename to spring-batch-docs/modules/ROOT/pages/schema-appendix.adoc index 41da1903ee..906152c009 100644 --- a/spring-batch-docs/src/main/asciidoc/schema-appendix.adoc +++ b/spring-batch-docs/modules/ROOT/pages/schema-appendix.adoc @@ -1,14 +1,12 @@ -:toc: left -:toclevels: 4 [[metaDataSchema]] [appendix] -== Meta-Data Schema +[[meta-data-schema]] += Meta-Data Schema -include::attributes.adoc[] [[metaDataSchemaOverview]] -=== Overview +== Overview The Spring Batch Metadata tables closely match the domain objects that represent them in Java. For example, `JobInstance`, `JobExecution`, `JobParameters`, and `StepExecution` @@ -24,10 +22,10 @@ variations in how individual database vendors handle data types. The following i shows an ERD model of all six tables and their relationships to one another: .Spring Batch Meta-Data ERD -image::{batch-asciidoc}images/meta-data-erd.png[Spring Batch Meta-Data ERD, scaledwidth="60%"] +image::meta-data-erd.png[Spring Batch Meta-Data ERD, scaledwidth="60%"] [[exampleDDLScripts]] -==== Example DDL Scripts +=== Example DDL Scripts The Spring Batch Core JAR file contains example scripts to create the relational tables for a number of database platforms (which are, in turn, auto-detected by the job @@ -37,7 +35,7 @@ form `schema-\*.sql`, where `*` is the short name of the target database platfor The scripts are in the package `org.springframework.batch.core`. [[migrationDDLScripts]] -==== Migration DDL Scripts +=== Migration DDL Scripts Spring Batch provides migration DDL scripts that you need to execute when you upgrade versions. These scripts can be found in the Core Jar file under `org/springframework/batch/core/migration`. 
@@ -47,7 +45,7 @@ Migration scripts are organized into folders corresponding to version numbers in * `4.1`: Contains scripts you need to migrate from a version before `4.1` to version `4.1` [[metaDataVersion]] -==== Version +=== Version Many of the database tables discussed in this appendix contain a version column. This column is important, because Spring Batch employs an optimistic locking strategy when @@ -59,7 +57,7 @@ access. This check is necessary, since, even though different batch jobs may be in different machines, they all use the same database tables. [[metaDataIdentity]] -==== Identity +=== Identity `BATCH_JOB_INSTANCE`, `BATCH_JOB_EXECUTION`, and `BATCH_STEP_EXECUTION` each contain columns ending in `_ID`. These fields act as primary keys for their respective tables. @@ -96,7 +94,7 @@ In the preceding case, a table is used in place of each sequence. The Spring cor give similar functionality. [[metaDataBatchJobInstance]] -=== The `BATCH_JOB_INSTANCE` Table +== The `BATCH_JOB_INSTANCE` Table The `BATCH_JOB_INSTANCE` table holds all information relevant to a `JobInstance` and serves as the top of the overall hierarchy. The following generic DDL statement is used @@ -117,7 +115,7 @@ The following list describes each column in the table: * `JOB_INSTANCE_ID`: The unique ID that identifies the instance. It is also the primary key. The value of this column should be obtainable by calling the `getId` method on `JobInstance`. -* `VERSION`: See <>. +* `VERSION`: See xref:schema-appendix.adoc#metaDataVersion[Version]. * `JOB_NAME`: Name of the job obtained from the `Job` object. Because it is required to identify the instance, it must not be null. * `JOB_KEY`: A serialization of the `JobParameters` that uniquely identifies separate @@ -125,7 +123,7 @@ instances of the same job from one another. (`JobInstances` with the same job na have different `JobParameters` and, thus, different `JOB_KEY` values). [[metaDataBatchJobParams]] -=== The `BATCH_JOB_EXECUTION_PARAMS` Table +== The `BATCH_JOB_EXECUTION_PARAMS` Table The `BATCH_JOB_EXECUTION_PARAMS` table holds all information relevant to the `JobParameters` object. It contains 0 or more key/value pairs passed to a `Job` and @@ -164,7 +162,7 @@ use for one and, thus, does not require it. If need be, you can add a primary ke with a database generated key without causing any issues to the framework itself. [[metaDataBatchJobExecution]] -=== The `BATCH_JOB_EXECUTION` Table +== The `BATCH_JOB_EXECUTION` Table The `BATCH_JOB_EXECUTION` table holds all information relevant to the `JobExecution` object. Every time a `Job` is run, there is always a new called `JobExecution` and a new row in @@ -193,7 +191,7 @@ The following list describes each column: * `JOB_EXECUTION_ID`: Primary key that uniquely identifies this execution. The value of this column is obtainable by calling the `getId` method of the `JobExecution` object. -* `VERSION`: See <>. +* `VERSION`: See xref:schema-appendix.adoc#metaDataVersion[Version]. * `JOB_INSTANCE_ID`: Foreign key from the `BATCH_JOB_INSTANCE` table. It indicates the instance to which this execution belongs. There may be more than one execution per instance. @@ -214,7 +212,7 @@ possible. * `LAST_UPDATED`: Timestamp representing the last time this execution was persisted. [[metaDataBatchStepExecution]] -=== The `BATCH_STEP_EXECUTION` Table +== The `BATCH_STEP_EXECUTION` Table The `BATCH_STEP_EXECUTION` table holds all information relevant to the `StepExecution` object. 
This table is similar in many ways to the `BATCH_JOB_EXECUTION` table, and there @@ -253,7 +251,7 @@ The following list describes each column: * `STEP_EXECUTION_ID`: Primary key that uniquely identifies this execution. The value of this column should be obtainable by calling the `getId` method of the `StepExecution` object. -* `VERSION`: See <>. +* `VERSION`: See xref:schema-appendix.adoc#metaDataVersion[Version]. * `STEP_NAME`: The name of the step to which this execution belongs. * `JOB_EXECUTION_ID`: Foreign key from the `BATCH_JOB_EXECUTION` table. It indicates the `JobExecution` to which this `StepExecution` belongs. There may be only one @@ -286,7 +284,7 @@ possible. * `LAST_UPDATED`: Timestamp representing the last time this execution was persisted. [[metaDataBatchJobExecutionContext]] -=== The `BATCH_JOB_EXECUTION_CONTEXT` Table +== The `BATCH_JOB_EXECUTION_CONTEXT` Table The `BATCH_JOB_EXECUTION_CONTEXT` table holds all information relevant to the `ExecutionContext` of a `Job`. There is exactly one `Job` `ExecutionContext` for each @@ -314,7 +312,7 @@ belongs. There may be more than one row associated with a given execution. * `SERIALIZED_CONTEXT`: The entire context, serialized. [[metaDataBatchStepExecutionContext]] -=== The `BATCH_STEP_EXECUTION_CONTEXT` Table +== The `BATCH_STEP_EXECUTION_CONTEXT` Table The `BATCH_STEP_EXECUTION_CONTEXT` table holds all information relevant to the `ExecutionContext` of a `Step`. There is exactly one `ExecutionContext` per @@ -343,7 +341,7 @@ belongs. There may be more than one row associated with a given execution. * `SERIALIZED_CONTEXT`: The entire context, serialized. [[metaDataArchiving]] -=== Archiving +== Archiving Because there are entries in multiple tables every time a batch job is run, it is common to create an archive strategy for the metadata tables. The tables themselves are designed @@ -361,19 +359,19 @@ this table for jobs that have not completed successfully prevents them from star the correct point if they are run again. [[multiByteCharacters]] -=== International and Multi-byte Characters +== International and Multi-byte Characters If you use multi-byte character sets (such as Chinese or Cyrillic) in your business processing, those characters might need to be persisted in the Spring Batch schema. Many users find that simply changing the schema to double the length of the `VARCHAR` columns is enough. Others prefer to configure the -<> with `max-varchar-length` half the +xref:job/configuring-repository.adoc[JobRepository] with `max-varchar-length` half the value of the `VARCHAR` column length. Some users have also reported that they use `NVARCHAR` in place of `VARCHAR` in their schema definitions. The best result depends on the database platform and the way the database server has been configured locally. [[recommendationsForIndexingMetaDataTables]] -=== Recommendations for Indexing Metadata Tables +== Recommendations for Indexing Metadata Tables Spring Batch provides DDL samples for the metadata tables in the core jar file for several common database platforms. 
Index declarations are not included in that DDL,
diff --git a/spring-batch-docs/src/main/asciidoc/spring-batch-architecture.adoc b/spring-batch-docs/modules/ROOT/pages/spring-batch-architecture.adoc
similarity index 98%
rename from spring-batch-docs/src/main/asciidoc/spring-batch-architecture.adoc
rename to spring-batch-docs/modules/ROOT/pages/spring-batch-architecture.adoc
index b3d31d55f5..75e5ab926f 100644
--- a/spring-batch-docs/src/main/asciidoc/spring-batch-architecture.adoc
+++ b/spring-batch-docs/modules/ROOT/pages/spring-batch-architecture.adoc
@@ -1,14 +1,13 @@
 [[springBatchArchitecture]]
-=== Spring Batch Architecture
+= Spring Batch Architecture
 
-include::attributes.adoc[]
 
 Spring Batch is designed with extensibility and a diverse group of end users in mind. The
 following image shows the layered architecture that supports the extensibility and ease
 of use for end-user developers.
 
 .Spring Batch Layered Architecture
-image::{batch-asciidoc}images/spring-batch-layers.png[Figure 1.1: Spring Batch Layered Architecture, scaledwidth="60%"]
+image::spring-batch-layers.png[Figure 1.1: Spring Batch Layered Architecture, scaledwidth="60%"]
 
 This layered architecture highlights three major high-level components: Application,
 Core, and Infrastructure. The application contains all batch jobs and custom code written
@@ … @@ writers, such as `ItemReader` and `ItemWriter`), and the core framework itself (
 which is its own library).
 
 [[batchArchitectureConsiderations]]
-==== General Batch Principles and Guidelines
+== General Batch Principles and Guidelines
 
 The following key principles, guidelines, and general considerations should be considered
 when building a batch solution.
@@ … @@ If the system depends on flat files, file backup procedures should not only be i
 and documented but be regularly tested as well.
 
 [[batchProcessingStrategy]]
-==== Batch Processing Strategies
+== Batch Processing Strategies
 
 To help design and implement batch systems, basic batch application building blocks and
 patterns should be provided to the designers and programmers in the form of sample
@@ … @@ necessarily mean physical partitioning of the database (although, in most cases,
 advisable). The following image illustrates the partitioning approach:
 
 .Partitioned Process
-image::{batch-asciidoc}images/partitioned.png[Figure 1.2: Partitioned Process, scaledwidth="60%"]
+image::partitioned.png[Figure 1.2: Partitioned Process, scaledwidth="60%"]
 
 The architecture should be flexible enough to allow dynamic configuration of the number
 of partitions. You should consider both automatic and user-controlled configuration.
diff --git a/spring-batch-docs/modules/ROOT/pages/spring-batch-integration.adoc b/spring-batch-docs/modules/ROOT/pages/spring-batch-integration.adoc
new file mode 100644
index 0000000000..45a9fe3cd8
--- /dev/null
+++ b/spring-batch-docs/modules/ROOT/pages/spring-batch-integration.adoc
@@ -0,0 +1,42 @@
+
+[[springBatchIntegration]]
+= Spring Batch Integration
+
+ifndef::onlyonetoggle[]
+endif::onlyonetoggle[]
+
+Many users of Spring Batch may encounter requirements that are
+outside the scope of Spring Batch but that may be efficiently and
+concisely implemented by using Spring Integration. Conversely, Spring
+Integration users may encounter Spring Batch requirements and need a way
+to efficiently integrate both frameworks. In this context, several
+patterns and use-cases emerge, and Spring Batch Integration
+addresses those requirements.
+ +The line between Spring Batch and Spring Integration is not always +clear, but two pieces of advice can +help: Thinking about granularity and applying common patterns. Some +of those common patterns are described in this section. + +Adding messaging to a batch process enables automation of +operations and also separation and strategizing of key concerns. +For example, a message might trigger a job to execute, and then +sending the message can be exposed in a variety of ways. Alternatively, when +a job completes or fails, that event might trigger a message to be sent, +and the consumers of those messages might have operational concerns +that have nothing to do with the application itself. Messaging can +also be embedded in a job (for example, reading or writing items for +processing through channels). Remote partitioning and remote chunking +provide methods to distribute workloads over a number of workers. + +This section covers the following key concepts: + +[role="xmlContent"] +* <> +* xref:spring-batch-integration/launching-jobs-through-messages.adoc[Launching Batch Jobs through Messages] +* xref:spring-batch-integration/sub-elements.adoc#providing-feedback-with-informational-messages[Providing Feedback with Informational Messages] +* xref:spring-batch-integration/sub-elements.adoc#asynchronous-processors[Asynchronous Processors] +* xref:spring-batch-integration/sub-elements.adoc#externalizing-batch-process-execution[Externalizing Batch Process Execution] + +[[namespace-support]] +[role="xmlContent"] diff --git a/spring-batch-docs/modules/ROOT/pages/spring-batch-integration/available-attributes-of-the-job-launching-gateway.adoc b/spring-batch-docs/modules/ROOT/pages/spring-batch-integration/available-attributes-of-the-job-launching-gateway.adoc new file mode 100644 index 0000000000..36b8fa14c8 --- /dev/null +++ b/spring-batch-docs/modules/ROOT/pages/spring-batch-integration/available-attributes-of-the-job-launching-gateway.adoc @@ -0,0 +1,38 @@ +[[availableAttributesOfTheJobLaunchingGateway]] += Available Attributes of the Job-Launching Gateway + +The job-launching gateway has the following attributes that you can set to control a job: + +* `id`: Identifies the underlying Spring bean definition, which is an instance of either: +** `EventDrivenConsumer` +** `PollingConsumer` +(The exact implementation depends on whether the component's input channel is a +`SubscribableChannel` or a `PollableChannel`.) +* `auto-startup`: Boolean flag to indicate that the endpoint should start automatically on +startup. The default is `true`. +* `request-channel`: The input `MessageChannel` of this endpoint. +* `reply-channel`: `MessageChannel` to which the resulting `JobExecution` payload is sent. +* `reply-timeout`: Lets you specify how long (in milliseconds) this gateway waits for the reply message +to be sent successfully to the reply channel before throwing +an exception. This attribute applies only when the channel +might block (for example, when using a bounded queue channel +that is currently full). Also, keep in mind that, when sending to a +`DirectChannel`, the invocation occurs +in the sender's thread. Therefore, the failing of the send +operation may be caused by other components further downstream. +The `reply-timeout` attribute maps to the +`sendTimeout` property of the underlying +`MessagingTemplate` instance. If not specified, the attribute +defaults to -1, +meaning that, by default, the `Gateway` waits indefinitely. +* `job-launcher`: Optional. 
Accepts a +custom +`JobLauncher` +bean reference. +If not specified, the adapter +re-uses the instance that is registered under the `id` of +`jobLauncher`. If no default instance +exists, an exception is thrown. +* `order`: Specifies the order of invocation when this endpoint is connected as a subscriber +to a `SubscribableChannel`. + diff --git a/spring-batch-docs/modules/ROOT/pages/spring-batch-integration/launching-jobs-through-messages.adoc b/spring-batch-docs/modules/ROOT/pages/spring-batch-integration/launching-jobs-through-messages.adoc new file mode 100644 index 0000000000..dab8afd9b0 --- /dev/null +++ b/spring-batch-docs/modules/ROOT/pages/spring-batch-integration/launching-jobs-through-messages.adoc @@ -0,0 +1,256 @@ +[[launching-batch-jobs-through-messages]] += Launching Batch Jobs through Messages + +When starting batch jobs by using the core Spring Batch API, you +basically have two options: + +* From the command line, with the `CommandLineJobRunner` +* Programmatically, with either `JobOperator.start()` or `JobLauncher.run()` + +For example, you may want to use the +`CommandLineJobRunner` when invoking batch jobs by +using a shell script. Alternatively, you can use the +`JobOperator` directly (for example, when using +Spring Batch as part of a web application). However, what about +more complex use cases? Maybe you need to poll a remote (S)FTP +server to retrieve the data for the Batch Job or your application +has to support multiple different data sources simultaneously. For +example, you may receive data files not only from the web but also from +FTP and other sources. Maybe additional transformation of the input files is +needed before invoking Spring Batch. + +Therefore, it would be much more powerful to execute the batch job +by using Spring Integration and its numerous adapters. For example, +you can use a _File Inbound Channel Adapter_ to +monitor a directory in the file-system and start the batch job as +soon as the input file arrives. Additionally, you can create Spring +Integration flows that use multiple different adapters to easily +ingest data for your batch jobs from multiple sources +simultaneously by using only configuration. Implementing all these +scenarios with Spring Integration is easy, as it allows for +decoupled, event-driven execution of the +`JobLauncher`. + +Spring Batch Integration provides the +`JobLaunchingMessageHandler` class that you can +use to launch batch jobs. The input for the +`JobLaunchingMessageHandler` is provided by a +Spring Integration message, which has a payload of type +`JobLaunchRequest`. This class is a wrapper around the `Job` +to be launched and around the `JobParameters` that are +necessary to launch the Batch job. + +The following image shows the typical Spring Integration +message flow that is needed to start a Batch job. The +link:$$https://www.enterpriseintegrationpatterns.com/toc.html$$[EIP (Enterprise Integration Patterns) website] +provides a full overview of messaging icons and their descriptions. 
+ +.Launch Batch Job +image::launch-batch-job.png[Launch Batch Job, scaledwidth="60%"] + + +[[transforming-a-file-into-a-joblaunchrequest]] +== Transforming a File into a JobLaunchRequest + +The following example transforms a file into a `JobLaunchRequest`: + +[source, java] +---- +package io.spring.sbi; + +import org.springframework.batch.core.Job; +import org.springframework.batch.core.JobParametersBuilder; +import org.springframework.batch.integration.launch.JobLaunchRequest; +import org.springframework.integration.annotation.Transformer; +import org.springframework.messaging.Message; + +import java.io.File; + +public class FileMessageToJobRequest { + private Job job; + private String fileParameterName; + + public void setFileParameterName(String fileParameterName) { + this.fileParameterName = fileParameterName; + } + + public void setJob(Job job) { + this.job = job; + } + + @Transformer + public JobLaunchRequest toRequest(Message message) { + JobParametersBuilder jobParametersBuilder = + new JobParametersBuilder(); + + jobParametersBuilder.addString(fileParameterName, + message.getPayload().getAbsolutePath()); + + return new JobLaunchRequest(job, jobParametersBuilder.toJobParameters()); + } +} +---- + +[[the-jobexecution-response]] +== The JobExecution Response + +When a batch job is being executed, a +`JobExecution` instance is returned. You can use this +instance to determine the status of an execution. If +a `JobExecution` is able to be created +successfully, it is always returned, regardless of whether +or not the actual execution is successful. + +The exact behavior on how the `JobExecution` +instance is returned depends on the provided +`TaskExecutor`. If a +`synchronous` (single-threaded) +`TaskExecutor` implementation is used, the +`JobExecution` response is returned only +`after` the job completes. When using an +`asynchronous` +`TaskExecutor`, the +`JobExecution` instance is returned +immediately. You can then take the `id` of +`JobExecution` instance +(with `JobExecution.getJobId()`) and query the +`JobRepository` for the job's updated status +using the `JobExplorer`. For more +information, see +xref:job/advanced-meta-data.adoc#queryingRepository[Querying the Repository]. + +[[spring-batch-integration-configuration]] +== Spring Batch Integration Configuration + +Consider a case where someone needs to create a file `inbound-channel-adapter` to listen +for CSV files in the provided directory, hand them off to a transformer +(`FileMessageToJobRequest`), launch the job through the job launching gateway, and +log the output of the `JobExecution` with the `logging-channel-adapter`. 
+ +[tabs] +==== +Java:: ++ +The following example shows how that common case can be configured in Java: ++ +.Java Configuration +[source, java] +---- +@Bean +public FileMessageToJobRequest fileMessageToJobRequest() { + FileMessageToJobRequest fileMessageToJobRequest = new FileMessageToJobRequest(); + fileMessageToJobRequest.setFileParameterName("input.file.name"); + fileMessageToJobRequest.setJob(personJob()); + return fileMessageToJobRequest; +} + +@Bean +public JobLaunchingGateway jobLaunchingGateway() { + TaskExecutorJobLauncher jobLauncher = new TaskExecutorJobLauncher(); + jobLauncher.setJobRepository(jobRepository); + jobLauncher.setTaskExecutor(new SyncTaskExecutor()); + JobLaunchingGateway jobLaunchingGateway = new JobLaunchingGateway(jobLauncher); + + return jobLaunchingGateway; +} + +@Bean +public IntegrationFlow integrationFlow(JobLaunchingGateway jobLaunchingGateway) { + return IntegrationFlow.from(Files.inboundAdapter(new File("/tmp/myfiles")). + filter(new SimplePatternFileListFilter("*.csv")), + c -> c.poller(Pollers.fixedRate(1000).maxMessagesPerPoll(1))). + transform(fileMessageToJobRequest()). + handle(jobLaunchingGateway). + log(LoggingHandler.Level.WARN, "headers.id + ': ' + payload"). + get(); +} +---- + +XML:: ++ +The following example shows how that common case can be configured in XML: ++ +.XML Configuration +[source, xml] +---- + + + + + + + + + + + + + + + + + + +---- +==== + + + + +[[example-itemreader-configuration]] +== Example ItemReader Configuration + +Now that we are polling for files and launching jobs, we need to configure our Spring +Batch `ItemReader` (for example) to use the files found at the location defined by the job +parameter called "input.file.name", as the following bean configuration shows: + + +[tabs] +==== +Java:: ++ +The following Java example shows the necessary bean configuration: ++ +.Java Configuration +[source, java] +---- +@Bean +@StepScope +public ItemReader sampleReader(@Value("#{jobParameters[input.file.name]}") String resource) { +... + FlatFileItemReader flatFileItemReader = new FlatFileItemReader(); + flatFileItemReader.setResource(new FileSystemResource(resource)); +... + return flatFileItemReader; +} +---- + +XML:: ++ +The following XML example shows the necessary bean configuration: ++ +.XML Configuration +[source, xml] +---- + + + ... + +---- + +==== + +The main points of interest in the preceding example are injecting the value of +`#{jobParameters['input.file.name']}` +as the Resource property value and setting the `ItemReader` bean +to have step scope. Setting the bean to have step scope takes advantage of +the late binding support, which allows access to the +`jobParameters` variable. + + diff --git a/spring-batch-docs/modules/ROOT/pages/spring-batch-integration/namespace-support.adoc b/spring-batch-docs/modules/ROOT/pages/spring-batch-integration/namespace-support.adoc new file mode 100644 index 0000000000..d54c5b3f86 --- /dev/null +++ b/spring-batch-docs/modules/ROOT/pages/spring-batch-integration/namespace-support.adoc @@ -0,0 +1,57 @@ +[[namespace-support]] += Namespace Support + +Dedicated XML namespace support was added to Spring Batch Integration in version 1.3, +with the aim to provide an easier configuration +experience. To use the namespace, add the following +namespace declarations to your Spring XML Application Context +file: + +[source, xml] +---- + + + ... 
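+
+<!-- A typical set of declarations (a sketch; the schemaLocation follows the
+     standard Spring pattern for the batch-integration namespace): -->
+<beans xmlns="http://www.springframework.org/schema/beans"
+       xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+       xmlns:batch-int="http://www.springframework.org/schema/batch-integration"
+       xsi:schemaLocation="
+           http://www.springframework.org/schema/batch-integration
+           https://www.springframework.org/schema/batch-integration/spring-batch-integration.xsd">
+
+    <!-- batch-int:* elements, such as batch-int:job-launching-gateway, go here -->
+
+</beans>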
+ + +---- + +The following example shows a fully configured Spring XML application context file for Spring +Batch Integration: + +[source, xml] +---- + + + ... + + +---- + +Appending version numbers to the referenced XSD file is also +allowed. However, because a version-less declaration always uses the +latest schema, we generally do not recommend appending the version +number to the XSD name. Adding a version number +could possibly create issues when updating the Spring Batch +Integration dependencies, as they may require more recent versions +of the XML schema. + + diff --git a/spring-batch-docs/src/main/asciidoc/spring-batch-integration.adoc b/spring-batch-docs/modules/ROOT/pages/spring-batch-integration/sub-elements.adoc similarity index 64% rename from spring-batch-docs/src/main/asciidoc/spring-batch-integration.adoc rename to spring-batch-docs/modules/ROOT/pages/spring-batch-integration/sub-elements.adoc index 5ba365ff6e..d4ee6bcc9c 100644 --- a/spring-batch-docs/src/main/asciidoc/spring-batch-integration.adoc +++ b/spring-batch-docs/modules/ROOT/pages/spring-batch-integration/sub-elements.adoc @@ -1,420 +1,47 @@ -:toc: left -:toclevels: 4 - -[[springBatchIntegration]] -== Spring Batch Integration - -include::attributes.adoc[] -ifndef::onlyonetoggle[] -include::toggle.adoc[] -endif::onlyonetoggle[] - -Many users of Spring Batch may encounter requirements that are -outside the scope of Spring Batch but that may be efficiently and -concisely implemented by using Spring Integration. Conversely, Spring -Integration users may encounter Spring Batch requirements and need a way -to efficiently integrate both frameworks. In this context, several -patterns and use-cases emerge, and Spring Batch Integration -addresses those requirements. - -The line between Spring Batch and Spring Integration is not always -clear, but two pieces of advice can -help: Thinking about granularity and applying common patterns. Some -of those common patterns are described in this section. - -Adding messaging to a batch process enables automation of -operations and also separation and strategizing of key concerns. -For example, a message might trigger a job to execute, and then -sending the message can be exposed in a variety of ways. Alternatively, when -a job completes or fails, that event might trigger a message to be sent, -and the consumers of those messages might have operational concerns -that have nothing to do with the application itself. Messaging can -also be embedded in a job (for example, reading or writing items for -processing through channels). Remote partitioning and remote chunking -provide methods to distribute workloads over a number of workers. - -This section covers the following key concepts: - -[role="xmlContent"] -* <> -* <> -* <> -* <> -* <> - -[[namespace-support]] -[role="xmlContent"] -=== Namespace Support - -Dedicated XML namespace support was added to Spring Batch Integration in version 1.3, -with the aim to provide an easier configuration -experience. To use the namespace, add the following -namespace declarations to your Spring XML Application Context -file: +[[sub-elements]] += Sub-elements -[source, xml] ----- - - - ... - - ----- - -The following example shows a fully configured Spring XML application context file for Spring -Batch Integration: - -[source, xml] ----- - - - ... - - ----- - -Appending version numbers to the referenced XSD file is also -allowed. 
However, because a version-less declaration always uses the -latest schema, we generally do not recommend appending the version -number to the XSD name. Adding a version number -could possibly create issues when updating the Spring Batch -Integration dependencies, as they may require more recent versions -of the XML schema. - - -[[launching-batch-jobs-through-messages]] -=== Launching Batch Jobs through Messages - -When starting batch jobs by using the core Spring Batch API, you -basically have two options: - -* From the command line, with the `CommandLineJobRunner` -* Programmatically, with either `JobOperator.start()` or `JobLauncher.run()` - -For example, you may want to use the -`CommandLineJobRunner` when invoking batch jobs by -using a shell script. Alternatively, you can use the -`JobOperator` directly (for example, when using -Spring Batch as part of a web application). However, what about -more complex use cases? Maybe you need to poll a remote (S)FTP -server to retrieve the data for the Batch Job or your application -has to support multiple different data sources simultaneously. For -example, you may receive data files not only from the web but also from -FTP and other sources. Maybe additional transformation of the input files is -needed before invoking Spring Batch. - -Therefore, it would be much more powerful to execute the batch job -by using Spring Integration and its numerous adapters. For example, -you can use a _File Inbound Channel Adapter_ to -monitor a directory in the file-system and start the batch job as -soon as the input file arrives. Additionally, you can create Spring -Integration flows that use multiple different adapters to easily -ingest data for your batch jobs from multiple sources -simultaneously by using only configuration. Implementing all these -scenarios with Spring Integration is easy, as it allows for -decoupled, event-driven execution of the -`JobLauncher`. - -Spring Batch Integration provides the -`JobLaunchingMessageHandler` class that you can -use to launch batch jobs. The input for the -`JobLaunchingMessageHandler` is provided by a -Spring Integration message, which has a payload of type -`JobLaunchRequest`. This class is a wrapper around the `Job` -to be launched and around the `JobParameters` that are -necessary to launch the Batch job. - -The following image shows the typical Spring Integration -message flow that is needed to start a Batch job. The -link:$$https://www.enterpriseintegrationpatterns.com/toc.html$$[EIP (Enterprise Integration Patterns) website] -provides a full overview of messaging icons and their descriptions. 
- -.Launch Batch Job -image::{batch-asciidoc}images/launch-batch-job.png[Launch Batch Job, scaledwidth="60%"] - - -[[transforming-a-file-into-a-joblaunchrequest]] -==== Transforming a File into a JobLaunchRequest - -The following example transforms a file into a `JobLaunchRequest`: - -[source, java] ----- -package io.spring.sbi; - -import org.springframework.batch.core.Job; -import org.springframework.batch.core.JobParametersBuilder; -import org.springframework.batch.integration.launch.JobLaunchRequest; -import org.springframework.integration.annotation.Transformer; -import org.springframework.messaging.Message; - -import java.io.File; - -public class FileMessageToJobRequest { - private Job job; - private String fileParameterName; - - public void setFileParameterName(String fileParameterName) { - this.fileParameterName = fileParameterName; - } - - public void setJob(Job job) { - this.job = job; - } - - @Transformer - public JobLaunchRequest toRequest(Message message) { - JobParametersBuilder jobParametersBuilder = - new JobParametersBuilder(); - - jobParametersBuilder.addString(fileParameterName, - message.getPayload().getAbsolutePath()); - - return new JobLaunchRequest(job, jobParametersBuilder.toJobParameters()); - } -} ----- - -[[the-jobexecution-response]] -==== The JobExecution Response - -When a batch job is being executed, a -`JobExecution` instance is returned. You can use this -instance to determine the status of an execution. If -a `JobExecution` is able to be created -successfully, it is always returned, regardless of whether -or not the actual execution is successful. - -The exact behavior on how the `JobExecution` -instance is returned depends on the provided -`TaskExecutor`. If a -`synchronous` (single-threaded) -`TaskExecutor` implementation is used, the -`JobExecution` response is returned only -`after` the job completes. When using an -`asynchronous` -`TaskExecutor`, the -`JobExecution` instance is returned -immediately. You can then take the `id` of -`JobExecution` instance -(with `JobExecution.getJobId()`) and query the -`JobRepository` for the job's updated status -using the `JobExplorer`. For more -information, see -<>. - -[[spring-batch-integration-configuration]] -==== Spring Batch Integration Configuration - -Consider a case where someone needs to create a file `inbound-channel-adapter` to listen -for CSV files in the provided directory, hand them off to a transformer -(`FileMessageToJobRequest`), launch the job through the job launching gateway, and -log the output of the `JobExecution` with the `logging-channel-adapter`. - -[role="xmlContent"] -The following example shows how that common case can be configured in XML: -.XML Configuration -[source, xml, role="xmlContent"] ----- - - - - - - - - - - - - - - - - - - ----- +When this `Gateway` is receiving messages from a +`PollableChannel`, you must either provide +a global default `Poller` or provide a `Poller` sub-element to the +`Job Launching Gateway`. 
-[role="javaContent"] -The following example shows how that common case can be configured in Java: +[tabs] +==== +Java:: ++ +The following example shows how to provide a poller in Java: ++ .Java Configuration -[source, java, role="javaContent"] +[source, java] ---- @Bean -public FileMessageToJobRequest fileMessageToJobRequest() { - FileMessageToJobRequest fileMessageToJobRequest = new FileMessageToJobRequest(); - fileMessageToJobRequest.setFileParameterName("input.file.name"); - fileMessageToJobRequest.setJob(personJob()); - return fileMessageToJobRequest; -} - -@Bean -public JobLaunchingGateway jobLaunchingGateway() { - TaskExecutorJobLauncher jobLauncher = new TaskExecutorJobLauncher(); - jobLauncher.setJobRepository(jobRepository); - jobLauncher.setTaskExecutor(new SyncTaskExecutor()); - JobLaunchingGateway jobLaunchingGateway = new JobLaunchingGateway(jobLauncher); - +@ServiceActivator(inputChannel = "queueChannel", poller = @Poller(fixedRate="1000")) +public JobLaunchingGateway sampleJobLaunchingGateway() { + JobLaunchingGateway jobLaunchingGateway = new JobLaunchingGateway(jobLauncher()); + jobLaunchingGateway.setOutputChannel(replyChannel()); return jobLaunchingGateway; } - -@Bean -public IntegrationFlow integrationFlow(JobLaunchingGateway jobLaunchingGateway) { - return IntegrationFlow.from(Files.inboundAdapter(new File("/tmp/myfiles")). - filter(new SimplePatternFileListFilter("*.csv")), - c -> c.poller(Pollers.fixedRate(1000).maxMessagesPerPoll(1))). - transform(fileMessageToJobRequest()). - handle(jobLaunchingGateway). - log(LoggingHandler.Level.WARN, "headers.id + ': ' + payload"). - get(); -} ----- - - -[[example-itemreader-configuration]] -==== Example ItemReader Configuration - -Now that we are polling for files and launching jobs, we need to configure our Spring -Batch `ItemReader` (for example) to use the files found at the location defined by the job -parameter called "input.file.name", as the following bean configuration shows: - -[role="xmlContent"] -The following XML example shows the necessary bean configuration: - -.XML Configuration -[source, xml, role="xmlContent"] ----- - - - ... - ----- - -[role="javaContent"] -The following Java example shows the necessary bean configuration: - -.Java Configuration -[source, java, role="javaContent"] ----- -@Bean -@StepScope -public ItemReader sampleReader(@Value("#{jobParameters[input.file.name]}") String resource) { -... - FlatFileItemReader flatFileItemReader = new FlatFileItemReader(); - flatFileItemReader.setResource(new FileSystemResource(resource)); -... - return flatFileItemReader; -} ---- -The main points of interest in the preceding example are injecting the value of -`#{jobParameters['input.file.name']}` -as the Resource property value and setting the `ItemReader` bean -to have step scope. Setting the bean to have step scope takes advantage of -the late binding support, which allows access to the -`jobParameters` variable. - - -[[availableAttributesOfTheJobLaunchingGateway]] -=== Available Attributes of the Job-Launching Gateway - -The job-launching gateway has the following attributes that you can set to control a job: - -* `id`: Identifies the underlying Spring bean definition, which is an instance of either: -** `EventDrivenConsumer` -** `PollingConsumer` -(The exact implementation depends on whether the component's input channel is a -`SubscribableChannel` or a `PollableChannel`.) -* `auto-startup`: Boolean flag to indicate that the endpoint should start automatically on -startup. The default is `true`. 
-* `request-channel`: The input `MessageChannel` of this endpoint. -* `reply-channel`: `MessageChannel` to which the resulting `JobExecution` payload is sent. -* `reply-timeout`: Lets you specify how long (in milliseconds) this gateway waits for the reply message -to be sent successfully to the reply channel before throwing -an exception. This attribute applies only when the channel -might block (for example, when using a bounded queue channel -that is currently full). Also, keep in mind that, when sending to a -`DirectChannel`, the invocation occurs -in the sender's thread. Therefore, the failing of the send -operation may be caused by other components further downstream. -The `reply-timeout` attribute maps to the -`sendTimeout` property of the underlying -`MessagingTemplate` instance. If not specified, the attribute -defaults to -1, -meaning that, by default, the `Gateway` waits indefinitely. -* `job-launcher`: Optional. Accepts a -custom -`JobLauncher` -bean reference. -If not specified, the adapter -re-uses the instance that is registered under the `id` of -`jobLauncher`. If no default instance -exists, an exception is thrown. -* `order`: Specifies the order of invocation when this endpoint is connected as a subscriber -to a `SubscribableChannel`. - -=== Sub-elements - -When this `Gateway` is receiving messages from a -`PollableChannel`, you must either provide -a global default `Poller` or provide a `Poller` sub-element to the -`Job Launching Gateway`. - -[role="xmlContent"] +XML:: ++ The following example shows how to provide a poller in XML: - ++ .XML Configuration -[source, xml, role="xmlContent"] +[source, xml] ---- ---- +==== -[role="javaContent"] -The following example shows how to provide a poller in Java: - -.Java Configuration -[source, java, role="javaContent"] ----- -@Bean -@ServiceActivator(inputChannel = "queueChannel", poller = @Poller(fixedRate="1000")) -public JobLaunchingGateway sampleJobLaunchingGateway() { - JobLaunchingGateway jobLaunchingGateway = new JobLaunchingGateway(jobLauncher()); - jobLaunchingGateway.setOutputChannel(replyChannel()); - return jobLaunchingGateway; -} ----- [[providing-feedback-with-informational-messages]] -==== Providing Feedback with Informational Messages +== Providing Feedback with Informational Messages As Spring Batch jobs can run for long times, providing progress information is often critical. For example, stakeholders may want @@ -446,7 +73,7 @@ routing a message to a mail outbound channel adapter), so that an email notifica be sent out based on some condition. .Handling Informational Messages -image::{batch-asciidoc}images/handling-informational-messages.png[Handling Informational Messages, scaledwidth="60%"] +image::handling-informational-messages.png[Handling Informational Messages, scaledwidth="60%"] The following two-part example shows how a listener is configured to send a message to a `Gateway` for a `StepExecution` events and log its output to a @@ -454,26 +81,15 @@ message to a `Gateway` for a `StepExecution` events and log its output to a First, create the notification integration beans. 
-[role="xmlContent"] -The following example shows the how to create the notification integration beans in XML: - -.XML Configuration -[source, xml, role="xmlContent"] ----- - - - - - ----- -[role="javaContent"] +[tabs] +==== +Java:: ++ The following example shows the how to create the notification integration beans in Java: - ++ .Java Configuration -[source, java, role="javaContent"] +[source, java] ---- @Bean @ServiceActivator(inputChannel = "stepExecutionsChannel") @@ -487,19 +103,59 @@ public LoggingHandler loggingHandler() { @MessagingGateway(name = "notificationExecutionsListener", defaultRequestChannel = "stepExecutionsChannel") public interface NotificationExecutionListener extends StepExecutionListener {} ---- - -[role="javaContent"] ++ NOTE: You need to add the `@IntegrationComponentScan` annotation to your configuration. +XML:: ++ +The following example shows the how to create the notification integration beans in XML: ++ +.XML Configuration +[source, xml] +---- + + + + + +---- + +==== + + + [[message-gateway-entry-list]] Second, modify your job to add a step-level listener. -[role="xmlContent"] -The following example shows the how to add a step-level listener in XML: +[tabs] +==== +Java:: ++ +The following example shows the how to add a step-level listener in Java: ++ +.Java Configuration +[source, java] +---- +public Job importPaymentsJob(JobRepository jobRepository) { + return new JobBuilder("importPayments", jobRepository) + .start(stepBuilderFactory.get("step1") + .chunk(200) + .listener(notificationExecutionsListener()) + ... + ) +} +---- + +XML:: ++ +The following example shows the how to add a step-level listener in XML: ++ .XML Configuration -[source, xml, role="xmlContent"] +[source, xml] ---- @@ -514,24 +170,12 @@ The following example shows the how to add a step-level listener in XML: ---- -[role="javaContent"] -The following example shows the how to add a step-level listener in Java: +==== + -.Java Configuration -[source, java, role="javaContent"] ----- -public Job importPaymentsJob(JobRepository jobRepository) { - return new JobBuilder("importPayments", jobRepository) - .start(stepBuilderFactory.get("step1") - .chunk(200) - .listener(notificationExecutionsListener()) - ... - ) -} ----- [[asynchronous-processors]] -==== Asynchronous Processors +== Asynchronous Processors Asynchronous Processors help you scale the processing of items. In the asynchronous processor use case, an `AsyncItemProcessor` serves as a dispatcher, executing the logic of @@ -542,11 +186,31 @@ Therefore, you can increase performance by using asynchronous item processing, b letting you implement fork-join scenarios. The `AsyncItemWriter` gathers the results and writes back the chunk as soon as all the results become available. 
-[role="xmlContent"] -The following example shows how to configuration the `AsyncItemProcessor` in XML: +[tabs] +==== +Java:: ++ +The following example shows how to configuration the `AsyncItemProcessor` in Java: ++ +.Java Configuration +[source, java] +---- +@Bean +public AsyncItemProcessor processor(ItemProcessor itemProcessor, TaskExecutor taskExecutor) { + AsyncItemProcessor asyncItemProcessor = new AsyncItemProcessor(); + asyncItemProcessor.setTaskExecutor(taskExecutor); + asyncItemProcessor.setDelegate(itemProcessor); + return asyncItemProcessor; +} +---- + +XML:: ++ +The following example shows how to configuration the `AsyncItemProcessor` in XML: ++ .XML Configuration -[source, xml, role="xmlContent"] +[source, xml] ---- @@ -559,29 +223,35 @@ The following example shows how to configuration the `AsyncItemProcessor` in XML ---- -[role="xmlContent"] -The following example shows how to configuration the `AsyncItemProcessor` in XML: +==== + +The `delegate` property refers to your `ItemProcessor` bean, and the `taskExecutor` +property refers to the `TaskExecutor` of your choice. + +[tabs] +==== +Java:: ++ +The following example shows how to configure the `AsyncItemWriter` in Java: ++ .Java Configuration -[source, java, role="javaContent"] +[source, java] ---- @Bean -public AsyncItemProcessor processor(ItemProcessor itemProcessor, TaskExecutor taskExecutor) { - AsyncItemProcessor asyncItemProcessor = new AsyncItemProcessor(); - asyncItemProcessor.setTaskExecutor(taskExecutor); - asyncItemProcessor.setDelegate(itemProcessor); - return asyncItemProcessor; +public AsyncItemWriter writer(ItemWriter itemWriter) { + AsyncItemWriter asyncItemWriter = new AsyncItemWriter(); + asyncItemWriter.setDelegate(itemWriter); + return asyncItemWriter; } ---- -The `delegate` property refers to your `ItemProcessor` bean, and the `taskExecutor` -property refers to the `TaskExecutor` of your choice. - -[role="xmlContent"] +XML:: ++ The following example shows how to configure the `AsyncItemWriter` in XML: - ++ .XML Configuration -[source, xml, role="xmlContent"] +[source, xml] ---- @@ -591,26 +261,16 @@ The following example shows how to configure the `AsyncItemWriter` in XML: ---- -[role="javaContent"] -The following example shows how to configure the `AsyncItemWriter` in Java: +==== + -.Java Configuration -[source, java, role="javaContent"] ----- -@Bean -public AsyncItemWriter writer(ItemWriter itemWriter) { - AsyncItemWriter asyncItemWriter = new AsyncItemWriter(); - asyncItemWriter.setDelegate(itemWriter); - return asyncItemWriter; -} ----- Again, the `delegate` property is actually a reference to your `ItemWriter` bean. [[externalizing-batch-process-execution]] -==== Externalizing Batch Process Execution +== Externalizing Batch Process Execution The integration approaches discussed so far suggest use cases where Spring Integration wraps Spring Batch like an outer shell. 
@@ -624,13 +284,13 @@ provides dedicated support for: * Remote Partitioning [[remote-chunking]] -===== Remote Chunking +=== Remote Chunking The following image shows one way that remote chunking works when you use Spring Batch together with Spring Integration: .Remote Chunking -image::{batch-asciidoc}images/remote-chunking-sbi.png[Remote Chunking, scaledwidth="60%"] +image::remote-chunking-sbi.png[Remote Chunking, scaledwidth="60%"] Taking things one step further, you can also externalize the chunk processing by using the @@ -649,29 +309,16 @@ Spring Integration's rich collection of channel adapters (such as JMS and AMQP), you can distribute chunks of a batch job to external systems for processing. -[role="xmlContent"] -A job with a step to be remotely chunked might have a configuration similar to the -following in XML: - -.XML Configuration -[source, xml, role="xmlContent"] ----- - - - - - - ... - - ----- -[role="javaContent"] +[tabs] +==== +Java:: ++ A job with a step to be remotely chunked might have a configuration similar to the following in Java: - ++ .Java Configuration -[source, java, role="javaContent"] +[source, java] ---- public Job chunkJob(JobRepository jobRepository) { return new JobBuilder("personJob", jobRepository) @@ -684,6 +331,28 @@ public Job chunkJob(JobRepository jobRepository) { } ---- +XML:: ++ +A job with a step to be remotely chunked might have a configuration similar to the +following in XML: ++ +.XML Configuration +[source, xml] +---- + + + + + + ... + + +---- + +==== + + + The `ItemReader` reference points to the bean you want to use for reading data on the manager. The `ItemWriter` reference points to a special `ItemWriter` (called `ChunkMessageChannelItemWriter`), as described earlier. The processor (if any) is left off @@ -691,11 +360,77 @@ the manager configuration, as it is configured on the worker. You should check a additional component properties, such as throttle limits and so on, when implementing your use case. 
-[role="xmlContent"] -The following XML configuration provides a basic manager setup: +[tabs] +==== +Java:: ++ +The following Java configuration provides a basic manager setup: ++ +.Java Configuration +[source, java] +---- +@Bean +public org.apache.activemq.ActiveMQConnectionFactory connectionFactory() { + ActiveMQConnectionFactory factory = new ActiveMQConnectionFactory(); + factory.setBrokerURL("tcp://localhost:61616"); + return factory; +} + +/* + * Configure outbound flow (requests going to workers) + */ +@Bean +public DirectChannel requests() { + return new DirectChannel(); +} + +@Bean +public IntegrationFlow outboundFlow(ActiveMQConnectionFactory connectionFactory) { + return IntegrationFlow + .from(requests()) + .handle(Jms.outboundAdapter(connectionFactory).destination("requests")) + .get(); +} + +/* + * Configure inbound flow (replies coming from workers) + */ +@Bean +public QueueChannel replies() { + return new QueueChannel(); +} + +@Bean +public IntegrationFlow inboundFlow(ActiveMQConnectionFactory connectionFactory) { + return IntegrationFlow + .from(Jms.messageDrivenChannelAdapter(connectionFactory).destination("replies")) + .channel(replies()) + .get(); +} + +/* + * Configure the ChunkMessageChannelItemWriter + */ +@Bean +public ItemWriter itemWriter() { + MessagingTemplate messagingTemplate = new MessagingTemplate(); + messagingTemplate.setDefaultChannel(requests()); + messagingTemplate.setReceiveTimeout(2000); + ChunkMessageChannelItemWriter chunkMessageChannelItemWriter + = new ChunkMessageChannelItemWriter<>(); + chunkMessageChannelItemWriter.setMessagingOperations(messagingTemplate); + chunkMessageChannelItemWriter.setReplyChannel(replies()); + return chunkMessageChannelItemWriter; +} +---- + +XML:: ++ +The following XML configuration provides a basic manager setup: ++ .XML Configuration -[source, xml, role="xmlContent"] +[source, xml] ---- @@ -720,16 +455,34 @@ The following XML configuration provides a basic manager setup: - ----- + +---- + +==== + + + +The preceding configuration provides us with a number of beans. We +configure our messaging middleware by using ActiveMQ and the +inbound and outbound JMS adapters provided by Spring Integration. As +shown, our `itemWriter` bean, which is +referenced by our job step, uses the +`ChunkMessageChannelItemWriter` to write chunks over the +configured middleware. 
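+
+As a usage sketch, the manager job can then be started like any other Spring Batch
+job. The following snippet assumes the `chunkJob` bean shown earlier and an
+available `JobLauncher` (the `startChunkJob` method and the `run.id` parameter
+name are illustrative, not part of the framework):
+
+[source, java]
+----
+public JobExecution startChunkJob(JobLauncher jobLauncher, Job chunkJob) throws Exception {
+    JobParameters jobParameters = new JobParametersBuilder()
+            .addLong("run.id", System.currentTimeMillis())
+            .toJobParameters();
+    // The manager reads and processes items locally; the ChunkMessageChannelItemWriter
+    // sends each chunk to the "requests" destination and waits for worker replies.
+    return jobLauncher.run(chunkJob, jobParameters);
+}
+----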
+ +Now we can move on to the worker configuration, as the following example shows: -[role="javaContent"] -The following Java configuration provides a basic manager setup: +[tabs] +==== +Java:: ++ +The following example shows the worker configuration in Java: ++ .Java Configuration -[source, java, role="javaContent"] +[source, java] ---- @Bean public org.apache.activemq.ActiveMQConnectionFactory connectionFactory() { @@ -739,7 +492,7 @@ public org.apache.activemq.ActiveMQConnectionFactory connectionFactory() { } /* - * Configure outbound flow (requests going to workers) + * Configure inbound flow (requests coming from the manager) */ @Bean public DirectChannel requests() { @@ -747,60 +500,50 @@ public DirectChannel requests() { } @Bean -public IntegrationFlow outboundFlow(ActiveMQConnectionFactory connectionFactory) { +public IntegrationFlow inboundFlow(ActiveMQConnectionFactory connectionFactory) { return IntegrationFlow - .from(requests()) - .handle(Jms.outboundAdapter(connectionFactory).destination("requests")) + .from(Jms.messageDrivenChannelAdapter(connectionFactory).destination("requests")) + .channel(requests()) .get(); } /* - * Configure inbound flow (replies coming from workers) + * Configure outbound flow (replies going to the manager) */ @Bean -public QueueChannel replies() { - return new QueueChannel(); +public DirectChannel replies() { + return new DirectChannel(); } @Bean -public IntegrationFlow inboundFlow(ActiveMQConnectionFactory connectionFactory) { +public IntegrationFlow outboundFlow(ActiveMQConnectionFactory connectionFactory) { return IntegrationFlow - .from(Jms.messageDrivenChannelAdapter(connectionFactory).destination("replies")) - .channel(replies()) + .from(replies()) + .handle(Jms.outboundAdapter(connectionFactory).destination("replies")) .get(); } /* - * Configure the ChunkMessageChannelItemWriter + * Configure the ChunkProcessorChunkHandler */ @Bean -public ItemWriter itemWriter() { - MessagingTemplate messagingTemplate = new MessagingTemplate(); - messagingTemplate.setDefaultChannel(requests()); - messagingTemplate.setReceiveTimeout(2000); - ChunkMessageChannelItemWriter chunkMessageChannelItemWriter - = new ChunkMessageChannelItemWriter<>(); - chunkMessageChannelItemWriter.setMessagingOperations(messagingTemplate); - chunkMessageChannelItemWriter.setReplyChannel(replies()); - return chunkMessageChannelItemWriter; +@ServiceActivator(inputChannel = "requests", outputChannel = "replies") +public ChunkProcessorChunkHandler chunkProcessorChunkHandler() { + ChunkProcessor chunkProcessor + = new SimpleChunkProcessor<>(itemProcessor(), itemWriter()); + ChunkProcessorChunkHandler chunkProcessorChunkHandler + = new ChunkProcessorChunkHandler<>(); + chunkProcessorChunkHandler.setChunkProcessor(chunkProcessor); + return chunkProcessorChunkHandler; } ---- -The preceding configuration provides us with a number of beans. We -configure our messaging middleware by using ActiveMQ and the -inbound and outbound JMS adapters provided by Spring Integration. As -shown, our `itemWriter` bean, which is -referenced by our job step, uses the -`ChunkMessageChannelItemWriter` to write chunks over the -configured middleware. 
- -Now we can move on to the worker configuration, as the following example shows: - -[role="xmlContent"] +XML:: ++ The following example shows the worker configuration in XML: - ++ .XML Configuration -[source, xml, role="xmlContent"] +[source, xml] ---- @@ -839,65 +582,9 @@ The following example shows the worker configuration in XML: ---- -[role="javaContent"] -The following example shows the worker configuration in Java: - -.Java Configuration -[source, java, role="javaContent"] ----- -@Bean -public org.apache.activemq.ActiveMQConnectionFactory connectionFactory() { - ActiveMQConnectionFactory factory = new ActiveMQConnectionFactory(); - factory.setBrokerURL("tcp://localhost:61616"); - return factory; -} - -/* - * Configure inbound flow (requests coming from the manager) - */ -@Bean -public DirectChannel requests() { - return new DirectChannel(); -} - -@Bean -public IntegrationFlow inboundFlow(ActiveMQConnectionFactory connectionFactory) { - return IntegrationFlow - .from(Jms.messageDrivenChannelAdapter(connectionFactory).destination("requests")) - .channel(requests()) - .get(); -} - -/* - * Configure outbound flow (replies going to the manager) - */ -@Bean -public DirectChannel replies() { - return new DirectChannel(); -} +==== -@Bean -public IntegrationFlow outboundFlow(ActiveMQConnectionFactory connectionFactory) { - return IntegrationFlow - .from(replies()) - .handle(Jms.outboundAdapter(connectionFactory).destination("replies")) - .get(); -} -/* - * Configure the ChunkProcessorChunkHandler - */ -@Bean -@ServiceActivator(inputChannel = "requests", outputChannel = "replies") -public ChunkProcessorChunkHandler chunkProcessorChunkHandler() { - ChunkProcessor chunkProcessor - = new SimpleChunkProcessor<>(itemProcessor(), itemWriter()); - ChunkProcessorChunkHandler chunkProcessorChunkHandler - = new ChunkProcessorChunkHandler<>(); - chunkProcessorChunkHandler.setChunkProcessor(chunkProcessor); - return chunkProcessorChunkHandler; -} ----- Most of these configuration items should look familiar from the manager configuration. Workers do not need access to @@ -923,7 +610,7 @@ two beans that you can autowire in your application context: These APIs take care of configuring a number of components, as the following diagram shows: .Remote Chunking Configuration -image::{batch-asciidoc}images/remote-chunking-config.png[Remote Chunking Configuration, scaledwidth="80%"] +image::remote-chunking-config.png[Remote Chunking Configuration, scaledwidth="80%"] On the manager side, the `RemoteChunkingManagerStepBuilderFactory` lets you configure a manager step by declaring: @@ -1001,12 +688,12 @@ You can find a complete example of a remote chunking job link:$$https://github.com/spring-projects/spring-batch/tree/main/spring-batch-samples#remote-chunking-sample$$[here]. [[remote-partitioning]] -===== Remote Partitioning +=== Remote Partitioning The following image shows a typical remote partitioning situation: .Remote Partitioning -image::{batch-asciidoc}images/remote-partitioning.png[Remote Partitioning, scaledwidth="60%"] +image::remote-partitioning.png[Remote Partitioning, scaledwidth="60%"] Remote Partitioning, on the other hand, is useful when it is not the processing of items but rather the associated I/O that @@ -1023,7 +710,7 @@ This provides a nice abstraction from the transports (such as JMS and AMQP) being used to communicate with the remote workers. 
The section of the "`Scalability`" chapter that addresses -<> provides an overview of the concepts and +xref:scalability.adoc#partitioning[remote partitioning] provides an overview of the concepts and components needed to configure remote partitioning and shows an example of using the default `TaskExecutorPartitionHandler` to partition @@ -1038,68 +725,16 @@ Similar to remote chunking, you can use JMS as the "`remoting fabric`". In that a `MessageChannelPartitionHandler` instance as the `PartitionHandler` implementation, as described earlier. -[role="xmlContent"] -The following example assumes an existing partitioned job and focuses on the -`MessageChannelPartitionHandler` and JMS configuration in XML: - -.XML Configuration -[source, xml, role="xmlContent"] ----- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ----- -[role="javaContent"] +[tabs] +==== +Java:: ++ The following example assumes an existing partitioned job and focuses on the `MessageChannelPartitionHandler` and JMS configuration in Java: - ++ .Java Configuration -[source, java, role="javaContent"] +[source, java] ---- /* * Configuration of the manager side @@ -1205,30 +840,78 @@ public IntegrationFlow outboundJmsStaging() { } ---- -You must also ensure that the partition `handler` attribute maps to the `partitionHandler` -bean. - -[role="xmlContent"] -The following example maps the partition `handler` attribute to the `partitionHandler` in -XML: - +XML:: ++ +The following example assumes an existing partitioned job and focuses on the +`MessageChannelPartitionHandler` and JMS configuration in XML: ++ .XML Configuration -[source, xml, role="xmlContent"] +[source, xml] ---- - - - - ... - - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + ---- -[role="javaContent"] +==== + +You must also ensure that the partition `handler` attribute maps to the `partitionHandler` +bean. + + +[tabs] +==== +Java:: ++ The following example maps the partition `handler` attribute to the `partitionHandler` in Java: - ++ .Java Configuration -[source, java, role="javaContent"] +[source, java] ---- public Job personJob(JobRepository jobRepository) { return new JobBuilder("personJob", jobRepository) @@ -1240,6 +923,24 @@ Java: } ---- +XML:: ++ +The following example maps the partition `handler` attribute to the `partitionHandler` in +XML: ++ +.XML Configuration +[source, xml] +---- + + + + ... + + +---- + +==== + You can find a complete example of a remote partitioning job link:$$https://github.com/spring-projects/spring-batch/tree/main/spring-batch-samples#remote-partitioning-sample$$[here]. @@ -1252,10 +953,10 @@ partitioning setup. 
This annotation provides two beans that are useful for remot These APIs take care of configuring a number of components, as the following diagrams show: .Remote Partitioning Configuration (with job repository polling) -image::{batch-asciidoc}images/remote-partitioning-polling-config.png[Remote Partitioning Configuration (with job repository polling), scaledwidth="80%"] +image::remote-partitioning-polling-config.png[Remote Partitioning Configuration (with job repository polling), scaledwidth="80%"] .Remote Partitioning Configuration (with replies aggregation) -image::{batch-asciidoc}images/remote-partitioning-aggregation-config.png[Remote Partitioning Configuration (with replies aggregation), scaledwidth="80%"] +image::remote-partitioning-aggregation-config.png[Remote Partitioning Configuration (with replies aggregation), scaledwidth="80%"] On the manager side, the `RemotePartitioningManagerStepBuilderFactory` lets you configure a manager step by declaring: diff --git a/spring-batch-docs/src/main/asciidoc/spring-batch-intro.adoc b/spring-batch-docs/modules/ROOT/pages/spring-batch-intro.adoc similarity index 88% rename from spring-batch-docs/src/main/asciidoc/spring-batch-intro.adoc rename to spring-batch-docs/modules/ROOT/pages/spring-batch-intro.adoc index 3003946a32..dd9747e514 100644 --- a/spring-batch-docs/src/main/asciidoc/spring-batch-intro.adoc +++ b/spring-batch-docs/modules/ROOT/pages/spring-batch-intro.adoc @@ -1,28 +1,5 @@ -:toc: left -:toclevels: 4 - -include::attributes.adoc[] - -ifdef::backend-spring-html[] -This documentation is also available -as link:index.html[multiple HTML files] and as link:../pdf/spring-batch-reference.pdf[PDF] -and link:../epub/spring-batch-reference.epub[EPUB] documents. -endif::[] - -ifdef::backend-pdf[] -This documentation is also available -as link:index.html[multiple HTML files], a link:index-single.html[single HTML file], -and an link:../epub/spring-batch-reference.epub[EPUB] document. -endif::[] - -ifdef::backend-epub3[] -This documentation is also available -as link:index.html[multiple HTML files], a link:index-single.html[single HTML file], -and a link:../pdf/spring-batch-reference.pdf[PDF] document. -endif::[] - [[spring-batch-intro]] -== Spring Batch Introduction += Spring Batch Introduction Many applications within the enterprise domain require bulk processing to perform business operations in mission-critical environments. These business operations include: @@ -58,7 +35,7 @@ transforming it, and so on). High-volume batch jobs can use the framework in a highly scalable manner to process significant volumes of information. [[springBatchBackground]] -=== Background +== Background While open source software projects and associated communities have focused greater attention on web-based and microservices-based architecture frameworks, there has been a @@ -91,7 +68,7 @@ and government agencies desiring to deliver standard, proven solutions to their enterprise IT environments can benefit from Spring Batch. [[springBatchUsageScenarios]] -=== Usage Scenarios +== Usage Scenarios A typical batch program generally: @@ -104,7 +81,8 @@ similar transactions as a set, typically in an offline environment without any u interaction. Batch jobs are part of most IT projects, and Spring Batch is the only open source framework that provides a robust, enterprise-scale solution. 
-==== Business Scenarios
+[[business-scenarios]]
+=== Business Scenarios
 
 Spring Batch supports the following business scenarios:
 
@@ -118,7 +96,8 @@ Spring Batch supports the following business scenarios:
 * Whole-batch transaction, for cases with a small batch size or existing stored
 procedures or scripts.
 
-==== Technical Objectives
+[[technical-objectives]]
+=== Technical Objectives
 
 Spring Batch has the following technical objectives:
 
diff --git a/spring-batch-docs/modules/ROOT/pages/step.adoc b/spring-batch-docs/modules/ROOT/pages/step.adoc
new file mode 100644
index 0000000000..0ce5123dbd
--- /dev/null
+++ b/spring-batch-docs/modules/ROOT/pages/step.adoc
@@ -0,0 +1,21 @@
+
+[[configureStep]]
+= Configuring a `Step`
+:page-section-summary-toc: 1
+
+ifndef::onlyonetoggle[]
+endif::onlyonetoggle[]
+
+As discussed in xref:domain.adoc[the domain chapter], a `Step` is a
+domain object that encapsulates an independent, sequential phase of a batch job and
+contains all of the information necessary to define and control the actual batch
+processing. This is a necessarily vague description because the contents of any given
+`Step` are at the discretion of the developer writing a `Job`. A `Step` can be as simple
+or complex as the developer desires. A simple `Step` might load data from a file into the
+database, requiring little or no code (depending upon the implementations used). A more
+complex `Step` might have complicated business rules that are applied as part of the
+processing, as the following image shows:
+
+.Step
+image::step.png[Step, scaledwidth="60%"]
+
diff --git a/spring-batch-docs/modules/ROOT/pages/step/chunk-oriented-processing.adoc b/spring-batch-docs/modules/ROOT/pages/step/chunk-oriented-processing.adoc
new file mode 100644
index 0000000000..180c6a755c
--- /dev/null
+++ b/spring-batch-docs/modules/ROOT/pages/step/chunk-oriented-processing.adoc
@@ -0,0 +1,60 @@
+[[chunkOrientedProcessing]]
+= Chunk-oriented Processing
+
+Spring Batch uses a "`chunk-oriented`" processing style in its most common
+implementation. Chunk-oriented processing refers to reading the data one item at a time
+and creating 'chunks' that are written out within a transaction boundary. Once the number
+of items read equals the commit interval, the entire chunk is written out by the
+`ItemWriter`, and then the transaction is committed. The following image shows the
+process:
+
+.Chunk-oriented Processing
+image::chunk-oriented-processing.png[Chunk Oriented Processing, scaledwidth="60%"]
+
+The following pseudo code shows the same concepts in a simplified form:
+
+[source, java]
+----
+List items = new ArrayList();
+for(int i = 0; i < commitInterval; i++){
+    Object item = itemReader.read();
+    if (item != null) {
+        items.add(item);
+    }
+}
+itemWriter.write(items);
+----
+
+You can also configure a chunk-oriented step with an optional `ItemProcessor`
+to process items before passing them to the `ItemWriter`. 
The following image
+shows the process when an `ItemProcessor` is registered in the step:
+
+.Chunk-oriented Processing with Item Processor
+image::chunk-oriented-processing-with-item-processor.png[Chunk Oriented Processing With Item Processor, scaledwidth="60%"]
+
+The following pseudo code shows how this is implemented in a simplified form:
+
+[source, java]
+----
+List items = new ArrayList();
+for(int i = 0; i < commitInterval; i++){
+    Object item = itemReader.read();
+    if (item != null) {
+        items.add(item);
+    }
+}
+
+List processedItems = new ArrayList();
+for(Object item: items){
+    Object processedItem = itemProcessor.process(item);
+    if (processedItem != null) {
+        processedItems.add(processedItem);
+    }
+}
+
+itemWriter.write(processedItems);
+----
+
+For more details about item processors and their use cases, see the
+xref:processor.adoc[Item processing] section.
+
diff --git a/spring-batch-docs/modules/ROOT/pages/step/chunk-oriented-processing/commit-interval.adoc b/spring-batch-docs/modules/ROOT/pages/step/chunk-oriented-processing/commit-interval.adoc
new file mode 100644
index 0000000000..5edc963433
--- /dev/null
+++ b/spring-batch-docs/modules/ROOT/pages/step/chunk-oriented-processing/commit-interval.adoc
@@ -0,0 +1,62 @@
+[[commitInterval]]
+= The Commit Interval
+
+As mentioned previously, a step reads in and writes out items, periodically committing
+by using the supplied `PlatformTransactionManager`. With a `commit-interval` of 1, it
+commits after writing each individual item. This is less than ideal in many situations,
+since beginning and committing a transaction is expensive. Ideally, it is preferable to
+process as many items as possible in each transaction, which is completely dependent upon
+the type of data being processed and the resources with which the step is interacting.
+For this reason, you can configure the number of items that are processed within a commit.
+
+[tabs]
+====
+Java::
++
+The following example shows a `step` whose `tasklet` has a `commit-interval`
+value of 10 as it would be defined in Java:
++
+.Java Configuration
+[source, java]
+----
+@Bean
+public Job sampleJob(JobRepository jobRepository) {
+    return new JobBuilder("sampleJob", jobRepository)
+                     .start(step1())
+                     .build();
+}
+
+@Bean
+public Step step1(JobRepository jobRepository, PlatformTransactionManager transactionManager) {
+    return new StepBuilder("step1", jobRepository)
+                .chunk(10, transactionManager)
+                .reader(itemReader())
+                .writer(itemWriter())
+                .build();
+}
+----
+
+XML::
++
+The following example shows a `step` whose `tasklet` has a `commit-interval`
+value of 10 as it would be defined in XML:
++
+.XML Configuration
+[source, xml]
+----
+<job id="sampleJob">
+    <step id="step1">
+        <tasklet>
+            <chunk reader="itemReader" writer="itemWriter" commit-interval="10"/>
+        </tasklet>
+    </step>
+</job>
+----
+
+====
+
+In the preceding example, 10 items are processed within each transaction. At the
+beginning of processing, a transaction is begun. Also, each time `read` is called on the
+`ItemReader`, a counter is incremented. When it reaches 10, the list of aggregated items
+is passed to the `ItemWriter`, and the transaction is committed.
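+
+The commit interval need not be a fixed number. As a sketch (assuming the same
+`itemReader()` and `itemWriter()` beans and a hypothetical `dynamicCommitStep` bean
+name), you can supply a `CompletionPolicy` in place of the interval; a
+`SimpleCompletionPolicy` of 10 behaves the same as `commit-interval="10"`:
+
+[source, java]
+----
+@Bean
+public Step dynamicCommitStep(JobRepository jobRepository, PlatformTransactionManager transactionManager) {
+    // SimpleCompletionPolicy counts items, so this step commits every 10 items.
+    // Other CompletionPolicy implementations can end the chunk on time or custom logic.
+    return new StepBuilder("dynamicCommitStep", jobRepository)
+                .chunk(new SimpleCompletionPolicy(10), transactionManager)
+                .reader(itemReader())
+                .writer(itemWriter())
+                .build();
+}
+----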
+
diff --git a/spring-batch-docs/modules/ROOT/pages/step/chunk-oriented-processing/configuring-skip.adoc b/spring-batch-docs/modules/ROOT/pages/step/chunk-oriented-processing/configuring-skip.adoc
new file mode 100644
index 0000000000..16a08cb719
--- /dev/null
+++ b/spring-batch-docs/modules/ROOT/pages/step/chunk-oriented-processing/configuring-skip.adoc
@@ -0,0 +1,142 @@
+[[configuringSkip]]
+= Configuring Skip Logic
+
+There are many scenarios where errors encountered while processing should not result in
+`Step` failure but should be skipped instead. This is usually a decision that must be
+made by someone who understands the data itself and what meaning it has. Financial data,
+for example, may not be skippable, because it results in money being transferred, which
+needs to be completely accurate. Loading a list of vendors, on the other hand, might
+allow for skips. If a vendor is not loaded because it was formatted incorrectly or was
+missing necessary information, there are probably no issues. Usually, these bad
+records are logged as well, which is covered later when discussing listeners.
+
+[tabs]
+====
+Java::
++
+The following Java example shows how to use a skip limit:
++
+.Java Configuration
+[source, java]
+----
+@Bean
+public Step step1(JobRepository jobRepository, PlatformTransactionManager transactionManager) {
+    return new StepBuilder("step1", jobRepository)
+                .chunk(10, transactionManager)
+                .reader(flatFileItemReader())
+                .writer(itemWriter())
+                .faultTolerant()
+                .skipLimit(10)
+                .skip(FlatFileParseException.class)
+                .build();
+}
+----
+
+XML::
++
+The following XML example shows how to use a skip limit:
++
+.XML Configuration
+[source, xml]
+----
+<step id="step1">
+    <tasklet>
+        <chunk reader="flatFileItemReader" writer="itemWriter"
+               commit-interval="10" skip-limit="10">
+            <skippable-exception-classes>
+                <include class="org.springframework.batch.item.file.FlatFileParseException"/>
+            </skippable-exception-classes>
+        </chunk>
+    </tasklet>
+</step>
+----
+
+====
+
+
+
+In the preceding example, a `FlatFileItemReader` is used. If, at any point, a
+`FlatFileParseException` is thrown, the item is skipped and counted against the total
+skip limit of 10. The declared exceptions (and their subclasses) might be thrown
+during any phase of the chunk processing (read, process, or write). Separate counts
+are made of skips on read, process, and write inside
+the step execution, but the limit applies across all skips. Once the skip limit is
+reached, the next exception found causes the step to fail. In other words, the eleventh
+skip triggers the exception, not the tenth.
+
+One problem with the preceding example is that any other exception besides a
+`FlatFileParseException` causes the `Job` to fail. In certain scenarios, this may be the
+correct behavior. However, in other scenarios, it may be easier to identify which
+exceptions should cause failure and skip everything else.
+
+[tabs]
+====
+Java::
++
+The following Java example shows how to exclude a particular exception:
++
+.Java Configuration
+[source, java]
+----
+@Bean
+public Step step1(JobRepository jobRepository, PlatformTransactionManager transactionManager) {
+    return new StepBuilder("step1", jobRepository)
+                .chunk(10, transactionManager)
+                .reader(flatFileItemReader())
+                .writer(itemWriter())
+                .faultTolerant()
+                .skipLimit(10)
+                .skip(Exception.class)
+                .noSkip(FileNotFoundException.class)
+                .build();
+}
+----
+
+XML::
++
+The following XML example shows how to exclude a particular exception:
++
+.XML Configuration
+[source, xml]
+----
+<step id="step1">
+    <tasklet>
+        <chunk reader="flatFileItemReader" writer="itemWriter"
+               commit-interval="10" skip-limit="10">
+            <skippable-exception-classes>
+                <include class="java.lang.Exception"/>
+                <exclude class="java.io.FileNotFoundException"/>
+            </skippable-exception-classes>
+        </chunk>
+    </tasklet>
+</step>
+----
+
+====
+
+
+
+By identifying `java.lang.Exception` as a skippable exception class, the configuration
+indicates that all `Exceptions` are skippable. 
However, by "`excluding`" +`java.io.FileNotFoundException`, the configuration refines the list of skippable +exception classes to be all `Exceptions` __except__ `FileNotFoundException`. Any excluded +exception class is fatal if encountered (that is, they are not skipped). + +For any exception encountered, the skippability is determined by the nearest superclass +in the class hierarchy. Any unclassified exception is treated as 'fatal'. + + +[tabs] +==== +Java:: ++ +The order of the `skip` and `noSkip` method calls does not matter. + +XML:: ++ +The order of the `` and `` elements does not matter. + +==== + + + diff --git a/spring-batch-docs/modules/ROOT/pages/step/chunk-oriented-processing/configuring.adoc b/spring-batch-docs/modules/ROOT/pages/step/chunk-oriented-processing/configuring.adoc new file mode 100644 index 0000000000..100782cd63 --- /dev/null +++ b/spring-batch-docs/modules/ROOT/pages/step/chunk-oriented-processing/configuring.adoc @@ -0,0 +1,94 @@ +[[configuringAStep]] += Configuring a Step + +Despite the relatively short list of required dependencies for a `Step`, it is an +extremely complex class that can potentially contain many collaborators. + +[tabs] +==== +Java:: ++ +When using Java configuration, you can use the Spring Batch builders, as the +following example shows: ++ +.Java Configuration +[source, java] +---- +/** + * Note the JobRepository is typically autowired in and not needed to be explicitly + * configured + */ +@Bean +public Job sampleJob(JobRepository jobRepository, Step sampleStep) { + return new JobBuilder("sampleJob", jobRepository) + .start(sampleStep) + .build(); +} + +/** + * Note the TransactionManager is typically autowired in and not needed to be explicitly + * configured + */ +@Bean +public Step sampleStep(JobRepository jobRepository, // <2> + PlatformTransactionManager transactionManager) { // <1> + return new StepBuilder("sampleStep", jobRepository) + .chunk(10, transactionManager) // <3> + .reader(itemReader()) + .writer(itemWriter()) + .build(); +} +---- +<1> `transactionManager`: Spring's `PlatformTransactionManager` that begins and commits +transactions during processing. +<2> `repository`: The Java-specific name of the `JobRepository` that periodically stores +the `StepExecution` and `ExecutionContext` during processing (just before committing). +<3> `chunk`: The Java-specific name of the dependency that indicates that this is an +item-based step and the number of items to be processed before the transaction is +committed. ++ +NOTE: Note that `repository` defaults to `jobRepository` (provided through `@EnableBatchProcessing`) +and `transactionManager` defaults to `transactionManager` (provided from the application context). +Also, the `ItemProcessor` is optional, since the item could be +directly passed from the reader to the writer. + + +XML:: ++ +To ease configuration, you can use the Spring Batch XML namespace, as +the following example shows: ++ +.XML Configuration +[source, xml] +---- + + + + + + + +---- +<1> `transaction-manager`: Spring's `PlatformTransactionManager` that begins and commits +transactions during processing. +<2> `job-repository`: The XML-specific name of the `JobRepository` that periodically stores +the `StepExecution` and `ExecutionContext` during processing (just before committing). For +an in-line `` (one defined within a ``), it is an attribute on the `` +element. For a standalone ``, it is defined as an attribute of the ``. 
+<3> `commit-interval`: The XML-specific name of the number of items to be processed
+before the transaction is committed.
++
+NOTE: Note that `job-repository` defaults to `jobRepository` and
+`transaction-manager` defaults to `transactionManager`. Also, the `ItemProcessor` is
+optional, since the item could be directly passed from the reader to the writer.
+====
+
+
+
+The preceding configuration includes the only required dependencies to create an item-oriented
+step:
+
+* `reader`: The `ItemReader` that provides items for processing.
+* `writer`: The `ItemWriter` that processes the items provided by the `ItemReader`.
+
+
diff --git a/spring-batch-docs/modules/ROOT/pages/step/chunk-oriented-processing/controlling-rollback.adoc b/spring-batch-docs/modules/ROOT/pages/step/chunk-oriented-processing/controlling-rollback.adoc
new file mode 100644
index 0000000000..69b704c5ae
--- /dev/null
+++ b/spring-batch-docs/modules/ROOT/pages/step/chunk-oriented-processing/controlling-rollback.adoc
@@ -0,0 +1,103 @@
+[[controllingRollback]]
+= Controlling Rollback
+
+By default, regardless of retry or skip, any exceptions thrown from the `ItemWriter`
+cause the transaction controlled by the `Step` to roll back. If skip is configured as
+described earlier, exceptions thrown from the `ItemReader` do not cause a rollback.
+However, there are many scenarios in which exceptions thrown from the `ItemWriter` should
+not cause a rollback, because no action has taken place to invalidate the transaction.
+For this reason, you can configure the `Step` with a list of exceptions that should not
+cause a rollback.
+
+[tabs]
+====
+Java::
++
+In Java, you can control rollback as follows:
++
+.Java Configuration
+[source, java]
+----
+@Bean
+public Step step1(JobRepository jobRepository, PlatformTransactionManager transactionManager) {
+    return new StepBuilder("step1", jobRepository)
+                .chunk(2, transactionManager)
+                .reader(itemReader())
+                .writer(itemWriter())
+                .faultTolerant()
+                .noRollback(ValidationException.class)
+                .build();
+}
+----
+
+XML::
++
+In XML, you can control rollback as follows:
++
+.XML Configuration
+[source, xml]
+----
+<step id="step1">
+   <tasklet>
+      <chunk reader="itemReader" writer="itemWriter" commit-interval="2"/>
+      <no-rollback-exception-classes>
+         <include class="org.springframework.batch.item.validator.ValidationException"/>
+      </no-rollback-exception-classes>
+   </tasklet>
+</step>
+----
+
+====
+
+
+
+[[transactionalReaders]]
+== Transactional Readers
+
+The basic contract of the `ItemReader` is that it is forward-only. The step buffers
+reader input so that, in case of a rollback, the items do not need to be re-read
+from the reader. However, there are certain scenarios in which the reader is built on
+top of a transactional resource, such as a JMS queue. In this case, since the queue is
+tied to the transaction that is rolled back, the messages that have been pulled from the
+queue are put back on it. For this reason, you can configure the step to not buffer the
+items.
+ + +[tabs] +==== +Java:: ++ +The following example shows how to create a reader that does not buffer items in Java: ++ +.Java Configuration +[source, java] +---- +@Bean +public Step step1(JobRepository jobRepository, PlatformTransactionManager transactionManager) { + return new StepBuilder("step1", jobRepository) + .chunk(2, transactionManager) + .reader(itemReader()) + .writer(itemWriter()) + .readerIsTransactionalQueue() + .build(); +} +---- + +XML:: ++ +The following example shows how to create a reader that does not buffer items in XML: ++ +.XML Configuration +[source, xml] +---- + + + + + +---- + +==== + + diff --git a/spring-batch-docs/modules/ROOT/pages/step/chunk-oriented-processing/inheriting-from-parent.adoc b/spring-batch-docs/modules/ROOT/pages/step/chunk-oriented-processing/inheriting-from-parent.adoc new file mode 100644 index 0000000000..00b59bf54a --- /dev/null +++ b/spring-batch-docs/modules/ROOT/pages/step/chunk-oriented-processing/inheriting-from-parent.adoc @@ -0,0 +1,108 @@ +[[inheriting-from-a-parent-step]] += Inheriting from a Parent `Step` + +[role="xmlContent"] +If a group of `Steps` share similar configurations, then it may be helpful to define a +"`parent`" `Step` from which the concrete `Steps` may inherit properties. Similar to class +inheritance in Java, the "`child`" `Step` combines its elements and attributes with the +parent's. The child also overrides any of the parent's `Steps`. + +[role="xmlContent"] +In the following example, the `Step`, `concreteStep1`, inherits from `parentStep`. It is +instantiated with `itemReader`, `itemProcessor`, `itemWriter`, `startLimit=5`, and +`allowStartIfComplete=true`. Additionally, the `commitInterval` is `5`, since it is +overridden by the `concreteStep1` `Step`, as the following example shows: + +[source, xml, role="xmlContent"] +---- + + + + + + + + + + + +---- + +[role="xmlContent"] +The `id` attribute is still required on the step within the job element. This is for two +reasons: + +* The `id` is used as the step name when persisting the `StepExecution`. If the same +standalone step is referenced in more than one step in the job, an error occurs. + +[role="xmlContent"] +* When creating job flows, as described xref:step/controlling-flow.adoc[later in this chapter], the `next` attribute +should refer to the step in the flow, not the standalone step. + +[[abstractStep]] +[role="xmlContent"] +[[abstract-step]] +== Abstract `Step` + +[role="xmlContent"] +Sometimes, it may be necessary to define a parent `Step` that is not a complete `Step` +configuration. If, for instance, the `reader`, `writer`, and `tasklet` attributes are +left off of a `Step` configuration, then initialization fails. If a parent must be +defined without one or more of these properties, the `abstract` attribute should be used. An +`abstract` `Step` is only extended, never instantiated. + +[role="xmlContent"] +In the following example, the `Step` (`abstractParentStep`) would not be instantiated if it +were not declared to be abstract. The `Step`, (`concreteStep2`) has `itemReader`, +`itemWriter`, and `commit-interval=10`. + +[source, xml, role="xmlContent"] +---- + + + + + + + + + + + +---- + +[[mergingListsOnStep]] +[role="xmlContent"] +[[merging-lists]] +== Merging Lists + +[role="xmlContent"] +Some of the configurable elements on `Steps` are lists, such as the `` element. +If both the parent and child `Steps` declare a `` element, the +child's list overrides the parent's. 
To allow a child to add additional +listeners to the list defined by the parent, every list element has a `merge` attribute. +If the element specifies that `merge="true"`, then the child's list is combined with the +parent's instead of overriding it. + +[role="xmlContent"] +In the following example, the `Step` "concreteStep3", is created with two listeners: +`listenerOne` and `listenerTwo`: + +[source, xml, role="xmlContent"] +---- + + + + + + + + + + + + + + +---- + diff --git a/spring-batch-docs/modules/ROOT/pages/step/chunk-oriented-processing/intercepting-execution.adoc b/spring-batch-docs/modules/ROOT/pages/step/chunk-oriented-processing/intercepting-execution.adoc new file mode 100644 index 0000000000..bdb7f57b61 --- /dev/null +++ b/spring-batch-docs/modules/ROOT/pages/step/chunk-oriented-processing/intercepting-execution.adoc @@ -0,0 +1,266 @@ +[[interceptingStepExecution]] += Intercepting `Step` Execution + +Just as with the `Job`, there are many events during the execution of a `Step` where a +user may need to perform some functionality. For example, to write out to a flat +file that requires a footer, the `ItemWriter` needs to be notified when the `Step` has +been completed so that the footer can be written. This can be accomplished with one of many +`Step` scoped listeners. + +You can apply any class that implements one of the extensions of `StepListener` (but not that interface +itself, since it is empty) to a step through the `listeners` element. +The `listeners` element is valid inside a step, tasklet, or chunk declaration. We +recommend that you declare the listeners at the level at which its function applies +or, if it is multi-featured (such as `StepExecutionListener` and `ItemReadListener`), +declare it at the most granular level where it applies. + + +[tabs] +==== +Java:: ++ +The following example shows a listener applied at the chunk level in Java: ++ +.Java Configuration +[source, java] +---- +@Bean +public Step step1(JobRepository jobRepository, PlatformTransactionManager transactionManager) { + return new StepBuilder("step1", jobRepository) + .chunk(10, transactionManager) + .reader(reader()) + .writer(writer()) + .listener(chunkListener()) + .build(); +} +---- + + +XML:: ++ +The following example shows a listener applied at the chunk level in XML: ++ +.XML Configuration +[source, xml] +---- + + + + + + + + +---- + +==== + + +An `ItemReader`, `ItemWriter`, or `ItemProcessor` that itself implements one of the +`StepListener` interfaces is registered automatically with the `Step` if using the +namespace `` element or one of the `*StepFactoryBean` factories. This only +applies to components directly injected into the `Step`. If the listener is nested inside +another component, you need to explicitly register it (as described previously under +xref:step/chunk-oriented-processing/registering-item-streams.adoc[Registering `ItemStream` with a `Step`]). + +In addition to the `StepListener` interfaces, annotations are provided to address the +same concerns. Plain old Java objects can have methods with these annotations that are +then converted into the corresponding `StepListener` type. It is also common to annotate +custom implementations of chunk components, such as `ItemReader` or `ItemWriter` or +`Tasklet`. The annotations are analyzed by the XML parser for the `` elements +as well as registered with the `listener` methods in the builders, so all you need to do +is use the XML namespace or builders to register the listeners with a step. 
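+
+As an illustration (this sketch is not part of the original text; the class name is
+hypothetical), a plain POJO using the standard annotations from
+`org.springframework.batch.core.annotation` might look as follows:
+
+[source, java]
+----
+public class AuditListener {
+
+    // Called before the step starts.
+    @BeforeStep
+    public void beforeStep(StepExecution stepExecution) {
+        System.out.println("Starting step: " + stepExecution.getStepName());
+    }
+
+    // Called after the step ends. Returning null leaves the ExitStatus unchanged.
+    @AfterStep
+    public ExitStatus afterStep(StepExecution stepExecution) {
+        System.out.println("Finished step: " + stepExecution.getStepName());
+        return null;
+    }
+}
+----
+
+Such a POJO can then be registered with the `listener` methods on the builders or the
+`listeners` element, as described earlier.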
+
+[[stepExecutionListener]]
+== `StepExecutionListener`
+
+`StepExecutionListener` represents the most generic listener for `Step` execution. It
+allows for notification before a `Step` is started and after it ends, whether it ended
+normally or failed, as the following example shows:
+
+[source, java]
+----
+public interface StepExecutionListener extends StepListener {
+
+    void beforeStep(StepExecution stepExecution);
+
+    ExitStatus afterStep(StepExecution stepExecution);
+
+}
+----
+
+`afterStep` has a return type of `ExitStatus`, to give listeners the chance to
+modify the exit code that is returned upon completion of a `Step`.
+
+The annotations corresponding to this interface are:
+
+* `@BeforeStep`
+* `@AfterStep`
+
+[[chunkListener]]
+== `ChunkListener`
+
+A "`chunk`" is defined as the items processed within the scope of a transaction. Committing a
+transaction, at each commit interval, commits a chunk. You can use a `ChunkListener` to
+perform logic before a chunk begins processing or after a chunk has completed
+successfully, as the following interface definition shows:
+
+[source, java]
+----
+public interface ChunkListener extends StepListener {
+
+    void beforeChunk(ChunkContext context);
+    void afterChunk(ChunkContext context);
+    void afterChunkError(ChunkContext context);
+
+}
+----
+
+The `beforeChunk` method is called after the transaction is started but before reading begins
+on the `ItemReader`. Conversely, `afterChunk` is called after the chunk has been
+committed (or not at all if there is a rollback).
+
+The annotations corresponding to this interface are:
+
+* `@BeforeChunk`
+* `@AfterChunk`
+* `@AfterChunkError`
+
+You can apply a `ChunkListener` when there is no chunk declaration. The `TaskletStep` is
+responsible for calling the `ChunkListener`, so it applies to a non-item-oriented tasklet
+as well (it is called before and after the tasklet).
+
+[[itemReadListener]]
+== `ItemReadListener`
+
+When discussing skip logic previously, it was mentioned that it may be beneficial to log
+the skipped records so that they can be dealt with later. In the case of read errors,
+this can be done with an `ItemReadListener`, as the following interface
+definition shows:
+
+[source, java]
+----
+public interface ItemReadListener<T> extends StepListener {
+
+    void beforeRead();
+    void afterRead(T item);
+    void onReadError(Exception ex);
+
+}
+----
+
+The `beforeRead` method is called before each call to `read` on the `ItemReader`. The
+`afterRead` method is called after each successful call to `read` and is passed the item
+that was read. If there was an error while reading, the `onReadError` method is called.
+The exception encountered is provided so that it can be logged.
+
+The annotations corresponding to this interface are:
+
+* `@BeforeRead`
+* `@AfterRead`
+* `@OnReadError`
+
+[[itemProcessListener]]
+== `ItemProcessListener`
+
+As with the `ItemReadListener`, the processing of an item can be "`listened`" to, as
+the following interface definition shows:
+
+[source, java]
+----
+public interface ItemProcessListener<T, S> extends StepListener {
+
+    void beforeProcess(T item);
+    void afterProcess(T item, S result);
+    void onProcessError(T item, Exception e);
+
+}
+----
+
+The `beforeProcess` method is called before `process` on the `ItemProcessor` and is
+handed the item that is to be processed. The `afterProcess` method is called after the
+item has been successfully processed. If there was an error while processing, the
+`onProcessError` method is called.
The exception encountered and the item that was +attempted to be processed are provided, so that they can be logged. + +The annotations corresponding to this interface are: + +* `@BeforeProcess` +* `@AfterProcess` +* `@OnProcessError` + +[[itemWriteListener]] +== `ItemWriteListener` + +You can "`listen`" to the writing of an item with the `ItemWriteListener`, as the +following interface definition shows: + +[source, java] +---- +public interface ItemWriteListener extends StepListener { + + void beforeWrite(List items); + void afterWrite(List items); + void onWriteError(Exception exception, List items); + +} +---- + +The `beforeWrite` method is called before `write` on the `ItemWriter` and is handed the +list of items that is written. The `afterWrite` method is called after the item has been +successfully written. If there was an error while writing, the `onWriteError` method is +called. The exception encountered and the item that was attempted to be written are +provided, so that they can be logged. + +The annotations corresponding to this interface are: + +* `@BeforeWrite` +* `@AfterWrite` +* `@OnWriteError` + +[[skipListener]] +== `SkipListener` + +`ItemReadListener`, `ItemProcessListener`, and `ItemWriteListener` all provide mechanisms +for being notified of errors, but none informs you that a record has actually been +skipped. `onWriteError`, for example, is called even if an item is retried and +successful. For this reason, there is a separate interface for tracking skipped items, as +the following interface definition shows: + +[source, java] +---- +public interface SkipListener extends StepListener { + + void onSkipInRead(Throwable t); + void onSkipInProcess(T item, Throwable t); + void onSkipInWrite(S item, Throwable t); + +} +---- + +`onSkipInRead` is called whenever an item is skipped while reading. It should be noted +that rollbacks may cause the same item to be registered as skipped more than once. +`onSkipInWrite` is called when an item is skipped while writing. Because the item has +been read successfully (and not skipped), it is also provided the item itself as an +argument. + +The annotations corresponding to this interface are: + +* `@OnSkipInRead` +* `@OnSkipInWrite` +* `@OnSkipInProcess` + +[[skipListenersAndTransactions]] +=== SkipListeners and Transactions + +One of the most common use cases for a `SkipListener` is to log out a skipped item, so +that another batch process or even human process can be used to evaluate and fix the +issue that leads to the skip. Because there are many cases in which the original transaction +may be rolled back, Spring Batch makes two guarantees: + +* The appropriate skip method (depending on when the error happened) is called only once +per item. +* The `SkipListener` is always called just before the transaction is committed. This is +to ensure that any transactional resources call by the listener are not rolled back by a +failure within the `ItemWriter`. + diff --git a/spring-batch-docs/modules/ROOT/pages/step/chunk-oriented-processing/registering-item-streams.adoc b/spring-batch-docs/modules/ROOT/pages/step/chunk-oriented-processing/registering-item-streams.adoc new file mode 100644 index 0000000000..643087d7d6 --- /dev/null +++ b/spring-batch-docs/modules/ROOT/pages/step/chunk-oriented-processing/registering-item-streams.adoc @@ -0,0 +1,92 @@ +[[registeringItemStreams]] += Registering `ItemStream` with a `Step` + +The step has to take care of `ItemStream` callbacks at the necessary points in its +lifecycle. 
(For more information on the `ItemStream` interface, see
+xref:readers-and-writers/item-stream.adoc[ItemStream]). This is vital if a step fails and might
+need to be restarted, because the `ItemStream` interface is where the step gets the
+information it needs about persistent state between executions.
+
+If the `ItemReader`, `ItemProcessor`, or `ItemWriter` itself implements the `ItemStream`
+interface, these are registered automatically. Any other streams need to be
+registered separately. This is often the case when indirect dependencies, such as
+delegates, are injected into the reader and writer. You can register a stream on the
+`step` through the `stream` element.
+
+[tabs]
+====
+Java::
++
+The following example shows how to register a `stream` on a `step` in Java:
++
+.Java Configuration
+[source, java]
+----
+@Bean
+public Step step1(JobRepository jobRepository, PlatformTransactionManager transactionManager) {
+    return new StepBuilder("step1", jobRepository)
+                .chunk(2, transactionManager)
+                .reader(itemReader())
+                .writer(compositeItemWriter())
+                .stream(fileItemWriter1())
+                .stream(fileItemWriter2())
+                .build();
+}
+
+/**
+ * In Spring Batch 4, the CompositeItemWriter implements ItemStream, so this isn't
+ * necessary, but it is used here as an example.
+ */
+@Bean
+public CompositeItemWriter compositeItemWriter() {
+    List<ItemWriter> writers = new ArrayList<>(2);
+    writers.add(fileItemWriter1());
+    writers.add(fileItemWriter2());
+
+    CompositeItemWriter itemWriter = new CompositeItemWriter();
+
+    itemWriter.setDelegates(writers);
+
+    return itemWriter;
+}
+----
+
+XML::
++
+The following example shows how to register a `stream` on a `step` in XML:
++
+.XML Configuration
+[source, xml]
+----
+<step id="step1">
+    <tasklet>
+        <chunk reader="itemReader" writer="compositeWriter" commit-interval="2">
+            <streams>
+                <stream ref="fileItemWriter1"/>
+                <stream ref="fileItemWriter2"/>
+            </streams>
+        </chunk>
+    </tasklet>
+</step>
+
+<beans:bean id="compositeWriter"
+            class="org.springframework.batch.item.support.CompositeItemWriter">
+    <beans:property name="delegates">
+        <beans:list>
+            <beans:ref bean="fileItemWriter1" />
+            <beans:ref bean="fileItemWriter2" />
+        </beans:list>
+    </beans:property>
+</beans:bean>
+----
+
+====
+
+
+In the preceding example, the `CompositeItemWriter` is not an `ItemStream`, but both of its
+delegates are. Therefore, both delegate writers must be explicitly registered as streams
+for the framework to handle them correctly. The `ItemReader` does not need to be
+explicitly registered as a stream because it is a direct property of the `Step`. The step
+is now restartable, and the state of the reader and writer is correctly persisted in the
+event of a failure.
+
diff --git a/spring-batch-docs/modules/ROOT/pages/step/chunk-oriented-processing/restart.adoc b/spring-batch-docs/modules/ROOT/pages/step/chunk-oriented-processing/restart.adoc
new file mode 100644
index 0000000000..90402cc811
--- /dev/null
+++ b/spring-batch-docs/modules/ROOT/pages/step/chunk-oriented-processing/restart.adoc
@@ -0,0 +1,247 @@
+[[stepRestart]]
+= Configuring a `Step` for Restart
+
+In the "`xref:job.adoc[Configuring and Running a Job]`" section, restarting a
+`Job` was discussed. Restart has numerous impacts on steps and, consequently, may
+require some specific configuration.
+
+[[startLimit]]
+== Setting a Start Limit
+
+There are many scenarios where you may want to control the number of times a `Step` can
+be started. For example, you might need to configure a particular `Step` so that it
+runs only once because it invalidates some resource that must be fixed manually before it can
+be run again. This is configurable on the step level, since different steps may have
+different requirements. A `Step` that can be executed only once can exist as part of the
+same `Job` as a `Step` that can be run infinitely.
+ + +[tabs] +==== +Java:: ++ +The following code fragment shows an example of a start limit configuration in Java: ++ +.Java Configuration +[source, java] +---- +@Bean +public Step step1(JobRepository jobRepository, PlatformTransactionManager transactionManager) { + return new StepBuilder("step1", jobRepository) + .chunk(10, transactionManager) + .reader(itemReader()) + .writer(itemWriter()) + .startLimit(1) + .build(); +} +---- + +XML:: ++ +The following code fragment shows an example of a start limit configuration in XML: ++ +.XML Configuration +[source, xml] +---- + + + + + +---- + +==== + + +The step shown in the preceding example can be run only once. Attempting to run it again +causes a `StartLimitExceededException` to be thrown. Note that the default value for the +start-limit is `Integer.MAX_VALUE`. + +[[allowStartIfComplete]] +== Restarting a Completed `Step` + +In the case of a restartable job, there may be one or more steps that should always be +run, regardless of whether or not they were successful the first time. An example might +be a validation step or a `Step` that cleans up resources before processing. During +normal processing of a restarted job, any step with a status of `COMPLETED` (meaning it +has already been completed successfully), is skipped. Setting `allow-start-if-complete` to +`true` overrides this so that the step always runs. + + +[tabs] +==== +Java:: ++ +The following code fragment shows how to define a restartable job in Java: ++ +.Java Configuration +[source, java] +---- +@Bean +public Step step1(JobRepository jobRepository, PlatformTransactionManager transactionManager) { + return new StepBuilder("step1", jobRepository) + .chunk(10, transactionManager) + .reader(itemReader()) + .writer(itemWriter()) + .allowStartIfComplete(true) + .build(); +} +---- + +XML:: ++ +The following code fragment shows how to define a restartable job in XML: ++ +.XML Configuration +[source, xml] +---- + + + + + +---- + +==== + + + +[[stepRestartExample]] +== `Step` Restart Configuration Example + + +[tabs] +==== +Java:: ++ +The following Java example shows how to configure a job to have steps that can be +restarted: ++ +.Java Configuration +[source, java] +---- +@Bean +public Job footballJob(JobRepository jobRepository) { + return new JobBuilder("footballJob", jobRepository) + .start(playerLoad()) + .next(gameLoad()) + .next(playerSummarization()) + .build(); +} + +@Bean +public Step playerLoad(JobRepository jobRepository, PlatformTransactionManager transactionManager) { + return new StepBuilder("playerLoad", jobRepository) + .chunk(10, transactionManager) + .reader(playerFileItemReader()) + .writer(playerWriter()) + .build(); +} + +@Bean +public Step gameLoad(JobRepository jobRepository, PlatformTransactionManager transactionManager) { + return new StepBuilder("gameLoad", jobRepository) + .allowStartIfComplete(true) + .chunk(10, transactionManager) + .reader(gameFileItemReader()) + .writer(gameWriter()) + .build(); +} + +@Bean +public Step playerSummarization(JobRepository jobRepository, PlatformTransactionManager transactionManager) { + return new StepBuilder("playerSummarization", jobRepository) + .startLimit(2) + .chunk(10, transactionManager) + .reader(playerSummarizationSource()) + .writer(summaryWriter()) + .build(); +} +---- + +XML:: ++ +The following XML example shows how to configure a job to have steps that can be +restarted: ++ +.XML Configuration +[source, xml] +---- + + + + + + + + + + + + + + + + + +---- + +==== + +The preceding example configuration is for a job 
that loads in information about football
+games and summarizes them. It contains three steps: `playerLoad`, `gameLoad`, and
+`playerSummarization`. The `playerLoad` step loads player information from a flat file,
+while the `gameLoad` step does the same for games. The final step,
+`playerSummarization`, then summarizes the statistics for each player, based upon the
+provided games. It is assumed that the file loaded by `playerLoad` must be loaded only
+once but that `gameLoad` can load any games found within a particular directory,
+deleting them after they have been successfully loaded into the database. As a result,
+the `playerLoad` step contains no additional configuration. It can be started any number
+of times and is skipped if complete. The `gameLoad` step, however, needs to be run
+every time in case extra files have been added since it last ran. It has
+`allow-start-if-complete` set to `true` so that it is always started. (It is assumed
+that the database table that games are loaded into has a process indicator on it, to ensure
+new games can be properly found by the summarization step.) The summarization step,
+which is the most important in the job, is configured to have a start limit of 2. This
+is useful because, if the step continually fails, a new exit code is returned to the
+operators that control job execution, and it cannot start again until manual
+intervention has taken place.
+
+NOTE: This job provides an example for this document and is not the same as the `footballJob`
+found in the samples project.
+
+The remainder of this section describes what happens for each of the three runs of the
+`footballJob` example.
+
+Run 1:
+
+. `playerLoad` runs and completes successfully, adding 400 players to the `PLAYERS`
+table.
+. `gameLoad` runs and processes 11 files worth of game data, loading their contents
+into the `GAMES` table.
+. `playerSummarization` begins processing and fails after 5 minutes.
+
+Run 2:
+
+. `playerLoad` does not run, since it has already completed successfully, and
+`allow-start-if-complete` is `false` (the default).
+. `gameLoad` runs again and processes another 2 files, loading their contents into the
+`GAMES` table as well (with a process indicator indicating they have yet to be
+processed).
+. `playerSummarization` begins processing of all remaining game data (filtering using the
+process indicator) and fails again after 30 minutes.
+
+Run 3:
+
+. `playerLoad` does not run, since it has already completed successfully, and
+`allow-start-if-complete` is `false` (the default).
+. `gameLoad` runs again and processes another 2 files, loading their contents into the
+`GAMES` table as well (with a process indicator indicating they have yet to be
+processed).
+. `playerSummarization` is not started and the job is immediately killed, since this is
+the third execution of `playerSummarization`, and its limit is only 2. Either the limit
+must be raised or the `Job` must be executed as a new `JobInstance`.
+
diff --git a/spring-batch-docs/modules/ROOT/pages/step/chunk-oriented-processing/retry-logic.adoc b/spring-batch-docs/modules/ROOT/pages/step/chunk-oriented-processing/retry-logic.adoc
new file mode 100644
index 0000000000..c841e94a7c
--- /dev/null
+++ b/spring-batch-docs/modules/ROOT/pages/step/chunk-oriented-processing/retry-logic.adoc
@@ -0,0 +1,58 @@
+[[retryLogic]]
+= Configuring Retry Logic
+
+In most cases, you want an exception to cause either a skip or a `Step` failure. However,
+not all exceptions are deterministic.
If a `FlatFileParseException` is encountered while
+reading, it is always thrown for that record. Resetting the `ItemReader` does not help.
+However, for other exceptions (such as a `DeadlockLoserDataAccessException`, which
+indicates that the current process has attempted to update a record that another process
+holds a lock on), waiting and trying again might result in success.
+
+
+[tabs]
+====
+Java::
++
+In Java, retry should be configured as follows:
++
+[source, java]
+----
+@Bean
+public Step step1(JobRepository jobRepository, PlatformTransactionManager transactionManager) {
+    return new StepBuilder("step1", jobRepository)
+                .chunk(2, transactionManager)
+                .reader(itemReader())
+                .writer(itemWriter())
+                .faultTolerant()
+                .retryLimit(3)
+                .retry(DeadlockLoserDataAccessException.class)
+                .build();
+}
+----
+
+XML::
++
+In XML, retry should be configured as follows:
++
+[source, xml]
----
+<step id="step1">
+   <tasklet>
+      <chunk reader="itemReader" writer="itemWriter"
+             commit-interval="2" retry-limit="3">
+         <retryable-exception-classes>
+            <include class="org.springframework.dao.DeadlockLoserDataAccessException"/>
+         </retryable-exception-classes>
+      </chunk>
+   </tasklet>
+</step>
+----
+
+====
+
+
+
+The `Step` allows a limit for the number of times an individual item can be retried and a
+list of exceptions that are "`retryable`". You can find more details on how retry works in
+the xref:retry.adoc[Retry] section.
+
diff --git a/spring-batch-docs/modules/ROOT/pages/step/chunk-oriented-processing/transaction-attributes.adoc b/spring-batch-docs/modules/ROOT/pages/step/chunk-oriented-processing/transaction-attributes.adoc
new file mode 100644
index 0000000000..fdd7fcc327
--- /dev/null
+++ b/spring-batch-docs/modules/ROOT/pages/step/chunk-oriented-processing/transaction-attributes.adoc
@@ -0,0 +1,57 @@
+[[transactionAttributes]]
+= Transaction Attributes
+
+You can use transaction attributes to control the `isolation`, `propagation`, and
+`timeout` settings. You can find more information on setting transaction attributes in
+the
+https://docs.spring.io/spring/docs/current/spring-framework-reference/data-access.html#transaction[Spring
+core documentation].
+
+[tabs]
+====
+Java::
++
+The following example sets the `isolation`, `propagation`, and `timeout` transaction
+attributes in Java:
++
+.Java Configuration
+[source, java]
+----
+@Bean
+public Step step1(JobRepository jobRepository, PlatformTransactionManager transactionManager) {
+    DefaultTransactionAttribute attribute = new DefaultTransactionAttribute();
+    attribute.setPropagationBehavior(Propagation.REQUIRED.value());
+    attribute.setIsolationLevel(Isolation.DEFAULT.value());
+    attribute.setTimeout(30);
+
+    return new StepBuilder("step1", jobRepository)
+                .chunk(2, transactionManager)
+                .reader(itemReader())
+                .writer(itemWriter())
+                .transactionAttribute(attribute)
+                .build();
+}
+----
+
+XML::
++
+The following example sets the `isolation`, `propagation`, and `timeout` transaction
+attributes in XML:
++
+.XML Configuration
+[source, xml]
+----
+<step id="step1">
+    <tasklet>
+        <chunk reader="itemReader" writer="itemWriter" commit-interval="2"/>
+        <transaction-attributes isolation="DEFAULT"
+                                propagation="REQUIRED"
+                                timeout="30"/>
+    </tasklet>
+</step>
+----
+
+====
+
+
+
diff --git a/spring-batch-docs/modules/ROOT/pages/step/controlling-flow.adoc b/spring-batch-docs/modules/ROOT/pages/step/controlling-flow.adoc
new file mode 100644
index 0000000000..45aaa009bc
--- /dev/null
+++ b/spring-batch-docs/modules/ROOT/pages/step/controlling-flow.adoc
@@ -0,0 +1,828 @@
+[[controllingStepFlow]]
+= Controlling Step Flow
+
+With the ability to group steps together within an owning job comes the need to be able
+to control how the job "`flows`" from one step to another. The failure of a `Step` does not
+necessarily mean that the `Job` should fail. Furthermore, there may be more than one type
+of "`success`" that determines which `Step` should be executed next.
Depending upon how a +group of `Steps` is configured, certain steps may not even be processed at all. + +[[SequentialFlow]] +== Sequential Flow + +The simplest flow scenario is a job where all of the steps execute sequentially, as +the following image shows: + +.Sequential Flow +image::sequential-flow.png[Sequential Flow, scaledwidth="60%"] + +This can be achieved by using `next` in a `step`. + + +[tabs] +==== +Java:: ++ +The following example shows how to use the `next()` method in Java: ++ +.Java Configuration +[source, java] +---- +@Bean +public Job job(JobRepository jobRepository) { + return new JobBuilder("job", jobRepository) + .start(stepA()) + .next(stepB()) + .next(stepC()) + .build(); +} +---- + +XML:: ++ +The following example shows how to use the `next` attribute in XML: ++ +.XML Configuration +[source, xml] +---- + + + + + +---- + +==== + + + +In the scenario above, `stepA` runs first because it is the first `Step` listed. If +`stepA` completes normally, `stepB` runs, and so on. However, if `step A` fails, +the entire `Job` fails and `stepB` does not execute. + +[role="xmlContent"] +NOTE: With the Spring Batch XML namespace, the first step listed in the configuration is +_always_ the first step run by the `Job`. The order of the other step elements does not +matter, but the first step must always appear first in the XML. + +[[conditionalFlow]] +== Conditional Flow + +In the preceding example, there are only two possibilities: + +. The `step` is successful, and the next `step` should be executed. +. The `step` failed, and, thus, the `job` should fail. + +In many cases, this may be sufficient. However, what about a scenario in which the +failure of a `step` should trigger a different `step`, rather than causing failure? The +following image shows such a flow: + +.Conditional Flow +image::conditional-flow.png[Conditional Flow, scaledwidth="60%"] + + +[[nextElement]] +[tabs] +==== +Java:: ++ +The Java API offers a fluent set of methods that let you specify the flow and what to do +when a step fails. The following example shows how to specify one step (`stepA`) and then +proceed to either of two different steps (`stepB` or `stepC`), depending on whether +`stepA` succeeds: ++ +.Java Configuration +[source, java] +---- +@Bean +public Job job(JobRepository jobRepository) { + return new JobBuilder("job", jobRepository) + .start(stepA()) + .on("*").to(stepB()) + .from(stepA()).on("FAILED").to(stepC()) + .end() + .build(); +} +---- + +XML:: ++ +To handle more complex scenarios, the Spring Batch XML namespace lets you define transitions +elements within the step element. One such transition is the `next` +element. Like the `next` attribute, the `next` element tells the `Job` which `Step` to +execute next. However, unlike the attribute, any number of `next` elements are allowed on +a given `Step`, and there is no default behavior in the case of failure. This means that, if +transition elements are used, all of the behavior for the `Step` transitions must be +defined explicitly. Note also that a single step cannot have both a `next` attribute and +a `transition` element. ++ +The `next` element specifies a pattern to match and the step to execute next, as +the following example shows: ++ +.XML Configuration +[source, xml] +---- + + + + + + + + +---- + +==== + + +[tabs] +==== +Java:: ++ +When using java configuration, the `on()` method uses a simple pattern-matching scheme to +match the `ExitStatus` that results from the execution of the `Step`. 
+ +XML:: ++ +When using XML configuration, the `on` attribute of a transition element uses a simple +pattern-matching scheme to match the `ExitStatus` that results from the execution of the +`Step`. + +==== + +Only two special characters are allowed in the pattern: + +* `*` matches zero or more characters +* `?` matches exactly one character + +For example, `c*t` matches `cat` and `count`, while `c?t` matches `cat` but not `count`. + +While there is no limit to the number of transition elements on a `Step`, if the `Step` +execution results in an `ExitStatus` that is not covered by an element, the +framework throws an exception and the `Job` fails. The framework automatically orders +transitions from most specific to least specific. This means that, even if the ordering +were swapped for `stepA` in the preceding example, an `ExitStatus` of `FAILED` would still go +to `stepC`. + +[[batchStatusVsExitStatus]] +=== Batch Status Versus Exit Status + +When configuring a `Job` for conditional flow, it is important to understand the +difference between `BatchStatus` and `ExitStatus`. `BatchStatus` is an enumeration that +is a property of both `JobExecution` and `StepExecution` and is used by the framework to +record the status of a `Job` or `Step`. It can be one of the following values: +`COMPLETED`, `STARTING`, `STARTED`, `STOPPING`, `STOPPED`, `FAILED`, `ABANDONED`, or +`UNKNOWN`. Most of them are self explanatory: `COMPLETED` is the status set when a step +or job has completed successfully, `FAILED` is set when it fails, and so on. + + +[tabs] +==== +Java:: ++ +The following example contains the `on` element when using Java Configuration: ++ +[source, java] +---- +... +.from(stepA()).on("FAILED").to(stepB()) +... +---- + +XML:: ++ +The following example contains the `next` element when using XML configuration: +// TODO It might help readers to know the difference between STARTING and STARTED (same +// for STOPPING and STOPPED). Specifically, when does the status go from STARTING to +// STARTED? ++ +[source, xml] +---- + +---- + +==== + + + +At first glance, it would appear that `on` references the `BatchStatus` of the `Step` to +which it belongs. However, it actually references the `ExitStatus` of the `Step`. As the +name implies, `ExitStatus` represents the status of a `Step` after it finishes execution. + + +[tabs] +==== +Java:: ++ +When using Java configuration, the `on()` method shown in the preceding +Java configuration example references the exit code of `ExitStatus`. + +XML:: ++ +More specifically, when using XML configuration, the `next` element shown in the +preceding XML configuration example references the exit code of `ExitStatus`. +==== + +In English, it says: "`go to stepB if the exit code is FAILED`". By default, the exit +code is always the same as the `BatchStatus` for the `Step`, which is why the preceding entry +works. However, what if the exit code needs to be different? 
A good example comes from +the skip sample job within the samples project: + + +[tabs] +==== +Java:: ++ +The following example shows how to work with a different exit code in Java: ++ +.Java Configuration +[source, java] +---- +@Bean +public Job job(JobRepository jobRepository) { + return new JobBuilder("job", jobRepository) + .start(step1()).on("FAILED").end() + .from(step1()).on("COMPLETED WITH SKIPS").to(errorPrint1()) + .from(step1()).on("*").to(step2()) + .end() + .build(); +} +---- + +XML:: ++ +The following example shows how to work with a different exit code in XML: ++ +.XML Configuration +[source, xml] +---- + + + + + +---- + +==== + + + +`step1` has three possibilities: + +* The `Step` failed, in which case the job should fail. +* The `Step` completed successfully. +* The `Step` completed successfully but with an exit code of `COMPLETED WITH SKIPS`. In +this case, a different step should be run to handle the errors. + +The preceding configuration works. However, something needs to change the exit code based on +the condition of the execution having skipped records, as the following example shows: + +[source, java] +---- +public class SkipCheckingListener extends StepExecutionListenerSupport { + public ExitStatus afterStep(StepExecution stepExecution) { + String exitCode = stepExecution.getExitStatus().getExitCode(); + if (!exitCode.equals(ExitStatus.FAILED.getExitCode()) && + stepExecution.getSkipCount() > 0) { + return new ExitStatus("COMPLETED WITH SKIPS"); + } + else { + return null; + } + } +} +---- + +The preceding code is a `StepExecutionListener` that first checks to make sure the `Step` was +successful and then checks to see if the skip count on the `StepExecution` is higher than +0. If both conditions are met, a new `ExitStatus` with an exit code of +`COMPLETED WITH SKIPS` is returned. + +[[configuringForStop]] +== Configuring for Stop + +After the discussion of xref:step/controlling-flow.adoc#batchStatusVsExitStatus[`BatchStatus` and `ExitStatus`], +one might wonder how the `BatchStatus` and `ExitStatus` are determined for the `Job`. +While these statuses are determined for the `Step` by the code that is executed, the +statuses for the `Job` are determined based on the configuration. + +So far, all of the job configurations discussed have had at least one final `Step` with +no transitions. + + +[tabs] +==== +Java:: ++ +In the following Java example, after the `step` executes, the `Job` ends: ++ +[source, java] +---- +@Bean +public Job job(JobRepository jobRepository) { + return new JobBuilder("job", jobRepository) + .start(step1()) + .build(); +} +---- + +XML:: ++ +In the following XML example, after the `step` executes, the `Job` ends: ++ +[source, xml] +---- + +---- + +==== + +If no transitions are defined for a `Step`, the status of the `Job` is defined as +follows: + +* If the `Step` ends with `ExitStatus` of `FAILED`, the `BatchStatus` and `ExitStatus` of +the `Job` are both `FAILED`. + +* Otherwise, the `BatchStatus` and `ExitStatus` of the `Job` are both `COMPLETED`. + +While this method of terminating a batch job is sufficient for some batch jobs, such as a +simple sequential step job, custom defined job-stopping scenarios may be required. For +this purpose, Spring Batch provides three transition elements to stop a `Job` (in +addition to the xref:step/controlling-flow.adoc#nextElement[`next` element] that we discussed previously). +Each of these stopping elements stops a `Job` with a particular `BatchStatus`. 
It is +important to note that the stop transition elements have no effect on either the +`BatchStatus` or `ExitStatus` of any `Steps` in the `Job`. These elements affect only the +final statuses of the `Job`. For example, it is possible for every step in a job to have +a status of `FAILED` but for the job to have a status of `COMPLETED`. + +[[endElement]] +=== Ending at a Step + +Configuring a step end instructs a `Job` to stop with a `BatchStatus` of `COMPLETED`. A +`Job` that has finished with a status of `COMPLETED` cannot be restarted (the framework throws +a `JobInstanceAlreadyCompleteException`). + + +[tabs] +==== +Java:: ++ +When using Java configuration, the `end` method is used for this task. The `end` method +also allows for an optional `exitStatus` parameter that you can use to customize the +`ExitStatus` of the `Job`. If no `exitStatus` value is provided, the `ExitStatus` is +`COMPLETED` by default, to match the `BatchStatus`. + +XML:: ++ +When using XML configuration, you can use the `end` element for this task. The `end` element +also allows for an optional `exit-code` attribute that you can use to customize the +`ExitStatus` of the `Job`. If no `exit-code` attribute is given, the `ExitStatus` is +`COMPLETED` by default, to match the `BatchStatus`. +==== + +Consider the following scenario: If `step2` fails, the `Job` stops with a +`BatchStatus` of `COMPLETED` and an `ExitStatus` of `COMPLETED`, and `step3` does not run. +Otherwise, execution moves to `step3`. Note that if `step2` fails, the `Job` is not +restartable (because the status is `COMPLETED`). + + +[tabs] +==== +Java:: ++ +The following example shows the scenario in Java: ++ +[source, java] +---- +@Bean +public Job job(JobRepository jobRepository) { + return new JobBuilder("job", jobRepository) + .start(step1()) + .next(step2()) + .on("FAILED").end() + .from(step2()).on("*").to(step3()) + .end() + .build(); +} +---- + +XML:: ++ +The following example shows the scenario in XML: ++ +[source, xml] +---- + + + + + + + + +---- + +==== + + + +[[failElement]] +=== Failing a Step + +Configuring a step to fail at a given point instructs a `Job` to stop with a +`BatchStatus` of `FAILED`. Unlike end, the failure of a `Job` does not prevent the `Job` +from being restarted. + +[role="xmlContent"] +When using XML configuration, the `fail` element also allows for an optional `exit-code` +attribute that can be used to customize the `ExitStatus` of the `Job`. If no `exit-code` +attribute is given, the `ExitStatus` is `FAILED` by default, to match the +`BatchStatus`. + +Consider the following scenario: If `step2` fails, the `Job` stops with a +`BatchStatus` of `FAILED` and an `ExitStatus` of `EARLY TERMINATION` and `step3` does not +execute. Otherwise, execution moves to `step3`. Additionally, if `step2` fails and the +`Job` is restarted, execution begins again on `step2`. + + +[tabs] +==== +Java:: ++ +The following example shows the scenario in Java: ++ +.Java Configuration +[source, java] +---- +@Bean +public Job job(JobRepository jobRepository) { + return new JobBuilder("job", jobRepository) + .start(step1()) + .next(step2()).on("FAILED").fail() + .from(step2()).on("*").to(step3()) + .end() + .build(); +} +---- + +XML:: ++ +The following example shows the scenario in XML: ++ +.XML Configuration +[source, xml] +---- + + + + + + + + +---- + +==== + +[[stopElement]] +=== Stopping a Job at a Given Step + +Configuring a job to stop at a particular step instructs a `Job` to stop with a +`BatchStatus` of `STOPPED`. 
Stopping a `Job` can provide a temporary break in processing, +so that the operator can take some action before restarting the `Job`. + + +[tabs] +==== +Java:: ++ +When using Java configuration, the `stopAndRestart` method requires a `restart` attribute +that specifies the step where execution should pick up when the Job is restarted. + +XML:: ++ +When using XML configuration, a `stop` element requires a `restart` attribute that specifies +the step where execution should pick up when the `Job` is restarted. +==== + +Consider the following scenario: If `step1` finishes with `COMPLETE`, the job then +stops. Once it is restarted, execution begins on `step2`. + +[tabs] +==== +Java:: ++ +The following example shows the scenario in Java: ++ +[source, java] +---- +@Bean +public Job job(JobRepository jobRepository) { + return new JobBuilder("job", jobRepository) + .start(step1()).on("COMPLETED").stopAndRestart(step2()) + .end() + .build(); +} +---- + +XML:: ++ +The following listing shows the scenario in XML: ++ +[source, xml] +---- + + + + + +---- + +==== + +[[programmaticFlowDecisions]] +== Programmatic Flow Decisions + +In some situations, more information than the `ExitStatus` may be required to decide +which step to execute next. In this case, a `JobExecutionDecider` can be used to assist +in the decision, as the following example shows: + +[source, java] +---- +public class MyDecider implements JobExecutionDecider { + public FlowExecutionStatus decide(JobExecution jobExecution, StepExecution stepExecution) { + String status; + if (someCondition()) { + status = "FAILED"; + } + else { + status = "COMPLETED"; + } + return new FlowExecutionStatus(status); + } +} +---- + + +[tabs] +==== +Java:: ++ +In the following example, a bean implementing the `JobExecutionDecider` is passed +directly to the `next` call when using Java configuration: ++ +.Java Configuration +[source, java] +---- +@Bean +public Job job(JobRepository jobRepository) { + return new JobBuilder("job", jobRepository) + .start(step1()) + .next(decider()).on("FAILED").to(step2()) + .from(decider()).on("COMPLETED").to(step3()) + .end() + .build(); +} +---- + +XML:: ++ +In the following sample job configuration, a `decision` specifies the decider to use as +well as all of the transitions: ++ +.XML Configuration +[source, xml] +---- + + + + + + + + + + + + + +---- + +==== + + + +[[split-flows]] +== Split Flows + +Every scenario described so far has involved a `Job` that executes its steps one at a +time in a linear fashion. In addition to this typical style, Spring Batch also allows +for a job to be configured with parallel flows. + + +[tabs] +==== +Java:: ++ +Java-based configuration lets you configure splits through the provided builders. As the +following example shows, the `split` element contains one or more `flow` elements, where +entire separate flows can be defined. A `split` element can also contain any of the +previously discussed transition elements, such as the `next` attribute or the `next`, +`end`, or `fail` elements. ++ +[source, java] +---- +@Bean +public Flow flow1() { + return new FlowBuilder("flow1") + .start(step1()) + .next(step2()) + .build(); +} + +@Bean +public Flow flow2() { + return new FlowBuilder("flow2") + .start(step3()) + .build(); +} + +@Bean +public Job job(Flow flow1, Flow flow2) { + return this.jobBuilderFactory.get("job") + .start(flow1) + .split(new SimpleAsyncTaskExecutor()) + .add(flow2) + .next(step4()) + .end() + .build(); +} +---- + +XML:: ++ +The XML namespace lets you use the `split` element. 
As the following example shows,
+the `split` element contains one or more `flow` elements, where entire separate flows can
+be defined. A `split` element can also contain any of the previously discussed transition
+elements, such as the `next` attribute or the `next`, `end`, or `fail` elements.
++
+[source, xml]
+----
+<split id="split1" next="step4">
+    <flow>
+        <step id="step1" parent="s1" next="step2"/>
+        <step id="step2" parent="s2"/>
+    </flow>
+    <flow>
+        <step id="step3" parent="s3"/>
+    </flow>
+</split>
+<step id="step4" parent="s4"/>
+----
+
+====
+
+
+
+[[external-flows]]
+== Externalizing Flow Definitions and Dependencies Between Jobs
+
+Part of the flow in a job can be externalized as a separate bean definition and then
+re-used. There are two ways to do so. The first is to declare the flow as a
+reference to one defined elsewhere.
+
+
+[tabs]
+====
+Java::
++
+The following Java example shows how to declare a flow as a reference to a flow defined
+elsewhere:
++
+.Java Configuration
+[source, java]
+----
+@Bean
+public Job job(JobRepository jobRepository) {
+    return new JobBuilder("job", jobRepository)
+                .start(flow1())
+                .next(step3())
+                .end()
+                .build();
+}
+
+@Bean
+public Flow flow1() {
+    return new FlowBuilder<SimpleFlow>("flow1")
+            .start(step1())
+            .next(step2())
+            .build();
+}
+----
+
+XML::
++
+The following XML example shows how to declare a flow as a reference to a flow defined
+elsewhere:
++
+.XML Configuration
+[source, xml]
+----
+<job id="job">
+    <flow id="job1.flow1" parent="flow1" next="step3"/>
+    <step id="step3" parent="s3"/>
+</job>
+
+<flow id="flow1">
+    <step id="step1" parent="s1" next="step2"/>
+    <step id="step2" parent="s2"/>
+</flow>
+----
+
+====
+
+
+
+The effect of defining an external flow, as shown in the preceding example, is to insert
+the steps from the external flow into the job as if they had been declared inline. In
+this way, many jobs can refer to the same template flow and compose such templates into
+different logical flows. This is also a good way to separate the integration testing of
+the individual flows.
+
+The other form of an externalized flow is to use a `JobStep`. A `JobStep` is similar to a
+`FlowStep` but actually creates and launches a separate job execution for the steps in
+the flow specified.
+
+
+[tabs]
+====
+Java::
++
+The following example shows an example of a `JobStep` in Java:
++
+.Java Configuration
+[source, java]
+----
+@Bean
+public Job jobStepJob(JobRepository jobRepository) {
+    return new JobBuilder("jobStepJob", jobRepository)
+                .start(jobStepJobStep1(null))
+                .build();
+}
+
+@Bean
+public Step jobStepJobStep1(JobLauncher jobLauncher, JobRepository jobRepository) {
+    return new StepBuilder("jobStepJobStep1", jobRepository)
+                .job(job())
+                .launcher(jobLauncher)
+                .parametersExtractor(jobParametersExtractor())
+                .build();
+}
+
+@Bean
+public Job job(JobRepository jobRepository) {
+    return new JobBuilder("job", jobRepository)
+                .start(step1())
+                .build();
+}
+
+@Bean
+public DefaultJobParametersExtractor jobParametersExtractor() {
+    DefaultJobParametersExtractor extractor = new DefaultJobParametersExtractor();
+
+    extractor.setKeys(new String[]{"input.file"});
+
+    return extractor;
+}
+----
+
+XML::
++
+The following example shows an example of a `JobStep` in XML:
++
+.XML Configuration
+[source, xml]
+----
+<job id="jobStepJob" restartable="true">
+   <step id="jobStepJob.step1">
+      <job ref="job" job-launcher="jobLauncher"
+           job-parameters-extractor="jobParametersExtractor"/>
+   </step>
+</job>
+
+<job id="job" restartable="true">...</job>
+
+<bean id="jobParametersExtractor" class="org.springframework.batch.core.step.job.DefaultJobParametersExtractor">
+   <property name="keys" value="input.file"/>
+</bean>
+----
+
+====
+
+The job parameters extractor is a strategy that determines how the `ExecutionContext` for
+the `Step` is converted into `JobParameters` for the `Job` that is run. The `JobStep` is
+useful when you want to have some more granular options for monitoring and reporting on
+jobs and steps. Using `JobStep` is also often a good answer to the question: "`How do I
+create dependencies between jobs?`" It is a good way to break up a large system into
+smaller modules and control the flow of jobs.
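+
+`DefaultJobParametersExtractor` covers the common case of copying named keys. For full
+control, you can implement the `JobParametersExtractor` interface directly. The
+following sketch is illustrative only (the class name and context key are hypothetical,
+not from the original text):
+
+[source, java]
+----
+public class InputFileJobParametersExtractor implements JobParametersExtractor {
+
+    @Override
+    public JobParameters getJobParameters(Job job, StepExecution stepExecution) {
+        // Promote a value from the step's ExecutionContext to a job parameter
+        // of the child job that the JobStep launches.
+        String inputFile = stepExecution.getExecutionContext().getString("input.file");
+        return new JobParametersBuilder()
+                .addString("input.file", inputFile)
+                .toJobParameters();
+    }
+}
+----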
+ diff --git a/spring-batch-docs/modules/ROOT/pages/step/late-binding.adoc b/spring-batch-docs/modules/ROOT/pages/step/late-binding.adoc new file mode 100644 index 0000000000..ceb0d390aa --- /dev/null +++ b/spring-batch-docs/modules/ROOT/pages/step/late-binding.adoc @@ -0,0 +1,405 @@ +[[late-binding]] += Late Binding of `Job` and `Step` Attributes + +Both the XML and flat file examples shown earlier use the Spring `Resource` abstraction +to obtain a file. This works because `Resource` has a `getFile` method that returns a +`java.io.File`. You can configure both XML and flat file resources by using standard Spring +constructs: + + +[tabs] +==== +Java:: ++ +The following example shows late binding in Java: ++ +.Java Configuration +[source, java] +---- +@Bean +public FlatFileItemReader flatFileItemReader() { + FlatFileItemReader reader = new FlatFileItemReaderBuilder() + .name("flatFileItemReader") + .resource(new FileSystemResource("file://outputs/file.txt")) + ... +} +---- + +XML:: ++ +The following example shows late binding in XML: ++ +.XML Configuration +[source, xml] +---- + + + +---- + +==== + + + + +The preceding `Resource` loads the file from the specified file system location. Note +that absolute locations have to start with a double slash (`//`). In most Spring +applications, this solution is good enough, because the names of these resources are +known at compile time. However, in batch scenarios, the file name may need to be +determined at runtime as a parameter to the job. This can be solved using `-D` parameters +to read a system property. + + +[tabs] +==== +Java:: ++ +The following shows how to read a file name from a property in Java: ++ +.Java Configuration +[source, java] +---- +@Bean +public FlatFileItemReader flatFileItemReader(@Value("${input.file.name}") String name) { + return new FlatFileItemReaderBuilder() + .name("flatFileItemReader") + .resource(new FileSystemResource(name)) + ... +} +---- + +XML:: ++ +The following example shows how to read a file name from a property in XML: ++ +.XML Configuration +[source, xml] +---- + + + +---- + +==== + + + + +All that would be required for this solution to work would be a system argument (such as +`-Dinput.file.name="file://outputs/file.txt"`). + +NOTE: Although you can use a `PropertyPlaceholderConfigurer` here, it is not +necessary if the system property is always set because the `ResourceEditor` in Spring +already filters and does placeholder replacement on system properties. + +Often, in a batch setting, it is preferable to parameterize the file name in the +`JobParameters` of the job (instead of through system properties) and access them that +way. To accomplish this, Spring Batch allows for the late binding of various `Job` and +`Step` attributes. + + +[tabs] +==== +Java:: ++ +The following example shows how to parameterize a file name in Java: ++ +.Java Configuration +[source, java] +---- +@StepScope +@Bean +public FlatFileItemReader flatFileItemReader(@Value("#{jobParameters['input.file.name']}") String name) { + return new FlatFileItemReaderBuilder() + .name("flatFileItemReader") + .resource(new FileSystemResource(name)) + ... +} +---- + +XML:: ++ +The following example shows how to parameterize a file name in XML: ++ +.XML Configuration +[source, xml] +---- + + + +---- + +==== + + + + + +You can access both the `JobExecution` and `StepExecution` level `ExecutionContext` in +the same way. 
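+
+For these lookups to succeed, something must first put the value into the
+`ExecutionContext`. As an illustration (a sketch with a hypothetical class name, not
+from the original text), a `JobExecutionListener` can seed the job-level context before
+any steps run:
+
+[source, java]
+----
+public class InputFileListener implements JobExecutionListener {
+
+    @Override
+    public void beforeJob(JobExecution jobExecution) {
+        // Makes #{jobExecutionContext['input.file.name']} resolvable in step-scoped beans.
+        jobExecution.getExecutionContext().putString("input.file.name", "file://outputs/file.txt");
+    }
+}
+----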
+
+
+[tabs]
+====
+Java::
++
+The following example shows how to access the `ExecutionContext` in Java:
++
+.Java Configuration
+[source, java]
+----
+@StepScope
+@Bean
+public FlatFileItemReader flatFileItemReader(@Value("#{jobExecutionContext['input.file.name']}") String name) {
+    return new FlatFileItemReaderBuilder()
+            .name("flatFileItemReader")
+            .resource(new FileSystemResource(name))
+            ...
+}
+----
++
+.Java Configuration
+[source, java]
+----
+@StepScope
+@Bean
+public FlatFileItemReader flatFileItemReader(@Value("#{stepExecutionContext['input.file.name']}") String name) {
+    return new FlatFileItemReaderBuilder()
+            .name("flatFileItemReader")
+            .resource(new FileSystemResource(name))
+            ...
+}
+----
+
+XML::
++
+The following example shows how to access the `ExecutionContext` in XML:
++
+.XML Configuration
+[source, xml]
+----
+<bean id="flatFileItemReader" scope="step"
+      class="org.springframework.batch.item.file.FlatFileItemReader">
+    <property name="resource" value="#{jobExecutionContext['input.file.name']}" />
+</bean>
+----
++
+.XML Configuration
+[source, xml]
+----
+<bean id="flatFileItemReader" scope="step"
+      class="org.springframework.batch.item.file.FlatFileItemReader">
+    <property name="resource" value="#{stepExecutionContext['input.file.name']}" />
+</bean>
+----
+====
+
+
+NOTE: Any bean that uses late binding must be declared with `scope="step"`. See
+xref:step/late-binding.adoc#step-scope[Step Scope] for more information.
+A `Step` bean should not be step-scoped. If late binding is needed in a step
+definition, the components of that step (tasklet, item reader or writer, and so on)
+are the ones that should be scoped instead.
+
+NOTE: If you use Spring 3.0 (or above), the expressions in step-scoped beans are in the
+Spring Expression Language, a powerful general purpose language with many interesting
+features. To provide backward compatibility, if Spring Batch detects the presence of
+older versions of Spring, it uses a native expression language that is less powerful and
+that has slightly different parsing rules. The main difference is that the map keys in
+the example above do not need to be quoted with Spring 2.5, but the quotes are mandatory
+in Spring 3.0.
+// TODO Where is that older language described? It'd be good to have a link to it here.
+// Also, given that we are up to version 5 of Spring, should we still be talking about
+// things from before version 3? (In other words, we should provide a link or drop the
+// whole thing.)
+
+[[step-scope]]
+== Step Scope
+
+All of the late binding examples shown earlier have a scope of `step` declared on the
+bean definition.
+
+
+[tabs]
+====
+Java::
++
+The following example shows binding to step scope in Java:
++
+.Java Configuration
+[source, java]
+----
+@StepScope
+@Bean
+public FlatFileItemReader flatFileItemReader(@Value("#{jobParameters['input.file.name']}") String name) {
+    return new FlatFileItemReaderBuilder()
+            .name("flatFileItemReader")
+            .resource(new FileSystemResource(name))
+            ...
+}
+----
+
+XML::
++
+The following example shows binding to step scope in XML:
++
+.XML Configuration
+[source, xml]
+----
+<bean id="flatFileItemReader" scope="step"
+      class="org.springframework.batch.item.file.FlatFileItemReader">
+    <property name="resource" value="#{jobParameters['input.file.name']}" />
+</bean>
+----
+
+====
+
+
+
+A scope of `step` is required to use late binding, because the bean cannot
+actually be instantiated until the `Step` starts, at which point the attributes can be
+resolved. Because it is not part of the Spring container by default, the scope must be added
+explicitly, by using the `batch` namespace, by including a bean definition explicitly
+for the `StepScope`, or by using the `@EnableBatchProcessing` annotation. Use only one of
+those methods. The following example uses the `batch` namespace:
+
+[source, xml]
+----
+<beans xmlns="http://www.springframework.org/schema/beans"
+       xmlns:batch="http://www.springframework.org/schema/batch"
+       xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+       xsi:schemaLocation="...">
+
+<batch:job .../>
+
+...
+
+</beans>
+----
+
+The following example includes the bean definition explicitly:
+
+[source, xml]
+----
+<bean class="org.springframework.batch.core.scope.StepScope" />
+----
+
+[[job-scope]]
+== Job Scope
+
+`Job` scope, introduced in Spring Batch 3.0, is similar to `Step` scope in configuration
+but is a scope for the `Job` context, so that there is only one instance of such a bean
+per running job. Additionally, support is provided for late binding of references
+accessible from the `JobContext` by using `#{..}` placeholders. Using this feature, you can pull bean
+properties from the job or job execution context and the job parameters.
+
+
+[tabs]
+====
+Java::
++
+The following example shows binding to job scope in Java:
++
+.Java Configuration
+[source, java]
+----
+@JobScope
+@Bean
+public FlatFileItemReader flatFileItemReader(@Value("#{jobParameters['input']}") String name) {
+    return new FlatFileItemReaderBuilder()
+            .name("flatFileItemReader")
+            .resource(new FileSystemResource(name))
+            ...
+}
+----
++
+.Java Configuration
+[source, java]
+----
+@JobScope
+@Bean
+public FlatFileItemReader flatFileItemReader(@Value("#{jobExecutionContext['input.name']}") String name) {
+    return new FlatFileItemReaderBuilder()
+            .name("flatFileItemReader")
+            .resource(new FileSystemResource(name))
+            ...
+}
+----
+
+XML::
++
+The following example shows binding to job scope in XML:
++
+.XML Configuration
+[source, xml]
+----
+<bean id="flatFileItemReader" scope="job"
+      class="org.springframework.batch.item.file.FlatFileItemReader">
+    <property name="resource" value="#{jobParameters['input']}" />
+</bean>
+----
++
+.XML Configuration
+[source, xml]
+----
+<bean id="flatFileItemReader" scope="job"
+      class="org.springframework.batch.item.file.FlatFileItemReader">
+    <property name="resource" value="#{jobExecutionContext['input.name']}" />
+</bean>
+----
+
+====
+
+
+
+Because it is not part of the Spring container by default, the scope must be added
+explicitly, by using the `batch` namespace, by including a bean definition explicitly for
+the `JobScope`, or by using the `@EnableBatchProcessing` annotation (choose only one approach).
+The following example uses the `batch` namespace:
+
+[source, xml]
+----
+<beans xmlns="http://www.springframework.org/schema/beans"
+       xmlns:batch="http://www.springframework.org/schema/batch"
+       xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+       xsi:schemaLocation="...">
+
+<batch:job .../>
+
+...
+
+</beans>
+----
+
+The following example includes a bean that explicitly defines the `JobScope`:
+
+[source, xml]
+----
+<bean class="org.springframework.batch.core.scope.JobScope" />
+----
+
+NOTE: There are some practical limitations of using job-scoped beans in multi-threaded
+or partitioned steps. Spring Batch does not control the threads spawned in these
+use cases, so it is not possible to set them up correctly to use such beans. Hence,
+we do not recommend using job-scoped beans in multi-threaded or partitioned steps.
+
+[[scoping-item-streams]]
+== Scoping `ItemStream` Components
+
+When using the Java configuration style to define job- or step-scoped `ItemStream` beans,
+the return type of the bean definition method should be at least `ItemStream`. This is required
+so that Spring Batch correctly creates a proxy that implements this interface and, therefore,
+honors its contract by calling the `open`, `update`, and `close` methods as expected.
+
+It is recommended to make the bean definition method of such beans return the most specific
+known implementation, as shown in the following example:
+
+.Define a step-scoped bean with the most specific return type
+[source, java]
+----
+@Bean
+@StepScope
+public FlatFileItemReader flatFileItemReader(@Value("#{jobParameters['input.file.name']}") String name) {
+    return new FlatFileItemReaderBuilder()
+            .resource(new FileSystemResource(name))
+            // set other properties of the item reader
+            .build();
+}
+----
diff --git a/spring-batch-docs/modules/ROOT/pages/step/tasklet.adoc b/spring-batch-docs/modules/ROOT/pages/step/tasklet.adoc
new file mode 100644
index 0000000000..35bbf33be7
--- /dev/null
+++ b/spring-batch-docs/modules/ROOT/pages/step/tasklet.adoc
@@ -0,0 +1,212 @@
+[[taskletStep]]
+= `TaskletStep`
+
+xref:step/chunk-oriented-processing.adoc[Chunk-oriented processing] is not the only way to process in a
+`Step`. What if a `Step` must consist of a stored procedure call? You could
+implement the call as an `ItemReader` and return null after the procedure finishes.
+However, doing so is a bit unnatural, since there would need to be a no-op `ItemWriter`.
+Spring Batch provides the `TaskletStep` for this scenario.
+
+The `Tasklet` interface has one method, `execute`, which is called
+repeatedly by the `TaskletStep` until it either returns `RepeatStatus.FINISHED` or throws
+an exception to signal a failure. Each call to a `Tasklet` is wrapped in a transaction.
+`Tasklet` implementors might call a stored procedure, a script, or a SQL update
+statement.
+
+
+[tabs]
+====
+Java::
++
+To create a `TaskletStep` in Java, the bean passed to the `tasklet` method of the builder
+should implement the `Tasklet` interface. Do not call `chunk` when
+building a `TaskletStep`. The following example shows a simple tasklet:
++
+[source, java]
+----
+@Bean
+public Step step1(JobRepository jobRepository, PlatformTransactionManager transactionManager) {
+    return new StepBuilder("step1", jobRepository)
+            .tasklet(myTasklet(), transactionManager)
+            .build();
+}
+----
+
+XML::
++
+To create a `TaskletStep` in XML, the `ref` attribute of the `<tasklet/>` element should
+reference a bean that defines a `Tasklet` object. No `<chunk/>` element should be used
+within the `<tasklet/>`. The following example shows a simple tasklet:
++
+[source, xml]
+----
+<step id="step1">
+    <tasklet ref="myTasklet"/>
+</step>
+----
+
+====
+
+
+
+
+
+
+NOTE: If the tasklet implements the `StepListener` interface, `TaskletStep` automatically registers it as a `StepListener`.
+
+[[taskletAdapter]]
+== `TaskletAdapter`
+
+As with the adapters for the `ItemReader` and `ItemWriter` interfaces, the `Tasklet`
+interface has an adapter implementation that lets it delegate to any pre-existing
+class: `TaskletAdapter`. An example where this may be useful is an existing DAO that is
+used to update a flag on a set of records. You can use the `TaskletAdapter` to call this
+class without having to write an adapter for the `Tasklet` interface.
+
+
+[tabs]
+====
+Java::
++
+The following example shows how to define a `TaskletAdapter` in Java:
++
+.Java Configuration
+[source, java]
+----
+@Bean
+public MethodInvokingTaskletAdapter myTasklet() {
+    MethodInvokingTaskletAdapter adapter = new MethodInvokingTaskletAdapter();
+
+    adapter.setTargetObject(fooDao());
+    adapter.setTargetMethod("updateFoo");
+
+    return adapter;
+}
+----
+
+XML::
++
+The following example shows how to define a `TaskletAdapter` in XML:
++
+.XML Configuration
+[source, xml]
+----
+<bean id="myTasklet"
+      class="org.springframework.batch.core.step.tasklet.MethodInvokingTaskletAdapter">
+    <property name="targetObject">
+        <bean class="org.mycompany.FooDao"/>
+    </property>
+    <property name="targetMethod" value="updateFoo" />
+</bean>
+----
+
+====
+
+
+[[exampleTaskletImplementation]]
+== Example `Tasklet` Implementation
+
+Many batch jobs contain steps that must be done before the main processing begins
+(to set up various resources) or after processing has completed (to clean up those
+resources). In the case of a job that works heavily with files, it is often necessary to
+delete certain files locally after they have been uploaded successfully to another
+location. The following example (taken from the
+https://github.com/spring-projects/spring-batch/tree/main/spring-batch-samples[Spring
+Batch samples project]) is a `Tasklet` implementation with just such a responsibility:
+
+[source, java]
+----
+public class FileDeletingTasklet implements Tasklet, InitializingBean {
+
+    private Resource directory;
+
+    public RepeatStatus execute(StepContribution contribution,
+                                ChunkContext chunkContext) throws Exception {
+        File dir = directory.getFile();
+        Assert.state(dir.isDirectory(), "The directory resource must be a directory");
+
+        File[] files = dir.listFiles();
+        for (int i = 0; i < files.length; i++) {
+            boolean deleted = files[i].delete();
+            if (!deleted) {
+                throw new UnexpectedJobExecutionException("Could not delete file " +
+                                                          files[i].getPath());
+            }
+        }
+        return RepeatStatus.FINISHED;
+    }
+
+    public void setDirectoryResource(Resource directory) {
+        this.directory = directory;
+    }
+
+    public void afterPropertiesSet() throws Exception {
+        Assert.state(directory != null, "directory must be set");
+    }
+}
+----
+
+The preceding `Tasklet` implementation deletes all files within a given directory. Note
+that the `execute` method is called only once, since it returns `RepeatStatus.FINISHED`
+on its first invocation. All that is left is to reference the `tasklet` from the `step`.
+ + +[tabs] +==== +Java:: ++ +The following example shows how to reference the `tasklet` from the `step` in Java: ++ +.Java Configuration +[source, java] +---- +@Bean +public Job taskletJob(JobRepository jobRepository) { + return new JobBuilder("taskletJob", jobRepository) + .start(deleteFilesInDir()) + .build(); +} + +@Bean +public Step deleteFilesInDir(JobRepository jobRepository, PlatformTransactionManager transactionManager) { + return new StepBuilder("deleteFilesInDir", jobRepository) + .tasklet(fileDeletingTasklet(), transactionManager) + .build(); +} + +@Bean +public FileDeletingTasklet fileDeletingTasklet() { + FileDeletingTasklet tasklet = new FileDeletingTasklet(); + + tasklet.setDirectoryResource(new FileSystemResource("target/test-outputs/test-dir")); + + return tasklet; +} +---- + +XML:: ++ +The following example shows how to reference the `tasklet` from the `step` in XML: ++ +.XML Configuration +[source, xml] +---- + + + + + + + + + + + + + +---- + +==== + + diff --git a/spring-batch-docs/src/main/asciidoc/testing.adoc b/spring-batch-docs/modules/ROOT/pages/testing.adoc similarity index 95% rename from spring-batch-docs/src/main/asciidoc/testing.adoc rename to spring-batch-docs/modules/ROOT/pages/testing.adoc index 2944430ddf..f6b3d7e523 100644 --- a/spring-batch-docs/src/main/asciidoc/testing.adoc +++ b/spring-batch-docs/modules/ROOT/pages/testing.adoc @@ -1,13 +1,6 @@ -:toc: left -:toclevels: 4 [[testing]] -== Unit Testing - -include::attributes.adoc[] -ifndef::onlyonetoggle[] -include::toggle.adoc[] -endif::onlyonetoggle[] += Unit Testing As with other application styles, it is extremely important to unit test any code written as part of a batch job. The Spring core documentation covers how to unit and integration @@ -17,7 +10,7 @@ The `spring-batch-test` project includes classes that facilitate this end-to-end approach. [[creatingUnitTestClass]] -=== Creating a Unit Test Class +== Creating a Unit Test Class For the unit test to run a batch job, the framework must load the job's `ApplicationContext`. Two annotations are used to trigger this behavior: @@ -31,22 +24,27 @@ NOTE: If the test context contains a single `Job` bean definition, this bean will be autowired in `JobLauncherTestUtils`. Otherwise, the job under test should be manually set on the `JobLauncherTestUtils`. -[role="javaContent"] -The following Java example shows the annotations in use: +[tabs] +==== +Java:: ++ +The following Java example shows the annotations in use: ++ .Using Java Configuration -[source, java, role="javaContent"] +[source, java] ---- @SpringBatchTest @SpringJUnitConfig(SkipSampleConfiguration.class) public class SkipSampleFunctionalTests { ... } ---- -[role="xmlContent"] +XML:: ++ The following XML example shows the annotations in use: - ++ .Using XML Configuration -[source, java, role="xmlContent"] +[source, java] ---- @SpringBatchTest @SpringJUnitConfig(locations = { "/simple-job-launcher-context.xml", @@ -54,8 +52,13 @@ The following XML example shows the annotations in use: public class SkipSampleFunctionalTests { ... } ---- +==== + + + + [[endToEndTesting]] -=== End-To-End Testing of Batch Jobs +== End-To-End Testing of Batch Jobs "`End To end`" testing can be defined as testing the complete run of a batch job from beginning to end. This allows for a test that sets up a test condition, executes the job, @@ -71,15 +74,18 @@ returns the `JobExecution` object, which is useful for asserting particular info about the `Job` run. 
 In the following case, the test verifies that the `Job` ended with a status of
 `COMPLETED`.
 
-[role="xmlContent"]
-The following listing shows an example with JUnit 5 in XML configuration style:
 
-.XML Based Configuration
-[source, java, role="xmlContent"]
+[tabs]
+====
+Java::
++
+The following listing shows an example with JUnit 5 in Java configuration style:
++
+.Java Based Configuration
+[source, java]
 ----
 @SpringBatchTest
-@SpringJUnitConfig(locations = { "/simple-job-launcher-context.xml",
-    "/jobs/skipSampleJob.xml" })
+@SpringJUnitConfig(SkipSampleConfiguration.class)
 public class SkipSampleFunctionalTests {
 
     @Autowired
@@ -109,14 +115,17 @@ public class SkipSampleFunctionalTests {
     }
 }
 ----
 
-[role="javaContent"]
-The following listing shows an example with JUnit 5 in Java configuration style:
 
-.Java Based Configuration
-[source, java, role="javaContent"]
+XML::
++
+The following listing shows an example with JUnit 5 in XML configuration style:
++
+.XML Based Configuration
+[source, java]
 ----
 @SpringBatchTest
-@SpringJUnitConfig(SkipSampleConfiguration.class)
+@SpringJUnitConfig(locations = { "/simple-job-launcher-context.xml",
+    "/jobs/skipSampleJob.xml" })
 public class SkipSampleFunctionalTests {
 
     @Autowired
@@ -145,9 +154,11 @@ public class SkipSampleFunctionalTests {
     }
 }
 ----
+====
+
 
 [[testingIndividualSteps]]
-=== Testing Individual Steps
+== Testing Individual Steps
 
 For complex batch jobs, test cases in the end-to-end testing approach may become
 unmanageable. In these cases, it may be more useful to have test cases to test individual
@@ -164,7 +175,8 @@ JobExecution jobExecution = jobLauncherTestUtils.launchStep("loadFileStep");
 
 
 
-=== Testing Step-Scoped Components
+[[testing-step-scoped-components]]
+== Testing Step-Scoped Components
 
 Often, the components that are configured for your steps at runtime use step scope and
 late binding to inject context from the step or job execution. These are tricky to test as
@@ -263,7 +275,7 @@ int count = StepScopeTestUtils.doInStepScope(stepExecution,
 ----
 
 [[validatingOutputFiles]]
-=== Validating Output Files
+== Validating Output Files
 
 When a batch job writes to the database, it is easy to query the database to verify that
 the output is as expected. However, if the batch job writes to a file, it is equally
@@ -283,7 +295,7 @@ AssertFile.assertFileEquals(new FileSystemResource(EXPECTED_FILE),
 ----
 
 [[mockingDomainObjects]]
-=== Mocking Domain Objects
+== Mocking Domain Objects
 
 Another common issue encountered while writing unit and integration tests for Spring
 Batch components is how to mock domain objects. A good example is a `StepExecutionListener`, as
diff --git a/spring-batch-docs/modules/ROOT/pages/tracing.adoc b/spring-batch-docs/modules/ROOT/pages/tracing.adoc
new file mode 100644
index 0000000000..113190feeb
--- /dev/null
+++ b/spring-batch-docs/modules/ROOT/pages/tracing.adoc
@@ -0,0 +1,9 @@
+[[tracing]]
+= Tracing
+
+As of version 5, Spring Batch provides tracing through Micrometer's `Observation` API. By default, tracing is enabled
+when using `@EnableBatchProcessing`. Spring Batch creates a trace for each job execution and a span for each
+step execution.
+
+If you do not use `@EnableBatchProcessing`, you need to register a `BatchObservabilityBeanPostProcessor` in your
+application context, which automatically sets up Micrometer's observability in your job and step beans.
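+
+The following listing is a minimal sketch of such a registration (the
+`ObservabilityConfiguration` class name is arbitrary; `BatchObservabilityBeanPostProcessor`
+is the class mentioned above). Declaring the bean method as `static` lets the
+post-processor be instantiated early in the application context lifecycle:
+
+[source, java]
+----
+@Configuration
+public class ObservabilityConfiguration {
+
+    // Register the post-processor that instruments job and step beans for observability.
+    @Bean
+    public static BatchObservabilityBeanPostProcessor batchObservabilityBeanPostProcessor() {
+        return new BatchObservabilityBeanPostProcessor();
+    }
+}
+----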
diff --git a/spring-batch-docs/src/main/asciidoc/transaction-appendix.adoc b/spring-batch-docs/modules/ROOT/pages/transaction-appendix.adoc similarity index 93% rename from spring-batch-docs/src/main/asciidoc/transaction-appendix.adoc rename to spring-batch-docs/modules/ROOT/pages/transaction-appendix.adoc index 4cbd030b3f..d29634f30b 100644 --- a/spring-batch-docs/src/main/asciidoc/transaction-appendix.adoc +++ b/spring-batch-docs/modules/ROOT/pages/transaction-appendix.adoc @@ -1,14 +1,12 @@ -:toc: left -:toclevels: 4 [[transactions]] [appendix] -== Batch Processing and Transactions +[[batch-processing-and-transactions]] += Batch Processing and Transactions -include::attributes.adoc[] [[transactionsNoRetry]] -=== Simple Batching with No Retry +== Simple Batching with No Retry Consider the following simple example of a nested batch with no retries. It shows a common scenario for batch processing: An input source is processed until exhausted, and @@ -38,7 +36,7 @@ If the chunk at `REPEAT` (3) fails because of a database exception at 3.2, then must roll back the whole chunk. [[transactionStatelessRetry]] -=== Simple Stateless Retry +== Simple Stateless Retry It is also useful to use a retry for an operation which is not transactional, such as a call to a web-service or other remote resource, as the following example shows: @@ -62,7 +60,7 @@ access (2.1) eventually fails, the transaction, `TX` (0), is guaranteed to roll back. [[repeatRetry]] -=== Typical Repeat-Retry Pattern +== Typical Repeat-Retry Pattern The most typical batch processing pattern is to add a retry to the inner block of the chunk, as the following example shows: @@ -89,8 +87,8 @@ chunk, as the following example shows: ---- -The inner `RETRY` (4) block is marked as "`stateful`". See <> for a description of a stateful retry. This means that, if the +The inner `RETRY` (4) block is marked as "`stateful`". See xref:transaction-appendix.adoc#transactionsNoRetry[the typical use case] + for a description of a stateful retry. This means that, if the retry `PROCESS` (5) block fails, the behavior of the `RETRY` (4) is as follows: . Throw an exception, rolling back the transaction, `TX` (2), at the chunk level, and @@ -135,9 +133,9 @@ overall retry strategy. The inner `RETRY` (4) is aware of the history of each it can decide whether or not to have another attempt at it. [[asyncChunkProcessing]] -=== Asynchronous Chunk Processing +== Asynchronous Chunk Processing -The inner batches or chunks in the <> can be executed +The inner batches or chunks in the xref:transaction-appendix.adoc#repeatRetry[typical example] can be executed concurrently by configuring the outer batch to use an `AsyncTaskExecutor`. The outer batch waits for all the chunks to complete before completing. The following example shows asynchronous chunk processing: @@ -165,9 +163,9 @@ asynchronous chunk processing: ---- [[asyncItemProcessing]] -=== Asynchronous Item Processing +== Asynchronous Item Processing -The individual items in chunks in the <> can also, in +The individual items in chunks in the xref:transaction-appendix.adoc#repeatRetry[typical example] can also, in principle, be processed concurrently. In this case, the transaction boundary has to move to the level of the individual item, so that each transaction is on a single thread, as the following example shows: @@ -199,7 +197,7 @@ the transactional resources chunked together. It is useful only if the cost of t processing (5) is much higher than the cost of transaction management (3). 
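+
+A related (though not identical) pattern is available through the `AsyncItemProcessor` and
+`AsyncItemWriter` decorators in the `spring-batch-integration` module: item processing runs
+concurrently on separate threads, while the transaction boundary stays at the chunk level.
+The following listing is a minimal sketch (the `Customer` type and the delegate beans are
+placeholders for your own components):
+
+[source, java]
+----
+@Bean
+public AsyncItemProcessor<Customer, Customer> asyncItemProcessor(
+        ItemProcessor<Customer, Customer> delegate, TaskExecutor taskExecutor) {
+    AsyncItemProcessor<Customer, Customer> asyncProcessor = new AsyncItemProcessor<>();
+    // Each item is processed on a separate thread; the processor returns a
+    // Future that the AsyncItemWriter unwraps before writing.
+    asyncProcessor.setDelegate(delegate);
+    asyncProcessor.setTaskExecutor(taskExecutor);
+    return asyncProcessor;
+}
+
+@Bean
+public AsyncItemWriter<Customer> asyncItemWriter(ItemWriter<Customer> delegate) {
+    AsyncItemWriter<Customer> asyncWriter = new AsyncItemWriter<>();
+    asyncWriter.setDelegate(delegate);
+    return asyncWriter;
+}
+----
+
+With this setup, the step's output type becomes `Future<Customer>`, which the
+`AsyncItemWriter` resolves before delegating to the real writer.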
[[transactionPropagation]] -=== Interactions Between Batching and Transaction Propagation +== Interactions Between Batching and Transaction Propagation There is a tighter coupling between batch-retry and transaction management than we would ideally like. In particular, a stateless retry cannot be used to retry database @@ -268,7 +266,7 @@ Consequently, the `NESTED` pattern is best if the retry block contains any datab access. [[specialTransactionOrthonogonal]] -=== Special Case: Transactions with Orthogonal Resources +== Special Case: Transactions with Orthogonal Resources Default propagation is always OK for simple cases where there are no nested database transactions. Consider the following example, where the `SESSION` and `TX` are not @@ -296,7 +294,7 @@ worst that can happen is a duplicate message when the `RETRY` (2) succeeds and t `SESSION` (0) cannot commit (for example, because the message system is unavailable). [[statelessRetryCannotRecover]] -=== Stateless Retry Cannot Recover +== Stateless Retry Cannot Recover The distinction between a stateless and a stateful retry in the typical example shown earlier is important. It is actually ultimately a transactional constraint that forces the diff --git a/spring-batch-docs/src/main/asciidoc/whatsnew.adoc b/spring-batch-docs/modules/ROOT/pages/whatsnew.adoc similarity index 84% rename from spring-batch-docs/src/main/asciidoc/whatsnew.adoc rename to spring-batch-docs/modules/ROOT/pages/whatsnew.adoc index e09abbdc08..8bedc049f7 100644 --- a/spring-batch-docs/src/main/asciidoc/whatsnew.adoc +++ b/spring-batch-docs/modules/ROOT/pages/whatsnew.adoc @@ -1,24 +1,19 @@ -:toc: left -:toclevels: 4 - -This section shows the major highlights of Spring Batch 5.1. - [[whatsNew]] -== What's New in Spring Batch 5.1 += What's New in Spring Batch 5.1 -include::attributes.adoc[] +This section shows the major highlights of Spring Batch 5.1. Spring Batch 5.1 introduces the following features: -* <> -* <> -* <> -* <> -* <> -* <> +* xref:whatsnew.adoc#dependencies-upgrade[Dependencies upgrade] +* xref:whatsnew.adoc#virtual-threads-support[Virtual Threads support] +* xref:whatsnew.adoc#memory-management-improvement-jpaitemwriter[Memory management improvement in the JpaItemWriter] +* xref:whatsnew.adoc#new-synchronized-decorators[New synchronized decorators for item readers and writers] +* xref:whatsnew.adoc#new-cursor-based-mongo-item-reader[New Cursor-based MongoItemReader] +* xref:whatsnew.adoc#bulk-inserts-support-mongo-item-writer[Bulk inserts support in MongoItemWriter] [[dependencies-upgrade]] -=== Dependencies upgrade +== Dependencies upgrade In this release, the Spring dependencies are upgraded to the following versions: @@ -29,7 +24,7 @@ In this release, the Spring dependencies are upgraded to the following versions: * Micrometer 1.12.0-M2 [[virtual-threads-support]] -=== Virtual Threads support +== Virtual Threads support Embracing JDK 21 LTS is one of the main themes for Spring Batch 5.1, especially the support of virtual threads from Project Loom. In this release, virtual threads can be used in all areas of the @@ -44,7 +39,7 @@ In Spring Framework 6.1, a new `TaskExecutor` implementation based on virtual th `VirtualThreadTaskExecutor`. This `TaskExecutor` can be used in Spring Batch wherever a `TaskExecutor` is required. 
[[memory-management-improvement-jpaitemwriter]] -=== Memory management improvement in the JpaItemWriter +== Memory management improvement in the JpaItemWriter When using the `JpaItemWriter`, the JPA persistence context can quickly grow when the chunk size is large enough. This might lead to `OutOfMemoryError` errors if not cleared appropriately in a timely manner. @@ -54,7 +49,7 @@ to clear the persistence context after writing each chunk of items. This option of chunk-oriented steps dealing with large amounts of data and big chunk sizes. [[new-synchronized-decorators]] -=== New synchronized decorators for item readers and writers +== New synchronized decorators for item readers and writers Up to version 5.0, Spring Batch provided two decorators `SynchronizedItemStreamReader` and `SynchronizedItemStreamWriter` to synchronize thread access to `ItemStreamReader#read` and `ItemStreamWriter#write`. Those decorators are useful when diff --git a/spring-batch-docs/pom.xml b/spring-batch-docs/pom.xml index e8c7b3ccf7..f05fda22c9 100644 --- a/spring-batch-docs/pom.xml +++ b/spring-batch-docs/pom.xml @@ -13,100 +13,20 @@ - org.asciidoctor - asciidoctor-maven-plugin - ${asciidoctor-maven-plugin.version} - - - org.asciidoctor - asciidoctorj-pdf - ${asciidoctorj-pdf.version} - - - org.asciidoctor - asciidoctorj-epub3 - ${asciidoctorj-epub.version} - - - io.spring.asciidoctor.backends - spring-asciidoctor-backends - ${spring-asciidoctor-backends.version} - - - - ${project.basedir}/src/main/asciidoc - ${project.build.directory}/asciidoc - + io.spring.maven.antora + antora-maven-plugin + ${io.spring.maven.antora-version} + true + + + io.spring.maven.antora + antora-component-version-maven-plugin + ${io.spring.maven.antora-version} - generate-html - site - - process-asciidoc - - - spring-html - book - - ${project.build.directory}/asciidoc - shared - css/ - site.css - true - font - - highlight.js - js/highlight - github - - - - ${project.version} - ${project.version} - - - - - - generate-pdf - site - - process-asciidoc - - - pdf - book - - font - - - - - coderay - ${project.version} - ${project.version} - - - - - generate-epub - site - process-asciidoc + antora-component-version - - epub3 - book - - font - - - - - coderay - ${project.version} - ${project.version} - - @@ -132,14 +52,6 @@ - - org.apache.maven.plugins - maven-deploy-plugin - ${maven-deploy-plugin.version} - - true - - diff --git a/spring-batch-docs/src/main/antora/resources/antora-resources/antora.yml b/spring-batch-docs/src/main/antora/resources/antora-resources/antora.yml new file mode 100644 index 0000000000..e61aa5ae27 --- /dev/null +++ b/spring-batch-docs/src/main/antora/resources/antora-resources/antora.yml @@ -0,0 +1,8 @@ +version: ${antora-component.version} +prerelease: ${antora-component.prerelease} + +asciidoc: + attributes: + attribute-missing: 'warn' + chomp: 'all' + batch-asciidoc: '' \ No newline at end of file diff --git a/spring-batch-docs/src/main/asciidoc/header/index-header.adoc b/spring-batch-docs/src/main/asciidoc/header/index-header.adoc deleted file mode 100644 index 388620a66a..0000000000 --- a/spring-batch-docs/src/main/asciidoc/header/index-header.adoc +++ /dev/null @@ -1 +0,0 @@ -= Spring Batch - Reference Documentation diff --git a/spring-batch-docs/src/main/asciidoc/index-single.adoc b/spring-batch-docs/src/main/asciidoc/index-single.adoc deleted file mode 100644 index 65a54e2154..0000000000 --- a/spring-batch-docs/src/main/asciidoc/index-single.adoc +++ /dev/null @@ -1,49 +0,0 @@ -:doctype: book -:toc: left 
-:toclevels: 4 -:sectnums: -:onlyonetoggle: true - -include::attributes.adoc[] - -include::header/index-header.adoc[] - -include::toggle.adoc[] - -include::spring-batch-intro.adoc[] - -include::spring-batch-architecture.adoc[] - -include::whatsnew.adoc[] - -include::domain.adoc[] - -include::job.adoc[] - -include::step.adoc[] - -include::readersAndWriters.adoc[] - -include::processor.adoc[] - -include::scalability.adoc[] - -include::repeat.adoc[] - -include::retry.adoc[] - -include::testing.adoc[] - -include::common-patterns.adoc[] - -include::spring-batch-integration.adoc[] - -include::monitoring-and-metrics.adoc[] - -include::appendix.adoc[] - -include::schema-appendix.adoc[] - -include::transaction-appendix.adoc[] - -include::glossary.adoc[] diff --git a/spring-batch-docs/src/main/asciidoc/index.adoc b/spring-batch-docs/src/main/asciidoc/index.adoc deleted file mode 100644 index 17afe49987..0000000000 --- a/spring-batch-docs/src/main/asciidoc/index.adoc +++ /dev/null @@ -1,52 +0,0 @@ -include::attributes.adoc[] - -include::header/index-header.adoc[] - -// ====================================================================================== - -This documentation is also available -as a link:index-single.html[single HTML file] and as link:../pdf/spring-batch-reference.pdf[PDF] -and link:../epub/spring-batch-reference.epub[EPUB] documents. - -The reference documentation is divided into several sections: - -[horizontal] -<> :: Background, usage - scenarios, and general guidelines. -<> :: Spring Batch -architecture, general batch principles, batch processing strategies. -<> :: New features introduced in version 5.1. -<> :: Core concepts and abstractions -of the Batch domain language. -<> :: Job configuration, execution, and -administration. -<> :: Step configuration, different types of steps, and -controlling step flow. -<> :: `ItemReader` -and `ItemWriter` interfaces and how to use them. -<> :: `ItemProcessor` interface and how to use it. -<> :: Multi-threaded steps, -parallel steps, remote chunking, and partitioning. -<> :: Completion policies and exception handling of repetitive actions. -<> :: Retry and backoff policies of retryable operations. -<> :: Job and Step testing facilities and APIs. -<> :: Common batch processing patterns -and guidelines. -<> :: Integration -between Spring Batch and Spring Integration projects. -<> :: Batch jobs -monitoring and metrics. - -The following appendices are available: - -[horizontal] -<> :: List of -all provided item readers and writers. -<> :: Core tables used by the Batch -domain model. -<> :: Transaction -boundaries, propagation, and isolation levels used in Spring Batch. -<> :: Glossary of common terms, concepts, and vocabulary of -the Batch domain. 
- -include::footer/index-footer.adoc[] diff --git a/spring-batch-docs/src/main/asciidoc/job.adoc b/spring-batch-docs/src/main/asciidoc/job.adoc deleted file mode 100644 index 94f3aab23c..0000000000 --- a/spring-batch-docs/src/main/asciidoc/job.adoc +++ /dev/null @@ -1,1676 +0,0 @@ -:toc: left -:toclevels: 4 - -[[configureJob]] -== Configuring and Running a Job - -include::attributes.adoc[] -ifndef::onlyonetoggle[] -include::toggle.adoc[] -endif::onlyonetoggle[] - -In the <> , the overall -architecture design was discussed, using the following diagram as a -guide: - -.Batch Stereotypes -image::{batch-asciidoc}images/spring-batch-reference-model.png[Figure 2.1: Batch Stereotypes, scaledwidth="60%"] - -While the `Job` object may seem like a simple -container for steps, you must be aware of many configuration options. -Furthermore, you must consider many options about -how a `Job` can be run and how its metadata can be -stored during that run. This chapter explains the various configuration -options and runtime concerns of a `Job`. - -[[configuringAJob]] -=== Configuring a Job - -ifdef::backend-spring-html[] -[role="javaContent"] -There are multiple implementations of the <> interface. However, -builders abstract away the difference in configuration. -The following example creates a `footballJob`: - -[source, java, role="javaContent"] ----- -@Bean -public Job footballJob(JobRepository jobRepository) { - return new JobBuilder("footballJob", jobRepository) - .start(playerLoad()) - .next(gameLoad()) - .next(playerSummarization()) - .build(); -} ----- - -[role="javaContent"] -A `Job` (and, typically, any `Step` within it) requires a `JobRepository`. The -configuration of the `JobRepository` is handled through the <>. - -[role="javaContent"] -The preceding example illustrates a `Job` that consists of three `Step` instances. The job related -builders can also contain other elements that help with parallelization (`Split`), -declarative flow control (`Decision`), and externalization of flow definitions (`Flow`). - -[role="xmlContent"] -There are multiple implementations of the <> -interface. However, the namespace abstracts away the differences in configuration. It has -only three required dependencies: a name, `JobRepository` , and a list of `Step` instances. -The following example creates a `footballJob`: - -[source, xml, role="xmlContent"] ----- - - - - - ----- - -[role="xmlContent"] -The examples here use a parent bean definition to create the steps. -See the section on <> -for more options when declaring specific step details inline. The XML namespace -defaults to referencing a repository with an ID of `jobRepository`, which -is a sensible default. However, you can explicitly override it: - -[source, xml, role="xmlContent"] ----- - - - - - ----- - -[role="xmlContent"] -In addition to steps, a job configuration can contain other elements that help with -parallelization (``), declarative flow control (``) and externalization -of flow definitions (``). -endif::backend-spring-html[] - -ifdef::backend-pdf[] -There are multiple implementations of the <> interface. However, -these implementations are abstracted behind either the provided builders (for Java configuration) or the XML -namespace (for XML-based configuration). 
The following example shows both Java and XML configuration: - -==== -.Java Configuration -[source, java] ----- -@Bean -public Job footballJob(JobRepository jobRepository) { - return new JobBuilder("footballJob", jobRepository) - .start(playerLoad()) - .next(gameLoad()) - .next(playerSummarization()) - .build(); -} ----- - -.XML Configuration -[source, xml] ----- - - - - - ----- -==== - -The preceding examples uses a parent bean definition to create the steps. -See the section on <> -for more options when declaring specific step details inline. The XML namespace -defaults to referencing a repository with an `id` of `jobRepository`, which -is a sensible default. However, you can explicitly override this default: - -==== -[source, xml] ----- - - - - - ----- -==== - -In addition to steps, a job configuration can contain other elements -that help with parallelization (``), -declarative flow control (``), and -externalization of flow definitions -(``). - -endif::backend-pdf[] - -[[restartability]] -==== Restartability - -One key issue when executing a batch job concerns the behavior of a `Job` when it is -restarted. The launching of a `Job` is considered to be a "`restart`" if a `JobExecution` -already exists for the particular `JobInstance`. Ideally, all jobs should be able to start -up where they left off, but there are scenarios where this is not possible. -_In this scenario, it is entirely up to the developer to ensure that a new `JobInstance` is created._ -However, Spring Batch does provide some help. If a `Job` should never be -restarted but should always be run as part of a new `JobInstance`, you can set the -restartable property to `false`. - -[role="xmlContent"] -The following example shows how to set the `restartable` field to `false` in XML: - -.XML Configuration -[source, xml, role="xmlContent"] ----- - - ... - ----- - -[role="javaContent"] -The following example shows how to set the `restartable` field to `false` in Java: - -.Java Configuration -[source, java, role="javaContent"] ----- -@Bean -public Job footballJob(JobRepository jobRepository) { - return new JobBuilder("footballJob", jobRepository) - .preventRestart() - ... - .build(); -} ----- - -To phrase it another way, setting `restartable` to `false` means "`this -`Job` does not support being started again`". Restarting a `Job` that is not -restartable causes a `JobRestartException` to -be thrown. -The following Junit code causes the exception to be thrown: - -[source, java] ----- -Job job = new SimpleJob(); -job.setRestartable(false); - -JobParameters jobParameters = new JobParameters(); - -JobExecution firstExecution = jobRepository.createJobExecution(job, jobParameters); -jobRepository.saveOrUpdate(firstExecution); - -try { - jobRepository.createJobExecution(job, jobParameters); - fail(); -} -catch (JobRestartException e) { - // expected -} ----- - -The first attempt to create a -`JobExecution` for a non-restartable -job causes no issues. However, the second -attempt throws a `JobRestartException`. - -[[interceptingJobExecution]] -==== Intercepting Job Execution - -During the course of the execution of a -`Job`, it may be useful to be notified of various -events in its lifecycle so that custom code can be run. 
-`SimpleJob` allows for this by calling a -`JobListener` at the appropriate time: - -[source, java] ----- -public interface JobExecutionListener { - - void beforeJob(JobExecution jobExecution); - - void afterJob(JobExecution jobExecution); -} ----- - -You can add `JobListeners` to a `SimpleJob` by setting listeners on the job. - -[role="xmlContent"] -The following example shows how to add a listener element to an XML job definition: - -.XML Configuration -[source, xml, role="xmlContent"] ----- - - - - - - - - ----- - -[role="javaContent"] -The following example shows how to add a listener method to a Java job definition: - -.Java Configuration -[source, java, role="javaContent"] ----- -@Bean -public Job footballJob(JobRepository jobRepository) { - return new JobBuilder("footballJob", jobRepository) - .listener(sampleListener()) - ... - .build(); -} ----- - -Note that the `afterJob` method is called regardless of the success or -failure of the `Job`. If you need to determine success or failure, you can get that information -from the `JobExecution`: - -[source, java] ----- -public void afterJob(JobExecution jobExecution){ - if (jobExecution.getStatus() == BatchStatus.COMPLETED ) { - //job success - } - else if (jobExecution.getStatus() == BatchStatus.FAILED) { - //job failure - } -} ----- - -The annotations corresponding to this interface are: - -* `@BeforeJob` -* `@AfterJob` - -[[inheritingFromAParentJob]] -[role="xmlContent"] -==== Inheriting from a Parent Job - -ifdef::backend-pdf[] -This section applies only to XML based configuration, as Java configuration provides better -reuse capabilities. -endif::backend-pdf[] - -[role="xmlContent"] -If a group of Jobs share similar but not -identical configurations, it may help to define a "`parent`" -`Job` from which the concrete -`Job` instances can inherit properties. Similar to class -inheritance in Java, a "`child`" `Job` combines -its elements and attributes with the parent's. - -[role="xmlContent"] -In the following example, `baseJob` is an abstract -`Job` definition that defines only a list of -listeners. The `Job` (`job1`) is a concrete -definition that inherits the list of listeners from `baseJob` and merges -it with its own list of listeners to produce a -`Job` with two listeners and one -`Step` (`step1`). - -[source, xml, role="xmlContent"] ----- - - - - - - - - - - - - - ----- - -[role="xmlContent"] -See the section on <> -for more detailed information. - -==== JobParametersValidator - -A job declared in the XML namespace or using any subclass of -`AbstractJob` can optionally declare a validator for the job parameters at -runtime. This is useful when, for instance, you need to assert that a job -is started with all its mandatory parameters. There is a -`DefaultJobParametersValidator` that you can use to constrain combinations -of simple mandatory and optional parameters. For more complex -constraints, you can implement the interface yourself. - -ifdef::backend-spring-html[] -[role="xmlContent"] -The configuration of a validator is supported through the XML namespace through a child -element of the job, as the following example shows: - -[source, xml, role="xmlContent"] ----- - - - - ----- - -[role="xmlContent"] -You can specify the validator as a reference (as shown earlier) or as a nested bean -definition in the `beans` namespace. 
- -[role="javaContent"] -The configuration of a validator is supported through the Java builders: - -[source, java, role="javaContent"] ----- -@Bean -public Job job1(JobRepository jobRepository) { - return new JobBuilder("job1", jobRepository) - .validator(parametersValidator()) - ... - .build(); -} ----- - -endif::backend-spring-html[] - -ifdef::backend-pdf[] -The configuration of a validator is supported through the Java builders, as follows: - -[source, java] ----- -@Bean -public Job job1(JobRepository jobRepository) { - return new JobBuilder("job1", jobRepository) - .validator(parametersValidator()) - ... - .build(); -} ----- - -XML namespace support is also available for configuration of a `JobParametersValidator`: - -[source, xml] ----- - - - - ----- - -You can specify the validator as a reference (as shown earlier) or as a nested bean definition in -the `beans` namespace. - -endif::backend-pdf[] - -[[javaConfig]] -=== Java Configuration - -Spring 3 brought the ability to configure applications with Java instead of XML. As of -Spring Batch 2.2.0, you can configure batch jobs by using the same Java configuration. -There are three components for the Java-based configuration: the `@EnableBatchProcessing` -annotation and two builders. - -The `@EnableBatchProcessing` annotation works similarly to the other `@Enable*` annotations in the -Spring family. In this case, `@EnableBatchProcessing` provides a base configuration for -building batch jobs. Within this base configuration, an instance of `StepScope` and `JobScope` are -created, in addition to a number of beans being made available to be autowired: - -* `JobRepository`: a bean named `jobRepository` -* `JobLauncher`: a bean named `jobLauncher` -* `JobRegistry`: a bean named `jobRegistry` -* `JobExplorer`: a bean named `jobExplorer` -* `JobOperator`: a bean named `jobOperator` - -The default implementation provides the beans mentioned in the preceding list and requires a `DataSource` -and a `PlatformTransactionManager` to be provided as beans within the context. The data source and transaction -manager are used by the `JobRepository` and `JobExplorer` instances. By default, the data source named `dataSource` -and the transaction manager named `transactionManager` will be used. You can customize any of these beans by using -the attributes of the `@EnableBatchProcessing` annotation. The following example shows how to provide a -custom data source and transaction manager: - -[source, java] ----- -@Configuration -@EnableBatchProcessing(dataSourceRef = "batchDataSource", transactionManagerRef = "batchTransactionManager") -public class MyJobConfiguration { - - @Bean - public DataSource batchDataSource() { - return new EmbeddedDatabaseBuilder().setType(EmbeddedDatabaseType.HSQL) - .addScript("/org/springframework/batch/core/schema-hsqldb.sql") - .generateUniqueName(true).build(); - } - - @Bean - public JdbcTransactionManager batchTransactionManager(DataSource dataSource) { - return new JdbcTransactionManager(dataSource); - } - - public Job job(JobRepository jobRepository) { - return new JobBuilder("myJob", jobRepository) - //define job flow as needed - .build(); - } - -} ----- - -NOTE: Only one configuration class needs to have the `@EnableBatchProcessing` annotation. Once -you have a class annotated with it, you have all of the configuration described earlier. - -Starting from v5.0, an alternative, programmatic way of configuring base infrastrucutre beans -is provided through the `DefaultBatchConfiguration` class. 
This class provides the same beans -provided by `@EnableBatchProcessing` and can be used as a base class to configure batch jobs. -The following snippet is a typical example of how to use it: - -[source, java] ----- -@Configuration -class MyJobConfiguration extends DefaultBatchConfiguration { - - @Bean - public Job job(JobRepository jobRepository) { - return new JobBuilder("job", jobRepository) - // define job flow as needed - .build(); - } - -} ----- - -The data source and transaction manager will be resolved from the application context -and set on the job repository and job explorer. You can customize the configuration -of any infrastructure bean by overriding the required setter. The following example -shows how to customize the character encoding for instance: - -[source, java] ----- -@Configuration -class MyJobConfiguration extends DefaultBatchConfiguration { - - @Bean - public Job job(JobRepository jobRepository) { - return new JobBuilder("job", jobRepository) - // define job flow as needed - .build(); - } - - @Override - protected Charset getCharset() { - return StandardCharsets.ISO_8859_1; - } -} ----- - -NOTE: `@EnableBatchProcessing` should *not* be used with `DefaultBatchConfiguration`. You should -either use the declarative way of configuring Spring Batch through `@EnableBatchProcessing`, -or use the programmatic way of extending `DefaultBatchConfiguration`, but not both ways at -the same time. - -[[configuringJobRepository]] -=== Configuring a JobRepository - -[role="javaContent"] -When using `@EnableBatchProcessing`, a `JobRepository` is provided for you. -This section describes how to configure your own. - -As described earlier, the <> is used for basic CRUD operations of the various persisted -domain objects within Spring Batch, such as `JobExecution` and `StepExecution`. -It is required by many of the major framework features, such as the `JobLauncher`, -`Job`, and `Step`. - -[role="xmlContent"] -The batch namespace abstracts away many of the implementation details of the -`JobRepository` implementations and their collaborators. However, there are still a few -configuration options available, as the following example shows: - -.XML Configuration -[source, xml, role="xmlContent"] ----- - ----- - -[role="xmlContent"] -Other than the `id`, none of the configuration options listed earlier are required. If they are -not set, the defaults shown earlier are used. -The `max-varchar-length` defaults to `2500`, which is the length of the long -`VARCHAR` columns in the <>. - -[role="javaContent"] -Other than the `dataSource` and the `transactionManager`, none of the configuration options listed earlier are required. -If they are not set, the defaults shown earlier -are used. The -max `varchar` length defaults to `2500`, which is the -length of the long `VARCHAR` columns in the -<> - -[[txConfigForJobRepository]] -==== Transaction Configuration for the JobRepository - -If the namespace or the provided `FactoryBean` is used, transactional advice is -automatically created around the repository. This is to ensure that the batch metadata, -including state that is necessary for restarts after a failure, is persisted correctly. -The behavior of the framework is not well defined if the repository methods are not -transactional. The isolation level in the `create*` method attributes is specified -separately to ensure that, when jobs are launched, if two processes try to launch -the same job at the same time, only one succeeds. 
The default isolation level for that -method is `SERIALIZABLE`, which is quite aggressive. `READ_COMMITTED` usually works equally -well. `READ_UNCOMMITTED` is fine if two processes are not likely to collide in this -way. However, since a call to the `create*` method is quite short, it is unlikely that -`SERIALIZED` causes problems, as long as the database platform supports it. However, you -can override this setting. - -[role="xmlContent"] -The following example shows how to override the isolation level in XML: - -.XML Configuration -[source, xml, role="xmlContent"] ----- - ----- -[role="javaContent"] -The following example shows how to override the isolation level in Java: - -.Java Configuration -[source, java, role="javaContent"] ----- -@Configuration -@EnableBatchProcessing(isolationLevelForCreate = "ISOLATION_REPEATABLE_READ") -public class MyJobConfiguration { - - // job definition - -} ----- - -If the namespace is not used, you must also configure the -transactional behavior of the repository by using AOP. - -[role="xmlContent"] -The following example shows how to configure the transactional behavior of the repository -in XML: - -.XML Configuration -[source, xml, role="xmlContent"] ----- - - - - - - - - - - ----- - -[role="xmlContent"] -You can use the preceding fragment nearly as is, with almost no changes. Remember also to -include the appropriate namespace declarations and to make sure `spring-tx` and `spring-aop` -(or the whole of Spring) are on the classpath. - -[role="javaContent"] -The following example shows how to configure the transactional behavior of the repository -in Java: - -.Java Configuration -[source, java, role="javaContent"] ----- -@Bean -public TransactionProxyFactoryBean baseProxy() { - TransactionProxyFactoryBean transactionProxyFactoryBean = new TransactionProxyFactoryBean(); - Properties transactionAttributes = new Properties(); - transactionAttributes.setProperty("*", "PROPAGATION_REQUIRED"); - transactionProxyFactoryBean.setTransactionAttributes(transactionAttributes); - transactionProxyFactoryBean.setTarget(jobRepository()); - transactionProxyFactoryBean.setTransactionManager(transactionManager()); - return transactionProxyFactoryBean; -} ----- - -[[repositoryTablePrefix]] -==== Changing the Table Prefix - -Another modifiable property of the `JobRepository` is the table prefix of the meta-data -tables. By default, they are all prefaced with `BATCH_`. `BATCH_JOB_EXECUTION` and -`BATCH_STEP_EXECUTION` are two examples. However, there are potential reasons to modify this -prefix. If the schema names need to be prepended to the table names or if more than one -set of metadata tables is needed within the same schema, the table prefix needs to -be changed. - -[role="xmlContent"] -The following example shows how to change the table prefix in XML: - -.XML Configuration -[source, xml, role="xmlContent"] ----- - ----- - -[role="xmlContent"] -The following example shows how to change the table prefix in Java: - -.Java Configuration -[source, java, role="javaContent"] ----- -@Configuration -@EnableBatchProcessing(tablePrefix = "SYSTEM.TEST_") -public class MyJobConfiguration { - - // job definition - -} ----- - -Given the preceding changes, every query to the metadata tables is prefixed with -`SYSTEM.TEST_`. `BATCH_JOB_EXECUTION` is referred to as `SYSTEM.TEST_JOB_EXECUTION`. - -NOTE: Only the table prefix is configurable. The table and column names are not. 
- -[[nonStandardDatabaseTypesInRepository]] -==== Non-standard Database Types in a Repository - -If you use a database platform that is not in the list of supported platforms, you -may be able to use one of the supported types, if the SQL variant is close enough. To do -this, you can use the raw `JobRepositoryFactoryBean` instead of the namespace shortcut and -use it to set the database type to the closest match. - -[role="xmlContent"] -The following example shows how to use `JobRepositoryFactoryBean` to set the database type -to the closest match in XML: - -.XML Configuration -[source, xml, role="xmlContent"] ----- - - - - ----- - -[role="javaContent"] -The following example shows how to use `JobRepositoryFactoryBean` to set the database type -to the closest match in Java: - -.Java Configuration -[source, java, role="javaContent"] ----- -@Bean -public JobRepository jobRepository() throws Exception { - JobRepositoryFactoryBean factory = new JobRepositoryFactoryBean(); - factory.setDataSource(dataSource); - factory.setDatabaseType("db2"); - factory.setTransactionManager(transactionManager); - return factory.getObject(); -} ----- - -If the database type is not specified, the `JobRepositoryFactoryBean` tries to -auto-detect the database type from the `DataSource`. -The major differences between platforms are -mainly accounted for by the strategy for incrementing primary keys, so -it is often necessary to override the -`incrementerFactory` as well (by using one of the standard -implementations from the Spring Framework). - -If even that does not work or if you are not using an RDBMS, the -only option may be to implement the various `Dao` -interfaces that the `SimpleJobRepository` depends -on and wire one up manually in the normal Spring way. - -[[configuringJobLauncher]] -=== Configuring a JobLauncher - -[role="javaContent"] -When you use `@EnableBatchProcessing`, a `JobRegistry` is provided for you. -This section describes how to configure your own. - -The most basic implementation of the `JobLauncher` interface is the `TaskExecutorJobLauncher`. -Its only required dependency is a `JobRepository` (needed to obtain an execution). - -[role="xmlContent"] -The following example shows a `TaskExecutorJobLauncher` in XML: - -.XML Configuration -[source, xml, role="xmlContent"] ----- - - - ----- - -[role="javaContent"] -The following example shows a `TaskExecutorJobLauncher` in Java: - -.Java Configuration -[source, java, role="javaContent"] ----- -... -@Bean -public JobLauncher jobLauncher() throws Exception { - TaskExecutorJobLauncher jobLauncher = new TaskExecutorJobLauncher(); - jobLauncher.setJobRepository(jobRepository); - jobLauncher.afterPropertiesSet(); - return jobLauncher; -} -... ----- - -Once a <> is obtained, it is passed to the -execute method of `Job`, ultimately returning the `JobExecution` to the caller, as -the following image shows: - -.Job Launcher Sequence -image::{batch-asciidoc}images/job-launcher-sequence-sync.png[Job Launcher Sequence, scaledwidth="60%"] - -The sequence is straightforward and works well when launched from a scheduler. However, -issues arise when trying to launch from an HTTP request. In this scenario, the launching -needs to be done asynchronously so that the `TaskExecutorJobLauncher` returns immediately to its -caller. This is because it is not good practice to keep an HTTP request open for the -amount of time needed by long running processes (such as batch jobs). 
The following image shows -an example sequence: - -.Asynchronous Job Launcher Sequence -image::{batch-asciidoc}images/job-launcher-sequence-async.png[Async Job Launcher Sequence, scaledwidth="60%"] - -You can configure the `TaskExecutorJobLauncher` to allow for this scenario by configuring a -`TaskExecutor`. - -[role="xmlContent"] -The following XML example configures a `TaskExecutorJobLauncher` to return immediately: - -.XML Configuration -[source, xml, role="xmlContent"] ----- - - - - - - ----- - -[role="javaContent"] -The following Java example configures a `TaskExecutorJobLauncher` to return immediately: - -.Java Configuration -[source, java, role="javaContent"] ----- -@Bean -public JobLauncher jobLauncher() { - TaskExecutorJobLauncher jobLauncher = new TaskExecutorJobLauncher(); - jobLauncher.setJobRepository(jobRepository()); - jobLauncher.setTaskExecutor(new SimpleAsyncTaskExecutor()); - jobLauncher.afterPropertiesSet(); - return jobLauncher; -} ----- - -You can use any implementation of the spring `TaskExecutor` -interface to control how jobs are asynchronously -executed. - -[[runningAJob]] -=== Running a Job - -At a minimum, launching a batch job requires two things: the -`Job` to be launched and a -`JobLauncher`. Both can be contained within the same -context or different contexts. For example, if you launch jobs from the -command line, a new JVM is instantiated for each `Job`. Thus, every -job has its own `JobLauncher`. However, if -you run from within a web container that is within the scope of an -`HttpRequest`, there is usually one -`JobLauncher` (configured for asynchronous job -launching) that multiple requests invoke to launch their jobs. - -[[runningJobsFromCommandLine]] -==== Running Jobs from the Command Line - -If you want to run your jobs from an enterprise -scheduler, the command line is the primary interface. This is because -most schedulers (with the exception of Quartz, unless using -`NativeJob`) work directly with operating system -processes, primarily kicked off with shell scripts. There are many ways -to launch a Java process besides a shell script, such as Perl, Ruby, or -even build tools, such as Ant or Maven. However, because most people -are familiar with shell scripts, this example focuses on them. - -[[commandLineJobRunner]] -===== The CommandLineJobRunner - -Because the script launching the job must kick off a Java -Virtual Machine, there needs to be a class with a `main` method to act -as the primary entry point. Spring Batch provides an implementation -that serves this purpose: -`CommandLineJobRunner`. Note -that this is just one way to bootstrap your application. There are -many ways to launch a Java process, and this class should in no way be -viewed as definitive. The `CommandLineJobRunner` -performs four tasks: - -* Load the appropriate `ApplicationContext`. -* Parse command line arguments into `JobParameters`. -* Locate the appropriate job based on arguments. -* Use the `JobLauncher` provided in the application context to launch the job. - -All of these tasks are accomplished with only the arguments passed in. -The following table describes the required arguments: - -.CommandLineJobRunner arguments -|=============== -|`jobPath`|The location of the XML file that is used to -create an `ApplicationContext`. This file -should contain everything needed to run the complete -`Job`. -|`jobName`|The name of the job to be run. -|=============== - -These arguments must be passed in, with the path first and the name second. 
All arguments
after these are considered to be job parameters, are turned into a `JobParameters` object,
and must be in the format of `name=value`.

ifdef::backend-spring-html[]
[role="xmlContent"]
The following example shows a date passed as a job parameter to a job defined in XML:

[source, role="xmlContent"]
----
bash$ java CommandLineJobRunner endOfDayJob.xml endOfDay schedule.date=2007-05-05,java.time.LocalDate
----

[role="xmlContent"]
The first
argument is `endOfDayJob.xml`, which is the Spring `ApplicationContext` that contains the
`Job`. The second argument, `endOfDay`, represents the job name. The final argument,
`schedule.date=2007-05-05,java.time.LocalDate`, is converted into a `JobParameter` object of type
`java.time.LocalDate`.

[role="xmlContent"]
The following example shows a sample configuration for `endOfDay` in XML:

[source, xml, role="xmlContent"]
----
<job id="endOfDay">
    <step id="step1" parent="simpleStep" />
</job>

<!-- Launcher details removed for clarity -->
<beans:bean id="jobLauncher"
            class="org.springframework.batch.core.launch.support.TaskExecutorJobLauncher" />
----

[role="javaContent"]
In most cases, you would want to use a manifest to declare your `main` class in a jar. However,
for simplicity, the class was used directly. This example uses the `EndOfDay`
example from the section on Java configuration. The first
argument is `io.spring.EndOfDayJobConfiguration`, which is the fully qualified class name
of the configuration class that contains the `Job`. The second argument, `endOfDay`, represents
the job name. The final argument, `schedule.date=2007-05-05,java.time.LocalDate`, is converted
into a `JobParameter` object of type `java.time.LocalDate`.

[role="javaContent"]
The following example shows a sample configuration for `endOfDay` in Java:

[source, java, role="javaContent"]
----
@Configuration
@EnableBatchProcessing
public class EndOfDayJobConfiguration {

    @Bean
    public Job endOfDay(JobRepository jobRepository, Step step1) {
        return new JobBuilder("endOfDay", jobRepository)
                .start(step1)
                .build();
    }

    @Bean
    public Step step1(JobRepository jobRepository, PlatformTransactionManager transactionManager) {
        return new StepBuilder("step1", jobRepository)
                .tasklet((contribution, chunkContext) -> null, transactionManager)
                .build();
    }
}
----
endif::backend-spring-html[]

ifdef::backend-pdf[]
In most cases, you would want to use a manifest to declare your `main` class in a jar. However,
for simplicity, the class was used directly. This example uses the `EndOfDay`
example from the section on Java configuration. The first
argument is where your job is configured (either an XML file or a fully qualified class
name). The second argument, `endOfDay`, represents the job name. The final argument,
`schedule.date=2007-05-05,java.time.LocalDate`, is converted into a `JobParameter` object of type
`java.time.LocalDate`.

// TODO Given that this block is for PDF output, should it have the xmlContent and
// javaContent markers?
- -[role="xmlContent"] -The following example shows a sample configuration for `endOfDay` in XML: - -.XML Configuration -[source, xml, role="xmlContent"] ----- - - - - - - ----- - -[role="javaContent"] -The following example shows a sample configuration for `endOfDay` in Java: - -.Java Configuration -[source, java, role="javaContent"] ----- -@Configuration -@EnableBatchProcessing -public class EndOfDayJobConfiguration { - - @Bean - public Job endOfDay(JobRepository jobRepository, Step step1) { - return new JobBuilder("endOfDay", jobRepository) - .start(step1) - .build(); - } - - @Bean - public Step step1(JobRepository jobRepository, PlatformTransactionManager transactionManager) { - return new StepBuilder("step1", jobRepository) - .tasklet((contribution, chunkContext) -> null, transactionManager) - .build(); - } -} ----- - -endif::backend-pdf[] - -The preceding example is overly simplistic, since there are many more requirements to a -run a batch job in Spring Batch in general, but it serves to show the two main -requirements of the `CommandLineJobRunner`: `Job` and `JobLauncher`. - - - -[[exitCodes]] -===== Exit Codes - -When launching a batch job from the command-line, an enterprise -scheduler is often used. Most schedulers are fairly dumb and work only -at the process level. This means that they only know about some -operating system process (such as a shell script that they invoke). -In this scenario, the only way to communicate back to the scheduler -about the success or failure of a job is through return codes. A -return code is a number that is returned to a scheduler by the process -to indicate the result of the run. In the simplest case, 0 is -success and 1 is failure. However, there may be more complex -scenarios, such as "`If job A returns 4, kick off job B, and, if it returns 5, kick -off job C.`" This type of behavior is configured at the scheduler level, -but it is important that a processing framework such as Spring Batch -provide a way to return a numeric representation of the exit code -for a particular batch job. In Spring Batch, this is encapsulated -within an `ExitStatus`, which is covered in more -detail in Chapter 5. For the purposes of discussing exit codes, the -only important thing to know is that an -`ExitStatus` has an exit code property that is -set by the framework (or the developer) and is returned as part of the -`JobExecution` returned from the -`JobLauncher`. The -`CommandLineJobRunner` converts this string value -to a number by using the `ExitCodeMapper` -interface: - -[source, java] ----- -public interface ExitCodeMapper { - - public int intValue(String exitCode); - -} ----- - -The essential contract of an -`ExitCodeMapper` is that, given a string exit -code, a number representation will be returned. The default -implementation used by the job runner is the `SimpleJvmExitCodeMapper` -that returns 0 for completion, 1 for generic errors, and 2 for any job -runner errors such as not being able to find a -`Job` in the provided context. If anything more -complex than the three values above is needed, a custom -implementation of the `ExitCodeMapper` interface -must be supplied. Because the -`CommandLineJobRunner` is the class that creates -an `ApplicationContext` and, thus, cannot be -'wired together', any values that need to be overwritten must be -autowired. This means that if an implementation of -`ExitCodeMapper` is found within the `BeanFactory`, -it is injected into the runner after the context is created. 
[[runningJobsFromWebContainer]]
==== Running Jobs from within a Web Container

Historically, offline processing (such as batch jobs) has been
launched from the command line, as described earlier. However, there are
many cases where launching from an `HttpRequest` is
a better option. Such use cases include reporting, ad hoc job
running, and web application support. Because a batch job (by definition)
is long running, the most important concern is to launch the
job asynchronously:

.Asynchronous Job Launcher Sequence From Web Container
image::{batch-asciidoc}images/launch-from-request.png[Async Job Launcher Sequence from web container, scaledwidth="60%"]

The controller in this case is a Spring MVC controller. See the
Spring Framework Reference Guide for more about https://docs.spring.io/spring/docs/current/spring-framework-reference/web.html#mvc[Spring MVC].
The controller launches a `Job` by using a
`JobLauncher` that has been configured to launch
jobs asynchronously, which
immediately returns a `JobExecution`. The
`Job` is likely still running. However, this
nonblocking behavior lets the controller return immediately, which
is required when handling an `HttpRequest`. The following listing
shows an example:

[source, java]
----
@Controller
public class JobLauncherController {

    @Autowired
    JobLauncher jobLauncher;

    @Autowired
    Job job;

    @RequestMapping("/jobLauncher.html")
    public void handle() throws Exception {
        jobLauncher.run(job, new JobParameters());
    }
}
----

[[advancedMetaData]]
=== Advanced Metadata Usage

So far, both the `JobLauncher` and `JobRepository` interfaces have been
discussed. Together, they represent the simple launching of a job and basic
CRUD operations of batch domain objects:

.Job Repository
image::{batch-asciidoc}images/job-repository.png[Job Repository, scaledwidth="60%"]

A `JobLauncher` uses the
`JobRepository` to create new
`JobExecution` objects and run them.
`Job` and `Step` implementations
later use the same `JobRepository` for basic updates
of the same executions during the running of a `Job`.
The basic operations suffice for simple scenarios. However, in a large batch
environment with hundreds of batch jobs and complex scheduling
requirements, more advanced access to the metadata is required:

.Advanced Job Repository Access
image::{batch-asciidoc}images/job-repository-advanced.png[Job Repository Advanced, scaledwidth="80%"]

The `JobExplorer` and
`JobOperator` interfaces, which are discussed
in the coming sections, add additional functionality for querying and controlling the metadata.

[[queryingRepository]]
==== Querying the Repository

The most basic need before any advanced features is the ability to
query the repository for existing executions.
This functionality is
provided by the `JobExplorer` interface:

[source, java]
----
public interface JobExplorer {

    List<JobInstance> getJobInstances(String jobName, int start, int count);

    JobExecution getJobExecution(Long executionId);

    StepExecution getStepExecution(Long jobExecutionId, Long stepExecutionId);

    JobInstance getJobInstance(Long instanceId);

    List<JobExecution> getJobExecutions(JobInstance jobInstance);

    Set<JobExecution> findRunningJobExecutions(String jobName);
}
----

As is evident from its method signatures, `JobExplorer` is a read-only version of
the `JobRepository`, and, like the `JobRepository`, it can be easily configured by using a
factory bean.

[role="xmlContent"]
The following example shows how to configure a `JobExplorer` in XML:

.XML Configuration
[source, xml, role="xmlContent"]
----
<bean id="jobExplorer" class="org.springframework.batch.core.explore.support.JobExplorerFactoryBean"
      p:dataSource-ref="dataSource" />
----

[role="javaContent"]
The following example shows how to configure a `JobExplorer` in Java:

.Java Configuration
[source, java, role="javaContent"]
----
...
// This would reside in your DefaultBatchConfiguration extension
@Bean
public JobExplorer jobExplorer() throws Exception {
    JobExplorerFactoryBean factoryBean = new JobExplorerFactoryBean();
    factoryBean.setDataSource(this.dataSource);
    return factoryBean.getObject();
}
...
----

In the earlier section on configuring a `JobRepository`, we noted that you can modify the table prefix
of the `JobRepository` to allow for different versions or schemas. Because
the `JobExplorer` works with the same tables, it also needs the ability to set a prefix.

[role="xmlContent"]
The following example shows how to set the table prefix for a `JobExplorer` in XML:

.XML Configuration
[source, xml, role="xmlContent"]
----
<bean id="jobExplorer" class="org.springframework.batch.core.explore.support.JobExplorerFactoryBean"
      p:dataSource-ref="dataSource" p:tablePrefix="SYSTEM." />
----

[role="javaContent"]
The following example shows how to set the table prefix for a `JobExplorer` in Java:

.Java Configuration
[source, java, role="javaContent"]
----
...
// This would reside in your DefaultBatchConfiguration extension
@Bean
public JobExplorer jobExplorer() throws Exception {
    JobExplorerFactoryBean factoryBean = new JobExplorerFactoryBean();
    factoryBean.setDataSource(this.dataSource);
    factoryBean.setTablePrefix("SYSTEM.");
    return factoryBean.getObject();
}
...
----
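Once configured, the explorer can be used to walk the existing metadata with plain method
calls. The following is a minimal sketch of such a query (the `sampleJob` name and the
page size of five are assumptions for illustration):

[source, java]
----
// Fetch the first page of five instances of "sampleJob" and print the
// status of each of their executions.
List<JobInstance> instances = jobExplorer.getJobInstances("sampleJob", 0, 5);
for (JobInstance jobInstance : instances) {
    for (JobExecution jobExecution : jobExplorer.getJobExecutions(jobInstance)) {
        System.out.println(jobInstance.getJobName() + " -> " + jobExecution.getStatus());
    }
}
----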
==== JobRegistry

A `JobRegistry` (and its parent interface, `JobLocator`) is not mandatory, but it can be
useful if you want to keep track of which jobs are available in the context. It is also
useful for collecting jobs centrally in an application context when they have been created
elsewhere (for example, in child contexts). You can also use custom `JobRegistry` implementations
to manipulate the names and other properties of the jobs that are registered.
Only one implementation is provided by the framework, and it is based on a simple
map from job name to job instance.

[role="xmlContent"]
The following example shows how to include a `JobRegistry` for a job defined in XML:

[source, xml, role="xmlContent"]
----
<bean id="jobRegistry" class="org.springframework.batch.core.configuration.support.MapJobRegistry" />
----

[role="javaContent"]
When using `@EnableBatchProcessing`, a `JobRegistry` is provided for you.
The following example shows how to configure your own `JobRegistry`:

[source, java, role="javaContent"]
----
...
// This is already provided via @EnableBatchProcessing but can be customized by
// overriding the bean in the DefaultBatchConfiguration
@Override
@Bean
public JobRegistry jobRegistry() throws Exception {
    return new MapJobRegistry();
}
...
----

You can populate a `JobRegistry` in either of two ways: by using
a bean post-processor or by using a registrar lifecycle component. The coming
sections describe these two mechanisms.

===== JobRegistryBeanPostProcessor

This is a bean post-processor that can register all jobs as they are created.

[role="xmlContent"]
The following example shows how to include the `JobRegistryBeanPostProcessor` for a job
defined in XML:

.XML Configuration
[source, xml, role="xmlContent"]
----
<bean id="jobRegistryBeanPostProcessor" class="org.springframework.batch.core.configuration.support.JobRegistryBeanPostProcessor">
    <property name="jobRegistry" ref="jobRegistry" />
</bean>
----

[role="javaContent"]
The following example shows how to include the `JobRegistryBeanPostProcessor` for a job
defined in Java:

.Java Configuration
[source, java, role="javaContent"]
----
@Bean
public JobRegistryBeanPostProcessor jobRegistryBeanPostProcessor(JobRegistry jobRegistry) {
    JobRegistryBeanPostProcessor postProcessor = new JobRegistryBeanPostProcessor();
    postProcessor.setJobRegistry(jobRegistry);
    return postProcessor;
}
----

Although it is not strictly necessary, the post-processor in the
example has been given an `id` so that it can be included in child
contexts (for example, as a parent bean definition) and cause all jobs created
there to also be registered automatically.

===== AutomaticJobRegistrar

This is a lifecycle component that creates child contexts and registers jobs from those
contexts as they are created. One advantage of doing this is that, while the job names in
the child contexts still have to be globally unique in the registry, their dependencies
can have "`natural`" names. So, for example, you can create a set of XML configuration files
that each have only one `Job` but that all have different definitions of an `ItemReader` with the
same bean name, such as `reader`. If all those files were imported into the same context,
the reader definitions would clash and override one another, but, with the automatic
registrar, this is avoided. This makes it easier to integrate jobs that have been contributed from
separate modules of an application.

[role="xmlContent"]
The following example shows how to include the `AutomaticJobRegistrar` for a job defined
in XML:

.XML Configuration
[source, xml, role="xmlContent"]
----
<bean class="org.springframework.batch.core.configuration.support.AutomaticJobRegistrar">
   <property name="applicationContextFactories">
      <bean class="org.springframework.batch.core.configuration.support.ClasspathXmlApplicationContextsFactoryBean">
         <property name="resources" value="classpath*:/config/job*.xml" />
      </bean>
   </property>
   <property name="jobLoader">
      <bean class="org.springframework.batch.core.configuration.support.DefaultJobLoader">
         <property name="jobRegistry" ref="jobRegistry" />
      </bean>
   </property>
</bean>
----

[role="javaContent"]
The following example shows how to include the `AutomaticJobRegistrar` for a job defined
in Java:

.Java Configuration
[source, java, role="javaContent"]
----
@Bean
public AutomaticJobRegistrar registrar() {

    AutomaticJobRegistrar registrar = new AutomaticJobRegistrar();
    registrar.setJobLoader(jobLoader());
    registrar.setApplicationContextFactories(applicationContextFactories());
    registrar.afterPropertiesSet();
    return registrar;

}
----

The registrar has two mandatory properties: an array of
`ApplicationContextFactory` (created from a
convenient factory bean in the preceding example) and a
`JobLoader`. The `JobLoader`
is responsible for managing the lifecycle of the child contexts and
registering jobs in the `JobRegistry`.

The `ApplicationContextFactory` is
responsible for creating the child context. The most common usage
is (as in the preceding example) to use a
`ClassPathXmlApplicationContextFactory`. One of
the features of this factory is that, by default, it copies some of the
configuration down from the parent context to the child. So, for
instance, you need not redefine the
`PropertyPlaceholderConfigurer` or AOP
configuration in the child, provided it should be the same as the
parent.
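The `jobLoader()` and `applicationContextFactories()` beans referenced in the preceding
Java example are not shown there. A minimal sketch of what they might look like follows
(the `config/job1.xml` and `config/job2.xml` resources are assumptions for illustration):

[source, java]
----
@Bean
public ApplicationContextFactory[] applicationContextFactories() {
    // One child context is created per job configuration resource.
    return new ApplicationContextFactory[] {
        new GenericApplicationContextFactory(new ClassPathResource("config/job1.xml")),
        new GenericApplicationContextFactory(new ClassPathResource("config/job2.xml"))
    };
}

@Bean
public JobLoader jobLoader(JobRegistry jobRegistry) {
    // DefaultJobLoader manages the lifecycle of the child contexts and
    // registers the jobs they contain in the given registry.
    return new DefaultJobLoader(jobRegistry);
}
----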
You can use `AutomaticJobRegistrar` in
conjunction with a `JobRegistryBeanPostProcessor`
(as long as you also use `DefaultJobLoader`).
For instance, this might be desirable if there are jobs
defined in the main parent context as well as in the child
locations.

[[JobOperator]]
==== JobOperator

As previously discussed, the `JobRepository`
provides CRUD operations on the metadata, and the
`JobExplorer` provides read-only operations on the
metadata. However, those operations are most useful when used together
to perform common monitoring tasks such as stopping, restarting, or
summarizing a job, as is commonly done by batch operators. Spring Batch
provides these types of operations in the
`JobOperator` interface:

[source, java]
----
public interface JobOperator {

    List<Long> getExecutions(long instanceId) throws NoSuchJobInstanceException;

    List<Long> getJobInstances(String jobName, int start, int count)
          throws NoSuchJobException;

    Set<Long> getRunningExecutions(String jobName) throws NoSuchJobException;

    String getParameters(long executionId) throws NoSuchJobExecutionException;

    Long start(String jobName, String parameters)
          throws NoSuchJobException, JobInstanceAlreadyExistsException;

    Long restart(long executionId)
          throws JobInstanceAlreadyCompleteException, NoSuchJobExecutionException,
                 NoSuchJobException, JobRestartException;

    Long startNextInstance(String jobName)
          throws NoSuchJobException, JobParametersNotFoundException, JobRestartException,
                 JobExecutionAlreadyRunningException, JobInstanceAlreadyCompleteException;

    boolean stop(long executionId)
          throws NoSuchJobExecutionException, JobExecutionNotRunningException;

    String getSummary(long executionId) throws NoSuchJobExecutionException;

    Map<Long, String> getStepExecutionSummaries(long executionId)
          throws NoSuchJobExecutionException;

    Set<String> getJobNames();

}
----

The preceding operations represent methods from many different interfaces, such as
`JobLauncher`, `JobRepository`, `JobExplorer`, and `JobRegistry`. For this reason, the
provided implementation of `JobOperator` (`SimpleJobOperator`) has many dependencies.

[role="xmlContent"]
The following example shows a typical bean definition for `SimpleJobOperator` in XML:

[source, xml, role="xmlContent"]
----
<bean id="jobOperator" class="org.springframework.batch.core.launch.support.SimpleJobOperator">
    <property name="jobExplorer">
        <bean class="org.springframework.batch.core.explore.support.JobExplorerFactoryBean">
            <property name="dataSource" ref="dataSource" />
        </bean>
    </property>
    <property name="jobRepository" ref="jobRepository" />
    <property name="jobRegistry" ref="jobRegistry" />
    <property name="jobLauncher" ref="jobLauncher" />
</bean>
----

[role="javaContent"]
The following example shows a typical bean definition for `SimpleJobOperator` in Java:

[source, java, role="javaContent"]
----
/**
 * All injected dependencies for this bean are provided by the @EnableBatchProcessing
 * infrastructure out of the box.
 */
@Bean
public SimpleJobOperator jobOperator(JobExplorer jobExplorer,
                                     JobRepository jobRepository,
                                     JobRegistry jobRegistry,
                                     JobLauncher jobLauncher) {

    SimpleJobOperator jobOperator = new SimpleJobOperator();
    jobOperator.setJobExplorer(jobExplorer);
    jobOperator.setJobRepository(jobRepository);
    jobOperator.setJobRegistry(jobRegistry);
    jobOperator.setJobLauncher(jobLauncher);

    return jobOperator;
}
----

As of version 5.0, the `@EnableBatchProcessing` annotation automatically registers a job operator bean
in the application context.

NOTE: If you set the table prefix on the job repository, do not forget to set it on the job explorer as well.
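As a quick illustration of the operator in use, the following sketch starts a new
instance of a job by name and prints a summary of the resulting execution, following the
`start` signature listed earlier (the `sampleJob` name and the parameter string are
assumptions):

[source, java]
----
// Start a job with a string-encoded parameter and print a human-readable
// summary of the resulting execution.
Long executionId = jobOperator.start("sampleJob", "schedule.date=2007-05-05,java.time.LocalDate");
System.out.println(jobOperator.getSummary(executionId));
----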
[[JobParametersIncrementer]]
==== JobParametersIncrementer

Most of the methods on `JobOperator` are
self-explanatory, and you can find more detailed explanations in the
https://docs.spring.io/spring-batch/docs/current/api/org/springframework/batch/core/launch/JobOperator.html[Javadoc of the interface]. However, the
`startNextInstance` method is worth noting. This
method always starts a new instance of a `Job`.
This can be extremely useful if there are serious issues in a
`JobExecution` and the `Job`
needs to be started over again from the beginning. Unlike the `run` method of
`JobLauncher` (which requires a new
`JobParameters` object that triggers a new
`JobInstance` if the parameters are different from
any previous set of parameters), the
`startNextInstance` method uses the
`JobParametersIncrementer` tied to the
`Job` to force the `Job` to a
new instance:

[source, java]
----
public interface JobParametersIncrementer {

    JobParameters getNext(JobParameters parameters);

}
----

The contract of `JobParametersIncrementer` is
that, given a `JobParameters`
object, it returns the "`next`" `JobParameters`
object by incrementing any necessary values it may contain. This
strategy is useful because the framework has no way of knowing what
changes to the `JobParameters` make it the "`next`"
instance. For example, if the only value in
`JobParameters` is a date and the next instance
should be created, should that value be incremented by one day or one
week (if the job is weekly, for instance)? The same can be said for any
numerical values that help to identify the `Job`,
as the following example shows:

[source, java]
----
public class SampleIncrementer implements JobParametersIncrementer {

    public JobParameters getNext(JobParameters parameters) {
        if (parameters == null || parameters.isEmpty()) {
            return new JobParametersBuilder().addLong("run.id", 1L).toJobParameters();
        }
        long id = parameters.getLong("run.id", 1L) + 1;
        return new JobParametersBuilder().addLong("run.id", id).toJobParameters();
    }
}
----

In this example, the value with a key of `run.id` is used to
discriminate between `JobInstances`. If the
`JobParameters` passed in is null, it can be
assumed that the `Job` has never been run before
and, thus, its initial state can be returned. However, if not, the old
value is obtained, incremented by one, and returned.

ifdef::backend-spring-html[]
[role="xmlContent"]
For jobs defined in XML, you can associate an incrementer with a `Job` through the
`incrementer` attribute in the namespace, as follows:

[source, xml, role="xmlContent"]
----
<job id="footballJob" incrementer="sampleIncrementer">
    ...
</job>
----

[role="javaContent"]
For jobs defined in Java, you can associate an incrementer with a `Job` through the
`incrementer` method provided in the builders, as follows:

[source, java, role="javaContent"]
----
@Bean
public Job footballJob(JobRepository jobRepository) {
    return new JobBuilder("footballJob", jobRepository)
            .incrementer(sampleIncrementer())
            ...
            .build();
}
----
endif::backend-spring-html[]

ifdef::backend-pdf[]
You can associate an incrementer
with a `Job` by using the `incrementer`
attribute in the namespace:

[source, xml]
----
<job id="footballJob" incrementer="sampleIncrementer">
    ...
</job>
----

The Java configuration builders also provide facilities for the configuration of an `incrementer`:

[source, java]
----
@Bean
public Job footballJob(JobRepository jobRepository) {
    return new JobBuilder("footballJob", jobRepository)
            .incrementer(sampleIncrementer())
            ...
            .build();
}
----
endif::backend-pdf[]
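With such an incrementer registered, starting the "`next`" instance from an operator's
point of view becomes a single call. The following sketch assumes the `footballJob`
configuration shown in the preceding example:

[source, java]
----
// Asks the JobParametersIncrementer bound to footballJob for the next
// JobParameters and launches a new JobInstance with them.
Long executionId = jobOperator.startNextInstance("footballJob");
----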
[[stoppingAJob]]
==== Stopping a Job

One of the most common use cases of
`JobOperator` is gracefully stopping a
job:

[source, java]
----
Set<Long> executions = jobOperator.getRunningExecutions("sampleJob");
jobOperator.stop(executions.iterator().next());
----

The shutdown is not immediate, since there is no way to force an
immediate shutdown, especially if the execution is currently in
developer code that the framework has no control over, such as a
business service. However, as soon as control is returned to the
framework, it sets the status of the current
`StepExecution` to
`BatchStatus.STOPPED`, saves it, and does the same
for the `JobExecution` before finishing.

==== Aborting a Job

A job execution that is `FAILED` can be
restarted (if the `Job` is restartable). A job execution whose status is
`ABANDONED` cannot be restarted by the framework.
The `ABANDONED` status is also used in step
executions to mark them as skippable in a restarted job execution: if a
job is running and encounters a step that was marked
`ABANDONED` in the previous failed job execution, it
moves on to the next step (as determined by the job flow definition
and the step execution exit status).

If the process died (`kill -9` or server
failure), the job is, of course, not running, but the `JobRepository` has
no way of knowing, because no one told it before the process died. You
have to tell it manually that you know that the execution either failed
or should be considered aborted (change its status to
`FAILED` or `ABANDONED`). This is
a business decision, and there is no way to automate it. Change the
status to `FAILED` only if it is restartable and you know that the restart data is valid.
diff --git a/spring-batch-docs/src/main/asciidoc/js/DocumentToggle.js b/spring-batch-docs/src/main/asciidoc/js/DocumentToggle.js
deleted file mode 100644
index c2941f3580..0000000000
--- a/spring-batch-docs/src/main/asciidoc/js/DocumentToggle.js
+++ /dev/null
@@ -1,76 +0,0 @@
-$(document).ready(function(){
-
-    var BATCH_LANGUAGES = ["java", "xml", "both"];
-    var $xmlButton = $("#xmlButton");
-    var $javaButton = $("#javaButton");
-    var $bothButton = $("#bothButton");
-
-    var $xmlContent = $("*.xmlContent");
-    var $xmlContentAll = $("*.xmlContent > *");
-
-    var $javaContent = $("*.javaContent");
-    var $javaContentAll = $("*.javaContent > *");
-
-    // Initial cookie handler. This part remembers the
-    // reader's choice and sets the toggle accordingly.
-    var lang = window.localStorage.getItem("docToggle");
-    if (BATCH_LANGUAGES.indexOf(lang) === -1) {
-        lang = "java";
-        $javaButton.prop("checked", true);
-        setJava();
-    } else {
-        if (lang === "xml") {
-            $xmlButton.prop("checked", true);
-            setXml();
-        }
-        if (lang === "java") {
-            $javaButton.prop("checked", true);
-            setJava();
-        }
-        if (lang === "both") {
-            $javaButton.prop("checked", true);
-            setBoth();
-        }
-    }
-
-    // Click handlers
-    $xmlButton.on("click", function() {
-        setXml();
-    });
-    $javaButton.on("click", function() {
-        setJava();
-    });
-    $bothButton.on("click", function() {
-        setBoth();
-    });
-
-    // Functions to do the work of handling the reader's choice, whether through a click
-    // or through a cookie. 3652 days is 10 years, give or take a leap day.
- function setXml() { - $xmlContent.show(); - $javaContent.hide(); - $javaContentAll.addClass("js-toc-ignore"); - $xmlContentAll.removeClass("js-toc-ignore"); - window.dispatchEvent(new Event("tocRefresh")); - window.localStorage.setItem('docToggle', 'xml'); - } - - function setJava() { - $javaContent.show(); - $xmlContent.hide(); - $xmlContentAll.addClass("js-toc-ignore"); - $javaContentAll.removeClass("js-toc-ignore"); - window.dispatchEvent(new Event("tocRefresh")); - window.localStorage.setItem('docToggle', 'java'); - } - - function setBoth() { - $javaContent.show(); - $xmlContent.show(); - $javaContentAll.removeClass("js-toc-ignore"); - $xmlContentAll.removeClass("js-toc-ignore"); - window.dispatchEvent(new Event("tocRefresh")); - window.localStorage.setItem('docToggle', 'both'); - } - -}); diff --git a/spring-batch-docs/src/main/asciidoc/js/Redirect.js b/spring-batch-docs/src/main/asciidoc/js/Redirect.js deleted file mode 100644 index 61a4f2f9bf..0000000000 --- a/spring-batch-docs/src/main/asciidoc/js/Redirect.js +++ /dev/null @@ -1,62 +0,0 @@ -$(document).ready(function(){ - - redirect(); - - function redirect() { - var anchorMap = { - "#domain": "#domainLanguageOfBatch", - "#domainJob": "#job", - "#domainJobInstance": "#jobinstance", - "#domainJobParameters": "#jobparameters", - "#domainJobExecution": "#jobexecution", - "#d5e455": "#jobexecution", - "#d5e497": "#jobexecution", - "#d5e507": "#jobexecution", - "#d5e523": "#jobexecution", - "#d5e550": "#jobexecution", - "#d5e563": "#jobexecution", - "#d5e591": "#jobexecution", - "#domainStep": "#step", - "#domainStepExecution": "#stepexecution", - "#d5e655": "#stepexecution", - "#domainExecutionContext": "#executioncontext", - "#d5e721": "#executioncontext", - "#d5e731": "#executioncontext", - "#d5e745": "#executioncontext", - "#d5e761": "#executioncontext", - "#d5e779": "#executioncontext", - "#domainJobRepository": "#jobrepository", - "#domainJobLauncher": "#joblauncher", - "#domainItemReader": "#item-reader", - "#domainItemWriter": "#item-writer", - "#domainItemProcessor": "#item-processor", - "#domainBatchNamespace": "#batch-namespace", - "#d5e970": "#jobparametersvalidator", - "#d5e1130": "#commandLineJobRunner", - "#d5e1232": "#jobregistry", - "#d5e1237": "#jobregistrybeanpostprocessor", - "#d5e1242": "#automaticjobregistrar", - "#d5e1320": "#aborting-a-job", - "#filiteringRecords": "#filteringRecords", - "#d5e2247": "#flatFileItemReader", - "#d5e2769": "#JdbcCursorItemReaderProperties", - "#stepExecutionSplitter": "#partitioner", - "#d5e3182": "#bindingInputDataToSteps", - "#d5e3241": "#repeatStatus", - "#d5e3531": "#testing-step-scoped-components", - "#patterns": "#commonPatterns", - "#d5e3959": "#item-based-processing", - "#d5e3969": "#custom-checkpointing", - "#available-attributes-of-the-job-launching-gateway": "#availableAttributesOfTheJobLaunchingGateway", - "#d5e4425": "#itemReadersAppendix", - "#d5e4494": "#itemWritersAppendix", - "#d5e4788": "#recommendationsForIndexingMetaDataTables" - }; - var baseUrl = window.location.origin + window.location.pathname; - var anchor = window.location.hash; - if (anchor && anchorMap[anchor] != null) { - window.location.replace(baseUrl + anchorMap[anchor]); - } - } - -}); diff --git a/spring-batch-docs/src/main/asciidoc/js/jquery-3.2.1.min.js b/spring-batch-docs/src/main/asciidoc/js/jquery-3.2.1.min.js deleted file mode 100644 index 644d35e274..0000000000 --- a/spring-batch-docs/src/main/asciidoc/js/jquery-3.2.1.min.js +++ /dev/null @@ -1,4 +0,0 @@ -/*! 
jQuery v3.2.1 | (c) JS Foundation and other contributors | jquery.org/license */
b=a.parentNode;b&&(b.selectedIndex,b.parentNode&&b.parentNode.selectedIndex)}}),r.each(["tabIndex","readOnly","maxLength","cellSpacing","cellPadding","rowSpan","colSpan","useMap","frameBorder","contentEditable"],function(){r.propFix[this.toLowerCase()]=this});function pb(a){var b=a.match(L)||[];return b.join(" ")}function qb(a){return a.getAttribute&&a.getAttribute("class")||""}r.fn.extend({addClass:function(a){var b,c,d,e,f,g,h,i=0;if(r.isFunction(a))return this.each(function(b){r(this).addClass(a.call(this,b,qb(this)))});if("string"==typeof a&&a){b=a.match(L)||[];while(c=this[i++])if(e=qb(c),d=1===c.nodeType&&" "+pb(e)+" "){g=0;while(f=b[g++])d.indexOf(" "+f+" ")<0&&(d+=f+" ");h=pb(d),e!==h&&c.setAttribute("class",h)}}return this},removeClass:function(a){var b,c,d,e,f,g,h,i=0;if(r.isFunction(a))return this.each(function(b){r(this).removeClass(a.call(this,b,qb(this)))});if(!arguments.length)return this.attr("class","");if("string"==typeof a&&a){b=a.match(L)||[];while(c=this[i++])if(e=qb(c),d=1===c.nodeType&&" "+pb(e)+" "){g=0;while(f=b[g++])while(d.indexOf(" "+f+" ")>-1)d=d.replace(" "+f+" "," ");h=pb(d),e!==h&&c.setAttribute("class",h)}}return this},toggleClass:function(a,b){var c=typeof a;return"boolean"==typeof b&&"string"===c?b?this.addClass(a):this.removeClass(a):r.isFunction(a)?this.each(function(c){r(this).toggleClass(a.call(this,c,qb(this),b),b)}):this.each(function(){var b,d,e,f;if("string"===c){d=0,e=r(this),f=a.match(L)||[];while(b=f[d++])e.hasClass(b)?e.removeClass(b):e.addClass(b)}else void 0!==a&&"boolean"!==c||(b=qb(this),b&&W.set(this,"__className__",b),this.setAttribute&&this.setAttribute("class",b||a===!1?"":W.get(this,"__className__")||""))})},hasClass:function(a){var b,c,d=0;b=" "+a+" ";while(c=this[d++])if(1===c.nodeType&&(" "+pb(qb(c))+" ").indexOf(b)>-1)return!0;return!1}});var rb=/\r/g;r.fn.extend({val:function(a){var b,c,d,e=this[0];{if(arguments.length)return d=r.isFunction(a),this.each(function(c){var e;1===this.nodeType&&(e=d?a.call(this,c,r(this).val()):a,null==e?e="":"number"==typeof e?e+="":Array.isArray(e)&&(e=r.map(e,function(a){return null==a?"":a+""})),b=r.valHooks[this.type]||r.valHooks[this.nodeName.toLowerCase()],b&&"set"in b&&void 0!==b.set(this,e,"value")||(this.value=e))});if(e)return b=r.valHooks[e.type]||r.valHooks[e.nodeName.toLowerCase()],b&&"get"in b&&void 0!==(c=b.get(e,"value"))?c:(c=e.value,"string"==typeof c?c.replace(rb,""):null==c?"":c)}}}),r.extend({valHooks:{option:{get:function(a){var b=r.find.attr(a,"value");return null!=b?b:pb(r.text(a))}},select:{get:function(a){var b,c,d,e=a.options,f=a.selectedIndex,g="select-one"===a.type,h=g?null:[],i=g?f+1:e.length;for(d=f<0?i:g?f:0;d-1)&&(c=!0);return c||(a.selectedIndex=-1),f}}}}),r.each(["radio","checkbox"],function(){r.valHooks[this]={set:function(a,b){if(Array.isArray(b))return a.checked=r.inArray(r(a).val(),b)>-1}},o.checkOn||(r.valHooks[this].get=function(a){return null===a.getAttribute("value")?"on":a.value})});var sb=/^(?:focusinfocus|focusoutblur)$/;r.extend(r.event,{trigger:function(b,c,e,f){var g,h,i,j,k,m,n,o=[e||d],p=l.call(b,"type")?b.type:b,q=l.call(b,"namespace")?b.namespace.split("."):[];if(h=i=e=e||d,3!==e.nodeType&&8!==e.nodeType&&!sb.test(p+r.event.triggered)&&(p.indexOf(".")>-1&&(q=p.split("."),p=q.shift(),q.sort()),k=p.indexOf(":")<0&&"on"+p,b=b[r.expando]?b:new r.Event(p,"object"==typeof b&&b),b.isTrigger=f?2:3,b.namespace=q.join("."),b.rnamespace=b.namespace?new RegExp("(^|\\.)"+q.join("\\.(?:.*\\.|)")+"(\\.|$)"):null,b.result=void 
0,b.target||(b.target=e),c=null==c?[b]:r.makeArray(c,[b]),n=r.event.special[p]||{},f||!n.trigger||n.trigger.apply(e,c)!==!1)){if(!f&&!n.noBubble&&!r.isWindow(e)){for(j=n.delegateType||p,sb.test(j+p)||(h=h.parentNode);h;h=h.parentNode)o.push(h),i=h;i===(e.ownerDocument||d)&&o.push(i.defaultView||i.parentWindow||a)}g=0;while((h=o[g++])&&!b.isPropagationStopped())b.type=g>1?j:n.bindType||p,m=(W.get(h,"events")||{})[b.type]&&W.get(h,"handle"),m&&m.apply(h,c),m=k&&h[k],m&&m.apply&&U(h)&&(b.result=m.apply(h,c),b.result===!1&&b.preventDefault());return b.type=p,f||b.isDefaultPrevented()||n._default&&n._default.apply(o.pop(),c)!==!1||!U(e)||k&&r.isFunction(e[p])&&!r.isWindow(e)&&(i=e[k],i&&(e[k]=null),r.event.triggered=p,e[p](),r.event.triggered=void 0,i&&(e[k]=i)),b.result}},simulate:function(a,b,c){var d=r.extend(new r.Event,c,{type:a,isSimulated:!0});r.event.trigger(d,null,b)}}),r.fn.extend({trigger:function(a,b){return this.each(function(){r.event.trigger(a,b,this)})},triggerHandler:function(a,b){var c=this[0];if(c)return r.event.trigger(a,b,c,!0)}}),r.each("blur focus focusin focusout resize scroll click dblclick mousedown mouseup mousemove mouseover mouseout mouseenter mouseleave change select submit keydown keypress keyup contextmenu".split(" "),function(a,b){r.fn[b]=function(a,c){return arguments.length>0?this.on(b,null,a,c):this.trigger(b)}}),r.fn.extend({hover:function(a,b){return this.mouseenter(a).mouseleave(b||a)}}),o.focusin="onfocusin"in a,o.focusin||r.each({focus:"focusin",blur:"focusout"},function(a,b){var c=function(a){r.event.simulate(b,a.target,r.event.fix(a))};r.event.special[b]={setup:function(){var d=this.ownerDocument||this,e=W.access(d,b);e||d.addEventListener(a,c,!0),W.access(d,b,(e||0)+1)},teardown:function(){var d=this.ownerDocument||this,e=W.access(d,b)-1;e?W.access(d,b,e):(d.removeEventListener(a,c,!0),W.remove(d,b))}}});var tb=a.location,ub=r.now(),vb=/\?/;r.parseXML=function(b){var c;if(!b||"string"!=typeof b)return null;try{c=(new a.DOMParser).parseFromString(b,"text/xml")}catch(d){c=void 0}return c&&!c.getElementsByTagName("parsererror").length||r.error("Invalid XML: "+b),c};var wb=/\[\]$/,xb=/\r?\n/g,yb=/^(?:submit|button|image|reset|file)$/i,zb=/^(?:input|select|textarea|keygen)/i;function Ab(a,b,c,d){var e;if(Array.isArray(b))r.each(b,function(b,e){c||wb.test(a)?d(a,e):Ab(a+"["+("object"==typeof e&&null!=e?b:"")+"]",e,c,d)});else if(c||"object"!==r.type(b))d(a,b);else for(e in b)Ab(a+"["+e+"]",b[e],c,d)}r.param=function(a,b){var c,d=[],e=function(a,b){var c=r.isFunction(b)?b():b;d[d.length]=encodeURIComponent(a)+"="+encodeURIComponent(null==c?"":c)};if(Array.isArray(a)||a.jquery&&!r.isPlainObject(a))r.each(a,function(){e(this.name,this.value)});else for(c in a)Ab(c,a[c],b,e);return d.join("&")},r.fn.extend({serialize:function(){return r.param(this.serializeArray())},serializeArray:function(){return this.map(function(){var a=r.prop(this,"elements");return a?r.makeArray(a):this}).filter(function(){var a=this.type;return this.name&&!r(this).is(":disabled")&&zb.test(this.nodeName)&&!yb.test(a)&&(this.checked||!ja.test(a))}).map(function(a,b){var c=r(this).val();return null==c?null:Array.isArray(c)?r.map(c,function(a){return{name:b.name,value:a.replace(xb,"\r\n")}}):{name:b.name,value:c.replace(xb,"\r\n")}}).get()}});var Bb=/%20/g,Cb=/#.*$/,Db=/([?&])_=[^&]*/,Eb=/^(.*?):[ \t]*([^\r\n]*)$/gm,Fb=/^(?:about|app|app-storage|.+-extension|file|res|widget):$/,Gb=/^(?:GET|HEAD)$/,Hb=/^\/\//,Ib={},Jb={},Kb="*/".concat("*"),Lb=d.createElement("a");Lb.href=tb.href;function 
Mb(a){return function(b,c){"string"!=typeof b&&(c=b,b="*");var d,e=0,f=b.toLowerCase().match(L)||[];if(r.isFunction(c))while(d=f[e++])"+"===d[0]?(d=d.slice(1)||"*",(a[d]=a[d]||[]).unshift(c)):(a[d]=a[d]||[]).push(c)}}function Nb(a,b,c,d){var e={},f=a===Jb;function g(h){var i;return e[h]=!0,r.each(a[h]||[],function(a,h){var j=h(b,c,d);return"string"!=typeof j||f||e[j]?f?!(i=j):void 0:(b.dataTypes.unshift(j),g(j),!1)}),i}return g(b.dataTypes[0])||!e["*"]&&g("*")}function Ob(a,b){var c,d,e=r.ajaxSettings.flatOptions||{};for(c in b)void 0!==b[c]&&((e[c]?a:d||(d={}))[c]=b[c]);return d&&r.extend(!0,a,d),a}function Pb(a,b,c){var d,e,f,g,h=a.contents,i=a.dataTypes;while("*"===i[0])i.shift(),void 0===d&&(d=a.mimeType||b.getResponseHeader("Content-Type"));if(d)for(e in h)if(h[e]&&h[e].test(d)){i.unshift(e);break}if(i[0]in c)f=i[0];else{for(e in c){if(!i[0]||a.converters[e+" "+i[0]]){f=e;break}g||(g=e)}f=f||g}if(f)return f!==i[0]&&i.unshift(f),c[f]}function Qb(a,b,c,d){var e,f,g,h,i,j={},k=a.dataTypes.slice();if(k[1])for(g in a.converters)j[g.toLowerCase()]=a.converters[g];f=k.shift();while(f)if(a.responseFields[f]&&(c[a.responseFields[f]]=b),!i&&d&&a.dataFilter&&(b=a.dataFilter(b,a.dataType)),i=f,f=k.shift())if("*"===f)f=i;else if("*"!==i&&i!==f){if(g=j[i+" "+f]||j["* "+f],!g)for(e in j)if(h=e.split(" "),h[1]===f&&(g=j[i+" "+h[0]]||j["* "+h[0]])){g===!0?g=j[e]:j[e]!==!0&&(f=h[0],k.unshift(h[1]));break}if(g!==!0)if(g&&a["throws"])b=g(b);else try{b=g(b)}catch(l){return{state:"parsererror",error:g?l:"No conversion from "+i+" to "+f}}}return{state:"success",data:b}}r.extend({active:0,lastModified:{},etag:{},ajaxSettings:{url:tb.href,type:"GET",isLocal:Fb.test(tb.protocol),global:!0,processData:!0,async:!0,contentType:"application/x-www-form-urlencoded; charset=UTF-8",accepts:{"*":Kb,text:"text/plain",html:"text/html",xml:"application/xml, text/xml",json:"application/json, text/javascript"},contents:{xml:/\bxml\b/,html:/\bhtml/,json:/\bjson\b/},responseFields:{xml:"responseXML",text:"responseText",json:"responseJSON"},converters:{"* text":String,"text html":!0,"text json":JSON.parse,"text xml":r.parseXML},flatOptions:{url:!0,context:!0}},ajaxSetup:function(a,b){return b?Ob(Ob(a,r.ajaxSettings),b):Ob(r.ajaxSettings,a)},ajaxPrefilter:Mb(Ib),ajaxTransport:Mb(Jb),ajax:function(b,c){"object"==typeof b&&(c=b,b=void 0),c=c||{};var e,f,g,h,i,j,k,l,m,n,o=r.ajaxSetup({},c),p=o.context||o,q=o.context&&(p.nodeType||p.jquery)?r(p):r.event,s=r.Deferred(),t=r.Callbacks("once memory"),u=o.statusCode||{},v={},w={},x="canceled",y={readyState:0,getResponseHeader:function(a){var b;if(k){if(!h){h={};while(b=Eb.exec(g))h[b[1].toLowerCase()]=b[2]}b=h[a.toLowerCase()]}return null==b?null:b},getAllResponseHeaders:function(){return k?g:null},setRequestHeader:function(a,b){return null==k&&(a=w[a.toLowerCase()]=w[a.toLowerCase()]||a,v[a]=b),this},overrideMimeType:function(a){return null==k&&(o.mimeType=a),this},statusCode:function(a){var b;if(a)if(k)y.always(a[y.status]);else for(b in a)u[b]=[u[b],a[b]];return this},abort:function(a){var b=a||x;return e&&e.abort(b),A(0,b),this}};if(s.promise(y),o.url=((b||o.url||tb.href)+"").replace(Hb,tb.protocol+"//"),o.type=c.method||c.type||o.method||o.type,o.dataTypes=(o.dataType||"*").toLowerCase().match(L)||[""],null==o.crossDomain){j=d.createElement("a");try{j.href=o.url,j.href=j.href,o.crossDomain=Lb.protocol+"//"+Lb.host!=j.protocol+"//"+j.host}catch(z){o.crossDomain=!0}}if(o.data&&o.processData&&"string"!=typeof o.data&&(o.data=r.param(o.data,o.traditional)),Nb(Ib,o,c,y),k)return 
y;l=r.event&&o.global,l&&0===r.active++&&r.event.trigger("ajaxStart"),o.type=o.type.toUpperCase(),o.hasContent=!Gb.test(o.type),f=o.url.replace(Cb,""),o.hasContent?o.data&&o.processData&&0===(o.contentType||"").indexOf("application/x-www-form-urlencoded")&&(o.data=o.data.replace(Bb,"+")):(n=o.url.slice(f.length),o.data&&(f+=(vb.test(f)?"&":"?")+o.data,delete o.data),o.cache===!1&&(f=f.replace(Db,"$1"),n=(vb.test(f)?"&":"?")+"_="+ub++ +n),o.url=f+n),o.ifModified&&(r.lastModified[f]&&y.setRequestHeader("If-Modified-Since",r.lastModified[f]),r.etag[f]&&y.setRequestHeader("If-None-Match",r.etag[f])),(o.data&&o.hasContent&&o.contentType!==!1||c.contentType)&&y.setRequestHeader("Content-Type",o.contentType),y.setRequestHeader("Accept",o.dataTypes[0]&&o.accepts[o.dataTypes[0]]?o.accepts[o.dataTypes[0]]+("*"!==o.dataTypes[0]?", "+Kb+"; q=0.01":""):o.accepts["*"]);for(m in o.headers)y.setRequestHeader(m,o.headers[m]);if(o.beforeSend&&(o.beforeSend.call(p,y,o)===!1||k))return y.abort();if(x="abort",t.add(o.complete),y.done(o.success),y.fail(o.error),e=Nb(Jb,o,c,y)){if(y.readyState=1,l&&q.trigger("ajaxSend",[y,o]),k)return y;o.async&&o.timeout>0&&(i=a.setTimeout(function(){y.abort("timeout")},o.timeout));try{k=!1,e.send(v,A)}catch(z){if(k)throw z;A(-1,z)}}else A(-1,"No Transport");function A(b,c,d,h){var j,m,n,v,w,x=c;k||(k=!0,i&&a.clearTimeout(i),e=void 0,g=h||"",y.readyState=b>0?4:0,j=b>=200&&b<300||304===b,d&&(v=Pb(o,y,d)),v=Qb(o,v,y,j),j?(o.ifModified&&(w=y.getResponseHeader("Last-Modified"),w&&(r.lastModified[f]=w),w=y.getResponseHeader("etag"),w&&(r.etag[f]=w)),204===b||"HEAD"===o.type?x="nocontent":304===b?x="notmodified":(x=v.state,m=v.data,n=v.error,j=!n)):(n=x,!b&&x||(x="error",b<0&&(b=0))),y.status=b,y.statusText=(c||x)+"",j?s.resolveWith(p,[m,x,y]):s.rejectWith(p,[y,x,n]),y.statusCode(u),u=void 0,l&&q.trigger(j?"ajaxSuccess":"ajaxError",[y,o,j?m:n]),t.fireWith(p,[y,x]),l&&(q.trigger("ajaxComplete",[y,o]),--r.active||r.event.trigger("ajaxStop")))}return y},getJSON:function(a,b,c){return r.get(a,b,c,"json")},getScript:function(a,b){return r.get(a,void 0,b,"script")}}),r.each(["get","post"],function(a,b){r[b]=function(a,c,d,e){return r.isFunction(c)&&(e=e||d,d=c,c=void 0),r.ajax(r.extend({url:a,type:b,dataType:e,data:c,success:d},r.isPlainObject(a)&&a))}}),r._evalUrl=function(a){return r.ajax({url:a,type:"GET",dataType:"script",cache:!0,async:!1,global:!1,"throws":!0})},r.fn.extend({wrapAll:function(a){var b;return this[0]&&(r.isFunction(a)&&(a=a.call(this[0])),b=r(a,this[0].ownerDocument).eq(0).clone(!0),this[0].parentNode&&b.insertBefore(this[0]),b.map(function(){var a=this;while(a.firstElementChild)a=a.firstElementChild;return a}).append(this)),this},wrapInner:function(a){return r.isFunction(a)?this.each(function(b){r(this).wrapInner(a.call(this,b))}):this.each(function(){var b=r(this),c=b.contents();c.length?c.wrapAll(a):b.append(a)})},wrap:function(a){var b=r.isFunction(a);return this.each(function(c){r(this).wrapAll(b?a.call(this,c):a)})},unwrap:function(a){return this.parent(a).not("body").each(function(){r(this).replaceWith(this.childNodes)}),this}}),r.expr.pseudos.hidden=function(a){return!r.expr.pseudos.visible(a)},r.expr.pseudos.visible=function(a){return!!(a.offsetWidth||a.offsetHeight||a.getClientRects().length)},r.ajaxSettings.xhr=function(){try{return new a.XMLHttpRequest}catch(b){}};var Rb={0:200,1223:204},Sb=r.ajaxSettings.xhr();o.cors=!!Sb&&"withCredentials"in Sb,o.ajax=Sb=!!Sb,r.ajaxTransport(function(b){var c,d;if(o.cors||Sb&&!b.crossDomain)return{send:function(e,f){var 
g,h=b.xhr();if(h.open(b.type,b.url,b.async,b.username,b.password),b.xhrFields)for(g in b.xhrFields)h[g]=b.xhrFields[g];b.mimeType&&h.overrideMimeType&&h.overrideMimeType(b.mimeType),b.crossDomain||e["X-Requested-With"]||(e["X-Requested-With"]="XMLHttpRequest");for(g in e)h.setRequestHeader(g,e[g]);c=function(a){return function(){c&&(c=d=h.onload=h.onerror=h.onabort=h.onreadystatechange=null,"abort"===a?h.abort():"error"===a?"number"!=typeof h.status?f(0,"error"):f(h.status,h.statusText):f(Rb[h.status]||h.status,h.statusText,"text"!==(h.responseType||"text")||"string"!=typeof h.responseText?{binary:h.response}:{text:h.responseText},h.getAllResponseHeaders()))}},h.onload=c(),d=h.onerror=c("error"),void 0!==h.onabort?h.onabort=d:h.onreadystatechange=function(){4===h.readyState&&a.setTimeout(function(){c&&d()})},c=c("abort");try{h.send(b.hasContent&&b.data||null)}catch(i){if(c)throw i}},abort:function(){c&&c()}}}),r.ajaxPrefilter(function(a){a.crossDomain&&(a.contents.script=!1)}),r.ajaxSetup({accepts:{script:"text/javascript, application/javascript, application/ecmascript, application/x-ecmascript"},contents:{script:/\b(?:java|ecma)script\b/},converters:{"text script":function(a){return r.globalEval(a),a}}}),r.ajaxPrefilter("script",function(a){void 0===a.cache&&(a.cache=!1),a.crossDomain&&(a.type="GET")}),r.ajaxTransport("script",function(a){if(a.crossDomain){var b,c;return{send:function(e,f){b=r(" - - - -
[elided: deleted lines whose inline <script> markup was stripped during extraction]
-+++
-endif::backend-spring-html[]