@@ -136,6 +136,8 @@ func (w *worker) onAddTablePartition(jobCtx *jobContext, job *model.Job) (ver in

// move the adding definition into tableInfo.
updateAddingPartitionInfo(partInfo, tblInfo)
+ tblInfo.Partition.DDLState = model.StateReplicaOnly
+ tblInfo.Partition.DDLAction = job.Type
ver, err = updateVersionAndTableInfoWithCheck(jobCtx, job, tblInfo, true)
if err != nil {
return ver, errors.Trace(err)
@@ -222,6 +224,8 @@ func (w *worker) onAddTablePartition(jobCtx *jobContext, job *model.Job) (ver in

preSplitAndScatter(w.sess.Context, jobCtx.store, tblInfo, addingDefinitions)

+ tblInfo.Partition.DDLState = model.StateNone
+ tblInfo.Partition.DDLAction = model.ActionNone
ver, err = updateVersionAndTableInfo(jobCtx, job, tblInfo, true)
if err != nil {
return ver, errors.Trace(err)
@@ -2244,9 +2248,6 @@ func (w *worker) onDropTablePartition(jobCtx *jobContext, job *model.Job) (ver i
}

var physicalTableIDs []int64
- // In order to skip maintaining the state check in partitionDefinition, TiDB use droppingDefinition instead of state field.
- // So here using `job.SchemaState` to judge what the stage of this job is.
- originalState := job.SchemaState
switch job.SchemaState {
case model.StatePublic:
// Here we mark the partitions to be dropped, so they are not read or written
@@ -2260,11 +2261,11 @@ func (w *worker) onDropTablePartition(jobCtx *jobContext, job *model.Job) (ver i
originalDefs := tblInfo.Partition.Definitions
physicalTableIDs = updateDroppingPartitionInfo(tblInfo, partNames)
tblInfo.Partition.Definitions = originalDefs
- tblInfo.Partition.DDLState = model.StateWriteOnly
- tblInfo.Partition.DDLAction = model.ActionDropTablePartition
-
job.SchemaState = model.StateWriteOnly
- ver, err = updateVersionAndTableInfo(jobCtx, job, tblInfo, originalState != job.SchemaState)
+ tblInfo.Partition.DDLState = job.SchemaState
+ tblInfo.Partition.DDLAction = job.Type
+
+ ver, err = updateVersionAndTableInfo(jobCtx, job, tblInfo, true)
case model.StateWriteOnly:
// Since the previous state do not use the dropping partitions,
// we can now actually remove them, allowing to write into the overlapping range
@@ -2308,16 +2309,16 @@ func (w *worker) onDropTablePartition(jobCtx *jobContext, job *model.Job) (ver i
return ver, err
}

- tblInfo.Partition.DDLState = model.StateDeleteOnly
job.SchemaState = model.StateDeleteOnly
- ver, err = updateVersionAndTableInfo(jobCtx, job, tblInfo, originalState != job.SchemaState)
+ tblInfo.Partition.DDLState = job.SchemaState
+ ver, err = updateVersionAndTableInfo(jobCtx, job, tblInfo, true)
case model.StateDeleteOnly:
// This state is not a real 'DeleteOnly' state, because tidb does not maintain the state check in partitionDefinition.
// Insert this state to confirm all servers can not see the old partitions when reorg is running,
// so that no new data will be inserted into old partitions when reorganizing.
- tblInfo.Partition.DDLState = model.StateDeleteReorganization
job.SchemaState = model.StateDeleteReorganization
- ver, err = updateVersionAndTableInfo(jobCtx, job, tblInfo, originalState != job.SchemaState)
+ tblInfo.Partition.DDLState = job.SchemaState
+ ver, err = updateVersionAndTableInfo(jobCtx, job, tblInfo, true)
case model.StateDeleteReorganization:
oldTblInfo := getTableInfoWithDroppingPartitions(tblInfo)
physicalTableIDs = getPartitionIDsFromDefinitions(tblInfo.Partition.DroppingDefinitions)
@@ -2375,7 +2376,8 @@ func (w *worker) onDropTablePartition(jobCtx *jobContext, job *model.Job) (ver i
}
droppedDefs := tblInfo.Partition.DroppingDefinitions
tblInfo.Partition.DroppingDefinitions = nil
- tblInfo.Partition.DDLState = model.StateNone
+ job.SchemaState = model.StateNone
+ tblInfo.Partition.DDLState = job.SchemaState
tblInfo.Partition.DDLAction = model.ActionNone
// used by ApplyDiff in updateSchemaVersion
job.CtxVars = []any{physicalTableIDs} // TODO remove it.
@@ -2511,14 +2513,16 @@ func (w *worker) onTruncateTablePartition(jobCtx *jobContext, job *model.Job) (i
pi.DroppingDefinitions = truncatingDefinitions
pi.NewPartitionIDs = newIDs[:]

- tblInfo.Partition.DDLAction = model.ActionTruncateTablePartition
job.SchemaState = model.StateDeleteOnly
+ pi.DDLState = job.SchemaState
+ pi.DDLAction = job.Type
ver, err = updateVersionAndTableInfo(jobCtx, job, tblInfo, true)
case model.StateDeleteOnly:
// This state is not a real 'DeleteOnly' state, because tidb does not maintaining the state check in partitionDefinition.
// Insert this state to confirm all servers can not see the old partitions when reorg is running,
// so that no new data will be inserted into old partitions when reorganizing.
job.SchemaState = model.StateDeleteReorganization
+ pi.DDLState = job.SchemaState
ver, err = updateVersionAndTableInfo(jobCtx, job, tblInfo, true)
case model.StateDeleteReorganization:
// Step2: clear global index rows.
@@ -2609,6 +2613,8 @@ func (w *worker) onTruncateTablePartition(jobCtx *jobContext, job *model.Job) (i
// Step4: clear DroppingDefinitions and finish job.
tblInfo.Partition.DroppingDefinitions = nil
tblInfo.Partition.NewPartitionIDs = nil
+ tblInfo.Partition.DDLAction = model.ActionNone
+ tblInfo.Partition.DDLState = model.StateNone

preSplitAndScatter(w.sess.Context, jobCtx.store, tblInfo, newPartitions)

@@ -2816,6 +2822,8 @@ func (w *worker) onExchangeTablePartition(jobCtx *jobContext, job *model.Job) (v
// into the table using the schema version
// before the exchange is made.
job.SchemaState = model.StateWriteOnly
+ pt.Partition.DDLState = job.SchemaState
+ pt.Partition.DDLAction = job.Type
return updateVersionAndTableInfoWithCheck(jobCtx, job, nt, true, ptInfo...)
}
// From now on, nt (the non-partitioned table) has
@@ -2890,6 +2898,8 @@ func (w *worker) onExchangeTablePartition(jobCtx *jobContext, job *model.Job) (v
originalPartitionDef := partDef.Clone()
originalNt := nt.Clone()
partDef.ID, nt.ID = nt.ID, partDef.ID
+ pt.Partition.DDLState = model.StateNone
+ pt.Partition.DDLAction = model.ActionNone

err = metaMut.UpdateTable(ptSchemaID, pt)
if err != nil {
@@ -3324,7 +3334,7 @@ func (w *worker) onReorganizePartition(jobCtx *jobContext, job *model.Job) (ver
// Assume we cannot have more than MaxUint64 rows, set the progress to 1/10 of that.
metrics.GetBackfillProgressByLabel(metrics.LblReorgPartition, job.SchemaName, tblInfo.Name.String()).Set(0.1 / float64(math.MaxUint64))
job.SchemaState = model.StateDeleteOnly
- tblInfo.Partition.DDLState = model.StateDeleteOnly
+ tblInfo.Partition.DDLState = job.SchemaState
tblInfo.Partition.DDLAction = job.Type
ver, err = updateVersionAndTableInfoWithCheck(jobCtx, job, tblInfo, true)
if err != nil {
@@ -3394,8 +3404,9 @@ func (w *worker) onReorganizePartition(jobCtx *jobContext, job *model.Job) (ver
failpoint.Return(rollbackReorganizePartitionWithErr(jobCtx, job, err))
}
})
- ver, err = updateVersionAndTableInfo(jobCtx, job, tblInfo, true)
job.SchemaState = model.StateWriteOnly
+ tblInfo.Partition.DDLState = job.SchemaState
+ ver, err = updateVersionAndTableInfo(jobCtx, job, tblInfo, true)
case model.StateWriteOnly:
// Insert this state to confirm all servers can see the new partitions when reorg is running,
// so that new data will be updated in both old and new partitions when reorganizing.
@@ -3405,10 +3416,10 @@ func (w *worker) onReorganizePartition(jobCtx *jobContext, job *model.Job) (ver
tblInfo.Indices[i].State = model.StateWriteReorganization
}
}
- tblInfo.Partition.DDLState = model.StateWriteReorganization
+ job.SchemaState = model.StateWriteReorganization
+ tblInfo.Partition.DDLState = job.SchemaState
metrics.GetBackfillProgressByLabel(metrics.LblReorgPartition, job.SchemaName, tblInfo.Name.String()).Set(0.3 / float64(math.MaxUint64))
ver, err = updateVersionAndTableInfo(jobCtx, job, tblInfo, true)
- job.SchemaState = model.StateWriteReorganization
case model.StateWriteReorganization:
physicalTableIDs := getPartitionIDsFromDefinitions(tblInfo.Partition.DroppingDefinitions)
tbl, err2 := getTable(jobCtx.getAutoIDRequirement(), job.SchemaID, tblInfo)
@@ -3493,9 +3504,9 @@ func (w *worker) onReorganizePartition(jobCtx *jobContext, job *model.Job) (ver
// Now all the data copying is done, but we cannot simply remove the droppingDefinitions
// since they are a part of the normal Definitions that other nodes with
// the current schema version. So we need to double write for one more schema version
- tblInfo.Partition.DDLState = model.StateDeleteReorganization
- ver, err = updateVersionAndTableInfo(jobCtx, job, tblInfo, true)
job.SchemaState = model.StateDeleteReorganization
+ tblInfo.Partition.DDLState = job.SchemaState
+ ver, err = updateVersionAndTableInfo(jobCtx, job, tblInfo, true)

case model.StateDeleteReorganization:
// Drop the droppingDefinitions and finish the DDL
@@ -3517,6 +3528,7 @@ func (w *worker) onReorganizePartition(jobCtx *jobContext, job *model.Job) (ver
tblInfo.Partition.DroppingDefinitions = nil
tblInfo.Partition.AddingDefinitions = nil
tblInfo.Partition.DDLState = model.StateNone
+ tblInfo.Partition.DDLAction = model.ActionNone
tblInfo.Partition.OriginalPartitionIDsOrder = nil

var dropIndices []*model.IndexInfo
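The hunks above repeatedly replace hard-coded partition states and actions with values derived from the job (job.SchemaState, job.Type), and reset both fields when the job finishes. Below is a minimal, self-contained Go sketch of that pattern; the types and the advance helper are simplified stand-ins for illustration, not TiDB's actual model package or worker code.

package main

import "fmt"

// Simplified stand-ins for TiDB's model.SchemaState and model.ActionType.
type SchemaState int
type ActionType int

const (
	StateNone SchemaState = iota
	StateWriteOnly
	StateDeleteOnly
	StateDeleteReorganization
)

const (
	ActionNone ActionType = iota
	ActionDropTablePartition
)

// PartitionInfo mirrors the two fields the diff keeps in sync with the job.
type PartitionInfo struct {
	DDLState  SchemaState
	DDLAction ActionType
}

// Job carries the schema state and action type, like job.SchemaState and job.Type above.
type Job struct {
	SchemaState SchemaState
	Type        ActionType
}

// advance is a hypothetical helper: move the job to the next schema state and
// copy it into the partition metadata, so the two never disagree mid-DDL.
func advance(job *Job, pi *PartitionInfo, next SchemaState) {
	job.SchemaState = next
	pi.DDLState = job.SchemaState
	pi.DDLAction = job.Type
}

func main() {
	job := &Job{Type: ActionDropTablePartition}
	pi := &PartitionInfo{}
	for _, next := range []SchemaState{StateWriteOnly, StateDeleteOnly, StateDeleteReorganization} {
		advance(job, pi, next)
		fmt.Println(job.SchemaState, pi.DDLState, pi.DDLAction)
	}
	// On the final step both fields are cleared, as the closing hunks do.
	job.SchemaState = StateNone
	pi.DDLState, pi.DDLAction = StateNone, ActionNone
	fmt.Println(job.SchemaState, pi.DDLState, pi.DDLAction)
}

Deriving the partition fields from the job, rather than assigning literals in each branch, keeps the two state machines from drifting apart when a new state is added or a branch is reordered.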