/// class used to store huge amounts of data with fast retrieval
// - licensed under a MPL/GPL/LGPL tri-license; version 1.18
unit SynBigTable;
(*
Synopse Big Table. Copyright (C) 2016 Arnaud Bouchez
Synopse Informatique - http://synopse.info
*** BEGIN LICENSE BLOCK *****
Version: MPL 1.1/GPL 2.0/LGPL 2.1
The contents of this file are subject to the Mozilla Public License Version
1.1 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.mozilla.org/MPL
Software distributed under the License is distributed on an "AS IS" basis,
WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
for the specific language governing rights and limitations under the License.
The Original Code is Synopse Big Table.
The Initial Developer of the Original Code is Arnaud Bouchez.
Portions created by the Initial Developer are Copyright (C) 2016
the Initial Developer. All Rights Reserved.
Contributor(s):
Alternatively, the contents of this file may be used under the terms of
either the GNU General Public License Version 2 or later (the "GPL"), or
the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
in which case the provisions of the GPL or the LGPL are applicable instead
of those above. If you wish to allow use of your version of this file only
under the terms of either the GPL or the LGPL, and not to allow others to
use your version of this file under the terms of the MPL, indicate your
decision by deleting the provisions above and replace them with the notice
and other provisions required by the GPL or the LGPL. If you do not delete
the provisions above, a recipient may use your version of this file under
the terms of any one of the MPL, the GPL or the LGPL.
***** END LICENSE BLOCK *****
What are these classes meant for?
- add a simple storage and data persistence to your application, in a few KB of code
- implement a logging or read-only audit trail mechanism in your application
- access to compressed data on a CD-ROM or DVD-ROM (see our Open Source
compression libraries on our web site) - you can even add items to the list,
since they will remain in memory; but they will be lost when you close the file
- have an easy way to share data between Delphi 7 and Delphi XE applications,
without any Unicode / TStringList encoding headache
- store external resource files (think about a hidden folder) in which you
can have read and write access from your applications
- store a huge number of records with fields efficiently, using
TSynBigTableRecord: creating 1,000,000 records takes less than 900 ms, with
integrated indexes, low storage size and fast search possibilities
(this class is much faster than any SQL engine)
- store BLOB with some metadata fields associated, via TSynBigTableMetaData
(can be used e.g. to store thumbnails of pictures, or HTML/XML pages,
associated with some indexed data)
- if you don't want to install a database engine and configure it
- if you need speed and performance
- if you don't want to reinvent the wheel
- etc... etc...
What are these classes NOT for?
- interact with DBExpress or VCL database components
- replacing NTFS - go to Linux and pick the right file system you need
- storing hierarchical data (like directories) - use B-Tree and such instead
- store huge data items (maximum is 1 GB for one item) - a few MB is good,
but remember that data is stored in RAM before writing to disk
- replace a SQL database engine - use our SQLite3 framework instead, see
http://blog.synopse.info/category/Open-Source-Projects/SQLite3-Framework -
but TSynBigTableMetaData and TSynBigTableRecord allow effective field
handling, which could be sufficient as a simple database
- save your soul or make me rich...
Version 1.0
- initial release
Version 1.1
- Fix save on disk issue, when some items are deleted but none added
- enhanced unitary testing procedure
Version 1.2
- new TSynBigTableString class to store data from a UTF-8 encoded string ID
instead of a numerical ID
- added caching for last Get() items (may speed up next Get() a little bit)
- custom Get() method for range retrieval into a dynamic array
- TSynBigTable modified in order to handle custom data in header (used to
store string IDs for TSynBigTableString for instance)
- whole engine more robust against any file corruption or type mismatch
- Count property returned an incorrect value (including deleted values)
- added timing (in 1/10 ms) for test steps
- version 1.2b: even (much) faster TSynBigTableString.Add()
Version 1.3
- new Open() Read() and Seek() methods to read data like in a TStream
- new Clear method to flush the table and rebuild from scratch
- don't cache data bigger than 1 MB (to save RAM)
Version 1.4
- added RawByteStringFromFile() and FileFromRawByteString() procedures
- added TSynBigTable.AddFile() method
Version 1.7
- Thread safe version of the Synopse Big Table
Version 1.8
- new GetPart() method for retrieving a part of a stored file
(to be used especially for big file content)
- fix issue with files > 2 GB (thanks to sanyin for the report)
Version 1.9 - mostly requests and modification proposal from sanyin - thanks!
- new TSynBigTable.GetLength() method
- new TSynBigTable.ReadToStream() method
- can set additional file open mode flags in TSynBigTable.Create
- fixed an obscure possible issue for saving/loading TSynBigTableString
with string IDs bigger in size than 65535 chars
Version 1.9.2
- Range Checking forced OFF to avoid problems with some projects
- fFile type modified to THandle, instead of integer
Version 1.12
- this is a MAJOR update: the file format changed (new magics $ABAB0004/5)
- now uses SynCommons unit (avoid too much duplicated code)
- buffered writing and reading to file: major speed up of the unit,
since the Windows file access API is dead slow; for instance, reading
uses now memory-mapped files for best possible performance
- all previous caching (not working in fact) has been disabled
(the caching is now implemented more efficiently at OS level, within
memory mapped files)
- TSynBigTableString has no 65535 key length limitation any more
- values or UTF-8 keys of fixed-size are now stored in the most efficient way
- new Update() methods, allowing to change the content of any record
- new GetPointer() methods, to retrieve a pointer to the data, directly
in memory mapped buffer (faster than a standard Get() call)
- new GetAsStream() methods, to retrieve data into an in-memory stream,
pointing into the memory mapped buffer in most cases
- new GetIterating() method, which will loop into all data items, calling
a call-back with pointers to each data element (very fast method)
- fDeleted[] array now stored in ascending order, to make the whole unit faster
- NumericalID[] property is now available also in TSynBigTable (don't use it
to loop through all items, but rather the dedicated GetIterating() method)
Version 1.12a
- Offsets can now be stored as Int32 instead of Int64 (to save space in
memory for most TSynBigTable usage): the file format therefore changed
(new magics $ABAB0006/7)
- fixed issue when opening a TSynBigTableString file with updated items
- fixed a major problem in TFileBufferWriter.WriteVarUInt32Array, with
DataLayout=wkOffset and ValuesCount=1 - the same error occurred in
WriteVarUInt64DynArray with Offset=true and ValuesCount=1
- fix issue with files >= 2 GB (thanks to sanyin for another report):
memory mapped files are not well handled in this case -> direct read
from disk is used for such huge files (but faster memory mapping is
used for all files < 2 GB)
- enhanced coherency checks when loading from a TSynBigTable file
- the unit now flushes the in-memory data to disk when 256 MB of data
is stored in RAM (manual flush was confusing for some users) - see the
new BIGTABLE_AUTOFLUSH_SIZE constant
- code refactoring in TSynBigTableString: some generic functions were
moved into SynCommons
- get rid of the Open/Read/Seek methods: use GetAsStream() instead
- faster GetIterating() method, looping in physical order on disk by default,
and optionally in numerical ID order or even the fastest available order
(new TSynBigTableIterationOrder parameter)
- added an Opaque parameter to GetIterating() method and corresponding
TSynBigTableIterateEvent call-back
- new GetAllIDs() method to retrieve all IDs at once
- new GetAllPhysicalIndexes() method to retrieve all physical indexes at once
- new GetPointerFromPhysicalIndex() public method
- faster Update: in-place refresh if previous data is still in memory
- speed enhancements (e.g. TSynBigTable.Destroy) and extended test coverage
- new TSynBigTableRecord class to store data, with fields handling inside
each record, fast indexes and search possibilities, late-binding access
- new TSynBigTableMetaData class to store BLOB data, with metadata fields in
parallel (also with fast indexes and search, late-binding access)
Version 1.12b
- fixed issue with Pack method implementation, in case of updated records
- fixed issue with TSynBigTableMetaData/TSynBigTableRecord updating and
packing (and added corresponding regression tests)
- AddField methods now return a boolean and no TSynTableFieldProperties,
because these instances may be modified after a later AddFieldUpdate call
- enhanced test coverage (mostly TSynBigTableMetaData/TSynBigTableRecord)
Version 1.13
- record validation now uses the generic TSynValidate mechanism, common
with our main ORM framework, compliant with true multi-tier architecture:
use the Table.Validate method (see e.g. the updated CheckConstraints)
- record filtering now uses the generic TSynFilter mechanism, common
with our main ORM framework, compliant with true multi-tier architecture:
use Table.Filter method
- GetAsStream() method now will create a TSynMemoryStreamMapped if needed,
so will be faster than direct in-memory reading of the whole content
- fixed issue when compiled with THREADSAFE
Version 1.15
- unit now tested with Delphi XE2 (32 Bit)
Version 1.16
- new overridden TSynBigTableTable.Clear method which will clear fields
- new TSynBigTable.FileFormatCheck class method to recognize file format
- replaced sbtGetMagic kind of header with InternalMagic class function
- fixed issue in TSynTableFieldProperties.SaveTo about saving wrong indexes
- new sbtBeforeWrite step available (e.g. to safely update indexes)
Version 1.18
- unit fixed and tested with Delphi XE2..10.1 Berlin 64-bit compiler
- added TSynBigTable.FileSizeOnDisk property for [e4a1e76a32ab]
- fixed ticket [b9320499ae] about TBigTableRecord saving updated tables with
indexed fields
*)
interface
{$I Synopse.inc} // define HASINLINE USETYPEINFO CPU32 CPU64
{$ifndef LVCL}
{.$define THREADSAFE}
{ define this if you want to make the Synopse Big Table thread safe
- we internally use TMultiReadExclusiveWriteSynchronizer for better
performance
- not fully tested yet, just to ensure basic thread safety }
{$endif}
{$R-}
{ Range Checking must be set to OFF in the code below - code is safe anyway }
{$S-}
{ Stack Checking must be set to OFF in the code below }
uses
{$ifdef MSWINDOWS}
Windows,
{$else}
{$ifdef FPC}
SynFPCLinux,
{$endif}
{$endif}
Classes,
SysUtils,
{$ifdef FPC}
Variants,
{$endif}
SynCommons,
SynTests;
type
TSynBigTable = class;
/// possible actions for CustomHeader() protected virtual method
TSynBigTableCustomHeader = (sbtRead, sbtBeforeWrite, sbtWrite, sbtAfterRead);
/// the way the GetIterating() method will loop through all items
// - ioInternalID and ioInternalPhysical are internal codes used by
// GetID and GetAllPhysicalIndexes methods
TSynBigTableIterationOrder = (ioNone, ioPhysical, ioID, ioFaster,
ioInternalID, ioInternalPhysical);
/// prototype of a callback function for iterating through all items
// of a table
// - will be called following the incremental ID order
// - implementation can set the result to TRUE to break the iteration loop
// - the Data pointer is either direct access to the memory mapped buffer,
// or to a global temporary buffer: use the data only between two iterations,
// and copy the content if you need persistence
// - the DataIndex parameter is the index of the data, physically on disk
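// - a minimal callback sketch (the owner class and its fTotalSize field are
// illustrative only, not part of this unit):
// ! function TMyClass.OnItem(Sender: TObject; Opaque: pointer;
// !   ID, DataIndex: integer; Data: pointer; DataLen: integer): boolean;
// ! begin
// !   inc(fTotalSize,DataLen); // e.g. cumulate the size of every stored item
// !   result := false;         // return true to break the iteration loop
// ! end;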
TSynBigTableIterateEvent = function(Sender: TObject; Opaque: pointer;
ID, DataIndex: integer; Data: pointer; DataLen: integer): boolean of object;
/// event called after a pack, just before the UpdateToFile()
// - can be used e.g. to synchronize the field indexes
// - format of supplied parameter is NewIndexs[oldIndex] := newIndex
TSynBigTableAfterPackEvent = procedure(var NewIndexs: TIntegerDynArray) of object;
/// a class to store huge amounts of data, each item identified by an integer ID
// - data is stored in a unique file
// - retrieval is very fast (can't be faster IMHO)
// - data is appended at the end of this file when adding (using a caching
// mechanism so that adding appears immediate)
// - added data is kept temporarily in memory, until the UpdateToFile method
// is called
// - data items can be deleted
// - the file can be packed using the Pack method in order to reclaim the free
// space from deleted entries (sounds like a VACUUM command, but faster)
// - the total file size has no limit (but your hard disk, of course)
// - the size limit of one data block depends on RAM (RawByteString is used
// as storage for each data block)
// - before Delphi 2007, much faster when using the FastMM4 memory manager
// - after profiling, most of the time is spent in the Windows kernel,
// waiting for the hard disk to write the raw data; in all cases, this class is
// much faster than any SQL engine storing BLOBs, and than plain Win32 files
// - ACID behavior can be enforced by calling UpdateToFile(true)
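// - a minimal usage sketch (file name and content are illustrative only):
// ! var T: TSynBigTable;
// !     ID: integer;
// !     Data: RawByteString;
// ! begin
// !   T := TSynBigTable.Create('test.big');
// !   try
// !     ID := T.Add('some raw content');
// !     T.UpdateToFile;          // flush the in-memory additions to disk
// !     if T.Get(ID,Data) then
// !       assert(Data='some raw content');
// !   finally
// !     T.Free;                  // will also save any pending content
// !   end;
// ! end;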
TSynBigTable = class
private
function IterateGetAllID(Sender: TObject; Opaque: pointer; ID, Index: integer;
Data: pointer; DataLen: integer): boolean;
function IteratePack(Sender: TObject; Opaque: pointer; ID, Index: integer;
Data: pointer; DataLen: integer): boolean;
/// used in TestBigTable function
function TestIterateEvent(Sender: TObject; Opaque: pointer; ID, Index: integer;
Data: pointer; DataLen: integer): boolean;
function TestIterateEventSpeed(Sender: TObject; Opaque: pointer; ID, Index: integer;
Data: pointer; DataLen: integer): boolean;
/// retrieve a genuine integer value to identify the file content
class function InternalMagic: integer; virtual;
protected
fFileName: TFileName;
fCount: integer;
fCurrentID: integer;
fCurrentInMemoryDataSize: Int64;
fInMemoryCount: integer;
fInMemoryData: TRawByteStringDynArray;
fInMemoryID: TIntegerDynArray;
fDeletedCount: integer;
fAliasCount: integer;
fModified: boolean;
fFile: THandle;
/// all IDs, in the same order as the storage
// - may contain an ID from a deleted entry -> don't index it with 0..Count-1
// - Offset[] are in another dynamic array, so the code is somewhat faster
fID: TIntegerDynArray;
/// all end offsets of the storage data in the file (up to 1 GB of data)
// - may contain an offset from a deleted entry -> don't index it with 0..Count-1
// - ID[] are in another dynamic array, so the code is somewhat faster
fOffset32: TIntegerDynArray;
/// all end offsets of the storage data in the file (used for huge amount of data)
// - may contain an offset from a deleted entry -> don't index it with 0..Count-1
// - ID[] are in another dynamic array, so the code is somewhat faster
fOffset64: TInt64DynArray;
/// array of deleted items IDs
// - this array is stored in ascending order, to make search faster
fDeleted: TIntegerDynArray;
/// array of Aliases for updated records
fAliasSource, fAliasReal, fAliasRealSorted: TIntegerDynArray;
{$ifdef THREADSAFE}
fLock: TMultiReadExclusiveWriteSynchronizer;
{$endif}
fFileOpenMode: Cardinal;
fReadBuffer: TFileBufferReader;
fOnAfterPack: TSynBigTableAfterPackEvent;
function GetCount: integer; virtual;
function GetFileSizeOnDisk: Int64; virtual;
/// this default implementation can be slow, because it must loop through all
// items in case of deleted or updated items
function GetID(Index: integer): integer; virtual;
/// retrieve an index from a supplied ID
// - return -1 if this ID does not exist in the index
// - warning: returns an index even if the ID is in the fDeleted[] list
// - but handles alias IDs (after an Update() method call)
function IDToIndex(aID: integer; handleAliases: boolean=true): integer;
/// retrieve an ID from a supplied index
// - can handle alias IDs (after an Update() method call), i.e. return the
// original ID, from the index of the updated content
function IndexToID(aIndex: Integer; handleAliases: boolean=true): integer; virtual;
/// retrieve a content, from its raw index in the database
function GetFromIndex(aIndex: integer; out aData: RawByteString): boolean;
/// retrieve a pointer to a content, from its raw index in the database
function GetPointerFromIndex(aIndex: Integer; var aTempData: RawByteString;
DataLen: PInteger=nil): pointer;
/// load index from file into fID[], fMemory[] and fDeleted[]
procedure LoadFromFile;
/// returns true if the supplied file matches the current TSynBigTable class
// - allows distinguishing between all implementation classes,
// i.e. you can call TSynBigTable.FileFormatCheck,
// TSynBigTableString.FileFormatCheck, TSynBigTableRecord.FileFormatCheck
// or TSynBigTableMetaData.FileFormatCheck to verify a file layout
// - uses the protected virtual InternalMagic method
class function FileFormatCheck(const aFileName: TFileName): boolean;
/// virtual method which can be used to store additional data into the header
// - does nothing by default in TSynBigTable (meaning no additional header data)
// - if Action is sbtBeforeWrite, prepare the data before writing: at the
// sbtWrite step, update data won't be available any more (fReadBuffer is
// already closed so e.g. indexes should be recreated in this first step)
// - if Action is sbtWrite, write the custom header data to WriteBuffer
// - if Action is sbtRead, read the custom header data from fReadBuffer
// - if Action is sbtAfterRead, all headers have been read from disk: the
// class can therefore synchronize its content with the data (to recreate
// any missing index, for instance)
function CustomHeader(Action: TSynBigTableCustomHeader;
WriteBuffer: TFileBufferWriter): integer; virtual;
/// retrieve an offset for a specified physical ID
// - read from either fOffset32[] or fOffset64[]
function GetOffset(Index: integer): Int64;
/// retrieve an index from a supplied ID, and the associated AliasReal[] index
function InternalIDToIndex(aID: integer; var aAliasIndex: integer): integer;
/// update the content, returning the updated physical ID
function InternalUpdateFromIndex(const aData: RawByteString;
aID, aIndex, aAliasIndex: integer; var aNewIndex: cardinal): integer;
/// will recreate the file processing all deleted or updated data, following
// the ID order
// - the callback must write the content (after any possible content change)
// to the Opaque.WR file writer
procedure RecreateFileContent(aCallBack: TSynBigTableIterateEvent;
const OpaqueFields: TSQLFieldBits=[]);
public
/// initialize the database engine with a supplied filename
// - there is no internal cache parameter any more: caching is now performed
// at OS level, within the memory mapped file buffers
// - you can specify custom file open mode attributes on request, like
// fmShareDenyNone (not set by default, for safety reasons)
constructor Create(const aFileName: TFileName; FileOpenMode: Cardinal = 0); reintroduce;
/// finalize memory, and save all content
destructor Destroy; override;
/// clear the whole table content and indexes
procedure Clear; virtual;
/// add a data to the database
// - return the unique ID created to identify this data
// - you can force a specified ID number, by setting a non null value to
// ForcedID (in this case, it MUST be added in increasing order)
// - return 0 on error, otherwise the created Physical Index can be stored
// - OldPhysicalIndex is to be used in case of constraint check (for
// TSynBigTableRecord and TSynBigTableMetaData)
function Add(const aData: RawByteString; ForcedID: integer=0;
PhysicalIndex: PInteger=nil; OldPhysicalIndex: integer=-1): integer; virtual;
/// add a file content to the database
// - return the unique ID created to identify this data
// - return 0 on error (e.g. specified file doesn't exist)
function AddFile(const aFileName: TFileName): integer;
/// retrieve a content, from a supplied ID
// - return TRUE if found, FALSE if the ID does not exist (or was deleted)
// - return the data into aData, or '' if the ID does not exist (or was deleted)
function Get(aID: integer; out aData: RawByteString): boolean; overload;
/// retrieve a content, from a supplied ID, into a TStream
// - this method is faster than Get() into a RawByteString, because the
// data is not moved from memory but mapped into a TCustomMemoryStream
// - if the ID is not correct, returns nil
// - if the ID is correct, returns a TStream instance able to access
// the associated content
// - in most cases, this TStream is just a wrapper around the memory mapped
// buffer in memory
// - the TStream must be consumed immediately, before any Pack or
// UpdateToFile method calls
// - the caller must Free the returned TStream instance
// - if the data is not already memory mapped (i.e. for files >= 2 GB)
// a custom TSynMemoryStreamMapped is used to access the data from disk
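// - a minimal usage sketch (aID is assumed to be a valid identifier, and
// Dest any destination TStream):
// ! S := T.GetAsStream(aID);
// ! if S<>nil then
// ! try
// !   Dest.CopyFrom(S,S.Size);   // e.g. copy the whole mapped content
// ! finally
// !   S.Free;
// ! end;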
function GetAsStream(aID: integer): TStream;
/// retrieve the length of a content, from a supplied ID
// - return -1 if the ID was not found, or the length (in bytes) of this ID content
function GetLength(aID: integer): integer;
/// retrieve a content, from a supplied ID, into a pointer
// - returns nil on error
// - returns a pointer to the data on success, directly from the memory mapped
// file in most cases; if the data is not in a memory mapped buffer (i.e.
// for files >= 2 GB) the aTempData variable is used to read the data from disk
// - in case of success, if DataLen is not nil, it will be filled with the
// corresponding data length
// - this method is therefore much faster than Get()
function GetPointer(aID: Integer; var aTempData: RawByteString;
DataLen: PInteger=nil): pointer;
/// retrieve a content, from a supplied ID, into a pointer
// - returns nil on error
// - returns a pointer to the data on success, directly from the memory mapped
// file in most cases; if the data is not in a memory mapped buffer (i.e.
// for files >= 2 GB) the aTempData variable is used to read the data from disk
// - this method is not thread-safe (but is therefore faster)
function GetPointerFromPhysicalIndex(aPhysicalIndex: integer;
var aTempData: RawByteString): pointer;
/// call a supplied Event method by iterating through all table items
// - Event will be called following the physical order of data in the disk
// file (somewhat faster), or in incremental ID order, depending on the Order parameter
// - Event can set the result to TRUE to break the iteration loop
// - the Opaque parameter will be supplied to the callback Event
// - set DontRetrieveData to TRUE if you don't need any data to be set
// in the callback, but only the ID (faster)
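// - a minimal call sketch, using a callback method matching the
// TSynBigTableIterateEvent prototype (see the sample near its declaration;
// MyObject is illustrative only):
// ! T.GetIterating(MyObject.OnItem,ioID);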
procedure GetIterating(aCallBack: TSynBigTableIterateEvent;
Order: TSynBigTableIterationOrder=ioPhysical; Opaque: pointer=nil;
DontRetrieveData: Boolean=false);
/// fast retrieval of all IDs
// - returned in physical or increasing ID value order
// - returns the number of IDs stored in the integer array
function GetAllIDs(var IDs: TIntegerDynArray;
Order: TSynBigTableIterationOrder=ioPhysical): integer; virtual;
/// fast retrieval of all used items physical indexes
// - returned in physical order
// - returns the number of Indexes stored in the integer array
function GetAllPhysicalIndexes(var Indexes: TIntegerDynArray): integer;
/// update a record content in the database
// - in fact, a new invisible record is created, and an alias will map this
// new record to the original ID
// - the physical replacement will take place only during Pack method call
// - returns the new content ID value on success
// - returns 0 on error
function Update(aID: Integer; const aData: RawByteString;
PhysicalIndexOldNew: PInt64=nil): integer; virtual;
/// retrieve a part of a file content
// - faster than the Open/Seek/Read methods, which load the whole content in memory
// before Seek and Read
// - only the needed part of data is copied into aData
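// - a minimal usage sketch, retrieving the first KB of a stored content
// (aID is assumed to be a valid identifier):
// ! if T.GetPart(aID,0,1024,Chunk) then // Chunk is a RawByteString variable
// !   ProcessChunk(Chunk);              // ProcessChunk() is illustrative only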
function GetPart(aID: integer; Offset, Len: Integer; out aData: RawByteString): boolean;
/// retrieve a list of content, from a supplied ID range (including boundaries)
// - return TRUE if all were found, FALSE if some IDs do not exist (or were deleted)
// - return the data into aData[], or '' for any particular ID that does not
// exist (or was deleted); after the call, length(aData)=aIDLast-aIDFirst+1
function Get(aIDFirst, aIDLast: integer; out aData: TRawByteStringDynArray): boolean; overload;
/// delete an ID
function Delete(aID: integer; PhysicalIndex: PInteger=nil): boolean; virtual;
/// pack the database, i.e. delete all formerly deleted ID from the disk
// - if forceFlushOnDisk is TRUE, data is forced to be saved on the disk
// (slower, but allows ACID behavior of the database file)
procedure Pack(forceFlushOnDisk: boolean=false);
/// save the last added entries into the file
// - does nothing if nothing is to be written (if forceAlwaysWrite is false)
// - can be called from time to time, after checking CurrentInMemoryDataSize
// - if forceFlushOnDisk is TRUE, data is forced to be saved on the disk
// (slower, but allows ACID behavior of the database file)
procedure UpdateToFile(forceFlushOnDisk: boolean=false;
dontReopenReadBuffer: boolean=false);
/// the entries count
property Count: integer read GetCount;
/// the associated filename storing the database
property FileName: TFileName read fFileName;
/// returns the current in memory data size (in bytes)
// - i.e. the data size not written yet to the disk
// - can be used to regularly flush the data to disk by calling the UpdateToFile
// method when this value reaches a certain limit
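// - a minimal flushing sketch, using a lower threshold than the automatic
// BIGTABLE_AUTOFLUSH_SIZE limit defined below:
// ! if T.CurrentInMemoryDataSize>32 shl 20 then // flush every 32 MB
// !   T.UpdateToFile;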
property CurrentInMemoryDataSize: Int64 read fCurrentInMemoryDataSize;
/// returns the current data size stored on disk
property FileSizeOnDisk: Int64 read GetFileSizeOnDisk;
/// retrieve an offset for a specified physical ID
// - read from either fOffset32[] or fOffset64[]
property Offset[Index: integer]: Int64 read GetOffset;
/// read-only access to a numerical ID, from its index
// - index is from NumericalID[0] to NumericalID[Count-1]
// - follows the numerical ID order for TSynBigTable, and the alphabetical
// order of UTF-8 keys for TSynBigTableString
// - return 0 in case of out of range index
// - this method can be slow with TSynBigTable (if there are some deleted or
// updated items - just call the Pack method to improve speed); but with
// a TSynBigTableString instance, it will always be fast
// - don't use it to loop through all items, but rather the dedicated
// GetIterating() or GetAllIDs() fast methods
property NumericalID[Index: integer]: integer read GetID;
/// event called after a pack, just before the UpdateToFile() call
// - can be used e.g. to synchronize the field indexes
property OnAfterPack: TSynBigTableAfterPackEvent read fOnAfterPack write fOnAfterPack;
end;
/// a class to store huge amounts of data, each item identified by a string ID
// - string IDs are case-sensitive (important warning)
// - string IDs are of RawUTF8 type, so you must make an explicit conversion
// in your program to the native generic string type - you can use our
// Utf8ToString() and StringToUtf8() functions, which work for
// all versions of Delphi (from Delphi 6 up to XE)
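// - a minimal usage sketch (file name, key and content are illustrative only):
// ! var T: TSynBigTableString;
// !     Data: RawByteString;
// ! begin
// !   T := TSynBigTableString.Create('keys.big');
// !   try
// !     T.Add('some raw content','mykey');
// !     if T.Get('mykey',Data) then
// !       assert(Data='some raw content');
// !   finally
// !     T.Free;
// !   end;
// ! end;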
TSynBigTableString = class(TSynBigTable)
private
function GetStringID(Index: integer): RawUTF8;
protected
/// = real Count property = fCount-fDeletedCount
fHeaderCount: integer;
/// store String IDs (in alpha-sorted order)
fHeaderString: TRawUTF8DynArray;
/// store associated numerical IDs (in alpha-sorted order)
fHeaderID: TIntegerDynArray;
/// overridden method which handles String ID loading and writing
function CustomHeader(Action: TSynBigTableCustomHeader;
WriteBuffer: TFileBufferWriter): integer; override;
class function InternalMagic: integer; override;
/// faster method using fHeaderID[]
function GetID(Index: integer): integer; override;
/// faster method returning fHeaderCount
function GetCount: integer; override;
public
/// clear the whole table content and indexes
procedure Clear; override;
/// add a data to the database, and its associated string ID
// - return the unique numerical ID created to identify this data
// - return 0 if the string ID is invalid (i.e. void or already used)
function Add(const aData: RawByteString; const aID: RawUTF8; ForcedID: integer=0): integer; reintroduce;
/// retrieve a content, from a supplied string ID
// - return TRUE if found, FALSE if this ID does not exist (or was deleted)
// - return the data into aData, or '' if the ID does not exist (or was deleted)
function Get(const aID: RawUTF8; out aData: RawByteString): boolean; overload;
/// retrieve a content, from a supplied ID, into a pointer
// - returns nil on error
// - returns a pointer to the data on success, directly from the memory mapped
// file in most cases; if the data is not in a memory mapped buffer (i.e.
// for files >= 2 GB) the aTempData variable is used to read the data from disk
// - in case of success, if DataLen is not nil, it will be filled with the
// corresponding data length
// - this method is therefore much faster than Get() for big data sizes
function GetPointer(const aID: RawUTF8; var aTempData: RawByteString;
DataLen: PInteger=nil): pointer; overload;
/// retrieve a content, from a supplied ID, into a TStream
// - if the ID is not correct, returns nil
// - if the ID is correct, returns a TStream instance able to access
// the associated content
// - in most cases, this TStream is just a wrapper around the memory mapped
// buffer in memory
// - the TStream must be consumed immediately, before any Pack or
// UpdateToFile method calls
// - the caller must Free the returned TStream instance
// - if the data is not already memory mapped (i.e. for files >= 2 GB)
// a custom TSynMemoryStreamMapped is used to access the data from disk
function GetAsStream(const aID: RawUTF8): TStream; overload;
/// fast retrieval of all IDs
// - this overridden method handles ioFaster order, i.e. the fHeaderID[] content
// - returns the number of IDs stored in the integer array
function GetAllIDs(var IDs: TIntegerDynArray;
Order: TSynBigTableIterationOrder=ioPhysical): integer; override;
/// delete an entry from its numerical ID
function Delete(aID: integer; PhysicalIndex: PInteger=nil): boolean; overload; override;
/// delete an entry from its string ID
// - return true if the record was successfully deleted
function Delete(const aID: RawUTF8): boolean; reintroduce; overload;
/// update a record content in the database
// - return true if the record was successfully updated
function Update(const aData: RawByteString; const aID: RawUTF8): boolean; reintroduce; overload;
/// retrieve a numerical ID from a UTF-8 encoded string ID
// - return 0 if this string ID was not found
function StringToID(const aID: RawUTF8): integer;
/// retrieve the UTF-8 encoded string ID of a given numerical ID
// - return '' if this ID was not found
function IDToString(aID: integer): RawUTF8;
/// read-only access to a string ID, from its index
// - index is from StringID[0] to StringID[Count-1]
// - string IDs are alphabetically sorted
// - return '' in case of out of range index
property StringID[Index: integer]: RawUTF8 read GetStringID;
end;
TSynBigTableTableClass = class of TSynBigTableTable;
/// an abstract class, associating a TSynTable to a Big Table
// - use optimized TSynTable logic for handling field values, using
// our SBF compact binary format (similar to BSON or Protocol Buffers)
TSynBigTableTable = class(TSynBigTable)
protected
fTableName: RawUTF8;
fTable: TSynTable;
/// used by Search() method below
function IterateSearch(Sender: TObject; Opaque: pointer; ID, Index: integer;
Data: pointer; DataLen: integer): boolean;
/// false e.g. if a tftUnique constraint failed
// - RecordIndex=-1 in case of adding, or the physical index of the updated record
function CheckConstraints(const aRecordData: RawByteString; RecordIndex: integer): boolean;
{$ifdef HASINLINE}inline;{$endif}
/// internal version used by Search() method
procedure SearchIterating(aCallBack: TSynBigTableIterateEvent;
Order: TSynBigTableIterationOrder; Opaque: pointer); virtual; abstract;
public
/// initialize the database engine with a supplied filename
// - you can specify an internal Table Name, similar to SQL table name
// - you should rather call the reintroduced constructor of either TSynBigTableMetaData
// or TSynBigTableRecord, which will set the GetRecordData parameter as expected
constructor Create(const aFileName: TFileName; const aTableName: RawUTF8;
GetRecordData: TSynTableGetRecordData; FileOpenMode: Cardinal = 0); reintroduce;
/// finalize memory, and save all content
destructor Destroy; override;
/// add a field description to the table
// - just a wrapper to the Table.AddField method
// - warning: the caller must call the AddFieldUpdate method when all
// AddField() calls have been made, in order to process any already existing
// data into the resulting new field layout (see the sample sketch below)
// - physical order does not necessarily follow the AddField() call order:
// for better performance, it will try to store fixed-size fields first,
// multiples of 4 bytes first (access is faster if data is 4 byte aligned),
// then variable-length fields after the fixed-size ones; in all cases, an
// indexed field will be put first
// - if tfoUnique is set in aOptions and there is already some data, this
// method will raise an exception: it's not possible to have multiple void
// values be unique, so the constraint would always fail
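// - a minimal field definition sketch, assuming the tftUTF8/tftInt32 field
// kinds and the tfoIndex option as defined in SynCommons (field names are
// illustrative only):
// ! T.AddField('Name',tftUTF8,[tfoIndex]);
// ! T.AddField('Age',tftInt32);
// ! T.AddFieldUpdate; // mandatory once all AddField() calls have been made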
function AddField(const aName: RawUTF8; aType: TSynTableFieldType;
aOptions: TSynTableFieldOptions=[]): boolean;
/// this method must be called after calls to AddField/Table.AddField
// methods
// - this will launch the recreation of the database file content, if some
// fields were effectively added (to map the new field layout): in this case
// some default void value is set for all newly added fields
// - for TSynBigTableRecord, this method may recreate the field order then
// reload all field instances: you must retrieve all TSynTableFieldProperties
// instances after this method call via code like
// ! aField := Table.Table['FieldName'];
procedure AddFieldUpdate; virtual; abstract;
/// search for a matching value in a given field
// - add the matching IDs in ResultID[] (in sorted order, with no duplicates),
// and update the number of matching elements in ResultIDCount (for performance
// reasons, the ResultID[] array may remain filled with 0 beyond the
// ResultID[ResultIDCount-1] item)
// - will use any existing index, or will iterate through all data (slower)
// if the ForceIterate parameter is either ioPhysical or ioID
// - the Limit parameter is similar to the SQL LIMIT clause: if greater than 0,
// an upper bound on the number of rows returned is placed (e.g. set Limit=1
// to only retrieve the first match)
function Search(Field: TSynTableFieldProperties; const WhereValue: TSBFString;
var ResultID: TIntegerDynArray; var ResultIDCount: integer; Limit: Integer=0;
ForceIterate: TSynBigTableIterationOrder=ioNone): boolean; overload;
{$ifndef LVCL}
/// search for a matching value in a given field
// - add the matching IDs in ResultID[] (in sorted order, with no duplicates)
// - will use any existing index, or will iterate through all data (slower)
// if the ForceIterate parameter is either ioPhysical or ioID
// - the Limit parameter is similar to the SQL LIMIT clause: if greater than 0,
// an upper bound on the number of rows returned is placed (e.g. set Limit=1
// to only retrieve the first match)
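// - a minimal search sketch, using this variant overload (field name and
// value are illustrative only):
// ! var IDs: TIntegerDynArray;
// !     IDCount: integer;
// ! begin
// !   IDCount := 0;
// !   if T.Search(T.Table['Name'],'Alice',IDs,IDCount) then
// !     writeln(IDCount,' matching ID(s) returned in IDs[]');
// ! end;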
function Search(Field: TSynTableFieldProperties; const WhereValue: variant;
var ResultID: TIntegerDynArray; var ResultIDCount: integer; Limit: Integer=0;
ForceIterate: TSynBigTableIterationOrder=ioNone): boolean; overload;
{$ifdef HASINLINE}inline;{$endif}
{$endif}
/// clear the whole table content and indexes
// - also delete the field layout
procedure Clear; override;
/// the associated field description
property Table: TSynTable read fTable;
/// the internal Table Name
property TableName: RawUTF8 read fTableName;
public
{$ifndef LVCL}
/// retrieve a void TSynTableVariantType variant instance
// - similar to a Table.Data call
function VariantVoid: Variant;
/// retrieve a TSynTableVariantType variant to access a record properties
function VariantGet(aID: integer): Variant; virtual; abstract;
{$endif}
/// retrieve a record as a TSynTableData to access its properties
// - using TSynTableData is faster than a TSynTableVariantType variant
function RecordGet(aID: integer): TSynTableData; overload;
{$ifdef HASINLINE}inline;{$endif}
/// retrieve a record as a TSynTableData to access its properties
// - using TSynTableData is faster than a TSynTableVariantType variant
// - this overloaded method doesn't use a function result, therefore
// avoids copying the record content (faster)
procedure RecordGet(aID: integer; var result: TSynTableData); overload; virtual; abstract;
/// update a Record from a given TSynTableData content
// - using TSynTableData is faster than a TSynTableVariantType variant
// - aRecord.ID is used to identify the record for calling raw Update()
// - returns TRUE on success, FALSE on error (e.g. tftUnique constraint failure)
// - for TSynBigTableMetaData, only update the metadata content, not the
// main record content
function RecordUpdate(const aDataRecord: TSynTableData): boolean; virtual; abstract;
end;
/// a class to store huge data (like file content), with metadata fields
// associated with every record
// - this class will store the fields in memory, and use TSynBigTable records
// to store some huge data blocks (e.g. file content), whereas TSynBigTableRecord
// will store the fields in the records: TSynBigTableRecord is preferred for
// a huge number of records, and TSynBigTableMetaData is designed for fewer
// records, but will natively handle associated "blob-like" data. For instance,
// TSynBigTableRecord would be the right class to implement a logging table,
// whereas TSynBigTableMetaData would be ideal for storing pictures.
// - use optimized TSynTable logic for handling metadata field values, using
// our SBF compact binary format (similar to BSON or Protocol Buffers)
// - you can access any metadata field by using a custom
// TSynTableVariantType variant type, allowing late-binding in the code
// (this method is slower than direct access to the data due to the Variant
// overhead, but is perhaps more convenient)
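// - a minimal usage sketch on a newly created file, assuming the tftUTF8
// field kind and the tfoUnique option from SynCommons, and late-binding
// write access via TSynTableVariantType (all names are illustrative only):
// ! var T: TSynBigTableMetaData;
// !     V: Variant;
// ! begin
// !   T := TSynBigTableMetaData.Create('pictures.big','pictures');
// !   try
// !     T.AddField('FileName',tftUTF8,[tfoUnique]);
// !     T.AddFieldUpdate;
// !     V := T.VariantVoid;
// !     V.FileName := 'summer.jpg';
// !     T.VariantAdd(PictureRawContent,V); // PictureRawContent: RawByteString
// !   finally
// !     T.Free;
// !   end;
// ! end;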
TSynBigTableMetaData = class(TSynBigTableTable)
protected
fMetaDataCount: Integer;
fMetaDataRecords: TRawByteStringDynArray;
fMetaDataID: TIntegerDynArray;
/// retrieve pointer to the metadata, SBF-encoded
function GetMetaData(aPhysicalIndex: integer; var aTempData: RawByteString): pointer;
/// faster method using fMetaDataID[]
function GetID(Index: integer): integer; override;
/// retrieve an ID from a supplied index
// - can handle alias IDs (after an Update() method call), i.e. return the
// original ID, from the index of the updated content
function IndexToID(aIndex: Integer; handleAliases: boolean=true): integer; override;
/// faster method returning fMetaDataCount
function GetCount: integer; override;
/// overridden method which handle field description loading and writing
function CustomHeader(Action: TSynBigTableCustomHeader;
WriteBuffer: TFileBufferWriter): integer; override;
class function InternalMagic: integer; override;
/// internal version used by Search() method
// - only handles the internal order for now (i.e. ioID)
procedure SearchIterating(aCallBack: TSynBigTableIterateEvent;
Order: TSynBigTableIterationOrder; Opaque: pointer); override;
public
/// initialize the database engine with a supplied filename
// - you can specify an internal Table Name, similar to SQL table name
constructor Create(const aFileName: TFileName; const aTableName: RawUTF8;
FileOpenMode: Cardinal = 0); reintroduce;
/// this method must be called after calls to AddField/Table.AddField
// methods
// - this will launch the recreation of the database file content, if some
// fields were effectively added (to map the new field layout): in this case
// some default void value is set for all newly added fields
// - for TSynBigTableMetaData, this method may recreate the field order, but
// won't change the TSynTableFieldProperties instances
procedure AddFieldUpdate; override;
/// retrieve the metadata record of a given ID, encoded in our SBF format
// - it could be more convenient to use VariantGet() or even the faster
// RecordGet() methods
function GetMetaDataFromID(aID: integer): pointer;
/// overridden method to delete an entry from its numerical ID
// - this method will handle the metadata fields synchronization
function Delete(aID: integer; PhysicalIndex: PInteger=nil): boolean; override;
/// add a data item with its associated metadata record to the table
// - the metadata record uses our SBF encoding, and is mandatory
// - returns the unique ID created to identify this data
// - returns 0 on adding error (e.g. if a tftUnique constraint failed, or
// if the supplied aMetaData is void)
function Add(const aData: RawByteString; const aMetaData: TSBFString): integer; reintroduce; overload;
/// update a metadata record using our SBF encoding
// - returns TRUE on success, FALSE on error (e.g. tftUnique constraint failure)
// - this method will only update the meta data - the main record data must
// be updated with the inherited Update() method
function Update(aID: integer; const aMetaData: TSBFString): boolean; reintroduce; overload;
public
{$ifndef LVCL}
/// add a data item with its associated metadata record to the table
// - the metadata record is a TSynTableVariantType variant
// - returns the unique ID created to identify this data
// - returns 0 on adding error (e.g. if a tftUnique constraint failed)
function VariantAdd(const aData: RawByteString; const aMetaDataRecord: Variant): integer;
/// retrieve a TSynTableVariantType variant to access a record metadata
function VariantGet(aID: integer): Variant; override;
/// update a metadata record as TSynTableVariantType variant
// - aRecord.ID is used to identify the record for calling raw Update()
// - returns TRUE on success, FALSE on error (e.g. tftUnique constraint
// failure, or wrong variant type)
// - this method will only update the meta data - the main record data must
// be updated with the inherited Update() method
function VariantUpdate(const aMetaDataRecord: Variant): boolean;
{$endif}
/// add a record to the table, with associated meta data
// - using TSynTableData is faster than a TSynTableVariantType variant
// - return the unique ID created to identify this data
// - returns 0 on adding error (e.g. if a tftUnique constraint failed)
function RecordAdd(const aData: RawByteString;
const aMetaDataRecord: TSynTableData): integer;
/// retrieve a record as a TSynTableData to access its properties
// - using TSynTableData is faster than a TSynTableVariantType variant
// - this overloaded method doesn't use a function result, therefore
// avoids copying the record content (faster)
procedure RecordGet(aID: integer; var result: TSynTableData); overload; override;
/// update a Record from a given TSynTableData content
// - using TSynTableData is faster than a TSynTableVariantType variant
// - aRecord.ID is used to identify the record for calling raw Update()
// - returns TRUE on success, FALSE on error (e.g. tftUnique constraint failure)
// - this method will only update the meta data - the main record data must
// be updated with the inherited Update() method
function RecordUpdate(const aMetaDataRecord: TSynTableData): boolean; override;
end;
/// a class to store huge amounts of data, with fields in every record
// - this class will store the fields in the TSynBigTable records, whereas
// TSynBigTableMetaData will store the fields in memory, and will use records
// to store some huge data blocks (e.g. file content): TSynBigTableRecord is
// preferred for a huge number of records, and TSynBigTableMetaData is designed
// for fewer records, but with associated "blob-like" data. For instance,
// TSynBigTableRecord would be the right class to implement a logging table,
// whereas TSynBigTableMetaData would be ideal for storing pictures.
// - use optimized TSynTable logic for handling field values, using
// our SBF compact binary format (similar to BSON or Protocol Buffers)
// - you can access any record content field by using a custom
// TSynTableVariantType variant type, allowing late-binding in the code
// (this method is slower than direct access to the data due to the Variant
// overhead, but is perhaps more convenient)
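// - a minimal usage sketch on a newly created file, assuming the
// tftUTF8/tftInt32 field kinds and the tfoIndex option from SynCommons, and
// late-binding write access via TSynTableVariantType (all names are
// illustrative only):
// ! var T: TSynBigTableRecord;
// !     V: Variant;
// ! begin
// !   T := TSynBigTableRecord.Create('people.big','people');
// !   try
// !     T.AddField('Name',tftUTF8,[tfoIndex]);
// !     T.AddField('Age',tftInt32);
// !     T.AddFieldUpdate;
// !     V := T.VariantVoid;
// !     V.Name := 'Alice';
// !     V.Age := 30;
// !     T.VariantAdd(V);
// !   finally
// !     T.Free;
// !   end;
// ! end;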
TSynBigTableRecord = class(TSynBigTableTable)
protected
/// overridden method which handle field description loading and writing
function CustomHeader(Action: TSynBigTableCustomHeader;
WriteBuffer: TFileBufferWriter): integer; override;
class function InternalMagic: integer; override;
/// version used by Search() method, calling the default GetIterating method
procedure SearchIterating(aCallBack: TSynBigTableIterateEvent;
Order: TSynBigTableIterationOrder; Opaque: pointer); override;
/// refresh all field indexes
procedure RefreshIndexes(var NewIndexs: TIntegerDynArray);
public
/// initialize the database engine with a supplied filename
// - you can specify an internal Table Name, similar to SQL table name
constructor Create(const aFileName: TFileName; const aTableName: RawUTF8;
FileOpenMode: Cardinal = 0); reintroduce;
/// this method must be called after calls to AddField/Table.AddField
// methods
// - this will launch the recreation of the database file content, if some
// fields were effectively added (to map the new field layout): in this case
// some default void value is set for all newly added fields
// - for TSynBigTableRecord, this method may recreate the field order then
// reload all field instances: you must retrieve all TSynTableFieldProperties
// instances after this method call via code like
// ! aField := Table.Table['FieldName'];
procedure AddFieldUpdate; override;
public
/// overridden method to add a record to the database
// - this method will handle the field indexes synchronization
// - returns 0 on adding error (e.g. if a tftUnique constraint failed)
function Add(const aData: RawByteString; ForcedID: integer=0;
PhysicalIndex: PInteger=nil; OldPhysicalIndex: integer=-1): integer; override;
/// overridden method to delete an entry from its numerical ID
// - this method will handle the field indexes synchronization
function Delete(aID: integer; PhysicalIndex: PInteger=nil): boolean; override;
/// overridden method to update a record content in the database
// - returns 0 on updating error (e.g. if a tftUnique constraint failed)
function Update(aID: Integer; const aData: RawByteString;
PhysicalIndexOldNew: PInt64=nil): integer; override;
public
{$ifndef LVCL}
/// add a record to the table
// - the record is a TSynTableVariantType variant
// - returns the unique ID created to identify this data
// - returns 0 on adding error (e.g. if a tftUnique constraint failed)
function VariantAdd(const aRecord: Variant): integer;
/// retrieve a TSynTableVariantType variant to access a record properties
function VariantGet(aID: integer): Variant; override;
/// update a TSynTableVariantType variant
// - aRecord.ID is used to identify the record for calling raw Update()
// - returns TRUE on success, FALSE on error (e.g. tftUnique constraint failure)
function VariantUpdate(const aRecord: Variant): boolean;
{$endif}
/// add a record to the table
// - using TSynTableData is faster than a TSynTableVariantType variant
// - return the unique ID created to identify this data
// - you can specify an expected ID to be used in the aForcedID parameter
// - returns 0 on adding error (e.g. if a tftUnique constraint failed)
function RecordAdd(const aRecord: TSynTableData; aForcedID: integer=0): integer;
/// retrieve a record as a TSynTableData to access its properties
// - using TSynTableData is faster than a TSynTableVariantType variant
// - this overloaded method doesn't use a function result, therefore
// avoids copying the record content (faster)
procedure RecordGet(aID: integer; var result: TSynTableData); overload; override;
/// update a Record from a given TSynTableData content
// - using TSynTableData is faster than a TSynTableVariantType variant
// - aRecord.ID is used to identify the record for calling raw Update()
// - returns TRUE on success, FALSE on error (e.g. tftUnique constraint failure)
function RecordUpdate(const aRecord: TSynTableData): boolean; override;
end;
/// unitary testing of the SynBigTable unit
TTestBigTable = class(TSynTestCase)
protected
FN: TFileName;
Updated: TIntegerDynArray;
UpdatedCount: integer;
Deleted: TIntegerDynArray;
class function CreateString(aID: integer; n: integer=0): RawByteString;
function TestString(aID: integer; const Data: RawByteString): boolean;
function TestStream(aID: integer; Data: TStream): boolean;
function Delete(var T: TSynBigTable; n: integer; Pack,withString: boolean): boolean;
function DoFieldTest(DoRecord: boolean; n: integer): boolean;
published
/// test TSynBigTable class
procedure _TSynBigTable;
/// test TSynBigTableString class
procedure _TSynBigTableString;
/// test TSynBigTableMetaData class
procedure _TSynBigTableMetaData;
/// test TSynBigTableRecord class
procedure _TSynBigTableRecord;
end;
{$ifndef FPC}
/// unitary test function of the TSynBigTable class
// - return TRUE if test was OK, FALSE on any error
function TestBigTable: boolean;
{$endif}
const
/// will flush the in-memory data to disk when 256 MB of data is reached in RAM
BIGTABLE_AUTOFLUSH_SIZE = 1 shl 28;
implementation
{$ifndef FPC}
function TestBigTable: boolean;
procedure SetS(var s: RawUTF8; i: cardinal);
var p: array[0..3] of Word;
begin
p[2] := TwoDigitLookupW[(i div 100)mod 100];
p[3] := TwoDigitLookupW[i mod 100];
i := i div 10000;
p[0] := TwoDigitLookupW[i div 100];
p[1] := TwoDigitLookupW[i mod 100];
SetString(s,PAnsiChar(@p[0]),8);
end;
var
Updated: TIntegerDynArray;
UpdatedCount: integer;
function TestString(aID: integer; const Data: RawByteString): boolean;
var i: integer;
begin
result := false;
if FastFindIntegerSorted(pointer(Updated),UpdatedCount-1,aID)>=0 then
inc(aID,$01010101); // an updated ID has had its content refreshed
for i := 0 to (length(Data) shr 2)-1 do
if PIntegerArray(Data)^[i]<>aID then
exit;
result := true;
end;
function TestStream(aID: integer; Data: TStream): boolean;
var i: integer;
begin
result := false;
if not Data.InheritsFrom(TCustomMemoryStream) then
exit;
if FastFindIntegerSorted(pointer(Updated),UpdatedCount-1,aID)>=0 then
inc(aID,$01010101); // an updated ID has had its content refreshed
with TCustomMemoryStream(Data) do
for i := 0 to (Size shr 2)-1 do
if PIntegerArray(Memory)^[i]<>aID then