I need to read the last line of some very large text files (to get the timestamp from the data). TStringList would be a simple approach, but it returns an out-of-memory error. I'm trying to use Seek and BlockRead, but the characters in the buffer are all nonsense. Is this something to do with Unicode?
Function TForm1.ReadLastLine2(FileName: String): String;
var
  FileHandle: File;
  s, line: string;
  ok: 0..1;
  Buf: array[1..8] of Char;
  k: longword;
  i, ReadCount: integer;
begin
  AssignFile(FileHandle, FileName);
  Reset(FileHandle); // or for binary files: Reset(FileHandle, 1);
  ok := 0;
  k := FileSize(FileHandle);
  Seek(FileHandle, k - 1);
  s := '';
  while ok <> 1 do begin
    BlockRead(FileHandle, Buf, SizeOf(Buf) - 1, ReadCount); // BlockRead(var FileHandle: File; var Buffer; RecordCount: Integer {; var RecordsRead: Integer});
    if ord(Buf[1]) <> 13 then // Arg to integer
      s := s + Buf[1]
    else
      ok := ok + 1;
    k := k - 1;
    Seek(FileHandle, k);
  end;
  CloseFile(FileHandle);
  // Reverse the order in the line read
  SetLength(line, Length(s));
  for i := 1 to Length(s) do
    line[Length(s) - i + 1] := s[i];
  Result := line;
end;
Based on www.delphipages.com/forum/showthread.php?t=102965
The test file is a simple CSV I created in Excel (this is not the 100 MB file I ultimately need to read).
a,b,c,d,e,f,g,h,i,j,blank
A,B,C,D,E,F,G,H,I,J,blank
1,2,3,4,5,6,7,8,9,0,blank
Mary,had,a,little,lamb,His,fleece,was,white,as,snow
And,everywhere,that,Mary,went,The,lamb,was,sure,to,go
You really have to read the file in LARGE chunks from the tail to the head.
Since the file is so large that it does not fit into memory, reading it line by line from start to end would be very slow - and with ReadLn, doubly slow.
You also have to be prepared for the last line to end with an EOL or not.
Personally I would also account for three possible EOL sequences:
CR/LF aka #13#10=^M^J - DOS/Windows style
CR without LF - just #13=^M - Classic MacOS file
LF without CR - just #10=^J - UNIX style, including Mac OS X
If you are sure your CSV files will only ever be generated by native Windows programs, it is safe to assume the full CR/LF is used. But if they can come from Java programs, non-Windows platforms, or mobile programs, I would be less sure. Of course, pure CR without LF is the least probable case of them all.
uses System.SysUtils, System.IOUtils, System.Math, System.Classes, System.Types; // SysUtils for FreeAndNil, Types for TStringDynArray
type FileChar = AnsiChar; FileString = AnsiString; // for non-Unicode files
// type FileChar = WideChar; FileString = UnicodeString;// for UTF16 and UCS-2 files
const FileCharSize = SizeOf(FileChar);
// somewhere later in the code add: Assert(FileCharSize = SizeOf(FileString[1]));
function ReadLastLine(const FileName: String): FileString; overload; forward;
const PageSize = 4*1024;
// the minimal read atom of most modern HDD and the memory allocation atom of Win32
// since the chances your file has lines longer than 4 KB are very small, I would not increase it to several atoms.
function ReadLastLine(const Lines: TStringDynArray): FileString; overload;
var i: integer;
begin
Result := '';
i := High(Lines);
if i < Low(Lines) then exit; // empty array - empty file
Result := Lines[i];
if Result > '' then exit; // we got the line
Dec(i); // skip the empty ghost line, in case last line was CRLF-terminated
if i < Low(Lines) then exit; // that ghost was the only line in the empty file
Result := Lines[i];
end;
// scan for EOLs in not-yet-scanned part
function FindLastLine(buffer: TArray<FileChar>; const OldRead : Integer;
const LastChunk: Boolean; out Line: FileString): boolean;
var i, tailCRLF: integer; c: FileChar;
begin
Result := False;
if Length(Buffer) = 0 then exit;
i := High(Buffer);
tailCRLF := 0; // test for trailing CR/LF
if Buffer[i] = ^J then begin // LF - single, or after CR
Dec(i);
Inc(tailCRLF);
end;
if (i >= Low(Buffer)) and (Buffer[i] = ^M) then begin // CR, alone or before LF
Inc(tailCRLF);
end;
i := High(Buffer) - Max(OldRead, tailCRLF);
if i - Low(Buffer) < 0 then exit; // no new data to read - results would be like before
if OldRead > 0 then Inc(i); // the CR/LF pair could be sliced between new and previous buffer - so need to start a bit earlier
for i := i downto Low(Buffer) do begin
c := Buffer[i];
if (c=^J) or (c=^M) then begin // found EOL
SetString( Line, @Buffer[i+1], High(Buffer) - tailCRLF - i);
exit(True);
end;
end;
// we did not find non-terminating EOL in the buffer (except maybe trailing),
// now we should ask for more file content, if there is still left any
// or take the entire file (without trailing EOL if any)
if LastChunk then begin
SetString( Line, @Buffer[ Low(Buffer) ], Length(Buffer) - tailCRLF);
Result := true;
end;
end;
function ReadLastLine(const FileName: String): FileString; overload;
var Buffer, tmp: TArray<FileChar>;
// dynamic arrays - eases memory management and protect from stack corruption
FS: TFileStream; FSize, NewPos: Int64;
OldRead, NewLen : Integer; EndOfFile: boolean;
begin
Result := '';
FS := TFile.OpenRead(FileName);
try
FSize := FS.Size;
if FSize <= PageSize then begin // small file, we can be lazy!
FreeAndNil(FS); // free the handle and avoid double-free in finally
Result := ReadLastLine( TFile.ReadAllLines( FileName, TEncoding.ANSI ));
// or TEncoding.UTF16
// warning - TFIle is not share-aware, if the file is being written to by another app
exit;
end;
SetLength( Buffer, PageSize div FileCharSize);
OldRead := 0;
repeat
NewPos := FSize - Length(Buffer)*FileCharSize;
EndOfFile := NewPos <= 0;
if NewPos < 0 then NewPos := 0;
FS.Position := NewPos;
FS.ReadBuffer( Buffer[Low(Buffer)], (Length(Buffer) - OldRead)*FileCharSize);
if FindLastLine(Buffer, OldRead, EndOfFile, Result) then
exit; // done !
tmp := Buffer; Buffer := nil; // flip-flop: preparing to broaden our mouth
OldRead := Length(tmp); // need not to re-scan the tail again and again when expanding our scanning range
NewLen := Min( 2*Length(tmp), FSize div FileCharSize );
SetLength(Buffer, NewLen); // this may trigger EOutOfMemory...
Move( tmp[Low(tmp)], Buffer[High(Buffer)-OldRead+1], OldRead*FileCharSize);
tmp := nil; // free old buffer
until EndOfFile;
finally
FS.Free;
end;
end;
PS. Note one extra special case: if you use Unicode chars (two-byte ones) and are given an odd-length file (3 bytes, 5 bytes, etc.), you will never be able to scan the leading single byte (half a widechar). Maybe you should add an extra guard there, like Assert(0 = FS.Size mod FileCharSize).
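For illustration, a minimal sketch of where such a guard could go in the ReadLastLine above (my addition, not part of the original routine):

  FS := TFile.OpenRead(FileName);
  try
    // guard against a trailing half-widechar in odd-length files
    Assert(0 = FS.Size mod FileCharSize, 'file size is not a whole number of chars');
    FSize := FS.Size;
    ...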
PPS. As a rule of thumb, you had better keep those functions out of the form class - why mix the concerns? In general you should separate concerns into small blocks. Reading a file has nothing to do with user interaction, so it is better offloaded into a separate UNIT (sketched below). Then you can use the functions from that unit in one form or in ten forms, in the main thread or in a multi-threaded application. Like LEGO parts, they give you flexibility by being small and separate.
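A sketch of that separation (the unit name here is made up):

unit TailReader; // hypothetical unit name

interface

type
  FileString = AnsiString; // as in the snippet above

function ReadLastLine(const FileName: String): FileString;

implementation

uses System.SysUtils, System.IOUtils, System.Math, System.Classes, System.Types;

// ... the FindLastLine helper and both ReadLastLine overloads from above go here ...

end.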
PPPS. Another approach here would be to use memory-mapped files. Google for MMF implementations for Delphi and for articles about the benefits and problems of the MMF approach; a rough sketch of the idea follows the links below. Personally I think rewriting the code above to use MMF would greatly simplify it, removing several "special cases" and the troublesome memory-copying flip-flop. OTOH it would demand that you be very strict with pointer arithmetic.
https://en.wikipedia.org/wiki/Memory-mapped_file
https://msdn.microsoft.com/en-us/library/ms810613.aspx
http://torry.net/quicksearchd.php?String=memory+map&Title=No
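To give the flavour, a rough sketch of the MMF route using the plain Win32 API (read-only mapping, error checking omitted; the procedure name is illustrative, not part of any library):

uses Winapi.Windows;

procedure ScanTailWithMMF(const FileName: string);
var
  hFile, hMap: THandle;
  P: PAnsiChar;
  Size: DWORD; // 32-bit; use GetFileSizeEx for files over 4 GB
begin
  hFile := CreateFile(PChar(FileName), GENERIC_READ, FILE_SHARE_READ, nil,
    OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, 0);
  Size := GetFileSize(hFile, nil);
  hMap := CreateFileMapping(hFile, nil, PAGE_READONLY, 0, 0, nil);
  P := MapViewOfFile(hMap, FILE_MAP_READ, 0, 0, 0);
  try
    // the file is now one flat memory block: scan from P[Size-1] down to P[0]
    // looking for #13/#10 with plain pointer arithmetic - no buffer flip-flop
  finally
    UnmapViewOfFile(P);
    CloseHandle(hMap);
    CloseHandle(hFile);
  end;
end;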
Your Char type is two bytes, so that buffer is 16 bytes. Then with BlockRead you read SizeOf(Buf)-1 bytes into it, and check whether the first two-byte char equals #13.
The SizeOf(Buf)-1 is dodgy (where does that -1 come from?), and the rest is valid, but only if your input file is UTF-16.
Also, you read 8 (or 16) characters each time but compare only one, and then Seek again. That is not very logical either.
If your encoding is not UTF-16, I suggest you change the buffer element type to AnsiChar and remove the -1.
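For illustration, the two changes as a fragment of the question's routine (assuming a single-byte encoding; note that Reset(FileHandle, 1) is also needed, as the question's own comment hints, so that BlockRead counts in 1-byte records):

var
  Buf: array[1..8] of AnsiChar; // one byte per character
...
  Reset(FileHandle, 1); // 1-byte records, so BlockRead counts bytes
...
  BlockRead(FileHandle, Buf, SizeOf(Buf), ReadCount); // full buffer, no -1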
In response to Kopik's suggestion, I figured out how to do it with TFileStream. It works OK with the simple test file, though there may be some further tweaks needed when I use it on a variety of CSV files. Also, I make no claims that this is the most efficient method.
procedure TForm1.Button6Click(Sender: TObject);
Var
StreamSize, ApproxNumRows : Integer;
TempStr : String;
begin
if OpenDialog1.Execute then begin
TempStr := ReadLastLineOfTextFile(OpenDialog1.FileName,StreamSize, ApproxNumRows);
// TempStr := ReadFileStream('c:\temp\CSVTestFile.csv');
ShowMessage ('approximately '+ IntToStr(ApproxNumRows)+' Rows');
ListBox1.Items.Add(TempStr);
end;
end;
Function TForm1.ReadLastLineOfTextFile(const FileName: String; var StreamSize, ApproxNumRows: Integer): String;
const
  MAXLINELENGTH = 256;
var
  Stream: TFileStream;
  BlockSize, CharCount: integer;
  Hash13Found: Boolean;
  Buffer: array [0..MAXLINELENGTH] of AnsiChar;
begin
  Hash13Found := False;
  Result := '';
  Stream := TFileStream.Create(FileName, fmOpenRead or fmShareDenyWrite);
  try
    StreamSize := Stream.Size;
    if StreamSize < MAXLINELENGTH then
      BlockSize := StreamSize
    else
      BlockSize := MAXLINELENGTH;
    // for CharCount := 0 to Length(Buffer)-1 do begin
    //   Buffer[CharCount] := #0; // zeroing the buffer can aid diagnostics
    // end;
    CharCount := 0;
    repeat
      Stream.Seek(-(CharCount + 3), soFromEnd); // +3 misses out the #0,#10,#13 at the end of the file
      Stream.Read(Buffer[CharCount], 1);
      Result := String(Buffer[CharCount]) + Result;
      if Buffer[CharCount] = #13 then
        Hash13Found := True;
      Inc(CharCount);
    until Hash13Found or (CharCount = BlockSize);
  finally
    Stream.Free; // the original version leaked the stream handle
  end;
  ShowMessage(Result);
  ApproxNumRows := Round(StreamSize / CharCount);
end;
Just thought of a new solution.
Again, there could be better ones, but this one is the best I thought of.
function GetLastLine(textFilePath: string): string;
var
  list: TStringList;
begin
  list := TStringList.Create;
  try
    list.LoadFromFile(textFilePath);
    Result := list[list.Count - 1];
  finally
    list.Free;
  end;
end;
How can I insert a string at a specified index in a TMemoryStream? If you added a string "a" to an existing string "b" at index 0, it would shift the existing content forward, giving "ab", and so on. For example, this is what TStringBuilder.Insert does.
Expand the stream so that there is room for the text to be inserted. So, if the text to be inserted has length N, then you need to make the stream N bytes larger.
Copy all the existing content, starting from the insertion point, to the right to make room for the insertion. A call to Move will get this done. You'll be moving this text N bytes to the right.
Write the inserted string at the insertion point.
I'm assuming an 8 bit encoding. If you use a 16 bit encoding, then the stream needs to be grown by 2N bytes, and so on.
You will find that this is a potentially expensive operation. If you care about performance you will do whatever you can to avoid ever having to do this.
P.S. I'm sorry if I have offended any right-to-left language readers with my Anglo-centric assumption that strings run from left to right!
You asked for some code. Here it is:
procedure TMyStringBuilder.Insert(Index: Integer; const Str: string);
var
N: Integer;
P: PChar;
begin
N := Length(Str);
if N=0 then
exit;
FStream.Size := FStream.Size + N*SizeOf(Char);
P := PChar(FStream.Memory);
Move((P + Index)^, (P + Index + N)^, (FStream.Size - N - Index)*SizeOf(Char));
Move(Pointer(Str)^, (P + Index)^, N*SizeOf(Char));
end;
Note that I wrote this code, and then looked at the code in TStringBuilder. It's pretty much identical to that!
The fact that the code you end up writing for this operation is identical to that in TStringBuilder should cause you to stop and contemplate. It's very likely that this new string builder replacement class that you are building will end up with the same implementation as the original. It's highly likely that your replacement will perform no better than the original, and quite plausible that the performance of the replacement will be worse.
It looks a little to me as though you are optimising prematurely. According to your comments below you have not yet timed your code to prove that time spent in TStringBuilder methods is your bottleneck. That's really the first thing that you need to do.
Assuming that you do this timing, and prove that TStringBuilder methods are your bottleneck, you then need to identify why that code is performing below par. And then you need to work out how the code could be improved. Simply repeating the implementation of the original class is not going to yield any benefits.
To move the existing data to make room for the new string data, you can use pointer operations and the Move procedure for speed. But that only has to be done if the insertion index is lower than the size of the original stream. If the index is larger than the stream size, you could: (1) expand the stream to accommodate the index and fill the extra room with zero values or spaces, or (2) reduce the index value to the stream size, so the string is inserted or appended at the end of the stream.
Depending on your needs, you could: (1) make a class derived from TMemoryStream, or (2) make a function that processes an instance of TMemoryStream. Here's the first case:
type
TExtMemoryStream = class(TMemoryStream)
public
procedure InsertString(Index: Integer; const S: string);
end;
procedure TExtMemoryStream.InsertString(Index: Integer; const S: string);
var
SLength, OldSize: Integer;
Src, Dst, PointerToS: ^Char;
begin
if Index > Size then Index := Size;
SLength := Length(S);
OldSize := Size;
SetSize(Size + SLength);
Src := Memory; Inc(Src, Index);
Dst := Src; Inc(Dst, SLength);
Move(Src^, Dst^, OldSize - Index);
PointerToS := @S[1];
Move(PointerToS^, Src^, SLength);
end;
or the second case:
procedure InsertStringToMemoryStream(MS: TMemoryStream;
Index: Integer; const S: string);
var
SLength, OldSize: Integer;
Src, Dst, PointerToS: ^Char;
begin
if Index > MS.Size then Index := MS.Size;
SLength := Length(S);
OldSize := MS.Size;
MS.SetSize(MS.Size + SLength);
Src := MS.Memory; Inc(Src, Index);
Dst := Src; Inc(Dst, SLength);
Move(Src^, Dst^, OldSize - Index);
PointerToS := @S[1];
Move(PointerToS^, Src^, SLength);
end;
There, hope it helps :)
I am having trouble with some Delphi code that uses TFileStream to read chunks of data from a file to a dynamic array. The original objective in writing the code is to compare the contents of two files that have the same size but potentially different date and time stamps to see if the contents are the same. This is done by reading the data from each file of the pair into separate dynamic arrays and comparing each byte of one array with the corresponding byte of the other.
The code makes multiple calls to TFileStream.Read. After about 75 calls, the program crashes with an 'Out of Memory' Error message.
It does not seem to matter how large the blocks of data that are read, it seems to be the number of calls that results in the error message.
The code is a function that I have written that is called elsewhere whenever the program encounters two files that it needs to compare (which, for reasons that I won't go into, could be forty or fifty different file pairs). The 'Out of Memory' error occurs whether it is a single file that is being read in small blocks, or multiple files that are being read in their entirety. It seems to be the number of calls that is the determinant of the error.
While I realize that there might be more elegant ways of achieving the comparison of the files than what I have shown below, what I would really like to know is what is wrong with the use of the TFileStream and/or SetLength calls that are causing the memory problems. I have tried freeing the memory after every call (as shown in the code) and it seems to make no difference.
I would be grateful if someone could explain what is going wrong.
function Compare_file_contents(SPN,TPN : String; SourceFileSize : int64) : boolean;
var
SF : TFileStream; //First file of pair for comparison
TF : TFileStream; //Second file of pair
SourceArray : TBytes; // Buffer array to receive first file data
TargetArray : TBytes; //Buffer array to receive second file data
ArrayLength : int64; //Length of dynamic array
Position : int64; //Position within files to start each block of data read
TestPosition : int64; //Position within dynamic arrays to compare each byte
MaxArrayLength : integer; //Maximum size for the buffer arrays
LastRun : Boolean; //End first repeat loop
begin
{ The comparison has an arbitrary upper boundary of 100 MB to avoid slowing the
the overall program. The main files bigger than this will be *.pst files that
will most likely have new dates every time the program is run, so it will take
about the same time to copy the files as it does to read and compare them, and
it will have to be done every time.
The function terminates when it is confirmed that the files are not the same.
If the source file is bigger than 100 MB, it is simply assumed that they are
not identical, thus Result = False. Also, LongInt integers (=integers) have
a range of -2147483648..2147483647, so files bigger than 2 GB will have
overflowed to a negative number. Hence the check to see if the file size is
less than zero.
The outer repeat ... until loop terminates on LastRun, but LastRun should only
be set if SecondLastRun is True, because it will skip the final comparisons in
the inner repeat ... until loop otherwise. }
Result := True;
LastRun := False;
MaxArrayLength := 1024*1024;
if (SourceFileSize > 100*1024*1024) or (SourceFileSize < 0) then Result := False
else
begin
{ The comparison is done by using TFileStream to open and read the data from
the source and target files as bytes to dynamic arrays (TBytes). Then a repeat
loop is used to compare individual bytes until a difference is found or all
of the information has been compared. If a difference is found, Result is
set to False. }
if SourceFileSize > MaxArrayLength then ArrayLength := MaxArrayLength
else ArrayLength := SourceFileSize;
SF := TFileStream.Create(SPN,fmOpenRead);
TF := TFileStream.Create(TPN,fmOpenRead);
Position := 0;
SetLength(SourceArray,ArrayLength);
SetLength(TargetArray,ArrayLength);
try
SF.Read(SourceArray,ArrayLength);
TF.Read(TargetArray,ArrayLength);
Position := SF.Position;
finally
SF.Free;
TF.Free;
end;
repeat
TestPosition := 0;
repeat
if SourceArray[TestPosition] <> TargetArray[TestPosition] then
Result := False;
Inc(TestPosition);
until (Result = False) or (TestPosition = ArrayLength);
if SourceFileSize > Position then
begin
if SourceFileSize - Position - MaxArrayLength > 0 then
ArrayLength := MaxArrayLength
else ArrayLength := SourceFileSize - Position;
SF := TFileStream.Create(SPN,fmOpenRead);
TF := TFileStream.Create(TPN,fmOpenRead);
SF.Position := Position;
TF.Position := Position;
try
SF.Read(SourceArray,ArrayLength);
TF.Read(TargetArray,ArrayLength);
Position := SF.Position;
finally
SF.Free;
TF.Free;
end;
end else LastRun := True;
until (Result = False) or LastRun;
Finalize(SourceArray);
Finalize(TargetArray);
end;
end; { Compare_file_contents }
This routine seems to be far more complicated than it needs to be. Rather than trying to debug it, I offer you my routine that compares streams.
function StreamsEqual(Stream1, Stream2: TStream): Boolean;
const
OneKB = 1024;
var
Buffer1, Buffer2: array [0..4*OneKB-1] of Byte;
SavePos1, SavePos2: Int64;
Count: Int64;
N: Integer;
begin
if Stream1.Size<>Stream2.Size then begin
Result := False;
exit;
end;
SavePos1 := Stream1.Position;
SavePos2 := Stream2.Position;
Try
Stream1.Position := 0;
Stream2.Position := 0;
Count := Stream1.Size;
while Count <> 0 do begin
N := Min(SizeOf(Buffer1), Count);
Stream1.ReadBuffer(Buffer1, N);
Stream2.ReadBuffer(Buffer2, N);
if not CompareMem(@Buffer1, @Buffer2, N) then begin
Result := False;
exit;
end;
dec(Count, N);
end;
Result := True;
Finally
Stream1.Position := SavePos1;
Stream2.Position := SavePos2;
End;
end;
If you wish to add your 100MB size check to this function, it's obvious where and how to do it.
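For instance, a sketch of that check, dropped in right after the Size comparison at the top (the 100 MB cap and the False result follow the asker's own convention):

  if Stream1.Size > 100*1024*1024 then begin
    Result := False; // too big to be worth comparing, per the asker's rule
    exit;
  end;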
The routine above uses a stack-allocated buffer. In contrast, your version allocates on the heap. Perhaps your version leads to heap fragmentation.
I realise that this does not answer the direct question that you asked. However, it does solve your problem. I hope this proves useful.
We have our own data-streaming algorithm that includes some metadata + records + field values.
Currently we use a TStream and call Write to add values to the stream.
Now I'm wondering if this time-consuming operation could be made faster by some technique.
Edit: We are only appending data to the end, not moving or seeking.
Some things I was thinking of are:
Not using streams but some large pre-allocated memory buffer to copy data to; the problem is that if we grow beyond the buffer size, we have to reallocate to some new memory space.
Using a stream prefilled with #0s to some size and then starting to add values. The rationale is that TStream may have to allocate its own buffer every time I do a Write (I don't know how it really works, just wondering...).
We are adding strings to the TStream, and binary data in the form #0#0#0#1, for example.
The data is then transferred via TCP, so it's not about writing to a file.
So what is the best method for this?
First, you are assuming the TStream is the bottleneck. You need to profile your code, such as with AQTime, to determine where the bottleneck really is. Don't make assumptions.
Second, what type of TStream are you actually using? TMemoryStream? TFileStream? Something else? Different stream types handle memory differently. TMemoryStream allocates a memory buffer and grows it by a preset number of bytes whenever the buffer fills up. TFileStream, on the other hand, doesn't use any memory at all; it just writes directly to the file and lets the OS handle any buffering.
Regardless of which type of stream you use, one thing you could try is to implement your own custom TStream class that has an internal fixed-size buffer and a pointer to your real destination TStream object. You can then pass an instance of your custom class to your algorithm. Have your class override the TStream.Write() method to copy the input data into its buffer until it fills up, then Write() the buffer to the destination TStream and clear the buffer. Your algorithm will never know the difference. Both TMemoryStream and TFileStream would benefit from the extra buffering - fewer, larger writes mean more efficient memory allocations and file I/O. For example:
type
TMyBufferedStreamWriter = class(TStream)
private
fDest: TStream;
fBuffer: array[0..4095] of Byte;
fOffset: Cardinal;
public
constructor Create(ADest: TStream);
function Read(var Buffer; Count: Longint): Longint; override;
function Write(const Buffer; Count: Longint): Longint; override;
procedure FlushBuffer;
end;
uses
  Math, RTLConsts; // Math for Min(), RTLConsts for SWriteError
constructor TMyBufferedStreamWriter.Create(ADest: TStream);
begin
  inherited Create;
  fDest := ADest;
end;
function TMyBufferedStreamWriter.Read(var Buffer; Count: Longint): Longint;
begin
Result := 0;
end;
function TMyBufferedStreamWriter.Write(const Buffer; Count: Longint): Longint;
var
pBuffer: PByte;
Num: Cardinal;
begin
Result := 0;
pBuffer := PByte(@Buffer);
while Count > 0 do
begin
Num := Min(SizeOf(fBuffer) - fOffset, Cardinal(Count));
if Num = 0 then FlushBuffer;
Move(pBuffer^, fBuffer[fOffset], Num);
Inc(fOffset, Num);
Inc(pBuffer, Num);
Dec(Count, Num);
Inc(Result, Num);
end;
end;
procedure TMyBufferedStreamWriter.FlushBuffer;
var
Idx: Cardinal;
Written: Longint;
begin
if fOffset = 0 then Exit;
Idx := 0;
repeat
Written := fDest.Write(fBuffer[Idx], fOffset - Idx);
if Written < 1 then raise EWriteError.CreateRes(@SWriteError);
Inc(Idx, Written);
until Idx = fOffset;
fOffset := 0;
end;
Writer := TMyBufferedStreamWriter.Create(RealStreamHere);
try
... write data to Writer normally as needed...
Writer.FlushBuffer;
finally
Writer.Free;
end;
Use a profiler to see where it is actually slow.
If it is indeed because of multiple reallocations to increase the size of the stream, you can avoid them by setting the Size property to a big enough value upfront.
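For example, a minimal sketch of pre-sizing a TMemoryStream (EstimatedSize is a hypothetical upper bound you would supply):

MS := TMemoryStream.Create;
try
  MS.Size := EstimatedSize; // one allocation up front instead of repeated regrows
  MS.Position := 0;         // write from the start
  // ... MS.Write(...) calls as usual ...
  MS.SetSize(MS.Position);  // trim the unused tail when done
finally
  MS.Free;
end;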
The only case where using a memory buffer could make an obvious difference is if you're using FileStreams, all other useable streams are already doing this for you.
Derive from TMemoryStream and remove the restriction on Size and Capacity. And do not call TMemoryStream.Clear; call TMemoryStream.SetSize(0) instead.
type
TMemoryStreamEx = class(TMemoryStream)
public
procedure SetSize(NewSize: Longint); override;
property Capacity;
end;
implementation
{ TMemoryStreamEx }
procedure TMemoryStreamEx.SetSize(NewSize: Longint);
var
OldPosition: Longint;
begin
if NewSize > Capacity then
inherited SetSize(NewSize)
else
begin
OldPosition := Position;
SetPointer(Memory, NewSize);
if OldPosition > NewSize then
Seek(0, soFromEnd);
end;
end;
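A short usage sketch of the Clear versus SetSize(0) point (StreamEx is a hypothetical TMemoryStreamEx instance):

// reset for reuse: keeps the allocated memory (Capacity), unlike Clear
StreamEx.SetSize(0); // the override clamps Position back to 0
// ... write the next batch of data ...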
I am using a TStringBuilder class ported from .NET to Delphi 7.
And here is my code snippet:
procedure TForm1.btn1Click(Sender: TObject);
const
FILE_NAME = 'PATH TO A TEXT FILE';
var
sBuilder: TStringBuilder;
I: Integer;
fil: TStringList;
sResult: string;
randInt: Integer;
begin
Randomize;
sResult := '';
for I := 1 to 100 do
begin
fil := TStringList.Create;
try
fil.LoadFromFile(FILE_NAME);
randInt := Random(1024);
sBuilder := TStringBuilder.Create(randInt);
try
sBuilder.Append(fil.Text);
sResult := sBuilder.AsString;
finally
sBuilder.Free;
end;
mmo1.Text := sResult;
finally
FreeAndNil(fil);
end;
end;
ShowMessage('Done');
end;
I am experiencing AV (access violation) errors. I can alleviate the problem by creating the builder with a size that is a multiple of 1024, but the error still sometimes occurs.
Am I doing something wrong?
Your code is fine. The TStringBuilder code you're using is faulty. Consider this method:
procedure TStringBuilder.Append(const AString : string);
var iLen : integer;
begin
iLen := length(AString);
if iLen + FIndex > FBuffMax then _ExpandBuffer;
move(AString[1],FBuffer[FIndex],iLen);
inc(FIndex,iLen);
end;
If the future length is too long for the current buffer size, the buffer is expanded. _ExpandBuffer doubles the size of the buffer, but once that's done, it never checks whether the new buffer size is sufficient. If the original buffer size is 1024, and the file you're loading is 3 KB, then doubling the buffer size to 2048 will still leave the buffer too small in Append, and you'll end up overwriting 1024 bytes beyond the end of the buffer.
Change the if to a while in Append.
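A minimal sketch of that fix, with everything else left as it was:

procedure TStringBuilder.Append(const AString : string);
var
  iLen : integer;
begin
  iLen := length(AString);
  while iLen + FIndex > FBuffMax do // keep doubling until the buffer is big enough
    _ExpandBuffer;
  move(AString[1], FBuffer[FIndex], iLen);
  inc(FIndex, iLen);
end;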