Skip to content
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -490,7 +490,7 @@ internal void WriteCentralDirectoryFileHeader()
}


if (_offsetOfLocalHeader > uint.MaxValue
if (OffsetTooLarge()
#if DEBUG_FORCE_ZIP64
|| _archive._forceZip64
#endif
Expand Down Expand Up @@ -799,6 +799,10 @@ private bool IsOpenable(bool needToUncompress, bool needToLoadIntoMemory, out st

// True when either size field overflows the 4-byte fields of a classic (non-ZIP64)
// ZIP header, i.e. the entry requires the ZIP64 extended information extra field.
private bool SizesTooLarge()
{
    return _compressedSize > uint.MaxValue || _uncompressedSize > uint.MaxValue;
}

// True when this entry's local file header starts beyond the 4 GB boundary, so its
// offset cannot be stored in the 4-byte central directory field without ZIP64.
private bool OffsetTooLarge()
{
    return _offsetOfLocalHeader > uint.MaxValue;
}

// The entry needs the ZIP64 format when any of its 4-byte header fields
// (compressed size, uncompressed size, or local header offset) would overflow.
private bool ShouldUseZIP64()
{
    return SizesTooLarge() || OffsetTooLarge();
}

// return value is true if we allocated an extra field for 64 bit headers, un/compressed size
private bool WriteLocalFileHeader(bool isEmptyFile)
{
Expand All @@ -813,6 +817,9 @@ private bool WriteLocalFileHeader(bool isEmptyFile)
bool zip64Used = false;
uint compressedSizeTruncated, uncompressedSizeTruncated;

// save offset
_offsetOfLocalHeader = writer.BaseStream.Position;

// if we already know that we have an empty file don't worry about anything, just do a straight shot of the header
if (isEmptyFile)
{
Expand Down Expand Up @@ -840,7 +847,7 @@ private bool WriteLocalFileHeader(bool isEmptyFile)
{
// We are in seekable mode so we will not need to write a data descriptor
_generalPurposeBitFlag &= ~BitFlagValues.DataDescriptor;
if (SizesTooLarge()
if (ShouldUseZIP64()
#if DEBUG_FORCE_ZIP64
|| (_archive._forceZip64 && _archive.Mode == ZipArchiveMode.Update)
#endif
Expand All @@ -865,9 +872,6 @@ private bool WriteLocalFileHeader(bool isEmptyFile)
}
}

// save offset
_offsetOfLocalHeader = writer.BaseStream.Position;

// calculate extra field. if zip64 stuff + original extraField aren't going to fit, dump the original extraField, because this is more important
int bigExtraFieldLength = (zip64Used ? zip64ExtraField.TotalSize : 0)
+ (_lhUnknownExtraFields != null ? ZipGenericExtraField.TotalSize(_lhUnknownExtraFields) : 0);
Expand Down Expand Up @@ -964,7 +968,7 @@ private void WriteCrcAndSizesInLocalHeader(bool zip64HeaderUsed)
long finalPosition = _archive.ArchiveStream.Position;
BinaryWriter writer = new BinaryWriter(_archive.ArchiveStream);

bool zip64Needed = SizesTooLarge()
bool zip64Needed = ShouldUseZIP64()
#if DEBUG_FORCE_ZIP64
|| _archive._forceZip64
#endif
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -3,6 +3,7 @@

using System.Collections.Generic;
using System.Linq;
using System.Reflection;
using Xunit;

namespace System.IO.Compression.Tests
Expand Down Expand Up @@ -44,5 +45,93 @@ public static void UnzipOver4GBZipFile()
tempDir.Delete(recursive: true);
}
}

// Fills the buffer with pseudo-random bytes; random data is essentially
// incompressible, so Deflate cannot shrink the written payload.
private static void FillWithHardToCompressData(byte[] buffer) => Random.Shared.NextBytes(buffer);

[ConditionalTheory(typeof(PlatformDetection), nameof(PlatformDetection.IsSpeedOptimized), nameof(PlatformDetection.Is64BitProcess))] // don't run it on slower runtimes
[OuterLoop("It requires 5~6 GB of free disk space and a lot of CPU time for compressed tests")]
[InlineData(false)]
[InlineData(true)]
public static void CheckZIP64VersionIsSet_ForSmallFilesAfterBigFiles(bool isCompressed)
{
    // Regression test for issue #94899: an entry whose local file header begins
    // beyond the 4 GB boundary must advertise "version needed to extract" = 45
    // (ZIP64), even when the entry's own data is small.

    byte[] smallBuffer = GC.AllocateUninitializedArray<byte>(1000);
    byte[] largeBuffer = GC.AllocateUninitializedArray<byte>(1_000_000_000); // ~1 GB

    // Use a random file name rather than a fixed one so concurrent or parallel
    // test runs cannot collide on the same path in the shared temp directory.
    string zipArchivePath = Path.Combine(Path.GetTempPath(), Path.GetRandomFileName() + ".zip");

    try
    {
        using FileStream fs = File.Open(zipArchivePath, FileMode.Create, FileAccess.ReadWrite);
        const string LargeFileName = "largefile";
        const string SmallFileName = "smallfile";
        const uint ZipLocalFileHeader_OffsetToVersionFromHeaderStart = 4;
        const ushort Zip64Version = 45;

        {
            // Create

            var compressLevel = isCompressed ? CompressionLevel.Optimal : CompressionLevel.NoCompression;

            using var archive = new ZipArchive(fs, ZipArchiveMode.Create, true);
            ZipArchiveEntry file = archive.CreateEntry(LargeFileName, compressLevel);

            using (Stream stream = file.Open())
            {
                // Write 5 GB of data so the *next* entry's local header lands
                // past the 4 GB offset boundary.

                const int HOW_MANY_GB_TO_WRITE = 5;

                for (var i = 0; i < HOW_MANY_GB_TO_WRITE; i++)
                {
                    if (isCompressed)
                    {
                        // Refill with fresh random (incompressible) data each GB so
                        // the deflater cannot shrink the payload below 4 GB.
                        FillWithHardToCompressData(largeBuffer);
                    }

                    stream.Write(largeBuffer);
                }
            }

            file = archive.CreateEntry(SmallFileName, compressLevel);

            using (Stream stream = file.Open())
            {
                stream.Write(smallBuffer);
            }
        }

        fs.Position = 0;

        {
            // Validate

            using var reader = new BinaryReader(fs);
            using var archive = new ZipArchive(fs, ZipArchiveMode.Read);
            FieldInfo offsetOfLHField = typeof(ZipArchiveEntry).GetField("_offsetOfLocalHeader", BindingFlags.NonPublic | BindingFlags.Instance);

            if (offsetOfLHField is null || offsetOfLHField.FieldType != typeof(long))
            {
                Assert.Fail("Cannot find the private field of _offsetOfLocalHeader in ZipArchiveEntry or the type is not long. Code may be changed after the test is written.");
            }

            foreach (ZipArchiveEntry entry in archive.Entries)
            {
                // Seek straight to the "version needed to extract" field of this
                // entry's local file header and read it from the raw bytes.
                fs.Position = (long)offsetOfLHField.GetValue(entry) + ZipLocalFileHeader_OffsetToVersionFromHeaderStart;
                ushort versionNeeded = reader.ReadUInt16();

                // Report the value actually read so a failure is diagnosable.
                Assert.True(versionNeeded == Zip64Version, $"Expected 'version needed to extract' {Zip64Version} (ZIP64) for entry '{entry.FullName}' with Local Header at >4GB offset, but read {versionNeeded}.");
            }
        }
    }
    finally
    {
        File.Delete(zipArchivePath);
    }
}
}
}