diff --git a/Hazelcast.Net.sln.DotSettings b/Hazelcast.Net.sln.DotSettings
index 1dfdd2c712..1de83e9272 100644
--- a/Hazelcast.Net.sln.DotSettings
+++ b/Hazelcast.Net.sln.DotSettings
@@ -20,6 +20,7 @@
True
True
True
+ True
True
True
True
diff --git a/src/Hazelcast.Net.Tests/Cloud/CloudTests.cs b/src/Hazelcast.Net.Tests/Cloud/CloudTests.cs
index de8459b484..98360c32d3 100644
--- a/src/Hazelcast.Net.Tests/Cloud/CloudTests.cs
+++ b/src/Hazelcast.Net.Tests/Cloud/CloudTests.cs
@@ -13,21 +13,16 @@
// limitations under the License.
using System;
-using System.Collections.Generic;
using System.Diagnostics;
using System.Globalization;
using System.IO;
-using System.Linq;
-using System.Text;
using System.Threading.Tasks;
+using System.IO.Compression;
using Hazelcast.Core;
-using Hazelcast.Metrics;
using Hazelcast.Networking;
using Hazelcast.Testing;
using Hazelcast.Testing.Configuration;
using Hazelcast.Testing.Logging;
-using Hazelcast.Testing.Remote;
-using Ionic.Zlib;
using NUnit.Framework;
namespace Hazelcast.Tests.Cloud
@@ -59,19 +54,10 @@ public class CloudTests : SingleMemberClientRemoteTestBase
//
private const string SecretsKey = "cloud-test";
//
- // 3. a valid path to a Java JDK, indicated by the following constant
- private const string JdkPath = @"C:\Program Files\Java\jdk1.8.0_241";
-
- // 4. the number of put/get iterations + how long to wait between each iteration
+ // 3. the number of put/get iterations + how long to wait between each iteration
private const int IterationCount = 60;
private const int IterationPauseMilliseconds = 100;
- [SetUp]
- public void SetUp()
- {
- Assert.That(Directory.Exists(JdkPath), Is.True, $"JDK directory {JdkPath} does not exist.");
- }
-
[Test]
public async Task SampleClient()
{
@@ -167,358 +153,5 @@ public async Task SampleClient()
HConsole.WriteLine(this, $"Done (elapsed: {stopwatch.Elapsed.ToString("hhmmss\\.fff\\ ", CultureInfo.InvariantCulture)}).");
}
-
- [Test]
- public void ZLibTest()
- {
- // this tests that a zipped blob of text can be unzipped
- const string sourceString = "this is a test";
-
- // compress the string
- var sourceBytes = Encoding.UTF8.GetBytes(sourceString);
- var sourceStream = new MemoryStream(sourceBytes);
- var compressStream = new ZlibStream(sourceStream, CompressionMode.Compress, CompressionLevel.BestSpeed,false);
- var compressedStream = new MemoryStream();
- compressStream.CopyTo(compressedStream);
- var compressedBytes = compressedStream.ToArray();
-
- // dump the compressed bytes to console
- Console.WriteLine(compressedBytes.Dump(formatted: false));
-
- // decompress
- compressedStream = new MemoryStream(compressedBytes);
- var uncompressStream = new ZlibStream(compressedStream, CompressionMode.Decompress, false);
- var destStream = new MemoryStream();
- uncompressStream.CopyTo(destStream);
- var destBytes = destStream.ToArray();
- var destString = Encoding.UTF8.GetString(destBytes);
-
- // validate
- Assert.That(destString, Is.EqualTo(sourceString));
- }
-
- [Test]
- public void MetricsCompress()
- {
- // compress some metrics
- var compressor = new MetricsCompressor();
- compressor.Append(MetricDescriptor.Create("name1", MetricUnit.Count).WithValue(1234));
- compressor.Append(MetricDescriptor.Create("name2", MetricUnit.Count).WithValue(5678));
- var bytes = compressor.GetBytesAndReset();
-
- // dump the compressed bytes to console
- Console.WriteLine(bytes.Dump(formatted: false));
-
- // get the metrics back
- var metrics = MetricsDecompressor.GetMetrics(bytes);
- Assert.That(metrics.Count(), Is.EqualTo(2));
- }
-
- private static class MetricsDecompressor
- {
- private static string _prefix;
- private static string _name;
- private static string _discname;
- private static string _discvalue;
- private static MetricUnit _unit;
-
- // note: BytesExtensions ReadInt() etc methods are highly optimized and assume that boundary
- // checks have been properly performed beforehand - which means that if they fail, they can
- // take the entire process down - so here we protect them with CanRead() calls which we would
- // not use in the actual code - so we get a "normal" exception
-
- private static byte[] Decompress(byte[] bytes)
- {
- try
- {
- using var memory = new MemoryStream(bytes);
- using var uncompressing = new ZlibStream(memory, CompressionMode.Decompress, false);
- var u = new MemoryStream();
- uncompressing.CopyTo(u);
- return u.ToArray();
- }
- catch (Exception e)
- {
- Console.WriteLine("Failed to decompress!");
- Console.WriteLine(e);
- return null;
- }
- }
-
- private static Dictionary<int, string> GetStrings(byte[] stringsBytes, bool verbose)
- {
- var stringsCount = stringsBytes.ReadInt(0, Endianness.BigEndian);
- if (verbose) Console.WriteLine($"Containing {stringsCount} strings");
- var strings = new Dictionary<int, string>();
- var pos = 4;
- char[] pchars = null;
- for (var i = 0; i < stringsCount; i++)
- {
- var stringId = stringsBytes.CanRead(pos, BytesExtensions.SizeOfInt).ReadInt(pos, Endianness.BigEndian);
- pos += 4;
- var commonLen = stringsBytes.ReadByte(pos++);
- var diffLen = stringsBytes.ReadByte(pos++);
- var chars = new char[commonLen + diffLen];
- for (var j = 0; j < commonLen; j++)
- {
- chars[j] = pchars[j];
- }
- for (var j = commonLen; j < commonLen + diffLen; j++)
- {
- chars[j] = stringsBytes.CanRead(pos, BytesExtensions.SizeOfChar).ReadChar(pos, Endianness.BigEndian);
- pos += 2;
- }
- pchars = chars;
- strings[stringId] = new string(chars);
- if (verbose) Console.WriteLine($"s[{stringId:000}]={strings[stringId]}");
- }
- return strings;
- }
-
- private static string GetString(Dictionary<int, string> strings, int id)
- {
- if (id < 0) return null;
- if (strings.TryGetValue(id, out var value)) return value;
- return $"<unknown:{id}>";
- }
-
- private static Metric GetMetric(byte[] metricsBytes, ref int pos, Dictionary<int, string> strings, bool verbose)
- {
- var mask = (MetricsCompressor.DescriptorMask)metricsBytes.ReadByte(pos);
- if (verbose) Console.WriteLine($" Mask: 0x{(byte)mask:x2} -> {mask}");
- pos += 1;
-
- var prefix = _prefix;
- if (mask.HasNone(MetricsCompressor.DescriptorMask.Prefix))
- {
- var id = metricsBytes.CanRead(pos, BytesExtensions.SizeOfInt).ReadInt(pos, Endianness.BigEndian);
- prefix = GetString(strings, id);
- if (verbose) Console.WriteLine($" PrefixId: {id} -> {prefix}");
- pos += 4;
- }
-
- var name = _name;
- if (mask.HasNone(MetricsCompressor.DescriptorMask.Name))
- {
- var id = metricsBytes.CanRead(pos, BytesExtensions.SizeOfInt).ReadInt(pos, Endianness.BigEndian);
- name = GetString(strings, id);
- if (verbose) Console.WriteLine($" NameId: {id} -> {name}");
- pos += 4;
- }
-
- var discname = _discname;
- if (mask.HasNone(MetricsCompressor.DescriptorMask.DiscriminatorName))
- {
- var id = metricsBytes.CanRead(pos, BytesExtensions.SizeOfInt).ReadInt(pos, Endianness.BigEndian);
- discname = GetString(strings, id);
- if (verbose) Console.WriteLine($" Disc.Key: {id} -> {discname}");
- pos += 4;
- }
-
- var discvalue = _discvalue;
- if (mask.HasNone(MetricsCompressor.DescriptorMask.DiscriminatorValue))
- {
- var id = metricsBytes.CanRead(pos, BytesExtensions.SizeOfInt).ReadInt(pos, Endianness.BigEndian);
- discvalue = GetString(strings, id);
- if (verbose) Console.WriteLine($" Disc.Value: {id} -> {discvalue}");
- pos += 4;
- }
-
- var unit = _unit;
- if (mask.HasNone(MetricsCompressor.DescriptorMask.Unit))
- {
- unit = (MetricUnit)metricsBytes.CanRead(pos, BytesExtensions.SizeOfByte).ReadByte(pos);
- if ((byte) unit == 255) unit = MetricUnit.None;
- if (verbose) Console.WriteLine($" Unit: {unit}");
- pos += 1;
- }
-
- if (mask.HasNone(MetricsCompressor.DescriptorMask.ExcludedTargets))
- {
- var excludedTargets = metricsBytes.CanRead(pos, BytesExtensions.SizeOfByte).ReadByte(pos);
- if (verbose) Console.WriteLine($" ExcludedTargets: {excludedTargets}");
- pos += 1;
- }
- if (mask.HasNone(MetricsCompressor.DescriptorMask.TagCount))
- {
- var tagCount = metricsBytes.CanRead(pos, BytesExtensions.SizeOfByte).ReadByte(pos);
- if (verbose) Console.WriteLine($" TagCount: {tagCount}");
- pos += 1;
- }
-
- Metric metric;
- var type = (MetricValueType)metricsBytes.ReadByte(pos);
- pos += 1;
- if (verbose) Console.WriteLine($" ValueType: {type}");
- switch (type)
- {
- case MetricValueType.Double:
- var d = metricsBytes.CanRead(pos, BytesExtensions.SizeOfDouble).ReadDouble(pos, Endianness.BigEndian);
- if (verbose) Console.WriteLine($" Value: {d}");
- pos += BytesExtensions.SizeOfDouble;
- var ddesc = MetricDescriptor.Create(prefix, name, unit);
- if (discname != null) ddesc = ddesc.WithDiscriminator(discname, discvalue);
- metric = ddesc.WithValue(d);
- break;
- case MetricValueType.Long:
- var l = metricsBytes.CanRead(pos, BytesExtensions.SizeOfLong).ReadLong(pos, Endianness.BigEndian);
- if (verbose) Console.WriteLine($" Value: {l}");
- pos += BytesExtensions.SizeOfLong;
- var ldesc = MetricDescriptor.Create(prefix, name, unit);
- if (discname != null) ldesc = ldesc.WithDiscriminator(discname, discvalue);
- metric = ldesc.WithValue(l);
- break;
- default:
- if (verbose) Console.WriteLine(" Value: ?!");
- // TODO: how shall we handle eg strings?!
- metric = null;
- break;
- }
-
- _prefix = prefix;
- _name = name;
- _discname = discname;
- _discvalue = discvalue;
- _unit = unit;
-
- return metric;
- }
-
- public static IEnumerable<Metric> GetMetrics(byte[] bytes, bool verbose = false)
- {
- // get the strings blob and decompress it
- var stringsLength = bytes.CanRead(2, BytesExtensions.SizeOfInt).ReadInt(2, Endianness.BigEndian);
- if (verbose) Console.WriteLine($"StringsLength is {stringsLength} bytes [{2 + 4}..{2 + 4 + stringsLength - 1}]");
- var stringsBytes = new byte[stringsLength];
- for (var i = 0; i < stringsLength; i++) stringsBytes[i] = bytes[2 + 4 + i];
- var stringsData = Decompress(stringsBytes);
- if (verbose) Console.WriteLine($"Uncompressed to {stringsData.Length} bytes");
-
- // build the strings dictionary
- var strings = GetStrings(stringsData, verbose);
- if (verbose) Console.WriteLine($"Contains {strings.Count} strings");
-
- // get the metrics count
- var metricsCount = bytes.ReadInt(2 + 4 + stringsLength, Endianness.BigEndian);
- Console.WriteLine($"MetricsCount is {metricsCount} metrics");
-
- // get the metrics blob and decompress it
- var metricsLength = bytes.Length - 2 - 4 - stringsLength - 4; // everything that is left
- if (verbose) Console.WriteLine($"MetricsLength is {metricsLength} bytes [{2 + 4 + stringsLength + 4}..{2 + 4 + stringsLength + 4 + metricsLength}]");
- var metricsBytes = new byte[metricsLength];
- for (var i = 0; i < metricsLength; i++) metricsBytes[i] = bytes[2 + 4 + stringsLength + 4 + i];
- var metricsData = Decompress(metricsBytes);
- if (verbose) Console.WriteLine($"Uncompressed to {metricsData.Length} bytes");
-
- // get the metrics
- var pos = 0;
- var metrics = new List<Metric>();
- for (var i = 0; i < metricsCount; i++)
- {
- if (verbose) Console.WriteLine($"[{i}]");
- metrics.Add(GetMetric(metricsData, ref pos, strings, verbose));
- }
- return metrics;
- }
- }
-
- [TestCase(0)]
- [TestCase(1)]
- [TestCase(2)]
- [TestCase(3)]
- [Timeout(30_000)]
- public async Task MetricsDecompressorTests(int blobNo)
- {
- var blobs = new[]
- {
- // this blob was captured during the execution of a .NET client
- "0001000001bd78018d93594fc2501085cf95452da222b8a3fe054d8c898fc6e54913a3c657835095a4052cc5edd7fbcd452d90684cd3de65b633674e25ed482aabaa865e78db8af8def30d75a9445d35d9f579ba9cfaf83a6d701769c029c596e8185b8727e43e25839d6e58631f99922f568fc89c5bc716f3b4f1b3d8966ed927ec077845bae02ef695de758de583b35473257c423c26b34bab9ac5f2826f56db506eba406f54b5ec599c59e615e8c1e70bc7ea49f9fc123d3fe99dce2cdfef880af932f85ef1e8b1f6588da50cf19af2e4b12eade2966aec3b9edf885d6b04d11571cf746f1c0fbdeb73ce4749753f0bbbbfe61d766127c3d6f7ac477e4e59e7dfac4b2b2ac1a4e1bbd3ae0eb447d5a60eb50f9ee2540d6ecec863533ef1b59b30d2c63bf5ecdb4407204ef15ec81527785a77057c4d13865a5aa6db0eb56cde52d5cdb036c8670c36e1c16ea7995497fc219effad3ce5864c6cb879b8367db574ea5125cc6814e376108c61349b3125cd30ef1e58321d1f731efce8539a035bf2d5ed50b3425315e24d07a6d258e7c437a87ec40c4df10d3db24a8bb06c8cd974c6d52b0585ffaaa954f85b4d1515c167b3f9c6b7e466c1667c8cfc319f117fa36c0000001a78015590490e825010441b704070c001678d319a78000fc019d8b8e500ba75250b6fe0c218171ecc6b7802ad6af83fb192caaf4777bad3acbe90407c55042797e73acb990a7aa4d70b920b7b7c9dcd3d12d62b702152d580d66a7f542fc9ddb1d3577a9dd384d4e01a152928b3e894d0d211a909b7ec52ff046cdb06edef1418df9422252f11616b574fc555ef2db1a745f1924c32c43e3c806378680e3f7cd839b22b75ea189f28073f4a64a219b4274d4b928034331492e68674cac212572c2dc90fd6e326f5",
-
- // this blob was captured during the execution of a .NET client
- "0001000001f278018d93dd6fd26018c54fb78250d8701bea36bff53ff0c2c44bddf4464d165df4d230a84ad2422d30c5bfdedf79191648344bd3be1fcfd779ce792ae9a9a41d1da8a74bdea132be177c539da9d4587d76139e31a709be918eb9cb34e334c556ea04db8827e57e4a069fce59f31039255fae82c8ede8085bce33c4cfb1037d625fb29fe195e93d7779a834d7472cbf394bdda8854f8ac76676e98e9a582ef1ad6a1be5dd28d12faa3a7b1567cbae127d0df9d2b57a521cefd3f377cde9ccf9fe8da816ef80ef271e056bc16a962ac4878ac9e32e5df1bebaec4781df8cdd6005d107e27ed0bd395e78df6b4721ea21bd590bdf9f6137fbc3704ac9e57aa7c4ad76273dea34f416cb3c6832c36f4afdc79df8af1ed2934e5b9f895b6a30d22bfcad86b12a3ea40f6b66ae5ddb4c4c42be2c4c45c5f339366b2cdd568b9dd9f802b7177ac63bd073bdc056dfeaa2c41b7c3d53a7a14e3fd42fc8eea9f2fc2cb176b6eb1baa1c45b580647285f016dc8ea8b5a87c1035b8ef91c57af561dd786ec0dd183466eaba95b7a205efc7d12eca7a9a077a1d182c37f87c90246b188ddf4c490da6ab004bf5d79c709eadb0df065b79d5ed92bda6f688f7d459855cef88ef51fd251363dd7bfac62add8465336675d6ff1529a95d77765bb5ffcfee9eeae0b3364b7cfb51136ce663e5fffc03975bb6320000001e78015590c90dc23014441d8725ec6109fb12502e5c39724a2914001d0489129090e8840e28291c38c19f89e3285f1a795efe77c676f893525258590027f9bc0eb52b9ca1e89888d32217ab133d9442bf22ca0a54cd81bd5a89ea86b4c6a4478ae3ef0dd4400c0bd4343e4b68593a8b6b8b3a36d4bb0876ed00537b190677924fd2a9af30dae755c5044fe0804da5d3589dc40e452351201ae717dfbf3139b191fceb543eb11c9c776640f1c5e639b9e82d72e2be6589569610b1b6c4a36eec9310c3e20018de96bbbb62ef1f0b1d2c2d",
-
- // this blob was captured during the execution of a Python client
- "00010000015378018d91594fc2401485cf48cb52c01571f93bc44713a3c657832d8626dd8482e2aff7bb4390faa03193cedce59cbb9c4aba9674c6996acd972ae37ee19ee94e0b958ab1969c126f09f64a43f21bd53e7ecf6d8814f64c09f9cb30d00376e1bd23eac770734e0ac758899eb017d82b7a65ba2596fbfa1b98a93ef125e7fa6066206a6225f51e79739f0bd4e35de3c7df599badeb227da822be68f02c73a8b15e7d3ddb6bcefc4bcffebdff4930649a77ea54bc15afedb99f2f54401ddbc9ea471a61175ec30c2b69f4378dded8d574dca27b03e7592d78b9eff04c07d374ee3795da2ea48ae96e2ce9806e05c8edfe725de253f689b96358a6d80054e9d11d77c8e405d14437bc35c88d266457de93fa51f44377cbd974d2395a55e0f77f7e82bf6aa87faa0ef96da5dd1fb9500b760c521aba31ec9ac9b27f6a7d1cfeadf5586dfa55d4dcf51bb99ed726696ef105d47175b0000000147801636080004620c504c4cc20060303632dc3f49c4fb940160b0b580044d402312b8c1708e2b1c17825201e3b8c9703e271c078607d9c301e3b488e0bc65b03b2821bc613ca480032798098f73f0c80e4f8dfe526804de103f1b817052b8079fc201e8b8f9403982700e209f941550a8278bc2d5c17186a40c2402c0c12010101900b44c04cc69d0160799095a26011f955d601301d6260110686fa1339409638104b404518aabd41864842b99607003c0523c7",
-
- // this blob was captured during the execution of the MetricsCompress test
- "00010000002078016360606062000156863c8644865c86540643208f918591c10800198d021200000002780153f80f040c4000a2418005c40101964bb5409211cc666010d30300a21b0db1"
- };
-
- var blob = blobs[blobNo];
-
- // get the bytes
- Console.WriteLine($"Blob is {blob.Length} chars -> {blob.Length/2} bytes");
- var bytes = new byte[blob.Length / 2];
- for (var i = 0; i < blob.Length; i += 2)
- bytes[i/2] = byte.Parse(blob.Substring(i, 2), NumberStyles.HexNumber);
-
- // decompress in C#
- var metrics = MetricsDecompressor.GetMetrics(bytes, true);
- foreach (var metric in metrics) Console.WriteLine(metric);
-
- // consume in Java - will throw if exit code is not zero
- await JavaConsume(bytes);
- }
-
- private async Task JavaConsume(byte[] bytes)
- {
- const string scriptTemplate = @"
-// import types
-var ArrayOfBytes = Java.type(""byte[]"")
-var MetricsCompressor = Java.type(""com.hazelcast.internal.metrics.impl.MetricsCompressor"")
-var MetricConsumer = Java.type(""com.hazelcast.internal.metrics.MetricConsumer"")
-var StringBuilder = Java.type(""java.lang.StringBuilder"")
-
-// prepare bytes
-var bytes = new ArrayOfBytes($$COUNT$$)
-$$BYTES$$
-
-// consumer will append to the string builder
-var text = new StringBuilder()
-var TestConsumer = Java.extend(MetricConsumer, {
- consumeLong: function(descriptor, value) {
- text.append(""prefix = "")
- text.append(descriptor.prefix())
- text.append(""\n"")
- text.append(""disc.key = "")
- text.append(descriptor.discriminator())
- text.append(""\n"")
- text.append(""disc.val = "")
- text.append(descriptor.discriminatorValue())
- text.append(""\n"")
- text.append(""string = "")
- text.append(descriptor.metricString())
- text.append(""\n"")
-
- text.append(descriptor.metric())
- text.append("" = "")
- text.append(value)
- text.append(""\n"")
- },
- consumeDouble: function(descriptor, value) {
- text.append(descriptor.metric())
- text.append("" = "")
- text.append(value)
- text.append(""\n"")
- }
-})
-var consumer = new TestConsumer()
-MetricsCompressor.extractMetrics(bytes, consumer)
-
-result = """" + text
-";
-
- var script = scriptTemplate
- .Replace("$$COUNT$$", bytes.Length.ToString())
- .Replace("$$BYTES$$", string.Join("\n",
- bytes.Select((x, i) => $"bytes[{i}] = {bytes[i]}")));
-
- var response = await RcClient.ExecuteOnControllerAsync(RcCluster.Id, script, Lang.JAVASCRIPT);
- Assert.That(response.Success, $"message: {response.Message}");
- Assert.That(response.Result, Is.Not.Null);
- var resultString = Encoding.UTF8.GetString(response.Result, 0, response.Result.Length).Trim();
- Console.WriteLine("JAVA OUTPUT:");
- Console.WriteLine(resultString);
- }
}
}
diff --git a/src/Hazelcast.Net.Tests/Cloud/MetricsRemoteTests.cs b/src/Hazelcast.Net.Tests/Cloud/MetricsRemoteTests.cs
new file mode 100644
index 0000000000..51043021e6
--- /dev/null
+++ b/src/Hazelcast.Net.Tests/Cloud/MetricsRemoteTests.cs
@@ -0,0 +1,146 @@
+// Copyright (c) 2008-2023, Hazelcast, Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+using System;
+using System.Globalization;
+using System.Linq;
+using System.Text;
+using System.Threading.Tasks;
+using Hazelcast.Metrics;
+using Hazelcast.Testing;
+using Hazelcast.Testing.Remote;
+using NUnit.Framework;
+
+namespace Hazelcast.Tests.Cloud;
+
+[TestFixture]
+public class MetricsRemoteTests : SingleMemberClientRemoteTestBase
+{
+ [TestCase(0)]
+ [TestCase(1)]
+ [TestCase(2)]
+ [TestCase(3)]
+ [Timeout(30_000)]
+ public async Task MetricsDecompressorTests(int blobNo)
+ {
+ var blobs = new[]
+ {
+ // this blob was captured during the execution of a .NET client
+ "0001000001bd78018d93594fc2501085cf95452da222b8a3fe054d8c898fc6e54913a3c657835095a4052cc5edd7fbcd452d90684cd3de65b633674e25ed482aabaa865e78db8af8def30d75a9445d35d9f579ba9cfaf83a6d701769c029c596e8185b8727e43e25839d6e58631f99922f568fc89c5bc716f3b4f1b3d8966ed927ec077845bae02ef695de758de583b35473257c423c26b34bab9ac5f2826f56db506eba406f54b5ec599c59e615e8c1e70bc7ea49f9fc123d3fe99dce2cdfef880af932f85ef1e8b1f6588da50cf19af2e4b12eade2966aec3b9edf885d6b04d11571cf746f1c0fbdeb73ce4749753f0bbbbfe61d766127c3d6f7ac477e4e59e7dfac4b2b2ac1a4e1bbd3ae0eb447d5a60eb50f9ee2540d6ecec863533ef1b59b30d2c63bf5ecdb4407204ef15ec81527785a77057c4d13865a5aa6db0eb56cde52d5cdb036c8670c36e1c16ea7995497fc219effad3ce5864c6cb879b8367db574ea5125cc6814e376108c61349b3125cd30ef1e58321d1f731efce8539a035bf2d5ed50b3425315e24d07a6d258e7c437a87ec40c4df10d3db24a8bb06c8cd974c6d52b0585ffaaa954f85b4d1515c167b3f9c6b7e466c1667c8cfc319f117fa36c0000001a78015590490e825010441b704070c001678d319a78000fc019d8b8e500ba75250b6fe0c218171ecc6b7802ad6af83fb192caaf4777bad3acbe90407c55042797e73acb990a7aa4d70b920b7b7c9dcd3d12d62b702152d580d66a7f542fc9ddb1d3577a9dd384d4e01a152928b3e894d0d211a909b7ec52ff046cdb06edef1418df9422252f11616b574fc555ef2db1a745f1924c32c43e3c806378680e3f7cd839b22b75ea189f28073f4a64a219b4274d4b928034331492e68674cac212572c2dc90fd6e326f5",
+
+ // this blob was captured during the execution of a .NET client
+ "0001000001f278018d93dd6fd26018c54fb78250d8701bea36bff53ff0c2c44bddf4464d165df4d230a84ad2422d30c5bfdedf79191648344bd3be1fcfd779ce792ae9a9a41d1da8a74bdea132be177c539da9d4587d76139e31a709be918eb9cb34e334c556ea04db8827e57e4a069fce59f31039255fae82c8ede8085bce33c4cfb1037d625fb29fe195e93d7779a834d7472cbf394bdda8854f8ac76676e98e9a582ef1ad6a1be5dd28d12faa3a7b1567cbae127d0df9d2b57a521cefd3f377cde9ccf9fe8da816ef80ef271e056bc16a962ac4878ac9e32e5df1bebaec4781df8cdd6005d107e27ed0bd395e78df6b4721ea21bd590bdf9f6137fbc3704ac9e57aa7c4ad76273dea34f416cb3c6832c36f4afdc79df8af1ed2934e5b9f895b6a30d22bfcad86b12a3ea40f6b66ae5ddb4c4c42be2c4c45c5f339366b2cdd568b9dd9f802b7177ac63bd073bdc056dfeaa2c41b7c3d53a7a14e3fd42fc8eea9f2fc2cb176b6eb1baa1c45b580647285f016dc8ea8b5a87c1035b8ef91c57af561dd786ec0dd183466eaba95b7a205efc7d12eca7a9a077a1d182c37f87c90246b188ddf4c490da6ab004bf5d79c709eadb0df065b79d5ed92bda6f688f7d459855cef88ef51fd251363dd7bfac62add8465336675d6ff1529a95d77765bb5ffcfee9eeae0b3364b7cfb51136ce663e5fffc03975bb6320000001e78015590c90dc23014441d8725ec6109fb12502e5c39724a2914001d0489129090e8840e28291c38c19f89e3285f1a795efe77c676f893525258590027f9bc0eb52b9ca1e89888d32217ab133d9442bf22ca0a54cd81bd5a89ea86b4c6a4478ae3ef0dd4400c0bd4343e4b68593a8b6b8b3a36d4bb0876ed00537b190677924fd2a9af30dae755c5044fe0804da5d3589dc40e452351201ae717dfbf3139b191fceb543eb11c9c776640f1c5e639b9e82d72e2be6589569610b1b6c4a36eec9310c3e20018de96bbbb62ef1f0b1d2c2d",
+
+ // this blob was captured during the execution of a Python client
+ "00010000015378018d91594fc2401485cf48cb52c01571f93bc44713a3c657832d8626dd8482e2aff7bb4390faa03193cedce59cbb9c4aba9674c6996acd972ae37ee19ee94e0b958ab1969c126f09f64a43f21bd53e7ecf6d8814f64c09f9cb30d00376e1bd23eac770734e0ac758899eb017d82b7a65ba2596fbfa1b98a93ef125e7fa6066206a6225f51e79739f0bd4e35de3c7df599badeb227da822be68f02c73a8b15e7d3ddb6bcefc4bcffebdff4930649a77ea54bc15afedb99f2f54401ddbc9ea471a61175ec30c2b69f4378dded8d574dca27b03e7592d78b9eff04c07d374ee3795da2ea48ae96e2ce9806e05c8edfe725de253f689b96358a6d80054e9d11d77c8e405d14437bc35c88d266457de93fa51f44377cbd974d2395a55e0f77f7e82bf6aa87faa0ef96da5dd1fb9500b760c521aba31ec9ac9b27f6a7d1cfeadf5586dfa55d4dcf51bb99ed726696ef105d47175b0000000147801636080004620c504c4cc20060303632dc3f49c4fb940160b0b580044d402312b8c1708e2b1c17825201e3b8c9703e271c078607d9c301e3b488e0bc65b03b2821bc613ca480032798098f73f0c80e4f8dfe526804de103f1b817052b8079fc201e8b8f9403982700e209f941550a8278bc2d5c17186a40c2402c0c12010101900b44c04cc69d0160799095a26011f955d601301d6260110686fa1339409638104b404518aabd41864842b99607003c0523c7",
+
+ // this blob was captured during the execution of the MetricsCompress test
+ "00010000002078016360606062000156863c8644865c86540643208f918591c10800198d021200000002780153f80f040c4000a2418005c40101964bb5409211cc666010d30300a21b0db1"
+ };
+
+ var blob = blobs[blobNo];
+
+ // get the bytes
+ Console.WriteLine($"Blob is {blob.Length} chars -> {blob.Length / 2} bytes");
+ var bytes = new byte[blob.Length / 2];
+ for (var i = 0; i < blob.Length; i += 2)
+ bytes[i / 2] = byte.Parse(blob.Substring(i, 2), NumberStyles.HexNumber);
+
+ // decompress in C#
+ var metrics = MetricsTests.MetricsDecompressor.GetMetrics(bytes, true);
+ foreach (var metric in metrics) Console.WriteLine(metric);
+
+ // consume in Java - will throw if exit code is not zero
+ await JavaConsume(bytes);
+ }
+
+ [Test]
+ [Timeout(30_000)]
+ public async Task MetricsCompressDecompressorTests()
+ {
+ // compress some metrics
+ var compressor = new MetricsCompressor();
+ compressor.Append(MetricDescriptor.Create("name1", MetricUnit.Count).WithValue(1234));
+ compressor.Append(MetricDescriptor.Create("name2", MetricUnit.Count).WithValue(5678));
+ var bytes = compressor.GetBytesAndReset();
+
+ // decompress in C#
+ var metrics = MetricsTests.MetricsDecompressor.GetMetrics(bytes, true);
+ foreach (var metric in metrics) Console.WriteLine(metric);
+
+ // consume in Java - will throw if exit code is not zero
+ await JavaConsume(bytes);
+ }
+
+ private async Task JavaConsume(byte[] bytes)
+ {
+ const string scriptTemplate = @"
+// import types
+var ArrayOfBytes = Java.type(""byte[]"")
+var MetricsCompressor = Java.type(""com.hazelcast.internal.metrics.impl.MetricsCompressor"")
+var MetricConsumer = Java.type(""com.hazelcast.internal.metrics.MetricConsumer"")
+var StringBuilder = Java.type(""java.lang.StringBuilder"")
+
+// prepare bytes
+var bytes = new ArrayOfBytes($$COUNT$$)
+$$BYTES$$
+
+// consumer will append to the string builder
+var text = new StringBuilder()
+var TestConsumer = Java.extend(MetricConsumer, {
+ consumeLong: function(descriptor, value) {
+ text.append(""prefix = "")
+ text.append(descriptor.prefix())
+ text.append(""\n"")
+ text.append(""disc.key = "")
+ text.append(descriptor.discriminator())
+ text.append(""\n"")
+ text.append(""disc.val = "")
+ text.append(descriptor.discriminatorValue())
+ text.append(""\n"")
+ text.append(""string = "")
+ text.append(descriptor.metricString())
+ text.append(""\n"")
+
+ text.append(descriptor.metric())
+ text.append("" = "")
+ text.append(value)
+ text.append(""\n"")
+ },
+ consumeDouble: function(descriptor, value) {
+ text.append(descriptor.metric())
+ text.append("" = "")
+ text.append(value)
+ text.append(""\n"")
+ }
+})
+var consumer = new TestConsumer()
+MetricsCompressor.extractMetrics(bytes, consumer)
+
+result = """" + text
+";
+
+ var script = scriptTemplate
+ .Replace("$$COUNT$$", bytes.Length.ToString())
+ .Replace("$$BYTES$$", string.Join("\n",
+ bytes.Select((x, i) => $"bytes[{i}] = {bytes[i]}")));
+
+ var response = await RcClient.ExecuteOnControllerAsync(RcCluster.Id, script, Lang.JAVASCRIPT);
+ Assert.That(response.Success, $"message: {response.Message}");
+ Assert.That(response.Result, Is.Not.Null);
+ var resultString = Encoding.UTF8.GetString(response.Result, 0, response.Result.Length).Trim();
+ Console.WriteLine("JAVA OUTPUT:");
+ Console.WriteLine(resultString);
+ }
+}
diff --git a/src/Hazelcast.Net.Tests/Cloud/MetricsTests.cs b/src/Hazelcast.Net.Tests/Cloud/MetricsTests.cs
new file mode 100644
index 0000000000..b14e69a528
--- /dev/null
+++ b/src/Hazelcast.Net.Tests/Cloud/MetricsTests.cs
@@ -0,0 +1,285 @@
+// Copyright (c) 2008-2023, Hazelcast, Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+using System;
+using System.Collections.Generic;
+using System.IO;
+using System.Linq;
+using System.Text;
+using Hazelcast.Core;
+using Hazelcast.Metrics;
+using Hazelcast.Polyfills;
+using NUnit.Framework;
+
+namespace Hazelcast.Tests.Cloud;
+
+[TestFixture]
+public class MetricsTests
+{
+ [Test]
+ public void ZLibTest()
+ {
+ // this tests that a zipped blob of text can be unzipped
+ const string sourceString = "this is a test";
+
+ // compress the string
+ var sourceBytes = Encoding.UTF8.GetBytes(sourceString);
+ var sourceStream = new MemoryStream(sourceBytes);
+
+ var compressedStream = new MemoryStream();
+ var compress = ZLibStreamFactory.Compress(compressedStream, false);
+ sourceStream.CopyTo(compress);
+ compress.Dispose(); // because, Flush is not enough
+ var compressedBytes = compressedStream.ToArray();
+
+ // dump the compressed bytes to console
+ Console.WriteLine(compressedBytes.Dump(formatted: false));
+
+ // decompress
+ compressedStream = new MemoryStream(compressedBytes);
+ var decompress = ZLibStreamFactory.Decompress(compressedStream, false);
+ var decompressedStream = new MemoryStream();
+ decompress.CopyTo(decompressedStream);
+ var resultBytes = decompressedStream.ToArray();
+ var resultString = Encoding.UTF8.GetString(resultBytes);
+
+ // validate
+ Assert.That(resultString, Is.EqualTo(sourceString));
+ }
+
+ [Test]
+ public void MetricsCompress()
+ {
+ // compress some metrics
+ var compressor = new MetricsCompressor();
+ compressor.Append(MetricDescriptor.Create("name1", MetricUnit.Count).WithValue(1234));
+ compressor.Append(MetricDescriptor.Create("name2", MetricUnit.Count).WithValue(5678));
+ var bytes = compressor.GetBytesAndReset();
+
+ // dump the compressed bytes to console
+ Console.WriteLine(bytes.Dump(formatted: false));
+
+ // get the metrics back
+ var metrics = MetricsDecompressor.GetMetrics(bytes);
+ Assert.That(metrics.Count(), Is.EqualTo(2));
+ }
+
+ internal static class MetricsDecompressor
+ {
+ private static string _prefix;
+ private static string _name;
+ private static string _discName;
+ private static string _discValue;
+ private static MetricUnit _unit;
+
+ // note: BytesExtensions ReadInt() etc methods are highly optimized and assume that boundary
+ // checks have been properly performed beforehand - which means that if they fail, they can
+ // take the entire process down - so here we protect them with CanRead() calls which we would
+ // not use in the actual code - so we get a "normal" exception
+
+ private static byte[] Decompress(byte[] bytes)
+ {
+ try
+ {
+ using var bytesStream = new MemoryStream(bytes);
+ using var decompress = ZLibStreamFactory.Decompress(bytesStream, false);
+ var u = new MemoryStream();
+ decompress.CopyTo(u);
+ return u.ToArray();
+ }
+ catch (Exception e)
+ {
+ Console.WriteLine("Failed to decompress!");
+ Console.WriteLine(e);
+ return null;
+ }
+ }
+
+ private static Dictionary<int, string> GetStrings(byte[] stringsBytes, bool verbose)
+ {
+ var stringsCount = stringsBytes.ReadInt(0, Endianness.BigEndian);
+ if (verbose) Console.WriteLine($"Containing {stringsCount} strings");
+ var strings = new Dictionary<int, string>();
+ var pos = 4;
+ char[] pchars = null;
+ for (var i = 0; i < stringsCount; i++)
+ {
+ var stringId = stringsBytes.CanRead(pos, BytesExtensions.SizeOfInt).ReadInt(pos, Endianness.BigEndian);
+ pos += 4;
+ var commonLen = stringsBytes.ReadByte(pos++);
+ var diffLen = stringsBytes.ReadByte(pos++);
+ var chars = new char[commonLen + diffLen];
+ for (var j = 0; j < commonLen; j++)
+ {
+ chars[j] = pchars[j];
+ }
+ for (var j = commonLen; j < commonLen + diffLen; j++)
+ {
+ chars[j] = stringsBytes.CanRead(pos, BytesExtensions.SizeOfChar).ReadChar(pos, Endianness.BigEndian);
+ pos += 2;
+ }
+ pchars = chars;
+ strings[stringId] = new string(chars);
+ if (verbose) Console.WriteLine($"s[{stringId:000}]={strings[stringId]}");
+ }
+ return strings;
+ }
+
+ private static string GetString(Dictionary<int, string> strings, int id)
+ {
+ if (id < 0) return null;
+ if (strings.TryGetValue(id, out var value)) return value;
+ return $"<unknown:{id}>";
+ }
+
+ private static Metric GetMetric(byte[] metricsBytes, ref int pos, Dictionary<int, string> strings, bool verbose)
+ {
+ var mask = (MetricsCompressor.DescriptorMask)metricsBytes.ReadByte(pos);
+ if (verbose) Console.WriteLine($" Mask: 0x{(byte)mask:x2} -> {mask}");
+ pos += 1;
+
+ var prefix = _prefix;
+ if (mask.HasNone(MetricsCompressor.DescriptorMask.Prefix))
+ {
+ var id = metricsBytes.CanRead(pos, BytesExtensions.SizeOfInt).ReadInt(pos, Endianness.BigEndian);
+ prefix = GetString(strings, id);
+ if (verbose) Console.WriteLine($" PrefixId: {id} -> {prefix}");
+ pos += 4;
+ }
+
+ var name = _name;
+ if (mask.HasNone(MetricsCompressor.DescriptorMask.Name))
+ {
+ var id = metricsBytes.CanRead(pos, BytesExtensions.SizeOfInt).ReadInt(pos, Endianness.BigEndian);
+ name = GetString(strings, id);
+ if (verbose) Console.WriteLine($" NameId: {id} -> {name}");
+ pos += 4;
+ }
+
+ var discName = _discName;
+ if (mask.HasNone(MetricsCompressor.DescriptorMask.DiscriminatorName))
+ {
+ var id = metricsBytes.CanRead(pos, BytesExtensions.SizeOfInt).ReadInt(pos, Endianness.BigEndian);
+ discName = GetString(strings, id);
+ if (verbose) Console.WriteLine($" Disc.Key: {id} -> {discName}");
+ pos += 4;
+ }
+
+ var discValue = _discValue;
+ if (mask.HasNone(MetricsCompressor.DescriptorMask.DiscriminatorValue))
+ {
+ var id = metricsBytes.CanRead(pos, BytesExtensions.SizeOfInt).ReadInt(pos, Endianness.BigEndian);
+ discValue = GetString(strings, id);
+ if (verbose) Console.WriteLine($" Disc.Value: {id} -> {discValue}");
+ pos += 4;
+ }
+
+ var unit = _unit;
+ if (mask.HasNone(MetricsCompressor.DescriptorMask.Unit))
+ {
+ unit = (MetricUnit)metricsBytes.CanRead(pos, BytesExtensions.SizeOfByte).ReadByte(pos);
+ if ((byte)unit == 255) unit = MetricUnit.None;
+ if (verbose) Console.WriteLine($" Unit: {unit}");
+ pos += 1;
+ }
+
+ if (mask.HasNone(MetricsCompressor.DescriptorMask.ExcludedTargets))
+ {
+ var excludedTargets = metricsBytes.CanRead(pos, BytesExtensions.SizeOfByte).ReadByte(pos);
+ if (verbose) Console.WriteLine($" ExcludedTargets: {excludedTargets}");
+ pos += 1;
+ }
+ if (mask.HasNone(MetricsCompressor.DescriptorMask.TagCount))
+ {
+ var tagCount = metricsBytes.CanRead(pos, BytesExtensions.SizeOfByte).ReadByte(pos);
+ if (verbose) Console.WriteLine($" TagCount: {tagCount}");
+ pos += 1;
+ }
+
+ Metric metric;
+ var type = (MetricValueType)metricsBytes.ReadByte(pos);
+ pos += 1;
+ if (verbose) Console.WriteLine($" ValueType: {type}");
+ switch (type)
+ {
+ case MetricValueType.Double:
+ var d = metricsBytes.CanRead(pos, BytesExtensions.SizeOfDouble).ReadDouble(pos, Endianness.BigEndian);
+ if (verbose) Console.WriteLine($" Value: {d}");
+ pos += BytesExtensions.SizeOfDouble;
+ var doubleDescriptor = MetricDescriptor.Create(prefix, name, unit);
+ if (discName != null) doubleDescriptor = doubleDescriptor.WithDiscriminator(discName, discValue);
+ metric = doubleDescriptor.WithValue(d);
+ break;
+ case MetricValueType.Long:
+ var l = metricsBytes.CanRead(pos, BytesExtensions.SizeOfLong).ReadLong(pos, Endianness.BigEndian);
+ if (verbose) Console.WriteLine($" Value: {l}");
+ pos += BytesExtensions.SizeOfLong;
+ var longDescriptor = MetricDescriptor.Create(prefix, name, unit);
+ if (discName != null) longDescriptor = longDescriptor.WithDiscriminator(discName, discValue);
+ metric = longDescriptor.WithValue(l);
+ break;
+ default:
+ if (verbose) Console.WriteLine(" Value: ?!");
+ // TODO: how shall we handle eg strings?!
+ metric = null;
+ break;
+ }
+
+ _prefix = prefix;
+ _name = name;
+ _discName = discName;
+ _discValue = discValue;
+ _unit = unit;
+
+ return metric;
+ }
+
+ public static IEnumerable<Metric> GetMetrics(byte[] bytes, bool verbose = false)
+ {
+ // get the strings blob and decompress it
+ var stringsLength = bytes.CanRead(2, BytesExtensions.SizeOfInt).ReadInt(2, Endianness.BigEndian);
+ if (verbose) Console.WriteLine($"StringsLength is {stringsLength} bytes [{2 + 4}..{2 + 4 + stringsLength - 1}]");
+ var stringsBytes = new byte[stringsLength];
+ for (var i = 0; i < stringsLength; i++) stringsBytes[i] = bytes[2 + 4 + i];
+ var stringsData = Decompress(stringsBytes);
+ if (verbose) Console.WriteLine($"Uncompressed to {stringsData.Length} bytes");
+
+ // build the strings dictionary
+ var strings = GetStrings(stringsData, verbose);
+ if (verbose) Console.WriteLine($"Contains {strings.Count} strings");
+
+ // get the metrics count
+ var metricsCount = bytes.ReadInt(2 + 4 + stringsLength, Endianness.BigEndian);
+ Console.WriteLine($"MetricsCount is {metricsCount} metrics");
+
+ // get the metrics blob and decompress it
+ var metricsLength = bytes.Length - 2 - 4 - stringsLength - 4; // everything that is left
+ if (verbose) Console.WriteLine($"MetricsLength is {metricsLength} bytes [{2 + 4 + stringsLength + 4}..{2 + 4 + stringsLength + 4 + metricsLength}]");
+ var metricsBytes = new byte[metricsLength];
+ for (var i = 0; i < metricsLength; i++) metricsBytes[i] = bytes[2 + 4 + stringsLength + 4 + i];
+ var metricsData = Decompress(metricsBytes);
+ if (verbose) Console.WriteLine($"Uncompressed to {metricsData.Length} bytes");
+
+ // get the metrics
+ var pos = 0;
+ var metrics = new List<Metric>();
+ for (var i = 0; i < metricsCount; i++)
+ {
+ if (verbose) Console.WriteLine($"[{i}]");
+ metrics.Add(GetMetric(metricsData, ref pos, strings, verbose));
+ }
+ return metrics;
+ }
+ }
+}
diff --git a/src/Hazelcast.Net.Tests/Hazelcast.Net.Tests.csproj b/src/Hazelcast.Net.Tests/Hazelcast.Net.Tests.csproj
index 9a7bddc198..be42092def 100644
--- a/src/Hazelcast.Net.Tests/Hazelcast.Net.Tests.csproj
+++ b/src/Hazelcast.Net.Tests/Hazelcast.Net.Tests.csproj
@@ -37,7 +37,7 @@
-
+
all
runtime; build; native; contentfiles; analyzers; buildtransitive
diff --git a/src/Hazelcast.Net/Hazelcast.Net.csproj b/src/Hazelcast.Net/Hazelcast.Net.csproj
index 1da375fc13..d262ea5f8a 100644
--- a/src/Hazelcast.Net/Hazelcast.Net.csproj
+++ b/src/Hazelcast.Net/Hazelcast.Net.csproj
@@ -60,7 +60,6 @@
-
all
@@ -194,4 +193,8 @@
+
+
+
+
diff --git a/src/Hazelcast.Net/Metrics/MetricsCompressor.cs b/src/Hazelcast.Net/Metrics/MetricsCompressor.cs
index 1d7c90b0bb..21c86355c0 100644
--- a/src/Hazelcast.Net/Metrics/MetricsCompressor.cs
+++ b/src/Hazelcast.Net/Metrics/MetricsCompressor.cs
@@ -16,7 +16,8 @@
using System.Collections.Generic;
using System.IO;
using Hazelcast.Core;
-using Ionic.Zlib;
+using System.IO.Compression;
+using Hazelcast.Polyfills;
namespace Hazelcast.Metrics
{
@@ -27,19 +28,14 @@ internal class MetricsCompressor : IDisposable
private const int InitialMetricsBufferSize = 2 << 11; // 4kB
private const int InitialTempBufferSize = 2 << 8; // 512B
- // about compression
- // read https://stackoverflow.com/questions/6522778/java-util-zip-deflater-equivalent-in-c-sharp
- // System.IO.Compression.DeflateStream is *not* Java-compatible!
- // now using ZlibStream from DotNetZip, would be worth benchmarking against SharpZipLib
-
// output streams for the blob containing the strings
private MemoryStream _stringsBuffer;
- private ZlibStream _stringsCompressStream;
+ private Stream _stringsCompressStream;
private DataOutputStream _stringsOutput;
// output streams for the blob containing the metrics
private MemoryStream _metricsBuffer;
- private ZlibStream _metricsCompressStream;
+ private Stream _metricsCompressStream;
private DataOutputStream _metricsOutput;
// temporary buffer to avoid fragmented writes to the compressed streams, when
@@ -47,7 +43,7 @@ internal class MetricsCompressor : IDisposable
private readonly MemoryStream _tempBuffer;
private readonly DataOutputStream _tempOutput;
- private SortedDictionary<string, int> _strings = new SortedDictionary<string, int>();
+ private SortedDictionary<string, int> _strings = new();
private int _count;
private IMetricDescriptor _lastDescriptor;
private bool _disposed, _closed;
@@ -61,26 +57,33 @@ public MetricsCompressor()
_tempOutput = new DataOutputStream(_tempBuffer);
}
- private static void Reset(ref MemoryStream buffer, ref ZlibStream compress, ref DataOutputStream output, int size)
+ private static void Reset(ref MemoryStream buffer, ref Stream compress, out DataOutputStream output, int size)
{
compress?.Dispose();
// shrink if capacity is more than 50% larger than the estimated size
- if (buffer == null || buffer.Capacity > 3 * size/ 2)
+ if (buffer == null)
+ {
+ buffer = new MemoryStream(size);
+ }
+ else if (buffer.Capacity > 3 * size / 2)
{
- buffer?.Dispose();
+ buffer.Dispose();
buffer = new MemoryStream(size);
}
+ else
+ {
+ buffer.Seek(0, SeekOrigin.Begin);
+ }
- buffer.Seek(0, SeekOrigin.Begin);
- compress = new ZlibStream(buffer, CompressionMode.Compress, CompressionLevel.BestSpeed, true);
+ compress = ZLibStreamFactory.Compress(buffer, true);
output = new DataOutputStream(compress);
}
private void Reset(int stringsBufferSize, int metricsBufferSize)
{
- Reset(ref _stringsBuffer, ref _stringsCompressStream, ref _stringsOutput, stringsBufferSize);
- Reset(ref _metricsBuffer, ref _metricsCompressStream, ref _metricsOutput, metricsBufferSize);
+ Reset(ref _stringsBuffer, ref _stringsCompressStream, out _stringsOutput, stringsBufferSize);
+ Reset(ref _metricsBuffer, ref _metricsCompressStream, out _metricsOutput, metricsBufferSize);
+ _strings = new SortedDictionary<string, int>();
_count = 0;
diff --git a/src/Hazelcast.Net/Polyfills/ZLib/CRC32.cs b/src/Hazelcast.Net/Polyfills/ZLib/CRC32.cs
new file mode 100644
index 0000000000..97593b9130
--- /dev/null
+++ b/src/Hazelcast.Net/Polyfills/ZLib/CRC32.cs
@@ -0,0 +1,814 @@
+// CRC32.cs
+// ------------------------------------------------------------------
+//
+// Copyright (c) 2011 Dino Chiesa.
+// All rights reserved.
+//
+// This code module is part of DotNetZip, a zipfile class library.
+//
+// ------------------------------------------------------------------
+//
+// This code is licensed under the Microsoft Public License.
+// See the file License.txt for the license details.
+// More info on: http://dotnetzip.codeplex.com
+//
+// ------------------------------------------------------------------
+//
+// Last Saved: <2011-August-02 18:25:54>
+//
+// ------------------------------------------------------------------
+//
+// This module defines the CRC32 class, which can do the CRC32 algorithm, using
+// arbitrary starting polynomials, and bit reversal. The bit reversal is what
+// distinguishes this CRC-32 used in BZip2 from the CRC-32 that is used in PKZIP
+// files, or GZIP files. This class does both.
+//
+// ------------------------------------------------------------------
+
+
+using System;
+using Interop = System.Runtime.InteropServices;
+
+namespace Ionic.Crc
+{
+ /// <summary>
+ /// Computes a CRC-32. The CRC-32 algorithm is parameterized - you
+ /// can set the polynomial and enable or disable bit
+ /// reversal. This can be used for GZIP, BZip2, or ZIP.
+ /// </summary>
+ /// <remarks>
+ /// This type is used internally by DotNetZip; it is generally not used
+ /// directly by applications wishing to create, read, or manipulate zip
+ /// archive files.
+ /// </remarks>
+
+ [Interop.GuidAttribute("ebc25cf6-9120-4283-b972-0e5520d0000C")]
+ [Interop.ComVisible(true)]
+#if !NETCF
+ [Interop.ClassInterface(Interop.ClassInterfaceType.AutoDispatch)]
+#endif
+ public class CRC32
+ {
+ /// <summary>
+ /// Indicates the total number of bytes applied to the CRC.
+ /// </summary>
+ public Int64 TotalBytesRead
+ {
+ get
+ {
+ return _TotalBytesRead;
+ }
+ }
+
+ /// <summary>
+ /// Indicates the current CRC for all blocks slurped in.
+ /// </summary>
+ public Int32 Crc32Result
+ {
+ get
+ {
+ return unchecked((Int32)(~_register));
+ }
+ }
+
+ ///
+ /// Returns the CRC32 for the specified stream.
+ ///
+ /// The stream over which to calculate the CRC32
+ /// the CRC32 calculation
+ public Int32 GetCrc32(System.IO.Stream input)
+ {
+ return GetCrc32AndCopy(input, null);
+ }
+
+ ///
+ /// Returns the CRC32 for the specified stream, and writes the input into the
+ /// output stream.
+ ///
+ /// The stream over which to calculate the CRC32
+ /// The stream into which to deflate the input
+ /// the CRC32 calculation
+ public Int32 GetCrc32AndCopy(System.IO.Stream input, System.IO.Stream output)
+ {
+ if (input == null)
+ throw new Exception("The input stream must not be null.");
+
+ unchecked
+ {
+ byte[] buffer = new byte[BUFFER_SIZE];
+ int readSize = BUFFER_SIZE;
+
+ _TotalBytesRead = 0;
+ int count = input.Read(buffer, 0, readSize);
+ if (output != null) output.Write(buffer, 0, count);
+ _TotalBytesRead += count;
+ while (count > 0)
+ {
+ SlurpBlock(buffer, 0, count);
+ count = input.Read(buffer, 0, readSize);
+ if (output != null) output.Write(buffer, 0, count);
+ _TotalBytesRead += count;
+ }
+
+ return (Int32)(~_register);
+ }
+ }
+
+
+ ///
+ /// Get the CRC32 for the given (word,byte) combo. This is a
+ /// computation defined by PKzip for PKZIP 2.0 (weak) encryption.
+ ///
+ /// The word to start with.
+ /// The byte to combine it with.
+ /// The CRC-ized result.
+ public Int32 ComputeCrc32(Int32 W, byte B)
+ {
+ return _InternalComputeCrc32((UInt32)W, B);
+ }
+
+ internal Int32 _InternalComputeCrc32(UInt32 W, byte B)
+ {
+ return (Int32)(crc32Table[(W ^ B) & 0xFF] ^ (W >> 8));
+ }
+
+
+ ///
+ /// Update the value for the running CRC32 using the given block of bytes.
+ /// This is useful when using the CRC32() class in a Stream.
+ ///
+ /// block of bytes to slurp
+ /// starting point in the block
+ /// how many bytes within the block to slurp
+ public void SlurpBlock(byte[] block, int offset, int count)
+ {
+ if (block == null)
+ throw new Exception("The data buffer must not be null.");
+
+ // bzip algorithm
+ for (int i = 0; i < count; i++)
+ {
+ int x = offset + i;
+ byte b = block[x];
+ if (this.reverseBits)
+ {
+ UInt32 temp = (_register >> 24) ^ b;
+ _register = (_register << 8) ^ crc32Table[temp];
+ }
+ else
+ {
+ UInt32 temp = (_register & 0x000000FF) ^ b;
+ _register = (_register >> 8) ^ crc32Table[temp];
+ }
+ }
+ _TotalBytesRead += count;
+ }
+
+
+ ///
+ /// Process one byte in the CRC.
+ ///
+ /// the byte to include into the CRC .
+ public void UpdateCRC(byte b)
+ {
+ if (this.reverseBits)
+ {
+ UInt32 temp = (_register >> 24) ^ b;
+ _register = (_register << 8) ^ crc32Table[temp];
+ }
+ else
+ {
+ UInt32 temp = (_register & 0x000000FF) ^ b;
+ _register = (_register >> 8) ^ crc32Table[temp];
+ }
+ }
+
+ ///
+ /// Process a run of N identical bytes into the CRC.
+ ///
+ ///
+ ///
+ /// This method serves as an optimization for updating the CRC when a
+ /// run of identical bytes is found. Rather than passing in a buffer of
+ /// length n, containing all identical bytes b, this method accepts the
+ /// byte value and the length of the (virtual) buffer - the length of
+ /// the run.
+ ///
+ ///
+ /// the byte to include into the CRC.
+ /// the number of times that byte should be repeated.
+ public void UpdateCRC(byte b, int n)
+ {
+ while (n-- > 0)
+ {
+ if (this.reverseBits)
+ {
+ uint temp = (_register >> 24) ^ b;
+ _register = (_register << 8) ^ crc32Table[(temp >= 0)
+ ? temp
+ : (temp + 256)];
+ }
+ else
+ {
+ UInt32 temp = (_register & 0x000000FF) ^ b;
+ _register = (_register >> 8) ^ crc32Table[(temp >= 0)
+ ? temp
+ : (temp + 256)];
+
+ }
+ }
+ }
+
+
+
+ private static uint ReverseBits(uint data)
+ {
+ unchecked
+ {
+ uint ret = data;
+ ret = (ret & 0x55555555) << 1 | (ret >> 1) & 0x55555555;
+ ret = (ret & 0x33333333) << 2 | (ret >> 2) & 0x33333333;
+ ret = (ret & 0x0F0F0F0F) << 4 | (ret >> 4) & 0x0F0F0F0F;
+ ret = (ret << 24) | ((ret & 0xFF00) << 8) | ((ret >> 8) & 0xFF00) | (ret >> 24);
+ return ret;
+ }
+ }
+
+ private static byte ReverseBits(byte data)
+ {
+ unchecked
+ {
+ uint u = (uint)data * 0x00020202;
+ uint m = 0x01044010;
+ uint s = u & m;
+ uint t = (u << 2) & (m << 1);
+ return (byte)((0x01001001 * (s + t)) >> 24);
+ }
+ }
+
+
+
+ private void GenerateLookupTable()
+ {
+ crc32Table = new UInt32[256];
+ unchecked
+ {
+ UInt32 dwCrc;
+ byte i = 0;
+ do
+ {
+ dwCrc = i;
+ for (byte j = 8; j > 0; j--)
+ {
+ if ((dwCrc & 1) == 1)
+ {
+ dwCrc = (dwCrc >> 1) ^ dwPolynomial;
+ }
+ else
+ {
+ dwCrc >>= 1;
+ }
+ }
+ if (reverseBits)
+ {
+ crc32Table[ReverseBits(i)] = ReverseBits(dwCrc);
+ }
+ else
+ {
+ crc32Table[i] = dwCrc;
+ }
+ i++;
+ } while (i!=0);
+ }
+
+#if VERBOSE
+ Console.WriteLine();
+ Console.WriteLine("private static readonly UInt32[] crc32Table = {");
+ for (int i = 0; i < crc32Table.Length; i+=4)
+ {
+ Console.Write(" ");
+ for (int j=0; j < 4; j++)
+ {
+ Console.Write(" 0x{0:X8}U,", crc32Table[i+j]);
+ }
+ Console.WriteLine();
+ }
+ Console.WriteLine("};");
+ Console.WriteLine();
+#endif
+ }
+
+
+ private uint gf2_matrix_times(uint[] matrix, uint vec)
+ {
+ uint sum = 0;
+ int i=0;
+ while (vec != 0)
+ {
+ if ((vec & 0x01)== 0x01)
+ sum ^= matrix[i];
+ vec >>= 1;
+ i++;
+ }
+ return sum;
+ }
+
+ private void gf2_matrix_square(uint[] square, uint[] mat)
+ {
+ for (int i = 0; i < 32; i++)
+ square[i] = gf2_matrix_times(mat, mat[i]);
+ }
+
+
+
+ ///
+ /// Combines the given CRC32 value with the current running total.
+ ///
+ ///
+ /// This is useful when using a divide-and-conquer approach to
+ /// calculating a CRC. Multiple threads can each calculate a
+ /// CRC32 on a segment of the data, and then combine the
+ /// individual CRC32 values at the end.
+ ///
+ /// the crc value to be combined with this one
+ /// the length of data the CRC value was calculated on
+ public void Combine(int crc, int length)
+ {
+ uint[] even = new uint[32]; // even-power-of-two zeros operator
+ uint[] odd = new uint[32]; // odd-power-of-two zeros operator
+
+ if (length == 0)
+ return;
+
+ uint crc1= ~_register;
+ uint crc2= (uint) crc;
+
+ // put operator for one zero bit in odd
+ odd[0] = this.dwPolynomial; // the CRC-32 polynomial
+ uint row = 1;
+ for (int i = 1; i < 32; i++)
+ {
+ odd[i] = row;
+ row <<= 1;
+ }
+
+ // put operator for two zero bits in even
+ gf2_matrix_square(even, odd);
+
+ // put operator for four zero bits in odd
+ gf2_matrix_square(odd, even);
+
+ uint len2 = (uint) length;
+
+ // apply len2 zeros to crc1 (first square will put the operator for one
+ // zero byte, eight zero bits, in even)
+ do {
+ // apply zeros operator for this bit of len2
+ gf2_matrix_square(even, odd);
+
+ if ((len2 & 1)== 1)
+ crc1 = gf2_matrix_times(even, crc1);
+ len2 >>= 1;
+
+ if (len2 == 0)
+ break;
+
+ // another iteration of the loop with odd and even swapped
+ gf2_matrix_square(odd, even);
+ if ((len2 & 1)==1)
+ crc1 = gf2_matrix_times(odd, crc1);
+ len2 >>= 1;
+
+
+ } while (len2 != 0);
+
+ crc1 ^= crc2;
+
+ _register= ~crc1;
+
+ //return (int) crc1;
+ return;
+ }
+
+
+ ///
+ /// Create an instance of the CRC32 class using the default settings: no
+ /// bit reversal, and a polynomial of 0xEDB88320.
+ ///
+ public CRC32() : this(false)
+ {
+ }
+
+ ///
+ /// Create an instance of the CRC32 class, specifying whether to reverse
+ /// data bits or not.
+ ///
+ ///
+ /// specify true if the instance should reverse data bits.
+ ///
+ ///
+ ///
+ /// In the CRC-32 used by BZip2, the bits are reversed. Therefore if you
+ /// want a CRC32 with compatibility with BZip2, you should pass true
+ /// here. In the CRC-32 used by GZIP and PKZIP, the bits are not
+ /// reversed; Therefore if you want a CRC32 with compatibility with
+ /// those, you should pass false.
+ ///
+ ///
+ public CRC32(bool reverseBits) :
+ this( unchecked((int)0xEDB88320), reverseBits)
+ {
+ }
+
+
+ ///
+ /// Create an instance of the CRC32 class, specifying the polynomial and
+ /// whether to reverse data bits or not.
+ ///
+ ///
+ /// The polynomial to use for the CRC, expressed in the reversed (LSB)
+ /// format: the highest ordered bit in the polynomial value is the
+ /// coefficient of the 0th power; the second-highest order bit is the
+ /// coefficient of the 1 power, and so on. Expressed this way, the
+ /// polynomial for the CRC-32C used in IEEE 802.3, is 0xEDB88320.
+ ///
+ ///
+ /// specify true if the instance should reverse data bits.
+ ///
+ ///
+ ///
+ ///
+ /// In the CRC-32 used by BZip2, the bits are reversed. Therefore if you
+ /// want a CRC32 with compatibility with BZip2, you should pass true
+ /// here for the reverseBits parameter. In the CRC-32 used by
+ /// GZIP and PKZIP, the bits are not reversed; Therefore if you want a
+ /// CRC32 with compatibility with those, you should pass false for the
+ /// reverseBits parameter.
+ ///
+ ///
+ public CRC32(int polynomial, bool reverseBits)
+ {
+ this.reverseBits = reverseBits;
+ this.dwPolynomial = (uint) polynomial;
+ this.GenerateLookupTable();
+ }
+
+ ///
+ /// Reset the CRC-32 class - clear the CRC "remainder register."
+ ///
+ ///
+ ///
+ /// Use this when employing a single instance of this class to compute
+ /// multiple, distinct CRCs on multiple, distinct data blocks.
+ ///
+ ///
+ public void Reset()
+ {
+ _register = 0xFFFFFFFFU;
+ }
+
+ // private member vars
+ private UInt32 dwPolynomial;
+ private Int64 _TotalBytesRead;
+ private bool reverseBits;
+ private UInt32[] crc32Table;
+ private const int BUFFER_SIZE = 8192;
+ private UInt32 _register = 0xFFFFFFFFU;
+ }
+
+
+ ///
+ /// A Stream that calculates a CRC32 (a checksum) on all bytes read,
+ /// or on all bytes written.
+ ///
+ ///
+ ///
+ ///
+ /// This class can be used to verify the CRC of a ZipEntry when
+ /// reading from a stream, or to calculate a CRC when writing to a
+ /// stream. The stream should be used to either read, or write, but
+ /// not both. If you intermix reads and writes, the results are not
+ /// defined.
+ ///
+ ///
+ ///
+ /// This class is intended primarily for use internally by the
+ /// DotNetZip library.
+ ///
+ ///
+ public class CrcCalculatorStream : System.IO.Stream, System.IDisposable
+ {
+ private static readonly Int64 UnsetLengthLimit = -99;
+
+ internal System.IO.Stream _innerStream;
+ private CRC32 _Crc32;
+ private Int64 _lengthLimit = -99;
+ private bool _leaveOpen;
+
+ ///
+ /// The default constructor.
+ ///
+ ///
+ ///
+ /// Instances returned from this constructor will leave the underlying
+ /// stream open upon Close(). The stream uses the default CRC32
+ /// algorithm, which implies a polynomial of 0xEDB88320.
+ ///
+ ///
+ /// The underlying stream
+ public CrcCalculatorStream(System.IO.Stream stream)
+ : this(true, CrcCalculatorStream.UnsetLengthLimit, stream, null)
+ {
+ }
+
+ ///
+ /// The constructor allows the caller to specify how to handle the
+ /// underlying stream at close.
+ ///
+ ///
+ ///
+ /// The stream uses the default CRC32 algorithm, which implies a
+ /// polynomial of 0xEDB88320.
+ ///
+ ///
+ /// The underlying stream
+ /// true to leave the underlying stream
+ /// open upon close of the CrcCalculatorStream; false otherwise.
+ public CrcCalculatorStream(System.IO.Stream stream, bool leaveOpen)
+ : this(leaveOpen, CrcCalculatorStream.UnsetLengthLimit, stream, null)
+ {
+ }
+
+ ///
+ /// A constructor allowing the specification of the length of the stream
+ /// to read.
+ ///
+ ///
+ ///
+ /// The stream uses the default CRC32 algorithm, which implies a
+ /// polynomial of 0xEDB88320.
+ ///
+ ///
+ /// Instances returned from this constructor will leave the underlying
+ /// stream open upon Close().
+ ///
+ ///
+ /// The underlying stream
+ /// The length of the stream to slurp
+ public CrcCalculatorStream(System.IO.Stream stream, Int64 length)
+ : this(true, length, stream, null)
+ {
+ if (length < 0)
+ throw new ArgumentException("length");
+ }
+
+ ///
+ /// A constructor allowing the specification of the length of the stream
+ /// to read, as well as whether to keep the underlying stream open upon
+ /// Close().
+ ///
+ ///
+ ///
+ /// The stream uses the default CRC32 algorithm, which implies a
+ /// polynomial of 0xEDB88320.
+ ///
+ ///
+ /// The underlying stream
+ /// The length of the stream to slurp
+ /// true to leave the underlying stream
+ /// open upon close of the CrcCalculatorStream; false otherwise.
+ public CrcCalculatorStream(System.IO.Stream stream, Int64 length, bool leaveOpen)
+ : this(leaveOpen, length, stream, null)
+ {
+ if (length < 0)
+ throw new ArgumentException("length");
+ }
+
+ ///
+ /// A constructor allowing the specification of the length of the stream
+ /// to read, as well as whether to keep the underlying stream open upon
+ /// Close(), and the CRC32 instance to use.
+ ///
+ ///
+ ///
+ /// The stream uses the specified CRC32 instance, which allows the
+ /// application to specify how the CRC gets calculated.
+ ///
+ ///
+ /// The underlying stream
+ /// The length of the stream to slurp
+ /// true to leave the underlying stream
+ /// open upon close of the CrcCalculatorStream; false otherwise.
+ /// the CRC32 instance to use to calculate the CRC32
+ public CrcCalculatorStream(System.IO.Stream stream, Int64 length, bool leaveOpen,
+ CRC32 crc32)
+ : this(leaveOpen, length, stream, crc32)
+ {
+ if (length < 0)
+ throw new ArgumentException("length");
+ }
+
+
+ // This ctor is private - no validation is done here. This is to allow the use
+ // of a (specific) negative value for the _lengthLimit, to indicate that there
+ // is no length set. So we validate the length limit in those ctors that use an
+ // explicit param, otherwise we don't validate, because it could be our special
+ // value.
+ private CrcCalculatorStream
+ (bool leaveOpen, Int64 length, System.IO.Stream stream, CRC32 crc32)
+ : base()
+ {
+ _innerStream = stream;
+ _Crc32 = crc32 ?? new CRC32();
+ _lengthLimit = length;
+ _leaveOpen = leaveOpen;
+ }
+
+
+ ///
+ /// Gets the total number of bytes run through the CRC32 calculator.
+ ///
+ ///
+ ///
+ /// This is either the total number of bytes read, or the total number of
+ /// bytes written, depending on the direction of this stream.
+ ///
+ public Int64 TotalBytesSlurped
+ {
+ get { return _Crc32.TotalBytesRead; }
+ }
+
+ ///
+ /// Provides the current CRC for all blocks slurped in.
+ ///
+ ///
+ ///
+ /// The running total of the CRC is kept as data is written or read
+ /// through the stream. read this property after all reads or writes to
+ /// get an accurate CRC for the entire stream.
+ ///
+ ///
+ public Int32 Crc
+ {
+ get { return _Crc32.Crc32Result; }
+ }
+
+ ///
+ /// Indicates whether the underlying stream will be left open when the
+ /// CrcCalculatorStream is Closed.
+ ///
+ ///
+ ///
+ /// Set this at any point before calling .
+ ///
+ ///
+ public bool LeaveOpen
+ {
+ get { return _leaveOpen; }
+ set { _leaveOpen = value; }
+ }
+
+ ///
+ /// Read from the stream
+ ///
+ /// the buffer to read
+ /// the offset at which to start
+ /// the number of bytes to read
+ /// the number of bytes actually read
+ public override int Read(byte[] buffer, int offset, int count)
+ {
+ int bytesToRead = count;
+
+ // Need to limit the # of bytes returned, if the stream is intended to have
+ // a definite length. This is especially useful when returning a stream for
+ // the uncompressed data directly to the application. The app won't
+ // necessarily read only the UncompressedSize number of bytes. For example
+ // wrapping the stream returned from OpenReader() into a StreadReader() and
+ // calling ReadToEnd() on it, We can "over-read" the zip data and get a
+ // corrupt string. The length limits that, prevents that problem.
+
+ if (_lengthLimit != CrcCalculatorStream.UnsetLengthLimit)
+ {
+ if (_Crc32.TotalBytesRead >= _lengthLimit) return 0; // EOF
+ Int64 bytesRemaining = _lengthLimit - _Crc32.TotalBytesRead;
+ if (bytesRemaining < count) bytesToRead = (int)bytesRemaining;
+ }
+ int n = _innerStream.Read(buffer, offset, bytesToRead);
+ if (n > 0) _Crc32.SlurpBlock(buffer, offset, n);
+ return n;
+ }
+
+ ///
+ /// Write to the stream.
+ ///
+ /// the buffer from which to write
+ /// the offset at which to start writing
+ /// the number of bytes to write
+ public override void Write(byte[] buffer, int offset, int count)
+ {
+ if (count > 0) _Crc32.SlurpBlock(buffer, offset, count);
+ _innerStream.Write(buffer, offset, count);
+ }
+
+ ///
+ /// Indicates whether the stream supports reading.
+ ///
+ public override bool CanRead
+ {
+ get { return _innerStream.CanRead; }
+ }
+
+ ///
+ /// Indicates whether the stream supports seeking.
+ ///
+ ///
+ ///
+ /// Always returns false.
+ ///
+ ///
+ public override bool CanSeek
+ {
+ get { return false; }
+ }
+
+ ///
+ /// Indicates whether the stream supports writing.
+ ///
+ public override bool CanWrite
+ {
+ get { return _innerStream.CanWrite; }
+ }
+
+ ///
+ /// Flush the stream.
+ ///
+ public override void Flush()
+ {
+ _innerStream.Flush();
+ }
+
+ ///
+ /// Returns the length of the underlying stream.
+ ///
+ public override long Length
+ {
+ get
+ {
+ if (_lengthLimit == CrcCalculatorStream.UnsetLengthLimit)
+ return _innerStream.Length;
+ else return _lengthLimit;
+ }
+ }
+
+ ///
+ /// The getter for this property returns the total bytes read.
+ /// If you use the setter, it will throw
+ /// .
+ ///
+ public override long Position
+ {
+ get { return _Crc32.TotalBytesRead; }
+ set { throw new NotSupportedException(); }
+ }
+
+ ///
+ /// Seeking is not supported on this stream. This method always throws
+ ///
+ ///
+ /// N/A
+ /// N/A
+ /// N/A
+ public override long Seek(long offset, System.IO.SeekOrigin origin)
+ {
+ throw new NotSupportedException();
+ }
+
+ ///
+ /// This method always throws
+ ///
+ ///
+ /// N/A
+ public override void SetLength(long value)
+ {
+ throw new NotSupportedException();
+ }
+
+
+ void IDisposable.Dispose()
+ {
+ Close();
+ }
+
+ ///
+ /// Closes the stream.
+ ///
+ public override void Close()
+ {
+ base.Close();
+ if (!_leaveOpen)
+ _innerStream.Close();
+ }
+
+ }
+
+}
\ No newline at end of file
diff --git a/src/Hazelcast.Net/Polyfills/ZLib/Deflate.cs b/src/Hazelcast.Net/Polyfills/ZLib/Deflate.cs
new file mode 100644
index 0000000000..96722507c5
--- /dev/null
+++ b/src/Hazelcast.Net/Polyfills/ZLib/Deflate.cs
@@ -0,0 +1,1879 @@
+// Deflate.cs
+// ------------------------------------------------------------------
+//
+// Copyright (c) 2009 Dino Chiesa and Microsoft Corporation.
+// All rights reserved.
+//
+// This code module is part of DotNetZip, a zipfile class library.
+//
+// ------------------------------------------------------------------
+//
+// This code is licensed under the Microsoft Public License.
+// See the file License.txt for the license details.
+// More info on: http://dotnetzip.codeplex.com
+//
+// ------------------------------------------------------------------
+//
+// last saved (in emacs):
+// Time-stamp: <2011-August-03 19:52:15>
+//
+// ------------------------------------------------------------------
+//
+// This module defines logic for handling the Deflate or compression.
+//
+// This code is based on multiple sources:
+// - the original zlib v1.2.3 source, which is Copyright (C) 1995-2005 Jean-loup Gailly.
+// - the original jzlib, which is Copyright (c) 2000-2003 ymnk, JCraft,Inc.
+//
+// However, this code is significantly different from both.
+// The object model is not the same, and many of the behaviors are different.
+//
+// In keeping with the license for these other works, the copyrights for
+// jzlib and zlib are here.
+//
+// -----------------------------------------------------------------------
+// Copyright (c) 2000,2001,2002,2003 ymnk, JCraft,Inc. All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// 1. Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in
+// the documentation and/or other materials provided with the distribution.
+//
+// 3. The names of the authors may not be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESSED OR IMPLIED WARRANTIES,
+// INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
+// FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL JCRAFT,
+// INC. OR ANY CONTRIBUTORS TO THIS SOFTWARE BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
+// OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+// EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// -----------------------------------------------------------------------
+//
+// This program is based on zlib-1.1.3; credit to authors
+// Jean-loup Gailly(jloup@gzip.org) and Mark Adler(madler@alumni.caltech.edu)
+// and contributors of zlib.
+//
+// -----------------------------------------------------------------------
+
+
+using System;
+
+namespace Ionic.Zlib
+{
+
+ internal enum BlockState
+ {
+ NeedMore = 0, // block not completed, need more input or more output
+ BlockDone, // block flush performed
+ FinishStarted, // finish started, need only more output at next deflate
+ FinishDone // finish done, accept no more input or output
+ }
+
+ internal enum DeflateFlavor
+ {
+ Store,
+ Fast,
+ Slow
+ }
+
+ internal sealed class DeflateManager
+ {
+ private static readonly int MEM_LEVEL_MAX = 9;
+ private static readonly int MEM_LEVEL_DEFAULT = 8;
+
+ internal delegate BlockState CompressFunc(FlushType flush);
+
+ internal class Config
+ {
+ // Use a faster search when the previous match is longer than this
+ internal int GoodLength; // reduce lazy search above this match length
+
+ // Attempt to find a better match only when the current match is
+ // strictly smaller than this value. This mechanism is used only for
+ // compression levels >= 4. For levels 1,2,3: MaxLazy is actually
+ // MaxInsertLength. (See DeflateFast)
+
+ internal int MaxLazy; // do not perform lazy search above this match length
+
+ internal int NiceLength; // quit search above this match length
+
+ // To speed up deflation, hash chains are never searched beyond this
+ // length. A higher limit improves compression ratio but degrades the speed.
+
+ internal int MaxChainLength;
+
+ internal DeflateFlavor Flavor;
+
+ private Config(int goodLength, int maxLazy, int niceLength, int maxChainLength, DeflateFlavor flavor)
+ {
+ this.GoodLength = goodLength;
+ this.MaxLazy = maxLazy;
+ this.NiceLength = niceLength;
+ this.MaxChainLength = maxChainLength;
+ this.Flavor = flavor;
+ }
+
+ public static Config Lookup(CompressionLevel level)
+ {
+ return Table[(int)level];
+ }
+
+
+ static Config()
+ {
+ Table = new Config[] {
+ new Config(0, 0, 0, 0, DeflateFlavor.Store),
+ new Config(4, 4, 8, 4, DeflateFlavor.Fast),
+ new Config(4, 5, 16, 8, DeflateFlavor.Fast),
+ new Config(4, 6, 32, 32, DeflateFlavor.Fast),
+
+ new Config(4, 4, 16, 16, DeflateFlavor.Slow),
+ new Config(8, 16, 32, 32, DeflateFlavor.Slow),
+ new Config(8, 16, 128, 128, DeflateFlavor.Slow),
+ new Config(8, 32, 128, 256, DeflateFlavor.Slow),
+ new Config(32, 128, 258, 1024, DeflateFlavor.Slow),
+ new Config(32, 258, 258, 4096, DeflateFlavor.Slow),
+ };
+ }
+
+ private static readonly Config[] Table;
+ }
+
+
+ private CompressFunc DeflateFunction;
+
+ private static readonly System.String[] _ErrorMessage = new System.String[]
+ {
+ "need dictionary",
+ "stream end",
+ "",
+ "file error",
+ "stream error",
+ "data error",
+ "insufficient memory",
+ "buffer error",
+ "incompatible version",
+ ""
+ };
+
+ // preset dictionary flag in zlib header
+ private static readonly int PRESET_DICT = 0x20;
+
+ private static readonly int INIT_STATE = 42;
+ private static readonly int BUSY_STATE = 113;
+ private static readonly int FINISH_STATE = 666;
+
+ // The deflate compression method
+ private static readonly int Z_DEFLATED = 8;
+
+ private static readonly int STORED_BLOCK = 0;
+ private static readonly int STATIC_TREES = 1;
+ private static readonly int DYN_TREES = 2;
+
+ // The three kinds of block type
+ private static readonly int Z_BINARY = 0;
+ private static readonly int Z_ASCII = 1;
+ private static readonly int Z_UNKNOWN = 2;
+
+ private static readonly int Buf_size = 8 * 2;
+
+ private static readonly int MIN_MATCH = 3;
+ private static readonly int MAX_MATCH = 258;
+
+ private static readonly int MIN_LOOKAHEAD = (MAX_MATCH + MIN_MATCH + 1);
+
+ private static readonly int HEAP_SIZE = (2 * InternalConstants.L_CODES + 1);
+
+ private static readonly int END_BLOCK = 256;
+
+ internal ZlibCodec _codec; // the zlib encoder/decoder
+ internal int status; // as the name implies
+ internal byte[] pending; // output still pending - waiting to be compressed
+ internal int nextPending; // index of next pending byte to output to the stream
+ internal int pendingCount; // number of bytes in the pending buffer
+
+ internal sbyte data_type; // UNKNOWN, BINARY or ASCII
+ internal int last_flush; // value of flush param for previous deflate call
+
+ internal int w_size; // LZ77 window size (32K by default)
+ internal int w_bits; // log2(w_size) (8..16)
+ internal int w_mask; // w_size - 1
+
+ //internal byte[] dictionary;
+ internal byte[] window;
+
+ // Sliding window. Input bytes are read into the second half of the window,
+ // and move to the first half later to keep a dictionary of at least wSize
+ // bytes. With this organization, matches are limited to a distance of
+ // wSize-MAX_MATCH bytes, but this ensures that IO is always
+ // performed with a length multiple of the block size.
+ //
+ // To do: use the user input buffer as sliding window.
+
+ internal int window_size;
+ // Actual size of window: 2*wSize, except when the user input buffer
+ // is directly used as sliding window.
+
+ internal short[] prev;
+ // Link to older string with same hash index. To limit the size of this
+ // array to 64K, this link is maintained only for the last 32K strings.
+ // An index in this array is thus a window index modulo 32K.
+
+ internal short[] head; // Heads of the hash chains or NIL.
+
+ internal int ins_h; // hash index of string to be inserted
+ internal int hash_size; // number of elements in hash table
+ internal int hash_bits; // log2(hash_size)
+ internal int hash_mask; // hash_size-1
+
+ // Number of bits by which ins_h must be shifted at each input
+ // step. It must be such that after MIN_MATCH steps, the oldest
+ // byte no longer takes part in the hash key, that is:
+ // hash_shift * MIN_MATCH >= hash_bits
+ internal int hash_shift;
+
+ // Window position at the beginning of the current output block. Gets
+ // negative when the window is moved backwards.
+
+ internal int block_start;
+
+ Config config;
+ internal int match_length; // length of best match
+ internal int prev_match; // previous match
+ internal int match_available; // set if previous match exists
+ internal int strstart; // start of string to insert into.....????
+ internal int match_start; // start of matching string
+ internal int lookahead; // number of valid bytes ahead in window
+
+ // Length of the best match at previous step. Matches not greater than this
+ // are discarded. This is used in the lazy match evaluation.
+ internal int prev_length;
+
+ // Insert new strings in the hash table only if the match length is not
+ // greater than this length. This saves time but degrades compression.
+ // max_insert_length is used only for compression levels <= 3.
+
+ internal CompressionLevel compressionLevel; // compression level (1..9)
+ internal CompressionStrategy compressionStrategy; // favor or force Huffman coding
+
+
+ internal short[] dyn_ltree; // literal and length tree
+ internal short[] dyn_dtree; // distance tree
+ internal short[] bl_tree; // Huffman tree for bit lengths
+
+ internal Tree treeLiterals = new Tree(); // desc for literal tree
+ internal Tree treeDistances = new Tree(); // desc for distance tree
+ internal Tree treeBitLengths = new Tree(); // desc for bit length tree
+
+ // number of codes at each bit length for an optimal tree
+ internal short[] bl_count = new short[InternalConstants.MAX_BITS + 1];
+
+ // heap used to build the Huffman trees
+ internal int[] heap = new int[2 * InternalConstants.L_CODES + 1];
+
+ internal int heap_len; // number of elements in the heap
+ internal int heap_max; // element of largest frequency
+
+ // The sons of heap[n] are heap[2*n] and heap[2*n+1]. heap[0] is not used.
+ // The same heap array is used to build all trees.
+
+ // Depth of each subtree used as tie breaker for trees of equal frequency
+ internal sbyte[] depth = new sbyte[2 * InternalConstants.L_CODES + 1];
+
+ internal int _lengthOffset; // index for literals or lengths
+
+
+ // Size of match buffer for literals/lengths. There are 4 reasons for
+ // limiting lit_bufsize to 64K:
+ // - frequencies can be kept in 16 bit counters
+ // - if compression is not successful for the first block, all input
+ // data is still in the window so we can still emit a stored block even
+ // when input comes from standard input. (This can also be done for
+ // all blocks if lit_bufsize is not greater than 32K.)
+ // - if compression is not successful for a file smaller than 64K, we can
+ // even emit a stored file instead of a stored block (saving 5 bytes).
+ // This is applicable only for zip (not gzip or zlib).
+ // - creating new Huffman trees less frequently may not provide fast
+ // adaptation to changes in the input data statistics. (Take for
+ // example a binary file with poorly compressible code followed by
+ // a highly compressible string table.) Smaller buffer sizes give
+ // fast adaptation but have of course the overhead of transmitting
+ // trees more frequently.
+
+ internal int lit_bufsize;
+
+ internal int last_lit; // running index in l_buf
+
+ // Buffer for distances. To simplify the code, d_buf and l_buf have
+ // the same number of elements. To use different lengths, an extra flag
+ // array would be necessary.
+
+ internal int _distanceOffset; // index into pending; points to distance data??
+
+ internal int opt_len; // bit length of current block with optimal trees
+ internal int static_len; // bit length of current block with static trees
+ internal int matches; // number of string matches in current block
+ internal int last_eob_len; // bit length of EOB code for last block
+
+ // Output buffer. bits are inserted starting at the bottom (least
+ // significant bits).
+ internal short bi_buf;
+
+ // Number of valid bits in bi_buf. All bits above the last valid bit
+ // are always zero.
+ internal int bi_valid;
+
+
+ internal DeflateManager()
+ {
+ dyn_ltree = new short[HEAP_SIZE * 2];
+ dyn_dtree = new short[(2 * InternalConstants.D_CODES + 1) * 2]; // distance tree
+ bl_tree = new short[(2 * InternalConstants.BL_CODES + 1) * 2]; // Huffman tree for bit lengths
+ }
+
+
+ // lm_init
+ private void _InitializeLazyMatch()
+ {
+ window_size = 2 * w_size;
+
+ // clear the hash - workitem 9063
+ Array.Clear(head, 0, hash_size);
+ //for (int i = 0; i < hash_size; i++) head[i] = 0;
+
+ config = Config.Lookup(compressionLevel);
+ SetDeflater();
+
+ strstart = 0;
+ block_start = 0;
+ lookahead = 0;
+ match_length = prev_length = MIN_MATCH - 1;
+ match_available = 0;
+ ins_h = 0;
+ }
+
+ // Initialize the tree data structures for a new zlib stream.
+ private void _InitializeTreeData()
+ {
+ treeLiterals.dyn_tree = dyn_ltree;
+ treeLiterals.staticTree = StaticTree.Literals;
+
+ treeDistances.dyn_tree = dyn_dtree;
+ treeDistances.staticTree = StaticTree.Distances;
+
+ treeBitLengths.dyn_tree = bl_tree;
+ treeBitLengths.staticTree = StaticTree.BitLengths;
+
+ bi_buf = 0;
+ bi_valid = 0;
+ last_eob_len = 8; // enough lookahead for inflate
+
+ // Initialize the first block of the first file:
+ _InitializeBlocks();
+ }
+
+ internal void _InitializeBlocks()
+ {
+ // Initialize the trees.
+ for (int i = 0; i < InternalConstants.L_CODES; i++)
+ dyn_ltree[i * 2] = 0;
+ for (int i = 0; i < InternalConstants.D_CODES; i++)
+ dyn_dtree[i * 2] = 0;
+ for (int i = 0; i < InternalConstants.BL_CODES; i++)
+ bl_tree[i * 2] = 0;
+
+ dyn_ltree[END_BLOCK * 2] = 1;
+ opt_len = static_len = 0;
+ last_lit = matches = 0;
+ }
+
+ // Restore the heap property by moving down the tree starting at node k,
+ // exchanging a node with the smallest of its two sons if necessary, stopping
+ // when the heap property is re-established (each father smaller than its
+ // two sons).
+ internal void pqdownheap(short[] tree, int k)
+ {
+ int v = heap[k];
+ int j = k << 1; // left son of k
+ while (j <= heap_len)
+ {
+ // Set j to the smallest of the two sons:
+ if (j < heap_len && _IsSmaller(tree, heap[j + 1], heap[j], depth))
+ {
+ j++;
+ }
+ // Exit if v is smaller than both sons
+ if (_IsSmaller(tree, v, heap[j], depth))
+ break;
+
+ // Exchange v with the smallest son
+ heap[k] = heap[j]; k = j;
+ // And continue down the tree, setting j to the left son of k
+ j <<= 1;
+ }
+ heap[k] = v;
+ }
+
+ internal static bool _IsSmaller(short[] tree, int n, int m, sbyte[] depth)
+ {
+ short tn2 = tree[n * 2];
+ short tm2 = tree[m * 2];
+ return (tn2 < tm2 || (tn2 == tm2 && depth[n] <= depth[m]));
+ }
+
+
+ // Scan a literal or distance tree to determine the frequencies of the codes
+ // in the bit length tree.
+ internal void scan_tree(short[] tree, int max_code)
+ {
+ int n; // iterates over all tree elements
+ int prevlen = -1; // last emitted length
+ int curlen; // length of current code
+ int nextlen = (int)tree[0 * 2 + 1]; // length of next code
+ int count = 0; // repeat count of the current code
+ int max_count = 7; // max repeat count
+ int min_count = 4; // min repeat count
+
+ if (nextlen == 0)
+ {
+ max_count = 138; min_count = 3;
+ }
+ tree[(max_code + 1) * 2 + 1] = (short)0x7fff; // guard //??
+
+ for (n = 0; n <= max_code; n++)
+ {
+ curlen = nextlen; nextlen = (int)tree[(n + 1) * 2 + 1];
+ if (++count < max_count && curlen == nextlen)
+ {
+ continue;
+ }
+ else if (count < min_count)
+ {
+ bl_tree[curlen * 2] = (short)(bl_tree[curlen * 2] + count);
+ }
+ else if (curlen != 0)
+ {
+ if (curlen != prevlen)
+ bl_tree[curlen * 2]++;
+ bl_tree[InternalConstants.REP_3_6 * 2]++;
+ }
+ else if (count <= 10)
+ {
+ bl_tree[InternalConstants.REPZ_3_10 * 2]++;
+ }
+ else
+ {
+ bl_tree[InternalConstants.REPZ_11_138 * 2]++;
+ }
+ count = 0; prevlen = curlen;
+ if (nextlen == 0)
+ {
+ max_count = 138; min_count = 3;
+ }
+ else if (curlen == nextlen)
+ {
+ max_count = 6; min_count = 3;
+ }
+ else
+ {
+ max_count = 7; min_count = 4;
+ }
+ }
+ }
+
+ // Construct the Huffman tree for the bit lengths and return the index in
+ // bl_order of the last bit length code to send.
+ internal int build_bl_tree()
+ {
+ int max_blindex; // index of last bit length code of non zero freq
+
+ // Determine the bit length frequencies for literal and distance trees
+ scan_tree(dyn_ltree, treeLiterals.max_code);
+ scan_tree(dyn_dtree, treeDistances.max_code);
+
+ // Build the bit length tree:
+ treeBitLengths.build_tree(this);
+ // opt_len now includes the length of the tree representations, except
+ // the lengths of the bit lengths codes and the 5+5+4 bits for the counts.
+
+ // Determine the number of bit length codes to send. The pkzip format
+ // requires that at least 4 bit length codes be sent. (appnote.txt says
+ // 3 but the actual value used is 4.)
+ for (max_blindex = InternalConstants.BL_CODES - 1; max_blindex >= 3; max_blindex--)
+ {
+ if (bl_tree[Tree.bl_order[max_blindex] * 2 + 1] != 0)
+ break;
+ }
+ // Update opt_len to include the bit length tree and counts
+ opt_len += 3 * (max_blindex + 1) + 5 + 5 + 4;
+
+ return max_blindex;
+ }
+
+
+ // Send the header for a block using dynamic Huffman trees: the counts, the
+ // lengths of the bit length codes, the literal tree and the distance tree.
+ // IN assertion: lcodes >= 257, dcodes >= 1, blcodes >= 4.
+ internal void send_all_trees(int lcodes, int dcodes, int blcodes)
+ {
+ int rank; // index in bl_order
+
+ send_bits(lcodes - 257, 5); // not +255 as stated in appnote.txt
+ send_bits(dcodes - 1, 5);
+ send_bits(blcodes - 4, 4); // not -3 as stated in appnote.txt
+ for (rank = 0; rank < blcodes; rank++)
+ {
+ send_bits(bl_tree[Tree.bl_order[rank] * 2 + 1], 3);
+ }
+ send_tree(dyn_ltree, lcodes - 1); // literal tree
+ send_tree(dyn_dtree, dcodes - 1); // distance tree
+ }
+
+ // Send a literal or distance tree in compressed form, using the codes in
+ // bl_tree.
+ internal void send_tree(short[] tree, int max_code)
+ {
+ int n; // iterates over all tree elements
+ int prevlen = -1; // last emitted length
+ int curlen; // length of current code
+ int nextlen = tree[0 * 2 + 1]; // length of next code
+ int count = 0; // repeat count of the current code
+ int max_count = 7; // max repeat count
+ int min_count = 4; // min repeat count
+
+ if (nextlen == 0)
+ {
+ max_count = 138; min_count = 3;
+ }
+
+ for (n = 0; n <= max_code; n++)
+ {
+ curlen = nextlen; nextlen = tree[(n + 1) * 2 + 1];
+ if (++count < max_count && curlen == nextlen)
+ {
+ continue;
+ }
+ else if (count < min_count)
+ {
+ do
+ {
+ send_code(curlen, bl_tree);
+ }
+ while (--count != 0);
+ }
+ else if (curlen != 0)
+ {
+ if (curlen != prevlen)
+ {
+ send_code(curlen, bl_tree); count--;
+ }
+ send_code(InternalConstants.REP_3_6, bl_tree);
+ send_bits(count - 3, 2);
+ }
+ else if (count <= 10)
+ {
+ send_code(InternalConstants.REPZ_3_10, bl_tree);
+ send_bits(count - 3, 3);
+ }
+ else
+ {
+ send_code(InternalConstants.REPZ_11_138, bl_tree);
+ send_bits(count - 11, 7);
+ }
+ count = 0; prevlen = curlen;
+ if (nextlen == 0)
+ {
+ max_count = 138; min_count = 3;
+ }
+ else if (curlen == nextlen)
+ {
+ max_count = 6; min_count = 3;
+ }
+ else
+ {
+ max_count = 7; min_count = 4;
+ }
+ }
+ }
+
+ // Output a block of bytes on the stream.
+ // IN assertion: there is enough room in pending_buf.
+ private void put_bytes(byte[] p, int start, int len)
+ {
+ Array.Copy(p, start, pending, pendingCount, len);
+ pendingCount += len;
+ }
+
+#if NOTNEEDED
+ private void put_byte(byte c)
+ {
+ pending[pendingCount++] = c;
+ }
+ internal void put_short(int b)
+ {
+ unchecked
+ {
+ pending[pendingCount++] = (byte)b;
+ pending[pendingCount++] = (byte)(b >> 8);
+ }
+ }
+ internal void putShortMSB(int b)
+ {
+ unchecked
+ {
+ pending[pendingCount++] = (byte)(b >> 8);
+ pending[pendingCount++] = (byte)b;
+ }
+ }
+#endif
+
+ internal void send_code(int c, short[] tree)
+ {
+ int c2 = c * 2;
+ send_bits((tree[c2] & 0xffff), (tree[c2 + 1] & 0xffff));
+ }
+
+ internal void send_bits(int value, int length)
+ {
+ int len = length;
+ unchecked
+ {
+ if (bi_valid > (int)Buf_size - len)
+ {
+ //int val = value;
+ // bi_buf |= (val << bi_valid);
+
+ bi_buf |= (short)((value << bi_valid) & 0xffff);
+ //put_short(bi_buf);
+ pending[pendingCount++] = (byte)bi_buf;
+ pending[pendingCount++] = (byte)(bi_buf >> 8);
+
+
+ bi_buf = (short)((uint)value >> (Buf_size - bi_valid));
+ bi_valid += len - Buf_size;
+ }
+ else
+ {
+ // bi_buf |= (value) << bi_valid;
+ bi_buf |= (short)((value << bi_valid) & 0xffff);
+ bi_valid += len;
+ }
+ }
+ }
+
+ // Send one empty static block to give enough lookahead for inflate.
+ // This takes 10 bits, of which 7 may remain in the bit buffer.
+ // The current inflate code requires 9 bits of lookahead. If the
+ // last two codes for the previous block (real code plus EOB) were coded
+ // on 5 bits or less, inflate may have only 5+3 bits of lookahead to decode
+ // the last real code. In this case we send two empty static blocks instead
+ // of one. (There are no problems if the previous block is stored or fixed.)
+ // To simplify the code, we assume the worst case of last real code encoded
+ // on one bit only.
+ internal void _tr_align()
+ {
+ send_bits(STATIC_TREES << 1, 3);
+ send_code(END_BLOCK, StaticTree.lengthAndLiteralsTreeCodes);
+
+ bi_flush();
+
+ // Of the 10 bits for the empty block, we have already sent
+ // (10 - bi_valid) bits. The lookahead for the last real code (before
+ // the EOB of the previous block) was thus at least one plus the length
+ // of the EOB plus what we have just sent of the empty static block.
+ if (1 + last_eob_len + 10 - bi_valid < 9)
+ {
+ send_bits(STATIC_TREES << 1, 3);
+ send_code(END_BLOCK, StaticTree.lengthAndLiteralsTreeCodes);
+ bi_flush();
+ }
+ last_eob_len = 7;
+ }
+
+
+ // Save the match info and tally the frequency counts. Return true if
+ // the current block must be flushed.
+ internal bool _tr_tally(int dist, int lc)
+ {
+ pending[_distanceOffset + last_lit * 2] = unchecked((byte) ( (uint)dist >> 8 ) );
+ pending[_distanceOffset + last_lit * 2 + 1] = unchecked((byte)dist);
+ pending[_lengthOffset + last_lit] = unchecked((byte)lc);
+ last_lit++;
+
+ if (dist == 0)
+ {
+ // lc is the unmatched char
+ dyn_ltree[lc * 2]++;
+ }
+ else
+ {
+ matches++;
+ // Here, lc is the match length - MIN_MATCH
+ dist--; // dist = match distance - 1
+ dyn_ltree[(Tree.LengthCode[lc] + InternalConstants.LITERALS + 1) * 2]++;
+ dyn_dtree[Tree.DistanceCode(dist) * 2]++;
+ }
+
+ if ((last_lit & 0x1fff) == 0 && (int)compressionLevel > 2)
+ {
+ // Compute an upper bound for the compressed length
+ int out_length = last_lit << 3;
+ int in_length = strstart - block_start;
+ int dcode;
+ for (dcode = 0; dcode < InternalConstants.D_CODES; dcode++)
+ {
+ out_length = (int)(out_length + (int)dyn_dtree[dcode * 2] * (5L + Tree.ExtraDistanceBits[dcode]));
+ }
+ out_length >>= 3;
+ if ((matches < (last_lit / 2)) && out_length < in_length / 2)
+ return true;
+ }
+
+ return (last_lit == lit_bufsize - 1) || (last_lit == lit_bufsize);
+ // dinoch - wraparound?
+ // We avoid equality with lit_bufsize because of wraparound at 64K
+ // on 16 bit machines and because stored blocks are restricted to
+ // 64K-1 bytes.
+ }
+
+
+
+ // Send the block data compressed using the given Huffman trees
+ internal void send_compressed_block(short[] ltree, short[] dtree)
+ {
+ int distance; // distance of matched string
+ int lc; // match length or unmatched char (if dist == 0)
+ int lx = 0; // running index in l_buf
+ int code; // the code to send
+ int extra; // number of extra bits to send
+
+ if (last_lit != 0)
+ {
+ do
+ {
+ int ix = _distanceOffset + lx * 2;
+ distance = ((pending[ix] << 8) & 0xff00) |
+ (pending[ix + 1] & 0xff);
+ lc = (pending[_lengthOffset + lx]) & 0xff;
+ lx++;
+
+ if (distance == 0)
+ {
+ send_code(lc, ltree); // send a literal byte
+ }
+ else
+ {
+ // literal or match pair
+ // Here, lc is the match length - MIN_MATCH
+ code = Tree.LengthCode[lc];
+
+ // send the length code
+ send_code(code + InternalConstants.LITERALS + 1, ltree);
+ extra = Tree.ExtraLengthBits[code];
+ if (extra != 0)
+ {
+ // send the extra length bits
+ lc -= Tree.LengthBase[code];
+ send_bits(lc, extra);
+ }
+ distance--; // dist is now the match distance - 1
+ code = Tree.DistanceCode(distance);
+
+ // send the distance code
+ send_code(code, dtree);
+
+ extra = Tree.ExtraDistanceBits[code];
+ if (extra != 0)
+ {
+ // send the extra distance bits
+ distance -= Tree.DistanceBase[code];
+ send_bits(distance, extra);
+ }
+ }
+
+ // Check that the overlay between pending and d_buf+l_buf is ok:
+ }
+ while (lx < last_lit);
+ }
+
+ send_code(END_BLOCK, ltree);
+ last_eob_len = ltree[END_BLOCK * 2 + 1];
+ }
+
+
+
+ // Set the data type to ASCII or BINARY, using a crude approximation:
+ // binary if more than 20% of the bytes are <= 6 or >= 128, ascii otherwise.
+ // IN assertion: the fields freq of dyn_ltree are set and the total of all
+ // frequencies does not exceed 64K (to fit in an int on 16 bit machines).
+ internal void set_data_type()
+ {
+ int n = 0;
+ int ascii_freq = 0;
+ int bin_freq = 0;
+ while (n < 7)
+ {
+ bin_freq += dyn_ltree[n * 2]; n++;
+ }
+ while (n < 128)
+ {
+ ascii_freq += dyn_ltree[n * 2]; n++;
+ }
+ while (n < InternalConstants.LITERALS)
+ {
+ bin_freq += dyn_ltree[n * 2]; n++;
+ }
+ data_type = (sbyte)(bin_freq > (ascii_freq >> 2) ? Z_BINARY : Z_ASCII);
+ }
+
+
+
+ // Flush the bit buffer, keeping at most 7 bits in it.
+ internal void bi_flush()
+ {
+ if (bi_valid == 16)
+ {
+ pending[pendingCount++] = (byte)bi_buf;
+ pending[pendingCount++] = (byte)(bi_buf >> 8);
+ bi_buf = 0;
+ bi_valid = 0;
+ }
+ else if (bi_valid >= 8)
+ {
+ //put_byte((byte)bi_buf);
+ pending[pendingCount++] = (byte)bi_buf;
+ bi_buf >>= 8;
+ bi_valid -= 8;
+ }
+ }
+
+ // Flush the bit buffer and align the output on a byte boundary
+ internal void bi_windup()
+ {
+ if (bi_valid > 8)
+ {
+ pending[pendingCount++] = (byte)bi_buf;
+ pending[pendingCount++] = (byte)(bi_buf >> 8);
+ }
+ else if (bi_valid > 0)
+ {
+ //put_byte((byte)bi_buf);
+ pending[pendingCount++] = (byte)bi_buf;
+ }
+ bi_buf = 0;
+ bi_valid = 0;
+ }
+
+ // Copy a stored block, storing first the length and its
+ // one's complement if requested.
+ internal void copy_block(int buf, int len, bool header)
+ {
+ bi_windup(); // align on byte boundary
+ last_eob_len = 8; // enough lookahead for inflate
+
+ if (header)
+ unchecked
+ {
+ //put_short((short)len);
+ pending[pendingCount++] = (byte)len;
+ pending[pendingCount++] = (byte)(len >> 8);
+ //put_short((short)~len);
+ pending[pendingCount++] = (byte)~len;
+ pending[pendingCount++] = (byte)(~len >> 8);
+ }
+
+ put_bytes(window, buf, len);
+ }
+
+ internal void flush_block_only(bool eof)
+ {
+ _tr_flush_block(block_start >= 0 ? block_start : -1, strstart - block_start, eof);
+ block_start = strstart;
+ _codec.flush_pending();
+ }
+
+ // Copy without compression as much as possible from the input stream, return
+ // the current block state.
+ // This function does not insert new strings in the dictionary since
+ // uncompressible data is probably not useful. This function is used
+ // only for the level=0 compression option.
+ // NOTE: this function should be optimized to avoid extra copying from
+ // window to pending_buf.
+ internal BlockState DeflateNone(FlushType flush)
+ {
+ // Stored blocks are limited to 0xffff bytes, pending is limited
+ // to pending_buf_size, and each stored block has a 5 byte header:
+
+ int max_block_size = 0xffff;
+ int max_start;
+
+ if (max_block_size > pending.Length - 5)
+ {
+ max_block_size = pending.Length - 5;
+ }
+
+ // Copy as much as possible from input to output:
+ while (true)
+ {
+ // Fill the window as much as possible:
+ if (lookahead <= 1)
+ {
+ _fillWindow();
+ if (lookahead == 0 && flush == FlushType.None)
+ return BlockState.NeedMore;
+ if (lookahead == 0)
+ break; // flush the current block
+ }
+
+ strstart += lookahead;
+ lookahead = 0;
+
+ // Emit a stored block if pending will be full:
+ max_start = block_start + max_block_size;
+ if (strstart == 0 || strstart >= max_start)
+ {
+ // strstart == 0 is possible when wraparound on 16-bit machine
+ lookahead = (int)(strstart - max_start);
+ strstart = (int)max_start;
+
+ flush_block_only(false);
+ if (_codec.AvailableBytesOut == 0)
+ return BlockState.NeedMore;
+ }
+
+ // Flush if we may have to slide, otherwise block_start may become
+ // negative and the data will be gone:
+ if (strstart - block_start >= w_size - MIN_LOOKAHEAD)
+ {
+ flush_block_only(false);
+ if (_codec.AvailableBytesOut == 0)
+ return BlockState.NeedMore;
+ }
+ }
+
+ flush_block_only(flush == FlushType.Finish);
+ if (_codec.AvailableBytesOut == 0)
+ return (flush == FlushType.Finish) ? BlockState.FinishStarted : BlockState.NeedMore;
+
+ return flush == FlushType.Finish ? BlockState.FinishDone : BlockState.BlockDone;
+ }
+
+
+ // Send a stored block
+ internal void _tr_stored_block(int buf, int stored_len, bool eof)
+ {
+ send_bits((STORED_BLOCK << 1) + (eof ? 1 : 0), 3); // send block type
+ copy_block(buf, stored_len, true); // with header
+ }
+
+ // Determine the best encoding for the current block: dynamic trees, static
+ // trees or store, and output the encoded block to the zip file.
+ internal void _tr_flush_block(int buf, int stored_len, bool eof)
+ {
+ // buf: offset of the block's data in the window, or -1 when there is no
+ //      literal data available to fall back on for a stored block.
+ // stored_len: byte length of the block's input data.
+ // eof: true when this is the final block of the stream.
+ int opt_lenb, static_lenb; // opt_len and static_len in bytes
+ int max_blindex = 0; // index of last bit length code of non zero freq
+
+ // Build the Huffman trees unless a stored block is forced
+ if (compressionLevel > 0)
+ {
+ // Check if the file is ascii or binary
+ if (data_type == Z_UNKNOWN)
+ set_data_type();
+
+ // Construct the literal and distance trees
+ treeLiterals.build_tree(this);
+
+ treeDistances.build_tree(this);
+
+ // At this point, opt_len and static_len are the total bit lengths of
+ // the compressed block data, excluding the tree representations.
+
+ // Build the bit length tree for the above two trees, and get the index
+ // in bl_order of the last bit length code to send.
+ max_blindex = build_bl_tree();
+
+ // Determine the best encoding. Compute first the block length in bytes
+ // (+3 bits for the block header, +7 to round up to a whole byte).
+ opt_lenb = (opt_len + 3 + 7) >> 3;
+ static_lenb = (static_len + 3 + 7) >> 3;
+
+ if (static_lenb <= opt_lenb)
+ opt_lenb = static_lenb;
+ }
+ else
+ {
+ opt_lenb = static_lenb = stored_len + 5; // force a stored block
+ }
+
+ if (stored_len + 4 <= opt_lenb && buf != -1)
+ {
+ // 4: two words for the lengths
+ // The test buf != NULL is only necessary if LIT_BUFSIZE > WSIZE.
+ // Otherwise we can't have processed more than WSIZE input bytes since
+ // the last block flush, because compression would have been
+ // successful. If LIT_BUFSIZE <= WSIZE, it is never too late to
+ // transform a block into a stored block.
+ _tr_stored_block(buf, stored_len, eof);
+ }
+ else if (static_lenb == opt_lenb)
+ {
+ // Static trees win (or tie): no tree description needs to be sent.
+ send_bits((STATIC_TREES << 1) + (eof ? 1 : 0), 3);
+ send_compressed_block(StaticTree.lengthAndLiteralsTreeCodes, StaticTree.distTreeCodes);
+ }
+ else
+ {
+ // Dynamic trees: send the tree descriptions first, then the data.
+ send_bits((DYN_TREES << 1) + (eof ? 1 : 0), 3);
+ send_all_trees(treeLiterals.max_code + 1, treeDistances.max_code + 1, max_blindex + 1);
+ send_compressed_block(dyn_ltree, dyn_dtree);
+ }
+
+ // The above check is made mod 2^32, for files larger than 512 MB
+ // and uLong implemented on 32 bits.
+
+ _InitializeBlocks();
+
+ if (eof)
+ {
+ bi_windup();
+ }
+ }
+
+ // Fill the window when the lookahead becomes insufficient.
+ // Updates strstart and lookahead.
+ //
+ // IN assertion: lookahead < MIN_LOOKAHEAD
+ // OUT assertions: strstart <= window_size-MIN_LOOKAHEAD
+ // At least one byte has been read, or avail_in == 0; reads are
+ // performed for at least two bytes (required for the zip translate_eol
+ // option -- not supported here).
+ private void _fillWindow()
+ {
+ int n, m;
+ int p;
+ int more; // Amount of free space at the end of the window.
+
+ do
+ {
+ more = (window_size - lookahead - strstart);
+
+ // Deal with !@#$% 64K limit:
+ if (more == 0 && strstart == 0 && lookahead == 0)
+ {
+ more = w_size;
+ }
+ else if (more == -1)
+ {
+ // Very unlikely, but possible on 16 bit machine if strstart == 0
+ // and lookahead == 1 (input done one byte at time)
+ more--;
+
+ // If the window is almost full and there is insufficient lookahead,
+ // move the upper half to the lower one to make room in the upper half.
+ }
+ else if (strstart >= w_size + w_size - MIN_LOOKAHEAD)
+ {
+ Array.Copy(window, w_size, window, 0, w_size);
+ match_start -= w_size;
+ strstart -= w_size; // we now have strstart >= MAX_DIST
+ block_start -= w_size;
+
+ // Slide the hash table (could be avoided with 32 bit values
+ // at the expense of memory usage). We slide even when level == 0
+ // to keep the hash table consistent if we switch back to level > 0
+ // later. (Using level 0 permanently is not an optimal usage of
+ // zlib, so we don't care about this pathological case.)
+
+ // Rebase every head[] entry by w_size; entries that would point
+ // before the slid window are cleared to 0 (end of chain).
+ n = hash_size;
+ p = n;
+ do
+ {
+ m = (head[--p] & 0xffff);
+ head[p] = (short)((m >= w_size) ? (m - w_size) : 0);
+ }
+ while (--n != 0);
+
+ n = w_size;
+ p = n;
+ do
+ {
+ m = (prev[--p] & 0xffff);
+ prev[p] = (short)((m >= w_size) ? (m - w_size) : 0);
+ // If n is not on any hash chain, prev[n] is garbage but
+ // its value will never be used.
+ }
+ while (--n != 0);
+ more += w_size;
+ }
+
+ // No input available right now; the caller copes with a short lookahead.
+ if (_codec.AvailableBytesIn == 0)
+ return;
+
+ // If there was no sliding:
+ // strstart <= WSIZE+MAX_DIST-1 && lookahead <= MIN_LOOKAHEAD - 1 &&
+ // more == window_size - lookahead - strstart
+ // => more >= window_size - (MIN_LOOKAHEAD-1 + WSIZE + MAX_DIST-1)
+ // => more >= window_size - 2*WSIZE + 2
+ // In the BIG_MEM or MMAP case (not yet supported),
+ // window_size == input_size + MIN_LOOKAHEAD &&
+ // strstart + s->lookahead <= input_size => more >= MIN_LOOKAHEAD.
+ // Otherwise, window_size == 2*WSIZE so more >= 2.
+ // If there was sliding, more >= WSIZE. So in all cases, more >= 2.
+
+ n = _codec.read_buf(window, strstart + lookahead, more);
+ lookahead += n;
+
+ // Initialize the hash value now that we have some input:
+ if (lookahead >= MIN_MATCH)
+ {
+ ins_h = window[strstart] & 0xff;
+ ins_h = (((ins_h) << hash_shift) ^ (window[strstart + 1] & 0xff)) & hash_mask;
+ }
+ // If the whole input has less than MIN_MATCH bytes, ins_h is garbage,
+ // but this is not important since only literal bytes will be emitted.
+ }
+ while (lookahead < MIN_LOOKAHEAD && _codec.AvailableBytesIn != 0);
+ }
+
+ // Compress as much as possible from the input stream, return the current
+ // block state.
+ // This function does not perform lazy evaluation of matches and inserts
+ // new strings in the dictionary only for unmatched strings or for short
+ // matches. It is used only for the fast compression options.
+ internal BlockState DeflateFast(FlushType flush)
+ {
+ // short hash_head = 0; // head of the hash chain
+ int hash_head = 0; // head of the hash chain
+ bool bflush; // set if current block must be flushed
+
+ while (true)
+ {
+ // Make sure that we always have enough lookahead, except
+ // at the end of the input file. We need MAX_MATCH bytes
+ // for the next match, plus MIN_MATCH bytes to insert the
+ // string following the next match.
+ if (lookahead < MIN_LOOKAHEAD)
+ {
+ _fillWindow();
+ if (lookahead < MIN_LOOKAHEAD && flush == FlushType.None)
+ {
+ return BlockState.NeedMore;
+ }
+ if (lookahead == 0)
+ break; // flush the current block
+ }
+
+ // Insert the string window[strstart .. strstart+2] in the
+ // dictionary, and set hash_head to the head of the hash chain:
+ if (lookahead >= MIN_MATCH)
+ {
+ ins_h = (((ins_h) << hash_shift) ^ (window[(strstart) + (MIN_MATCH - 1)] & 0xff)) & hash_mask;
+
+ // prev[strstart&w_mask]=hash_head=head[ins_h];
+ hash_head = (head[ins_h] & 0xffff);
+ prev[strstart & w_mask] = head[ins_h];
+ head[ins_h] = unchecked((short)strstart);
+ }
+
+ // Find the longest match, discarding those <= prev_length.
+ // At this point we have always match_length < MIN_MATCH
+
+ if (hash_head != 0L && ((strstart - hash_head) & 0xffff) <= w_size - MIN_LOOKAHEAD)
+ {
+ // To simplify the code, we prevent matches with the string
+ // of window index 0 (in particular we have to avoid a match
+ // of the string with itself at the start of the input file).
+ if (compressionStrategy != CompressionStrategy.HuffmanOnly)
+ {
+ match_length = longest_match(hash_head);
+ }
+ // longest_match() sets match_start
+ }
+ if (match_length >= MIN_MATCH)
+ {
+ // check_match(strstart, match_start, match_length);
+
+ // Emit a (distance, length) pair; bflush reports whether the
+ // pending literal/length buffer is now full and must be flushed.
+ bflush = _tr_tally(strstart - match_start, match_length - MIN_MATCH);
+
+ lookahead -= match_length;
+
+ // Insert new strings in the hash table only if the match length
+ // is not too large. This saves time but degrades compression.
+ if (match_length <= config.MaxLazy && lookahead >= MIN_MATCH)
+ {
+ match_length--; // string at strstart already in hash table
+ do
+ {
+ strstart++;
+
+ ins_h = ((ins_h << hash_shift) ^ (window[(strstart) + (MIN_MATCH - 1)] & 0xff)) & hash_mask;
+ // prev[strstart&w_mask]=hash_head=head[ins_h];
+ hash_head = (head[ins_h] & 0xffff);
+ prev[strstart & w_mask] = head[ins_h];
+ head[ins_h] = unchecked((short)strstart);
+
+ // strstart never exceeds WSIZE-MAX_MATCH, so there are
+ // always MIN_MATCH bytes ahead.
+ }
+ while (--match_length != 0);
+ strstart++;
+ }
+ else
+ {
+ strstart += match_length;
+ match_length = 0;
+ ins_h = window[strstart] & 0xff;
+
+ ins_h = (((ins_h) << hash_shift) ^ (window[strstart + 1] & 0xff)) & hash_mask;
+ // If lookahead < MIN_MATCH, ins_h is garbage, but it does not
+ // matter since it will be recomputed at next deflate call.
+ }
+ }
+ else
+ {
+ // No match, output a literal byte
+
+ bflush = _tr_tally(0, window[strstart] & 0xff);
+ lookahead--;
+ strstart++;
+ }
+ if (bflush)
+ {
+ flush_block_only(false);
+ if (_codec.AvailableBytesOut == 0)
+ return BlockState.NeedMore;
+ }
+ }
+
+ flush_block_only(flush == FlushType.Finish);
+ if (_codec.AvailableBytesOut == 0)
+ {
+ if (flush == FlushType.Finish)
+ return BlockState.FinishStarted;
+ else
+ return BlockState.NeedMore;
+ }
+ return flush == FlushType.Finish ? BlockState.FinishDone : BlockState.BlockDone;
+ }
+
+ // Same as above, but achieves better compression. We use a lazy
+ // evaluation for matches: a match is finally adopted only if there is
+ // no better match at the next window position.
+ internal BlockState DeflateSlow(FlushType flush)
+ {
+ // Lazy matching: the match found at the previous position is only
+ // emitted if the current position does not yield a longer one.
+ // short hash_head = 0; // head of hash chain
+ int hash_head = 0; // head of hash chain
+ bool bflush; // set if current block must be flushed
+
+ // Process the input block.
+ while (true)
+ {
+ // Make sure that we always have enough lookahead, except
+ // at the end of the input file. We need MAX_MATCH bytes
+ // for the next match, plus MIN_MATCH bytes to insert the
+ // string following the next match.
+
+ if (lookahead < MIN_LOOKAHEAD)
+ {
+ _fillWindow();
+ if (lookahead < MIN_LOOKAHEAD && flush == FlushType.None)
+ return BlockState.NeedMore;
+
+ if (lookahead == 0)
+ break; // flush the current block
+ }
+
+ // Insert the string window[strstart .. strstart+2] in the
+ // dictionary, and set hash_head to the head of the hash chain:
+
+ if (lookahead >= MIN_MATCH)
+ {
+ ins_h = (((ins_h) << hash_shift) ^ (window[(strstart) + (MIN_MATCH - 1)] & 0xff)) & hash_mask;
+ // prev[strstart&w_mask]=hash_head=head[ins_h];
+ hash_head = (head[ins_h] & 0xffff);
+ prev[strstart & w_mask] = head[ins_h];
+ head[ins_h] = unchecked((short)strstart);
+ }
+
+ // Find the longest match, discarding those <= prev_length.
+ // Remember the previous position's match before looking again.
+ prev_length = match_length;
+ prev_match = match_start;
+ match_length = MIN_MATCH - 1;
+
+ if (hash_head != 0 && prev_length < config.MaxLazy &&
+ ((strstart - hash_head) & 0xffff) <= w_size - MIN_LOOKAHEAD)
+ {
+ // To simplify the code, we prevent matches with the string
+ // of window index 0 (in particular we have to avoid a match
+ // of the string with itself at the start of the input file).
+
+ if (compressionStrategy != CompressionStrategy.HuffmanOnly)
+ {
+ match_length = longest_match(hash_head);
+ }
+ // longest_match() sets match_start
+
+ if (match_length <= 5 && (compressionStrategy == CompressionStrategy.Filtered ||
+ (match_length == MIN_MATCH && strstart - match_start > 4096)))
+ {
+
+ // If prev_match is also MIN_MATCH, match_start is garbage
+ // but we will ignore the current match anyway.
+ match_length = MIN_MATCH - 1;
+ }
+ }
+
+ // If there was a match at the previous step and the current
+ // match is not better, output the previous match:
+ if (prev_length >= MIN_MATCH && match_length <= prev_length)
+ {
+ int max_insert = strstart + lookahead - MIN_MATCH;
+ // Do not insert strings in hash table beyond this.
+
+ // check_match(strstart-1, prev_match, prev_length);
+
+ bflush = _tr_tally(strstart - 1 - prev_match, prev_length - MIN_MATCH);
+
+ // Insert in hash table all strings up to the end of the match.
+ // strstart-1 and strstart are already inserted. If there is not
+ // enough lookahead, the last two strings are not inserted in
+ // the hash table.
+ lookahead -= (prev_length - 1);
+ prev_length -= 2;
+ do
+ {
+ if (++strstart <= max_insert)
+ {
+ ins_h = (((ins_h) << hash_shift) ^ (window[(strstart) + (MIN_MATCH - 1)] & 0xff)) & hash_mask;
+ //prev[strstart&w_mask]=hash_head=head[ins_h];
+ hash_head = (head[ins_h] & 0xffff);
+ prev[strstart & w_mask] = head[ins_h];
+ head[ins_h] = unchecked((short)strstart);
+ }
+ }
+ while (--prev_length != 0);
+ match_available = 0;
+ match_length = MIN_MATCH - 1;
+ strstart++;
+
+ if (bflush)
+ {
+ flush_block_only(false);
+ if (_codec.AvailableBytesOut == 0)
+ return BlockState.NeedMore;
+ }
+ }
+ else if (match_available != 0)
+ {
+
+ // If there was no match at the previous position, output a
+ // single literal. If there was a match but the current match
+ // is longer, truncate the previous match to a single literal.
+
+ bflush = _tr_tally(0, window[strstart - 1] & 0xff);
+
+ if (bflush)
+ {
+ flush_block_only(false);
+ }
+ strstart++;
+ lookahead--;
+ if (_codec.AvailableBytesOut == 0)
+ return BlockState.NeedMore;
+ }
+ else
+ {
+ // There is no previous match to compare with, wait for
+ // the next step to decide.
+
+ match_available = 1;
+ strstart++;
+ lookahead--;
+ }
+ }
+
+ // A deferred literal may still be pending at end of input; emit it.
+ if (match_available != 0)
+ {
+ bflush = _tr_tally(0, window[strstart - 1] & 0xff);
+ match_available = 0;
+ }
+ flush_block_only(flush == FlushType.Finish);
+
+ if (_codec.AvailableBytesOut == 0)
+ {
+ if (flush == FlushType.Finish)
+ return BlockState.FinishStarted;
+ else
+ return BlockState.NeedMore;
+ }
+
+ return flush == FlushType.Finish ? BlockState.FinishDone : BlockState.BlockDone;
+ }
+
+
+ // Walk the hash chain starting at cur_match looking for the longest match
+ // with the string at strstart. Sets match_start when a better match is
+ // found; returns the best length found, capped at the current lookahead.
+ internal int longest_match(int cur_match)
+ {
+ int chain_length = config.MaxChainLength; // max hash chain length
+ int scan = strstart; // current string
+ int match; // matched string
+ int len; // length of current match
+ int best_len = prev_length; // best match length so far
+ int limit = strstart > (w_size - MIN_LOOKAHEAD) ? strstart - (w_size - MIN_LOOKAHEAD) : 0;
+
+ int niceLength = config.NiceLength;
+
+ // Stop when cur_match becomes <= limit. To simplify the code,
+ // we prevent matches with the string of window index 0.
+
+ int wmask = w_mask;
+
+ int strend = strstart + MAX_MATCH;
+ byte scan_end1 = window[scan + best_len - 1];
+ byte scan_end = window[scan + best_len];
+
+ // The code is optimized for HASH_BITS >= 8 and MAX_MATCH-2 multiple of 16.
+ // It is easy to get rid of this optimization if necessary.
+
+ // Do not waste too much time if we already have a good match:
+ if (prev_length >= config.GoodLength)
+ {
+ chain_length >>= 2;
+ }
+
+ // Do not look for matches beyond the end of the input. This is necessary
+ // to make deflate deterministic.
+ if (niceLength > lookahead)
+ niceLength = lookahead;
+
+ do
+ {
+ match = cur_match;
+
+ // Skip to next match if the match length cannot increase
+ // or if the match length is less than 2:
+ if (window[match + best_len] != scan_end ||
+ window[match + best_len - 1] != scan_end1 ||
+ window[match] != window[scan] ||
+ window[++match] != window[scan + 1])
+ continue;
+
+ // The check at best_len-1 can be removed because it will be made
+ // again later. (This heuristic is not always a win.)
+ // It is not necessary to compare scan[2] and match[2] since they
+ // are always equal when the other bytes match, given that
+ // the hash keys are equal and that HASH_BITS >= 8.
+ scan += 2; match++;
+
+ // We check for insufficient lookahead only every 8th comparison;
+ // the 256th check will be made at strstart+258.
+ // The comparison work happens entirely in the while condition:
+ // eight byte pairs per iteration, until a mismatch or strend.
+ do
+ {
+ }
+ while (window[++scan] == window[++match] &&
+ window[++scan] == window[++match] &&
+ window[++scan] == window[++match] &&
+ window[++scan] == window[++match] &&
+ window[++scan] == window[++match] &&
+ window[++scan] == window[++match] &&
+ window[++scan] == window[++match] &&
+ window[++scan] == window[++match] && scan < strend);
+
+ len = MAX_MATCH - (int)(strend - scan);
+ scan = strend - MAX_MATCH;
+
+ if (len > best_len)
+ {
+ match_start = cur_match;
+ best_len = len;
+ if (len >= niceLength)
+ break;
+ scan_end1 = window[scan + best_len - 1];
+ scan_end = window[scan + best_len];
+ }
+ }
+ while ((cur_match = (prev[cur_match & wmask] & 0xffff)) > limit && --chain_length != 0);
+
+ // Never report a match longer than the data actually available.
+ if (best_len <= lookahead)
+ return best_len;
+ return lookahead;
+ }
+
+
+ // True once the RFC1950 trailer (adler32) has been written; prevents the
+ // trailer from being emitted more than once on repeated Finish calls.
+ private bool Rfc1950BytesEmitted = false;
+ // Backing field for WantRfc1950HeaderBytes; defaults to emitting the
+ // zlib (RFC1950) header and trailer around the raw deflate data.
+ private bool _WantRfc1950HeaderBytes = true;
+ // When false, Reset() starts in BUSY_STATE and Deflate() skips both the
+ // RFC1950 header and the adler32 trailer (raw DEFLATE output).
+ internal bool WantRfc1950HeaderBytes
+ {
+ get { return _WantRfc1950HeaderBytes; }
+ set { _WantRfc1950HeaderBytes = value; }
+ }
+
+
+ // Initialize with the widest supported window and default memory
+ // level / strategy.
+ internal int Initialize(ZlibCodec codec, CompressionLevel level)
+     => Initialize(codec, level, ZlibConstants.WindowBitsMax);
+
+ // Initialize with an explicit window size (bits) and the default memory
+ // level and strategy.
+ internal int Initialize(ZlibCodec codec, CompressionLevel level, int bits)
+     => Initialize(codec, level, bits, MEM_LEVEL_DEFAULT, CompressionStrategy.Default);
+
+ // Initialize with an explicit window size and strategy, using the default
+ // memory level.
+ internal int Initialize(ZlibCodec codec, CompressionLevel level, int bits, CompressionStrategy compressionStrategy)
+     => Initialize(codec, level, bits, MEM_LEVEL_DEFAULT, compressionStrategy);
+
+ // Allocate and wire up all deflate state: sliding window, hash chains and
+ // the pending output buffer. Throws ZlibException on out-of-range
+ // windowBits (9..15) or memLevel (1..MEM_LEVEL_MAX); returns Z_OK.
+ internal int Initialize(ZlibCodec codec, CompressionLevel level, int windowBits, int memLevel, CompressionStrategy strategy)
+ {
+ _codec = codec;
+ _codec.Message = null;
+
+ // validation
+ if (windowBits < 9 || windowBits > 15)
+ throw new ZlibException("windowBits must be in the range 9..15.");
+
+ if (memLevel < 1 || memLevel > MEM_LEVEL_MAX)
+ throw new ZlibException(String.Format("memLevel must be in the range 1.. {0}", MEM_LEVEL_MAX));
+
+ _codec.dstate = this;
+
+ // w_size is a power of two, so w_mask gives cheap modular indexing.
+ w_bits = windowBits;
+ w_size = 1 << w_bits;
+ w_mask = w_size - 1;
+
+ hash_bits = memLevel + 7;
+ hash_size = 1 << hash_bits;
+ hash_mask = hash_size - 1;
+ hash_shift = ((hash_bits + MIN_MATCH - 1) / MIN_MATCH);
+
+ window = new byte[w_size * 2];
+ prev = new short[w_size];
+ head = new short[hash_size];
+
+ // for memLevel==8, this will be 16384, 16k
+ lit_bufsize = 1 << (memLevel + 6);
+
+ // Use a single array as the buffer for data pending compression,
+ // the output distance codes, and the output length codes (aka tree).
+ // orig comment: This works just fine since the average
+ // output size for (length,distance) codes is <= 24 bits.
+ pending = new byte[lit_bufsize * 4];
+ _distanceOffset = lit_bufsize;
+ _lengthOffset = (1 + 2) * lit_bufsize;
+
+ // So, for memLevel 8, the length of the pending buffer is 65536. 64k.
+ // The first 16k are pending bytes.
+ // The middle slice, of 32k, is used for distance codes.
+ // The final 16k are length codes.
+
+ this.compressionLevel = level;
+ this.compressionStrategy = strategy;
+
+ Reset();
+ return ZlibConstants.Z_OK;
+ }
+
+
+ // Return the deflater to its initial state so the same instance can be
+ // reused for a new stream without re-allocating its buffers.
+ internal void Reset()
+ {
+ _codec.TotalBytesIn = _codec.TotalBytesOut = 0;
+ _codec.Message = null;
+ //strm.data_type = Z_UNKNOWN;
+
+ pendingCount = 0;
+ nextPending = 0;
+
+ Rfc1950BytesEmitted = false;
+
+ // INIT_STATE makes Deflate() emit the RFC1950 header on its first call.
+ status = (WantRfc1950HeaderBytes) ? INIT_STATE : BUSY_STATE;
+ _codec._Adler32 = Adler.Adler32(0, null, 0, 0);
+
+ last_flush = (int)FlushType.None;
+
+ _InitializeTreeData();
+ _InitializeLazyMatch();
+ }
+
+
+ // Release the deflater's buffers. Returns Z_STREAM_ERROR for an
+ // unexpected state, Z_DATA_ERROR when the stream is ended while still
+ // busy, and Z_OK otherwise.
+ internal int End()
+ {
+ bool knownState = status == INIT_STATE || status == BUSY_STATE || status == FINISH_STATE;
+ if (!knownState)
+ return ZlibConstants.Z_STREAM_ERROR;
+
+ // Drop the large buffers in the reverse order of their allocation so
+ // they can be reclaimed even while this instance stays referenced.
+ pending = null;
+ head = null;
+ prev = null;
+ window = null;
+
+ // Ending while still BUSY means the stream was not finished properly.
+ if (status == BUSY_STATE)
+ return ZlibConstants.Z_DATA_ERROR;
+ return ZlibConstants.Z_OK;
+ }
+
+
+ // Point DeflateFunction at the routine matching the configured flavor.
+ private void SetDeflater()
+ {
+ var flavor = config.Flavor;
+ if (flavor == DeflateFlavor.Store)
+ DeflateFunction = DeflateNone;
+ else if (flavor == DeflateFlavor.Fast)
+ DeflateFunction = DeflateFast;
+ else if (flavor == DeflateFlavor.Slow)
+ DeflateFunction = DeflateSlow;
+ }
+
+
+ // Change the compression level/strategy mid-stream. If the new level uses
+ // a different deflate flavor and input has already been consumed, a
+ // partial flush is performed first so already-buffered data is encoded
+ // with the previous settings. Returns the result of that flush (or Z_OK).
+ internal int SetParams(CompressionLevel level, CompressionStrategy strategy)
+ {
+ int result = ZlibConstants.Z_OK;
+
+ if (compressionLevel != level)
+ {
+ Config newConfig = Config.Lookup(level);
+
+ // change in the deflate flavor (Fast vs slow vs none)?
+ if (newConfig.Flavor != config.Flavor && _codec.TotalBytesIn != 0)
+ {
+ // Flush the last buffer:
+ result = _codec.Deflate(FlushType.Partial);
+ }
+
+ compressionLevel = level;
+ config = newConfig;
+ SetDeflater();
+ }
+
+ // no need to flush with change in strategy? Really?
+ compressionStrategy = strategy;
+
+ return result;
+ }
+
+
+ // Preset the compression dictionary: prime the sliding window and the
+ // hash chains with the dictionary bytes so early matches can reference
+ // them. Only valid before any data is compressed (INIT_STATE). Returns
+ // Z_OK; throws ZlibException on a null dictionary or wrong state.
+ internal int SetDictionary(byte[] dictionary)
+ {
+ // Validate BEFORE touching the array: the previous code read
+ // dictionary.Length ahead of the null check, turning a misuse into a
+ // NullReferenceException instead of the intended ZlibException.
+ if (dictionary == null || status != INIT_STATE)
+ throw new ZlibException("Stream error.");
+
+ int length = dictionary.Length;
+ int index = 0;
+
+ _codec._Adler32 = Adler.Adler32(_codec._Adler32, dictionary, 0, dictionary.Length);
+
+ if (length < MIN_MATCH)
+ return ZlibConstants.Z_OK;
+ if (length > w_size - MIN_LOOKAHEAD)
+ {
+ // Only the tail of an oversized dictionary is useful: matches can
+ // never reach further back than the window allows.
+ length = w_size - MIN_LOOKAHEAD;
+ index = dictionary.Length - length; // use the tail of the dictionary
+ }
+ Array.Copy(dictionary, index, window, 0, length);
+ strstart = length;
+ block_start = length;
+
+ // Insert all strings in the hash table (except for the last two bytes).
+ // s->lookahead stays null, so s->ins_h will be recomputed at the next
+ // call of fill_window.
+
+ ins_h = window[0] & 0xff;
+ ins_h = (((ins_h) << hash_shift) ^ (window[1] & 0xff)) & hash_mask;
+
+ for (int n = 0; n <= length - MIN_MATCH; n++)
+ {
+ ins_h = (((ins_h) << hash_shift) ^ (window[(n) + (MIN_MATCH - 1)] & 0xff)) & hash_mask;
+ prev[n & w_mask] = head[ins_h];
+ head[ins_h] = (short)n;
+ }
+ return ZlibConstants.Z_OK;
+ }
+
+
+
+ // One step of the deflate state machine: emits the RFC1950 header on the
+ // first call (INIT_STATE), flushes pending output, runs the configured
+ // DeflateFunction, and writes the adler32 trailer when finishing.
+ internal int Deflate(FlushType flush)
+ {
+ int old_flush;
+
+ if (_codec.OutputBuffer == null ||
+ (_codec.InputBuffer == null && _codec.AvailableBytesIn != 0) ||
+ (status == FINISH_STATE && flush != FlushType.Finish))
+ {
+ _codec.Message = _ErrorMessage[ZlibConstants.Z_NEED_DICT - (ZlibConstants.Z_STREAM_ERROR)];
+ throw new ZlibException(String.Format("Something is fishy. [{0}]", _codec.Message));
+ }
+ if (_codec.AvailableBytesOut == 0)
+ {
+ _codec.Message = _ErrorMessage[ZlibConstants.Z_NEED_DICT - (ZlibConstants.Z_BUF_ERROR)];
+ throw new ZlibException("OutputBuffer is full (AvailableBytesOut == 0)");
+ }
+
+ old_flush = last_flush;
+ last_flush = (int)flush;
+
+ // Write the zlib (rfc1950) header bytes
+ if (status == INIT_STATE)
+ {
+ int header = (Z_DEFLATED + ((w_bits - 8) << 4)) << 8;
+ int level_flags = (((int)compressionLevel - 1) & 0xff) >> 1;
+
+ if (level_flags > 3)
+ level_flags = 3;
+ header |= (level_flags << 6);
+ if (strstart != 0)
+ header |= PRESET_DICT;
+ // Make the 16-bit header a multiple of 31 (the RFC1950 check value).
+ header += 31 - (header % 31);
+
+ status = BUSY_STATE;
+ //putShortMSB(header);
+ unchecked
+ {
+ pending[pendingCount++] = (byte)(header >> 8);
+ pending[pendingCount++] = (byte)header;
+ }
+ // Save the adler32 of the preset dictionary:
+ if (strstart != 0)
+ {
+ pending[pendingCount++] = (byte)((_codec._Adler32 & 0xFF000000) >> 24);
+ pending[pendingCount++] = (byte)((_codec._Adler32 & 0x00FF0000) >> 16);
+ pending[pendingCount++] = (byte)((_codec._Adler32 & 0x0000FF00) >> 8);
+ pending[pendingCount++] = (byte)(_codec._Adler32 & 0x000000FF);
+ }
+ _codec._Adler32 = Adler.Adler32(0, null, 0, 0);
+ }
+
+ // Flush as much pending output as possible
+ if (pendingCount != 0)
+ {
+ _codec.flush_pending();
+ if (_codec.AvailableBytesOut == 0)
+ {
+ //System.out.println(" avail_out==0");
+ // Since avail_out is 0, deflate will be called again with
+ // more output space, but possibly with both pending and
+ // avail_in equal to zero. There won't be anything to do,
+ // but this is not an error situation so make sure we
+ // return OK instead of BUF_ERROR at next call of deflate:
+ last_flush = -1;
+ return ZlibConstants.Z_OK;
+ }
+
+ // Make sure there is something to do and avoid duplicate consecutive
+ // flushes. For repeated and useless calls with Z_FINISH, we keep
+ // returning Z_STREAM_END instead of Z_BUFF_ERROR.
+ }
+ else if (_codec.AvailableBytesIn == 0 &&
+ (int)flush <= old_flush &&
+ flush != FlushType.Finish)
+ {
+ // workitem 8557
+ //
+ // Not sure why this needs to be an error. pendingCount == 0, which
+ // means there's nothing to deflate. And the caller has not asked
+ // for a FlushType.Finish, but... that seems very non-fatal. We
+ // can just say "OK" and do nothing.
+
+ // _codec.Message = z_errmsg[ZlibConstants.Z_NEED_DICT - (ZlibConstants.Z_BUF_ERROR)];
+ // throw new ZlibException("AvailableBytesIn == 0 && flush<=old_flush && flush != FlushType.Finish");
+
+ return ZlibConstants.Z_OK;
+ }
+
+ // User must not provide more input after the first FINISH:
+ if (status == FINISH_STATE && _codec.AvailableBytesIn != 0)
+ {
+ _codec.Message = _ErrorMessage[ZlibConstants.Z_NEED_DICT - (ZlibConstants.Z_BUF_ERROR)];
+ throw new ZlibException("status == FINISH_STATE && _codec.AvailableBytesIn != 0");
+ }
+
+ // Start a new block or continue the current one.
+ if (_codec.AvailableBytesIn != 0 || lookahead != 0 || (flush != FlushType.None && status != FINISH_STATE))
+ {
+ BlockState bstate = DeflateFunction(flush);
+
+ if (bstate == BlockState.FinishStarted || bstate == BlockState.FinishDone)
+ {
+ status = FINISH_STATE;
+ }
+ if (bstate == BlockState.NeedMore || bstate == BlockState.FinishStarted)
+ {
+ if (_codec.AvailableBytesOut == 0)
+ {
+ last_flush = -1; // avoid BUF_ERROR next call, see above
+ }
+ return ZlibConstants.Z_OK;
+ // If flush != Z_NO_FLUSH && avail_out == 0, the next call
+ // of deflate should use the same flush parameter to make sure
+ // that the flush is complete. So we don't have to output an
+ // empty block here, this will be done at next call. This also
+ // ensures that for a very small output buffer, we emit at most
+ // one empty block.
+ }
+
+ if (bstate == BlockState.BlockDone)
+ {
+ if (flush == FlushType.Partial)
+ {
+ _tr_align();
+ }
+ else
+ {
+ // FlushType.Full or FlushType.Sync
+ _tr_stored_block(0, 0, false);
+ // For a full flush, this empty block will be recognized
+ // as a special marker by inflate_sync().
+ if (flush == FlushType.Full)
+ {
+ // clear hash (forget the history)
+ for (int i = 0; i < hash_size; i++)
+ head[i] = 0;
+ }
+ }
+ _codec.flush_pending();
+ if (_codec.AvailableBytesOut == 0)
+ {
+ last_flush = -1; // avoid BUF_ERROR at next call, see above
+ return ZlibConstants.Z_OK;
+ }
+ }
+ }
+
+ if (flush != FlushType.Finish)
+ return ZlibConstants.Z_OK;
+
+ if (!WantRfc1950HeaderBytes || Rfc1950BytesEmitted)
+ return ZlibConstants.Z_STREAM_END;
+
+ // Write the zlib trailer (adler32)
+ pending[pendingCount++] = (byte)((_codec._Adler32 & 0xFF000000) >> 24);
+ pending[pendingCount++] = (byte)((_codec._Adler32 & 0x00FF0000) >> 16);
+ pending[pendingCount++] = (byte)((_codec._Adler32 & 0x0000FF00) >> 8);
+ pending[pendingCount++] = (byte)(_codec._Adler32 & 0x000000FF);
+ //putShortMSB((int)(SharedUtils.URShift(_codec._Adler32, 16)));
+ //putShortMSB((int)(_codec._Adler32 & 0xffff));
+
+ _codec.flush_pending();
+
+ // If avail_out is zero, the application will call deflate again
+ // to flush the rest.
+
+ Rfc1950BytesEmitted = true; // write the trailer only once!
+
+ return pendingCount != 0 ? ZlibConstants.Z_OK : ZlibConstants.Z_STREAM_END;
+ }
+
+ }
+}
\ No newline at end of file
diff --git a/src/Hazelcast.Net/Polyfills/ZLib/DeflateStream.cs b/src/Hazelcast.Net/Polyfills/ZLib/DeflateStream.cs
new file mode 100644
index 0000000000..05362d4168
--- /dev/null
+++ b/src/Hazelcast.Net/Polyfills/ZLib/DeflateStream.cs
@@ -0,0 +1,740 @@
+// DeflateStream.cs
+// ------------------------------------------------------------------
+//
+// Copyright (c) 2009-2010 Dino Chiesa.
+// All rights reserved.
+//
+// This code module is part of DotNetZip, a zipfile class library.
+//
+// ------------------------------------------------------------------
+//
+// This code is licensed under the Microsoft Public License.
+// See the file License.txt for the license details.
+// More info on: http://dotnetzip.codeplex.com
+//
+// ------------------------------------------------------------------
+//
+// last saved (in emacs):
+// Time-stamp: <2011-July-31 14:48:11>
+//
+// ------------------------------------------------------------------
+//
+// This module defines the DeflateStream class, which can be used as a replacement for
+// the System.IO.Compression.DeflateStream class in the .NET BCL.
+//
+// ------------------------------------------------------------------
+
+
+using System;
+
+namespace Ionic.Zlib
+{
+ ///
+ /// A class for compressing and decompressing streams using the Deflate algorithm.
+ ///
+ ///
+ ///
+ ///
+ ///
+ /// The DeflateStream is a Decorator on a <see cref="System.IO.Stream"/>. It adds DEFLATE compression or decompression to any
+ /// stream.
+ ///
+ ///
+ ///
+ /// Using this stream, applications can compress or decompress data via stream
+ /// Read and Write operations. Either compression or decompression
+ /// can occur through either reading or writing. The compression format used is
+ /// DEFLATE, which is documented in IETF RFC 1951, "DEFLATE
+ /// Compressed Data Format Specification version 1.3.".
+ ///
+ ///
+ ///
+ /// This class is similar to <see cref="ZlibStream"/>, except that
+ /// ZlibStream adds the RFC
+ /// 1950 - ZLIB framing bytes to a compressed stream when compressing, or
+ /// expects the RFC1950 framing bytes when decompressing. The DeflateStream
+ /// does not.
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ public class DeflateStream : System.IO.Stream
+ {
+ // The wrapped ZlibBaseStream that performs the actual compression work.
+ internal ZlibBaseStream _baseStream;
+ // The captive stream supplied by the caller.
+ internal System.IO.Stream _innerStream;
+ // Set once disposal has run.  NOTE(review): the Dispose logic is outside
+ // this excerpt - confirm it guards double-dispose with this flag.
+ bool _disposed;
+
+ ///
+ /// Create a DeflateStream using the specified CompressionMode.
+ ///
+ ///
+ ///
+ /// When mode is CompressionMode.Compress, the DeflateStream will use
+ /// the default compression level. The "captive" stream will be closed when
+ /// the DeflateStream is closed.
+ ///
+ ///
+ ///
+ /// This example uses a DeflateStream to compress data from a file, and writes
+ /// the compressed data to another file.
+ ///
+ /// using (System.IO.Stream input = System.IO.File.OpenRead(fileToCompress))
+ /// {
+ /// using (var raw = System.IO.File.Create(fileToCompress + ".deflated"))
+ /// {
+ /// using (Stream compressor = new DeflateStream(raw, CompressionMode.Compress))
+ /// {
+ /// byte[] buffer = new byte[WORKING_BUFFER_SIZE];
+ /// int n;
+ /// while ((n= input.Read(buffer, 0, buffer.Length)) != 0)
+ /// {
+ /// compressor.Write(buffer, 0, n);
+ /// }
+ /// }
+ /// }
+ /// }
+ ///
+ ///
+ ///
+ /// Using input As Stream = File.OpenRead(fileToCompress)
+ /// Using raw As FileStream = File.Create(fileToCompress & ".deflated")
+ /// Using compressor As Stream = New DeflateStream(raw, CompressionMode.Compress)
+ /// Dim buffer As Byte() = New Byte(4096) {}
+ /// Dim n As Integer = -1
+ /// Do While (n <> 0)
+ /// If (n > 0) Then
+ /// compressor.Write(buffer, 0, n)
+ /// End If
+ /// n = input.Read(buffer, 0, buffer.Length)
+ /// Loop
+ /// End Using
+ /// End Using
+ /// End Using
+ ///
+ ///
+ /// <param name="stream">The stream which will be read or written.</param>
+ /// <param name="mode">Indicates whether the DeflateStream will compress or decompress.</param>
+ public DeflateStream(System.IO.Stream stream, CompressionMode mode)
+ : this(stream, mode, CompressionLevel.Default, false) // default level; captive stream is closed with this stream
+ {
+ }
+
+ ///
+ /// Create a DeflateStream using the specified CompressionMode and the specified CompressionLevel.
+ ///
+ ///
+ ///
+ ///
+ ///
+ /// When mode is CompressionMode.Decompress, the level parameter is
+ /// ignored. The "captive" stream will be closed when the DeflateStream is
+ /// closed.
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ /// This example uses a DeflateStream to compress data from a file, and writes
+ /// the compressed data to another file.
+ ///
+ ///
+ /// using (System.IO.Stream input = System.IO.File.OpenRead(fileToCompress))
+ /// {
+ /// using (var raw = System.IO.File.Create(fileToCompress + ".deflated"))
+ /// {
+ /// using (Stream compressor = new DeflateStream(raw,
+ /// CompressionMode.Compress,
+ /// CompressionLevel.BestCompression))
+ /// {
+ /// byte[] buffer = new byte[WORKING_BUFFER_SIZE];
+ /// int n= -1;
+ /// while (n != 0)
+ /// {
+ /// if (n > 0)
+ /// compressor.Write(buffer, 0, n);
+ /// n= input.Read(buffer, 0, buffer.Length);
+ /// }
+ /// }
+ /// }
+ /// }
+ ///
+ ///
+ ///
+ /// Using input As Stream = File.OpenRead(fileToCompress)
+ /// Using raw As FileStream = File.Create(fileToCompress & ".deflated")
+ /// Using compressor As Stream = New DeflateStream(raw, CompressionMode.Compress, CompressionLevel.BestCompression)
+ /// Dim buffer As Byte() = New Byte(4096) {}
+ /// Dim n As Integer = -1
+ /// Do While (n <> 0)
+ /// If (n > 0) Then
+ /// compressor.Write(buffer, 0, n)
+ /// End If
+ /// n = input.Read(buffer, 0, buffer.Length)
+ /// Loop
+ /// End Using
+ /// End Using
+ /// End Using
+ ///
+ ///
+ /// <param name="stream">The stream to be read or written while deflating or inflating.</param>
+ /// <param name="mode">Indicates whether the DeflateStream will compress or decompress.</param>
+ /// <param name="level">A tuning knob to trade speed for effectiveness.</param>
+ public DeflateStream(System.IO.Stream stream, CompressionMode mode, CompressionLevel level)
+ : this(stream, mode, level, false) // captive stream is closed with this stream
+ {
+ }
+
+ ///
+ /// Create a DeflateStream using the specified
+ /// CompressionMode, and explicitly specify whether the
+ /// stream should be left open after Deflation or Inflation.
+ ///
+ ///
+ ///
+ ///
+ ///
+ /// This constructor allows the application to request that the captive stream
+ /// remain open after the deflation or inflation occurs. By default, after
+ /// Close() is called on the stream, the captive stream is also
+ /// closed. In some cases this is not desired, for example if the stream is a
+ /// memory stream that will be re-read after compression. Specify true for
+ /// the parameter to leave the stream open.
+ ///
+ ///
+ ///
+ /// The DeflateStream will use the default compression level.
+ ///
+ ///
+ ///
+ /// See the other overloads of this constructor for example code.
+ ///
+ ///
+ ///
+ ///
+ /// The stream which will be read or written. This is called the
+ /// "captive" stream in other places in this documentation.
+ ///
+ ///
+ ///
+ /// Indicates whether the DeflateStream will compress or decompress.
+ ///
+ ///
+ /// true if the application would like the stream to
+ /// remain open after inflation/deflation.
+ public DeflateStream(System.IO.Stream stream, CompressionMode mode, bool leaveOpen)
+ : this(stream, mode, CompressionLevel.Default, leaveOpen) // default compression level
+ {
+ }
+
+ ///
+ /// Create a DeflateStream using the specified CompressionMode
+ /// and the specified CompressionLevel, and explicitly specify whether
+ /// the stream should be left open after Deflation or Inflation.
+ ///
+ ///
+ ///
+ ///
+ ///
+ /// When mode is CompressionMode.Decompress, the level parameter is ignored.
+ ///
+ ///
+ ///
+ /// This constructor allows the application to request that the captive stream
+ /// remain open after the deflation or inflation occurs. By default, after
+ /// Close() is called on the stream, the captive stream is also
+ /// closed. In some cases this is not desired, for example if the stream is a
+ /// that will be re-read after
+ /// compression. Specify true for the parameter
+ /// to leave the stream open.
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ /// This example shows how to use a DeflateStream to compress data from
+ /// a file, and store the compressed data into another file.
+ ///
+ ///
+ /// using (var output = System.IO.File.Create(fileToCompress + ".deflated"))
+ /// {
+ /// using (System.IO.Stream input = System.IO.File.OpenRead(fileToCompress))
+ /// {
+ /// using (Stream compressor = new DeflateStream(output, CompressionMode.Compress, CompressionLevel.BestCompression, true))
+ /// {
+ /// byte[] buffer = new byte[WORKING_BUFFER_SIZE];
+ /// int n= -1;
+ /// while (n != 0)
+ /// {
+ /// if (n > 0)
+ /// compressor.Write(buffer, 0, n);
+ /// n= input.Read(buffer, 0, buffer.Length);
+ /// }
+ /// }
+ /// }
+ /// // can write additional data to the output stream here
+ /// }
+ ///
+ ///
+ ///
+ /// Using output As FileStream = File.Create(fileToCompress & ".deflated")
+ /// Using input As Stream = File.OpenRead(fileToCompress)
+ /// Using compressor As Stream = New DeflateStream(output, CompressionMode.Compress, CompressionLevel.BestCompression, True)
+ /// Dim buffer As Byte() = New Byte(4096) {}
+ /// Dim n As Integer = -1
+ /// Do While (n <> 0)
+ /// If (n > 0) Then
+ /// compressor.Write(buffer, 0, n)
+ /// End If
+ /// n = input.Read(buffer, 0, buffer.Length)
+ /// Loop
+ /// End Using
+ /// End Using
+ /// ' can write additional data to the output stream here.
+ /// End Using
+ ///
+ ///
+ /// The stream which will be read or written.
+ /// Indicates whether the DeflateStream will compress or decompress.
+ /// true if the application would like the stream to remain open after inflation/deflation.
+ /// A tuning knob to trade speed for effectiveness.
+ // Primary constructor: all other overloads delegate here. The actual work is
+ // delegated to a ZlibBaseStream created with the DEFLATE flavor (raw RFC 1951
+ // data, no zlib/gzip header or trailer). `level` is ignored when decompressing.
+ public DeflateStream(System.IO.Stream stream, CompressionMode mode, CompressionLevel level, bool leaveOpen)
+ {
+ _innerStream = stream;
+ _baseStream = new ZlibBaseStream(stream, mode, level, ZlibStreamFlavor.DEFLATE, leaveOpen);
+ }
+
+ #region Zlib properties
+
+ ///
+ /// This property sets the flush behavior on the stream.
+ ///
+ /// See the ZLIB documentation for the meaning of the flush behavior.
+ ///
+ // Flush behavior, passed straight through to the underlying ZlibBaseStream.
+ // Note: only the setter guards against use after Dispose() (upstream behavior).
+ virtual public FlushType FlushMode
+ {
+ get { return (this._baseStream._flushMode); }
+ set
+ {
+ if (_disposed) throw new ObjectDisposedException("DeflateStream");
+ this._baseStream._flushMode = value;
+ }
+ }
+
+ ///
+ /// The size of the working buffer for the compression codec.
+ ///
+ ///
+ ///
+ ///
+ /// The working buffer is used for all stream operations. The default size is
+ /// 1024 bytes. The minimum size is 128 bytes. You may get better performance
+ /// with a larger buffer. Then again, you might not. You would have to test
+ /// it.
+ ///
+ ///
+ ///
+ /// Set this before the first call to Read() or Write() on the
+ /// stream. If you try to set it afterwards, it will throw.
+ ///
+ ///
+ // Size of the codec's working buffer. May only be set before the working
+ // buffer is allocated (i.e. before the first Read()/Write()); afterwards, or
+ // below the documented minimum, the setter throws ZlibException.
+ public int BufferSize
+ {
+ get
+ {
+ return this._baseStream._bufferSize;
+ }
+ set
+ {
+ if (_disposed) throw new ObjectDisposedException("DeflateStream");
+ if (this._baseStream._workingBuffer != null)
+ throw new ZlibException("The working buffer is already set.");
+ if (value < ZlibConstants.WorkingBufferSizeMin)
+ throw new ZlibException(String.Format("Don't be silly. {0} bytes?? Use a bigger buffer, at least {1}.", value, ZlibConstants.WorkingBufferSizeMin));
+ this._baseStream._bufferSize = value;
+ }
+ }
+
+ ///
+ /// The ZLIB strategy to be used during compression.
+ ///
+ ///
+ ///
+ /// By tweaking this parameter, you may be able to optimize the compression for
+ /// data with particular characteristics.
+ ///
+ // Compression strategy, delegated to the base stream. Setter guards against
+ // use after Dispose(); getter does not (upstream behavior).
+ public CompressionStrategy Strategy
+ {
+ get
+ {
+ return this._baseStream.Strategy;
+ }
+ set
+ {
+ if (_disposed) throw new ObjectDisposedException("DeflateStream");
+ this._baseStream.Strategy = value;
+ }
+ }
+
+ /// Returns the total number of bytes input so far.
+ // Total bytes consumed so far, as counted by the underlying codec (_z).
+ virtual public long TotalIn
+ {
+ get
+ {
+ return this._baseStream._z.TotalBytesIn;
+ }
+ }
+
+ /// Returns the total number of bytes output so far.
+ // Total bytes produced so far, as counted by the underlying codec (_z).
+ virtual public long TotalOut
+ {
+ get
+ {
+ return this._baseStream._z.TotalBytesOut;
+ }
+ }
+
+ #endregion
+
+ #region System.IO.Stream methods
+ ///
+ /// Dispose the stream.
+ ///
+ ///
+ ///
+ /// This may or may not result in a Close() call on the captive
+ /// stream. See the constructors that have a leaveOpen parameter
+ /// for more information.
+ ///
+ ///
+ /// Application code won't call this code directly. This method may be
+ /// invoked in two distinct scenarios. If disposing == true, the method
+ /// has been called directly or indirectly by a user's code, for example
+ /// via the public Dispose() method. In this case, both managed and
+ /// unmanaged resources can be referenced and disposed. If disposing ==
+ /// false, the method has been called by the runtime from inside the
+ /// object finalizer and this method should not reference other objects;
+ /// in that case only unmanaged resources must be referenced or
+ /// disposed.
+ ///
+ ///
+ ///
+ /// true if the Dispose method was invoked by user code.
+ ///
+ // Idempotent: the _disposed flag makes repeat calls no-ops. Closing the
+ // captive stream (honoring the leaveOpen flag) is handled inside
+ // _baseStream.Close(). NOTE(review): _innerStream is stored by the primary
+ // constructor but never referenced here — presumably intentional, since the
+ // base stream owns the captive stream; confirm against ZlibBaseStream.Close().
+ protected override void Dispose(bool disposing)
+ {
+ try
+ {
+ if (!_disposed)
+ {
+ if (disposing && (this._baseStream != null))
+ this._baseStream.Close();
+ _disposed = true;
+ }
+ }
+ finally
+ {
+ base.Dispose(disposing);
+ }
+ }
+
+
+
+ ///
+ /// Indicates whether the stream can be read.
+ ///
+ ///
+ /// The return value depends on whether the captive stream supports reading.
+ ///
+ // Reflects the captive stream's readability, not the CompressionMode;
+ // throws if this stream has already been disposed.
+ public override bool CanRead
+ {
+ get
+ {
+ if (_disposed) throw new ObjectDisposedException("DeflateStream");
+ return _baseStream._stream.CanRead;
+ }
+ }
+
+ ///
+ /// Indicates whether the stream supports Seek operations.
+ ///
+ ///
+ /// Always returns false.
+ ///
+ // Compressed streams are forward-only: seeking is never supported.
+ public override bool CanSeek
+ {
+ get { return false; }
+ }
+
+
+ ///
+ /// Indicates whether the stream can be written.
+ ///
+ ///
+ /// The return value depends on whether the captive stream supports writing.
+ ///
+ // Reflects the captive stream's writability, not the CompressionMode;
+ // throws if this stream has already been disposed.
+ public override bool CanWrite
+ {
+ get
+ {
+ if (_disposed) throw new ObjectDisposedException("DeflateStream");
+ return _baseStream._stream.CanWrite;
+ }
+ }
+
+ ///
+ /// Flush the stream.
+ ///
+ // Flushes the base stream using the current FlushMode; throws if disposed.
+ public override void Flush()
+ {
+ if (_disposed) throw new ObjectDisposedException("DeflateStream");
+ _baseStream.Flush();
+ }
+
+ ///
+ /// Reading this property always throws a .
+ ///
+ // Length is not meaningful for a compression stream. Upstream throws
+ // NotImplementedException here (rather than the BCL-conventional
+ // NotSupportedException) — kept as-is for upstream compatibility.
+ public override long Length
+ {
+ get { throw new NotImplementedException(); }
+ }
+
+ ///
+ /// The position of the stream pointer.
+ ///
+ ///
+ ///
+ /// Setting this property always throws a . Reading will return the total bytes
+ /// written out, if used in writing, or the total bytes read in, if used in
+ /// reading. The count may refer to compressed bytes or uncompressed bytes,
+ /// depending on how you've used the stream.
+ ///
+ // Getter reports bytes produced when the stream has been used for writing,
+ // bytes consumed when used for reading, and 0 before first use. Setter is
+ // unsupported (seeking is impossible on a compressed stream).
+ public override long Position
+ {
+ get
+ {
+ if (this._baseStream._streamMode == Ionic.Zlib.ZlibBaseStream.StreamMode.Writer)
+ return this._baseStream._z.TotalBytesOut;
+ if (this._baseStream._streamMode == Ionic.Zlib.ZlibBaseStream.StreamMode.Reader)
+ return this._baseStream._z.TotalBytesIn;
+ return 0;
+ }
+ set { throw new NotImplementedException(); }
+ }
+
+ ///
+ /// Read data from the stream.
+ ///
+ ///
+ ///
+ ///
+ /// If you wish to use the DeflateStream to compress data while
+ /// reading, you can create a DeflateStream with
+ /// CompressionMode.Compress, providing an uncompressed data stream.
+ /// Then call Read() on that DeflateStream, and the data read will be
+ /// compressed as you read. If you wish to use the DeflateStream to
+ /// decompress data while reading, you can create a DeflateStream with
+ /// CompressionMode.Decompress, providing a readable compressed data
+ /// stream. Then call Read() on that DeflateStream, and the data read
+ /// will be decompressed as you read.
+ ///
+ ///
+ ///
+ /// A DeflateStream can be used for Read() or Write(), but not both.
+ ///
+ ///
+ ///
+ /// The buffer into which the read data should be placed.
+ /// the offset within that data array to put the first byte read.
+ /// the number of bytes to read.
+ /// the number of bytes actually read
+ // Delegates to the base stream; a DeflateStream is usable for Read() or
+ // Write(), but not both. Throws ObjectDisposedException after Dispose().
+ public override int Read(byte[] buffer, int offset, int count)
+ {
+ if (_disposed) throw new ObjectDisposedException("DeflateStream");
+ return _baseStream.Read(buffer, offset, count);
+ }
+
+
+ ///
+ /// Calling this method always throws a .
+ ///
+ /// this is irrelevant, since it will always throw!
+ /// this is irrelevant, since it will always throw!
+ /// irrelevant!
+ // Seeking is never supported (CanSeek is false); always throws.
+ public override long Seek(long offset, System.IO.SeekOrigin origin)
+ {
+ throw new NotImplementedException();
+ }
+
+ ///
+ /// Calling this method always throws a .
+ ///
+ /// this is irrelevant, since it will always throw!
+ // Resizing a compression stream is not meaningful; always throws.
+ public override void SetLength(long value)
+ {
+ throw new NotImplementedException();
+ }
+
+ ///
+ /// Write data to the stream.
+ ///
+ ///
+ ///
+ ///
+ /// If you wish to use the DeflateStream to compress data while
+ /// writing, you can create a DeflateStream with
+ /// CompressionMode.Compress, and a writable output stream. Then call
+ /// Write() on that DeflateStream, providing uncompressed data
+ /// as input. The data sent to the output stream will be the compressed form
+ /// of the data written. If you wish to use the DeflateStream to
+ /// decompress data while writing, you can create a DeflateStream with
+ /// CompressionMode.Decompress, and a writable output stream. Then
+ /// call Write() on that stream, providing previously compressed
+ /// data. The data sent to the output stream will be the decompressed form of
+ /// the data written.
+ ///
+ ///
+ ///
+ /// A DeflateStream can be used for Read() or Write(),
+ /// but not both.
+ ///
+ ///
+ ///
+ ///
+ /// The buffer holding data to write to the stream.
+ /// the offset within that data array to find the first byte to write.
+ /// the number of bytes to write.
+ // Delegates to the base stream; compresses when constructed with
+ // CompressionMode.Compress, decompresses with CompressionMode.Decompress.
+ public override void Write(byte[] buffer, int offset, int count)
+ {
+ if (_disposed) throw new ObjectDisposedException("DeflateStream");
+ _baseStream.Write(buffer, offset, count);
+ }
+ #endregion
+
+
+
+
+ ///
+ /// Compress a string into a byte array using DEFLATE (RFC 1951).
+ ///
+ ///
+ ///
+ /// Uncompress it with .
+ ///
+ ///
+ /// DeflateStream.UncompressString(byte[])
+ /// DeflateStream.CompressBuffer(byte[])
+ /// GZipStream.CompressString(string)
+ /// ZlibStream.CompressString(string)
+ ///
+ ///
+ /// A string to compress. The string will first be encoded
+ /// using UTF8, then compressed.
+ ///
+ ///
+ /// The string in compressed form
+ // UTF8-encodes `s` and DEFLATE-compresses it into a fresh byte array.
+ // The compressor is deliberately not in a using-block: ZlibBaseStream
+ // .CompressString presumably closes it so the compressed data is fully
+ // flushed before ms.ToArray() — NOTE(review): confirm in ZlibBaseStream.
+ public static byte[] CompressString(String s)
+ {
+ using (var ms = new System.IO.MemoryStream())
+ {
+ System.IO.Stream compressor =
+ new DeflateStream(ms, CompressionMode.Compress, CompressionLevel.BestCompression);
+ ZlibBaseStream.CompressString(s, compressor);
+ return ms.ToArray();
+ }
+ }
+
+
+ ///
+ /// Compress a byte array into a new byte array using DEFLATE.
+ ///
+ ///
+ ///
+ /// Uncompress it with .
+ ///
+ ///
+ /// DeflateStream.CompressString(string)
+ /// DeflateStream.UncompressBuffer(byte[])
+ /// GZipStream.CompressBuffer(byte[])
+ /// ZlibStream.CompressBuffer(byte[])
+ ///
+ ///
+ /// A buffer to compress.
+ ///
+ ///
+ /// The data in compressed form
+ // DEFLATE-compresses `b` into a fresh byte array; mirrors CompressString.
+ // NOTE(review): the compressor is assumed to be closed/flushed inside
+ // ZlibBaseStream.CompressBuffer before ms.ToArray() — confirm.
+ public static byte[] CompressBuffer(byte[] b)
+ {
+ using (var ms = new System.IO.MemoryStream())
+ {
+ System.IO.Stream compressor =
+ new DeflateStream( ms, CompressionMode.Compress, CompressionLevel.BestCompression );
+
+ ZlibBaseStream.CompressBuffer(b, compressor);
+ return ms.ToArray();
+ }
+ }
+
+
+ ///
+ /// Uncompress a DEFLATE'd byte array into a single string.
+ ///
+ ///
+ /// DeflateStream.CompressString(String)
+ /// DeflateStream.UncompressBuffer(byte[])
+ /// GZipStream.UncompressString(byte[])
+ /// ZlibStream.UncompressString(byte[])
+ ///
+ ///
+ /// A buffer containing DEFLATE-compressed data.
+ ///
+ ///
+ /// The uncompressed string
+ // Inflates a DEFLATE-compressed buffer and decodes the result as a string.
+ // NOTE(review): `compressed` is passed both as the MemoryStream source and as
+ // a direct argument to ZlibBaseStream.UncompressString (upstream signature) —
+ // presumably the helper only uses it for buffer sizing; confirm there.
+ public static String UncompressString(byte[] compressed)
+ {
+ using (var input = new System.IO.MemoryStream(compressed))
+ {
+ System.IO.Stream decompressor =
+ new DeflateStream(input, CompressionMode.Decompress);
+
+ return ZlibBaseStream.UncompressString(compressed, decompressor);
+ }
+ }
+
+
+ ///
+ /// Uncompress a DEFLATE'd byte array into a byte array.
+ ///
+ ///
+ /// DeflateStream.CompressBuffer(byte[])
+ /// DeflateStream.UncompressString(byte[])
+ /// GZipStream.UncompressBuffer(byte[])
+ /// ZlibStream.UncompressBuffer(byte[])
+ ///
+ ///
+ /// A buffer containing data that has been compressed with DEFLATE.
+ ///
+ ///
+ /// The data in uncompressed form
+ // Inflates a DEFLATE-compressed buffer into a fresh byte array; the inverse
+ // of CompressBuffer. Same double-use of `compressed` as in UncompressString.
+ public static byte[] UncompressBuffer(byte[] compressed)
+ {
+ using (var input = new System.IO.MemoryStream(compressed))
+ {
+ System.IO.Stream decompressor =
+ new DeflateStream( input, CompressionMode.Decompress );
+
+ return ZlibBaseStream.UncompressBuffer(compressed, decompressor);
+ }
+ }
+
+ }
+
+}
+
diff --git a/src/Hazelcast.Net/Polyfills/ZLib/GZipStream.cs b/src/Hazelcast.Net/Polyfills/ZLib/GZipStream.cs
new file mode 100644
index 0000000000..745e096395
--- /dev/null
+++ b/src/Hazelcast.Net/Polyfills/ZLib/GZipStream.cs
@@ -0,0 +1,1033 @@
+// GZipStream.cs
+// ------------------------------------------------------------------
+//
+// Copyright (c) 2009 Dino Chiesa and Microsoft Corporation.
+// All rights reserved.
+//
+// This code module is part of DotNetZip, a zipfile class library.
+//
+// ------------------------------------------------------------------
+//
+// This code is licensed under the Microsoft Public License.
+// See the file License.txt for the license details.
+// More info on: http://dotnetzip.codeplex.com
+//
+// ------------------------------------------------------------------
+//
+// last saved (in emacs):
+// Time-stamp: <2011-August-08 18:14:39>
+//
+// ------------------------------------------------------------------
+//
+// This module defines the GZipStream class, which can be used as a replacement for
+// the System.IO.Compression.GZipStream class in the .NET BCL. NB: The design is not
+// completely OO clean: there is some intelligence in the ZlibBaseStream that reads the
+// GZip header.
+//
+// ------------------------------------------------------------------
+
+
+using System;
+using System.IO;
+
+namespace Ionic.Zlib
+{
+ ///
+ /// A class for compressing and decompressing GZIP streams.
+ ///
+ ///
+ ///
+ ///
+ /// The GZipStream is a Decorator on a
+ /// . It adds GZIP compression or decompression to any
+ /// stream.
+ ///
+ ///
+ ///
+ /// Like the System.IO.Compression.GZipStream in the .NET Base Class Library, the
+ /// Ionic.Zlib.GZipStream can compress while writing, or decompress while
+ /// reading, but not vice versa. The compression method used is GZIP, which is
+ /// documented in IETF RFC
+ /// 1952, "GZIP file format specification version 4.3".
+ ///
+ ///
+ /// A GZipStream can be used to decompress data (through Read()) or
+ /// to compress data (through Write()), but not both.
+ ///
+ ///
+ ///
+ /// If you wish to use the GZipStream to compress data, you must wrap it
+ /// around a write-able stream. As you call Write() on the GZipStream, the
+ /// data will be compressed into the GZIP format. If you want to decompress data,
+ /// you must wrap the GZipStream around a readable stream that contains an
+ /// IETF RFC 1952-compliant stream. The data will be decompressed as you call
+ /// Read() on the GZipStream.
+ ///
+ ///
+ ///
+ /// Though the GZIP format allows data from multiple files to be concatenated
+ /// together, this stream handles only a single segment of GZIP format, typically
+ /// representing a single file.
+ ///
+ ///
+ ///
+ /// This class is similar to and .
+ /// ZlibStream handles RFC1950-compliant streams.
+ /// handles RFC1951-compliant streams. This class handles RFC1952-compliant streams.
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ public class GZipStream : System.IO.Stream
+ {
+ // GZip format
+ // source: http://tools.ietf.org/html/rfc1952
+ //
+ // header id: 2 bytes 1F 8B
+ // compress method 1 byte 8= DEFLATE (none other supported)
+ // flag 1 byte bitfield (See below)
+ // mtime 4 bytes time_t (seconds since jan 1, 1970 UTC of the file.
+ // xflg 1 byte 2 = max compress used , 4 = max speed (can be ignored)
+ // OS 1 byte OS for originating archive. set to 0xFF in compression.
+ // extra field length 2 bytes optional - only if FEXTRA is set.
+ // extra field varies
+ // filename varies optional - if FNAME is set. zero terminated. ISO-8859-1.
+ // file comment varies optional - if FCOMMENT is set. zero terminated. ISO-8859-1.
+ // crc16 1 byte optional - present only if FHCRC bit is set
+ // compressed data varies
+ // CRC32 4 bytes
+ // isize 4 bytes data size modulo 2^32
+ //
+ // FLG (FLaGs)
+ // bit 0 FTEXT - indicates file is ASCII text (can be safely ignored)
+ // bit 1 FHCRC - there is a CRC16 for the header immediately following the header
+ // bit 2 FEXTRA - extra fields are present
+ // bit 3 FNAME - the zero-terminated filename is present. encoding; ISO-8859-1.
+ // bit 4 FCOMMENT - a zero-terminated file comment is present. encoding: ISO-8859-1
+ // bit 5 reserved
+ // bit 6 reserved
+ // bit 7 reserved
+ //
+ // On consumption:
+ // Extra field is a bunch of nonsense and can be safely ignored.
+ // Header CRC and OS, likewise.
+ //
+ // on generation:
+ // all optional fields get 0, except for the OS, which gets 255.
+ //
+
+
+
+ ///
+ /// The comment on the GZIP stream.
+ ///
+ ///
+ ///
+ ///
+ /// The GZIP format allows for each file to optionally have an associated
+ /// comment stored with the file. The comment is encoded with the ISO-8859-1
+ /// code page. To include a comment in a GZIP stream you create, set this
+ /// property before calling Write() for the first time on the
+ /// GZipStream.
+ ///
+ ///
+ ///
+ /// When using GZipStream to decompress, you can retrieve this property
+ /// after the first call to Read(). If no comment has been set in the
+ /// GZIP bytestream, the Comment property will return null
+ /// (Nothing in VB).
+ ///
+ ///
+ // Optional GZIP file comment (ISO-8859-1 in the byte stream). Set before the
+ // first Write() when compressing; readable after the first Read() when
+ // decompressing. Only the setter guards against use after Dispose().
+ public String Comment
+ {
+ get
+ {
+ return _Comment;
+ }
+ set
+ {
+ if (_disposed) throw new ObjectDisposedException("GZipStream");
+ _Comment = value;
+ }
+ }
+
+ ///
+ /// The FileName for the GZIP stream.
+ ///
+ ///
+ ///
+ ///
+ ///
+ /// The GZIP format optionally allows each file to have an associated
+ /// filename. When compressing data (through Write()), set this
+ /// FileName before calling Write() the first time on the GZipStream.
+ /// The actual filename is encoded into the GZIP bytestream with the
+ /// ISO-8859-1 code page, according to RFC 1952. It is the application's
+ /// responsibility to insure that the FileName can be encoded and decoded
+ /// correctly with this code page.
+ ///
+ ///
+ ///
+ /// When decompressing (through Read()), you can retrieve this value
+ /// any time after the first Read(). In the case where there was no filename
+ /// encoded into the GZIP bytestream, the property will return null (Nothing
+ /// in VB).
+ ///
+ ///
+ // Optional filename stored in the GZIP header. The setter normalizes '/' to
+ // '\', rejects values ending in '\' and strips any leading path, so only a
+ // bare file name is retained. NOTE(review): throws the bare Exception type
+ // for an illegal name — upstream behavior, kept for compatibility.
+ public String FileName
+ {
+ get { return _FileName; }
+ set
+ {
+ if (_disposed) throw new ObjectDisposedException("GZipStream");
+ _FileName = value;
+ if (_FileName == null) return;
+ if (_FileName.IndexOf("/") != -1)
+ {
+ _FileName = _FileName.Replace("/", "\\");
+ }
+ if (_FileName.EndsWith("\\"))
+ throw new Exception("Illegal filename");
+ if (_FileName.IndexOf("\\") != -1)
+ {
+ // trim any leading path
+ _FileName = Path.GetFileName(_FileName);
+ }
+ }
+ }
+
+ ///
+ /// The last modified time for the GZIP stream.
+ ///
+ ///
+ ///
+ /// GZIP allows the storage of a last modified time with each GZIP entry.
+ /// When compressing data, you can set this before the first call to
+ /// Write(). When decompressing, you can retrieve this value any time
+ /// after the first call to Read().
+ ///
+ public DateTime? LastModified;
+
+ ///
+ /// The CRC on the GZIP stream.
+ ///
+ ///
+ /// This is used for internal error checking. You probably don't need to look at this property.
+ ///
+ public int Crc32 { get { return _Crc32; } }
+
+ // Backing state. _Crc32 is captured from the base stream in Dispose();
+ // _FileName/_Comment back the public properties above.
+ // NOTE(review): _headerByteCount and _firstReadDone are presumably maintained
+ // by the GZIP header read/write paths not visible here — confirm usage.
+ private int _headerByteCount;
+ internal ZlibBaseStream _baseStream;
+ bool _disposed;
+ bool _firstReadDone;
+ string _FileName;
+ string _Comment;
+ int _Crc32;
+
+
+ ///
+ /// Create a GZipStream using the specified CompressionMode.
+ ///
+ ///
+ ///
+ ///
+ /// When mode is CompressionMode.Compress, the GZipStream will use the
+ /// default compression level.
+ ///
+ ///
+ ///
+ /// As noted in the class documentation, the CompressionMode (Compress
+ /// or Decompress) also establishes the "direction" of the stream. A
+ /// GZipStream with CompressionMode.Compress works only through
+ /// Write(). A GZipStream with
+ /// CompressionMode.Decompress works only through Read().
+ ///
+ ///
+ ///
+ ///
+ ///
+ /// This example shows how to use a GZipStream to compress data.
+ ///
+ /// using (System.IO.Stream input = System.IO.File.OpenRead(fileToCompress))
+ /// {
+ /// using (var raw = System.IO.File.Create(outputFile))
+ /// {
+ /// using (Stream compressor = new GZipStream(raw, CompressionMode.Compress))
+ /// {
+ /// byte[] buffer = new byte[WORKING_BUFFER_SIZE];
+ /// int n;
+ /// while ((n= input.Read(buffer, 0, buffer.Length)) != 0)
+ /// {
+ /// compressor.Write(buffer, 0, n);
+ /// }
+ /// }
+ /// }
+ /// }
+ ///
+ ///
+ /// Dim outputFile As String = (fileToCompress & ".compressed")
+ /// Using input As Stream = File.OpenRead(fileToCompress)
+ /// Using raw As FileStream = File.Create(outputFile)
+ /// Using compressor As Stream = New GZipStream(raw, CompressionMode.Compress)
+ /// Dim buffer As Byte() = New Byte(4096) {}
+ /// Dim n As Integer = -1
+ /// Do While (n <> 0)
+ /// If (n > 0) Then
+ /// compressor.Write(buffer, 0, n)
+ /// End If
+ /// n = input.Read(buffer, 0, buffer.Length)
+ /// Loop
+ /// End Using
+ /// End Using
+ /// End Using
+ ///
+ ///
+ ///
+ ///
+ /// This example shows how to use a GZipStream to uncompress a file.
+ ///
+ /// private void GunZipFile(string filename)
+ /// {
+ /// if (!filename.EndsWith(".gz))
+ /// throw new ArgumentException("filename");
+ /// var DecompressedFile = filename.Substring(0,filename.Length-3);
+ /// byte[] working = new byte[WORKING_BUFFER_SIZE];
+ /// int n= 1;
+ /// using (System.IO.Stream input = System.IO.File.OpenRead(filename))
+ /// {
+ /// using (Stream decompressor= new Ionic.Zlib.GZipStream(input, CompressionMode.Decompress, true))
+ /// {
+ /// using (var output = System.IO.File.Create(DecompressedFile))
+ /// {
+ /// while (n !=0)
+ /// {
+ /// n= decompressor.Read(working, 0, working.Length);
+ /// if (n > 0)
+ /// {
+ /// output.Write(working, 0, n);
+ /// }
+ /// }
+ /// }
+ /// }
+ /// }
+ /// }
+ ///
+ ///
+ ///
+ /// Private Sub GunZipFile(ByVal filename as String)
+ /// If Not (filename.EndsWith(".gz)) Then
+ /// Throw New ArgumentException("filename")
+ /// End If
+ /// Dim DecompressedFile as String = filename.Substring(0,filename.Length-3)
+ /// Dim working(WORKING_BUFFER_SIZE) as Byte
+ /// Dim n As Integer = 1
+ /// Using input As Stream = File.OpenRead(filename)
+ /// Using decompressor As Stream = new Ionic.Zlib.GZipStream(input, CompressionMode.Decompress, True)
+ /// Using output As Stream = File.Create(UncompressedFile)
+ /// Do
+ /// n= decompressor.Read(working, 0, working.Length)
+ /// If n > 0 Then
+ /// output.Write(working, 0, n)
+ /// End IF
+ /// Loop While (n > 0)
+ /// End Using
+ /// End Using
+ /// End Using
+ /// End Sub
+ ///
+ ///
+ ///
+ /// The stream which will be read or written.
+ /// Indicates whether the GZipStream will compress or decompress.
+ // Convenience overload: default compression level, captive stream closed on
+ // Close() (leaveOpen=false).
+ public GZipStream(Stream stream, CompressionMode mode)
+ : this(stream, mode, CompressionLevel.Default, false)
+ {
+ }
+
+ ///
+ /// Create a GZipStream using the specified CompressionMode and
+ /// the specified CompressionLevel.
+ ///
+ ///
+ ///
+ ///
+ /// The CompressionMode (Compress or Decompress) also establishes the
+ /// "direction" of the stream. A GZipStream with
+ /// CompressionMode.Compress works only through Write(). A
+ /// GZipStream with CompressionMode.Decompress works only
+ /// through Read().
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ /// This example shows how to use a GZipStream to compress a file into a .gz file.
+ ///
+ ///
+ /// using (System.IO.Stream input = System.IO.File.OpenRead(fileToCompress))
+ /// {
+ /// using (var raw = System.IO.File.Create(fileToCompress + ".gz"))
+ /// {
+ /// using (Stream compressor = new GZipStream(raw,
+ /// CompressionMode.Compress,
+ /// CompressionLevel.BestCompression))
+ /// {
+ /// byte[] buffer = new byte[WORKING_BUFFER_SIZE];
+ /// int n;
+ /// while ((n= input.Read(buffer, 0, buffer.Length)) != 0)
+ /// {
+ /// compressor.Write(buffer, 0, n);
+ /// }
+ /// }
+ /// }
+ /// }
+ ///
+ ///
+ ///
+ /// Using input As Stream = File.OpenRead(fileToCompress)
+ /// Using raw As FileStream = File.Create(fileToCompress & ".gz")
+ /// Using compressor As Stream = New GZipStream(raw, CompressionMode.Compress, CompressionLevel.BestCompression)
+ /// Dim buffer As Byte() = New Byte(4096) {}
+ /// Dim n As Integer = -1
+ /// Do While (n <> 0)
+ /// If (n > 0) Then
+ /// compressor.Write(buffer, 0, n)
+ /// End If
+ /// n = input.Read(buffer, 0, buffer.Length)
+ /// Loop
+ /// End Using
+ /// End Using
+ /// End Using
+ ///
+ ///
+ /// The stream to be read or written while deflating or inflating.
+ /// Indicates whether the GZipStream will compress or decompress.
+ /// A tuning knob to trade speed for effectiveness.
+ // Convenience overload: explicit compression level, captive stream closed on
+ // Close() (leaveOpen=false).
+ public GZipStream(Stream stream, CompressionMode mode, CompressionLevel level)
+ : this(stream, mode, level, false)
+ {
+ }
+
+ ///
+ /// Create a GZipStream using the specified CompressionMode, and
+ /// explicitly specify whether the stream should be left open after Deflation
+ /// or Inflation.
+ ///
+ ///
+ ///
+ ///
+ /// This constructor allows the application to request that the captive stream
+ /// remain open after the deflation or inflation occurs. By default, after
+ /// Close() is called on the stream, the captive stream is also
+ /// closed. In some cases this is not desired, for example if the stream is a
+ /// memory stream that will be re-read after compressed data has been written
+ /// to it. Specify true for the parameter to leave
+ /// the stream open.
+ ///
+ ///
+ ///
+ /// The (Compress or Decompress) also
+ /// establishes the "direction" of the stream. A GZipStream with
+ /// CompressionMode.Compress works only through Write(). A GZipStream
+ /// with CompressionMode.Decompress works only through Read().
+ ///
+ ///
+ ///
+ /// The GZipStream will use the default compression level. If you want
+ /// to specify the compression level, see .
+ ///
+ ///
+ ///
+ /// See the other overloads of this constructor for example code.
+ ///
+ ///
+ ///
+ ///
+ ///
+ /// The stream which will be read or written. This is called the "captive"
+ /// stream in other places in this documentation.
+ ///
+ ///
+ /// Indicates whether the GZipStream will compress or decompress.
+ ///
+ ///
+ ///
+ /// true if the application would like the base stream to remain open after
+ /// inflation/deflation.
+ ///
+ // Convenience overload: default compression level; leaveOpen controls whether
+ // the captive stream survives Close().
+ public GZipStream(Stream stream, CompressionMode mode, bool leaveOpen)
+ : this(stream, mode, CompressionLevel.Default, leaveOpen)
+ {
+ }
+
+ ///
+ /// Create a GZipStream using the specified CompressionMode and the
+ /// specified CompressionLevel, and explicitly specify whether the
+ /// stream should be left open after Deflation or Inflation.
+ ///
+ ///
+ ///
+ ///
+ ///
+ /// This constructor allows the application to request that the captive stream
+ /// remain open after the deflation or inflation occurs. By default, after
+ /// Close() is called on the stream, the captive stream is also
+ /// closed. In some cases this is not desired, for example if the stream is a
+ /// memory stream that will be re-read after compressed data has been written
+ /// to it. Specify true for the parameter to
+ /// leave the stream open.
+ ///
+ ///
+ ///
+ /// As noted in the class documentation, the CompressionMode (Compress
+ /// or Decompress) also establishes the "direction" of the stream. A
+ /// GZipStream with CompressionMode.Compress works only through
+ /// Write(). A GZipStream with CompressionMode.Decompress works only
+ /// through Read().
+ ///
+ ///
+ ///
+ ///
+ ///
+ /// This example shows how to use a GZipStream to compress data.
+ ///
+ /// using (System.IO.Stream input = System.IO.File.OpenRead(fileToCompress))
+ /// {
+ /// using (var raw = System.IO.File.Create(outputFile))
+ /// {
+ /// using (Stream compressor = new GZipStream(raw, CompressionMode.Compress, CompressionLevel.BestCompression, true))
+ /// {
+ /// byte[] buffer = new byte[WORKING_BUFFER_SIZE];
+ /// int n;
+ /// while ((n= input.Read(buffer, 0, buffer.Length)) != 0)
+ /// {
+ /// compressor.Write(buffer, 0, n);
+ /// }
+ /// }
+ /// }
+ /// }
+ ///
+ ///
+ /// Dim outputFile As String = (fileToCompress & ".compressed")
+ /// Using input As Stream = File.OpenRead(fileToCompress)
+ /// Using raw As FileStream = File.Create(outputFile)
+ /// Using compressor As Stream = New GZipStream(raw, CompressionMode.Compress, CompressionLevel.BestCompression, True)
+ /// Dim buffer As Byte() = New Byte(4096) {}
+ /// Dim n As Integer = -1
+ /// Do While (n <> 0)
+ /// If (n > 0) Then
+ /// compressor.Write(buffer, 0, n)
+ /// End If
+ /// n = input.Read(buffer, 0, buffer.Length)
+ /// Loop
+ /// End Using
+ /// End Using
+ /// End Using
+ ///
+ ///
+ /// The stream which will be read or written.
+ /// Indicates whether the GZipStream will compress or decompress.
+ /// true if the application would like the stream to remain open after inflation/deflation.
+ /// A tuning knob to trade speed for effectiveness.
+ // Primary constructor: all other overloads delegate here. The work is done by
+ // a ZlibBaseStream created with the GZIP flavor (RFC 1952 header/trailer).
+ public GZipStream(Stream stream, CompressionMode mode, CompressionLevel level, bool leaveOpen)
+ {
+ _baseStream = new ZlibBaseStream(stream, mode, level, ZlibStreamFlavor.GZIP, leaveOpen);
+ }
+
+ #region Zlib properties
+
+ ///
+ /// This property sets the flush behavior on the stream.
+ ///
+ // Flush behavior, passed through to the underlying ZlibBaseStream; only the
+ // setter guards against use after Dispose() (upstream behavior).
+ virtual public FlushType FlushMode
+ {
+ get { return (this._baseStream._flushMode); }
+ set {
+ if (_disposed) throw new ObjectDisposedException("GZipStream");
+ this._baseStream._flushMode = value;
+ }
+ }
+
+ ///
+ /// The size of the working buffer for the compression codec.
+ ///
+ ///
+ ///
+ ///
+ /// The working buffer is used for all stream operations. The default size is
+ /// 1024 bytes. The minimum size is 128 bytes. You may get better performance
+ /// with a larger buffer. Then again, you might not. You would have to test
+ /// it.
+ ///
+ ///
+ ///
+ /// Set this before the first call to Read() or Write() on the
+ /// stream. If you try to set it afterwards, it will throw.
+ ///
+ ///
+ // Size of the codec's working buffer. May only be set before the working
+ // buffer is allocated (i.e. before the first Read()/Write()); afterwards, or
+ // below the documented minimum, the setter throws ZlibException.
+ public int BufferSize
+ {
+ get
+ {
+ return this._baseStream._bufferSize;
+ }
+ set
+ {
+ if (_disposed) throw new ObjectDisposedException("GZipStream");
+ if (this._baseStream._workingBuffer != null)
+ throw new ZlibException("The working buffer is already set.");
+ if (value < ZlibConstants.WorkingBufferSizeMin)
+ throw new ZlibException(String.Format("Don't be silly. {0} bytes?? Use a bigger buffer, at least {1}.", value, ZlibConstants.WorkingBufferSizeMin));
+ this._baseStream._bufferSize = value;
+ }
+ }
+
+
+ /// Returns the total number of bytes input so far.
+ virtual public long TotalIn
+ {
+ get
+ {
+ return this._baseStream._z.TotalBytesIn;
+ }
+ }
+
+ /// Returns the total number of bytes output so far.
+ virtual public long TotalOut
+ {
+ get
+ {
+ return this._baseStream._z.TotalBytesOut;
+ }
+ }
+
+ #endregion
+
+ #region Stream methods
+
+ ///
+ /// Dispose the stream.
+ ///
+ ///
+ ///
+ /// This may or may not result in a Close() call on the captive
+ /// stream. See the constructors that have a leaveOpen parameter
+ /// for more information.
+ ///
+ ///
+ /// This method may be invoked in two distinct scenarios. If disposing
+ /// == true, the method has been called directly or indirectly by a
+ /// user's code, for example via the public Dispose() method. In this
+ /// case, both managed and unmanaged resources can be referenced and
+ /// disposed. If disposing == false, the method has been called by the
+ /// runtime from inside the object finalizer and this method should not
+ /// reference other objects; in that case only unmanaged resources must
+ /// be referenced or disposed.
+ ///
+ ///
+ ///
+ /// indicates whether the Dispose method was invoked by user code.
+ ///
+ protected override void Dispose(bool disposing)
+ {
+ try
+ {
+ if (!_disposed)
+ {
+ if (disposing && (this._baseStream != null))
+ {
+ this._baseStream.Close();
+ this._Crc32 = _baseStream.Crc32;
+ }
+ _disposed = true;
+ }
+ }
+ finally
+ {
+ base.Dispose(disposing);
+ }
+ }
+
+
+ ///
+ /// Indicates whether the stream can be read.
+ ///
+ ///
+ /// The return value depends on whether the captive stream supports reading.
+ ///
+ public override bool CanRead
+ {
+ get
+ {
+ if (_disposed) throw new ObjectDisposedException("GZipStream");
+ return _baseStream._stream.CanRead;
+ }
+ }
+
+ ///
+ /// Indicates whether the stream supports Seek operations.
+ ///
+ ///
+ /// Always returns false.
+ ///
+ public override bool CanSeek
+ {
+ get { return false; }
+ }
+
+
+ ///
+ /// Indicates whether the stream can be written.
+ ///
+ ///
+ /// The return value depends on whether the captive stream supports writing.
+ ///
+ public override bool CanWrite
+ {
+ get
+ {
+ if (_disposed) throw new ObjectDisposedException("GZipStream");
+ return _baseStream._stream.CanWrite;
+ }
+ }
+
+ ///
+ /// Flush the stream.
+ ///
+ public override void Flush()
+ {
+ if (_disposed) throw new ObjectDisposedException("GZipStream");
+ _baseStream.Flush();
+ }
+
+ ///
+ /// Reading this property always throws a NotImplementedException.
+ ///
+ public override long Length
+ {
+ get { throw new NotImplementedException(); }
+ }
+
+ ///
+ /// The position of the stream pointer.
+ ///
+ ///
+ ///
+ /// Setting this property always throws a NotImplementedException. Reading will return the total bytes
+ /// written out, if used in writing, or the total bytes read in, if used in
+ /// reading. The count may refer to compressed bytes or uncompressed bytes,
+ /// depending on how you've used the stream.
+ ///
+ public override long Position
+ {
+ get
+ {
+ if (this._baseStream._streamMode == Ionic.Zlib.ZlibBaseStream.StreamMode.Writer)
+ return this._baseStream._z.TotalBytesOut + _headerByteCount;
+ if (this._baseStream._streamMode == Ionic.Zlib.ZlibBaseStream.StreamMode.Reader)
+ return this._baseStream._z.TotalBytesIn + this._baseStream._gzipHeaderByteCount;
+ return 0;
+ }
+
+ set { throw new NotImplementedException(); }
+ }
+
+ ///
+ /// Read and decompress data from the source stream.
+ ///
+ ///
+ ///
+ /// With a GZipStream, decompression is done through reading.
+ ///
+ ///
+ ///
+ ///
+ /// byte[] working = new byte[WORKING_BUFFER_SIZE];
+ /// using (System.IO.Stream input = System.IO.File.OpenRead(_CompressedFile))
+ /// {
+ /// using (Stream decompressor= new Ionic.Zlib.GZipStream(input, CompressionMode.Decompress, true))
+ /// {
+ /// using (var output = System.IO.File.Create(_DecompressedFile))
+ /// {
+ /// int n;
+ /// while ((n= decompressor.Read(working, 0, working.Length)) !=0)
+ /// {
+ /// output.Write(working, 0, n);
+ /// }
+ /// }
+ /// }
+ /// }
+ ///
+ ///
+ /// The buffer into which the decompressed data should be placed.
+ /// the offset within that data array to put the first byte read.
+ /// the number of bytes to read.
+ /// the number of bytes actually read
+ public override int Read(byte[] buffer, int offset, int count)
+ {
+ if (_disposed) throw new ObjectDisposedException("GZipStream");
+ int n = _baseStream.Read(buffer, offset, count);
+
+ // Console.WriteLine("GZipStream::Read(buffer, off({0}), c({1}) = {2}", offset, count, n);
+ // Console.WriteLine( Util.FormatByteArray(buffer, offset, n) );
+
+ if (!_firstReadDone)
+ {
+ _firstReadDone = true;
+ FileName = _baseStream._GzipFileName;
+ Comment = _baseStream._GzipComment;
+ }
+ return n;
+ }
+
+
+
+ ///
+ /// Calling this method always throws a NotImplementedException.
+ ///
+ /// irrelevant; it will always throw!
+ /// irrelevant; it will always throw!
+ /// irrelevant!
+ public override long Seek(long offset, SeekOrigin origin)
+ {
+ throw new NotImplementedException();
+ }
+
+ ///
+ /// Calling this method always throws a NotImplementedException.
+ ///
+ /// irrelevant; this method will always throw!
+ public override void SetLength(long value)
+ {
+ throw new NotImplementedException();
+ }
+
+ ///
+ /// Write data to the stream.
+ ///
+ ///
+ ///
+ ///
+ /// If you wish to use the GZipStream to compress data while writing,
+ /// you can create a GZipStream with CompressionMode.Compress, and a
+ /// writable output stream. Then call Write() on that GZipStream,
+ /// providing uncompressed data as input. The data sent to the output stream
+ /// will be the compressed form of the data written.
+ ///
+ ///
+ ///
+ /// A GZipStream can be used for Read() or Write(), but not
+ /// both. Writing implies compression. Reading implies decompression.
+ ///
+ ///
+ ///
+ /// The buffer holding data to write to the stream.
+ /// the offset within that data array to find the first byte to write.
+ /// the number of bytes to write.
+ public override void Write(byte[] buffer, int offset, int count)
+ {
+ if (_disposed) throw new ObjectDisposedException("GZipStream");
+ if (_baseStream._streamMode == Ionic.Zlib.ZlibBaseStream.StreamMode.Undefined)
+ {
+ //Console.WriteLine("GZipStream: First write");
+ if (_baseStream._wantCompress)
+ {
+ // first write in compression, therefore, emit the GZIP header
+ _headerByteCount = EmitHeader();
+ }
+ else
+ {
+ throw new InvalidOperationException();
+ }
+ }
+
+ _baseStream.Write(buffer, offset, count);
+ }
+ #endregion
+
+
+ internal static readonly System.DateTime _unixEpoch = new System.DateTime(1970, 1, 1, 0, 0, 0, DateTimeKind.Utc);
+#if SILVERLIGHT || NETCF
+ internal static readonly System.Text.Encoding iso8859dash1 = new Ionic.Encoding.Iso8859Dash1Encoding();
+#else
+ internal static readonly System.Text.Encoding iso8859dash1 = System.Text.Encoding.GetEncoding("iso-8859-1");
+#endif
+
+
+ private int EmitHeader()
+ {
+ byte[] commentBytes = (Comment == null) ? null : iso8859dash1.GetBytes(Comment);
+ byte[] filenameBytes = (FileName == null) ? null : iso8859dash1.GetBytes(FileName);
+
+ int cbLength = (Comment == null) ? 0 : commentBytes.Length + 1;
+ int fnLength = (FileName == null) ? 0 : filenameBytes.Length + 1;
+
+ int bufferLength = 10 + cbLength + fnLength;
+ byte[] header = new byte[bufferLength];
+ int i = 0;
+ // ID
+ header[i++] = 0x1F;
+ header[i++] = 0x8B;
+
+ // compression method
+ header[i++] = 8;
+ byte flag = 0;
+ if (Comment != null)
+ flag ^= 0x10;
+ if (FileName != null)
+ flag ^= 0x8;
+
+ // flag
+ header[i++] = flag;
+
+ // mtime
+ if (!LastModified.HasValue) LastModified = DateTime.Now;
+ System.TimeSpan delta = LastModified.Value - _unixEpoch;
+ Int32 timet = (Int32)delta.TotalSeconds;
+ Array.Copy(BitConverter.GetBytes(timet), 0, header, i, 4);
+ i += 4;
+
+ // xflg
+ header[i++] = 0; // this field is totally useless
+ // OS
+ header[i++] = 0xFF; // 0xFF == unspecified
+
+ // extra field length - only if FEXTRA is set, which it is not.
+ //header[i++]= 0;
+ //header[i++]= 0;
+
+ // filename
+ if (fnLength != 0)
+ {
+ Array.Copy(filenameBytes, 0, header, i, fnLength - 1);
+ i += fnLength - 1;
+ header[i++] = 0; // terminate
+ }
+
+ // comment
+ if (cbLength != 0)
+ {
+ Array.Copy(commentBytes, 0, header, i, cbLength - 1);
+ i += cbLength - 1;
+ header[i++] = 0; // terminate
+ }
+
+ _baseStream._stream.Write(header, 0, header.Length);
+
+ return header.Length; // bytes written
+ }
+
+
+
+ ///
+ /// Compress a string into a byte array using GZip.
+ ///
+ ///
+ ///
+ /// Uncompress it with GZipStream.UncompressString().
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ /// A string to compress. The string will first be encoded
+ /// using UTF8, then compressed.
+ ///
+ ///
+ /// The string in compressed form
+ public static byte[] CompressString(String s)
+ {
+ using (var ms = new MemoryStream())
+ {
+ System.IO.Stream compressor =
+ new GZipStream(ms, CompressionMode.Compress, CompressionLevel.BestCompression);
+ ZlibBaseStream.CompressString(s, compressor);
+ return ms.ToArray();
+ }
+ }
+
+
+ ///
+ /// Compress a byte array into a new byte array using GZip.
+ ///
+ ///
+ ///
+ /// Uncompress it with GZipStream.UncompressBuffer().
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ /// A buffer to compress.
+ ///
+ ///
+ /// The data in compressed form
+ public static byte[] CompressBuffer(byte[] b)
+ {
+ using (var ms = new MemoryStream())
+ {
+ System.IO.Stream compressor =
+ new GZipStream( ms, CompressionMode.Compress, CompressionLevel.BestCompression );
+
+ ZlibBaseStream.CompressBuffer(b, compressor);
+ return ms.ToArray();
+ }
+ }
+
+
+ ///
+ /// Uncompress a GZip'ed byte array into a single string.
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ /// A buffer containing GZIP-compressed data.
+ ///
+ ///
+ /// The uncompressed string
+ public static String UncompressString(byte[] compressed)
+ {
+ using (var input = new MemoryStream(compressed))
+ {
+ Stream decompressor = new GZipStream(input, CompressionMode.Decompress);
+ return ZlibBaseStream.UncompressString(compressed, decompressor);
+ }
+ }
+
+
+ ///
+ /// Uncompress a GZip'ed byte array into a byte array.
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ /// A buffer containing data that has been compressed with GZip.
+ ///
+ ///
+ /// The data in uncompressed form
+ public static byte[] UncompressBuffer(byte[] compressed)
+ {
+ using (var input = new System.IO.MemoryStream(compressed))
+ {
+ System.IO.Stream decompressor =
+ new GZipStream( input, CompressionMode.Decompress );
+
+ return ZlibBaseStream.UncompressBuffer(compressed, decompressor);
+ }
+ }
+
+
+ }
+}
diff --git a/src/Hazelcast.Net/Polyfills/ZLib/InfTree.cs b/src/Hazelcast.Net/Polyfills/ZLib/InfTree.cs
new file mode 100644
index 0000000000..416b143a7f
--- /dev/null
+++ b/src/Hazelcast.Net/Polyfills/ZLib/InfTree.cs
@@ -0,0 +1,436 @@
+// Inftree.cs
+// ------------------------------------------------------------------
+//
+// Copyright (c) 2009 Dino Chiesa and Microsoft Corporation.
+// All rights reserved.
+//
+// This code module is part of DotNetZip, a zipfile class library.
+//
+// ------------------------------------------------------------------
+//
+// This code is licensed under the Microsoft Public License.
+// See the file License.txt for the license details.
+// More info on: http://dotnetzip.codeplex.com
+//
+// ------------------------------------------------------------------
+//
+// last saved (in emacs):
+// Time-stamp: <2009-October-28 12:43:54>
+//
+// ------------------------------------------------------------------
+//
+// This module defines classes used in decompression. This code is derived
+// from the jzlib implementation of zlib. In keeping with the license for jzlib,
+// the copyright to that code is below.
+//
+// ------------------------------------------------------------------
+//
+// Copyright (c) 2000,2001,2002,2003 ymnk, JCraft,Inc. All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// 1. Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in
+// the documentation and/or other materials provided with the distribution.
+//
+// 3. The names of the authors may not be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESSED OR IMPLIED WARRANTIES,
+// INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
+// FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL JCRAFT,
+// INC. OR ANY CONTRIBUTORS TO THIS SOFTWARE BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
+// OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+// EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// -----------------------------------------------------------------------
+//
+// This program is based on zlib-1.1.3; credit to authors
+// Jean-loup Gailly(jloup@gzip.org) and Mark Adler(madler@alumni.caltech.edu)
+// and contributors of zlib.
+//
+// -----------------------------------------------------------------------
+
+
+
+using System;
+namespace Ionic.Zlib
+{
+
+ sealed class InfTree
+ {
+
+ private const int MANY = 1440;
+
+ private const int Z_OK = 0;
+ private const int Z_STREAM_END = 1;
+ private const int Z_NEED_DICT = 2;
+ private const int Z_ERRNO = - 1;
+ private const int Z_STREAM_ERROR = - 2;
+ private const int Z_DATA_ERROR = - 3;
+ private const int Z_MEM_ERROR = - 4;
+ private const int Z_BUF_ERROR = - 5;
+ private const int Z_VERSION_ERROR = - 6;
+
+ internal const int fixed_bl = 9;
+ internal const int fixed_bd = 5;
+
+ //UPGRADE_NOTE: Final was removed from the declaration of 'fixed_tl'. "ms-help://MS.VSCC.v80/dv_commoner/local/redirect.htm?index='!DefaultContextWindowIndex'&keyword='jlca1003'"
+ internal static readonly int[] fixed_tl = new int[]{96, 7, 256, 0, 8, 80, 0, 8, 16, 84, 8, 115, 82, 7, 31, 0, 8, 112, 0, 8, 48, 0, 9, 192, 80, 7, 10, 0, 8, 96, 0, 8, 32, 0, 9, 160, 0, 8, 0, 0, 8, 128, 0, 8, 64, 0, 9, 224, 80, 7, 6, 0, 8, 88, 0, 8, 24, 0, 9, 144, 83, 7, 59, 0, 8, 120, 0, 8, 56, 0, 9, 208, 81, 7, 17, 0, 8, 104, 0, 8, 40, 0, 9, 176, 0, 8, 8, 0, 8, 136, 0, 8, 72, 0, 9, 240, 80, 7, 4, 0, 8, 84, 0, 8, 20, 85, 8, 227, 83, 7, 43, 0, 8, 116, 0, 8, 52, 0, 9, 200, 81, 7, 13, 0, 8, 100, 0, 8, 36, 0, 9, 168, 0, 8, 4, 0, 8, 132, 0, 8, 68, 0, 9, 232, 80, 7, 8, 0, 8, 92, 0, 8, 28, 0, 9, 152, 84, 7, 83, 0, 8, 124, 0, 8, 60, 0, 9, 216, 82, 7, 23, 0, 8, 108, 0, 8, 44, 0, 9, 184, 0, 8, 12, 0, 8, 140, 0, 8, 76, 0, 9, 248, 80, 7, 3, 0, 8, 82, 0, 8, 18, 85, 8, 163, 83, 7, 35, 0, 8, 114, 0, 8, 50, 0, 9, 196, 81, 7, 11, 0, 8, 98, 0, 8, 34, 0, 9, 164, 0, 8, 2, 0, 8, 130, 0, 8, 66, 0, 9, 228, 80, 7, 7, 0, 8, 90, 0, 8, 26, 0, 9, 148, 84, 7, 67, 0, 8, 122, 0, 8, 58, 0, 9, 212, 82, 7, 19, 0, 8, 106, 0, 8, 42, 0, 9, 180, 0, 8, 10, 0, 8, 138, 0, 8, 74, 0, 9, 244, 80, 7, 5, 0, 8, 86, 0, 8, 22, 192, 8, 0, 83, 7, 51, 0, 8, 118, 0, 8, 54, 0, 9, 204, 81, 7, 15, 0, 8, 102, 0, 8, 38, 0, 9, 172, 0, 8, 6, 0, 8, 134, 0, 8, 70, 0, 9, 236, 80, 7, 9, 0, 8, 94, 0, 8, 30, 0, 9, 156, 84, 7, 99, 0, 8, 126, 0, 8, 62, 0, 9, 220, 82, 7, 27, 0, 8, 110, 0, 8, 46, 0, 9, 188, 0, 8, 14, 0, 8, 142, 0, 8, 78, 0, 9, 252, 96, 7, 256, 0, 8, 81, 0, 8, 17, 85, 8, 131, 82, 7, 31, 0, 8, 113, 0, 8, 49, 0, 9, 194, 80, 7, 10, 0, 8, 97, 0, 8, 33, 0, 9, 162, 0, 8, 1, 0, 8, 129, 0, 8, 65, 0, 9, 226, 80, 7, 6, 0, 8, 89, 0, 8, 25, 0, 9, 146, 83, 7, 59, 0, 8, 121, 0, 8, 57, 0, 9, 210, 81, 7, 17, 0, 8, 105, 0, 8, 41, 0, 9, 178, 0, 8, 9, 0, 8, 137, 0, 8, 73, 0, 9, 242, 80, 7, 4, 0, 8, 85, 0, 8, 21, 80, 8, 258, 83, 7, 43, 0, 8, 117, 0, 8, 53, 0, 9, 202, 81, 7, 13, 0, 8, 101, 0, 8, 37, 0, 9, 170, 0, 8, 5, 0, 8, 133, 0, 8, 69, 0, 9, 234, 80, 7, 8, 0, 8, 93, 0, 8, 29, 0, 9, 154, 84, 7, 83, 0, 8, 125, 0, 8, 61, 0, 9, 218, 82, 
7, 23, 0, 8, 109, 0, 8, 45, 0, 9, 186,
+ 0, 8, 13, 0, 8, 141, 0, 8, 77, 0, 9, 250, 80, 7, 3, 0, 8, 83, 0, 8, 19, 85, 8, 195, 83, 7, 35, 0, 8, 115, 0, 8, 51, 0, 9, 198, 81, 7, 11, 0, 8, 99, 0, 8, 35, 0, 9, 166, 0, 8, 3, 0, 8, 131, 0, 8, 67, 0, 9, 230, 80, 7, 7, 0, 8, 91, 0, 8, 27, 0, 9, 150, 84, 7, 67, 0, 8, 123, 0, 8, 59, 0, 9, 214, 82, 7, 19, 0, 8, 107, 0, 8, 43, 0, 9, 182, 0, 8, 11, 0, 8, 139, 0, 8, 75, 0, 9, 246, 80, 7, 5, 0, 8, 87, 0, 8, 23, 192, 8, 0, 83, 7, 51, 0, 8, 119, 0, 8, 55, 0, 9, 206, 81, 7, 15, 0, 8, 103, 0, 8, 39, 0, 9, 174, 0, 8, 7, 0, 8, 135, 0, 8, 71, 0, 9, 238, 80, 7, 9, 0, 8, 95, 0, 8, 31, 0, 9, 158, 84, 7, 99, 0, 8, 127, 0, 8, 63, 0, 9, 222, 82, 7, 27, 0, 8, 111, 0, 8, 47, 0, 9, 190, 0, 8, 15, 0, 8, 143, 0, 8, 79, 0, 9, 254, 96, 7, 256, 0, 8, 80, 0, 8, 16, 84, 8, 115, 82, 7, 31, 0, 8, 112, 0, 8, 48, 0, 9, 193, 80, 7, 10, 0, 8, 96, 0, 8, 32, 0, 9, 161, 0, 8, 0, 0, 8, 128, 0, 8, 64, 0, 9, 225, 80, 7, 6, 0, 8, 88, 0, 8, 24, 0, 9, 145, 83, 7, 59, 0, 8, 120, 0, 8, 56, 0, 9, 209, 81, 7, 17, 0, 8, 104, 0, 8, 40, 0, 9, 177, 0, 8, 8, 0, 8, 136, 0, 8, 72, 0, 9, 241, 80, 7, 4, 0, 8, 84, 0, 8, 20, 85, 8, 227, 83, 7, 43, 0, 8, 116, 0, 8, 52, 0, 9, 201, 81, 7, 13, 0, 8, 100, 0, 8, 36, 0, 9, 169, 0, 8, 4, 0, 8, 132, 0, 8, 68, 0, 9, 233, 80, 7, 8, 0, 8, 92, 0, 8, 28, 0, 9, 153, 84, 7, 83, 0, 8, 124, 0, 8, 60, 0, 9, 217, 82, 7, 23, 0, 8, 108, 0, 8, 44, 0, 9, 185, 0, 8, 12, 0, 8, 140, 0, 8, 76, 0, 9, 249, 80, 7, 3, 0, 8, 82, 0, 8, 18, 85, 8, 163, 83, 7, 35, 0, 8, 114, 0, 8, 50, 0, 9, 197, 81, 7, 11, 0, 8, 98, 0, 8, 34, 0, 9, 165, 0, 8, 2, 0, 8, 130, 0, 8, 66, 0, 9, 229, 80, 7, 7, 0, 8, 90, 0, 8, 26, 0, 9, 149, 84, 7, 67, 0, 8, 122, 0, 8, 58, 0, 9, 213, 82, 7, 19, 0, 8, 106, 0, 8, 42, 0, 9, 181, 0, 8, 10, 0, 8, 138, 0, 8, 74, 0, 9, 245, 80, 7, 5, 0, 8, 86, 0, 8, 22, 192, 8, 0, 83, 7, 51, 0, 8, 118, 0, 8, 54, 0, 9, 205, 81, 7, 15, 0, 8, 102, 0, 8, 38, 0, 9, 173, 0, 8, 6, 0, 8, 134, 0, 8, 70, 0, 9, 237, 80, 7, 9, 0, 8, 94, 0, 8, 30, 0, 9, 157, 84, 7, 99, 0, 8, 126, 0, 8, 62, 0, 9, 221, 82, 7, 27, 0, 8, 
110, 0, 8, 46, 0, 9, 189, 0, 8,
+ 14, 0, 8, 142, 0, 8, 78, 0, 9, 253, 96, 7, 256, 0, 8, 81, 0, 8, 17, 85, 8, 131, 82, 7, 31, 0, 8, 113, 0, 8, 49, 0, 9, 195, 80, 7, 10, 0, 8, 97, 0, 8, 33, 0, 9, 163, 0, 8, 1, 0, 8, 129, 0, 8, 65, 0, 9, 227, 80, 7, 6, 0, 8, 89, 0, 8, 25, 0, 9, 147, 83, 7, 59, 0, 8, 121, 0, 8, 57, 0, 9, 211, 81, 7, 17, 0, 8, 105, 0, 8, 41, 0, 9, 179, 0, 8, 9, 0, 8, 137, 0, 8, 73, 0, 9, 243, 80, 7, 4, 0, 8, 85, 0, 8, 21, 80, 8, 258, 83, 7, 43, 0, 8, 117, 0, 8, 53, 0, 9, 203, 81, 7, 13, 0, 8, 101, 0, 8, 37, 0, 9, 171, 0, 8, 5, 0, 8, 133, 0, 8, 69, 0, 9, 235, 80, 7, 8, 0, 8, 93, 0, 8, 29, 0, 9, 155, 84, 7, 83, 0, 8, 125, 0, 8, 61, 0, 9, 219, 82, 7, 23, 0, 8, 109, 0, 8, 45, 0, 9, 187, 0, 8, 13, 0, 8, 141, 0, 8, 77, 0, 9, 251, 80, 7, 3, 0, 8, 83, 0, 8, 19, 85, 8, 195, 83, 7, 35, 0, 8, 115, 0, 8, 51, 0, 9, 199, 81, 7, 11, 0, 8, 99, 0, 8, 35, 0, 9, 167, 0, 8, 3, 0, 8, 131, 0, 8, 67, 0, 9, 231, 80, 7, 7, 0, 8, 91, 0, 8, 27, 0, 9, 151, 84, 7, 67, 0, 8, 123, 0, 8, 59, 0, 9, 215, 82, 7, 19, 0, 8, 107, 0, 8, 43, 0, 9, 183, 0, 8, 11, 0, 8, 139, 0, 8, 75, 0, 9, 247, 80, 7, 5, 0, 8, 87, 0, 8, 23, 192, 8, 0, 83, 7, 51, 0, 8, 119, 0, 8, 55, 0, 9, 207, 81, 7, 15, 0, 8, 103, 0, 8, 39, 0, 9, 175, 0, 8, 7, 0, 8, 135, 0, 8, 71, 0, 9, 239, 80, 7, 9, 0, 8, 95, 0, 8, 31, 0, 9, 159, 84, 7, 99, 0, 8, 127, 0, 8, 63, 0, 9, 223, 82, 7, 27, 0, 8, 111, 0, 8, 47, 0, 9, 191, 0, 8, 15, 0, 8, 143, 0, 8, 79, 0, 9, 255};
+ //UPGRADE_NOTE: Final was removed from the declaration of 'fixed_td'. "ms-help://MS.VSCC.v80/dv_commoner/local/redirect.htm?index='!DefaultContextWindowIndex'&keyword='jlca1003'"
+ internal static readonly int[] fixed_td = new int[]{80, 5, 1, 87, 5, 257, 83, 5, 17, 91, 5, 4097, 81, 5, 5, 89, 5, 1025, 85, 5, 65, 93, 5, 16385, 80, 5, 3, 88, 5, 513, 84, 5, 33, 92, 5, 8193, 82, 5, 9, 90, 5, 2049, 86, 5, 129, 192, 5, 24577, 80, 5, 2, 87, 5, 385, 83, 5, 25, 91, 5, 6145, 81, 5, 7, 89, 5, 1537, 85, 5, 97, 93, 5, 24577, 80, 5, 4, 88, 5, 769, 84, 5, 49, 92, 5, 12289, 82, 5, 13, 90, 5, 3073, 86, 5, 193, 192, 5, 24577};
+
+ // Tables for deflate from PKZIP's appnote.txt.
+ //UPGRADE_NOTE: Final was removed from the declaration of 'cplens'. "ms-help://MS.VSCC.v80/dv_commoner/local/redirect.htm?index='!DefaultContextWindowIndex'&keyword='jlca1003'"
+ internal static readonly int[] cplens = new int[]{3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 15, 17, 19, 23, 27, 31, 35, 43, 51, 59, 67, 83, 99, 115, 131, 163, 195, 227, 258, 0, 0};
+
+ // see note #13 above about 258
+ //UPGRADE_NOTE: Final was removed from the declaration of 'cplext'. "ms-help://MS.VSCC.v80/dv_commoner/local/redirect.htm?index='!DefaultContextWindowIndex'&keyword='jlca1003'"
+ internal static readonly int[] cplext = new int[]{0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 0, 112, 112};
+
+ //UPGRADE_NOTE: Final was removed from the declaration of 'cpdist'. "ms-help://MS.VSCC.v80/dv_commoner/local/redirect.htm?index='!DefaultContextWindowIndex'&keyword='jlca1003'"
+ internal static readonly int[] cpdist = new int[]{1, 2, 3, 4, 5, 7, 9, 13, 17, 25, 33, 49, 65, 97, 129, 193, 257, 385, 513, 769, 1025, 1537, 2049, 3073, 4097, 6145, 8193, 12289, 16385, 24577};
+
+ //UPGRADE_NOTE: Final was removed from the declaration of 'cpdext'. "ms-help://MS.VSCC.v80/dv_commoner/local/redirect.htm?index='!DefaultContextWindowIndex'&keyword='jlca1003'"
+ internal static readonly int[] cpdext = new int[]{0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10, 10, 11, 11, 12, 12, 13, 13};
+
+ // If BMAX needs to be larger than 16, then h and x[] should be uLong.
+ internal const int BMAX = 15; // maximum bit length of any code
+
+ internal int[] hn = null; // hufts used in space
+ internal int[] v = null; // work area for huft_build
+ internal int[] c = null; // bit length count table
+ internal int[] r = null; // table entry for structure assignment
+ internal int[] u = null; // table stack
+ internal int[] x = null; // bit offsets, then code stack
+
+ private int huft_build(int[] b, int bindex, int n, int s, int[] d, int[] e, int[] t, int[] m, int[] hp, int[] hn, int[] v)
+ {
+ // Given a list of code lengths and a maximum table size, make a set of
+ // tables to decode that set of codes. Return Z_OK on success, Z_BUF_ERROR
+ // if the given code set is incomplete (the tables are still built in this
+ // case), Z_DATA_ERROR if the input is invalid (an over-subscribed set of
+ // lengths), or Z_MEM_ERROR if not enough memory.
+
+ int a; // counter for codes of length k
+ int f; // i repeats in table every f entries
+ int g; // maximum code length
+ int h; // table level
+ int i; // counter, current code
+ int j; // counter
+ int k; // number of bits in current code
+ int l; // bits per table (returned in m)
+ int mask; // (1 << w) - 1, to avoid cc -O bug on HP
+ int p; // pointer into c[], b[], or v[]
+ int q; // points to current table
+ int w; // bits before this table == (l * h)
+ int xp; // pointer into x
+ int y; // number of dummy codes added
+ int z; // number of entries in current table
+
+ // Generate counts for each bit length
+
+ p = 0; i = n;
+ do
+ {
+ c[b[bindex + p]]++; p++; i--; // assume all entries <= BMAX
+ }
+ while (i != 0);
+
+ if (c[0] == n)
+ {
+ // null input--all zero length codes
+ t[0] = - 1;
+ m[0] = 0;
+ return Z_OK;
+ }
+
+ // Find minimum and maximum length, bound *m by those
+ l = m[0];
+ for (j = 1; j <= BMAX; j++)
+ if (c[j] != 0)
+ break;
+ k = j; // minimum code length
+ if (l < j)
+ {
+ l = j;
+ }
+ for (i = BMAX; i != 0; i--)
+ {
+ if (c[i] != 0)
+ break;
+ }
+ g = i; // maximum code length
+ if (l > i)
+ {
+ l = i;
+ }
+ m[0] = l;
+
+ // Adjust last length count to fill out codes, if needed
+ for (y = 1 << j; j < i; j++, y <<= 1)
+ {
+ if ((y -= c[j]) < 0)
+ {
+ return Z_DATA_ERROR;
+ }
+ }
+ if ((y -= c[i]) < 0)
+ {
+ return Z_DATA_ERROR;
+ }
+ c[i] += y;
+
+ // Generate starting offsets into the value table for each length
+ x[1] = j = 0;
+ p = 1; xp = 2;
+ while (--i != 0)
+ {
+ // note that i == g from above
+ x[xp] = (j += c[p]);
+ xp++;
+ p++;
+ }
+
+ // Make a table of values in order of bit lengths
+ i = 0; p = 0;
+ do
+ {
+ if ((j = b[bindex + p]) != 0)
+ {
+ v[x[j]++] = i;
+ }
+ p++;
+ }
+ while (++i < n);
+ n = x[g]; // set n to length of v
+
+ // Generate the Huffman codes and for each, make the table entries
+ x[0] = i = 0; // first Huffman code is zero
+ p = 0; // grab values in bit order
+ h = - 1; // no tables yet--level -1
+ w = - l; // bits decoded == (l * h)
+ u[0] = 0; // just to keep compilers happy
+ q = 0; // ditto
+ z = 0; // ditto
+
+ // go through the bit lengths (k already is bits in shortest code)
+ for (; k <= g; k++)
+ {
+ a = c[k];
+ while (a-- != 0)
+ {
+ // here i is the Huffman code of length k bits for value *p
+ // make tables up to required level
+ while (k > w + l)
+ {
+ h++;
+ w += l; // previous table always l bits
+ // compute minimum size table less than or equal to l bits
+ z = g - w;
+ z = (z > l)?l:z; // table size upper limit
+ if ((f = 1 << (j = k - w)) > a + 1)
+ {
+ // try a k-w bit table
+ // too few codes for k-w bit table
+ f -= (a + 1); // deduct codes from patterns left
+ xp = k;
+ if (j < z)
+ {
+ while (++j < z)
+ {
+ // try smaller tables up to z bits
+ if ((f <<= 1) <= c[++xp])
+ break; // enough codes to use up j bits
+ f -= c[xp]; // else deduct codes from patterns
+ }
+ }
+ }
+ z = 1 << j; // table entries for j-bit table
+
+ // allocate new table
+ if (hn[0] + z > MANY)
+ {
+ // (note: doesn't matter for fixed)
+ return Z_DATA_ERROR; // overflow of MANY
+ }
+ u[h] = q = hn[0]; // DEBUG
+ hn[0] += z;
+
+ // connect to last table, if there is one
+ if (h != 0)
+ {
+ x[h] = i; // save pattern for backing up
+ r[0] = (sbyte) j; // bits in this table
+ r[1] = (sbyte) l; // bits to dump before this table
+ j = SharedUtils.URShift(i, (w - l));
+ r[2] = (int) (q - u[h - 1] - j); // offset to this table
+ Array.Copy(r, 0, hp, (u[h - 1] + j) * 3, 3); // connect to last table
+ }
+ else
+ {
+ t[0] = q; // first table is returned result
+ }
+ }
+
+ // set up table entry in r
+ r[1] = (sbyte) (k - w);
+ if (p >= n)
+ {
+ r[0] = 128 + 64; // out of values--invalid code
+ }
+ else if (v[p] < s)
+ {
+ r[0] = (sbyte) (v[p] < 256?0:32 + 64); // 256 is end-of-block
+ r[2] = v[p++]; // simple code is just the value
+ }
+ else
+ {
+ r[0] = (sbyte) (e[v[p] - s] + 16 + 64); // non-simple--look up in lists
+ r[2] = d[v[p++] - s];
+ }
+
+ // fill code-like entries with r
+ f = 1 << (k - w);
+ for (j = SharedUtils.URShift(i, w); j < z; j += f)
+ {
+ Array.Copy(r, 0, hp, (q + j) * 3, 3);
+ }
+
+ // backwards increment the k-bit code i
+ for (j = 1 << (k - 1); (i & j) != 0; j = SharedUtils.URShift(j, 1))
+ {
+ i ^= j;
+ }
+ i ^= j;
+
+ // backup over finished tables
+ mask = (1 << w) - 1; // needed on HP, cc -O bug
+ while ((i & mask) != x[h])
+ {
+ h--; // don't need to update q
+ w -= l;
+ mask = (1 << w) - 1;
+ }
+ }
+ }
+ // Return Z_BUF_ERROR if we were given an incomplete table
+ return y != 0 && g != 1?Z_BUF_ERROR:Z_OK;
+ }
+
+ internal int inflate_trees_bits(int[] c, int[] bb, int[] tb, int[] hp, ZlibCodec z)
+ {
+ int result;
+ initWorkArea(19);
+ hn[0] = 0;
+ result = huft_build(c, 0, 19, 19, null, null, tb, bb, hp, hn, v);
+
+ if (result == Z_DATA_ERROR)
+ {
+ z.Message = "oversubscribed dynamic bit lengths tree";
+ }
+ else if (result == Z_BUF_ERROR || bb[0] == 0)
+ {
+ z.Message = "incomplete dynamic bit lengths tree";
+ result = Z_DATA_ERROR;
+ }
+ return result;
+ }
+
+ internal int inflate_trees_dynamic(int nl, int nd, int[] c, int[] bl, int[] bd, int[] tl, int[] td, int[] hp, ZlibCodec z)
+ {
+ int result;
+
+ // build literal/length tree
+ initWorkArea(288);
+ hn[0] = 0;
+ result = huft_build(c, 0, nl, 257, cplens, cplext, tl, bl, hp, hn, v);
+ if (result != Z_OK || bl[0] == 0)
+ {
+ if (result == Z_DATA_ERROR)
+ {
+ z.Message = "oversubscribed literal/length tree";
+ }
+ else if (result != Z_MEM_ERROR)
+ {
+ z.Message = "incomplete literal/length tree";
+ result = Z_DATA_ERROR;
+ }
+ return result;
+ }
+
+ // build distance tree
+ initWorkArea(288);
+ result = huft_build(c, nl, nd, 0, cpdist, cpdext, td, bd, hp, hn, v);
+
+ if (result != Z_OK || (bd[0] == 0 && nl > 257))
+ {
+ if (result == Z_DATA_ERROR)
+ {
+ z.Message = "oversubscribed distance tree";
+ }
+ else if (result == Z_BUF_ERROR)
+ {
+ z.Message = "incomplete distance tree";
+ result = Z_DATA_ERROR;
+ }
+ else if (result != Z_MEM_ERROR)
+ {
+ z.Message = "empty distance tree with lengths";
+ result = Z_DATA_ERROR;
+ }
+ return result;
+ }
+
+ return Z_OK;
+ }
+
+ internal static int inflate_trees_fixed(int[] bl, int[] bd, int[][] tl, int[][] td, ZlibCodec z)
+ {
+ bl[0] = fixed_bl;
+ bd[0] = fixed_bd;
+ tl[0] = fixed_tl;
+ td[0] = fixed_td;
+ return Z_OK;
+ }
+
+ private void initWorkArea(int vsize)
+ {
+ if (hn == null)
+ {
+ hn = new int[1];
+ v = new int[vsize];
+ c = new int[BMAX + 1];
+ r = new int[3];
+ u = new int[BMAX];
+ x = new int[BMAX + 1];
+ }
+ else
+ {
+ if (v.Length < vsize)
+ {
+ v = new int[vsize];
+ }
+ Array.Clear(v,0,vsize);
+ Array.Clear(c,0,BMAX+1);
+ r[0]=0; r[1]=0; r[2]=0;
+ // for(int i=0; i
+//
+// ------------------------------------------------------------------
+//
+// This module defines classes for decompression. This code is derived
+// from the jzlib implementation of zlib, but significantly modified.
+// The object model is not the same, and many of the behaviors are
+// different. Nonetheless, in keeping with the license for jzlib, I am
+// reproducing the copyright to that code here.
+//
+// ------------------------------------------------------------------
+//
+// Copyright (c) 2000,2001,2002,2003 ymnk, JCraft,Inc. All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// 1. Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in
+// the documentation and/or other materials provided with the distribution.
+//
+// 3. The names of the authors may not be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESSED OR IMPLIED WARRANTIES,
+// INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
+// FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL JCRAFT,
+// INC. OR ANY CONTRIBUTORS TO THIS SOFTWARE BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
+// OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+// EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// -----------------------------------------------------------------------
+//
+// This program is based on zlib-1.1.3; credit to authors
+// Jean-loup Gailly(jloup@gzip.org) and Mark Adler(madler@alumni.caltech.edu)
+// and contributors of zlib.
+//
+// -----------------------------------------------------------------------
+
+
+using System;
+namespace Ionic.Zlib
+{
+ sealed class InflateBlocks
+ {
+ private const int MANY = 1440;
+
+ // Table for deflate from PKZIP's appnote.txt.
+ internal static readonly int[] border = new int[]
+ { 16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15 };
+
+ private enum InflateBlockMode
+ {
+ TYPE = 0, // get type bits (3, including end bit)
+ LENS = 1, // get lengths for stored
+ STORED = 2, // processing stored block
+ TABLE = 3, // get table lengths
+ BTREE = 4, // get bit lengths tree for a dynamic block
+ DTREE = 5, // get length, distance trees for a dynamic block
+ CODES = 6, // processing fixed or dynamic block
+ DRY = 7, // output remaining window bytes
+ DONE = 8, // finished last block, done
+ BAD = 9, // got a data error--stuck here
+ }
+
+ private InflateBlockMode mode; // current inflate_block mode
+
+ internal int left; // if STORED, bytes left to copy
+
+ internal int table; // table lengths (14 bits)
+ internal int index; // index into blens (or border)
+ internal int[] blens; // bit lengths of codes
+ internal int[] bb = new int[1]; // bit length tree depth
+ internal int[] tb = new int[1]; // bit length decoding tree
+
+ internal InflateCodes codes = new InflateCodes(); // if CODES, current state
+
+ internal int last; // true if this block is the last block
+
+ internal ZlibCodec _codec; // pointer back to this zlib stream
+
+ // mode independent information
+ internal int bitk; // bits in bit buffer
+ internal int bitb; // bit buffer
+ internal int[] hufts; // single malloc for tree space
+ internal byte[] window; // sliding window
+ internal int end; // one byte after sliding window
+ internal int readAt; // window read pointer
+ internal int writeAt; // window write pointer
+ internal System.Object checkfn; // check function
+ internal uint check; // check on output
+
+ internal InfTree inftree = new InfTree();
+
// Creates the block-decoding state for one inflate stream.
// codec: owning zlib stream; checkfn: non-null => maintain an Adler-32
// check over the output; w: window size in bytes (1 << windowBits).
internal InflateBlocks(ZlibCodec codec, System.Object checkfn, int w)
{
    _codec = codec;
    hufts = new int[MANY * 3]; // single allocation for all Huffman tree nodes
    window = new byte[w];      // sliding output window
    end = w;                   // one past the last window index
    this.checkfn = checkfn;
    mode = InflateBlockMode.TYPE;
    Reset();
}
+
// Resets the block state to expect a new block-type header, clearing the
// bit buffer and window pointers. Returns the previous check value so a
// caller can compare it after a dictionary reset.
internal uint Reset()
{
    uint oldCheck = check;
    mode = InflateBlockMode.TYPE;
    bitk = 0;
    bitb = 0;
    readAt = writeAt = 0;

    // restart the running Adler-32 only when a check function was requested
    if (checkfn != null)
        _codec._Adler32 = check = Adler.Adler32(0, null, 0, 0);
    return oldCheck;
}
+
+
// Runs the block-level inflate state machine: reads bits from the codec's
// input buffer, decodes stored / fixed-Huffman / dynamic-Huffman DEFLATE
// blocks, and writes decompressed bytes into the sliding window.
// r carries the running zlib status code between calls; every exit path
// stores the local working copies (bit buffer, pointers) back into the
// codec/block state and returns through Flush().
internal int Process(int r)
{
    int t; // temporary storage
    int b; // bit buffer
    int k; // bits in bit buffer
    int p; // input data pointer
    int n; // bytes available there
    int q; // output window write pointer
    int m; // bytes to end of window or read pointer

    // copy input/output information to locals (UPDATE macro restores)

    p = _codec.NextIn;
    n = _codec.AvailableBytesIn;
    b = bitb;
    k = bitk;

    q = writeAt;
    m = (int)(q < readAt ? readAt - q - 1 : end - q);


    // process input based on current state
    while (true)
    {
        switch (mode)
        {
            case InflateBlockMode.TYPE:

                // need the 3 block-header bits: 1 "last" bit + 2 type bits
                while (k < (3))
                {
                    if (n != 0)
                    {
                        r = ZlibConstants.Z_OK;
                    }
                    else
                    {
                        // out of input: save state and flush what we have
                        bitb = b; bitk = k;
                        _codec.AvailableBytesIn = n;
                        _codec.TotalBytesIn += p - _codec.NextIn;
                        _codec.NextIn = p;
                        writeAt = q;
                        return Flush(r);
                    }

                    n--;
                    b |= (_codec.InputBuffer[p++] & 0xff) << k;
                    k += 8;
                }
                t = (int)(b & 7);
                last = t & 1;

                switch ((uint)t >> 1)
                {
                    case 0: // stored
                        b >>= 3; k -= (3);
                        t = k & 7; // go to byte boundary
                        b >>= t; k -= t;
                        mode = InflateBlockMode.LENS; // get length of stored block
                        break;

                    case 1: // fixed
                        int[] bl = new int[1];
                        int[] bd = new int[1];
                        int[][] tl = new int[1][];
                        int[][] td = new int[1][];
                        InfTree.inflate_trees_fixed(bl, bd, tl, td, _codec);
                        codes.Init(bl[0], bd[0], tl[0], 0, td[0], 0);
                        b >>= 3; k -= 3;
                        mode = InflateBlockMode.CODES;
                        break;

                    case 2: // dynamic
                        b >>= 3; k -= 3;
                        mode = InflateBlockMode.TABLE;
                        break;

                    case 3: // illegal
                        b >>= 3; k -= 3;
                        mode = InflateBlockMode.BAD;
                        _codec.Message = "invalid block type";
                        r = ZlibConstants.Z_DATA_ERROR;
                        bitb = b; bitk = k;
                        _codec.AvailableBytesIn = n;
                        _codec.TotalBytesIn += p - _codec.NextIn;
                        _codec.NextIn = p;
                        writeAt = q;
                        return Flush(r);
                }
                break;

            case InflateBlockMode.LENS:

                // stored block: need 32 bits = LEN plus its one's complement
                while (k < (32))
                {
                    if (n != 0)
                    {
                        r = ZlibConstants.Z_OK;
                    }
                    else
                    {
                        bitb = b; bitk = k;
                        _codec.AvailableBytesIn = n;
                        _codec.TotalBytesIn += p - _codec.NextIn;
                        _codec.NextIn = p;
                        writeAt = q;
                        return Flush(r);
                    }
                    ; // no-op empty statement retained from the original source
                    n--;
                    b |= (_codec.InputBuffer[p++] & 0xff) << k;
                    k += 8;
                }

                // the high 16 bits must be the complement of the low 16 (LEN/NLEN)
                if ( ( ((~b)>>16) & 0xffff) != (b & 0xffff))
                {
                    mode = InflateBlockMode.BAD;
                    _codec.Message = "invalid stored block lengths";
                    r = ZlibConstants.Z_DATA_ERROR;

                    bitb = b; bitk = k;
                    _codec.AvailableBytesIn = n;
                    _codec.TotalBytesIn += p - _codec.NextIn;
                    _codec.NextIn = p;
                    writeAt = q;
                    return Flush(r);
                }
                left = (b & 0xffff);
                b = k = 0; // dump bits
                mode = left != 0 ? InflateBlockMode.STORED : (last != 0 ? InflateBlockMode.DRY : InflateBlockMode.TYPE);
                break;

            case InflateBlockMode.STORED:
                if (n == 0)
                {
                    bitb = b; bitk = k;
                    _codec.AvailableBytesIn = n;
                    _codec.TotalBytesIn += p - _codec.NextIn;
                    _codec.NextIn = p;
                    writeAt = q;
                    return Flush(r);
                }

                if (m == 0)
                {
                    // window full: try wrapping, then flushing, to make room
                    if (q == end && readAt != 0)
                    {
                        q = 0; m = (int)(q < readAt ? readAt - q - 1 : end - q);
                    }
                    if (m == 0)
                    {
                        writeAt = q;
                        r = Flush(r);
                        q = writeAt; m = (int)(q < readAt ? readAt - q - 1 : end - q);
                        if (q == end && readAt != 0)
                        {
                            q = 0; m = (int)(q < readAt ? readAt - q - 1 : end - q);
                        }
                        if (m == 0)
                        {
                            bitb = b; bitk = k;
                            _codec.AvailableBytesIn = n;
                            _codec.TotalBytesIn += p - _codec.NextIn;
                            _codec.NextIn = p;
                            writeAt = q;
                            return Flush(r);
                        }
                    }
                }
                r = ZlibConstants.Z_OK;

                // copy min(left, n, m) raw bytes straight into the window
                t = left;
                if (t > n)
                    t = n;
                if (t > m)
                    t = m;
                Array.Copy(_codec.InputBuffer, p, window, q, t);
                p += t; n -= t;
                q += t; m -= t;
                if ((left -= t) != 0)
                    break;
                mode = last != 0 ? InflateBlockMode.DRY : InflateBlockMode.TYPE;
                break;

            case InflateBlockMode.TABLE:

                // need 14 bits: 5 (HLIT) + 5 (HDIST) + 4 (HCLEN)
                while (k < (14))
                {
                    if (n != 0)
                    {
                        r = ZlibConstants.Z_OK;
                    }
                    else
                    {
                        bitb = b; bitk = k;
                        _codec.AvailableBytesIn = n;
                        _codec.TotalBytesIn += p - _codec.NextIn;
                        _codec.NextIn = p;
                        writeAt = q;
                        return Flush(r);
                    }

                    n--;
                    b |= (_codec.InputBuffer[p++] & 0xff) << k;
                    k += 8;
                }

                table = t = (b & 0x3fff);
                if ((t & 0x1f) > 29 || ((t >> 5) & 0x1f) > 29)
                {
                    mode = InflateBlockMode.BAD;
                    _codec.Message = "too many length or distance symbols";
                    r = ZlibConstants.Z_DATA_ERROR;

                    bitb = b; bitk = k;
                    _codec.AvailableBytesIn = n;
                    _codec.TotalBytesIn += p - _codec.NextIn;
                    _codec.NextIn = p;
                    writeAt = q;
                    return Flush(r);
                }
                // total code lengths to read: 258 + HLIT + HDIST
                t = 258 + (t & 0x1f) + ((t >> 5) & 0x1f);
                if (blens == null || blens.Length < t)
                {
                    blens = new int[t];
                }
                else
                {
                    Array.Clear(blens, 0, t);
                    // for (int i = 0; i < t; i++)
                    // {
                    //     blens[i] = 0;
                    // }
                }

                b >>= 14;
                k -= 14;


                index = 0;
                mode = InflateBlockMode.BTREE;
                goto case InflateBlockMode.BTREE;

            case InflateBlockMode.BTREE:
                // read HCLEN+4 code-length code lengths, 3 bits each,
                // in the "border" permutation order
                while (index < 4 + (table >> 10))
                {
                    while (k < (3))
                    {
                        if (n != 0)
                        {
                            r = ZlibConstants.Z_OK;
                        }
                        else
                        {
                            bitb = b; bitk = k;
                            _codec.AvailableBytesIn = n;
                            _codec.TotalBytesIn += p - _codec.NextIn;
                            _codec.NextIn = p;
                            writeAt = q;
                            return Flush(r);
                        }

                        n--;
                        b |= (_codec.InputBuffer[p++] & 0xff) << k;
                        k += 8;
                    }

                    blens[border[index++]] = b & 7;

                    b >>= 3; k -= 3;
                }

                // unspecified code-length codes default to zero length
                while (index < 19)
                {
                    blens[border[index++]] = 0;
                }

                bb[0] = 7;
                t = inftree.inflate_trees_bits(blens, bb, tb, hufts, _codec);
                if (t != ZlibConstants.Z_OK)
                {
                    r = t;
                    if (r == ZlibConstants.Z_DATA_ERROR)
                    {
                        blens = null;
                        mode = InflateBlockMode.BAD;
                    }

                    bitb = b; bitk = k;
                    _codec.AvailableBytesIn = n;
                    _codec.TotalBytesIn += p - _codec.NextIn;
                    _codec.NextIn = p;
                    writeAt = q;
                    return Flush(r);
                }

                index = 0;
                mode = InflateBlockMode.DTREE;
                goto case InflateBlockMode.DTREE;

            case InflateBlockMode.DTREE:
                // decode literal/length + distance code lengths via the
                // bit-length tree, expanding repeat codes 16/17/18
                while (true)
                {
                    t = table;
                    if (!(index < 258 + (t & 0x1f) + ((t >> 5) & 0x1f)))
                    {
                        break;
                    }

                    int i, j, c;

                    t = bb[0];

                    while (k < t)
                    {
                        if (n != 0)
                        {
                            r = ZlibConstants.Z_OK;
                        }
                        else
                        {
                            bitb = b; bitk = k;
                            _codec.AvailableBytesIn = n;
                            _codec.TotalBytesIn += p - _codec.NextIn;
                            _codec.NextIn = p;
                            writeAt = q;
                            return Flush(r);
                        }

                        n--;
                        b |= (_codec.InputBuffer[p++] & 0xff) << k;
                        k += 8;
                    }

                    // hufts entries are (op, bits, value) triples
                    t = hufts[(tb[0] + (b & InternalInflateConstants.InflateMask[t])) * 3 + 1];
                    c = hufts[(tb[0] + (b & InternalInflateConstants.InflateMask[t])) * 3 + 2];

                    if (c < 16)
                    {
                        // literal code length
                        b >>= t; k -= t;
                        blens[index++] = c;
                    }
                    else
                    {
                        // c == 16..18
                        i = c == 18 ? 7 : c - 14;
                        j = c == 18 ? 11 : 3;

                        while (k < (t + i))
                        {
                            if (n != 0)
                            {
                                r = ZlibConstants.Z_OK;
                            }
                            else
                            {
                                bitb = b; bitk = k;
                                _codec.AvailableBytesIn = n;
                                _codec.TotalBytesIn += p - _codec.NextIn;
                                _codec.NextIn = p;
                                writeAt = q;
                                return Flush(r);
                            }

                            n--;
                            b |= (_codec.InputBuffer[p++] & 0xff) << k;
                            k += 8;
                        }

                        b >>= t; k -= t;

                        j += (b & InternalInflateConstants.InflateMask[i]);

                        b >>= i; k -= i;

                        i = index;
                        t = table;
                        if (i + j > 258 + (t & 0x1f) + ((t >> 5) & 0x1f) || (c == 16 && i < 1))
                        {
                            blens = null;
                            mode = InflateBlockMode.BAD;
                            _codec.Message = "invalid bit length repeat";
                            r = ZlibConstants.Z_DATA_ERROR;

                            bitb = b; bitk = k;
                            _codec.AvailableBytesIn = n;
                            _codec.TotalBytesIn += p - _codec.NextIn;
                            _codec.NextIn = p;
                            writeAt = q;
                            return Flush(r);
                        }

                        // 16 repeats the previous length; 17/18 repeat zero
                        c = (c == 16) ? blens[i-1] : 0;
                        do
                        {
                            blens[i++] = c;
                        }
                        while (--j != 0);
                        index = i;
                    }
                }

                tb[0] = -1;
                {
                    int[] bl = new int[] { 9 }; // must be <= 9 for lookahead assumptions
                    int[] bd = new int[] { 6 }; // must be <= 9 for lookahead assumptions
                    int[] tl = new int[1];
                    int[] td = new int[1];

                    t = table;
                    t = inftree.inflate_trees_dynamic(257 + (t & 0x1f), 1 + ((t >> 5) & 0x1f), blens, bl, bd, tl, td, hufts, _codec);

                    if (t != ZlibConstants.Z_OK)
                    {
                        if (t == ZlibConstants.Z_DATA_ERROR)
                        {
                            blens = null;
                            mode = InflateBlockMode.BAD;
                        }
                        r = t;

                        bitb = b; bitk = k;
                        _codec.AvailableBytesIn = n;
                        _codec.TotalBytesIn += p - _codec.NextIn;
                        _codec.NextIn = p;
                        writeAt = q;
                        return Flush(r);
                    }
                    codes.Init(bl[0], bd[0], hufts, tl[0], hufts, td[0]);
                }
                mode = InflateBlockMode.CODES;
                goto case InflateBlockMode.CODES;

            case InflateBlockMode.CODES:
                bitb = b; bitk = k;
                _codec.AvailableBytesIn = n;
                _codec.TotalBytesIn += p - _codec.NextIn;
                _codec.NextIn = p;
                writeAt = q;

                // hand off to the codes state machine for the block body
                r = codes.Process(this, r);
                if (r != ZlibConstants.Z_STREAM_END)
                {
                    return Flush(r);
                }

                r = ZlibConstants.Z_OK;
                p = _codec.NextIn;
                n = _codec.AvailableBytesIn;
                b = bitb;
                k = bitk;
                q = writeAt;
                m = (int)(q < readAt ? readAt - q - 1 : end - q);

                if (last == 0)
                {
                    mode = InflateBlockMode.TYPE;
                    break;
                }
                mode = InflateBlockMode.DRY;
                goto case InflateBlockMode.DRY;

            case InflateBlockMode.DRY:
                // last block done: drain any bytes still in the window
                writeAt = q;
                r = Flush(r);
                q = writeAt; m = (int)(q < readAt ? readAt - q - 1 : end - q);
                if (readAt != writeAt)
                {
                    bitb = b; bitk = k;
                    _codec.AvailableBytesIn = n;
                    _codec.TotalBytesIn += p - _codec.NextIn;
                    _codec.NextIn = p;
                    writeAt = q;
                    return Flush(r);
                }
                mode = InflateBlockMode.DONE;
                goto case InflateBlockMode.DONE;

            case InflateBlockMode.DONE:
                r = ZlibConstants.Z_STREAM_END;
                bitb = b;
                bitk = k;
                _codec.AvailableBytesIn = n;
                _codec.TotalBytesIn += p - _codec.NextIn;
                _codec.NextIn = p;
                writeAt = q;
                return Flush(r);

            case InflateBlockMode.BAD:
                r = ZlibConstants.Z_DATA_ERROR;

                bitb = b; bitk = k;
                _codec.AvailableBytesIn = n;
                _codec.TotalBytesIn += p - _codec.NextIn;
                _codec.NextIn = p;
                writeAt = q;
                return Flush(r);


            default:
                r = ZlibConstants.Z_STREAM_ERROR;

                bitb = b; bitk = k;
                _codec.AvailableBytesIn = n;
                _codec.TotalBytesIn += p - _codec.NextIn;
                _codec.NextIn = p;
                writeAt = q;
                return Flush(r);
        }
    }
}
+
+
// Releases the large buffers. Reset() is called first so the codec's
// check/counters are left in a consistent state before the arrays go.
internal void Free()
{
    Reset();
    window = null;
    hufts = null;
}
+
// Copies a preset dictionary into the start of the window and positions
// both pointers past it, so back-references can reach the dictionary
// bytes without them being emitted as output.
internal void SetDictionary(byte[] d, int start, int n)
{
    Array.Copy(d, start, window, 0, n);
    readAt = writeAt = n;
}
+
+ // Returns true if inflate is currently at the end of a block generated
+ // by Z_SYNC_FLUSH or Z_FULL_FLUSH.
+ internal int SyncPoint()
+ {
+ return mode == InflateBlockMode.LENS ? 1 : 0;
+ }
+
+ // copy as much as possible from the sliding window to the output area
// copy as much as possible from the sliding window to the output area.
// Two passes: pass 0 copies from readAt up to the end of the window (or
// writeAt, if the data does not wrap), pass 1 copies the wrapped remainder
// from the window start. Updates the codec's output counters and the
// running Adler-32 when a check function is active.
internal int Flush(int r)
{
    int nBytes;

    for (int pass=0; pass < 2; pass++)
    {
        if (pass==0)
        {
            // compute number of bytes to copy as far as end of window
            nBytes = (int)((readAt <= writeAt ? writeAt : end) - readAt);
        }
        else
        {
            // compute bytes to copy
            nBytes = writeAt - readAt;
        }

        // workitem 8870: nothing pending is not an error
        if (nBytes == 0)
        {
            if (r == ZlibConstants.Z_BUF_ERROR)
                r = ZlibConstants.Z_OK;
            return r;
        }

        // never write more than the caller's output buffer can take
        if (nBytes > _codec.AvailableBytesOut)
            nBytes = _codec.AvailableBytesOut;

        if (nBytes != 0 && r == ZlibConstants.Z_BUF_ERROR)
            r = ZlibConstants.Z_OK;

        // update counters
        _codec.AvailableBytesOut -= nBytes;
        _codec.TotalBytesOut += nBytes;

        // update check information
        if (checkfn != null)
            _codec._Adler32 = check = Adler.Adler32(check, window, readAt, nBytes);

        // copy as far as end of window
        Array.Copy(window, readAt, _codec.OutputBuffer, _codec.NextOut, nBytes);
        _codec.NextOut += nBytes;
        readAt += nBytes;

        // see if more to copy at beginning of window
        if (readAt == end && pass == 0)
        {
            // wrap pointers
            readAt = 0;
            if (writeAt == end)
                writeAt = 0;
        }
        else pass++; // data did not wrap: skip the second pass
    }

    // done
    return r;
}
+ }
+
+
// Shared bit-mask lookup table used by all the inflate state machines.
internal static class InternalInflateConstants
{
    // And'ing with mask[n] masks the lower n bits
    internal static readonly int[] InflateMask = new int[] {
        0x00000000, 0x00000001, 0x00000003, 0x00000007,
        0x0000000f, 0x0000001f, 0x0000003f, 0x0000007f,
        0x000000ff, 0x000001ff, 0x000003ff, 0x000007ff,
        0x00000fff, 0x00001fff, 0x00003fff, 0x00007fff, 0x0000ffff };
}
+
+
+ sealed class InflateCodes
+ {
+ // waiting for "i:"=input,
+ // "o:"=output,
+ // "x:"=nothing
+ private const int START = 0; // x: set up for LEN
+ private const int LEN = 1; // i: get length/literal/eob next
+ private const int LENEXT = 2; // i: getting length extra (have base)
+ private const int DIST = 3; // i: get distance next
+ private const int DISTEXT = 4; // i: getting distance extra
+ private const int COPY = 5; // o: copying bytes in window, waiting for space
+ private const int LIT = 6; // o: got literal, waiting for output space
+ private const int WASH = 7; // o: got eob, possibly still output waiting
+ private const int END = 8; // x: got eob and all data flushed
+ private const int BADCODE = 9; // x: got error
+
+ internal int mode; // current inflate_codes mode
+
+ // mode dependent information
+ internal int len;
+
+ internal int[] tree; // pointer into tree
+ internal int tree_index = 0;
+ internal int need; // bits needed
+
+ internal int lit;
+
+ // if EXT or COPY, where and how much
+ internal int bitsToGet; // bits to get for extra
+ internal int dist; // distance back to copy from
+
+ internal byte lbits; // ltree bits decoded per branch
+ internal byte dbits; // dtree bits decoder per branch
+ internal int[] ltree; // literal/length/eob tree
+ internal int ltree_index; // literal/length/eob tree
+ internal int[] dtree; // distance tree
+ internal int dtree_index; // distance tree
+
// No-op constructor: all state is established by Init() before first use.
internal InflateCodes()
{
}
+
// Prepares the codes state machine for a new block.
// bl/bd: root bits per branch for the literal/length and distance trees;
// tl/td with their indices: the decoding tables to walk.
internal void Init(int bl, int bd, int[] tl, int tl_index, int[] td, int td_index)
{
    mode = START;
    lbits = (byte)bl;
    dbits = (byte)bd;
    ltree = tl;
    ltree_index = tl_index;
    dtree = td;
    dtree_index = td_index;
    tree = null; // selected lazily per symbol in Process()
}
+
// Decodes literal/length/distance codes for the body of one compressed
// block, writing decoded bytes into the blocks' sliding window. Uses the
// unrolled InflateFast() path when at least 258 output bytes and 10 input
// bytes are available; otherwise decodes symbol-by-symbol through the
// LEN/LENEXT/DIST/DISTEXT/COPY/LIT states. All exits restore the local
// working copies into the codec/blocks state and return via Flush().
internal int Process(InflateBlocks blocks, int r)
{
    int j;      // temporary storage
    int tindex; // temporary pointer
    int e;      // extra bits or operation
    int b = 0;  // bit buffer
    int k = 0;  // bits in bit buffer
    int p = 0;  // input data pointer
    int n;      // bytes available there
    int q;      // output window write pointer
    int m;      // bytes to end of window or read pointer
    int f;      // pointer to copy strings from

    ZlibCodec z = blocks._codec;

    // copy input/output information to locals (UPDATE macro restores)
    p = z.NextIn;
    n = z.AvailableBytesIn;
    b = blocks.bitb;
    k = blocks.bitk;
    q = blocks.writeAt; m = q < blocks.readAt ? blocks.readAt - q - 1 : blocks.end - q;

    // process input and output based on current state
    while (true)
    {
        switch (mode)
        {
            // waiting for "i:"=input, "o:"=output, "x:"=nothing
            case START: // x: set up for LEN
                if (m >= 258 && n >= 10)
                {
                    // enough room for the unrolled fast decoder
                    blocks.bitb = b; blocks.bitk = k;
                    z.AvailableBytesIn = n;
                    z.TotalBytesIn += p - z.NextIn;
                    z.NextIn = p;
                    blocks.writeAt = q;
                    r = InflateFast(lbits, dbits, ltree, ltree_index, dtree, dtree_index, blocks, z);

                    p = z.NextIn;
                    n = z.AvailableBytesIn;
                    b = blocks.bitb;
                    k = blocks.bitk;
                    q = blocks.writeAt; m = q < blocks.readAt ? blocks.readAt - q - 1 : blocks.end - q;

                    if (r != ZlibConstants.Z_OK)
                    {
                        mode = (r == ZlibConstants.Z_STREAM_END) ? WASH : BADCODE;
                        break;
                    }
                }
                need = lbits;
                tree = ltree;
                tree_index = ltree_index;

                mode = LEN;
                goto case LEN;

            case LEN: // i: get length/literal/eob next
                j = need;

                while (k < j)
                {
                    if (n != 0)
                        r = ZlibConstants.Z_OK;
                    else
                    {
                        blocks.bitb = b; blocks.bitk = k;
                        z.AvailableBytesIn = n;
                        z.TotalBytesIn += p - z.NextIn;
                        z.NextIn = p;
                        blocks.writeAt = q;
                        return blocks.Flush(r);
                    }
                    n--;
                    b |= (z.InputBuffer[p++] & 0xff) << k;
                    k += 8;
                }

                // tree entries are (op, bits, value) triples
                tindex = (tree_index + (b & InternalInflateConstants.InflateMask[j])) * 3;

                b >>= (tree[tindex + 1]);
                k -= (tree[tindex + 1]);

                e = tree[tindex];

                if (e == 0)
                {
                    // literal
                    lit = tree[tindex + 2];
                    mode = LIT;
                    break;
                }
                if ((e & 16) != 0)
                {
                    // length
                    bitsToGet = e & 15;
                    len = tree[tindex + 2];
                    mode = LENEXT;
                    break;
                }
                if ((e & 64) == 0)
                {
                    // next table
                    need = e;
                    tree_index = tindex / 3 + tree[tindex + 2];
                    break;
                }
                if ((e & 32) != 0)
                {
                    // end of block
                    mode = WASH;
                    break;
                }
                mode = BADCODE; // invalid code
                z.Message = "invalid literal/length code";
                r = ZlibConstants.Z_DATA_ERROR;

                blocks.bitb = b; blocks.bitk = k;
                z.AvailableBytesIn = n;
                z.TotalBytesIn += p - z.NextIn;
                z.NextIn = p;
                blocks.writeAt = q;
                return blocks.Flush(r);


            case LENEXT: // i: getting length extra (have base)
                j = bitsToGet;

                while (k < j)
                {
                    if (n != 0)
                        r = ZlibConstants.Z_OK;
                    else
                    {
                        blocks.bitb = b; blocks.bitk = k;
                        z.AvailableBytesIn = n; z.TotalBytesIn += p - z.NextIn; z.NextIn = p;
                        blocks.writeAt = q;
                        return blocks.Flush(r);
                    }
                    n--; b |= (z.InputBuffer[p++] & 0xff) << k;
                    k += 8;
                }

                len += (b & InternalInflateConstants.InflateMask[j]);

                b >>= j;
                k -= j;

                need = dbits;
                tree = dtree;
                tree_index = dtree_index;
                mode = DIST;
                goto case DIST;

            case DIST: // i: get distance next
                j = need;

                while (k < j)
                {
                    if (n != 0)
                        r = ZlibConstants.Z_OK;
                    else
                    {
                        blocks.bitb = b; blocks.bitk = k;
                        z.AvailableBytesIn = n; z.TotalBytesIn += p - z.NextIn; z.NextIn = p;
                        blocks.writeAt = q;
                        return blocks.Flush(r);
                    }
                    n--; b |= (z.InputBuffer[p++] & 0xff) << k;
                    k += 8;
                }

                tindex = (tree_index + (b & InternalInflateConstants.InflateMask[j])) * 3;

                b >>= tree[tindex + 1];
                k -= tree[tindex + 1];

                e = (tree[tindex]);
                if ((e & 0x10) != 0)
                {
                    // distance
                    bitsToGet = e & 15;
                    dist = tree[tindex + 2];
                    mode = DISTEXT;
                    break;
                }
                if ((e & 64) == 0)
                {
                    // next table
                    need = e;
                    tree_index = tindex / 3 + tree[tindex + 2];
                    break;
                }
                mode = BADCODE; // invalid code
                z.Message = "invalid distance code";
                r = ZlibConstants.Z_DATA_ERROR;

                blocks.bitb = b; blocks.bitk = k;
                z.AvailableBytesIn = n; z.TotalBytesIn += p - z.NextIn; z.NextIn = p;
                blocks.writeAt = q;
                return blocks.Flush(r);


            case DISTEXT: // i: getting distance extra
                j = bitsToGet;

                while (k < j)
                {
                    if (n != 0)
                        r = ZlibConstants.Z_OK;
                    else
                    {
                        blocks.bitb = b; blocks.bitk = k;
                        z.AvailableBytesIn = n; z.TotalBytesIn += p - z.NextIn; z.NextIn = p;
                        blocks.writeAt = q;
                        return blocks.Flush(r);
                    }
                    n--; b |= (z.InputBuffer[p++] & 0xff) << k;
                    k += 8;
                }

                dist += (b & InternalInflateConstants.InflateMask[j]);

                b >>= j;
                k -= j;

                mode = COPY;
                goto case COPY;

            case COPY: // o: copying bytes in window, waiting for space
                f = q - dist;
                while (f < 0)
                {
                    // modulo window size-"while" instead
                    f += blocks.end; // of "if" handles invalid distances
                }
                while (len != 0)
                {
                    if (m == 0)
                    {
                        // window full: wrap and/or flush to make room
                        if (q == blocks.end && blocks.readAt != 0)
                        {
                            q = 0; m = q < blocks.readAt ? blocks.readAt - q - 1 : blocks.end - q;
                        }
                        if (m == 0)
                        {
                            blocks.writeAt = q; r = blocks.Flush(r);
                            q = blocks.writeAt; m = q < blocks.readAt ? blocks.readAt - q - 1 : blocks.end - q;

                            if (q == blocks.end && blocks.readAt != 0)
                            {
                                q = 0; m = q < blocks.readAt ? blocks.readAt - q - 1 : blocks.end - q;
                            }

                            if (m == 0)
                            {
                                blocks.bitb = b; blocks.bitk = k;
                                z.AvailableBytesIn = n;
                                z.TotalBytesIn += p - z.NextIn;
                                z.NextIn = p;
                                blocks.writeAt = q;
                                return blocks.Flush(r);
                            }
                        }
                    }

                    blocks.window[q++] = blocks.window[f++]; m--;

                    if (f == blocks.end)
                        f = 0;
                    len--;
                }
                mode = START;
                break;

            case LIT: // o: got literal, waiting for output space
                if (m == 0)
                {
                    if (q == blocks.end && blocks.readAt != 0)
                    {
                        q = 0; m = q < blocks.readAt ? blocks.readAt - q - 1 : blocks.end - q;
                    }
                    if (m == 0)
                    {
                        blocks.writeAt = q; r = blocks.Flush(r);
                        q = blocks.writeAt; m = q < blocks.readAt ? blocks.readAt - q - 1 : blocks.end - q;

                        if (q == blocks.end && blocks.readAt != 0)
                        {
                            q = 0; m = q < blocks.readAt ? blocks.readAt - q - 1 : blocks.end - q;
                        }
                        if (m == 0)
                        {
                            blocks.bitb = b; blocks.bitk = k;
                            z.AvailableBytesIn = n; z.TotalBytesIn += p - z.NextIn; z.NextIn = p;
                            blocks.writeAt = q;
                            return blocks.Flush(r);
                        }
                    }
                }
                r = ZlibConstants.Z_OK;

                blocks.window[q++] = (byte)lit; m--;

                mode = START;
                break;

            case WASH: // o: got eob, possibly more output
                if (k > 7)
                {
                    // return unused byte, if any
                    k -= 8;
                    n++;
                    p--; // can always return one
                }

                blocks.writeAt = q; r = blocks.Flush(r);
                q = blocks.writeAt; m = q < blocks.readAt ? blocks.readAt - q - 1 : blocks.end - q;

                if (blocks.readAt != blocks.writeAt)
                {
                    blocks.bitb = b; blocks.bitk = k;
                    z.AvailableBytesIn = n; z.TotalBytesIn += p - z.NextIn; z.NextIn = p;
                    blocks.writeAt = q;
                    return blocks.Flush(r);
                }
                mode = END;
                goto case END;

            case END:
                r = ZlibConstants.Z_STREAM_END;
                blocks.bitb = b; blocks.bitk = k;
                z.AvailableBytesIn = n; z.TotalBytesIn += p - z.NextIn; z.NextIn = p;
                blocks.writeAt = q;
                return blocks.Flush(r);

            case BADCODE: // x: got error

                r = ZlibConstants.Z_DATA_ERROR;

                blocks.bitb = b; blocks.bitk = k;
                z.AvailableBytesIn = n; z.TotalBytesIn += p - z.NextIn; z.NextIn = p;
                blocks.writeAt = q;
                return blocks.Flush(r);

            default:
                r = ZlibConstants.Z_STREAM_ERROR;

                blocks.bitb = b; blocks.bitk = k;
                z.AvailableBytesIn = n; z.TotalBytesIn += p - z.NextIn; z.NextIn = p;
                blocks.writeAt = q;
                return blocks.Flush(r);
        }
    }
}
+
+
+ // Called with number of bytes left to write in window at least 258
+ // (the maximum string length) and number of input bytes available
+ // at least ten. The ten bytes are six bytes for the longest length/
+ // distance pair plus four bytes for overloading the bit buffer.
+
// Called with number of bytes left to write in window at least 258
// (the maximum string length) and number of input bytes available
// at least ten. The ten bytes are six bytes for the longest length/
// distance pair plus four bytes for overloading the bit buffer.
// Fast-path symbol decoder: decodes literal and length/distance pairs
// without per-bit availability checks, copying matched strings within
// the sliding window. Returns Z_OK when the guarantees no longer hold,
// Z_STREAM_END on end-of-block, or Z_DATA_ERROR on an invalid code.
internal int InflateFast(int bl, int bd, int[] tl, int tl_index, int[] td, int td_index, InflateBlocks s, ZlibCodec z)
{
    int t;            // temporary pointer
    int[] tp;         // temporary pointer
    int tp_index;     // temporary pointer
    int e;            // extra bits or operation
    int b;            // bit buffer
    int k;            // bits in bit buffer
    int p;            // input data pointer
    int n;            // bytes available there
    int q;            // output window write pointer
    int m;            // bytes to end of window or read pointer
    int ml;           // mask for literal/length tree
    int md;           // mask for distance tree
    int c;            // bytes to copy
    int d;            // distance back to copy from
    int r;            // copy source pointer

    int tp_index_t_3; // (tp_index+t)*3

    // load input, output, bit values
    p = z.NextIn; n = z.AvailableBytesIn; b = s.bitb; k = s.bitk;
    q = s.writeAt; m = q < s.readAt ? s.readAt - q - 1 : s.end - q;

    // initialize masks
    ml = InternalInflateConstants.InflateMask[bl];
    md = InternalInflateConstants.InflateMask[bd];

    // do until not enough input or output space for fast loop
    do
    {
        // assume called with m >= 258 && n >= 10
        // get literal/length code
        while (k < (20))
        {
            // max bits for literal/length code
            n--;
            b |= (z.InputBuffer[p++] & 0xff) << k; k += 8;
        }

        t = b & ml;
        tp = tl;
        tp_index = tl_index;
        tp_index_t_3 = (tp_index + t) * 3;
        if ((e = tp[tp_index_t_3]) == 0)
        {
            // op 0: plain literal, emit and continue
            b >>= (tp[tp_index_t_3 + 1]); k -= (tp[tp_index_t_3 + 1]);

            s.window[q++] = (byte)tp[tp_index_t_3 + 2];
            m--;
            continue;
        }
        do
        {

            b >>= (tp[tp_index_t_3 + 1]); k -= (tp[tp_index_t_3 + 1]);

            if ((e & 16) != 0)
            {
                // length code: base + extra bits
                e &= 15;
                c = tp[tp_index_t_3 + 2] + ((int)b & InternalInflateConstants.InflateMask[e]);

                b >>= e; k -= e;

                // decode distance base of block to copy
                while (k < 15)
                {
                    // max bits for distance code
                    n--;
                    b |= (z.InputBuffer[p++] & 0xff) << k; k += 8;
                }

                t = b & md;
                tp = td;
                tp_index = td_index;
                tp_index_t_3 = (tp_index + t) * 3;
                e = tp[tp_index_t_3];

                do
                {

                    b >>= (tp[tp_index_t_3 + 1]); k -= (tp[tp_index_t_3 + 1]);

                    if ((e & 16) != 0)
                    {
                        // get extra bits to add to distance base
                        e &= 15;
                        while (k < e)
                        {
                            // get extra bits (up to 13)
                            n--;
                            b |= (z.InputBuffer[p++] & 0xff) << k; k += 8;
                        }

                        d = tp[tp_index_t_3 + 2] + (b & InternalInflateConstants.InflateMask[e]);

                        b >>= e; k -= e;

                        // do the copy
                        m -= c;
                        if (q >= d)
                        {
                            // offset before dest
                            // just copy
                            r = q - d;
                            if (q - r > 0 && 2 > (q - r))
                            {
                                s.window[q++] = s.window[r++]; // minimum count is three,
                                s.window[q++] = s.window[r++]; // so unroll loop a little
                                c -= 2;
                            }
                            else
                            {
                                Array.Copy(s.window, r, s.window, q, 2);
                                q += 2; r += 2; c -= 2;
                            }
                        }
                        else
                        {
                            // else offset after destination
                            r = q - d;
                            do
                            {
                                r += s.end; // force pointer in window
                            }
                            while (r < 0); // covers invalid distances
                            e = s.end - r;
                            if (c > e)
                            {
                                // if source crosses,
                                c -= e; // wrapped copy
                                if (q - r > 0 && e > (q - r))
                                {
                                    do
                                    {
                                        s.window[q++] = s.window[r++];
                                    }
                                    while (--e != 0);
                                }
                                else
                                {
                                    Array.Copy(s.window, r, s.window, q, e);
                                    q += e; r += e; e = 0;
                                }
                                r = 0; // copy rest from start of window
                            }
                        }

                        // copy all or what's left
                        if (q - r > 0 && c > (q - r))
                        {
                            do
                            {
                                s.window[q++] = s.window[r++];
                            }
                            while (--c != 0);
                        }
                        else
                        {
                            Array.Copy(s.window, r, s.window, q, c);
                            q += c; r += c; c = 0;
                        }
                        break;
                    }
                    else if ((e & 64) == 0)
                    {
                        // next-level distance table
                        t += tp[tp_index_t_3 + 2];
                        t += (b & InternalInflateConstants.InflateMask[e]);
                        tp_index_t_3 = (tp_index + t) * 3;
                        e = tp[tp_index_t_3];
                    }
                    else
                    {
                        z.Message = "invalid distance code";

                        // give back the bytes still sitting in the bit buffer
                        c = z.AvailableBytesIn - n; c = (k >> 3) < c ? k >> 3 : c; n += c; p -= c; k -= (c << 3);

                        s.bitb = b; s.bitk = k;
                        z.AvailableBytesIn = n; z.TotalBytesIn += p - z.NextIn; z.NextIn = p;
                        s.writeAt = q;

                        return ZlibConstants.Z_DATA_ERROR;
                    }
                }
                while (true);
                break;
            }

            if ((e & 64) == 0)
            {
                // next-level literal/length table
                t += tp[tp_index_t_3 + 2];
                t += (b & InternalInflateConstants.InflateMask[e]);
                tp_index_t_3 = (tp_index + t) * 3;
                if ((e = tp[tp_index_t_3]) == 0)
                {
                    b >>= (tp[tp_index_t_3 + 1]); k -= (tp[tp_index_t_3 + 1]);
                    s.window[q++] = (byte)tp[tp_index_t_3 + 2];
                    m--;
                    break;
                }
            }
            else if ((e & 32) != 0)
            {
                // end-of-block code
                c = z.AvailableBytesIn - n; c = (k >> 3) < c ? k >> 3 : c; n += c; p -= c; k -= (c << 3);

                s.bitb = b; s.bitk = k;
                z.AvailableBytesIn = n; z.TotalBytesIn += p - z.NextIn; z.NextIn = p;
                s.writeAt = q;

                return ZlibConstants.Z_STREAM_END;
            }
            else
            {
                z.Message = "invalid literal/length code";

                c = z.AvailableBytesIn - n; c = (k >> 3) < c ? k >> 3 : c; n += c; p -= c; k -= (c << 3);

                s.bitb = b; s.bitk = k;
                z.AvailableBytesIn = n; z.TotalBytesIn += p - z.NextIn; z.NextIn = p;
                s.writeAt = q;

                return ZlibConstants.Z_DATA_ERROR;
            }
        }
        while (true);
    }
    while (m >= 258 && n >= 10);

    // not enough input or output--restore pointers and return
    c = z.AvailableBytesIn - n; c = (k >> 3) < c ? k >> 3 : c; n += c; p -= c; k -= (c << 3);

    s.bitb = b; s.bitk = k;
    z.AvailableBytesIn = n; z.TotalBytesIn += p - z.NextIn; z.NextIn = p;
    s.writeAt = q;

    return ZlibConstants.Z_OK;
}
+ }
+
+
+ internal sealed class InflateManager
+ {
+ // preset dictionary flag in zlib header
+ private const int PRESET_DICT = 0x20;
+
+ private const int Z_DEFLATED = 8;
+
+ private enum InflateManagerMode
+ {
+ METHOD = 0, // waiting for method byte
+ FLAG = 1, // waiting for flag byte
+ DICT4 = 2, // four dictionary check bytes to go
+ DICT3 = 3, // three dictionary check bytes to go
+ DICT2 = 4, // two dictionary check bytes to go
+ DICT1 = 5, // one dictionary check byte to go
+ DICT0 = 6, // waiting for inflateSetDictionary
+ BLOCKS = 7, // decompressing blocks
+ CHECK4 = 8, // four check bytes to go
+ CHECK3 = 9, // three check bytes to go
+ CHECK2 = 10, // two check bytes to go
+ CHECK1 = 11, // one check byte to go
+ DONE = 12, // finished check, done
+ BAD = 13, // got an error--stay here
+ }
+
+ private InflateManagerMode mode; // current inflate mode
+ internal ZlibCodec _codec; // pointer back to this zlib stream
+
+ // mode dependent information
+ internal int method; // if FLAGS, method byte
+
+ // if CHECK, check values to compare
+ internal uint computedCheck; // computed check value
+ internal uint expectedCheck; // stream check value
+
+ // if BAD, inflateSync's marker bytes count
+ internal int marker;
+
+ // mode independent information
+ //internal int nowrap; // flag for no wrapper
+ private bool _handleRfc1950HeaderBytes = true;
+ internal bool HandleRfc1950HeaderBytes
+ {
+ get { return _handleRfc1950HeaderBytes; }
+ set { _handleRfc1950HeaderBytes = value; }
+ }
+ internal int wbits; // log2(window size) (8..15, defaults to 15)
+
+ internal InflateBlocks blocks; // current inflate_blocks state
+
// Default: expect RFC 1950 (zlib) header and trailer bytes.
public InflateManager() { }

// expectRfc1950HeaderBytes: false to decode a raw DEFLATE stream with no
// zlib wrapper (no header bytes, no Adler-32 trailer).
public InflateManager(bool expectRfc1950HeaderBytes)
{
    _handleRfc1950HeaderBytes = expectRfc1950HeaderBytes;
}
+
// Resets the codec counters and the decoder so a new stream can be
// inflated; starts at METHOD when a zlib header is expected, otherwise
// jumps straight to BLOCKS. Always returns Z_OK.
internal int Reset()
{
    _codec.TotalBytesIn = _codec.TotalBytesOut = 0;
    _codec.Message = null;
    mode = HandleRfc1950HeaderBytes ? InflateManagerMode.METHOD : InflateManagerMode.BLOCKS;
    blocks.Reset();
    return ZlibConstants.Z_OK;
}
+
// Tears down the inflate state, releasing the block decoder's buffers if
// one was created. Always reports Z_OK (matches zlib's inflateEnd).
internal int End()
{
    blocks?.Free();
    blocks = null;
    return ZlibConstants.Z_OK;
}
+
// Binds this decoder to a codec and allocates the sliding window.
// w: window bits, must be 8..15 (window size is 1 << w); throws
// ZlibException otherwise. Returns Z_OK on success.
internal int Initialize(ZlibCodec codec, int w)
{
    _codec = codec;
    _codec.Message = null;
    blocks = null;

    // handle undocumented nowrap option (no zlib header or check)
    //nowrap = 0;
    //if (w < 0)
    //{
    //    w = - w;
    //    nowrap = 1;
    //}

    // set window size
    if (w < 8 || w > 15)
    {
        End();
        throw new ZlibException("Bad window size.");

        //return ZlibConstants.Z_STREAM_ERROR;
    }
    wbits = w;

    // pass ourselves as the check function object only when the RFC 1950
    // wrapper (and its Adler-32 trailer) is expected
    blocks = new InflateBlocks(codec,
        HandleRfc1950HeaderBytes ? this : null,
        1 << w);

    // reset state
    Reset();
    return ZlibConstants.Z_OK;
}
+
+
+ internal int Inflate(FlushType flush)
+ {
+ int b;
+
+ if (_codec.InputBuffer == null)
+ throw new ZlibException("InputBuffer is null. ");
+
+// int f = (flush == FlushType.Finish)
+// ? ZlibConstants.Z_BUF_ERROR
+// : ZlibConstants.Z_OK;
+
+ // workitem 8870
+ int f = ZlibConstants.Z_OK;
+ int r = ZlibConstants.Z_BUF_ERROR;
+
+ while (true)
+ {
+ switch (mode)
+ {
+ case InflateManagerMode.METHOD:
+ if (_codec.AvailableBytesIn == 0) return r;
+ r = f;
+ _codec.AvailableBytesIn--;
+ _codec.TotalBytesIn++;
+ if (((method = _codec.InputBuffer[_codec.NextIn++]) & 0xf) != Z_DEFLATED)
+ {
+ mode = InflateManagerMode.BAD;
+ _codec.Message = String.Format("unknown compression method (0x{0:X2})", method);
+ marker = 5; // can't try inflateSync
+ break;
+ }
+ if ((method >> 4) + 8 > wbits)
+ {
+ mode = InflateManagerMode.BAD;
+ _codec.Message = String.Format("invalid window size ({0})", (method >> 4) + 8);
+ marker = 5; // can't try inflateSync
+ break;
+ }
+ mode = InflateManagerMode.FLAG;
+ break;
+
+
+ case InflateManagerMode.FLAG:
+ if (_codec.AvailableBytesIn == 0) return r;
+ r = f;
+ _codec.AvailableBytesIn--;
+ _codec.TotalBytesIn++;
+ b = (_codec.InputBuffer[_codec.NextIn++]) & 0xff;
+
+ if ((((method << 8) + b) % 31) != 0)
+ {
+ mode = InflateManagerMode.BAD;
+ _codec.Message = "incorrect header check";
+ marker = 5; // can't try inflateSync
+ break;
+ }
+
+ mode = ((b & PRESET_DICT) == 0)
+ ? InflateManagerMode.BLOCKS
+ : InflateManagerMode.DICT4;
+ break;
+
+ case InflateManagerMode.DICT4:
+ if (_codec.AvailableBytesIn == 0) return r;
+ r = f;
+ _codec.AvailableBytesIn--;
+ _codec.TotalBytesIn++;
+ expectedCheck = (uint)((_codec.InputBuffer[_codec.NextIn++] << 24) & 0xff000000);
+ mode = InflateManagerMode.DICT3;
+ break;
+
+ case InflateManagerMode.DICT3:
+ if (_codec.AvailableBytesIn == 0) return r;
+ r = f;
+ _codec.AvailableBytesIn--;
+ _codec.TotalBytesIn++;
+ expectedCheck += (uint)((_codec.InputBuffer[_codec.NextIn++] << 16) & 0x00ff0000);
+ mode = InflateManagerMode.DICT2;
+ break;
+
+ case InflateManagerMode.DICT2:
+
+ if (_codec.AvailableBytesIn == 0) return r;
+ r = f;
+ _codec.AvailableBytesIn--;
+ _codec.TotalBytesIn++;
+ expectedCheck += (uint)((_codec.InputBuffer[_codec.NextIn++] << 8) & 0x0000ff00);
+ mode = InflateManagerMode.DICT1;
+ break;
+
+
+ case InflateManagerMode.DICT1:
+ if (_codec.AvailableBytesIn == 0) return r;
+ r = f;
+ _codec.AvailableBytesIn--; _codec.TotalBytesIn++;
+ expectedCheck += (uint)(_codec.InputBuffer[_codec.NextIn++] & 0x000000ff);
+ _codec._Adler32 = expectedCheck;
+ mode = InflateManagerMode.DICT0;
+ return ZlibConstants.Z_NEED_DICT;
+
+
+ case InflateManagerMode.DICT0:
+ mode = InflateManagerMode.BAD;
+ _codec.Message = "need dictionary";
+ marker = 0; // can try inflateSync
+ return ZlibConstants.Z_STREAM_ERROR;
+
+
+ case InflateManagerMode.BLOCKS:
+ r = blocks.Process(r);
+ if (r == ZlibConstants.Z_DATA_ERROR)
+ {
+ mode = InflateManagerMode.BAD;
+ marker = 0; // can try inflateSync
+ break;
+ }
+
+ if (r == ZlibConstants.Z_OK) r = f;
+
+ if (r != ZlibConstants.Z_STREAM_END)
+ return r;
+
+ r = f;
+ computedCheck = blocks.Reset();
+ if (!HandleRfc1950HeaderBytes)
+ {
+ mode = InflateManagerMode.DONE;
+ return ZlibConstants.Z_STREAM_END;
+ }
+ mode = InflateManagerMode.CHECK4;
+ break;
+
+ case InflateManagerMode.CHECK4:
+ if (_codec.AvailableBytesIn == 0) return r;
+ r = f;
+ _codec.AvailableBytesIn--;
+ _codec.TotalBytesIn++;
+ expectedCheck = (uint)((_codec.InputBuffer[_codec.NextIn++] << 24) & 0xff000000);
+ mode = InflateManagerMode.CHECK3;
+ break;
+
+ case InflateManagerMode.CHECK3:
+ if (_codec.AvailableBytesIn == 0) return r;
+ r = f;
+ _codec.AvailableBytesIn--; _codec.TotalBytesIn++;
+ expectedCheck += (uint)((_codec.InputBuffer[_codec.NextIn++] << 16) & 0x00ff0000);
+ mode = InflateManagerMode.CHECK2;
+ break;
+
+ case InflateManagerMode.CHECK2:
+ if (_codec.AvailableBytesIn == 0) return r;
+ r = f;
+ _codec.AvailableBytesIn--;
+ _codec.TotalBytesIn++;
+ expectedCheck += (uint)((_codec.InputBuffer[_codec.NextIn++] << 8) & 0x0000ff00);
+ mode = InflateManagerMode.CHECK1;
+ break;
+
+ case InflateManagerMode.CHECK1:
+ if (_codec.AvailableBytesIn == 0) return r;
+ r = f;
+ _codec.AvailableBytesIn--; _codec.TotalBytesIn++;
+ expectedCheck += (uint)(_codec.InputBuffer[_codec.NextIn++] & 0x000000ff);
+ if (computedCheck != expectedCheck)
+ {
+ mode = InflateManagerMode.BAD;
+ _codec.Message = "incorrect data check";
+ marker = 5; // can't try inflateSync
+ break;
+ }
+ mode = InflateManagerMode.DONE;
+ return ZlibConstants.Z_STREAM_END;
+
+ case InflateManagerMode.DONE:
+ return ZlibConstants.Z_STREAM_END;
+
+ case InflateManagerMode.BAD:
+ throw new ZlibException(String.Format("Bad state ({0})", _codec.Message));
+
+ default:
+ throw new ZlibException("Stream error.");
+
+ }
+ }
+ }
+
+
+
+ internal int SetDictionary(byte[] dictionary)
+ {
+ int index = 0;
+ int length = dictionary.Length;
+ if (mode != InflateManagerMode.DICT0)
+ throw new ZlibException("Stream error.");
+
+ if (Adler.Adler32(1, dictionary, 0, dictionary.Length) != _codec._Adler32)
+ {
+ return ZlibConstants.Z_DATA_ERROR;
+ }
+
+ _codec._Adler32 = Adler.Adler32(0, null, 0, 0);
+
+ if (length >= (1 << wbits))
+ {
+ length = (1 << wbits) - 1;
+ index = dictionary.Length - length;
+ }
+ blocks.SetDictionary(dictionary, index, length);
+ mode = InflateManagerMode.BLOCKS;
+ return ZlibConstants.Z_OK;
+ }
+
+
+ private static readonly byte[] mark = new byte[] { 0, 0, 0xff, 0xff };
+
+ internal int Sync()
+ {
+ int n; // number of bytes to look at
+ int p; // pointer to bytes
+ int m; // number of marker bytes found in a row
+ long r, w; // temporaries to save total_in and total_out
+
+ // set up
+ if (mode != InflateManagerMode.BAD)
+ {
+ mode = InflateManagerMode.BAD;
+ marker = 0;
+ }
+ if ((n = _codec.AvailableBytesIn) == 0)
+ return ZlibConstants.Z_BUF_ERROR;
+ p = _codec.NextIn;
+ m = marker;
+
+ // search
+ while (n != 0 && m < 4)
+ {
+ if (_codec.InputBuffer[p] == mark[m])
+ {
+ m++;
+ }
+ else if (_codec.InputBuffer[p] != 0)
+ {
+ m = 0;
+ }
+ else
+ {
+ m = 4 - m;
+ }
+ p++; n--;
+ }
+
+ // restore
+ _codec.TotalBytesIn += p - _codec.NextIn;
+ _codec.NextIn = p;
+ _codec.AvailableBytesIn = n;
+ marker = m;
+
+ // return no joy or set up to restart on a new block
+ if (m != 4)
+ {
+ return ZlibConstants.Z_DATA_ERROR;
+ }
+ r = _codec.TotalBytesIn;
+ w = _codec.TotalBytesOut;
+ Reset();
+ _codec.TotalBytesIn = r;
+ _codec.TotalBytesOut = w;
+ mode = InflateManagerMode.BLOCKS;
+ return ZlibConstants.Z_OK;
+ }
+
+
+ // Returns true if inflate is currently at the end of a block generated
+ // by Z_SYNC_FLUSH or Z_FULL_FLUSH. This function is used by one PPP
+ // implementation to provide an additional safety check. PPP uses Z_SYNC_FLUSH
+ // but removes the length bytes of the resulting empty stored block. When
+ // decompressing, PPP checks that at the end of input packet, inflate is
+ // waiting for these length bytes.
+ internal int SyncPoint(ZlibCodec z)
+ {
+ return blocks.SyncPoint();
+ }
+ }
+}
\ No newline at end of file
diff --git a/src/Hazelcast.Net/Polyfills/ZLib/Iso8859Dash1Encoding.cs b/src/Hazelcast.Net/Polyfills/ZLib/Iso8859Dash1Encoding.cs
new file mode 100644
index 0000000000..b182de8111
--- /dev/null
+++ b/src/Hazelcast.Net/Polyfills/ZLib/Iso8859Dash1Encoding.cs
@@ -0,0 +1,183 @@
+using System;
+using System.Collections.Generic;
+using System.Text;
+
+namespace Ionic.Encoding
+{
+ ///
+ /// Provides a text encoder for the iso-8859-1 encoding, aka Latin1 encoding,
+ /// for platforms that do not support it, for example on Silverlight or some
+ /// Compact Framework platforms.
+ ///
+ public class Iso8859Dash1Encoding : System.Text.Encoding
+ {
+ ///
+ /// Gets the name registered with the
+ /// Internet Assigned Numbers Authority (IANA) for the current encoding.
+ ///
+ ///
+ /// Always returns "iso-8859-1".
+ ///
+ public override string WebName
+ {
+ get { return "iso-8859-1"; }
+ }
+
+ ///
+ /// Encodes a set of characters from a character array into
+ /// a byte array.
+ ///
+ ///
+ /// The actual number of bytes written into .
+ ///
+ /// The character array containing the set of characters to encode.
+ /// The index of the first character to encode.
+ /// The number of characters to encode.
+ /// The byte array to contain the resulting sequence of bytes.
+ /// The index at which to start writing the resulting sequence of bytes.
+ ///
+ public override int GetBytes(char[] chars, int start, int count, byte[] bytes, int byteIndex)
+ {
+ if (chars == null)
+ throw new ArgumentNullException("chars", "null array");
+
+ if (bytes == null)
+ throw new ArgumentNullException("bytes", "null array");
+
+ if (start < 0)
+ throw new ArgumentOutOfRangeException("start");
+ if (count < 0)
+ throw new ArgumentOutOfRangeException("charCount");
+
+ if ((chars.Length - start) < count)
+ throw new ArgumentOutOfRangeException("chars");
+
+ if ((byteIndex < 0) || (byteIndex > bytes.Length))
+ throw new ArgumentOutOfRangeException("byteIndex");
+
+ // iso-8859-1 is special in that it was adopted as the first page of
+ // UCS - ISO's Universal Coding Standard, described in ISO 10646,
+ // which is the same as Unicode. This means that a Unicode
+ // character in the range of 0 to FF maps to the iso-8859-1 character
+ // with the same value. Because of that the encoding and decoding is
+ // trivial.
+ for (int i=0; i < count; i++)
+ {
+ char c = chars[start+i]; // get the unicode char
+
+ if (c >= '\x00FF') // out of range?
+ bytes[byteIndex+i] = (byte) '?';
+ else
+ bytes[byteIndex+i] = (byte) c;
+ }
+ return count;
+ }
+
+
+ ///
+ /// Decodes a sequence of bytes from the specified byte array into the specified character array.
+ ///
+ ///
+ /// The actual number of characters written into .
+ ///
+ /// The byte array containing the sequence of bytes to decode.
+ /// The index of the first byte to decode.
+ /// The number of bytes to decode.
+ /// The character array to contain the resulting set of characters.
+ /// The index at which to start writing the resulting set of characters.
+ ///
+ public override int GetChars(byte[] bytes, int start, int count, char[] chars, int charIndex)
+ {
+ if (chars == null)
+ throw new ArgumentNullException("chars", "null array");
+
+ if (bytes == null)
+ throw new ArgumentNullException("bytes", "null array");
+
+ if (start < 0)
+ throw new ArgumentOutOfRangeException("start");
+ if (count < 0)
+ throw new ArgumentOutOfRangeException("charCount");
+
+ if ((bytes.Length - start) < count)
+ throw new ArgumentOutOfRangeException("bytes");
+
+ if ((charIndex < 0) || (charIndex > chars.Length))
+ throw new ArgumentOutOfRangeException("charIndex");
+
+ // In the range 00 to FF, the Unicode characters are the same as the
+ // iso-8859-1 characters; because of that, decoding is trivial.
+ for (int i = 0; i < count; i++)
+ chars[charIndex + i] = (char) bytes[i + start];
+
+ return count;
+ }
+
+
+ ///
+ /// Calculates the number of bytes produced by encoding a set of characters
+ /// from the specified character array.
+ ///
+ ///
+ /// The number of bytes produced by encoding the specified characters. This class
+ /// always returns the value of .
+ ///
+ public override int GetByteCount(char[] chars, int index, int count)
+ {
+ return count;
+ }
+
+
+ ///
+ /// Calculates the number of characters produced by decoding a sequence
+ /// of bytes from the specified byte array.
+ ///
+ ///
+ /// The number of characters produced by decoding the specified sequence of bytes. This class
+ /// always returns the value of .
+ ///
+ public override int GetCharCount(byte[] bytes, int index, int count)
+ {
+ return count;
+ }
+
+
+ ///
+ /// Calculates the maximum number of bytes produced by encoding the specified number of characters.
+ ///
+ ///
+ /// The maximum number of bytes produced by encoding the specified number of characters. This
+ /// class always returns the value of .
+ ///
+ /// The number of characters to encode.
+ ///
+ public override int GetMaxByteCount(int charCount)
+ {
+ return charCount;
+ }
+
+ ///
+ /// Calculates the maximum number of characters produced by decoding the specified number of bytes.
+ ///
+ ///
+ /// The maximum number of characters produced by decoding the specified number of bytes. This class
+ /// always returns the value of .
+ ///
+ /// The number of bytes to decode.
+ public override int GetMaxCharCount(int byteCount)
+ {
+ return byteCount;
+ }
+
+ ///
+ /// Gets the number of characters that are supported by this encoding.
+ /// This property returns a maximum value of 256, as the encoding class
+ /// only supports single byte encodings (1 byte == 256 possible values).
+ ///
+ public static int CharacterCount
+ {
+ get { return 256; }
+ }
+
+ }
+}
diff --git a/src/Hazelcast.Net/Polyfills/ZLib/ParallelDeflateOutputStream.cs b/src/Hazelcast.Net/Polyfills/ZLib/ParallelDeflateOutputStream.cs
new file mode 100644
index 0000000000..f75141595e
--- /dev/null
+++ b/src/Hazelcast.Net/Polyfills/ZLib/ParallelDeflateOutputStream.cs
@@ -0,0 +1,1386 @@
+//#define Trace
+
+// ParallelDeflateOutputStream.cs
+// ------------------------------------------------------------------
+//
+// A DeflateStream that does compression only, it uses a
+// divide-and-conquer approach with multiple threads to exploit multiple
+// CPUs for the DEFLATE computation.
+//
+// last saved: <2011-July-31 14:49:40>
+//
+// ------------------------------------------------------------------
+//
+// Copyright (c) 2009-2011 by Dino Chiesa
+// All rights reserved!
+//
+// This code module is part of DotNetZip, a zipfile class library.
+//
+// ------------------------------------------------------------------
+//
+// This code is licensed under the Microsoft Public License.
+// See the file License.txt for the license details.
+// More info on: http://dotnetzip.codeplex.com
+//
+// ------------------------------------------------------------------
+
+using System;
+using System.Collections.Generic;
+using System.Threading;
+using Ionic.Zlib;
+using System.IO;
+
+
+namespace Ionic.Zlib
+{
+ internal class WorkItem
+ {
+ public byte[] buffer;
+ public byte[] compressed;
+ public int crc;
+ public int index;
+ public int ordinal;
+ public int inputBytesAvailable;
+ public int compressedBytesAvailable;
+ public ZlibCodec compressor;
+
+ public WorkItem(int size,
+ Ionic.Zlib.CompressionLevel compressLevel,
+ CompressionStrategy strategy,
+ int ix)
+ {
+ this.buffer= new byte[size];
+ // alloc 5 bytes overhead for every block (margin of safety= 2)
+ int n = size + ((size / 32768)+1) * 5 * 2;
+ this.compressed = new byte[n];
+ this.compressor = new ZlibCodec();
+ this.compressor.InitializeDeflate(compressLevel, false);
+ this.compressor.OutputBuffer = this.compressed;
+ this.compressor.InputBuffer = this.buffer;
+ this.index = ix;
+ }
+ }
+
+ ///
+ /// A class for compressing streams using the
+ /// Deflate algorithm with multiple threads.
+ ///
+ ///
+ ///
+ ///
+ /// This class performs DEFLATE compression through writing. For
+ /// more information on the Deflate algorithm, see IETF RFC 1951,
+ /// "DEFLATE Compressed Data Format Specification version 1.3."
+ ///
+ ///
+ ///
+ /// This class is similar to , except
+ /// that this class is for compression only, and this implementation uses an
+ /// approach that employs multiple worker threads to perform the DEFLATE. On
+ /// a multi-cpu or multi-core computer, the performance of this class can be
+ /// significantly higher than the single-threaded DeflateStream, particularly
+ /// for larger streams. How large? Anything over 10mb is a good candidate
+ /// for parallel compression.
+ ///
+ ///
+ ///
+ /// The tradeoff is that this class uses more memory and more CPU than the
+ /// vanilla DeflateStream, and also is less efficient as a compressor. For
+ /// large files the size of the compressed data stream can be less than 1%
+ /// larger than the size of a compressed data stream from the vanilla
+ /// DeflateStream. For smaller files the difference can be larger. The
+ /// difference will also be larger if you set the BufferSize to be lower than
+ /// the default value. Your mileage may vary. Finally, for small files, the
+ /// ParallelDeflateOutputStream can be much slower than the vanilla
+ /// DeflateStream, because of the overhead associated with using the thread
+ /// pool.
+ ///
+ ///
+ ///
+ ///
+ public class ParallelDeflateOutputStream : System.IO.Stream
+ {
+
+ private static readonly int IO_BUFFER_SIZE_DEFAULT = 64 * 1024; // 64k
+ private static readonly int BufferPairsPerCore = 4;
+
+ private System.Collections.Generic.List _pool;
+ private bool _leaveOpen;
+ private bool emitting;
+ private System.IO.Stream _outStream;
+ private int _maxBufferPairs;
+ private int _bufferSize = IO_BUFFER_SIZE_DEFAULT;
+ private AutoResetEvent _newlyCompressedBlob;
+ //private ManualResetEvent _writingDone;
+ //private ManualResetEvent _sessionReset;
+ private object _outputLock = new object();
+ private bool _isClosed;
+ private bool _firstWriteDone;
+ private int _currentlyFilling;
+ private int _lastFilled;
+ private int _lastWritten;
+ private int _latestCompressed;
+ private int _Crc32;
+ private Ionic.Crc.CRC32 _runningCrc;
+ private object _latestLock = new object();
+ private System.Collections.Generic.Queue _toWrite;
+ private System.Collections.Generic.Queue _toFill;
+ private Int64 _totalBytesProcessed;
+ private Ionic.Zlib.CompressionLevel _compressLevel;
+ private volatile Exception _pendingException;
+ private bool _handlingException;
+ private object _eLock = new Object(); // protects _pendingException
+
+ // This bitfield is used only when Trace is defined.
+ //private TraceBits _DesiredTrace = TraceBits.Write | TraceBits.WriteBegin |
+ //TraceBits.WriteDone | TraceBits.Lifecycle | TraceBits.Fill | TraceBits.Flush |
+ //TraceBits.Session;
+
+ //private TraceBits _DesiredTrace = TraceBits.WriteBegin | TraceBits.WriteDone | TraceBits.Synch | TraceBits.Lifecycle | TraceBits.Session ;
+
+ private TraceBits _DesiredTrace =
+ TraceBits.Session |
+ TraceBits.Compress |
+ TraceBits.WriteTake |
+ TraceBits.WriteEnter |
+ TraceBits.EmitEnter |
+ TraceBits.EmitDone |
+ TraceBits.EmitLock |
+ TraceBits.EmitSkip |
+ TraceBits.EmitBegin;
+
+ ///
+ /// Create a ParallelDeflateOutputStream.
+ ///
+ ///
+ ///
+ ///
+ /// This stream compresses data written into it via the DEFLATE
+ /// algorithm (see RFC 1951), and writes out the compressed byte stream.
+ ///
+ ///
+ ///
+ /// The instance will use the default compression level, the default
+ /// buffer sizes and the default number of threads and buffers per
+ /// thread.
+ ///
+ ///
+ ///
+ /// This class is similar to ,
+ /// except that this implementation uses an approach that employs
+ /// multiple worker threads to perform the DEFLATE. On a multi-cpu or
+ /// multi-core computer, the performance of this class can be
+ /// significantly higher than the single-threaded DeflateStream,
+ /// particularly for larger streams. How large? Anything over 10mb is
+ /// a good candidate for parallel compression.
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ /// This example shows how to use a ParallelDeflateOutputStream to compress
+ /// data. It reads a file, compresses it, and writes the compressed data to
+ /// a second, output file.
+ ///
+ ///
+ /// byte[] buffer = new byte[WORKING_BUFFER_SIZE];
+ /// int n= -1;
+ /// String outputFile = fileToCompress + ".compressed";
+ /// using (System.IO.Stream input = System.IO.File.OpenRead(fileToCompress))
+ /// {
+ /// using (var raw = System.IO.File.Create(outputFile))
+ /// {
+ /// using (Stream compressor = new ParallelDeflateOutputStream(raw))
+ /// {
+ /// while ((n= input.Read(buffer, 0, buffer.Length)) != 0)
+ /// {
+ /// compressor.Write(buffer, 0, n);
+ /// }
+ /// }
+ /// }
+ /// }
+ ///
+ ///
+ /// Dim buffer As Byte() = New Byte(4096) {}
+ /// Dim n As Integer = -1
+ /// Dim outputFile As String = (fileToCompress & ".compressed")
+ /// Using input As Stream = File.OpenRead(fileToCompress)
+ /// Using raw As FileStream = File.Create(outputFile)
+ /// Using compressor As Stream = New ParallelDeflateOutputStream(raw)
+ /// Do While (n <> 0)
+ /// If (n > 0) Then
+ /// compressor.Write(buffer, 0, n)
+ /// End If
+ /// n = input.Read(buffer, 0, buffer.Length)
+ /// Loop
+ /// End Using
+ /// End Using
+ /// End Using
+ ///
+ ///
+ /// The stream to which compressed data will be written.
+ public ParallelDeflateOutputStream(System.IO.Stream stream)
+ : this(stream, CompressionLevel.Default, CompressionStrategy.Default, false)
+ {
+ }
+
+ ///
+ /// Create a ParallelDeflateOutputStream using the specified CompressionLevel.
+ ///
+ ///
+ /// See the
+ /// constructor for example code.
+ ///
+ /// The stream to which compressed data will be written.
+ /// A tuning knob to trade speed for effectiveness.
+ public ParallelDeflateOutputStream(System.IO.Stream stream, CompressionLevel level)
+ : this(stream, level, CompressionStrategy.Default, false)
+ {
+ }
+
+ ///
+ /// Create a ParallelDeflateOutputStream and specify whether to leave the captive stream open
+ /// when the ParallelDeflateOutputStream is closed.
+ ///
+ ///
+ /// See the
+ /// constructor for example code.
+ ///
+ /// The stream to which compressed data will be written.
+ ///
+ /// true if the application would like the stream to remain open after inflation/deflation.
+ ///
+ public ParallelDeflateOutputStream(System.IO.Stream stream, bool leaveOpen)
+ : this(stream, CompressionLevel.Default, CompressionStrategy.Default, leaveOpen)
+ {
+ }
+
+ ///
+ /// Create a ParallelDeflateOutputStream and specify whether to leave the captive stream open
+ /// when the ParallelDeflateOutputStream is closed.
+ ///
+ ///
+ /// See the
+ /// constructor for example code.
+ ///
+ /// The stream to which compressed data will be written.
+ /// A tuning knob to trade speed for effectiveness.
+ ///
+ /// true if the application would like the stream to remain open after inflation/deflation.
+ ///
+ public ParallelDeflateOutputStream(System.IO.Stream stream, CompressionLevel level, bool leaveOpen)
+ : this(stream, CompressionLevel.Default, CompressionStrategy.Default, leaveOpen)
+ {
+ }
+
+ ///
+ /// Create a ParallelDeflateOutputStream using the specified
+ /// CompressionLevel and CompressionStrategy, and specifying whether to
+ /// leave the captive stream open when the ParallelDeflateOutputStream is
+ /// closed.
+ ///
+ ///
+ /// See the
+ /// constructor for example code.
+ ///
+ /// The stream to which compressed data will be written.
+ /// A tuning knob to trade speed for effectiveness.
+ ///
+ /// By tweaking this parameter, you may be able to optimize the compression for
+ /// data with particular characteristics.
+ ///
+ ///
+ /// true if the application would like the stream to remain open after inflation/deflation.
+ ///
+ public ParallelDeflateOutputStream(System.IO.Stream stream,
+ CompressionLevel level,
+ CompressionStrategy strategy,
+ bool leaveOpen)
+ {
+ TraceOutput(TraceBits.Lifecycle | TraceBits.Session, "-------------------------------------------------------");
+ TraceOutput(TraceBits.Lifecycle | TraceBits.Session, "Create {0:X8}", this.GetHashCode());
+ _outStream = stream;
+ _compressLevel= level;
+ Strategy = strategy;
+ _leaveOpen = leaveOpen;
+ this.MaxBufferPairs = 16; // default
+ }
+
+
+ ///
+ /// The ZLIB strategy to be used during compression.
+ ///
+ ///
+ public CompressionStrategy Strategy
+ {
+ get;
+ private set;
+ }
+
+ ///
+ /// The maximum number of buffer pairs to use.
+ ///
+ ///
+ ///
+ ///
+ /// This property sets an upper limit on the number of memory buffer
+ /// pairs to create. The implementation of this stream allocates
+ /// multiple buffers to facilitate parallel compression. As each buffer
+ /// fills up, this stream uses
+ /// ThreadPool.QueueUserWorkItem()
+ /// to compress those buffers in a background threadpool thread. After a
+ /// buffer is compressed, it is re-ordered and written to the output
+ /// stream.
+ ///
+ ///
+ ///
+ /// A higher number of buffer pairs enables a higher degree of
+ /// parallelism, which tends to increase the speed of compression on
+ /// multi-cpu computers. On the other hand, a higher number of buffer
+ /// pairs also implies a larger memory consumption, more active worker
+ /// threads, and a higher cpu utilization for any compression. This
+ /// property enables the application to limit its memory consumption and
+ /// CPU utilization behavior depending on requirements.
+ ///
+ ///
+ ///
+ /// For each compression "task" that occurs in parallel, there are 2
+ /// buffers allocated: one for input and one for output. This property
+ /// sets a limit for the number of pairs. The total amount of storage
+ /// space allocated for buffering will then be (N*S*2), where N is the
+ /// number of buffer pairs, S is the size of each buffer (). By default, DotNetZip allocates 4 buffer
+ /// pairs per CPU core, so if your machine has 4 cores, and you retain
+ /// the default buffer size of 128k, then the
+ /// ParallelDeflateOutputStream will use 4 * 4 * 2 * 128kb of buffer
+ /// memory in total, or 4mb, in blocks of 128kb. If you then set this
+ /// property to 8, then the number will be 8 * 2 * 128kb of buffer
+ /// memory, or 2mb.
+ ///
+ ///
+ ///
+ /// CPU utilization will also go up with additional buffers, because a
+ /// larger number of buffer pairs allows a larger number of background
+ /// threads to compress in parallel. If you find that parallel
+ /// compression is consuming too much memory or CPU, you can adjust this
+ /// value downward.
+ ///
+ ///
+ ///
+ /// The default value is 16. Different values may deliver better or
+ /// worse results, depending on your priorities and the dynamic
+ /// performance characteristics of your storage and compute resources.
+ ///
+ ///
+ ///
+ /// This property is not the number of buffer pairs to use; it is an
+ /// upper limit. An illustration: Suppose you have an application that
+ /// uses the default value of this property (which is 16), and it runs
+ /// on a machine with 2 CPU cores. In that case, DotNetZip will allocate
+ /// 4 buffer pairs per CPU core, for a total of 8 pairs. The upper
+ /// limit specified by this property has no effect.
+ ///
+ ///
+ ///
+ /// The application can set this value at any time, but it is effective
+ /// only before the first call to Write(), which is when the buffers are
+ /// allocated.
+ ///
+ ///
+ public int MaxBufferPairs
+ {
+ get
+ {
+ return _maxBufferPairs;
+ }
+ set
+ {
+ if (value < 4)
+ throw new ArgumentException("MaxBufferPairs",
+ "Value must be 4 or greater.");
+ _maxBufferPairs = value;
+ }
+ }
+
+ ///
+ /// The size of the buffers used by the compressor threads.
+ ///
+ ///
+ ///
+ ///
+ /// The default buffer size is 128k. The application can set this value
+ /// at any time, but it is effective only before the first Write().
+ ///
+ ///
+ ///
+ /// Larger buffer sizes implies larger memory consumption but allows
+ /// more efficient compression. Using smaller buffer sizes consumes less
+ /// memory but may result in less effective compression. For example,
+ /// using the default buffer size of 128k, the compression delivered is
+ /// within 1% of the compression delivered by the single-threaded . On the other hand, using a
+ /// BufferSize of 8k can result in a compressed data stream that is 5%
+ /// larger than that delivered by the single-threaded
+ /// DeflateStream. Excessively small buffer sizes can also cause
+ /// the speed of the ParallelDeflateOutputStream to drop, because of
+ /// larger thread scheduling overhead dealing with many many small
+ /// buffers.
+ ///
+ ///
+ ///
+ /// The total amount of storage space allocated for buffering will be
+ /// (N*S*2), where N is the number of buffer pairs, and S is the size of
+ /// each buffer (this property). There are 2 buffers used by the
+ /// compressor, one for input and one for output. By default, DotNetZip
+ /// allocates 4 buffer pairs per CPU core, so if your machine has 4
+ /// cores, then the number of buffer pairs used will be 16. If you
+ /// accept the default value of this property, 128k, then the
+ /// ParallelDeflateOutputStream will use 16 * 2 * 128kb of buffer memory
+ /// in total, or 4mb, in blocks of 128kb. If you set this property to
+ /// 64kb, then the number will be 16 * 2 * 64kb of buffer memory, or
+ /// 2mb.
+ ///
+ ///
+ ///
+ public int BufferSize
+ {
+ get { return _bufferSize;}
+ set
+ {
+ if (value < 1024)
+ throw new ArgumentOutOfRangeException("BufferSize",
+ "BufferSize must be greater than 1024 bytes");
+ _bufferSize = value;
+ }
+ }
+
+ ///
+ /// The CRC32 for the data that was written out, prior to compression.
+ ///
+ ///
+ /// This value is meaningful only after a call to Close().
+ ///
+ public int Crc32 { get { return _Crc32; } }
+
+
+ ///
+ /// The total number of uncompressed bytes processed by the ParallelDeflateOutputStream.
+ ///
+ ///
+ /// This value is meaningful only after a call to Close().
+ ///
+ public Int64 BytesProcessed { get { return _totalBytesProcessed; } }
+
+
+ private void _InitializePoolOfWorkItems()
+ {
+ _toWrite = new Queue();
+ _toFill = new Queue();
+ _pool = new System.Collections.Generic.List();
+ int nTasks = BufferPairsPerCore * Environment.ProcessorCount;
+ nTasks = Math.Min(nTasks, _maxBufferPairs);
+ for(int i=0; i < nTasks; i++)
+ {
+ _pool.Add(new WorkItem(_bufferSize, _compressLevel, Strategy, i));
+ _toFill.Enqueue(i);
+ }
+
+ _newlyCompressedBlob = new AutoResetEvent(false);
+ _runningCrc = new Ionic.Crc.CRC32();
+ _currentlyFilling = -1;
+ _lastFilled = -1;
+ _lastWritten = -1;
+ _latestCompressed = -1;
+ }
+
+
+
+
+ ///
+ /// Write data to the stream.
+ ///
+ ///
+ ///
+ ///
+ ///
+ /// To use the ParallelDeflateOutputStream to compress data, create a
+ /// ParallelDeflateOutputStream with CompressionMode.Compress, passing a
+ /// writable output stream. Then call Write() on that
+ /// ParallelDeflateOutputStream, providing uncompressed data as input. The
+ /// data sent to the output stream will be the compressed form of the data
+ /// written.
+ ///
+ ///
+ ///
+ /// To decompress data, use the class.
+ ///
+ ///
+ ///
+ /// The buffer holding data to write to the stream.
+ /// the offset within that data array to find the first byte to write.
+ /// the number of bytes to write.
+ public override void Write(byte[] buffer, int offset, int count)
+ {
+ bool mustWait = false;
+
+ // This method does this:
+ // 0. handles any pending exceptions
+ // 1. write any buffers that are ready to be written,
+ // 2. fills a work buffer; when full, flip state to 'Filled',
+ // 3. if more data to be written, goto step 1
+
+ if (_isClosed)
+ throw new InvalidOperationException();
+
+ // dispense any exceptions that occurred on the BG threads
+ if (_pendingException != null)
+ {
+ _handlingException = true;
+ var pe = _pendingException;
+ _pendingException = null;
+ throw pe;
+ }
+
+ if (count == 0) return;
+
+ if (!_firstWriteDone)
+ {
+ // Want to do this on first Write, first session, and not in the
+ // constructor. We want to allow MaxBufferPairs to
+ // change after construction, but before first Write.
+ _InitializePoolOfWorkItems();
+ _firstWriteDone = true;
+ }
+
+
+ do
+ {
+ // may need to make buffers available
+ EmitPendingBuffers(false, mustWait);
+
+ mustWait = false;
+ // use current buffer, or get a new buffer to fill
+ int ix = -1;
+ if (_currentlyFilling >= 0)
+ {
+ ix = _currentlyFilling;
+ TraceOutput(TraceBits.WriteTake,
+ "Write notake wi({0}) lf({1})",
+ ix,
+ _lastFilled);
+ }
+ else
+ {
+ TraceOutput(TraceBits.WriteTake, "Write take?");
+ if (_toFill.Count == 0)
+ {
+ // no available buffers, so... need to emit
+ // compressed buffers.
+ mustWait = true;
+ continue;
+ }
+
+ ix = _toFill.Dequeue();
+ TraceOutput(TraceBits.WriteTake,
+ "Write take wi({0}) lf({1})",
+ ix,
+ _lastFilled);
+ ++_lastFilled; // TODO: consider rollover?
+ }
+
+ WorkItem workitem = _pool[ix];
+
+ int limit = ((workitem.buffer.Length - workitem.inputBytesAvailable) > count)
+ ? count
+ : (workitem.buffer.Length - workitem.inputBytesAvailable);
+
+ workitem.ordinal = _lastFilled;
+
+ TraceOutput(TraceBits.Write,
+ "Write lock wi({0}) ord({1}) iba({2})",
+ workitem.index,
+ workitem.ordinal,
+ workitem.inputBytesAvailable
+ );
+
+ // copy from the provided buffer to our workitem, starting at
+ // the tail end of whatever data we might have in there currently.
+ Buffer.BlockCopy(buffer,
+ offset,
+ workitem.buffer,
+ workitem.inputBytesAvailable,
+ limit);
+
+ count -= limit;
+ offset += limit;
+ workitem.inputBytesAvailable += limit;
+ if (workitem.inputBytesAvailable == workitem.buffer.Length)
+ {
+ // No need for interlocked.increment: the Write()
+ // method is documented as not multi-thread safe, so
+ // we can assume Write() calls come in from only one
+ // thread.
+ TraceOutput(TraceBits.Write,
+ "Write QUWI wi({0}) ord({1}) iba({2}) nf({3})",
+ workitem.index,
+ workitem.ordinal,
+ workitem.inputBytesAvailable );
+
+ if (!ThreadPool.QueueUserWorkItem( _DeflateOne, workitem ))
+ throw new Exception("Cannot enqueue workitem");
+
+ _currentlyFilling = -1; // will get a new buffer next time
+ }
+ else
+ _currentlyFilling = ix;
+
+ if (count > 0)
+ TraceOutput(TraceBits.WriteEnter, "Write more");
+ }
+ while (count > 0); // until no more to write
+
+ TraceOutput(TraceBits.WriteEnter, "Write exit");
+ return;
+ }
+
+
+
+ private void _FlushFinish()
+ {
+ // After writing a series of compressed buffers, each one closed
+ // with Flush.Sync, we now write the final one as Flush.Finish,
+ // and then stop.
+ byte[] buffer = new byte[128];
+ var compressor = new ZlibCodec();
+ int rc = compressor.InitializeDeflate(_compressLevel, false);
+ compressor.InputBuffer = null;
+ compressor.NextIn = 0;
+ compressor.AvailableBytesIn = 0;
+ compressor.OutputBuffer = buffer;
+ compressor.NextOut = 0;
+ compressor.AvailableBytesOut = buffer.Length;
+ rc = compressor.Deflate(FlushType.Finish);
+
+ if (rc != ZlibConstants.Z_STREAM_END && rc != ZlibConstants.Z_OK)
+ throw new Exception("deflating: " + compressor.Message);
+
+ if (buffer.Length - compressor.AvailableBytesOut > 0)
+ {
+ TraceOutput(TraceBits.EmitBegin,
+ "Emit begin flush bytes({0})",
+ buffer.Length - compressor.AvailableBytesOut);
+
+ _outStream.Write(buffer, 0, buffer.Length - compressor.AvailableBytesOut);
+
+ TraceOutput(TraceBits.EmitDone,
+ "Emit done flush");
+ }
+
+ compressor.EndDeflate();
+
+ _Crc32 = _runningCrc.Crc32Result;
+ }
+
+
+ private void _Flush(bool lastInput)
+ {
+ if (_isClosed)
+ throw new InvalidOperationException();
+
+ if (emitting) return;
+
+ // compress any partial buffer
+ if (_currentlyFilling >= 0)
+ {
+ WorkItem workitem = _pool[_currentlyFilling];
+ _DeflateOne(workitem);
+ _currentlyFilling = -1; // get a new buffer next Write()
+ }
+
+ if (lastInput)
+ {
+ EmitPendingBuffers(true, false);
+ _FlushFinish();
+ }
+ else
+ {
+ EmitPendingBuffers(false, false);
+ }
+ }
+
+
+
+ ///
+ /// Flush the stream.
+ ///
+ public override void Flush()
+ {
+ if (_pendingException != null)
+ {
+ _handlingException = true;
+ var pe = _pendingException;
+ _pendingException = null;
+ throw pe;
+ }
+ if (_handlingException)
+ return;
+
+ _Flush(false);
+ }
+
+
+ ///
+ /// Close the stream.
+ ///
+ ///
+ /// You must call Close on the stream to guarantee that all of the data written in has
+ /// been compressed, and the compressed data has been written out.
+ ///
+ public override void Close()
+ {
+ TraceOutput(TraceBits.Session, "Close {0:X8}", this.GetHashCode());
+
+ if (_pendingException != null)
+ {
+ _handlingException = true;
+ var pe = _pendingException;
+ _pendingException = null;
+ throw pe;
+ }
+
+ if (_handlingException)
+ return;
+
+ if (_isClosed) return;
+
+ _Flush(true);
+
+ if (!_leaveOpen)
+ _outStream.Close();
+
+ _isClosed= true;
+ }
+
+
+
+ // workitem 10030 - implement a new Dispose method
+
+ /// Dispose the object
+ ///
+ ///
+ /// Because ParallelDeflateOutputStream is IDisposable, the
+ /// application must call this method when finished using the instance.
+ ///
+ ///
+ /// This method is generally called implicitly upon exit from
+ /// a using scope in C# (Using in VB).
+ ///
+ ///
+ new public void Dispose()
+ {
+ TraceOutput(TraceBits.Lifecycle, "Dispose {0:X8}", this.GetHashCode());
+ Close();
+ _pool = null;
+ Dispose(true);
+ }
+
+
+
+ /// The Dispose method
+ ///
+ /// indicates whether the Dispose method was invoked by user code.
+ ///
+ protected override void Dispose(bool disposing)
+ {
+ base.Dispose(disposing);
+ }
+
+
+ ///
+ /// Resets the stream for use with another stream.
+ ///
+ ///
+ /// Because the ParallelDeflateOutputStream is expensive to create, it
+ /// has been designed so that it can be recycled and re-used. You have
+ /// to call Close() on the stream first, then you can call Reset() on
+ /// it, to use it again on another stream.
+ ///
+ ///
+ ///
+ /// The new output stream for this era.
+ ///
+ ///
+ ///
+ ///
+ /// ParallelDeflateOutputStream deflater = null;
+ /// foreach (var inputFile in listOfFiles)
+ /// {
+ /// string outputFile = inputFile + ".compressed";
+ /// using (System.IO.Stream input = System.IO.File.OpenRead(inputFile))
+ /// {
+ /// using (var outStream = System.IO.File.Create(outputFile))
+ /// {
+ /// if (deflater == null)
+ /// deflater = new ParallelDeflateOutputStream(outStream,
+ /// CompressionLevel.Best,
+ /// CompressionStrategy.Default,
+ /// true);
+ /// deflater.Reset(outStream);
+ ///
+ /// while ((n= input.Read(buffer, 0, buffer.Length)) != 0)
+ /// {
+ /// deflater.Write(buffer, 0, n);
+ /// }
+ /// }
+ /// }
+ /// }
+ ///
+ ///
+ public void Reset(Stream stream)
+ {
+ TraceOutput(TraceBits.Session, "-------------------------------------------------------");
+ TraceOutput(TraceBits.Session, "Reset {0:X8} firstDone({1})", this.GetHashCode(), _firstWriteDone);
+
+ if (!_firstWriteDone) return;
+
+ // reset all status
+ _toWrite.Clear();
+ _toFill.Clear();
+ foreach (var workitem in _pool)
+ {
+ _toFill.Enqueue(workitem.index);
+ workitem.ordinal = -1;
+ }
+
+ _firstWriteDone = false;
+ _totalBytesProcessed = 0L;
+ _runningCrc = new Ionic.Crc.CRC32();
+ _isClosed= false;
+ _currentlyFilling = -1;
+ _lastFilled = -1;
+ _lastWritten = -1;
+ _latestCompressed = -1;
+ _outStream = stream;
+ }
+
+
+
+
+ private void EmitPendingBuffers(bool doAll, bool mustWait)
+ {
+ // When combining parallel deflation with a ZipSegmentedStream, it's
+ // possible for the ZSS to throw from within this method. In that
+ // case, Close/Dispose will be called on this stream, if this stream
+ // is employed within a using or try/finally pair as required. But
+ // this stream is unaware of the pending exception, so the Close()
+ // method invokes this method AGAIN. This can lead to a deadlock.
+ // Therefore, failfast if re-entering.
+
+ if (emitting) return;
+ emitting = true;
+ if (doAll || mustWait)
+ _newlyCompressedBlob.WaitOne();
+
+ do
+ {
+ int firstSkip = -1;
+ int millisecondsToWait = doAll ? 200 : (mustWait ? -1 : 0);
+ int nextToWrite = -1;
+
+ do
+ {
+ if (Monitor.TryEnter(_toWrite, millisecondsToWait))
+ {
+ nextToWrite = -1;
+ try
+ {
+ if (_toWrite.Count > 0)
+ nextToWrite = _toWrite.Dequeue();
+ }
+ finally
+ {
+ Monitor.Exit(_toWrite);
+ }
+
+ if (nextToWrite >= 0)
+ {
+ WorkItem workitem = _pool[nextToWrite];
+ if (workitem.ordinal != _lastWritten + 1)
+ {
+ // out of order. requeue and try again.
+ TraceOutput(TraceBits.EmitSkip,
+ "Emit skip wi({0}) ord({1}) lw({2}) fs({3})",
+ workitem.index,
+ workitem.ordinal,
+ _lastWritten,
+ firstSkip);
+
+ lock(_toWrite)
+ {
+ _toWrite.Enqueue(nextToWrite);
+ }
+
+ if (firstSkip == nextToWrite)
+ {
+ // We went around the list once.
+ // None of the items in the list is the one we want.
+ // Now wait for a compressor to signal again.
+ _newlyCompressedBlob.WaitOne();
+ firstSkip = -1;
+ }
+ else if (firstSkip == -1)
+ firstSkip = nextToWrite;
+
+ continue;
+ }
+
+ firstSkip = -1;
+
+ TraceOutput(TraceBits.EmitBegin,
+ "Emit begin wi({0}) ord({1}) cba({2})",
+ workitem.index,
+ workitem.ordinal,
+ workitem.compressedBytesAvailable);
+
+ _outStream.Write(workitem.compressed, 0, workitem.compressedBytesAvailable);
+ _runningCrc.Combine(workitem.crc, workitem.inputBytesAvailable);
+ _totalBytesProcessed += workitem.inputBytesAvailable;
+ workitem.inputBytesAvailable = 0;
+
+ TraceOutput(TraceBits.EmitDone,
+ "Emit done wi({0}) ord({1}) cba({2}) mtw({3})",
+ workitem.index,
+ workitem.ordinal,
+ workitem.compressedBytesAvailable,
+ millisecondsToWait);
+
+ _lastWritten = workitem.ordinal;
+ _toFill.Enqueue(workitem.index);
+
+ // don't wait next time through
+ if (millisecondsToWait == -1) millisecondsToWait = 0;
+ }
+ }
+ else
+ nextToWrite = -1;
+
+ } while (nextToWrite >= 0);
+
+ } while (doAll && (_lastWritten != _latestCompressed));
+
+ emitting = false;
+ }
+
+
+
+#if OLD
+ private void _PerpetualWriterMethod(object state)
+ {
+ TraceOutput(TraceBits.WriterThread, "_PerpetualWriterMethod START");
+
+ try
+ {
+ do
+ {
+ // wait for the next session
+ TraceOutput(TraceBits.Synch | TraceBits.WriterThread, "Synch _sessionReset.WaitOne(begin) PWM");
+ _sessionReset.WaitOne();
+ TraceOutput(TraceBits.Synch | TraceBits.WriterThread, "Synch _sessionReset.WaitOne(done) PWM");
+
+ if (_isDisposed) break;
+
+ TraceOutput(TraceBits.Synch | TraceBits.WriterThread, "Synch _sessionReset.Reset() PWM");
+ _sessionReset.Reset();
+
+ // repeatedly write buffers as they become ready
+ WorkItem workitem = null;
+ Ionic.Zlib.CRC32 c= new Ionic.Zlib.CRC32();
+ do
+ {
+ workitem = _pool[_nextToWrite % _pc];
+ lock(workitem)
+ {
+ if (_noMoreInputForThisSegment)
+ TraceOutput(TraceBits.Write,
+ "Write drain wi({0}) stat({1}) canuse({2}) cba({3})",
+ workitem.index,
+ workitem.status,
+ (workitem.status == (int)WorkItem.Status.Compressed),
+ workitem.compressedBytesAvailable);
+
+ do
+ {
+ if (workitem.status == (int)WorkItem.Status.Compressed)
+ {
+ TraceOutput(TraceBits.WriteBegin,
+ "Write begin wi({0}) stat({1}) cba({2})",
+ workitem.index,
+ workitem.status,
+ workitem.compressedBytesAvailable);
+
+ workitem.status = (int)WorkItem.Status.Writing;
+ _outStream.Write(workitem.compressed, 0, workitem.compressedBytesAvailable);
+ c.Combine(workitem.crc, workitem.inputBytesAvailable);
+ _totalBytesProcessed += workitem.inputBytesAvailable;
+ _nextToWrite++;
+ workitem.inputBytesAvailable= 0;
+ workitem.status = (int)WorkItem.Status.Done;
+
+ TraceOutput(TraceBits.WriteDone,
+ "Write done wi({0}) stat({1}) cba({2})",
+ workitem.index,
+ workitem.status,
+ workitem.compressedBytesAvailable);
+
+
+ Monitor.Pulse(workitem);
+ break;
+ }
+ else
+ {
+ int wcycles = 0;
+ // I've locked a workitem I cannot use.
+ // Therefore, wake someone else up, and then release the lock.
+ while (workitem.status != (int)WorkItem.Status.Compressed)
+ {
+ TraceOutput(TraceBits.WriteWait,
+ "Write waiting wi({0}) stat({1}) nw({2}) nf({3}) nomore({4})",
+ workitem.index,
+ workitem.status,
+ _nextToWrite, _nextToFill,
+ _noMoreInputForThisSegment );
+
+ if (_noMoreInputForThisSegment && _nextToWrite == _nextToFill)
+ break;
+
+ wcycles++;
+
+ // wake up someone else
+ Monitor.Pulse(workitem);
+ // release and wait
+ Monitor.Wait(workitem);
+
+ if (workitem.status == (int)WorkItem.Status.Compressed)
+ TraceOutput(TraceBits.WriteWait,
+ "Write A-OK wi({0}) stat({1}) iba({2}) cba({3}) cyc({4})",
+ workitem.index,
+ workitem.status,
+ workitem.inputBytesAvailable,
+ workitem.compressedBytesAvailable,
+ wcycles);
+ }
+
+ if (_noMoreInputForThisSegment && _nextToWrite == _nextToFill)
+ break;
+
+ }
+ }
+ while (true);
+ }
+
+ if (_noMoreInputForThisSegment)
+ TraceOutput(TraceBits.Write,
+ "Write nomore nw({0}) nf({1}) break({2})",
+ _nextToWrite, _nextToFill, (_nextToWrite == _nextToFill));
+
+ if (_noMoreInputForThisSegment && _nextToWrite == _nextToFill)
+ break;
+
+ } while (true);
+
+
+ // Finish:
+ // After writing a series of buffers, closing each one with
+ // Flush.Sync, we now write the final one as Flush.Finish, and
+ // then stop.
+ byte[] buffer = new byte[128];
+ ZlibCodec compressor = new ZlibCodec();
+ int rc = compressor.InitializeDeflate(_compressLevel, false);
+ compressor.InputBuffer = null;
+ compressor.NextIn = 0;
+ compressor.AvailableBytesIn = 0;
+ compressor.OutputBuffer = buffer;
+ compressor.NextOut = 0;
+ compressor.AvailableBytesOut = buffer.Length;
+ rc = compressor.Deflate(FlushType.Finish);
+
+ if (rc != ZlibConstants.Z_STREAM_END && rc != ZlibConstants.Z_OK)
+ throw new Exception("deflating: " + compressor.Message);
+
+ if (buffer.Length - compressor.AvailableBytesOut > 0)
+ {
+ TraceOutput(TraceBits.WriteBegin,
+ "Write begin flush bytes({0})",
+ buffer.Length - compressor.AvailableBytesOut);
+
+ _outStream.Write(buffer, 0, buffer.Length - compressor.AvailableBytesOut);
+
+ TraceOutput(TraceBits.WriteBegin,
+ "Write done flush");
+ }
+
+ compressor.EndDeflate();
+
+ _Crc32 = c.Crc32Result;
+
+ // signal that writing is complete:
+ TraceOutput(TraceBits.Synch, "Synch _writingDone.Set() PWM");
+ _writingDone.Set();
+ }
+ while (true);
+ }
+ catch (System.Exception exc1)
+ {
+ lock(_eLock)
+ {
+ // expose the exception to the main thread
+ if (_pendingException!=null)
+ _pendingException = exc1;
+ }
+ }
+
+ TraceOutput(TraceBits.WriterThread, "_PerpetualWriterMethod FINIS");
+ }
+#endif
+
+
+
+
+ private void _DeflateOne(Object wi)
+ {
+ // compress one buffer
+ WorkItem workitem = (WorkItem) wi;
+ try
+ {
+ int myItem = workitem.index;
+ Ionic.Crc.CRC32 crc = new Ionic.Crc.CRC32();
+
+ // calc CRC on the buffer
+ crc.SlurpBlock(workitem.buffer, 0, workitem.inputBytesAvailable);
+
+ // deflate it
+ DeflateOneSegment(workitem);
+
+ // update status
+ workitem.crc = crc.Crc32Result;
+ TraceOutput(TraceBits.Compress,
+ "Compress wi({0}) ord({1}) len({2})",
+ workitem.index,
+ workitem.ordinal,
+ workitem.compressedBytesAvailable
+ );
+
+ lock(_latestLock)
+ {
+ if (workitem.ordinal > _latestCompressed)
+ _latestCompressed = workitem.ordinal;
+ }
+ lock (_toWrite)
+ {
+ _toWrite.Enqueue(workitem.index);
+ }
+ _newlyCompressedBlob.Set();
+ }
+ catch (System.Exception exc1)
+ {
+ lock(_eLock)
+ {
+ // expose the exception to the main thread
+ if (_pendingException!=null)
+ _pendingException = exc1;
+ }
+ }
+ }
+
+
+
+
+ private bool DeflateOneSegment(WorkItem workitem)
+ {
+ ZlibCodec compressor = workitem.compressor;
+ int rc= 0;
+ compressor.ResetDeflate();
+ compressor.NextIn = 0;
+
+ compressor.AvailableBytesIn = workitem.inputBytesAvailable;
+
+ // step 1: deflate the buffer
+ compressor.NextOut = 0;
+ compressor.AvailableBytesOut = workitem.compressed.Length;
+ do
+ {
+ compressor.Deflate(FlushType.None);
+ }
+ while (compressor.AvailableBytesIn > 0 || compressor.AvailableBytesOut == 0);
+
+ // step 2: flush (sync)
+ rc = compressor.Deflate(FlushType.Sync);
+
+ workitem.compressedBytesAvailable= (int) compressor.TotalBytesOut;
+ return true;
+ }
+
+
+ [System.Diagnostics.ConditionalAttribute("Trace")]
+ private void TraceOutput(TraceBits bits, string format, params object[] varParams)
+ {
+ if ((bits & _DesiredTrace) != 0)
+ {
+ lock(_outputLock)
+ {
+ int tid = Thread.CurrentThread.GetHashCode();
+#if !SILVERLIGHT
+ Console.ForegroundColor = (ConsoleColor) (tid % 8 + 8);
+#endif
+ Console.Write("{0:000} PDOS ", tid);
+ Console.WriteLine(format, varParams);
+#if !SILVERLIGHT
+ Console.ResetColor();
+#endif
+ }
+ }
+ }
+
+
+ // used only when Trace is defined
+ [Flags]
+ enum TraceBits : uint
+ {
+ None = 0,
+ NotUsed1 = 1,
+ EmitLock = 2,
+ EmitEnter = 4, // enter _EmitPending
+ EmitBegin = 8, // begin to write out
+ EmitDone = 16, // done writing out
+ EmitSkip = 32, // writer skipping a workitem
+ EmitAll = 58, // All Emit flags
+ Flush = 64,
+ Lifecycle = 128, // constructor/disposer
+ Session = 256, // Close/Reset
+ Synch = 512, // thread synchronization
+ Instance = 1024, // instance settings
+ Compress = 2048, // compress task
+ Write = 4096, // filling buffers, when caller invokes Write()
+ WriteEnter = 8192, // upon entry to Write()
+ WriteTake = 16384, // on _toFill.Take()
+ All = 0xffffffff,
+ }
+
+
+
+ ///
+ /// Indicates whether the stream supports Seek operations.
+ ///
+ ///
+ /// Always returns false.
+ ///
+ public override bool CanSeek
+ {
+ get { return false; }
+ }
+
+
+ ///
+ /// Indicates whether the stream supports Read operations.
+ ///
+ ///
+ /// Always returns false.
+ ///
+ public override bool CanRead
+ {
+ get {return false;}
+ }
+
+ ///
+ /// Indicates whether the stream supports Write operations.
+ ///
+ ///
+ /// Returns true if the provided stream is writable.
+ ///
+ public override bool CanWrite
+ {
+ get { return _outStream.CanWrite; }
+ }
+
+ ///
+ /// Reading this property always throws a NotSupportedException.
+ ///
+ public override long Length
+ {
+ get { throw new NotSupportedException(); }
+ }
+
+ ///
+ /// Returns the current position of the output stream.
+ ///
+ ///
+ ///
+ /// Because the output gets written by a background thread,
+ /// the value may change asynchronously. Setting this
+ /// property always throws a NotSupportedException.
+ ///
+ ///
+ public override long Position
+ {
+ get { return _outStream.Position; }
+ set { throw new NotSupportedException(); }
+ }
+
+ ///
+ /// This method always throws a NotSupportedException.
+ ///
+ ///
+ /// The buffer into which data would be read, IF THIS METHOD
+ /// ACTUALLY DID ANYTHING.
+ ///
+ ///
+ /// The offset within that data array at which to insert the
+ /// data that is read, IF THIS METHOD ACTUALLY DID
+ /// ANYTHING.
+ ///
+ ///
+ /// The number of bytes to write, IF THIS METHOD ACTUALLY DID
+ /// ANYTHING.
+ ///
+ /// nothing.
+ public override int Read(byte[] buffer, int offset, int count)
+ {
+ throw new NotSupportedException();
+ }
+
+ ///
+ /// This method always throws a NotSupportedException.
+ ///
+ ///
+ /// The offset to seek to....
+ /// IF THIS METHOD ACTUALLY DID ANYTHING.
+ ///
+ ///
+ /// The reference specifying how to apply the offset.... IF
+ /// THIS METHOD ACTUALLY DID ANYTHING.
+ ///
+ /// nothing. It always throws.
+ public override long Seek(long offset, System.IO.SeekOrigin origin)
+ {
+ throw new NotSupportedException();
+ }
+
+ ///
+ /// This method always throws a NotSupportedException.
+ ///
+ ///
+ /// The new value for the stream length.... IF
+ /// THIS METHOD ACTUALLY DID ANYTHING.
+ ///
+ public override void SetLength(long value)
+ {
+ throw new NotSupportedException();
+ }
+
+ }
+
+}
+
+
diff --git a/src/Hazelcast.Net/Polyfills/ZLib/Tree.cs b/src/Hazelcast.Net/Polyfills/ZLib/Tree.cs
new file mode 100644
index 0000000000..1db8c4f43f
--- /dev/null
+++ b/src/Hazelcast.Net/Polyfills/ZLib/Tree.cs
@@ -0,0 +1,423 @@
+// Tree.cs
+// ------------------------------------------------------------------
+//
+// Copyright (c) 2009 Dino Chiesa and Microsoft Corporation.
+// All rights reserved.
+//
+// This code module is part of DotNetZip, a zipfile class library.
+//
+// ------------------------------------------------------------------
+//
+// This code is licensed under the Microsoft Public License.
+// See the file License.txt for the license details.
+// More info on: http://dotnetzip.codeplex.com
+//
+// ------------------------------------------------------------------
+//
+// last saved (in emacs):
+// Time-stamp: <2009-October-28 13:29:50>
+//
+// ------------------------------------------------------------------
+//
+// This module defines classes for zlib compression and
+// decompression. This code is derived from the jzlib implementation of
+// zlib. In keeping with the license for jzlib, the copyright to that
+// code is below.
+//
+// ------------------------------------------------------------------
+//
+// Copyright (c) 2000,2001,2002,2003 ymnk, JCraft,Inc. All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// 1. Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in
+// the documentation and/or other materials provided with the distribution.
+//
+// 3. The names of the authors may not be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESSED OR IMPLIED WARRANTIES,
+// INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
+// FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL JCRAFT,
+// INC. OR ANY CONTRIBUTORS TO THIS SOFTWARE BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
+// OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+// EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// -----------------------------------------------------------------------
+//
+// This program is based on zlib-1.1.3; credit to authors
+// Jean-loup Gailly(jloup@gzip.org) and Mark Adler(madler@alumni.caltech.edu)
+// and contributors of zlib.
+//
+// -----------------------------------------------------------------------
+
+
+using System;
+
+namespace Ionic.Zlib
+{
+ sealed class Tree
+ {
+ private static readonly int HEAP_SIZE = (2 * InternalConstants.L_CODES + 1);
+
+ // extra bits for each length code
+ internal static readonly int[] ExtraLengthBits = new int[]
+ {
+ 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2,
+ 3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 0
+ };
+
+ // extra bits for each distance code
+ internal static readonly int[] ExtraDistanceBits = new int[]
+ {
+ 0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6,
+ 7, 7, 8, 8, 9, 9, 10, 10, 11, 11, 12, 12, 13, 13
+ };
+
+ // extra bits for each bit length code
+ internal static readonly int[] extra_blbits = new int[]{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 3, 7};
+
+ internal static readonly sbyte[] bl_order = new sbyte[]{16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15};
+
+
+ // The lengths of the bit length codes are sent in order of decreasing
+ // probability, to avoid transmitting the lengths for unused bit
+ // length codes.
+
+ internal const int Buf_size = 8 * 2;
+
+ // see definition of array dist_code below
+ //internal const int DIST_CODE_LEN = 512;
+
+ private static readonly sbyte[] _dist_code = new sbyte[]
+ {
+ 0, 1, 2, 3, 4, 4, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7,
+ 8, 8, 8, 8, 8, 8, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11,
+ 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12,
+ 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12,
+ 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13,
+ 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13,
+ 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14,
+ 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14,
+ 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14,
+ 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14,
+ 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15,
+ 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15,
+ 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15,
+ 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15,
+ 0, 0, 16, 17, 18, 18, 19, 19, 20, 20, 20, 20, 21, 21, 21, 21,
+ 22, 22, 22, 22, 22, 22, 22, 22, 23, 23, 23, 23, 23, 23, 23, 23,
+ 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
+ 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
+ 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26,
+ 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26,
+ 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27,
+ 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27,
+ 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28,
+ 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28,
+ 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28,
+ 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28,
+ 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29,
+ 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29,
+ 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29,
+ 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29
+ };
+
+ internal static readonly sbyte[] LengthCode = new sbyte[]
+ {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 8, 9, 9, 10, 10, 11, 11,
+ 12, 12, 12, 12, 13, 13, 13, 13, 14, 14, 14, 14, 15, 15, 15, 15,
+ 16, 16, 16, 16, 16, 16, 16, 16, 17, 17, 17, 17, 17, 17, 17, 17,
+ 18, 18, 18, 18, 18, 18, 18, 18, 19, 19, 19, 19, 19, 19, 19, 19,
+ 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20,
+ 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21,
+ 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22,
+ 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23,
+ 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
+ 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
+ 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
+ 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
+ 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26,
+ 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26,
+ 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27,
+ 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 28
+ };
+
+
+ internal static readonly int[] LengthBase = new int[]
+ {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 10, 12, 14, 16, 20, 24, 28,
+ 32, 40, 48, 56, 64, 80, 96, 112, 128, 160, 192, 224, 0
+ };
+
+
+ internal static readonly int[] DistanceBase = new int[]
+ {
+ 0, 1, 2, 3, 4, 6, 8, 12, 16, 24, 32, 48, 64, 96, 128, 192,
+ 256, 384, 512, 768, 1024, 1536, 2048, 3072, 4096, 6144, 8192, 12288, 16384, 24576
+ };
+
+
+ ///
+ /// Map from a distance to a distance code.
+ ///
+ ///
+ /// No side effects. _dist_code[256] and _dist_code[257] are never used.
+ ///
+ internal static int DistanceCode(int dist)
+ {
+ return (dist < 256)
+ ? _dist_code[dist]
+ : _dist_code[256 + SharedUtils.URShift(dist, 7)];
+ }
+
+ internal short[] dyn_tree; // the dynamic tree
+ internal int max_code; // largest code with non zero frequency
+ internal StaticTree staticTree; // the corresponding static tree
+
+ // Compute the optimal bit lengths for a tree and update the total bit length
+ // for the current block.
+ // IN assertion: the fields freq and dad are set, heap[heap_max] and
+ // above are the tree nodes sorted by increasing frequency.
+ // OUT assertions: the field len is set to the optimal bit length, the
+ // array bl_count contains the frequencies for each bit length.
+ // The length opt_len is updated; static_len is also updated if stree is
+ // not null.
+ internal void gen_bitlen(DeflateManager s)
+ {
+ short[] tree = dyn_tree;
+ short[] stree = staticTree.treeCodes;
+ int[] extra = staticTree.extraBits;
+ int base_Renamed = staticTree.extraBase;
+ int max_length = staticTree.maxLength;
+ int h; // heap index
+ int n, m; // iterate over the tree elements
+ int bits; // bit length
+ int xbits; // extra bits
+ short f; // frequency
+ int overflow = 0; // number of elements with bit length too large
+
+ for (bits = 0; bits <= InternalConstants.MAX_BITS; bits++)
+ s.bl_count[bits] = 0;
+
+ // In a first pass, compute the optimal bit lengths (which may
+ // overflow in the case of the bit length tree).
+ tree[s.heap[s.heap_max] * 2 + 1] = 0; // root of the heap
+
+ for (h = s.heap_max + 1; h < HEAP_SIZE; h++)
+ {
+ n = s.heap[h];
+ bits = tree[tree[n * 2 + 1] * 2 + 1] + 1;
+ if (bits > max_length)
+ {
+ bits = max_length; overflow++;
+ }
+ tree[n * 2 + 1] = (short) bits;
+ // We overwrite tree[n*2+1] which is no longer needed
+
+ if (n > max_code)
+ continue; // not a leaf node
+
+ s.bl_count[bits]++;
+ xbits = 0;
+ if (n >= base_Renamed)
+ xbits = extra[n - base_Renamed];
+ f = tree[n * 2];
+ s.opt_len += f * (bits + xbits);
+ if (stree != null)
+ s.static_len += f * (stree[n * 2 + 1] + xbits);
+ }
+ if (overflow == 0)
+ return ;
+
+ // This happens for example on obj2 and pic of the Calgary corpus
+ // Find the first bit length which could increase:
+ do
+ {
+ bits = max_length - 1;
+ while (s.bl_count[bits] == 0)
+ bits--;
+ s.bl_count[bits]--; // move one leaf down the tree
+ s.bl_count[bits + 1] = (short) (s.bl_count[bits + 1] + 2); // move one overflow item as its brother
+ s.bl_count[max_length]--;
+ // The brother of the overflow item also moves one step up,
+ // but this does not affect bl_count[max_length]
+ overflow -= 2;
+ }
+ while (overflow > 0);
+
+ for (bits = max_length; bits != 0; bits--)
+ {
+ n = s.bl_count[bits];
+ while (n != 0)
+ {
+ m = s.heap[--h];
+ if (m > max_code)
+ continue;
+ if (tree[m * 2 + 1] != bits)
+ {
+ s.opt_len = (int) (s.opt_len + ((long) bits - (long) tree[m * 2 + 1]) * (long) tree[m * 2]);
+ tree[m * 2 + 1] = (short) bits;
+ }
+ n--;
+ }
+ }
+ }
+
+ // Construct one Huffman tree and assigns the code bit strings and lengths.
+ // Update the total bit length for the current block.
+ // IN assertion: the field freq is set for all tree elements.
+ // OUT assertions: the fields len and code are set to the optimal bit length
+ // and corresponding code. The length opt_len is updated; static_len is
+ // also updated if stree is not null. The field max_code is set.
+ internal void build_tree(DeflateManager s)
+ {
+ short[] tree = dyn_tree;
+ short[] stree = staticTree.treeCodes;
+ int elems = staticTree.elems;
+ int n, m; // iterate over heap elements
+ int max_code = -1; // largest code with non zero frequency
+ int node; // new node being created
+
+ // Construct the initial heap, with least frequent element in
+ // heap[1]. The sons of heap[n] are heap[2*n] and heap[2*n+1].
+ // heap[0] is not used.
+ s.heap_len = 0;
+ s.heap_max = HEAP_SIZE;
+
+ for (n = 0; n < elems; n++)
+ {
+ if (tree[n * 2] != 0)
+ {
+ s.heap[++s.heap_len] = max_code = n;
+ s.depth[n] = 0;
+ }
+ else
+ {
+ tree[n * 2 + 1] = 0;
+ }
+ }
+
+ // The pkzip format requires that at least one distance code exists,
+ // and that at least one bit should be sent even if there is only one
+ // possible code. So to avoid special checks later on we force at least
+ // two codes of non zero frequency.
+ while (s.heap_len < 2)
+ {
+ node = s.heap[++s.heap_len] = (max_code < 2?++max_code:0);
+ tree[node * 2] = 1;
+ s.depth[node] = 0;
+ s.opt_len--;
+ if (stree != null)
+ s.static_len -= stree[node * 2 + 1];
+ // node is 0 or 1 so it does not have extra bits
+ }
+ this.max_code = max_code;
+
+ // The elements heap[heap_len/2+1 .. heap_len] are leaves of the tree,
+ // establish sub-heaps of increasing lengths:
+
+ for (n = s.heap_len / 2; n >= 1; n--)
+ s.pqdownheap(tree, n);
+
+ // Construct the Huffman tree by repeatedly combining the least two
+ // frequent nodes.
+
+ node = elems; // next internal node of the tree
+ do
+ {
+ // n = node of least frequency
+ n = s.heap[1];
+ s.heap[1] = s.heap[s.heap_len--];
+ s.pqdownheap(tree, 1);
+ m = s.heap[1]; // m = node of next least frequency
+
+ s.heap[--s.heap_max] = n; // keep the nodes sorted by frequency
+ s.heap[--s.heap_max] = m;
+
+ // Create a new node father of n and m
+ tree[node * 2] = unchecked((short) (tree[n * 2] + tree[m * 2]));
+ s.depth[node] = (sbyte) (System.Math.Max((byte) s.depth[n], (byte) s.depth[m]) + 1);
+ tree[n * 2 + 1] = tree[m * 2 + 1] = (short) node;
+
+ // and insert the new node in the heap
+ s.heap[1] = node++;
+ s.pqdownheap(tree, 1);
+ }
+ while (s.heap_len >= 2);
+
+ s.heap[--s.heap_max] = s.heap[1];
+
+ // At this point, the fields freq and dad are set. We can now
+ // generate the bit lengths.
+
+ gen_bitlen(s);
+
+ // The field len is now set, we can generate the bit codes
+ gen_codes(tree, max_code, s.bl_count);
+ }
+
+ // Generate the codes for a given tree and bit counts (which need not be
+ // optimal).
+ // IN assertion: the array bl_count contains the bit length statistics for
+ // the given tree and the field len is set for all tree elements.
+ // OUT assertion: the field code is set for all tree elements of non
+ // zero code length.
+ internal static void gen_codes(short[] tree, int max_code, short[] bl_count)
+ {
+ short[] next_code = new short[InternalConstants.MAX_BITS + 1]; // next code value for each bit length
+ short code = 0; // running code value
+ int bits; // bit index
+ int n; // code index
+
+ // The distribution counts are first used to generate the code values
+ // without bit reversal.
+ for (bits = 1; bits <= InternalConstants.MAX_BITS; bits++)
+ unchecked {
+ next_code[bits] = code = (short) ((code + bl_count[bits - 1]) << 1);
+ }
+
+ // Check that the bit counts in bl_count are consistent. The last code
+ // must be all ones.
+            // Check that the bit counts in bl_count are consistent. The last code
+            // must be all ones.
+            //Assert (code + bl_count[MAX_BITS]-1 == (1<<MAX_BITS)-1,
+            //        "inconsistent bit counts");
+            //Tracev((stderr,"\ngen_codes: max_code %d ", max_code));
+
+            for (n = 0; n <= max_code; n++)
+            {
+                int len = tree[n * 2 + 1];
+                if (len == 0)
+                    continue;
+                // Now reverse the bits
+                tree[n * 2] = unchecked((short)(bi_reverse(next_code[len]++, len)));
+            }
+        }
+
+        // Reverse the first len bits of a code, using straightforward code.
+        // IN assertion: 1 <= len <= 15
+        internal static int bi_reverse(int code, int len)
+        {
+            int res = 0;
+            do
+            {
+                res |= code & 1;
+                code >>= 1; //SharedUtils.URShift(code, 1);
+ res <<= 1;
+ }
+ while (--len > 0);
+ return res >> 1;
+ }
+ }
+}
\ No newline at end of file
diff --git a/src/Hazelcast.Net/Polyfills/ZLib/Zlib.cs b/src/Hazelcast.Net/Polyfills/ZLib/Zlib.cs
new file mode 100644
index 0000000000..dcfe72527b
--- /dev/null
+++ b/src/Hazelcast.Net/Polyfills/ZLib/Zlib.cs
@@ -0,0 +1,546 @@
+// Zlib.cs
+// ------------------------------------------------------------------
+//
+// Copyright (c) 2009-2011 Dino Chiesa and Microsoft Corporation.
+// All rights reserved.
+//
+// This code module is part of DotNetZip, a zipfile class library.
+//
+// ------------------------------------------------------------------
+//
+// This code is licensed under the Microsoft Public License.
+// See the file License.txt for the license details.
+// More info on: http://dotnetzip.codeplex.com
+//
+// ------------------------------------------------------------------
+//
+// Last Saved: <2011-August-03 19:52:28>
+//
+// ------------------------------------------------------------------
+//
+// This module defines classes for ZLIB compression and
+// decompression. This code is derived from the jzlib implementation of
+// zlib, but significantly modified. The object model is not the same,
+// and many of the behaviors are new or different. Nonetheless, in
+// keeping with the license for jzlib, the copyright to that code is
+// included below.
+//
+// ------------------------------------------------------------------
+//
+// The following notice applies to jzlib:
+//
+// Copyright (c) 2000,2001,2002,2003 ymnk, JCraft,Inc. All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// 1. Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in
+// the documentation and/or other materials provided with the distribution.
+//
+// 3. The names of the authors may not be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESSED OR IMPLIED WARRANTIES,
+// INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
+// FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL JCRAFT,
+// INC. OR ANY CONTRIBUTORS TO THIS SOFTWARE BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
+// OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+// EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// -----------------------------------------------------------------------
+//
+// jzlib is based on zlib-1.1.3.
+//
+// The following notice applies to zlib:
+//
+// -----------------------------------------------------------------------
+//
+// Copyright (C) 1995-2004 Jean-loup Gailly and Mark Adler
+//
+// The ZLIB software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would be
+// appreciated but is not required.
+// 2. Altered source versions must be plainly marked as such, and must not be
+// misrepresented as being the original software.
+// 3. This notice may not be removed or altered from any source distribution.
+//
+// Jean-loup Gailly jloup@gzip.org
+// Mark Adler madler@alumni.caltech.edu
+//
+// -----------------------------------------------------------------------
+
+
+
+using System;
+using Interop=System.Runtime.InteropServices;
+
+namespace Ionic.Zlib
+{
+
+ ///
+ /// Describes how to flush the current deflate operation.
+ ///
+ ///
+ /// The different FlushType values are useful when using a Deflate in a streaming application.
+ ///
+ public enum FlushType
+ {
+ /// No flush at all.
+ None = 0,
+
+ /// Closes the current block, but doesn't flush it to
+ /// the output. Used internally only in hypothetical
+ /// scenarios. This was supposed to be removed by Zlib, but it is
+ /// still in use in some edge cases.
+ ///
+ Partial,
+
+ ///
+ /// Use this during compression to specify that all pending output should be
+ /// flushed to the output buffer and the output should be aligned on a byte
+ /// boundary. You might use this in a streaming communication scenario, so that
+ /// the decompressor can get all input data available so far. When using this
+ /// with a ZlibCodec, AvailableBytesIn will be zero after the call if
+ /// enough output space has been provided before the call. Flushing will
+ /// degrade compression and so it should be used only when necessary.
+ ///
+ Sync,
+
+ ///
+ /// Use this during compression to specify that all output should be flushed, as
+ /// with FlushType.Sync, but also, the compression state should be reset
+ /// so that decompression can restart from this point if previous compressed
+ /// data has been damaged or if random access is desired. Using
+ /// FlushType.Full too often can significantly degrade the compression.
+ ///
+ Full,
+
+ /// Signals the end of the compression/decompression stream.
+ Finish,
+ }
+
+
+ ///
+ /// The compression level to be used when using a DeflateStream or ZlibStream with CompressionMode.Compress.
+ ///
+ public enum CompressionLevel
+ {
+ ///
+ /// None means that the data will be simply stored, with no change at all.
+ /// If you are producing ZIPs for use on Mac OSX, be aware that archives produced with CompressionLevel.None
+ /// cannot be opened with the default zip reader. Use a different CompressionLevel.
+ ///
+ None= 0,
+ ///
+ /// Same as None.
+ ///
+ Level0 = 0,
+
+ ///
+ /// The fastest but least effective compression.
+ ///
+ BestSpeed = 1,
+
+ ///
+ /// A synonym for BestSpeed.
+ ///
+ Level1 = 1,
+
+ ///
+ /// A little slower, but better, than level 1.
+ ///
+ Level2 = 2,
+
+ ///
+ /// A little slower, but better, than level 2.
+ ///
+ Level3 = 3,
+
+ ///
+ /// A little slower, but better, than level 3.
+ ///
+ Level4 = 4,
+
+ ///
+ /// A little slower than level 4, but with better compression.
+ ///
+ Level5 = 5,
+
+ ///
+ /// The default compression level, with a good balance of speed and compression efficiency.
+ ///
+ Default = 6,
+ ///
+ /// A synonym for Default.
+ ///
+ Level6 = 6,
+
+ ///
+ /// Pretty good compression!
+ ///
+ Level7 = 7,
+
+ ///
+ /// Better compression than Level7!
+ ///
+ Level8 = 8,
+
+ ///
+ /// The "best" compression, where best means greatest reduction in size of the input data stream.
+ /// This is also the slowest compression.
+ ///
+ BestCompression = 9,
+
+ ///
+ /// A synonym for BestCompression.
+ ///
+ Level9 = 9,
+ }
+
+ ///
+ /// Describes options for how the compression algorithm is executed. Different strategies
+ /// work better on different sorts of data. The strategy parameter can affect the compression
+ /// ratio and the speed of compression but not the correctness of the compression.
+ ///
+ public enum CompressionStrategy
+ {
+ ///
+ /// The default strategy is probably the best for normal data.
+ ///
+ Default = 0,
+
+ ///
+ /// The Filtered strategy is intended to be used most effectively with data produced by a
+ /// filter or predictor. By this definition, filtered data consists mostly of small
+ /// values with a somewhat random distribution. In this case, the compression algorithm
+ /// is tuned to compress them better. The effect of Filtered is to force more Huffman
+ /// coding and less string matching; it is a half-step between Default and HuffmanOnly.
+ ///
+ Filtered = 1,
+
+ ///
+ /// Using HuffmanOnly will force the compressor to do Huffman encoding only, with no
+ /// string matching.
+ ///
+ HuffmanOnly = 2,
+ }
+
+
+ ///
+ /// An enum to specify the direction of transcoding - whether to compress or decompress.
+ ///
+ public enum CompressionMode
+ {
+ ///
+ /// Used to specify that the stream should compress the data.
+ ///
+ Compress= 0,
+ ///
+ /// Used to specify that the stream should decompress the data.
+ ///
+ Decompress = 1,
+ }
+
+
+ ///
+ /// A general purpose exception class for exceptions in the Zlib library.
+ ///
+ [Interop.GuidAttribute("ebc25cf6-9120-4283-b972-0e5520d0000E")]
+ public class ZlibException : System.Exception
+ {
+ ///
+ /// The ZlibException class captures exception information generated
+ /// by the Zlib library.
+ ///
+ public ZlibException()
+ : base()
+ {
+ }
+
+ ///
+ /// This ctor collects a message attached to the exception.
+ ///
+ /// the message for the exception.
+ public ZlibException(System.String s)
+ : base(s)
+ {
+ }
+ }
+
+
+ internal class SharedUtils
+ {
+ ///
+ /// Performs an unsigned bitwise right shift with the specified number
+ ///
+ /// Number to operate on
+ /// Amount of bits to shift
+ /// The resulting number from the shift operation
+ public static int URShift(int number, int bits)
+ {
+ return (int)((uint)number >> bits);
+ }
+
+#if NOT
+ ///
+ /// Performs an unsigned bitwise right shift with the specified number
+ ///
+ /// Number to operate on
+ /// Amount of bits to shift
+ /// The resulting number from the shift operation
+ public static long URShift(long number, int bits)
+ {
+ return (long) ((UInt64)number >> bits);
+ }
+#endif
+
+ ///
+ /// Reads a number of characters from the current source TextReader and writes
+ /// the data to the target array at the specified index.
+ ///
+ ///
+ /// The source TextReader to read from
+ /// Contains the array of characters read from the source TextReader.
+ /// The starting index of the target array.
+ /// The maximum number of characters to read from the source TextReader.
+ ///
+ ///
+ /// The number of characters read. The number will be less than or equal to
+ /// count depending on the data available in the source TextReader. Returns -1
+ /// if the end of the stream is reached.
+ ///
+ public static System.Int32 ReadInput(System.IO.TextReader sourceTextReader, byte[] target, int start, int count)
+ {
+ // Returns 0 bytes if not enough space in target
+ if (target.Length == 0) return 0;
+
+ char[] charArray = new char[target.Length];
+ int bytesRead = sourceTextReader.Read(charArray, start, count);
+
+ // Returns -1 if EOF
+ if (bytesRead == 0) return -1;
+
+ for (int index = start; index < start + bytesRead; index++)
+ target[index] = (byte)charArray[index];
+
+ return bytesRead;
+ }
+
+
+ internal static byte[] ToByteArray(System.String sourceString)
+ {
+ return System.Text.UTF8Encoding.UTF8.GetBytes(sourceString);
+ }
+
+
+ internal static char[] ToCharArray(byte[] byteArray)
+ {
+ return System.Text.UTF8Encoding.UTF8.GetChars(byteArray);
+ }
+ }
+
+ internal static class InternalConstants
+ {
+ internal static readonly int MAX_BITS = 15;
+ internal static readonly int BL_CODES = 19;
+ internal static readonly int D_CODES = 30;
+ internal static readonly int LITERALS = 256;
+ internal static readonly int LENGTH_CODES = 29;
+ internal static readonly int L_CODES = (LITERALS + 1 + LENGTH_CODES);
+
+ // Bit length codes must not exceed MAX_BL_BITS bits
+ internal static readonly int MAX_BL_BITS = 7;
+
+ // repeat previous bit length 3-6 times (2 bits of repeat count)
+ internal static readonly int REP_3_6 = 16;
+
+ // repeat a zero length 3-10 times (3 bits of repeat count)
+ internal static readonly int REPZ_3_10 = 17;
+
+ // repeat a zero length 11-138 times (7 bits of repeat count)
+ internal static readonly int REPZ_11_138 = 18;
+
+ }
+
+ internal sealed class StaticTree
+ {
+ internal static readonly short[] lengthAndLiteralsTreeCodes = new short[] {
+ 12, 8, 140, 8, 76, 8, 204, 8, 44, 8, 172, 8, 108, 8, 236, 8,
+ 28, 8, 156, 8, 92, 8, 220, 8, 60, 8, 188, 8, 124, 8, 252, 8,
+ 2, 8, 130, 8, 66, 8, 194, 8, 34, 8, 162, 8, 98, 8, 226, 8,
+ 18, 8, 146, 8, 82, 8, 210, 8, 50, 8, 178, 8, 114, 8, 242, 8,
+ 10, 8, 138, 8, 74, 8, 202, 8, 42, 8, 170, 8, 106, 8, 234, 8,
+ 26, 8, 154, 8, 90, 8, 218, 8, 58, 8, 186, 8, 122, 8, 250, 8,
+ 6, 8, 134, 8, 70, 8, 198, 8, 38, 8, 166, 8, 102, 8, 230, 8,
+ 22, 8, 150, 8, 86, 8, 214, 8, 54, 8, 182, 8, 118, 8, 246, 8,
+ 14, 8, 142, 8, 78, 8, 206, 8, 46, 8, 174, 8, 110, 8, 238, 8,
+ 30, 8, 158, 8, 94, 8, 222, 8, 62, 8, 190, 8, 126, 8, 254, 8,
+ 1, 8, 129, 8, 65, 8, 193, 8, 33, 8, 161, 8, 97, 8, 225, 8,
+ 17, 8, 145, 8, 81, 8, 209, 8, 49, 8, 177, 8, 113, 8, 241, 8,
+ 9, 8, 137, 8, 73, 8, 201, 8, 41, 8, 169, 8, 105, 8, 233, 8,
+ 25, 8, 153, 8, 89, 8, 217, 8, 57, 8, 185, 8, 121, 8, 249, 8,
+ 5, 8, 133, 8, 69, 8, 197, 8, 37, 8, 165, 8, 101, 8, 229, 8,
+ 21, 8, 149, 8, 85, 8, 213, 8, 53, 8, 181, 8, 117, 8, 245, 8,
+ 13, 8, 141, 8, 77, 8, 205, 8, 45, 8, 173, 8, 109, 8, 237, 8,
+ 29, 8, 157, 8, 93, 8, 221, 8, 61, 8, 189, 8, 125, 8, 253, 8,
+ 19, 9, 275, 9, 147, 9, 403, 9, 83, 9, 339, 9, 211, 9, 467, 9,
+ 51, 9, 307, 9, 179, 9, 435, 9, 115, 9, 371, 9, 243, 9, 499, 9,
+ 11, 9, 267, 9, 139, 9, 395, 9, 75, 9, 331, 9, 203, 9, 459, 9,
+ 43, 9, 299, 9, 171, 9, 427, 9, 107, 9, 363, 9, 235, 9, 491, 9,
+ 27, 9, 283, 9, 155, 9, 411, 9, 91, 9, 347, 9, 219, 9, 475, 9,
+ 59, 9, 315, 9, 187, 9, 443, 9, 123, 9, 379, 9, 251, 9, 507, 9,
+ 7, 9, 263, 9, 135, 9, 391, 9, 71, 9, 327, 9, 199, 9, 455, 9,
+ 39, 9, 295, 9, 167, 9, 423, 9, 103, 9, 359, 9, 231, 9, 487, 9,
+ 23, 9, 279, 9, 151, 9, 407, 9, 87, 9, 343, 9, 215, 9, 471, 9,
+ 55, 9, 311, 9, 183, 9, 439, 9, 119, 9, 375, 9, 247, 9, 503, 9,
+ 15, 9, 271, 9, 143, 9, 399, 9, 79, 9, 335, 9, 207, 9, 463, 9,
+ 47, 9, 303, 9, 175, 9, 431, 9, 111, 9, 367, 9, 239, 9, 495, 9,
+ 31, 9, 287, 9, 159, 9, 415, 9, 95, 9, 351, 9, 223, 9, 479, 9,
+ 63, 9, 319, 9, 191, 9, 447, 9, 127, 9, 383, 9, 255, 9, 511, 9,
+ 0, 7, 64, 7, 32, 7, 96, 7, 16, 7, 80, 7, 48, 7, 112, 7,
+ 8, 7, 72, 7, 40, 7, 104, 7, 24, 7, 88, 7, 56, 7, 120, 7,
+ 4, 7, 68, 7, 36, 7, 100, 7, 20, 7, 84, 7, 52, 7, 116, 7,
+ 3, 8, 131, 8, 67, 8, 195, 8, 35, 8, 163, 8, 99, 8, 227, 8
+ };
+
+ internal static readonly short[] distTreeCodes = new short[] {
+ 0, 5, 16, 5, 8, 5, 24, 5, 4, 5, 20, 5, 12, 5, 28, 5,
+ 2, 5, 18, 5, 10, 5, 26, 5, 6, 5, 22, 5, 14, 5, 30, 5,
+ 1, 5, 17, 5, 9, 5, 25, 5, 5, 5, 21, 5, 13, 5, 29, 5,
+ 3, 5, 19, 5, 11, 5, 27, 5, 7, 5, 23, 5 };
+
+ internal static readonly StaticTree Literals;
+ internal static readonly StaticTree Distances;
+ internal static readonly StaticTree BitLengths;
+
+ internal short[] treeCodes; // static tree or null
+ internal int[] extraBits; // extra bits for each code or null
+ internal int extraBase; // base index for extra_bits
+ internal int elems; // max number of elements in the tree
+ internal int maxLength; // max bit length for the codes
+
+ private StaticTree(short[] treeCodes, int[] extraBits, int extraBase, int elems, int maxLength)
+ {
+ this.treeCodes = treeCodes;
+ this.extraBits = extraBits;
+ this.extraBase = extraBase;
+ this.elems = elems;
+ this.maxLength = maxLength;
+ }
+ static StaticTree()
+ {
+ Literals = new StaticTree(lengthAndLiteralsTreeCodes, Tree.ExtraLengthBits, InternalConstants.LITERALS + 1, InternalConstants.L_CODES, InternalConstants.MAX_BITS);
+ Distances = new StaticTree(distTreeCodes, Tree.ExtraDistanceBits, 0, InternalConstants.D_CODES, InternalConstants.MAX_BITS);
+ BitLengths = new StaticTree(null, Tree.extra_blbits, 0, InternalConstants.BL_CODES, InternalConstants.MAX_BL_BITS);
+ }
+ }
+
+
+
+ ///
+ /// Computes an Adler-32 checksum.
+ ///
+ ///
+ /// The Adler checksum is similar to a CRC checksum, but faster to compute, though less
+ /// reliable. It is used in producing RFC1950 compressed streams. The Adler checksum
+ /// is a required part of the "ZLIB" standard. Applications will almost never need to
+ /// use this class directly.
+ ///
+ ///
+ ///
+ public sealed class Adler
+ {
+ // largest prime smaller than 65536
+ private static readonly uint BASE = 65521;
+ // NMAX is the largest n such that 255n(n+1)/2 + (n+1)(BASE-1) <= 2^32-1
+ private static readonly int NMAX = 5552;
+
+
+#pragma warning disable 3001
+#pragma warning disable 3002
+
+ ///
+ /// Calculates the Adler32 checksum.
+ ///
+ ///
+ ///
+ /// This is used within ZLIB. You probably don't need to use this directly.
+ ///
+ ///
+ ///
+ /// To compute an Adler32 checksum on a byte array:
+ ///
+ /// var adler = Adler.Adler32(0, null, 0, 0);
+ /// adler = Adler.Adler32(adler, buffer, index, length);
+ ///
+ ///
+ public static uint Adler32(uint adler, byte[] buf, int index, int len)
+ {
+ if (buf == null)
+ return 1;
+
+ uint s1 = (uint) (adler & 0xffff);
+ uint s2 = (uint) ((adler >> 16) & 0xffff);
+
+ while (len > 0)
+ {
+ int k = len < NMAX ? len : NMAX;
+ len -= k;
+ while (k >= 16)
+ {
+ //s1 += (buf[index++] & 0xff); s2 += s1;
+ s1 += buf[index++]; s2 += s1;
+ s1 += buf[index++]; s2 += s1;
+ s1 += buf[index++]; s2 += s1;
+ s1 += buf[index++]; s2 += s1;
+ s1 += buf[index++]; s2 += s1;
+ s1 += buf[index++]; s2 += s1;
+ s1 += buf[index++]; s2 += s1;
+ s1 += buf[index++]; s2 += s1;
+ s1 += buf[index++]; s2 += s1;
+ s1 += buf[index++]; s2 += s1;
+ s1 += buf[index++]; s2 += s1;
+ s1 += buf[index++]; s2 += s1;
+ s1 += buf[index++]; s2 += s1;
+ s1 += buf[index++]; s2 += s1;
+ s1 += buf[index++]; s2 += s1;
+ s1 += buf[index++]; s2 += s1;
+ k -= 16;
+ }
+ if (k != 0)
+ {
+ do
+ {
+ s1 += buf[index++];
+ s2 += s1;
+ }
+ while (--k != 0);
+ }
+ s1 %= BASE;
+ s2 %= BASE;
+ }
+ return (uint)((s2 << 16) | s1);
+ }
+#pragma warning restore 3001
+#pragma warning restore 3002
+
+ }
+
+}
\ No newline at end of file
diff --git a/src/Hazelcast.Net/Polyfills/ZLib/ZlibBaseStream.cs b/src/Hazelcast.Net/Polyfills/ZLib/ZlibBaseStream.cs
new file mode 100644
index 0000000000..700ab7ba85
--- /dev/null
+++ b/src/Hazelcast.Net/Polyfills/ZLib/ZlibBaseStream.cs
@@ -0,0 +1,627 @@
+// ZlibBaseStream.cs
+// ------------------------------------------------------------------
+//
+// Copyright (c) 2009 Dino Chiesa and Microsoft Corporation.
+// All rights reserved.
+//
+// This code module is part of DotNetZip, a zipfile class library.
+//
+// ------------------------------------------------------------------
+//
+// This code is licensed under the Microsoft Public License.
+// See the file License.txt for the license details.
+// More info on: http://dotnetzip.codeplex.com
+//
+// ------------------------------------------------------------------
+//
+// last saved (in emacs):
+// Time-stamp: <2011-August-06 21:22:38>
+//
+// ------------------------------------------------------------------
+//
+// This module defines the ZlibBaseStream class, which is an internal
+// base class for DeflateStream, ZlibStream and GZipStream.
+//
+// ------------------------------------------------------------------
+
+using System;
+using System.IO;
+
+namespace Ionic.Zlib
+{
+
+ internal enum ZlibStreamFlavor { ZLIB = 1950, DEFLATE = 1951, GZIP = 1952 }
+
+ internal class ZlibBaseStream : System.IO.Stream
+ {
+ protected internal ZlibCodec _z = null; // deferred init... new ZlibCodec();
+
+ protected internal StreamMode _streamMode = StreamMode.Undefined;
+ protected internal FlushType _flushMode;
+ protected internal ZlibStreamFlavor _flavor;
+ protected internal CompressionMode _compressionMode;
+ protected internal CompressionLevel _level;
+ protected internal bool _leaveOpen;
+ protected internal byte[] _workingBuffer;
+ protected internal int _bufferSize = ZlibConstants.WorkingBufferSizeDefault;
+ protected internal byte[] _buf1 = new byte[1];
+
+ protected internal System.IO.Stream _stream;
+ protected internal CompressionStrategy Strategy = CompressionStrategy.Default;
+
+ // workitem 7159
+ Ionic.Crc.CRC32 crc;
+ protected internal string _GzipFileName;
+ protected internal string _GzipComment;
+ protected internal DateTime _GzipMtime;
+ protected internal int _gzipHeaderByteCount;
+
+ internal int Crc32 { get { if (crc == null) return 0; return crc.Crc32Result; } }
+
+ public ZlibBaseStream(System.IO.Stream stream,
+ CompressionMode compressionMode,
+ CompressionLevel level,
+ ZlibStreamFlavor flavor,
+ bool leaveOpen)
+ : base()
+ {
+ this._flushMode = FlushType.None;
+ //this._workingBuffer = new byte[WORKING_BUFFER_SIZE_DEFAULT];
+ this._stream = stream;
+ this._leaveOpen = leaveOpen;
+ this._compressionMode = compressionMode;
+ this._flavor = flavor;
+ this._level = level;
+ // workitem 7159
+ if (flavor == ZlibStreamFlavor.GZIP)
+ {
+ this.crc = new Ionic.Crc.CRC32();
+ }
+ }
+
+
+ protected internal bool _wantCompress
+ {
+ get
+ {
+ return (this._compressionMode == CompressionMode.Compress);
+ }
+ }
+
+ private ZlibCodec z
+ {
+ get
+ {
+ if (_z == null)
+ {
+ bool wantRfc1950Header = (this._flavor == ZlibStreamFlavor.ZLIB);
+ _z = new ZlibCodec();
+ if (this._compressionMode == CompressionMode.Decompress)
+ {
+ _z.InitializeInflate(wantRfc1950Header);
+ }
+ else
+ {
+ _z.Strategy = Strategy;
+ _z.InitializeDeflate(this._level, wantRfc1950Header);
+ }
+ }
+ return _z;
+ }
+ }
+
+
+
+ private byte[] workingBuffer
+ {
+ get
+ {
+ if (_workingBuffer == null)
+ _workingBuffer = new byte[_bufferSize];
+ return _workingBuffer;
+ }
+ }
+
+
+
+ public override void Write(System.Byte[] buffer, int offset, int count)
+ {
+ // workitem 7159
+ // calculate the CRC on the unccompressed data (before writing)
+ if (crc != null)
+ crc.SlurpBlock(buffer, offset, count);
+
+ if (_streamMode == StreamMode.Undefined)
+ _streamMode = StreamMode.Writer;
+ else if (_streamMode != StreamMode.Writer)
+ throw new ZlibException("Cannot Write after Reading.");
+
+ if (count == 0)
+ return;
+
+ // first reference of z property will initialize the private var _z
+ z.InputBuffer = buffer;
+ _z.NextIn = offset;
+ _z.AvailableBytesIn = count;
+ bool done = false;
+ do
+ {
+ _z.OutputBuffer = workingBuffer;
+ _z.NextOut = 0;
+ _z.AvailableBytesOut = _workingBuffer.Length;
+ int rc = (_wantCompress)
+ ? _z.Deflate(_flushMode)
+ : _z.Inflate(_flushMode);
+ if (rc != ZlibConstants.Z_OK && rc != ZlibConstants.Z_STREAM_END)
+ throw new ZlibException((_wantCompress ? "de" : "in") + "flating: " + _z.Message);
+
+ //if (_workingBuffer.Length - _z.AvailableBytesOut > 0)
+ _stream.Write(_workingBuffer, 0, _workingBuffer.Length - _z.AvailableBytesOut);
+
+ done = _z.AvailableBytesIn == 0 && _z.AvailableBytesOut != 0;
+
+ // If GZIP and de-compress, we're done when 8 bytes remain.
+ if (_flavor == ZlibStreamFlavor.GZIP && !_wantCompress)
+ done = (_z.AvailableBytesIn == 8 && _z.AvailableBytesOut != 0);
+
+ }
+ while (!done);
+ }
+
+
+
+ private void finish()
+ {
+ if (_z == null) return;
+
+ if (_streamMode == StreamMode.Writer)
+ {
+ bool done = false;
+ do
+ {
+ _z.OutputBuffer = workingBuffer;
+ _z.NextOut = 0;
+ _z.AvailableBytesOut = _workingBuffer.Length;
+ int rc = (_wantCompress)
+ ? _z.Deflate(FlushType.Finish)
+ : _z.Inflate(FlushType.Finish);
+
+ if (rc != ZlibConstants.Z_STREAM_END && rc != ZlibConstants.Z_OK)
+ {
+ string verb = (_wantCompress ? "de" : "in") + "flating";
+ if (_z.Message == null)
+ throw new ZlibException(String.Format("{0}: (rc = {1})", verb, rc));
+ else
+ throw new ZlibException(verb + ": " + _z.Message);
+ }
+
+ if (_workingBuffer.Length - _z.AvailableBytesOut > 0)
+ {
+ _stream.Write(_workingBuffer, 0, _workingBuffer.Length - _z.AvailableBytesOut);
+ }
+
+ done = _z.AvailableBytesIn == 0 && _z.AvailableBytesOut != 0;
+ // If GZIP and de-compress, we're done when 8 bytes remain.
+ if (_flavor == ZlibStreamFlavor.GZIP && !_wantCompress)
+ done = (_z.AvailableBytesIn == 8 && _z.AvailableBytesOut != 0);
+
+ }
+ while (!done);
+
+ Flush();
+
+ // workitem 7159
+ if (_flavor == ZlibStreamFlavor.GZIP)
+ {
+ if (_wantCompress)
+ {
+ // Emit the GZIP trailer: CRC32 and size mod 2^32
+ int c1 = crc.Crc32Result;
+ _stream.Write(BitConverter.GetBytes(c1), 0, 4);
+ int c2 = (Int32)(crc.TotalBytesRead & 0x00000000FFFFFFFF);
+ _stream.Write(BitConverter.GetBytes(c2), 0, 4);
+ }
+ else
+ {
+ throw new ZlibException("Writing with decompression is not supported.");
+ }
+ }
+ }
+ // workitem 7159
+ else if (_streamMode == StreamMode.Reader)
+ {
+ if (_flavor == ZlibStreamFlavor.GZIP)
+ {
+ if (!_wantCompress)
+ {
+ // workitem 8501: handle edge case (decompress empty stream)
+ if (_z.TotalBytesOut == 0L)
+ return;
+
+ // Read and potentially verify the GZIP trailer:
+ // CRC32 and size mod 2^32
+ byte[] trailer = new byte[8];
+
+ // workitems 8679 & 12554
+ if (_z.AvailableBytesIn < 8)
+ {
+ // Make sure we have read to the end of the stream
+ Array.Copy(_z.InputBuffer, _z.NextIn, trailer, 0, _z.AvailableBytesIn);
+ int bytesNeeded = 8 - _z.AvailableBytesIn;
+ int bytesRead = _stream.Read(trailer,
+ _z.AvailableBytesIn,
+ bytesNeeded);
+ if (bytesNeeded != bytesRead)
+ {
+ throw new ZlibException(String.Format("Missing or incomplete GZIP trailer. Expected 8 bytes, got {0}.",
+ _z.AvailableBytesIn + bytesRead));
+ }
+ }
+ else
+ {
+ Array.Copy(_z.InputBuffer, _z.NextIn, trailer, 0, trailer.Length);
+ }
+
+ Int32 crc32_expected = BitConverter.ToInt32(trailer, 0);
+ Int32 crc32_actual = crc.Crc32Result;
+ Int32 isize_expected = BitConverter.ToInt32(trailer, 4);
+ Int32 isize_actual = (Int32)(_z.TotalBytesOut & 0x00000000FFFFFFFF);
+
+ if (crc32_actual != crc32_expected)
+ throw new ZlibException(String.Format("Bad CRC32 in GZIP trailer. (actual({0:X8})!=expected({1:X8}))", crc32_actual, crc32_expected));
+
+ if (isize_actual != isize_expected)
+ throw new ZlibException(String.Format("Bad size in GZIP trailer. (actual({0})!=expected({1}))", isize_actual, isize_expected));
+
+ }
+ else
+ {
+ throw new ZlibException("Reading with compression is not supported.");
+ }
+ }
+ }
+ }
+
+
+ private void end()
+ {
+ if (z == null)
+ return;
+ if (_wantCompress)
+ {
+ _z.EndDeflate();
+ }
+ else
+ {
+ _z.EndInflate();
+ }
+ _z = null;
+ }
+
+
+ public override void Close()
+ {
+ if (_stream == null) return;
+ try
+ {
+ finish();
+ }
+ finally
+ {
+ end();
+ if (!_leaveOpen) _stream.Close();
+ _stream = null;
+ }
+ }
+
+ public override void Flush()
+ {
+ _stream.Flush();
+ }
+
+ public override System.Int64 Seek(System.Int64 offset, System.IO.SeekOrigin origin)
+ {
+ throw new NotImplementedException();
+ //_outStream.Seek(offset, origin);
+ }
+ public override void SetLength(System.Int64 value)
+ {
+ _stream.SetLength(value);
+ }
+
+
+#if NOT
+ public int Read()
+ {
+ if (Read(_buf1, 0, 1) == 0)
+ return 0;
+ // calculate CRC after reading
+ if (crc!=null)
+ crc.SlurpBlock(_buf1,0,1);
+ return (_buf1[0] & 0xFF);
+ }
+#endif
+
+ private bool nomoreinput = false;
+
+
+
+ private string ReadZeroTerminatedString()
+ {
+ var list = new System.Collections.Generic.List<byte>();
+ bool done = false;
+ do
+ {
+ // workitem 7740
+ int n = _stream.Read(_buf1, 0, 1);
+ if (n != 1)
+ throw new ZlibException("Unexpected EOF reading GZIP header.");
+ else
+ {
+ if (_buf1[0] == 0)
+ done = true;
+ else
+ list.Add(_buf1[0]);
+ }
+ } while (!done);
+ byte[] a = list.ToArray();
+ return GZipStream.iso8859dash1.GetString(a, 0, a.Length);
+ }
+
+
+ private int _ReadAndValidateGzipHeader()
+ {
+ int totalBytesRead = 0;
+ // read the header on the first read
+ byte[] header = new byte[10];
+ int n = _stream.Read(header, 0, header.Length);
+
+ // workitem 8501: handle edge case (decompress empty stream)
+ if (n == 0)
+ return 0;
+
+ if (n != 10)
+ throw new ZlibException("Not a valid GZIP stream.");
+
+ if (header[0] != 0x1F || header[1] != 0x8B || header[2] != 8)
+ throw new ZlibException("Bad GZIP header.");
+
+ Int32 timet = BitConverter.ToInt32(header, 4);
+ _GzipMtime = GZipStream._unixEpoch.AddSeconds(timet);
+ totalBytesRead += n;
+ if ((header[3] & 0x04) == 0x04)
+ {
+ // read and discard extra field
+ n = _stream.Read(header, 0, 2); // 2-byte length field
+ totalBytesRead += n;
+
+ Int16 extraLength = (Int16)(header[0] + header[1] * 256);
+ byte[] extra = new byte[extraLength];
+ n = _stream.Read(extra, 0, extra.Length);
+ if (n != extraLength)
+ throw new ZlibException("Unexpected end-of-file reading GZIP header.");
+ totalBytesRead += n;
+ }
+ if ((header[3] & 0x08) == 0x08)
+ _GzipFileName = ReadZeroTerminatedString();
+ if ((header[3] & 0x10) == 0x010)
+ _GzipComment = ReadZeroTerminatedString();
+ if ((header[3] & 0x02) == 0x02)
+ Read(_buf1, 0, 1); // CRC16, ignore
+
+ return totalBytesRead;
+ }
+
+
+
+ public override System.Int32 Read(System.Byte[] buffer, System.Int32 offset, System.Int32 count)
+ {
+ // According to MS documentation, any implementation of the IO.Stream.Read function must:
+ // (a) throw an exception if offset & count reference an invalid part of the buffer,
+ // or if count < 0, or if buffer is null
+ // (b) return 0 only upon EOF, or if count = 0
+ // (c) if not EOF, then return at least 1 byte, up to count bytes
+
+ if (_streamMode == StreamMode.Undefined)
+ {
+ if (!this._stream.CanRead) throw new ZlibException("The stream is not readable.");
+ // for the first read, set up some controls.
+ _streamMode = StreamMode.Reader;
+ // (The first reference to _z goes through the private accessor which
+ // may initialize it.)
+ z.AvailableBytesIn = 0;
+ if (_flavor == ZlibStreamFlavor.GZIP)
+ {
+ _gzipHeaderByteCount = _ReadAndValidateGzipHeader();
+ // workitem 8501: handle edge case (decompress empty stream)
+ if (_gzipHeaderByteCount == 0)
+ return 0;
+ }
+ }
+
+ if (_streamMode != StreamMode.Reader)
+ throw new ZlibException("Cannot Read after Writing.");
+
+ if (count == 0) return 0;
+ if (nomoreinput && _wantCompress) return 0; // workitem 8557
+ if (buffer == null) throw new ArgumentNullException("buffer");
+ if (count < 0) throw new ArgumentOutOfRangeException("count");
+ if (offset < buffer.GetLowerBound(0)) throw new ArgumentOutOfRangeException("offset");
+ if ((offset + count) > buffer.GetLength(0)) throw new ArgumentOutOfRangeException("count");
+
+ int rc = 0;
+
+ // set up the output of the deflate/inflate codec:
+ _z.OutputBuffer = buffer;
+ _z.NextOut = offset;
+ _z.AvailableBytesOut = count;
+
+ // This is necessary in case _workingBuffer has been resized. (new byte[])
+ // (The first reference to _workingBuffer goes through the private accessor which
+ // may initialize it.)
+ _z.InputBuffer = workingBuffer;
+
+ do
+ {
+ // need data in _workingBuffer in order to deflate/inflate. Here, we check if we have any.
+ if ((_z.AvailableBytesIn == 0) && (!nomoreinput))
+ {
+ // No data available, so try to Read data from the captive stream.
+ _z.NextIn = 0;
+ _z.AvailableBytesIn = _stream.Read(_workingBuffer, 0, _workingBuffer.Length);
+ if (_z.AvailableBytesIn == 0)
+ nomoreinput = true;
+
+ }
+ // we have data in InputBuffer; now compress or decompress as appropriate
+ rc = (_wantCompress)
+ ? _z.Deflate(_flushMode)
+ : _z.Inflate(_flushMode);
+
+ if (nomoreinput && (rc == ZlibConstants.Z_BUF_ERROR))
+ return 0;
+
+ if (rc != ZlibConstants.Z_OK && rc != ZlibConstants.Z_STREAM_END)
+ throw new ZlibException(String.Format("{0}flating: rc={1} msg={2}", (_wantCompress ? "de" : "in"), rc, _z.Message));
+
+ if ((nomoreinput || rc == ZlibConstants.Z_STREAM_END) && (_z.AvailableBytesOut == count))
+ break; // nothing more to read
+ }
+ //while (_z.AvailableBytesOut == count && rc == ZlibConstants.Z_OK);
+ while (_z.AvailableBytesOut > 0 && !nomoreinput && rc == ZlibConstants.Z_OK);
+
+
+ // workitem 8557
+ // is there more room in output?
+ if (_z.AvailableBytesOut > 0)
+ {
+ if (rc == ZlibConstants.Z_OK && _z.AvailableBytesIn == 0)
+ {
+ // deferred
+ }
+
+ // are we completely done reading?
+ if (nomoreinput)
+ {
+ // and in compression?
+ if (_wantCompress)
+ {
+ // no more input data available; therefore we flush to
+ // try to complete the read
+ rc = _z.Deflate(FlushType.Finish);
+
+ if (rc != ZlibConstants.Z_OK && rc != ZlibConstants.Z_STREAM_END)
+ throw new ZlibException(String.Format("Deflating: rc={0} msg={1}", rc, _z.Message));
+ }
+ }
+ }
+
+
+ rc = (count - _z.AvailableBytesOut);
+
+ // calculate CRC after reading
+ if (crc != null)
+ crc.SlurpBlock(buffer, offset, rc);
+
+ return rc;
+ }
+
+
+
+ public override System.Boolean CanRead
+ {
+ get { return this._stream.CanRead; }
+ }
+
+ public override System.Boolean CanSeek
+ {
+ get { return this._stream.CanSeek; }
+ }
+
+ public override System.Boolean CanWrite
+ {
+ get { return this._stream.CanWrite; }
+ }
+
+ public override System.Int64 Length
+ {
+ get { return _stream.Length; }
+ }
+
+ public override long Position
+ {
+ get { throw new NotImplementedException(); }
+ set { throw new NotImplementedException(); }
+ }
+
+ internal enum StreamMode
+ {
+ Writer,
+ Reader,
+ Undefined,
+ }
+
+
+ public static void CompressString(String s, Stream compressor)
+ {
+ byte[] uncompressed = System.Text.Encoding.UTF8.GetBytes(s);
+ using (compressor)
+ {
+ compressor.Write(uncompressed, 0, uncompressed.Length);
+ }
+ }
+
+ public static void CompressBuffer(byte[] b, Stream compressor)
+ {
+ // workitem 8460
+ using (compressor)
+ {
+ compressor.Write(b, 0, b.Length);
+ }
+ }
+
+ public static String UncompressString(byte[] compressed, Stream decompressor)
+ {
+ // workitem 8460
+ byte[] working = new byte[1024];
+ var encoding = System.Text.Encoding.UTF8;
+ using (var output = new MemoryStream())
+ {
+ using (decompressor)
+ {
+ int n;
+ while ((n = decompressor.Read(working, 0, working.Length)) != 0)
+ {
+ output.Write(working, 0, n);
+ }
+ }
+
+ // reset to allow read from start
+ output.Seek(0, SeekOrigin.Begin);
+ var sr = new StreamReader(output, encoding);
+ return sr.ReadToEnd();
+ }
+ }
+
+ public static byte[] UncompressBuffer(byte[] compressed, Stream decompressor)
+ {
+ // workitem 8460
+ byte[] working = new byte[1024];
+ using (var output = new MemoryStream())
+ {
+ using (decompressor)
+ {
+ int n;
+ while ((n = decompressor.Read(working, 0, working.Length)) != 0)
+ {
+ output.Write(working, 0, n);
+ }
+ }
+ return output.ToArray();
+ }
+ }
+
+ }
+
+
+}
diff --git a/src/Hazelcast.Net/Polyfills/ZLib/ZlibCodec.cs b/src/Hazelcast.Net/Polyfills/ZLib/ZlibCodec.cs
new file mode 100644
index 0000000000..ab0abcf3d3
--- /dev/null
+++ b/src/Hazelcast.Net/Polyfills/ZLib/ZlibCodec.cs
@@ -0,0 +1,717 @@
+// ZlibCodec.cs
+// ------------------------------------------------------------------
+//
+// Copyright (c) 2009 Dino Chiesa and Microsoft Corporation.
+// All rights reserved.
+//
+// This code module is part of DotNetZip, a zipfile class library.
+//
+// ------------------------------------------------------------------
+//
+// This code is licensed under the Microsoft Public License.
+// See the file License.txt for the license details.
+// More info on: http://dotnetzip.codeplex.com
+//
+// ------------------------------------------------------------------
+//
+// last saved (in emacs):
+// Time-stamp: <2009-November-03 15:40:51>
+//
+// ------------------------------------------------------------------
+//
+// This module defines a Codec for ZLIB compression and
+// decompression. This code extends code that was based the jzlib
+// implementation of zlib, but this code is completely novel. The codec
+// class is new, and encapsulates some behaviors that are new, and some
+// that were present in other classes in the jzlib code base. In
+// keeping with the license for jzlib, the copyright to the jzlib code
+// is included below.
+//
+// ------------------------------------------------------------------
+//
+// Copyright (c) 2000,2001,2002,2003 ymnk, JCraft,Inc. All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// 1. Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in
+// the documentation and/or other materials provided with the distribution.
+//
+// 3. The names of the authors may not be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESSED OR IMPLIED WARRANTIES,
+// INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
+// FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL JCRAFT,
+// INC. OR ANY CONTRIBUTORS TO THIS SOFTWARE BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
+// OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+// EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// -----------------------------------------------------------------------
+//
+// This program is based on zlib-1.1.3; credit to authors
+// Jean-loup Gailly(jloup@gzip.org) and Mark Adler(madler@alumni.caltech.edu)
+// and contributors of zlib.
+//
+// -----------------------------------------------------------------------
+
+
+using System;
+using Interop=System.Runtime.InteropServices;
+
+namespace Ionic.Zlib
+{
+ ///
+ /// Encoder and Decoder for ZLIB and DEFLATE (IETF RFC1950 and RFC1951).
+ ///
+ ///
+ ///
+ /// This class compresses and decompresses data according to the Deflate algorithm
+ /// and optionally, the ZLIB format, as documented in RFC 1950 - ZLIB and RFC 1951 - DEFLATE.
+ ///
+ [Interop.GuidAttribute("ebc25cf6-9120-4283-b972-0e5520d0000D")]
+ [Interop.ComVisible(true)]
+#if !NETCF
+ [Interop.ClassInterface(Interop.ClassInterfaceType.AutoDispatch)]
+#endif
+ sealed public class ZlibCodec
+ {
+ ///
+ /// The buffer from which data is taken.
+ ///
+ public byte[] InputBuffer;
+
+ ///
+ /// An index into the InputBuffer array, indicating where to start reading.
+ ///
+ public int NextIn;
+
+ ///
+ /// The number of bytes available in the InputBuffer, starting at NextIn.
+ ///
+ ///
+ /// Generally you should set this to InputBuffer.Length before the first Inflate() or Deflate() call.
+ /// The class will update this number as calls to Inflate/Deflate are made.
+ ///
+ public int AvailableBytesIn;
+
+ ///
+ /// Total number of bytes read so far, through all calls to Inflate()/Deflate().
+ ///
+ public long TotalBytesIn;
+
+ ///
+ /// Buffer to store output data.
+ ///
+ public byte[] OutputBuffer;
+
+ ///
+ /// An index into the OutputBuffer array, indicating where to start writing.
+ ///
+ public int NextOut;
+
+ ///
+ /// The number of bytes available in the OutputBuffer, starting at NextOut.
+ ///
+ ///
+ /// Generally you should set this to OutputBuffer.Length before the first Inflate() or Deflate() call.
+ /// The class will update this number as calls to Inflate/Deflate are made.
+ ///
+ public int AvailableBytesOut;
+
+ ///
+ /// Total number of bytes written to the output so far, through all calls to Inflate()/Deflate().
+ ///
+ public long TotalBytesOut;
+
+ ///
+ /// used for diagnostics, when something goes wrong!
+ ///
+ public System.String Message;
+
+ internal DeflateManager dstate;
+ internal InflateManager istate;
+
+ internal uint _Adler32;
+
+ ///
+ /// The compression level to use in this codec. Useful only in compression mode.
+ ///
+ public CompressionLevel CompressLevel = CompressionLevel.Default;
+
+ ///
+ /// The number of Window Bits to use.
+ ///
+ ///
+ /// This gauges the size of the sliding window, and hence the
+ /// compression effectiveness as well as memory consumption. It's best to just leave this
+ /// setting alone if you don't know what it is. The maximum value is 15 bits, which implies
+ /// a 32k window.
+ ///
+ public int WindowBits = ZlibConstants.WindowBitsDefault;
+
+ ///
+ /// The compression strategy to use.
+ ///
+ ///
+ /// This is only effective in compression. The theory offered by ZLIB is that different
+ /// strategies could potentially produce significant differences in compression behavior
+ /// for different data sets. Unfortunately I don't have any good recommendations for how
+ /// to set it differently. When I tested changing the strategy I got minimally different
+ /// compression performance. It's best to leave this property alone if you don't have a
+ /// good feel for it. Or, you may want to produce a test harness that runs through the
+ /// different strategy options and evaluates them on different file types. If you do that,
+ /// let me know your results.
+ ///
+ public CompressionStrategy Strategy = CompressionStrategy.Default;
+
+
+ ///
+ /// The Adler32 checksum on the data transferred through the codec so far. You probably don't need to look at this.
+ ///
+ public int Adler32 { get { return (int)_Adler32; } }
+
+
+ ///
+ /// Create a ZlibCodec.
+ ///
+ ///
+ /// If you use this default constructor, you will later have to explicitly call
+ /// InitializeInflate() or InitializeDeflate() before using the ZlibCodec to compress
+ /// or decompress.
+ ///
+ public ZlibCodec() { }
+
+ ///
+ /// Create a ZlibCodec that either compresses or decompresses.
+ ///
+ ///
+ /// Indicates whether the codec should compress (deflate) or decompress (inflate).
+ ///
+ public ZlibCodec(CompressionMode mode)
+ {
+ if (mode == CompressionMode.Compress)
+ {
+ int rc = InitializeDeflate();
+ if (rc != ZlibConstants.Z_OK) throw new ZlibException("Cannot initialize for deflate.");
+ }
+ else if (mode == CompressionMode.Decompress)
+ {
+ int rc = InitializeInflate();
+ if (rc != ZlibConstants.Z_OK) throw new ZlibException("Cannot initialize for inflate.");
+ }
+ else throw new ZlibException("Invalid ZlibStreamFlavor.");
+ }
+
+ ///
+ /// Initialize the inflation state.
+ ///
+ ///
+ /// It is not necessary to call this before using the ZlibCodec to inflate data;
+ /// It is implicitly called when you call the constructor.
+ ///
+ /// Z_OK if everything goes well.
+ public int InitializeInflate()
+ {
+ return InitializeInflate(this.WindowBits);
+ }
+
+ ///
+ /// Initialize the inflation state with an explicit flag to
+ /// govern the handling of RFC1950 header bytes.
+ ///
+ ///
+ ///
+ /// By default, the ZLIB header defined in RFC 1950 is expected. If
+ /// you want to read a zlib stream you should specify true for
+ /// expectRfc1950Header. If you have a deflate stream, you will want to specify
+ /// false. It is only necessary to invoke this initializer explicitly if you
+ /// want to specify false.
+ ///
+ ///
+ /// whether to expect an RFC1950 header byte
+ /// pair when reading the stream of data to be inflated.
+ ///
+ /// Z_OK if everything goes well.
+ public int InitializeInflate(bool expectRfc1950Header)
+ {
+ return InitializeInflate(this.WindowBits, expectRfc1950Header);
+ }
+
+ ///
+ /// Initialize the ZlibCodec for inflation, with the specified number of window bits.
+ ///
+ /// The number of window bits to use. If you need to ask what that is,
+ /// then you shouldn't be calling this initializer.
+ /// Z_OK if all goes well.
+ public int InitializeInflate(int windowBits)
+ {
+ this.WindowBits = windowBits;
+ return InitializeInflate(windowBits, true);
+ }
+
+ ///
+ /// Initialize the inflation state with an explicit flag to govern the handling of
+ /// RFC1950 header bytes.
+ ///
+ ///
+ ///
+ /// If you want to read a zlib stream you should specify true for
+ /// expectRfc1950Header. In this case, the library will expect to find a ZLIB
+ /// header, as defined in RFC
+ /// 1950, in the compressed stream. If you will be reading a DEFLATE or
+ /// GZIP stream, which does not have such a header, you will want to specify
+ /// false.
+ ///
+ ///
+ /// whether to expect an RFC1950 header byte pair when reading
+ /// the stream of data to be inflated.
+ /// The number of window bits to use. If you need to ask what that is,
+ /// then you shouldn't be calling this initializer.
+ /// Z_OK if everything goes well.
+ public int InitializeInflate(int windowBits, bool expectRfc1950Header)
+ {
+ this.WindowBits = windowBits;
+ if (dstate != null) throw new ZlibException("You may not call InitializeInflate() after calling InitializeDeflate().");
+ istate = new InflateManager(expectRfc1950Header);
+ return istate.Initialize(this, windowBits);
+ }
+
+ ///
+ /// Inflate the data in the InputBuffer, placing the result in the OutputBuffer.
+ ///
+ ///
+ /// You must have set InputBuffer and OutputBuffer, NextIn and NextOut, and AvailableBytesIn and
+ /// AvailableBytesOut before calling this method.
+ ///
+ ///
+ ///
+ /// private void InflateBuffer()
+ /// {
+ /// int bufferSize = 1024;
+ /// byte[] buffer = new byte[bufferSize];
+ /// ZlibCodec decompressor = new ZlibCodec();
+ ///
+ /// Console.WriteLine("\n============================================");
+ /// Console.WriteLine("Size of Buffer to Inflate: {0} bytes.", CompressedBytes.Length);
+ /// MemoryStream ms = new MemoryStream(DecompressedBytes);
+ ///
+ /// int rc = decompressor.InitializeInflate();
+ ///
+ /// decompressor.InputBuffer = CompressedBytes;
+ /// decompressor.NextIn = 0;
+ /// decompressor.AvailableBytesIn = CompressedBytes.Length;
+ ///
+ /// decompressor.OutputBuffer = buffer;
+ ///
+ /// // pass 1: inflate
+ /// do
+ /// {
+ /// decompressor.NextOut = 0;
+ /// decompressor.AvailableBytesOut = buffer.Length;
+ /// rc = decompressor.Inflate(FlushType.None);
+ ///
+ /// if (rc != ZlibConstants.Z_OK && rc != ZlibConstants.Z_STREAM_END)
+ /// throw new Exception("inflating: " + decompressor.Message);
+ ///
+ /// ms.Write(decompressor.OutputBuffer, 0, buffer.Length - decompressor.AvailableBytesOut);
+ /// }
+ /// while (decompressor.AvailableBytesIn > 0 || decompressor.AvailableBytesOut == 0);
+ ///
+ /// // pass 2: finish and flush
+ /// do
+ /// {
+ /// decompressor.NextOut = 0;
+ /// decompressor.AvailableBytesOut = buffer.Length;
+ /// rc = decompressor.Inflate(FlushType.Finish);
+ ///
+ /// if (rc != ZlibConstants.Z_STREAM_END && rc != ZlibConstants.Z_OK)
+ /// throw new Exception("inflating: " + decompressor.Message);
+ ///
+ /// if (buffer.Length - decompressor.AvailableBytesOut > 0)
+ /// ms.Write(buffer, 0, buffer.Length - decompressor.AvailableBytesOut);
+ /// }
+ /// while (decompressor.AvailableBytesIn > 0 || decompressor.AvailableBytesOut == 0);
+ ///
+ /// decompressor.EndInflate();
+ /// }
+ ///
+ ///
+ ///
+ /// The flush to use when inflating.
+ /// Z_OK if everything goes well.
+ public int Inflate(FlushType flush)
+ {
+ if (istate == null)
+ throw new ZlibException("No Inflate State!");
+ return istate.Inflate(flush);
+ }
+
+
+ ///
+ /// Ends an inflation session.
+ ///
+ ///
+ /// Call this after successively calling Inflate(). This will cause all buffers to be flushed.
+ /// After calling this you cannot call Inflate() without a intervening call to one of the
+ /// InitializeInflate() overloads.
+ ///
+ /// Z_OK if everything goes well.
+ public int EndInflate()
+ {
+ if (istate == null)
+ throw new ZlibException("No Inflate State!");
+ int ret = istate.End();
+ istate = null;
+ return ret;
+ }
+
+ ///
+ /// I don't know what this does!
+ ///
+ /// Z_OK if everything goes well.
+ public int SyncInflate()
+ {
+ if (istate == null)
+ throw new ZlibException("No Inflate State!");
+ return istate.Sync();
+ }
+
+ ///
+ /// Initialize the ZlibCodec for deflation operation.
+ ///
+ ///
+ /// The codec will use the MAX window bits and the default level of compression.
+ ///
+ ///
+ ///
+ /// int bufferSize = 40000;
+ /// byte[] CompressedBytes = new byte[bufferSize];
+ /// byte[] DecompressedBytes = new byte[bufferSize];
+ ///
+ /// ZlibCodec compressor = new ZlibCodec();
+ ///
+ /// compressor.InitializeDeflate(CompressionLevel.Default);
+ ///
+ /// compressor.InputBuffer = System.Text.ASCIIEncoding.ASCII.GetBytes(TextToCompress);
+ /// compressor.NextIn = 0;
+ /// compressor.AvailableBytesIn = compressor.InputBuffer.Length;
+ ///
+ /// compressor.OutputBuffer = CompressedBytes;
+ /// compressor.NextOut = 0;
+ /// compressor.AvailableBytesOut = CompressedBytes.Length;
+ ///
+ /// while (compressor.TotalBytesIn != TextToCompress.Length && compressor.TotalBytesOut < bufferSize)
+ /// {
+ /// compressor.Deflate(FlushType.None);
+ /// }
+ ///
+ /// while (true)
+ /// {
+ /// int rc= compressor.Deflate(FlushType.Finish);
+ /// if (rc == ZlibConstants.Z_STREAM_END) break;
+ /// }
+ ///
+ /// compressor.EndDeflate();
+ ///
+ ///
+ ///
+ /// Z_OK if all goes well. You generally don't need to check the return code.
+ public int InitializeDeflate()
+ {
+ return _InternalInitializeDeflate(true);
+ }
+
+ ///
+ /// Initialize the ZlibCodec for deflation operation, using the specified CompressionLevel.
+ ///
+ ///
+ /// The codec will use the maximum window bits (15) and the specified
+ /// CompressionLevel. It will emit a ZLIB stream as it compresses.
+ ///
+ /// The compression level for the codec.
+ /// Z_OK if all goes well.
+ public int InitializeDeflate(CompressionLevel level)
+ {
+ this.CompressLevel = level;
+ return _InternalInitializeDeflate(true);
+ }
+
+
+ ///
+ /// Initialize the ZlibCodec for deflation operation, using the specified CompressionLevel,
+ /// and the explicit flag governing whether to emit an RFC1950 header byte pair.
+ ///
+ ///
+ /// The codec will use the maximum window bits (15) and the specified CompressionLevel.
+ /// If you want to generate a zlib stream, you should specify true for
+ /// wantRfc1950Header. In this case, the library will emit a ZLIB
+ /// header, as defined in RFC
+ /// 1950, in the compressed stream.
+ ///
+ /// The compression level for the codec.
+ /// whether to emit an initial RFC1950 byte pair in the compressed stream.
+ /// Z_OK if all goes well.
+ public int InitializeDeflate(CompressionLevel level, bool wantRfc1950Header)
+ {
+ this.CompressLevel = level;
+ return _InternalInitializeDeflate(wantRfc1950Header);
+ }
+
+
+ ///
+ /// Initialize the ZlibCodec for deflation operation, using the specified CompressionLevel,
+ /// and the specified number of window bits.
+ ///
+ ///
+ /// The codec will use the specified number of window bits and the specified CompressionLevel.
+ ///
+ /// The compression level for the codec.
+ /// the number of window bits to use. If you don't know what this means, don't use this method.
+ /// Z_OK if all goes well.
+ public int InitializeDeflate(CompressionLevel level, int bits)
+ {
+ this.CompressLevel = level;
+ this.WindowBits = bits;
+ return _InternalInitializeDeflate(true);
+ }
+
+ ///
+ /// Initialize the ZlibCodec for deflation operation, using the specified
+ /// CompressionLevel, the specified number of window bits, and the explicit flag
+ /// governing whether to emit an RFC1950 header byte pair.
+ ///
+ ///
+ /// The compression level for the codec.
+ /// whether to emit an initial RFC1950 byte pair in the compressed stream.
+ /// the number of window bits to use. If you don't know what this means, don't use this method.
+ /// Z_OK if all goes well.
+ public int InitializeDeflate(CompressionLevel level, int bits, bool wantRfc1950Header)
+ {
+ this.CompressLevel = level;
+ this.WindowBits = bits;
+ return _InternalInitializeDeflate(wantRfc1950Header);
+ }
+
+ private int _InternalInitializeDeflate(bool wantRfc1950Header)
+ {
+ if (istate != null) throw new ZlibException("You may not call InitializeDeflate() after calling InitializeInflate().");
+ dstate = new DeflateManager();
+ dstate.WantRfc1950HeaderBytes = wantRfc1950Header;
+
+ return dstate.Initialize(this, this.CompressLevel, this.WindowBits, this.Strategy);
+ }
+
+ ///
+ /// Deflate one batch of data.
+ ///
+ ///
+ /// You must have set InputBuffer and OutputBuffer before calling this method.
+ ///
+ ///
+ ///
+ /// private void DeflateBuffer(CompressionLevel level)
+ /// {
+ /// int bufferSize = 1024;
+ /// byte[] buffer = new byte[bufferSize];
+ /// ZlibCodec compressor = new ZlibCodec();
+ ///
+ /// Console.WriteLine("\n============================================");
+ /// Console.WriteLine("Size of Buffer to Deflate: {0} bytes.", UncompressedBytes.Length);
+ /// MemoryStream ms = new MemoryStream();
+ ///
+ /// int rc = compressor.InitializeDeflate(level);
+ ///
+ /// compressor.InputBuffer = UncompressedBytes;
+ /// compressor.NextIn = 0;
+ /// compressor.AvailableBytesIn = UncompressedBytes.Length;
+ ///
+ /// compressor.OutputBuffer = buffer;
+ ///
+ /// // pass 1: deflate
+ /// do
+ /// {
+ /// compressor.NextOut = 0;
+ /// compressor.AvailableBytesOut = buffer.Length;
+ /// rc = compressor.Deflate(FlushType.None);
+ ///
+ /// if (rc != ZlibConstants.Z_OK && rc != ZlibConstants.Z_STREAM_END)
+ /// throw new Exception("deflating: " + compressor.Message);
+ ///
+ /// ms.Write(compressor.OutputBuffer, 0, buffer.Length - compressor.AvailableBytesOut);
+ /// }
+ /// while (compressor.AvailableBytesIn > 0 || compressor.AvailableBytesOut == 0);
+ ///
+ /// // pass 2: finish and flush
+ /// do
+ /// {
+ /// compressor.NextOut = 0;
+ /// compressor.AvailableBytesOut = buffer.Length;
+ /// rc = compressor.Deflate(FlushType.Finish);
+ ///
+ /// if (rc != ZlibConstants.Z_STREAM_END && rc != ZlibConstants.Z_OK)
+ /// throw new Exception("deflating: " + compressor.Message);
+ ///
+ /// if (buffer.Length - compressor.AvailableBytesOut > 0)
+ /// ms.Write(buffer, 0, buffer.Length - compressor.AvailableBytesOut);
+ /// }
+ /// while (compressor.AvailableBytesIn > 0 || compressor.AvailableBytesOut == 0);
+ ///
+ /// compressor.EndDeflate();
+ ///
+ /// ms.Seek(0, SeekOrigin.Begin);
+ /// CompressedBytes = new byte[compressor.TotalBytesOut];
+ /// ms.Read(CompressedBytes, 0, CompressedBytes.Length);
+ /// }
+ ///
+ ///
+ /// whether to flush all data as you deflate. Generally you will want to
+ /// use Z_NO_FLUSH here, in a series of calls to Deflate(), and then call EndDeflate() to
+ /// flush everything.
+ ///
+ /// Z_OK if all goes well.
+ public int Deflate(FlushType flush)
+ {
+ if (dstate == null)
+ throw new ZlibException("No Deflate State!");
+ return dstate.Deflate(flush);
+ }
+
+ ///
+ /// End a deflation session.
+ ///
+ ///
+ /// Call this after making a series of one or more calls to Deflate(). All buffers are flushed.
+ ///
+ /// Z_OK if all goes well.
+ public int EndDeflate()
+ {
+ if (dstate == null)
+ throw new ZlibException("No Deflate State!");
+ // TODO: dinoch Tue, 03 Nov 2009 15:39 (test this)
+ //int ret = dstate.End();
+ dstate = null;
+ return ZlibConstants.Z_OK; //ret;
+ }
+
+ ///
+ /// Reset a codec for another deflation session.
+ ///
+ ///
+ /// Call this to reset the deflation state. For example if a thread is deflating
+ /// non-consecutive blocks, you can call Reset() after the Deflate(Sync) of the first
+ /// block and before the next Deflate(None) of the second block.
+ ///
+ /// Z_OK if all goes well.
+ public void ResetDeflate()
+ {
+ if (dstate == null)
+ throw new ZlibException("No Deflate State!");
+ dstate.Reset();
+ }
+
+
+ ///
+ /// Set the CompressionStrategy and CompressionLevel for a deflation session.
+ ///
+ /// the level of compression to use.
+ /// the strategy to use for compression.
+ /// Z_OK if all goes well.
+ public int SetDeflateParams(CompressionLevel level, CompressionStrategy strategy)
+ {
+ if (dstate == null)
+ throw new ZlibException("No Deflate State!");
+ return dstate.SetParams(level, strategy);
+ }
+
+
+ ///
+ /// Set the dictionary to be used for either Inflation or Deflation.
+ ///
+ /// The dictionary bytes to use.
+ /// Z_OK if all goes well.
+ public int SetDictionary(byte[] dictionary)
+ {
+ if (istate != null)
+ return istate.SetDictionary(dictionary);
+
+ if (dstate != null)
+ return dstate.SetDictionary(dictionary);
+
+ throw new ZlibException("No Inflate or Deflate state!");
+ }
+
+ // Flush as much pending output as possible. All deflate() output goes
+ // through this function so some applications may wish to modify it
+ // to avoid allocating a large strm->next_out buffer and copying into it.
+ // (See also read_buf()).
+ internal void flush_pending()
+ {
+ int len = dstate.pendingCount;
+
+ if (len > AvailableBytesOut)
+ len = AvailableBytesOut;
+ if (len == 0)
+ return;
+
+ if (dstate.pending.Length <= dstate.nextPending ||
+ OutputBuffer.Length <= NextOut ||
+ dstate.pending.Length < (dstate.nextPending + len) ||
+ OutputBuffer.Length < (NextOut + len))
+ {
+ throw new ZlibException(String.Format("Invalid State. (pending.Length={0}, pendingCount={1})",
+ dstate.pending.Length, dstate.pendingCount));
+ }
+
+ Array.Copy(dstate.pending, dstate.nextPending, OutputBuffer, NextOut, len);
+
+ NextOut += len;
+ dstate.nextPending += len;
+ TotalBytesOut += len;
+ AvailableBytesOut -= len;
+ dstate.pendingCount -= len;
+ if (dstate.pendingCount == 0)
+ {
+ dstate.nextPending = 0;
+ }
+ }
+
+ // Read a new buffer from the current input stream, update the adler32
+ // and total number of bytes read. All deflate() input goes through
+ // this function so some applications may wish to modify it to avoid
+ // allocating a large strm->next_in buffer and copying from it.
+ // (See also flush_pending()).
+ internal int read_buf(byte[] buf, int start, int size)
+ {
+ int len = AvailableBytesIn;
+
+ if (len > size)
+ len = size;
+ if (len == 0)
+ return 0;
+
+ AvailableBytesIn -= len;
+
+ if (dstate.WantRfc1950HeaderBytes)
+ {
+ _Adler32 = Adler.Adler32(_Adler32, InputBuffer, NextIn, len);
+ }
+ Array.Copy(InputBuffer, NextIn, buf, start, len);
+ NextIn += len;
+ TotalBytesIn += len;
+ return len;
+ }
+
+ }
+}
\ No newline at end of file
diff --git a/src/Hazelcast.Net/Polyfills/ZLib/ZlibConstants.cs b/src/Hazelcast.Net/Polyfills/ZLib/ZlibConstants.cs
new file mode 100644
index 0000000000..59ae7300aa
--- /dev/null
+++ b/src/Hazelcast.Net/Polyfills/ZLib/ZlibConstants.cs
@@ -0,0 +1,128 @@
+// ZlibConstants.cs
+// ------------------------------------------------------------------
+//
+// Copyright (c) 2009 Dino Chiesa and Microsoft Corporation.
+// All rights reserved.
+//
+// This code module is part of DotNetZip, a zipfile class library.
+//
+// ------------------------------------------------------------------
+//
+// This code is licensed under the Microsoft Public License.
+// See the file License.txt for the license details.
+// More info on: http://dotnetzip.codeplex.com
+//
+// ------------------------------------------------------------------
+//
+// last saved (in emacs):
+// Time-stamp: <2009-November-03 18:50:19>
+//
+// ------------------------------------------------------------------
+//
+// This module defines constants used by the zlib class library. This
+// code is derived from the jzlib implementation of zlib, but
+// significantly modified. In keeping with the license for jzlib, the
+// copyright to that code is included here.
+//
+// ------------------------------------------------------------------
+//
+// Copyright (c) 2000,2001,2002,2003 ymnk, JCraft,Inc. All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// 1. Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in
+// the documentation and/or other materials provided with the distribution.
+//
+// 3. The names of the authors may not be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESSED OR IMPLIED WARRANTIES,
+// INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
+// FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL JCRAFT,
+// INC. OR ANY CONTRIBUTORS TO THIS SOFTWARE BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
+// OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+// EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// -----------------------------------------------------------------------
+//
+// This program is based on zlib-1.1.3; credit to authors
+// Jean-loup Gailly(jloup@gzip.org) and Mark Adler(madler@alumni.caltech.edu)
+// and contributors of zlib.
+//
+// -----------------------------------------------------------------------
+
+
+using System;
+
+namespace Ionic.Zlib
+{
+ ///
+ /// A bunch of constants used in the Zlib interface.
+ ///
+ public static class ZlibConstants
+ {
+ ///
+ /// The maximum number of window bits for the Deflate algorithm.
+ ///
+ public const int WindowBitsMax = 15; // 32K LZ77 window
+
+ ///
+ /// The default number of window bits for the Deflate algorithm.
+ ///
+ public const int WindowBitsDefault = WindowBitsMax;
+
+ ///
+ /// indicates everything is A-OK
+ ///
+ public const int Z_OK = 0;
+
+ ///
+ /// Indicates that the last operation reached the end of the stream.
+ ///
+ public const int Z_STREAM_END = 1;
+
+ ///
+ /// The operation ended in need of a dictionary.
+ ///
+ public const int Z_NEED_DICT = 2;
+
+ ///
+ /// There was an error with the stream - not enough data, not open and readable, etc.
+ ///
+ public const int Z_STREAM_ERROR = -2;
+
+ ///
+ /// There was an error with the data - not enough data, bad data, etc.
+ ///
+ public const int Z_DATA_ERROR = -3;
+
+ ///
+ /// There was an error with the working buffer.
+ ///
+ public const int Z_BUF_ERROR = -5;
+
+ ///
+ /// The size of the working buffer used in the ZlibCodec class. Defaults to 8192 bytes.
+ ///
+#if NETCF
+ public const int WorkingBufferSizeDefault = 8192;
+#else
+ public const int WorkingBufferSizeDefault = 16384;
+#endif
+ ///
+ /// The minimum size of the working buffer used in the ZlibCodec class. Currently it is 128 bytes.
+ ///
+ public const int WorkingBufferSizeMin = 1024;
+ }
+
+}
+
diff --git a/src/Hazelcast.Net/Polyfills/ZLib/ZlibStream.cs b/src/Hazelcast.Net/Polyfills/ZLib/ZlibStream.cs
new file mode 100644
index 0000000000..88ddca9d19
--- /dev/null
+++ b/src/Hazelcast.Net/Polyfills/ZLib/ZlibStream.cs
@@ -0,0 +1,725 @@
+// ZlibStream.cs
+// ------------------------------------------------------------------
+//
+// Copyright (c) 2009 Dino Chiesa and Microsoft Corporation.
+// All rights reserved.
+//
+// This code module is part of DotNetZip, a zipfile class library.
+//
+// ------------------------------------------------------------------
+//
+// This code is licensed under the Microsoft Public License.
+// See the file License.txt for the license details.
+// More info on: http://dotnetzip.codeplex.com
+//
+// ------------------------------------------------------------------
+//
+// last saved (in emacs):
+// Time-stamp: <2011-July-31 14:53:33>
+//
+// ------------------------------------------------------------------
+//
+// This module defines the ZlibStream class, which is similar in idea to
+// the System.IO.Compression.DeflateStream and
+// System.IO.Compression.GZipStream classes in the .NET BCL.
+//
+// ------------------------------------------------------------------
+
+using System;
+using System.IO;
+
+namespace Ionic.Zlib
+{
+
+ ///
+ /// Represents a Zlib stream for compression or decompression.
+ ///
+ ///
+ ///
+ ///
+ /// The ZlibStream is a Decorator on a . It adds ZLIB compression or decompression to any
+ /// stream.
+ ///
+ ///
+ /// Using this stream, applications can compress or decompress data via
+ /// stream Read() and Write() operations. Either compresssion or
+ /// decompression can occur through either reading or writing. The compression
+ /// format used is ZLIB, which is documented in IETF RFC 1950, "ZLIB Compressed
+ /// Data Format Specification version 3.3". This implementation of ZLIB always uses
+ /// DEFLATE as the compression method. (see IETF RFC 1951, "DEFLATE
+ /// Compressed Data Format Specification version 1.3.")
+ ///
+ ///
+ /// The ZLIB format allows for varying compression methods, window sizes, and dictionaries.
+ /// This implementation always uses the DEFLATE compression method, a preset dictionary,
+ /// and 15 window bits by default.
+ ///
+ ///
+ ///
+ /// This class is similar to , except that it adds the
+ /// RFC1950 header and trailer bytes to a compressed stream when compressing, or expects
+ /// the RFC1950 header and trailer bytes when decompressing. It is also similar to the
+ /// .
+ ///
+ ///
+ ///
+ ///
+ public class ZlibStream : System.IO.Stream
+ {
+ internal ZlibBaseStream _baseStream;
+ bool _disposed;
+
+ ///
+ /// Create a ZlibStream using the specified CompressionMode.
+ ///
+ ///
+ ///
+ ///
+ /// When mode is CompressionMode.Compress, the ZlibStream
+ /// will use the default compression level. The "captive" stream will be
+ /// closed when the ZlibStream is closed.
+ ///
+ ///
+ ///
+ ///
+ ///
+ /// This example uses a ZlibStream to compress a file, and writes the
+ /// compressed data to another file.
+ ///
+ /// using (System.IO.Stream input = System.IO.File.OpenRead(fileToCompress))
+ /// {
+ /// using (var raw = System.IO.File.Create(fileToCompress + ".zlib"))
+ /// {
+ /// using (Stream compressor = new ZlibStream(raw, CompressionMode.Compress))
+ /// {
+ /// byte[] buffer = new byte[WORKING_BUFFER_SIZE];
+ /// int n;
+ /// while ((n= input.Read(buffer, 0, buffer.Length)) != 0)
+ /// {
+ /// compressor.Write(buffer, 0, n);
+ /// }
+ /// }
+ /// }
+ /// }
+ ///
+ ///
+ /// Using input As Stream = File.OpenRead(fileToCompress)
+ /// Using raw As FileStream = File.Create(fileToCompress & ".zlib")
+ /// Using compressor As Stream = New ZlibStream(raw, CompressionMode.Compress)
+ /// Dim buffer As Byte() = New Byte(4096) {}
+ /// Dim n As Integer = -1
+ /// Do While (n <> 0)
+ /// If (n > 0) Then
+ /// compressor.Write(buffer, 0, n)
+ /// End If
+ /// n = input.Read(buffer, 0, buffer.Length)
+ /// Loop
+ /// End Using
+ /// End Using
+ /// End Using
+ ///
+ ///
+ ///
+ /// The stream which will be read or written.
+ /// Indicates whether the ZlibStream will compress or decompress.
+ public ZlibStream(System.IO.Stream stream, CompressionMode mode)
+ : this(stream, mode, CompressionLevel.Default, false)
+ {
+ }
+
+ ///
+ /// Create a ZlibStream using the specified CompressionMode and
+ /// the specified CompressionLevel.
+ ///
+ ///
+ ///
+ ///
+ ///
+ /// When mode is CompressionMode.Decompress, the level parameter is ignored.
+ /// The "captive" stream will be closed when the ZlibStream is closed.
+ ///
+ ///
+ ///
+ ///
+ ///
+ /// This example uses a ZlibStream to compress data from a file, and writes the
+ /// compressed data to another file.
+ ///
+ ///
+ /// using (System.IO.Stream input = System.IO.File.OpenRead(fileToCompress))
+ /// {
+ /// using (var raw = System.IO.File.Create(fileToCompress + ".zlib"))
+ /// {
+ /// using (Stream compressor = new ZlibStream(raw,
+ /// CompressionMode.Compress,
+ /// CompressionLevel.BestCompression))
+ /// {
+ /// byte[] buffer = new byte[WORKING_BUFFER_SIZE];
+ /// int n;
+ /// while ((n= input.Read(buffer, 0, buffer.Length)) != 0)
+ /// {
+ /// compressor.Write(buffer, 0, n);
+ /// }
+ /// }
+ /// }
+ /// }
+ ///
+ ///
+ ///
+ /// Using input As Stream = File.OpenRead(fileToCompress)
+ /// Using raw As FileStream = File.Create(fileToCompress & ".zlib")
+ /// Using compressor As Stream = New ZlibStream(raw, CompressionMode.Compress, CompressionLevel.BestCompression)
+ /// Dim buffer As Byte() = New Byte(4096) {}
+ /// Dim n As Integer = -1
+ /// Do While (n <> 0)
+ /// If (n > 0) Then
+ /// compressor.Write(buffer, 0, n)
+ /// End If
+ /// n = input.Read(buffer, 0, buffer.Length)
+ /// Loop
+ /// End Using
+ /// End Using
+ /// End Using
+ ///
+ ///
+ ///
+ /// The stream to be read or written while deflating or inflating.
+ /// Indicates whether the ZlibStream will compress or decompress.
+ /// A tuning knob to trade speed for effectiveness.
+ public ZlibStream(System.IO.Stream stream, CompressionMode mode, CompressionLevel level)
+ : this(stream, mode, level, false)
+ {
+ }
+
+ ///
+ /// Create a ZlibStream using the specified CompressionMode, and
+ /// explicitly specify whether the captive stream should be left open after
+ /// Deflation or Inflation.
+ ///
+ ///
+ ///
+ ///
+ ///
+ /// When mode is CompressionMode.Compress, the ZlibStream will use
+ /// the default compression level.
+ ///
+ ///
+ ///
+ /// This constructor allows the application to request that the captive stream
+ /// remain open after the deflation or inflation occurs. By default, after
+ /// Close() is called on the stream, the captive stream is also
+ /// closed. In some cases this is not desired, for example if the stream is a
+ /// that will be re-read after
+ /// compression. Specify true for the parameter to leave the stream
+ /// open.
+ ///
+ ///
+ ///
+ /// See the other overloads of this constructor for example code.
+ ///
+ ///
+ ///
+ ///
+ /// The stream which will be read or written. This is called the
+ /// "captive" stream in other places in this documentation.
+ /// Indicates whether the ZlibStream will compress or decompress.
+ /// true if the application would like the stream to remain
+ /// open after inflation/deflation.
+ public ZlibStream(System.IO.Stream stream, CompressionMode mode, bool leaveOpen)
+ : this(stream, mode, CompressionLevel.Default, leaveOpen)
+ {
+ }
+
+ ///
+ /// Create a ZlibStream using the specified CompressionMode
+ /// and the specified CompressionLevel, and explicitly specify
+ /// whether the stream should be left open after Deflation or Inflation.
+ ///
+ ///
+ ///
+ ///
+ ///
+ /// This constructor allows the application to request that the captive
+ /// stream remain open after the deflation or inflation occurs. By
+ /// default, after Close() is called on the stream, the captive
+ /// stream is also closed. In some cases this is not desired, for example
+ /// if the stream is a that will be
+ /// re-read after compression. Specify true for the parameter to leave the stream open.
+ ///
+ ///
+ ///
+ /// When mode is CompressionMode.Decompress, the level parameter is
+ /// ignored.
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ /// This example shows how to use a ZlibStream to compress the data from a file,
+ /// and store the result into another file. The filestream remains open to allow
+ /// additional data to be written to it.
+ ///
+ ///
+ /// using (var output = System.IO.File.Create(fileToCompress + ".zlib"))
+ /// {
+ /// using (System.IO.Stream input = System.IO.File.OpenRead(fileToCompress))
+ /// {
+ /// using (Stream compressor = new ZlibStream(output, CompressionMode.Compress, CompressionLevel.BestCompression, true))
+ /// {
+ /// byte[] buffer = new byte[WORKING_BUFFER_SIZE];
+ /// int n;
+ /// while ((n= input.Read(buffer, 0, buffer.Length)) != 0)
+ /// {
+ /// compressor.Write(buffer, 0, n);
+ /// }
+ /// }
+ /// }
+ /// // can write additional data to the output stream here
+ /// }
+ ///
+ ///
+ /// Using output As FileStream = File.Create(fileToCompress & ".zlib")
+ /// Using input As Stream = File.OpenRead(fileToCompress)
+ /// Using compressor As Stream = New ZlibStream(output, CompressionMode.Compress, CompressionLevel.BestCompression, True)
+ /// Dim buffer As Byte() = New Byte(4096) {}
+ /// Dim n As Integer = -1
+ /// Do While (n <> 0)
+ /// If (n > 0) Then
+ /// compressor.Write(buffer, 0, n)
+ /// End If
+ /// n = input.Read(buffer, 0, buffer.Length)
+ /// Loop
+ /// End Using
+ /// End Using
+ /// ' can write additional data to the output stream here.
+ /// End Using
+ ///
+ ///
+ ///
+ /// The stream which will be read or written.
+ ///
+ /// Indicates whether the ZlibStream will compress or decompress.
+ ///
+ ///
+ /// true if the application would like the stream to remain open after
+ /// inflation/deflation.
+ ///
+ ///
+ ///
+ /// A tuning knob to trade speed for effectiveness. This parameter is
+ /// effective only when mode is CompressionMode.Compress.
+ ///
+ public ZlibStream(System.IO.Stream stream, CompressionMode mode, CompressionLevel level, bool leaveOpen)
+ {
+ _baseStream = new ZlibBaseStream(stream, mode, level, ZlibStreamFlavor.ZLIB, leaveOpen);
+ }
+
+ #region Zlib properties
+
+ ///
+ /// This property sets the flush behavior on the stream.
+ /// Sorry, though, not sure exactly how to describe all the various settings.
+ ///
+ virtual public FlushType FlushMode
+ {
+ get { return (this._baseStream._flushMode); }
+ set
+ {
+ if (_disposed) throw new ObjectDisposedException("ZlibStream");
+ this._baseStream._flushMode = value;
+ }
+ }
+
+ ///
+ /// The size of the working buffer for the compression codec.
+ ///
+ ///
+ ///
+ ///
+ /// The working buffer is used for all stream operations. The default size is
+ /// 1024 bytes. The minimum size is 128 bytes. You may get better performance
+ /// with a larger buffer. Then again, you might not. You would have to test
+ /// it.
+ ///
+ ///
+ ///
+ /// Set this before the first call to Read() or Write() on the
+ /// stream. If you try to set it afterwards, it will throw.
+ ///
+ ///
+ public int BufferSize
+ {
+ get
+ {
+ return this._baseStream._bufferSize;
+ }
+ set
+ {
+ if (_disposed) throw new ObjectDisposedException("ZlibStream");
+ if (this._baseStream._workingBuffer != null)
+ throw new ZlibException("The working buffer is already set.");
+ if (value < ZlibConstants.WorkingBufferSizeMin)
+ throw new ZlibException(String.Format("Don't be silly. {0} bytes?? Use a bigger buffer, at least {1}.", value, ZlibConstants.WorkingBufferSizeMin));
+ this._baseStream._bufferSize = value;
+ }
+ }
+
+ /// Returns the total number of bytes input so far.
+ virtual public long TotalIn
+ {
+ get { return this._baseStream._z.TotalBytesIn; }
+ }
+
+ /// Returns the total number of bytes output so far.
+ virtual public long TotalOut
+ {
+ get { return this._baseStream._z.TotalBytesOut; }
+ }
+
+ #endregion
+
+ #region System.IO.Stream methods
+
+ ///
+ /// Dispose the stream.
+ ///
+ ///
+ ///
+ /// This may or may not result in a Close() call on the captive
+ /// stream. See the constructors that have a leaveOpen parameter
+ /// for more information.
+ ///
+ ///
+ /// This method may be invoked in two distinct scenarios. If disposing
+ /// == true, the method has been called directly or indirectly by a
+ /// user's code, for example via the public Dispose() method. In this
+ /// case, both managed and unmanaged resources can be referenced and
+ /// disposed. If disposing == false, the method has been called by the
+ /// runtime from inside the object finalizer and this method should not
+ /// reference other objects; in that case only unmanaged resources must
+ /// be referenced or disposed.
+ ///
+ ///
+ ///
+ /// indicates whether the Dispose method was invoked by user code.
+ ///
+ protected override void Dispose(bool disposing)
+ {
+ try
+ {
+ if (!_disposed)
+ {
+ if (disposing && (this._baseStream != null))
+ this._baseStream.Close();
+ _disposed = true;
+ }
+ }
+ finally
+ {
+ base.Dispose(disposing);
+ }
+ }
+
+
+ ///
+ /// Indicates whether the stream can be read.
+ ///
+ ///
+ /// The return value depends on whether the captive stream supports reading.
+ ///
+ public override bool CanRead
+ {
+ get
+ {
+ if (_disposed) throw new ObjectDisposedException("ZlibStream");
+ return _baseStream._stream.CanRead;
+ }
+ }
+
+ ///
+ /// Indicates whether the stream supports Seek operations.
+ ///
+ ///
+ /// Always returns false.
+ ///
+ public override bool CanSeek
+ {
+ get { return false; }
+ }
+
+ ///
+ /// Indicates whether the stream can be written.
+ ///
+ ///
+ /// The return value depends on whether the captive stream supports writing.
+ ///
+ public override bool CanWrite
+ {
+ get
+ {
+ if (_disposed) throw new ObjectDisposedException("ZlibStream");
+ return _baseStream._stream.CanWrite;
+ }
+ }
+
+ ///
+ /// Flush the stream.
+ ///
+ public override void Flush()
+ {
+ if (_disposed) throw new ObjectDisposedException("ZlibStream");
+ _baseStream.Flush();
+ }
+
+ ///
+ /// Reading this property always throws a .
+ ///
+ public override long Length
+ {
+ get { throw new NotSupportedException(); }
+ }
+
+ ///
+ /// The position of the stream pointer.
+ ///
+ ///
+ ///
+ /// Setting this property always throws a . Reading will return the total bytes
+ /// written out, if used in writing, or the total bytes read in, if used in
+ /// reading. The count may refer to compressed bytes or uncompressed bytes,
+ /// depending on how you've used the stream.
+ ///
+ public override long Position
+ {
+ get
+ {
+ if (this._baseStream._streamMode == Ionic.Zlib.ZlibBaseStream.StreamMode.Writer)
+ return this._baseStream._z.TotalBytesOut;
+ if (this._baseStream._streamMode == Ionic.Zlib.ZlibBaseStream.StreamMode.Reader)
+ return this._baseStream._z.TotalBytesIn;
+ return 0;
+ }
+
+ set { throw new NotSupportedException(); }
+ }
+
+ ///
+ /// Read data from the stream.
+ ///
+ ///
+ ///
+ ///
+ ///
+ /// If you wish to use the ZlibStream to compress data while reading,
+ /// you can create a ZlibStream with CompressionMode.Compress,
+ /// providing an uncompressed data stream. Then call Read() on that
+ /// ZlibStream, and the data read will be compressed. If you wish to
+ /// use the ZlibStream to decompress data while reading, you can create
+ /// a ZlibStream with CompressionMode.Decompress, providing a
+ /// readable compressed data stream. Then call Read() on that
+ /// ZlibStream, and the data will be decompressed as it is read.
+ ///
+ ///
+ ///
+ /// A ZlibStream can be used for Read() or Write(), but
+ /// not both.
+ ///
+ ///
+ ///
+ ///
+ ///
+ /// The buffer into which the read data should be placed.
+ ///
+ ///
+ /// the offset within that data array to put the first byte read.
+ ///
+ /// the number of bytes to read.
+ ///
+ /// the number of bytes read
+ public override int Read(byte[] buffer, int offset, int count)
+ {
+ if (_disposed) throw new ObjectDisposedException("ZlibStream");
+ return _baseStream.Read(buffer, offset, count);
+ }
+
+ ///
+ /// Calling this method always throws a .
+ ///
+ ///
+ /// The offset to seek to....
+ /// IF THIS METHOD ACTUALLY DID ANYTHING.
+ ///
+ ///
+ /// The reference specifying how to apply the offset.... IF
+ /// THIS METHOD ACTUALLY DID ANYTHING.
+ ///
+ ///
+ /// nothing. This method always throws.
+ public override long Seek(long offset, System.IO.SeekOrigin origin)
+ {
+ throw new NotSupportedException();
+ }
+
+ ///
+ /// Calling this method always throws a .
+ ///
+ ///
+ /// The new value for the stream length.... IF
+ /// THIS METHOD ACTUALLY DID ANYTHING.
+ ///
+ public override void SetLength(long value)
+ {
+ throw new NotSupportedException();
+ }
+
+ ///
+ /// Write data to the stream.
+ ///
+ ///
+ ///
+ ///
+ ///
+ /// If you wish to use the ZlibStream to compress data while writing,
+ /// you can create a ZlibStream with CompressionMode.Compress,
+ /// and a writable output stream. Then call Write() on that
+ /// ZlibStream, providing uncompressed data as input. The data sent to
+ /// the output stream will be the compressed form of the data written. If you
+ /// wish to use the ZlibStream to decompress data while writing, you
+ /// can create a ZlibStream with CompressionMode.Decompress, and a
+ /// writable output stream. Then call Write() on that stream,
+ /// providing previously compressed data. The data sent to the output stream
+ /// will be the decompressed form of the data written.
+ ///
+ ///
+ ///
+ /// A ZlibStream can be used for Read() or Write(), but not both.
+ ///
+ ///
+ /// The buffer holding data to write to the stream.
+ /// the offset within that data array to find the first byte to write.
+ /// the number of bytes to write.
+ public override void Write(byte[] buffer, int offset, int count)
+ {
+ if (_disposed) throw new ObjectDisposedException("ZlibStream");
+ _baseStream.Write(buffer, offset, count);
+ }
+ #endregion
+
+
+ ///
+ /// Compress a string into a byte array using ZLIB.
+ ///
+ ///
+ ///
+ /// Uncompress it with .
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ /// A string to compress. The string will first be encoded
+ /// using UTF8, then compressed.
+ ///
+ ///
+ /// The string in compressed form
+ public static byte[] CompressString(String s)
+ {
+ using (var ms = new MemoryStream())
+ {
+ Stream compressor =
+ new ZlibStream(ms, CompressionMode.Compress, CompressionLevel.BestCompression);
+ ZlibBaseStream.CompressString(s, compressor);
+ return ms.ToArray();
+ }
+ }
+
+
+ ///
+ /// Compress a byte array into a new byte array using ZLIB.
+ ///
+ ///
+ ///
+ /// Uncompress it with .
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ /// A buffer to compress.
+ ///
+ ///
+ /// The data in compressed form
+ public static byte[] CompressBuffer(byte[] b)
+ {
+ using (var ms = new MemoryStream())
+ {
+ Stream compressor =
+ new ZlibStream( ms, CompressionMode.Compress, CompressionLevel.BestCompression );
+
+ ZlibBaseStream.CompressBuffer(b, compressor);
+ return ms.ToArray();
+ }
+ }
+
+
+ ///
+ /// Uncompress a ZLIB-compressed byte array into a single string.
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ /// A buffer containing ZLIB-compressed data.
+ ///
+ ///
+ /// The uncompressed string
+ public static String UncompressString(byte[] compressed)
+ {
+ using (var input = new MemoryStream(compressed))
+ {
+ Stream decompressor =
+ new ZlibStream(input, CompressionMode.Decompress);
+
+ return ZlibBaseStream.UncompressString(compressed, decompressor);
+ }
+ }
+
+
+ ///
+ /// Uncompress a ZLIB-compressed byte array into a byte array.
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ /// A buffer containing ZLIB-compressed data.
+ ///
+ ///
+ /// The data in uncompressed form
+ public static byte[] UncompressBuffer(byte[] compressed)
+ {
+ using (var input = new MemoryStream(compressed))
+ {
+ Stream decompressor =
+ new ZlibStream( input, CompressionMode.Decompress );
+
+ return ZlibBaseStream.UncompressBuffer(compressed, decompressor);
+ }
+ }
+
+ }
+
+
+}
\ No newline at end of file
diff --git a/src/Hazelcast.Net/Polyfills/ZLib/__Exclude.cs b/src/Hazelcast.Net/Polyfills/ZLib/__Exclude.cs
new file mode 100644
index 0000000000..9fced80524
--- /dev/null
+++ b/src/Hazelcast.Net/Polyfills/ZLib/__Exclude.cs
@@ -0,0 +1,7 @@
+
+#if NET6_0_OR_GREATER
+
+// ensure that Hazelcast.Polyfills.ZLib is *not* compiled for .NET 6+
+#error Hazelcast.Polyfills.ZLib code should not build for this target framework!
+
+#endif
\ No newline at end of file
diff --git a/src/Hazelcast.Net/Polyfills/ZLibStreamFactory.cs b/src/Hazelcast.Net/Polyfills/ZLibStreamFactory.cs
new file mode 100644
index 0000000000..c31068af20
--- /dev/null
+++ b/src/Hazelcast.Net/Polyfills/ZLibStreamFactory.cs
@@ -0,0 +1,45 @@
+using System.IO;
+
+#if NET6_0_OR_GREATER
+using System.IO.Compression;
+#endif
+
+namespace Hazelcast.Polyfills;
+
+// System.IO.Compression.ZLibStream is available starting with .NET 6, and
+// before that there is no way to zip files in stock .NET that can be un-zipped
+// by Java on Hazelcast members (for metrics). This attempts to provide
+// a solution for .NET pre-6.
+//
+// We used to use DotNetZip but that library has a medium security issue that
+// bothers some of our users (silly thing about using a random number generator).
+//
+// back-porting the .NET 6 ZLibStream class proves problematic as deep down
+// it P/Invokes ZLIB and who knows what's available on .NET Framework?
+//
+// so... we're bringing a small subset of DotNetZip (which is not impacted by
+// the security issue) into our codebase. Code is available on GitHub under the
+// MS-PL license.
+//
+// this factory ensures we create the proper zipping stream depending on framework.
+
+internal static class ZLibStreamFactory
+{
+#if NET6_0_OR_GREATER
+
+ public static Stream Compress(Stream stream, bool leaveOpen)
+ => new ZLibStream(stream, CompressionLevel.Fastest, leaveOpen);
+
+ public static Stream Decompress(Stream stream, bool leaveOpen)
+ => new ZLibStream(stream, CompressionMode.Decompress, leaveOpen);
+
+#else
+
+ public static Stream Compress(Stream stream, bool leaveOpen)
+ => new Ionic.Zlib.ZlibStream(stream, Ionic.Zlib.CompressionMode.Compress, Ionic.Zlib.CompressionLevel.BestSpeed, leaveOpen);
+
+ public static Stream Decompress(Stream stream, bool leaveOpen)
+ => new Ionic.Zlib.ZlibStream(stream, Ionic.Zlib.CompressionMode.Decompress, leaveOpen);
+
+#endif
+}
\ No newline at end of file