##// END OF EJS Templates
copies-rust: tokenize all paths into integer...
copies-rust: tokenize all paths into integer Copy information for each changesets tend to affect a small new number of path. However, each of these path might be handled a large number of time. Handling HgPathBuf (aka `Vec<u8>`) is expensive. Handling integer is cheap. With this patch we: - turn any input path into an integer "token" early, - do all the internal logic using such "token", - turn "token" back into path right before returning a result. This gives use a quite significant performance boost in our slower cases. Repo Case Source-Rev Dest-Rev # of revisions old time new time Difference Factor time per rev --------------------------------------------------------------------------------------------------------------------------------------------------------------- pypy x000_revs_x000_added_x_copies 8a3b5bfd266e 2c68e87c3efe : 6780 revs, 0.092828 s, 0.081225 s, -0.011603 s, × 0.8750, 11 µs/rev pypy x0000_revs_x_added_0_copies d1defd0dc478 c9cb1334cc78 : 43645 revs, 0.711975 s, 0.586011 s, -0.125964 s, × 0.8231, 13 µs/rev pypy x0000_revs_xx000_added_x000_copies 08ea3258278e d9fa043f30c0 : 11316 revs, 0.124505 s, 0.114173 s, -0.010332 s, × 0.9170, 10 µs/rev netbeans x000_revs_x000_added_x000_copies ff453e9fee32 411350406ec2 : 5750 revs, 0.072072 s, 0.061004 s, -0.011068 s, × 0.8464, 10 µs/rev netbeans x0000_revs_xx000_added_x000_copies 588c2d1ced70 1aad62e59ddd : 66949 revs, 0.682732 s, 0.535874 s, -0.146858 s, × 0.7849, 8 µs/rev mozilla-central x00000_revs_x0000_added_x0000_copies 6832ae71433c 4c222a1d9a00 : 153721 revs, 1.935918 s, 1.781383 s, -0.154535 s, × 0.9202, 11 µs/rev mozilla-central x00000_revs_x00000_added_x000_copies 76caed42cf7c 1daa622bbe42 : 204976 revs, 2.827320 s, 2.603867 s, -0.223453 s, × 0.9210, 12 µs/rev mozilla-try x0000_revs_xx000_added_x000_copies 89294cd501d9 7ccb2fc7ccb5 : 97052 revs, 3.243010 s, 1.529120 s, -1.713890 s, × 0.4715, 15 µs/rev mozilla-try x00000_revs_x_added_0_copies 6a320851d377 1ebb79acd503 : 363753 revs, 5.693818 s, 
4.842699 s, -0.851119 s, × 0.8505, 13 µs/rev mozilla-try x00000_revs_x_added_x_copies 5173c4b6f97c 95d83ee7242d : 362229 revs, 5.677655 s, 4.761732 s, -0.915923 s, × 0.8387, 13 µs/rev mozilla-try x00000_revs_x000_added_x_copies 9126823d0e9c ca82787bb23c : 359344 revs, 5.563370 s, 4.733912 s, -0.829458 s, × 0.8509, 13 µs/rev mozilla-try x00000_revs_x0000_added_x0000_copies 8d3fafa80d4b eb884023b810 : 192665 revs, 2.864099 s, 2.593410 s, -0.270689 s, × 0.9055, 13 µs/rev mozilla-try x00000_revs_x00000_added_x0000_copies 1b661134e2ca 1ae03d022d6d : 228985 revs, 113.297287 s, 41.041198 s, -72.256089 s, × 0.3622, 179 µs/rev mozilla-try x00000_revs_x00000_added_x000_copies 9b2a99adc05e 8e29777b48e6 : 382065 revs, 59.498652 s, 27.915689 s, -31.582963 s, × 0.4692, 73 µs/rev Full timing comparison between this revision and the previous one: Repo Case Source-Rev Dest-Rev # of revisions old time new time Difference Factor time per rev --------------------------------------------------------------------------------------------------------------------------------------------------------------- mercurial x_revs_x_added_0_copies ad6b123de1c7 39cfcef4f463 : 1 revs, 0.000042 s, 0.000042 s, +0.000000 s, × 1.0000, 42 µs/rev mercurial x_revs_x_added_x_copies 2b1c78674230 0c1d10351869 : 6 revs, 0.000104 s, 0.000110 s, +0.000006 s, × 1.0577, 18 µs/rev mercurial x000_revs_x000_added_x_copies 81f8ff2a9bf2 dd3267698d84 : 1032 revs, 0.004913 s, 0.004918 s, +0.000005 s, × 1.0010, 4 µs/rev pypy x_revs_x_added_0_copies aed021ee8ae8 099ed31b181b : 9 revs, 0.000191 s, 0.000195 s, +0.000004 s, × 1.0209, 21 µs/rev pypy x_revs_x000_added_0_copies 4aa4e1f8e19a 359343b9ac0e : 1 revs, 0.000050 s, 0.000049 s, -0.000001 s, × 0.9800, 49 µs/rev pypy x_revs_x_added_x_copies ac52eb7bbbb0 72e022663155 : 7 revs, 0.000112 s, 0.000112 s, +0.000000 s, × 1.0000, 16 µs/rev pypy x_revs_x00_added_x_copies c3b14617fbd7 ace7255d9a26 : 1 revs, 0.000288 s, 0.000324 s, +0.000036 s, × 1.1250, 324 µs/rev pypy 
x_revs_x000_added_x000_copies df6f7a526b60 a83dc6a2d56f : 6 revs, 0.010411 s, 0.010611 s, +0.000200 s, × 1.0192, 1768 µs/rev pypy x000_revs_xx00_added_0_copies 89a76aede314 2f22446ff07e : 4785 revs, 0.052852 s, 0.050835 s, -0.002017 s, × 0.9618, 10 µs/rev pypy x000_revs_x000_added_x_copies 8a3b5bfd266e 2c68e87c3efe : 6780 revs, 0.092828 s, 0.081225 s, -0.011603 s, × 0.8750, 11 µs/rev pypy x000_revs_x000_added_x000_copies 89a76aede314 7b3dda341c84 : 5441 revs, 0.063269 s, 0.061291 s, -0.001978 s, × 0.9687, 11 µs/rev pypy x0000_revs_x_added_0_copies d1defd0dc478 c9cb1334cc78 : 43645 revs, 0.711975 s, 0.586011 s, -0.125964 s, × 0.8231, 13 µs/rev pypy x0000_revs_xx000_added_0_copies bf2c629d0071 4ffed77c095c : 2 revs, 0.012771 s, 0.012824 s, +0.000053 s, × 1.0042, 6412 µs/rev pypy x0000_revs_xx000_added_x000_copies 08ea3258278e d9fa043f30c0 : 11316 revs, 0.124505 s, 0.114173 s, -0.010332 s, × 0.9170, 10 µs/rev netbeans x_revs_x_added_0_copies fb0955ffcbcd a01e9239f9e7 : 2 revs, 0.000082 s, 0.000085 s, +0.000003 s, × 1.0366, 42 µs/rev netbeans x_revs_x000_added_0_copies 6f360122949f 20eb231cc7d0 : 2 revs, 0.000111 s, 0.000108 s, -0.000003 s, × 0.9730, 54 µs/rev netbeans x_revs_x_added_x_copies 1ada3faf6fb6 5a39d12eecf4 : 3 revs, 0.000171 s, 0.000175 s, +0.000004 s, × 1.0234, 58 µs/rev netbeans x_revs_x00_added_x_copies 35be93ba1e2c 9eec5e90c05f : 9 revs, 0.000708 s, 0.000719 s, +0.000011 s, × 1.0155, 79 µs/rev netbeans x000_revs_xx00_added_0_copies eac3045b4fdd 51d4ae7f1290 : 1421 revs, 0.010608 s, 0.010175 s, -0.000433 s, × 0.9592, 7 µs/rev netbeans x000_revs_x000_added_x_copies e2063d266acd 6081d72689dc : 1533 revs, 0.015635 s, 0.015569 s, -0.000066 s, × 0.9958, 10 µs/rev netbeans x000_revs_x000_added_x000_copies ff453e9fee32 411350406ec2 : 5750 revs, 0.072072 s, 0.061004 s, -0.011068 s, × 0.8464, 10 µs/rev netbeans x0000_revs_xx000_added_x000_copies 588c2d1ced70 1aad62e59ddd : 66949 revs, 0.682732 s, 0.535874 s, -0.146858 s, × 0.7849, 8 µs/rev mozilla-central 
x_revs_x_added_0_copies 3697f962bb7b 7015fcdd43a2 : 2 revs, 0.000090 s, 0.000090 s, +0.000000 s, × 1.0000, 45 µs/rev mozilla-central x_revs_x000_added_0_copies dd390860c6c9 40d0c5bed75d : 8 revs, 0.000210 s, 0.000281 s, +0.000071 s, × 1.3381, 35 µs/rev mozilla-central x_revs_x_added_x_copies 8d198483ae3b 14207ffc2b2f : 9 revs, 0.000182 s, 0.000187 s, +0.000005 s, × 1.0275, 20 µs/rev mozilla-central x_revs_x00_added_x_copies 98cbc58cc6bc 446a150332c3 : 7 revs, 0.000594 s, 0.000660 s, +0.000066 s, × 1.1111, 94 µs/rev mozilla-central x_revs_x000_added_x000_copies 3c684b4b8f68 0a5e72d1b479 : 3 revs, 0.003102 s, 0.003385 s, +0.000283 s, × 1.0912, 1128 µs/rev mozilla-central x_revs_x0000_added_x0000_copies effb563bb7e5 c07a39dc4e80 : 6 revs, 0.060234 s, 0.069812 s, +0.009578 s, × 1.1590, 11635 µs/rev mozilla-central x000_revs_xx00_added_0_copies 6100d773079a 04a55431795e : 1593 revs, 0.006300 s, 0.006503 s, +0.000203 s, × 1.0322, 4 µs/rev mozilla-central x000_revs_x000_added_x_copies 9f17a6fc04f9 2d37b966abed : 41 revs, 0.004817 s, 0.004988 s, +0.000171 s, × 1.0355, 121 µs/rev mozilla-central x000_revs_x000_added_x000_copies 7c97034feb78 4407bd0c6330 : 7839 revs, 0.065451 s, 0.063963 s, -0.001488 s, × 0.9773, 8 µs/rev mozilla-central x0000_revs_xx000_added_0_copies 9eec5917337d 67118cc6dcad : 615 revs, 0.026282 s, 0.026225 s, -0.000057 s, × 0.9978, 42 µs/rev mozilla-central x0000_revs_xx000_added_x000_copies f78c615a656c 96a38b690156 : 30263 revs, 0.206873 s, 0.201377 s, -0.005496 s, × 0.9734, 6 µs/rev mozilla-central x00000_revs_x0000_added_x0000_copies 6832ae71433c 4c222a1d9a00 : 153721 revs, 1.935918 s, 1.781383 s, -0.154535 s, × 0.9202, 11 µs/rev mozilla-central x00000_revs_x00000_added_x000_copies 76caed42cf7c 1daa622bbe42 : 204976 revs, 2.827320 s, 2.603867 s, -0.223453 s, × 0.9210, 12 µs/rev mozilla-try x_revs_x_added_0_copies aaf6dde0deb8 9790f499805a : 2 revs, 0.000842 s, 0.000845 s, +0.000003 s, × 1.0036, 422 µs/rev mozilla-try x_revs_x000_added_0_copies 
d8d0222927b4 5bb8ce8c7450 : 2 revs, 0.000870 s, 0.000862 s, -0.000008 s, × 0.9908, 431 µs/rev mozilla-try x_revs_x_added_x_copies 092fcca11bdb 936255a0384a : 4 revs, 0.000165 s, 0.000161 s, -0.000004 s, × 0.9758, 40 µs/rev mozilla-try x_revs_x00_added_x_copies b53d2fadbdb5 017afae788ec : 2 revs, 0.001145 s, 0.001163 s, +0.000018 s, × 1.0157, 581 µs/rev mozilla-try x_revs_x000_added_x000_copies 20408ad61ce5 6f0ee96e21ad : 1 revs, 0.026500 s, 0.032414 s, +0.005914 s, × 1.2232, 32414 µs/rev mozilla-try x_revs_x0000_added_x0000_copies effb563bb7e5 c07a39dc4e80 : 6 revs, 0.059407 s, 0.070149 s, +0.010742 s, × 1.1808, 11691 µs/rev mozilla-try x000_revs_xx00_added_0_copies 6100d773079a 04a55431795e : 1593 revs, 0.006325 s, 0.006526 s, +0.000201 s, × 1.0318, 4 µs/rev mozilla-try x000_revs_x000_added_x_copies 9f17a6fc04f9 2d37b966abed : 41 revs, 0.005171 s, 0.005187 s, +0.000016 s, × 1.0031, 126 µs/rev mozilla-try x000_revs_x000_added_x000_copies 1346fd0130e4 4c65cbdabc1f : 6657 revs, 0.066837 s, 0.065047 s, -0.001790 s, × 0.9732, 9 µs/rev mozilla-try x0000_revs_x_added_0_copies 63519bfd42ee a36a2a865d92 : 40314 revs, 0.314252 s, 0.301129 s, -0.013123 s, × 0.9582, 7 µs/rev mozilla-try x0000_revs_x_added_x_copies 9fe69ff0762d bcabf2a78927 : 38690 revs, 0.304160 s, 0.280683 s, -0.023477 s, × 0.9228, 7 µs/rev mozilla-try x0000_revs_xx000_added_x_copies 156f6e2674f2 4d0f2c178e66 : 8598 revs, 0.089223 s, 0.084897 s, -0.004326 s, × 0.9515, 9 µs/rev mozilla-try x0000_revs_xx000_added_0_copies 9eec5917337d 67118cc6dcad : 615 revs, 0.026711 s, 0.026620 s, -0.000091 s, × 0.9966, 43 µs/rev mozilla-try x0000_revs_xx000_added_x000_copies 89294cd501d9 7ccb2fc7ccb5 : 97052 revs, 3.243010 s, 1.529120 s, -1.713890 s, × 0.4715, 15 µs/rev mozilla-try x0000_revs_x0000_added_x0000_copies e928c65095ed e951f4ad123a : 52031 revs, 0.756500 s, 0.738709 s, -0.017791 s, × 0.9765, 14 µs/rev mozilla-try x00000_revs_x_added_0_copies 6a320851d377 1ebb79acd503 : 363753 revs, 5.693818 s, 4.842699 s, 
-0.851119 s, × 0.8505, 13 µs/rev mozilla-try x00000_revs_x00000_added_0_copies dc8a3ca7010e d16fde900c9c : 34414 revs, 0.590904 s, 0.596946 s, +0.006042 s, × 1.0102, 17 µs/rev mozilla-try x00000_revs_x_added_x_copies 5173c4b6f97c 95d83ee7242d : 362229 revs, 5.677655 s, 4.761732 s, -0.915923 s, × 0.8387, 13 µs/rev mozilla-try x00000_revs_x000_added_x_copies 9126823d0e9c ca82787bb23c : 359344 revs, 5.563370 s, 4.733912 s, -0.829458 s, × 0.8509, 13 µs/rev mozilla-try x00000_revs_x0000_added_x0000_copies 8d3fafa80d4b eb884023b810 : 192665 revs, 2.864099 s, 2.593410 s, -0.270689 s, × 0.9055, 13 µs/rev mozilla-try x00000_revs_x00000_added_x0000_copies 1b661134e2ca 1ae03d022d6d : 228985 revs, 113.297287 s, 41.041198 s, -72.256089 s, × 0.3622, 179 µs/rev mozilla-try x00000_revs_x00000_added_x000_copies 9b2a99adc05e 8e29777b48e6 : 382065 revs, 59.498652 s, 27.915689 s, -31.582963 s, × 0.4692, 73 µs/rev Full timing comparison between this revision and the filelog copy tracing. Repo Case Source-Rev Dest-Rev # of revisions filelog sidedata Difference Factor time per rev --------------------------------------------------------------------------------------------------------------------------------------------------------------- mercurial x_revs_x_added_0_copies ad6b123de1c7 39cfcef4f463 : 1 revs, 0.000903 s, 0.000042 s, -0.000861 s, × 0.0465, 41 µs/rev mercurial x_revs_x_added_x_copies 2b1c78674230 0c1d10351869 : 6 revs, 0.001861 s, 0.000110 s, -0.001751 s, × 0.0591, 18 µs/rev mercurial x000_revs_x000_added_x_copies 81f8ff2a9bf2 dd3267698d84 : 1032 revs, 0.018577 s, 0.004918 s, -0.013659 s, × 0.2647, 4 µs/rev pypy x_revs_x_added_0_copies aed021ee8ae8 099ed31b181b : 9 revs, 0.001519 s, 0.000195 s, -0.001324 s, × 0.1283, 21 µs/rev pypy x_revs_x000_added_0_copies 4aa4e1f8e19a 359343b9ac0e : 1 revs, 0.213855 s, 0.000049 s, -0.350d73 s, × 0.0002, 48 µs/rev pypy x_revs_x_added_x_copies ac52eb7bbbb0 72e022663155 : 7 revs, 0.017022 s, 0.000112 s, -0.016910 s, × 0.0065, 15 µs/rev pypy 
x_revs_x00_added_x_copies c3b14617fbd7 ace7255d9a26 : 1 revs, 0.019398 s, 0.000324 s, -0.019074 s, × 0.0167, 323 µs/rev pypy x_revs_x000_added_x000_copies df6f7a526b60 a83dc6a2d56f : 6 revs, 0.769467 s, 0.010611 s, -0.758856 s, × 0.0137, 1768 µs/rev pypy x000_revs_xx00_added_0_copies 89a76aede314 2f22446ff07e : 4785 revs, 1.221952 s, 0.050835 s, -1.171117 s, × 0.0416, 10 µs/rev pypy x000_revs_x000_added_x_copies 8a3b5bfd266e 2c68e87c3efe : 6780 revs, 1.304007 s, 0.081225 s, -1.222782 s, × 0.0622, 11 µs/rev pypy x000_revs_x000_added_x000_copies 89a76aede314 7b3dda341c84 : 5441 revs, 1.686610 s, 0.061291 s, -1.625319 s, × 0.0363, 11 µs/rev pypy x0000_revs_x_added_0_copies d1defd0dc478 c9cb1334cc78 : 43645 revs, 0.001107 s, 0.586011 s, +0.584904 s, × 529.36, 13 µs/rev pypy x0000_revs_xx000_added_0_copies bf2c629d0071 4ffed77c095c : 2 revs, 1.100760 s, 0.012824 s, -1.087936 s, × 0.0116, 6408 µs/rev pypy x0000_revs_xx000_added_x000_copies 08ea3258278e d9fa043f30c0 : 11316 revs, 1.350547 s, 0.114173 s, -1.236374 s, × 0.0845, 10 µs/rev netbeans x_revs_x_added_0_copies fb0955ffcbcd a01e9239f9e7 : 2 revs, 0.027864 s, 0.000085 s, -0.027779 s, × 0.0030, 42 µs/rev netbeans x_revs_x000_added_0_copies 6f360122949f 20eb231cc7d0 : 2 revs, 0.132479 s, 0.000108 s, -0.132371 s, × 0.0008, 53 µs/rev netbeans x_revs_x_added_x_copies 1ada3faf6fb6 5a39d12eecf4 : 3 revs, 0.025405 s, 0.000175 s, -0.025230 s, × 0.0068, 58 µs/rev netbeans x_revs_x00_added_x_copies 35be93ba1e2c 9eec5e90c05f : 9 revs, 0.053244 s, 0.000719 s, -0.052525 s, × 0.0135, 79 µs/rev netbeans x000_revs_xx00_added_0_copies eac3045b4fdd 51d4ae7f1290 : 1421 revs, 0.038017 s, 0.010175 s, -0.027842 s, × 0.2676, 7 µs/rev netbeans x000_revs_x000_added_x_copies e2063d266acd 6081d72689dc : 1533 revs, 0.198308 s, 0.015569 s, -0.182739 s, × 0.0785, 10 µs/rev netbeans x000_revs_x000_added_x000_copies ff453e9fee32 411350406ec2 : 5750 revs, 0.949749 s, 0.061004 s, -0.888745 s, × 0.0642, 10 µs/rev netbeans 
x0000_revs_xx000_added_x000_copies 588c2d1ced70 1aad62e59ddd : 66949 revs, 3.932262 s, 0.535874 s, -3.396388 s, × 0.1362, 8 µs/rev mozilla-central x_revs_x_added_0_copies 3697f962bb7b 7015fcdd43a2 : 2 revs, 0.024490 s, 0.000090 s, -0.024400 s, × 0.0036, 44 µs/rev mozilla-central x_revs_x000_added_0_copies dd390860c6c9 40d0c5bed75d : 8 revs, 0.143885 s, 0.000281 s, -0.143604 s, × 0.0019, 35 µs/rev mozilla-central x_revs_x_added_x_copies 8d198483ae3b 14207ffc2b2f : 9 revs, 0.025471 s, 0.000187 s, -0.025284 s, × 0.0073, 20 µs/rev mozilla-central x_revs_x00_added_x_copies 98cbc58cc6bc 446a150332c3 : 7 revs, 0.086013 s, 0.000660 s, -0.085353 s, × 0.0076, 94 µs/rev mozilla-central x_revs_x000_added_x000_copies 3c684b4b8f68 0a5e72d1b479 : 3 revs, 0.200726 s, 0.003385 s, -0.197341 s, × 0.0168, 1127 µs/rev mozilla-central x_revs_x0000_added_x0000_copies effb563bb7e5 c07a39dc4e80 : 6 revs, 2.224171 s, 0.069812 s, -2.154359 s, × 0.0313, 11633 µs/rev mozilla-central x000_revs_xx00_added_0_copies 6100d773079a 04a55431795e : 1593 revs, 0.090780 s, 0.006503 s, -0.084277 s, × 0.0716, 4 µs/rev mozilla-central x000_revs_x000_added_x_copies 9f17a6fc04f9 2d37b966abed : 41 revs, 0.764805 s, 0.004988 s, -0.759817 s, × 0.0065, 121 µs/rev mozilla-central x000_revs_x000_added_x000_copies 7c97034feb78 4407bd0c6330 : 7839 revs, 1.161405 s, 0.063963 s, -1.097442 s, × 0.0550, 8 µs/rev mozilla-central x0000_revs_xx000_added_0_copies 9eec5917337d 67118cc6dcad : 615 revs, 6.816186 s, 0.026225 s, -6.789961 s, × 0.0038, 42 µs/rev mozilla-central x0000_revs_xx000_added_x000_copies f78c615a656c 96a38b690156 : 30263 revs, 3.374819 s, 0.201377 s, -3.173442 s, × 0.0596, 6 µs/rev mozilla-central x00000_revs_x0000_added_x0000_copies 6832ae71433c 4c222a1d9a00 : 153721 revs, 16.285469 s, 1.781383 s, -14.504086 s, × 0.1093, 11 µs/rev mozilla-central x00000_revs_x00000_added_x000_copies 76caed42cf7c 1daa622bbe42 : 204976 revs, 21.207733 s, 2.603867 s, -18.603866 s, × 0.1227, 12 µs/rev mozilla-try 
x_revs_x_added_0_copies aaf6dde0deb8 9790f499805a : 2 revs, 0.080843 s, 0.000845 s, -0.079998 s, × 0.0104, 422 µs/rev mozilla-try x_revs_x000_added_0_copies d8d0222927b4 5bb8ce8c7450 : 2 revs, 0.511068 s, 0.000862 s, -0.510206 s, × 0.0016, 430 µs/rev mozilla-try x_revs_x_added_x_copies 092fcca11bdb 936255a0384a : 4 revs, 0.021573 s, 0.000161 s, -0.021412 s, × 0.0074, 40 µs/rev mozilla-try x_revs_x00_added_x_copies b53d2fadbdb5 017afae788ec : 2 revs, 0.227726 s, 0.001163 s, -0.226563 s, × 0.0051, 581 µs/rev mozilla-try x_revs_x000_added_x000_copies 20408ad61ce5 6f0ee96e21ad : 1 revs, 1.120448 s, 0.032414 s, -1.088034 s, × 0.0289, 32381 µs/rev mozilla-try x_revs_x0000_added_x0000_copies effb563bb7e5 c07a39dc4e80 : 6 revs, 2.241713 s, 0.070149 s, -2.171564 s, × 0.0312, 11689 µs/rev mozilla-try x000_revs_xx00_added_0_copies 6100d773079a 04a55431795e : 1593 revs, 0.090633 s, 0.006526 s, -0.084107 s, × 0.0720, 4 µs/rev mozilla-try x000_revs_x000_added_x_copies 9f17a6fc04f9 2d37b966abed : 41 revs, 0.770403 s, 0.005187 s, -0.765216 s, × 0.0067, 126 µs/rev mozilla-try x000_revs_x000_added_x000_copies 1346fd0130e4 4c65cbdabc1f : 6657 revs, 1.184557 s, 0.065047 s, -1.119510 s, × 0.0549, 9 µs/rev mozilla-try x0000_revs_x_added_0_copies 63519bfd42ee a36a2a865d92 : 40314 revs, 0.085790 s, 0.301129 s, +0.215339 s, × 3.5100, 7 µs/rev mozilla-try x0000_revs_x_added_x_copies 9fe69ff0762d bcabf2a78927 : 38690 revs, 0.080616 s, 0.280683 s, +0.200067 s, × 3.4817, 7 µs/rev mozilla-try x0000_revs_xx000_added_x_copies 156f6e2674f2 4d0f2c178e66 : 8598 revs, 7.712554 s, 0.084897 s, -7.627657 s, × 0.0110, 9 µs/rev mozilla-try x0000_revs_xx000_added_0_copies 9eec5917337d 67118cc6dcad : 615 revs, 6.937294 s, 0.026620 s, -6.910674 s, × 0.0038, 43 µs/rev mozilla-try x0000_revs_xx000_added_x000_copies 89294cd501d9 7ccb2fc7ccb5 : 97052 revs, 7.712313 s, 1.529120 s, -6.183193 s, × 0.1982, 15 µs/rev mozilla-try x0000_revs_x0000_added_x0000_copies e928c65095ed e951f4ad123a : 52031 revs, 9.966910 s, 
0.738709 s, -9.228201 s, × 0.0741, 14 µs/rev mozilla-try x00000_revs_x_added_0_copies 6a320851d377 1ebb79acd503 : 363753 revs, 0.090397 s, 4.842699 s, +4.752302 s, × 53.571, 13 µs/rev mozilla-try x00000_revs_x00000_added_0_copies dc8a3ca7010e d16fde900c9c : 34414 revs, 27.817167 s, 0.596946 s, -27.220221 s, × 0.0214, 17 µs/rev mozilla-try x00000_revs_x_added_x_copies 5173c4b6f97c 95d83ee7242d : 362229 revs, 0.091305 s, 4.761732 s, +4.670427 s, × 52.151, 13 µs/rev mozilla-try x00000_revs_x000_added_x_copies 9126823d0e9c ca82787bb23c : 359344 revs, 0.231183 s, 4.733912 s, +4.502729 s, × 20.476, 13 µs/rev mozilla-try x00000_revs_x0000_added_x0000_copies 8d3fafa80d4b eb884023b810 : 192665 revs, 19.830617 s, 2.593410 s, -17.237207 s, × 0.1307, 13 µs/rev mozilla-try x00000_revs_x00000_added_x0000_copies 1b661134e2ca 1ae03d022d6d : 228985 revs, 21.743873 s, 41.041198 s, +19.297325 s, × 1.8874, 179 µs/rev mozilla-try x00000_revs_x00000_added_x000_copies 9b2a99adc05e 8e29777b48e6 : 382065 revs, 25.935037 s, 27.915689 s, +1.980652 s, × 1.0763, 73 µs/rev Differential Revision: https://phab.mercurial-scm.org/D9493

File last commit:

r44605:5e84a96d default
r46766:c6bc77f7 default
Show More
test_decompressor.py
1714 lines | 53.3 KiB | text/x-python | PythonLexer
import io
import os
import random
import struct
import sys
import tempfile
import unittest
import zstandard as zstd
from .common import (
generate_samples,
make_cffi,
NonClosingBytesIO,
OpCountingBytesIO,
TestCase,
)
# Python 2/3 compatibility shim: expose a uniform ``next`` callable that
# advances an iterator regardless of which protocol method the runtime uses.
if sys.version_info[0] >= 3:
    def next(it):
        # Python 3 iterator protocol.
        return it.__next__()
else:
    def next(it):
        # Python 2 iterator protocol.
        return it.next()
@make_cffi
class TestFrameHeaderSize(TestCase):
    """Exercise ``zstd.frame_header_size()`` on degenerate and valid inputs."""

    def test_empty(self):
        # An empty buffer cannot possibly contain a frame header.
        with self.assertRaisesRegex(
            zstd.ZstdError,
            "could not determine frame header size: Src size is incorrect",
        ):
            zstd.frame_header_size(b"")

    def test_too_small(self):
        # Fewer bytes than the minimum header probe is likewise rejected.
        with self.assertRaisesRegex(
            zstd.ZstdError,
            "could not determine frame header size: Src size is incorrect",
        ):
            zstd.frame_header_size(b"foob")

    def test_basic(self):
        # It doesn't matter that it isn't a valid frame; only enough
        # leading bytes are required for the size probe.
        self.assertEqual(zstd.frame_header_size(b"long enough but no magic"), 6)
@make_cffi
class TestFrameContentSize(TestCase):
    """Exercise ``zstd.frame_content_size()``.

    Bug fix: this class previously defined ``test_empty`` twice. The second
    definition silently shadowed the first, so the "empty input raises"
    case never executed. The round-trip variant is renamed
    ``test_empty_roundtrip`` so both tests run.
    """

    def test_empty(self):
        # Empty input: there is no header to read a content size from.
        with self.assertRaisesRegex(
            zstd.ZstdError, "error when determining content size"
        ):
            zstd.frame_content_size(b"")

    def test_too_small(self):
        # Input shorter than a frame header is rejected the same way.
        with self.assertRaisesRegex(
            zstd.ZstdError, "error when determining content size"
        ):
            zstd.frame_content_size(b"foob")

    def test_bad_frame(self):
        # Long enough, but not a real frame header.
        with self.assertRaisesRegex(
            zstd.ZstdError, "error when determining content size"
        ):
            zstd.frame_content_size(b"invalid frame header")

    def test_unknown(self):
        # A frame written without a content size reports -1 (unknown).
        cctx = zstd.ZstdCompressor(write_content_size=False)
        frame = cctx.compress(b"foobar")
        self.assertEqual(zstd.frame_content_size(frame), -1)

    def test_empty_roundtrip(self):
        # Renamed from a duplicate ``test_empty`` that shadowed the test
        # above: a frame of empty input declares a content size of 0.
        cctx = zstd.ZstdCompressor()
        frame = cctx.compress(b"")
        self.assertEqual(zstd.frame_content_size(frame), 0)

    def test_basic(self):
        # A default-compressed frame records the original length.
        cctx = zstd.ZstdCompressor()
        frame = cctx.compress(b"foobar")
        self.assertEqual(zstd.frame_content_size(frame), 6)
@make_cffi
class TestDecompressor(TestCase):
    """Basic ``ZstdDecompressor`` sanity checks."""

    def test_memory_size(self):
        # A freshly constructed decompressor should report a plausible,
        # non-trivial memory footprint.
        self.assertGreater(zstd.ZstdDecompressor().memory_size(), 100)
@make_cffi
class TestDecompressor_decompress(TestCase):
    """Tests for the one-shot ``ZstdDecompressor.decompress()`` API."""

    def test_empty_input(self):
        # No data at all: the frame header cannot be parsed.
        dctx = zstd.ZstdDecompressor()
        with self.assertRaisesRegex(
            zstd.ZstdError, "error determining content size from frame header"
        ):
            dctx.decompress(b"")

    def test_invalid_input(self):
        # Garbage bytes: same failure mode as empty input.
        dctx = zstd.ZstdDecompressor()
        with self.assertRaisesRegex(
            zstd.ZstdError, "error determining content size from frame header"
        ):
            dctx.decompress(b"foobar")

    def test_input_types(self):
        """Any buffer-protocol object should be accepted as input."""
        compressed = zstd.ZstdCompressor(level=1).compress(b"foo")
        mutable_array = bytearray(len(compressed))
        mutable_array[:] = compressed
        dctx = zstd.ZstdDecompressor()
        for source in (
            memoryview(compressed),
            bytearray(compressed),
            mutable_array,
        ):
            self.assertEqual(dctx.decompress(source), b"foo")

    def test_no_content_size_in_frame(self):
        # One-shot decompression needs the declared content size to
        # allocate the output buffer; refuse frames that omit it.
        frame = zstd.ZstdCompressor(write_content_size=False).compress(b"foobar")
        dctx = zstd.ZstdDecompressor()
        with self.assertRaisesRegex(
            zstd.ZstdError, "could not determine content size in frame header"
        ):
            dctx.decompress(frame)

    def test_content_size_present(self):
        # Default compressor writes a content size; round trip succeeds.
        frame = zstd.ZstdCompressor().compress(b"foobar")
        self.assertEqual(zstd.ZstdDecompressor().decompress(frame), b"foobar")

    def test_empty_roundtrip(self):
        # Empty payloads round-trip to empty bytes.
        frame = zstd.ZstdCompressor().compress(b"")
        self.assertEqual(zstd.ZstdDecompressor().decompress(frame), b"")

    def test_max_output_size(self):
        """Behavior of ``max_output_size`` around the exact payload size."""
        source = b"foobar" * 256
        compressed = zstd.ZstdCompressor(write_content_size=False).compress(
            source
        )
        dctx = zstd.ZstdDecompressor()
        # A buffer exactly the size of the input is sufficient.
        self.assertEqual(
            dctx.decompress(compressed, max_output_size=len(source)), source
        )
        # One byte short must fail.
        with self.assertRaisesRegex(
            zstd.ZstdError, "decompression error: did not decompress full frame"
        ):
            dctx.decompress(compressed, max_output_size=len(source) - 1)
        # One byte of slack works.
        self.assertEqual(
            dctx.decompress(compressed, max_output_size=len(source) + 1),
            source,
        )
        # So does a generously oversized buffer.
        self.assertEqual(
            dctx.decompress(compressed, max_output_size=len(source) * 64),
            source,
        )

    def test_stupidly_large_output_buffer(self):
        compressed = zstd.ZstdCompressor(write_content_size=False).compress(
            b"foobar" * 256
        )
        dctx = zstd.ZstdDecompressor()
        # Will get OverflowError on some Python distributions that can't
        # handle really large integers.
        with self.assertRaises((MemoryError, OverflowError)):
            dctx.decompress(compressed, max_output_size=2 ** 62)

    def test_dictionary(self):
        """Round trip using a trained dictionary."""
        samples = []
        for _ in range(128):
            samples.extend((b"foo" * 64, b"bar" * 64, b"foobar" * 64))
        d = zstd.train_dictionary(8192, samples)
        orig = b"foobar" * 16384
        compressed = zstd.ZstdCompressor(level=1, dict_data=d).compress(orig)
        self.assertEqual(
            zstd.ZstdDecompressor(dict_data=d).decompress(compressed), orig
        )

    def test_dictionary_multiple(self):
        """One dictionary-backed decompressor handles several frames."""
        samples = []
        for _ in range(128):
            samples.extend((b"foo" * 64, b"bar" * 64, b"foobar" * 64))
        d = zstd.train_dictionary(8192, samples)
        sources = (b"foobar" * 8192, b"foo" * 8192, b"bar" * 8192)
        cctx = zstd.ZstdCompressor(level=1, dict_data=d)
        frames = [cctx.compress(source) for source in sources]
        dctx = zstd.ZstdDecompressor(dict_data=d)
        for source, frame in zip(sources, frames):
            self.assertEqual(dctx.decompress(frame), source)

    def test_max_window_size(self):
        with open(__file__, "rb") as fh:
            source = fh.read()
        # If we write a content size, the decompressor engages single pass
        # mode and the window size doesn't come into play.
        frame = zstd.ZstdCompressor(write_content_size=False).compress(source)
        dctx = zstd.ZstdDecompressor(max_window_size=2 ** zstd.WINDOWLOG_MIN)
        with self.assertRaisesRegex(
            zstd.ZstdError,
            "decompression error: Frame requires too much memory",
        ):
            dctx.decompress(frame, max_output_size=len(source))
@make_cffi
class TestDecompressor_copy_stream(TestCase):
    """Tests for ``ZstdDecompressor.copy_stream()``."""

    def test_no_read(self):
        # A source without a read() method is rejected up front.
        dctx = zstd.ZstdDecompressor()
        with self.assertRaises(ValueError):
            dctx.copy_stream(object(), io.BytesIO())

    def test_no_write(self):
        # A destination without a write() method is rejected up front.
        dctx = zstd.ZstdDecompressor()
        with self.assertRaises(ValueError):
            dctx.copy_stream(io.BytesIO(), object())

    def test_empty(self):
        source = io.BytesIO()
        dest = io.BytesIO()
        # TODO should this raise an error?
        read_count, write_count = zstd.ZstdDecompressor().copy_stream(
            source, dest
        )
        self.assertEqual(read_count, 0)
        self.assertEqual(write_count, 0)
        self.assertEqual(dest.getvalue(), b"")

    def test_large_data(self):
        """Round trip ~4 MB of data through compress/decompress streams."""
        source = io.BytesIO()
        packer = struct.Struct(">B")
        for value in range(255):
            source.write(packer.pack(value) * 16384)
        source.seek(0)
        compressed = io.BytesIO()
        zstd.ZstdCompressor().copy_stream(source, compressed)
        compressed.seek(0)
        dest = io.BytesIO()
        read_count, write_count = zstd.ZstdDecompressor().copy_stream(
            compressed, dest
        )
        # Returned counters reflect compressed bytes in, plain bytes out.
        self.assertEqual(read_count, len(compressed.getvalue()))
        self.assertEqual(write_count, len(source.getvalue()))

    def test_read_write_size(self):
        """Honor read_size/write_size by counting individual I/O ops."""
        source = OpCountingBytesIO(
            zstd.ZstdCompressor().compress(b"foobarfoobar")
        )
        dest = OpCountingBytesIO()
        read_count, write_count = zstd.ZstdDecompressor().copy_stream(
            source, dest, read_size=1, write_size=1
        )
        self.assertEqual(read_count, len(source.getvalue()))
        self.assertEqual(write_count, len(b"foobarfoobar"))
        # One extra read() detects EOF.
        self.assertEqual(source._read_count, len(source.getvalue()) + 1)
        self.assertEqual(dest._write_count, len(dest.getvalue()))
@make_cffi
class TestDecompressor_stream_reader(TestCase):
def test_context_manager(self):
dctx = zstd.ZstdDecompressor()
with dctx.stream_reader(b"foo") as reader:
with self.assertRaisesRegex(
ValueError, "cannot __enter__ multiple times"
):
with reader as reader2:
pass
def test_not_implemented(self):
dctx = zstd.ZstdDecompressor()
with dctx.stream_reader(b"foo") as reader:
with self.assertRaises(io.UnsupportedOperation):
reader.readline()
with self.assertRaises(io.UnsupportedOperation):
reader.readlines()
with self.assertRaises(io.UnsupportedOperation):
iter(reader)
with self.assertRaises(io.UnsupportedOperation):
next(reader)
with self.assertRaises(io.UnsupportedOperation):
reader.write(b"foo")
with self.assertRaises(io.UnsupportedOperation):
reader.writelines([])
    def test_constant_methods(self):
        """State-reporting methods hold steady values on an open reader."""
        dctx = zstd.ZstdDecompressor()
        with dctx.stream_reader(b"foo") as reader:
            self.assertFalse(reader.closed)
            self.assertTrue(reader.readable())
            self.assertFalse(reader.writable())
            self.assertTrue(reader.seekable())
            self.assertFalse(reader.isatty())
            self.assertFalse(reader.closed)
            # flush() is a no-op on a read-only stream and returns None;
            # it must not close the reader.
            self.assertIsNone(reader.flush())
            self.assertFalse(reader.closed)
        # Exiting the context manager closes the reader.
        self.assertTrue(reader.closed)
    def test_read_closed(self):
        """read() on an explicitly closed reader raises ValueError."""
        dctx = zstd.ZstdDecompressor()
        with dctx.stream_reader(b"foo") as reader:
            reader.close()
            self.assertTrue(reader.closed)
            with self.assertRaisesRegex(ValueError, "stream is closed"):
                reader.read(1)
def test_read_sizes(self):
cctx = zstd.ZstdCompressor()
foo = cctx.compress(b"foo")
dctx = zstd.ZstdDecompressor()
with dctx.stream_reader(foo) as reader:
with self.assertRaisesRegex(
ValueError, "cannot read negative amounts less than -1"
):
reader.read(-2)
self.assertEqual(reader.read(0), b"")
self.assertEqual(reader.read(), b"foo")
def test_read_buffer(self):
cctx = zstd.ZstdCompressor()
source = b"".join([b"foo" * 60, b"bar" * 60, b"baz" * 60])
frame = cctx.compress(source)
dctx = zstd.ZstdDecompressor()
with dctx.stream_reader(frame) as reader:
self.assertEqual(reader.tell(), 0)
# We should get entire frame in one read.
result = reader.read(8192)
self.assertEqual(result, source)
self.assertEqual(reader.tell(), len(source))
# Read after EOF should return empty bytes.
self.assertEqual(reader.read(1), b"")
self.assertEqual(reader.tell(), len(result))
self.assertTrue(reader.closed)
def test_read_buffer_small_chunks(self):
cctx = zstd.ZstdCompressor()
source = b"".join([b"foo" * 60, b"bar" * 60, b"baz" * 60])
frame = cctx.compress(source)
dctx = zstd.ZstdDecompressor()
chunks = []
with dctx.stream_reader(frame, read_size=1) as reader:
while True:
chunk = reader.read(1)
if not chunk:
break
chunks.append(chunk)
self.assertEqual(reader.tell(), sum(map(len, chunks)))
self.assertEqual(b"".join(chunks), source)
def test_read_stream(self):
cctx = zstd.ZstdCompressor()
source = b"".join([b"foo" * 60, b"bar" * 60, b"baz" * 60])
frame = cctx.compress(source)
dctx = zstd.ZstdDecompressor()
with dctx.stream_reader(io.BytesIO(frame)) as reader:
self.assertEqual(reader.tell(), 0)
chunk = reader.read(8192)
self.assertEqual(chunk, source)
self.assertEqual(reader.tell(), len(source))
self.assertEqual(reader.read(1), b"")
self.assertEqual(reader.tell(), len(source))
self.assertFalse(reader.closed)
self.assertTrue(reader.closed)
def test_read_stream_small_chunks(self):
cctx = zstd.ZstdCompressor()
source = b"".join([b"foo" * 60, b"bar" * 60, b"baz" * 60])
frame = cctx.compress(source)
dctx = zstd.ZstdDecompressor()
chunks = []
with dctx.stream_reader(io.BytesIO(frame), read_size=1) as reader:
while True:
chunk = reader.read(1)
if not chunk:
break
chunks.append(chunk)
self.assertEqual(reader.tell(), sum(map(len, chunks)))
self.assertEqual(b"".join(chunks), source)
def test_read_after_exit(self):
cctx = zstd.ZstdCompressor()
frame = cctx.compress(b"foo" * 60)
dctx = zstd.ZstdDecompressor()
with dctx.stream_reader(frame) as reader:
while reader.read(16):
pass
self.assertTrue(reader.closed)
with self.assertRaisesRegex(ValueError, "stream is closed"):
reader.read(10)
    def test_illegal_seeks(self):
        """Seek restrictions: no negative targets, no rewinding, no SEEK_END."""
        cctx = zstd.ZstdCompressor()
        frame = cctx.compress(b"foo" * 60)
        dctx = zstd.ZstdDecompressor()
        with dctx.stream_reader(frame) as reader:
            with self.assertRaisesRegex(
                ValueError, "cannot seek to negative position"
            ):
                reader.seek(-1, os.SEEK_SET)
            # Advance one byte so the rewind attempts below actually go
            # backwards from the current position.
            reader.read(1)
            with self.assertRaisesRegex(
                ValueError, "cannot seek zstd decompression stream backwards"
            ):
                reader.seek(0, os.SEEK_SET)
            with self.assertRaisesRegex(
                ValueError, "cannot seek zstd decompression stream backwards"
            ):
                reader.seek(-1, os.SEEK_CUR)
            with self.assertRaisesRegex(
                ValueError,
                "zstd decompression streams cannot be seeked with SEEK_END",
            ):
                reader.seek(0, os.SEEK_END)
            reader.close()
            # Once closed, every seek fails regardless of the target.
            with self.assertRaisesRegex(ValueError, "stream is closed"):
                reader.seek(4, os.SEEK_SET)
            with self.assertRaisesRegex(ValueError, "stream is closed"):
                reader.seek(0)
def test_seek(self):
source = b"foobar" * 60
cctx = zstd.ZstdCompressor()
frame = cctx.compress(source)
dctx = zstd.ZstdDecompressor()
with dctx.stream_reader(frame) as reader:
reader.seek(3)
self.assertEqual(reader.read(3), b"bar")
reader.seek(4, os.SEEK_CUR)
self.assertEqual(reader.read(2), b"ar")
def test_no_context_manager(self):
    """A reader is usable without `with`; close() ends its life."""
    payload = b"foobar" * 60
    frame = zstd.ZstdCompressor().compress(payload)

    reader = zstd.ZstdDecompressor().stream_reader(frame)
    self.assertEqual(reader.read(6), b"foobar")
    self.assertEqual(reader.read(18), b"foobar" * 3)
    self.assertFalse(reader.closed)

    # Calling close prevents subsequent use.
    reader.close()
    self.assertTrue(reader.closed)

    with self.assertRaisesRegex(ValueError, "stream is closed"):
        reader.read(6)
def test_read_after_error(self):
    """Re-entering an exited reader reports a closed stream on read."""
    reader = zstd.ZstdDecompressor().stream_reader(io.BytesIO(b""))

    with reader:
        # A zero-length read on an empty source is permitted.
        reader.read(0)

    with reader:
        with self.assertRaisesRegex(ValueError, "stream is closed"):
            reader.read(100)
def test_partial_read(self):
    """Drain a ~1 MB frame in 8192-byte reads; the loop must terminate.

    Inspired by https://github.com/indygreg/python-zstandard/issues/71.
    """
    buffer = io.BytesIO()
    cctx = zstd.ZstdCompressor()
    writer = cctx.stream_writer(buffer)
    writer.write(bytearray(os.urandom(1000000)))
    writer.flush(zstd.FLUSH_FRAME)
    buffer.seek(0)

    reader = zstd.ZstdDecompressor().stream_reader(buffer)

    # An empty read signals end of stream.
    while reader.read(8192):
        pass
def test_read_multiple_frames(self):
    """Reads stop at frame boundaries unless read_across_frames=True."""
    cctx = zstd.ZstdCompressor()
    source = io.BytesIO()
    writer = cctx.stream_writer(source)
    writer.write(b"foo")
    writer.flush(zstd.FLUSH_FRAME)
    writer.write(b"bar")
    writer.flush(zstd.FLUSH_FRAME)

    dctx = zstd.ZstdDecompressor()

    # 2-byte reads walk through both frames, never crossing a boundary.
    reader = dctx.stream_reader(source.getvalue())
    self.assertEqual(reader.read(2), b"fo")
    self.assertEqual(reader.read(2), b"o")
    self.assertEqual(reader.read(2), b"ba")
    self.assertEqual(reader.read(2), b"r")

    source.seek(0)
    reader = dctx.stream_reader(source)
    self.assertEqual(reader.read(2), b"fo")
    self.assertEqual(reader.read(2), b"o")
    self.assertEqual(reader.read(2), b"ba")
    self.assertEqual(reader.read(2), b"r")

    # Larger read sizes still return at most one frame per call,
    # for both buffer-protocol and stream sources.
    for size in (3, 4, 128):
        reader = dctx.stream_reader(source.getvalue())
        self.assertEqual(reader.read(size), b"foo")
        self.assertEqual(reader.read(size), b"bar")

        source.seek(0)
        reader = dctx.stream_reader(source)
        self.assertEqual(reader.read(size), b"foo")
        self.assertEqual(reader.read(size), b"bar")

    # Now tests for reads spanning frames. A read smaller than the
    # combined payload still returns per-frame chunks...
    reader = dctx.stream_reader(source.getvalue(), read_across_frames=True)
    self.assertEqual(reader.read(3), b"foo")
    self.assertEqual(reader.read(3), b"bar")

    source.seek(0)
    reader = dctx.stream_reader(source, read_across_frames=True)
    self.assertEqual(reader.read(3), b"foo")
    self.assertEqual(reader.read(3), b"bar")

    # ...but a large enough read crosses the frame boundary.
    for size in (6, 7, 128):
        reader = dctx.stream_reader(source.getvalue(), read_across_frames=True)
        self.assertEqual(reader.read(size), b"foobar")

        source.seek(0)
        reader = dctx.stream_reader(source, read_across_frames=True)
        self.assertEqual(reader.read(size), b"foobar")
def test_readinto(self):
    """readinto() fills a writable buffer and returns the byte count."""
    frame = zstd.ZstdCompressor().compress(b"foo")
    dctx = zstd.ZstdDecompressor()

    # Attempting to readinto() a non-writable buffer fails.
    # The exact exception varies based on the backend.
    with self.assertRaises(Exception):
        dctx.stream_reader(frame).readinto(b"foobar")

    # readinto() with sufficiently large destination.
    dest = bytearray(1024)
    reader = dctx.stream_reader(frame)
    self.assertEqual(reader.readinto(dest), 3)
    self.assertEqual(dest[0:3], b"foo")

    # Stream exhausted: returns 0 and leaves the buffer untouched.
    self.assertEqual(reader.readinto(dest), 0)
    self.assertEqual(dest[0:3], b"foo")

    # readinto() with small reads.
    dest = bytearray(1024)
    reader = dctx.stream_reader(frame, read_size=1)
    self.assertEqual(reader.readinto(dest), 3)
    self.assertEqual(dest[0:3], b"foo")

    # Too small destination buffer is filled only partially.
    dest = bytearray(2)
    reader = dctx.stream_reader(frame)
    self.assertEqual(reader.readinto(dest), 2)
    self.assertEqual(dest[:], b"fo")
def test_readinto1(self):
    """readinto1() mirrors readinto() for these small inputs."""
    frame = zstd.ZstdCompressor().compress(b"foo")
    dctx = zstd.ZstdDecompressor()

    # Non-writable destination is rejected (exact exception varies by backend).
    with self.assertRaises(Exception):
        dctx.stream_reader(frame).readinto1(b"foobar")

    # Sufficiently large destination.
    dest = bytearray(1024)
    reader = dctx.stream_reader(frame)
    self.assertEqual(reader.readinto1(dest), 3)
    self.assertEqual(dest[0:3], b"foo")

    # Exhausted stream returns 0 and leaves the buffer alone.
    self.assertEqual(reader.readinto1(dest), 0)
    self.assertEqual(dest[0:3], b"foo")

    # readinto1() with small reads.
    dest = bytearray(1024)
    reader = dctx.stream_reader(frame, read_size=1)
    self.assertEqual(reader.readinto1(dest), 3)
    self.assertEqual(dest[0:3], b"foo")

    # Too small destination buffer is filled only partially.
    dest = bytearray(2)
    reader = dctx.stream_reader(frame)
    self.assertEqual(reader.readinto1(dest), 2)
    self.assertEqual(dest[:], b"fo")
def test_readall(self):
    """readall() returns the fully decompressed payload in one call."""
    frame = zstd.ZstdCompressor().compress(b"foo")
    reader = zstd.ZstdDecompressor().stream_reader(frame)

    self.assertEqual(reader.readall(), b"foo")
def test_read1(self):
    """read1() performs at most one read against the source stream."""
    frame = zstd.ZstdCompressor().compress(b"foo")
    dctx = zstd.ZstdDecompressor()

    source = OpCountingBytesIO(frame)
    reader = dctx.stream_reader(source)
    self.assertEqual(reader.read1(), b"foo")
    self.assertEqual(source._read_count, 1)

    source = OpCountingBytesIO(frame)
    reader = dctx.stream_reader(source)
    self.assertEqual(reader.read1(0), b"")
    self.assertEqual(reader.read1(2), b"fo")
    self.assertEqual(source._read_count, 1)
    self.assertEqual(reader.read1(1), b"o")
    self.assertEqual(source._read_count, 1)
    # Reading past the end costs exactly one more source read.
    self.assertEqual(reader.read1(1), b"")
    self.assertEqual(source._read_count, 2)
def test_read_lines(self):
    """The reader composes with io.TextIOWrapper for line-based access."""
    source = b"\n".join(
        ("line %d" % i).encode("ascii") for i in range(1024)
    )
    frame = zstd.ZstdCompressor().compress(source)
    dctx = zstd.ZstdDecompressor()

    # Direct iteration over the text wrapper.
    reader = dctx.stream_reader(frame)
    tr = io.TextIOWrapper(reader, encoding="utf-8")
    lines = [line.encode("utf-8") for line in tr]
    self.assertEqual(len(lines), 1024)
    self.assertEqual(b"".join(lines), source)

    # readlines().
    reader = dctx.stream_reader(frame)
    tr = io.TextIOWrapper(reader, encoding="utf-8")
    lines = tr.readlines()
    self.assertEqual(len(lines), 1024)
    self.assertEqual("".join(lines).encode("utf-8"), source)

    # readline() until exhaustion.
    reader = dctx.stream_reader(frame)
    tr = io.TextIOWrapper(reader, encoding="utf-8")
    lines = []
    while True:
        line = tr.readline()
        if not line:
            break
        lines.append(line.encode("utf-8"))
    self.assertEqual(len(lines), 1024)
    self.assertEqual(b"".join(lines), source)
@make_cffi
class TestDecompressor_decompressobj(TestCase):
    """Tests for ZstdDecompressor.decompressobj()."""

    def test_simple(self):
        """A single decompress() call round-trips; flush() is a no-op."""
        frame = zstd.ZstdCompressor(level=1).compress(b"foobar")

        dobj = zstd.ZstdDecompressor().decompressobj()
        self.assertEqual(dobj.decompress(frame), b"foobar")
        self.assertIsNone(dobj.flush())
        self.assertIsNone(dobj.flush(10))
        self.assertIsNone(dobj.flush(length=100))

    def test_input_types(self):
        """decompress() accepts any object exposing the buffer protocol."""
        compressed = zstd.ZstdCompressor(level=1).compress(b"foo")

        mutable_array = bytearray(len(compressed))
        mutable_array[:] = compressed

        dctx = zstd.ZstdDecompressor()

        for source in (
            memoryview(compressed),
            bytearray(compressed),
            mutable_array,
        ):
            dobj = dctx.decompressobj()

            # flush() before any input is a harmless no-op.
            self.assertIsNone(dobj.flush())
            self.assertIsNone(dobj.flush(10))
            self.assertIsNone(dobj.flush(length=100))
            self.assertEqual(dobj.decompress(source), b"foo")
            self.assertIsNone(dobj.flush())

    def test_reuse(self):
        """A decompressobj is single-use; a second operation raises."""
        frame = zstd.ZstdCompressor(level=1).compress(b"foobar")

        dobj = zstd.ZstdDecompressor().decompressobj()
        dobj.decompress(frame)

        with self.assertRaisesRegex(
            zstd.ZstdError, "cannot use a decompressobj"
        ):
            dobj.decompress(frame)
            self.assertIsNone(dobj.flush())

    def test_bad_write_size(self):
        """write_size must be a positive integer."""
        dctx = zstd.ZstdDecompressor()

        with self.assertRaisesRegex(ValueError, "write_size must be positive"):
            dctx.decompressobj(write_size=0)

    def test_write_size(self):
        """Every positive write_size yields the same decompressed output."""
        source = b"foo" * 64 + b"bar" * 128
        frame = zstd.ZstdCompressor(level=1).compress(source)

        dctx = zstd.ZstdDecompressor()

        for write_size in range(1, 129):
            dobj = dctx.decompressobj(write_size=write_size)
            self.assertEqual(dobj.decompress(frame), source)
def decompress_via_writer(data):
    """Decompress *data* by pushing it through a stream_writer.

    Returns the decompressed bytes.
    """
    sink = io.BytesIO()
    zstd.ZstdDecompressor().stream_writer(sink).write(data)
    return sink.getvalue()
@make_cffi
class TestDecompressor_stream_writer(TestCase):
    """Tests for ZstdDecompressor.stream_writer()."""

    def test_io_api(self):
        # The writer implements the io API surface: write-only and
        # non-seekable; every read/seek/truncate operation is rejected.
        buffer = io.BytesIO()
        dctx = zstd.ZstdDecompressor()
        writer = dctx.stream_writer(buffer)

        self.assertFalse(writer.closed)
        self.assertFalse(writer.isatty())
        self.assertFalse(writer.readable())

        with self.assertRaises(io.UnsupportedOperation):
            writer.readline()

        with self.assertRaises(io.UnsupportedOperation):
            writer.readline(42)

        with self.assertRaises(io.UnsupportedOperation):
            writer.readline(size=42)

        with self.assertRaises(io.UnsupportedOperation):
            writer.readlines()

        with self.assertRaises(io.UnsupportedOperation):
            writer.readlines(42)

        with self.assertRaises(io.UnsupportedOperation):
            writer.readlines(hint=42)

        with self.assertRaises(io.UnsupportedOperation):
            writer.seek(0)

        with self.assertRaises(io.UnsupportedOperation):
            writer.seek(10, os.SEEK_SET)

        self.assertFalse(writer.seekable())

        with self.assertRaises(io.UnsupportedOperation):
            writer.tell()

        with self.assertRaises(io.UnsupportedOperation):
            writer.truncate()

        with self.assertRaises(io.UnsupportedOperation):
            writer.truncate(42)

        with self.assertRaises(io.UnsupportedOperation):
            writer.truncate(size=42)

        self.assertTrue(writer.writable())

        with self.assertRaises(io.UnsupportedOperation):
            writer.writelines([])

        with self.assertRaises(io.UnsupportedOperation):
            writer.read()

        with self.assertRaises(io.UnsupportedOperation):
            writer.read(42)

        with self.assertRaises(io.UnsupportedOperation):
            writer.read(size=42)

        with self.assertRaises(io.UnsupportedOperation):
            writer.readall()

        with self.assertRaises(io.UnsupportedOperation):
            writer.readinto(None)

        with self.assertRaises(io.UnsupportedOperation):
            writer.fileno()

    def test_fileno_file(self):
        # fileno() is proxied through to the wrapped stream.
        with tempfile.TemporaryFile("wb") as tf:
            dctx = zstd.ZstdDecompressor()
            writer = dctx.stream_writer(tf)

            self.assertEqual(writer.fileno(), tf.fileno())

    def test_close(self):
        # Closing the writer also closes the inner stream, and any
        # further operation on the writer raises.
        foo = zstd.ZstdCompressor().compress(b"foo")

        buffer = NonClosingBytesIO()
        dctx = zstd.ZstdDecompressor()
        writer = dctx.stream_writer(buffer)

        writer.write(foo)
        self.assertFalse(writer.closed)
        self.assertFalse(buffer.closed)
        writer.close()
        self.assertTrue(writer.closed)
        self.assertTrue(buffer.closed)

        with self.assertRaisesRegex(ValueError, "stream is closed"):
            writer.write(b"")

        with self.assertRaisesRegex(ValueError, "stream is closed"):
            writer.flush()

        with self.assertRaisesRegex(ValueError, "stream is closed"):
            with writer:
                pass

        self.assertEqual(buffer.getvalue(), b"foo")

        # Context manager exit should close stream.
        buffer = NonClosingBytesIO()
        writer = dctx.stream_writer(buffer)

        with writer:
            writer.write(foo)

        self.assertTrue(writer.closed)
        self.assertEqual(buffer.getvalue(), b"foo")

    def test_flush(self):
        # flush() is forwarded to the wrapped stream each time.
        buffer = OpCountingBytesIO()
        dctx = zstd.ZstdDecompressor()
        writer = dctx.stream_writer(buffer)

        writer.flush()
        self.assertEqual(buffer._flush_count, 1)
        writer.flush()
        self.assertEqual(buffer._flush_count, 2)

    def test_empty_roundtrip(self):
        # An empty frame decompresses to empty bytes.
        cctx = zstd.ZstdCompressor()
        empty = cctx.compress(b"")
        self.assertEqual(decompress_via_writer(empty), b"")

    def test_input_types(self):
        # write() accepts any object exposing the buffer protocol.
        cctx = zstd.ZstdCompressor(level=1)
        compressed = cctx.compress(b"foo")

        mutable_array = bytearray(len(compressed))
        mutable_array[:] = compressed

        sources = [
            memoryview(compressed),
            bytearray(compressed),
            mutable_array,
        ]

        dctx = zstd.ZstdDecompressor()
        for source in sources:
            buffer = io.BytesIO()

            decompressor = dctx.stream_writer(buffer)
            decompressor.write(source)
            self.assertEqual(buffer.getvalue(), b"foo")

            buffer = NonClosingBytesIO()

            with dctx.stream_writer(buffer) as decompressor:
                # Default write() return is the count of decompressed
                # bytes emitted.
                self.assertEqual(decompressor.write(source), 3)

            self.assertEqual(buffer.getvalue(), b"foo")

            buffer = io.BytesIO()
            writer = dctx.stream_writer(buffer, write_return_read=True)
            # With write_return_read=True, write() returns input consumed.
            self.assertEqual(writer.write(source), len(source))
            self.assertEqual(buffer.getvalue(), b"foo")

    def test_large_roundtrip(self):
        # ~4 MB of varied data survives a compress/decompress round trip.
        chunks = []
        for i in range(255):
            chunks.append(struct.Struct(">B").pack(i) * 16384)
        orig = b"".join(chunks)
        cctx = zstd.ZstdCompressor()
        compressed = cctx.compress(orig)

        self.assertEqual(decompress_via_writer(compressed), orig)

    def test_multiple_calls(self):
        # Feeding the compressed frame in 8192-byte slices produces the
        # same output as a single write.
        chunks = []
        for i in range(255):
            for j in range(255):
                chunks.append(struct.Struct(">B").pack(j) * i)

        orig = b"".join(chunks)
        cctx = zstd.ZstdCompressor()
        compressed = cctx.compress(orig)

        buffer = NonClosingBytesIO()
        dctx = zstd.ZstdDecompressor()
        with dctx.stream_writer(buffer) as decompressor:
            pos = 0
            while pos < len(compressed):
                pos2 = pos + 8192
                decompressor.write(compressed[pos:pos2])
                pos += 8192
        self.assertEqual(buffer.getvalue(), orig)

        # Again with write_return_read=True
        buffer = io.BytesIO()
        writer = dctx.stream_writer(buffer, write_return_read=True)
        pos = 0
        while pos < len(compressed):
            pos2 = pos + 8192
            chunk = compressed[pos:pos2]
            self.assertEqual(writer.write(chunk), len(chunk))
            pos += 8192
        self.assertEqual(buffer.getvalue(), orig)

    def test_dictionary(self):
        # Round trip through a trained dictionary on both the compress
        # and decompress sides.
        samples = []
        for i in range(128):
            samples.append(b"foo" * 64)
            samples.append(b"bar" * 64)
            samples.append(b"foobar" * 64)

        d = zstd.train_dictionary(8192, samples)

        orig = b"foobar" * 16384
        buffer = NonClosingBytesIO()
        cctx = zstd.ZstdCompressor(dict_data=d)
        with cctx.stream_writer(buffer) as compressor:
            # Compressor buffers internally; nothing emitted yet.
            self.assertEqual(compressor.write(orig), 0)

        compressed = buffer.getvalue()
        buffer = io.BytesIO()

        dctx = zstd.ZstdDecompressor(dict_data=d)
        decompressor = dctx.stream_writer(buffer)
        self.assertEqual(decompressor.write(compressed), len(orig))
        self.assertEqual(buffer.getvalue(), orig)

        buffer = NonClosingBytesIO()

        with dctx.stream_writer(buffer) as decompressor:
            self.assertEqual(decompressor.write(compressed), len(orig))

        self.assertEqual(buffer.getvalue(), orig)

    def test_memory_size(self):
        # memory_size() reports a plausible (non-trivial) allocation.
        dctx = zstd.ZstdDecompressor()
        buffer = io.BytesIO()

        decompressor = dctx.stream_writer(buffer)
        size = decompressor.memory_size()
        self.assertGreater(size, 100000)

        with dctx.stream_writer(buffer) as decompressor:
            size = decompressor.memory_size()

        self.assertGreater(size, 100000)

    def test_write_size(self):
        # write_size=1 forces one downstream write per decompressed byte.
        source = zstd.ZstdCompressor().compress(b"foobarfoobar")
        dest = OpCountingBytesIO()
        dctx = zstd.ZstdDecompressor()
        with dctx.stream_writer(dest, write_size=1) as decompressor:
            s = struct.Struct(">B")
            for c in source:
                # NOTE(review): iterating bytes yields ints on Python 3 and
                # 1-char str on Python 2; this packs ints back to bytes.
                if not isinstance(c, str):
                    c = s.pack(c)
                decompressor.write(c)

        self.assertEqual(dest.getvalue(), b"foobarfoobar")
        self.assertEqual(dest._write_count, len(dest.getvalue()))
@make_cffi
class TestDecompressor_read_to_iter(TestCase):
    """Tests for ZstdDecompressor.read_to_iter()."""

    def test_type_validation(self):
        dctx = zstd.ZstdDecompressor()

        # Object with read() works.
        dctx.read_to_iter(io.BytesIO())

        # Buffer protocol works.
        dctx.read_to_iter(b"foobar")

        # Anything else is rejected once iteration begins.
        with self.assertRaisesRegex(
            ValueError, "must pass an object with a read"
        ):
            b"".join(dctx.read_to_iter(True))

    def test_empty_input(self):
        dctx = zstd.ZstdDecompressor()

        source = io.BytesIO()
        it = dctx.read_to_iter(source)
        # TODO this is arguably wrong. Should get an error about a missing frame.
        with self.assertRaises(StopIteration):
            next(it)

        it = dctx.read_to_iter(b"")
        with self.assertRaises(StopIteration):
            next(it)

    def test_invalid_input(self):
        # Garbage input raises on the first iteration, for both stream
        # and buffer-protocol sources.
        dctx = zstd.ZstdDecompressor()

        source = io.BytesIO(b"foobar")
        it = dctx.read_to_iter(source)
        with self.assertRaisesRegex(zstd.ZstdError, "Unknown frame descriptor"):
            next(it)

        it = dctx.read_to_iter(b"foobar")
        with self.assertRaisesRegex(zstd.ZstdError, "Unknown frame descriptor"):
            next(it)

    def test_empty_roundtrip(self):
        cctx = zstd.ZstdCompressor(level=1, write_content_size=False)
        empty = cctx.compress(b"")
        source = io.BytesIO(empty)
        source.seek(0)

        dctx = zstd.ZstdDecompressor()
        it = dctx.read_to_iter(source)

        # No chunks should be emitted since there is no data.
        with self.assertRaises(StopIteration):
            next(it)

        # Again for good measure.
        with self.assertRaises(StopIteration):
            next(it)

    def test_skip_bytes_too_large(self):
        dctx = zstd.ZstdDecompressor()

        with self.assertRaisesRegex(
            ValueError, "skip_bytes must be smaller than read_size"
        ):
            b"".join(dctx.read_to_iter(b"", skip_bytes=1, read_size=1))

        with self.assertRaisesRegex(
            ValueError, "skip_bytes larger than first input chunk"
        ):
            b"".join(dctx.read_to_iter(b"foobar", skip_bytes=10))

    def test_skip_bytes(self):
        # skip_bytes discards a leading prefix before decompression starts.
        cctx = zstd.ZstdCompressor(write_content_size=False)
        compressed = cctx.compress(b"foobar")

        dctx = zstd.ZstdDecompressor()
        output = b"".join(dctx.read_to_iter(b"hdr" + compressed, skip_bytes=3))
        self.assertEqual(output, b"foobar")

    def test_large_output(self):
        # Output one byte larger than the recommended output size is
        # delivered in exactly two chunks.
        source = io.BytesIO()
        source.write(b"f" * zstd.DECOMPRESSION_RECOMMENDED_OUTPUT_SIZE)
        source.write(b"o")
        source.seek(0)

        cctx = zstd.ZstdCompressor(level=1)
        compressed = io.BytesIO(cctx.compress(source.getvalue()))
        compressed.seek(0)

        dctx = zstd.ZstdDecompressor()
        it = dctx.read_to_iter(compressed)

        chunks = []
        chunks.append(next(it))
        chunks.append(next(it))

        with self.assertRaises(StopIteration):
            next(it)

        decompressed = b"".join(chunks)
        self.assertEqual(decompressed, source.getvalue())

        # And again with buffer protocol.
        it = dctx.read_to_iter(compressed.getvalue())
        chunks = []
        chunks.append(next(it))
        chunks.append(next(it))

        with self.assertRaises(StopIteration):
            next(it)

        decompressed = b"".join(chunks)
        self.assertEqual(decompressed, source.getvalue())

    @unittest.skipUnless(
        "ZSTD_SLOW_TESTS" in os.environ, "ZSTD_SLOW_TESTS not set"
    )
    def test_large_input(self):
        # Build random input until both the compressed and raw sizes
        # exceed the recommended buffer sizes, then verify chunked
        # iteration.  NOTE(review): `bytes` shadows the builtin here;
        # kept as-is.
        bytes = list(struct.Struct(">B").pack(i) for i in range(256))
        compressed = NonClosingBytesIO()
        input_size = 0
        cctx = zstd.ZstdCompressor(level=1)
        with cctx.stream_writer(compressed) as compressor:
            while True:
                compressor.write(random.choice(bytes))
                input_size += 1

                have_compressed = (
                    len(compressed.getvalue())
                    > zstd.DECOMPRESSION_RECOMMENDED_INPUT_SIZE
                )
                have_raw = (
                    input_size > zstd.DECOMPRESSION_RECOMMENDED_OUTPUT_SIZE * 2
                )
                if have_compressed and have_raw:
                    break

        compressed = io.BytesIO(compressed.getvalue())
        self.assertGreater(
            len(compressed.getvalue()),
            zstd.DECOMPRESSION_RECOMMENDED_INPUT_SIZE,
        )

        dctx = zstd.ZstdDecompressor()
        it = dctx.read_to_iter(compressed)

        chunks = []
        chunks.append(next(it))
        chunks.append(next(it))
        chunks.append(next(it))

        with self.assertRaises(StopIteration):
            next(it)

        decompressed = b"".join(chunks)
        self.assertEqual(len(decompressed), input_size)

        # And again with buffer protocol.
        it = dctx.read_to_iter(compressed.getvalue())

        chunks = []
        chunks.append(next(it))
        chunks.append(next(it))
        chunks.append(next(it))

        with self.assertRaises(StopIteration):
            next(it)

        decompressed = b"".join(chunks)
        self.assertEqual(len(decompressed), input_size)

    def test_interesting(self):
        # Found this edge case via fuzzing.
        cctx = zstd.ZstdCompressor(level=1)

        source = io.BytesIO()

        compressed = NonClosingBytesIO()
        with cctx.stream_writer(compressed) as compressor:
            for i in range(256):
                chunk = b"\0" * 1024
                compressor.write(chunk)
                source.write(chunk)

        dctx = zstd.ZstdDecompressor()

        simple = dctx.decompress(
            compressed.getvalue(), max_output_size=len(source.getvalue())
        )
        self.assertEqual(simple, source.getvalue())

        compressed = io.BytesIO(compressed.getvalue())
        streamed = b"".join(dctx.read_to_iter(compressed))
        self.assertEqual(streamed, source.getvalue())

    def test_read_write_size(self):
        # read_size=1/write_size=1 means every emitted chunk is a single
        # byte and every source byte is read individually.
        source = OpCountingBytesIO(
            zstd.ZstdCompressor().compress(b"foobarfoobar")
        )
        dctx = zstd.ZstdDecompressor()
        for chunk in dctx.read_to_iter(source, read_size=1, write_size=1):
            self.assertEqual(len(chunk), 1)

        self.assertEqual(source._read_count, len(source.getvalue()))

    def test_magic_less(self):
        # Frames written without the zstd magic need a decompressor
        # configured with FORMAT_ZSTD1_MAGICLESS.
        params = zstd.CompressionParameters.from_level(
            1, format=zstd.FORMAT_ZSTD1_MAGICLESS
        )
        cctx = zstd.ZstdCompressor(compression_params=params)
        frame = cctx.compress(b"foobar")

        # The standard zstd magic number must be absent.
        self.assertNotEqual(frame[0:4], b"\x28\xb5\x2f\xfd")

        dctx = zstd.ZstdDecompressor()
        with self.assertRaisesRegex(
            zstd.ZstdError, "error determining content size from frame header"
        ):
            dctx.decompress(frame)

        dctx = zstd.ZstdDecompressor(format=zstd.FORMAT_ZSTD1_MAGICLESS)
        res = b"".join(dctx.read_to_iter(frame))
        self.assertEqual(res, b"foobar")
@make_cffi
class TestDecompressor_content_dict_chain(TestCase):
    """Tests for ZstdDecompressor.decompress_content_dict_chain()."""

    def test_bad_inputs_simple(self):
        dctx = zstd.ZstdDecompressor()

        # Only a list of frames is accepted.
        with self.assertRaises(TypeError):
            dctx.decompress_content_dict_chain(b"foo")

        with self.assertRaises(TypeError):
            dctx.decompress_content_dict_chain((b"foo", b"bar"))

        with self.assertRaisesRegex(ValueError, "empty input chain"):
            dctx.decompress_content_dict_chain([])

        with self.assertRaisesRegex(ValueError, "chunk 0 must be bytes"):
            dctx.decompress_content_dict_chain([u"foo"])

        with self.assertRaisesRegex(ValueError, "chunk 0 must be bytes"):
            dctx.decompress_content_dict_chain([True])

        with self.assertRaisesRegex(
            ValueError, "chunk 0 is too small to contain a zstd frame"
        ):
            dctx.decompress_content_dict_chain([zstd.FRAME_HEADER])

        with self.assertRaisesRegex(
            ValueError, "chunk 0 is not a valid zstd frame"
        ):
            dctx.decompress_content_dict_chain([b"foo" * 8])

        # Frames without an embedded content size are rejected.
        no_size = zstd.ZstdCompressor(write_content_size=False).compress(
            b"foo" * 64
        )

        with self.assertRaisesRegex(
            ValueError, "chunk 0 missing content size in frame"
        ):
            dctx.decompress_content_dict_chain([no_size])

        # Corrupt first frame.
        frame = zstd.ZstdCompressor().compress(b"foo" * 64)
        frame = frame[0:12] + frame[15:]
        with self.assertRaisesRegex(
            zstd.ZstdError, "chunk 0 did not decompress full frame"
        ):
            dctx.decompress_content_dict_chain([frame])

    def test_bad_subsequent_input(self):
        # The same per-chunk validation applies to chunks after the first.
        initial = zstd.ZstdCompressor().compress(b"foo" * 64)

        dctx = zstd.ZstdDecompressor()

        with self.assertRaisesRegex(ValueError, "chunk 1 must be bytes"):
            dctx.decompress_content_dict_chain([initial, u"foo"])

        with self.assertRaisesRegex(ValueError, "chunk 1 must be bytes"):
            dctx.decompress_content_dict_chain([initial, None])

        with self.assertRaisesRegex(
            ValueError, "chunk 1 is too small to contain a zstd frame"
        ):
            dctx.decompress_content_dict_chain([initial, zstd.FRAME_HEADER])

        with self.assertRaisesRegex(
            ValueError, "chunk 1 is not a valid zstd frame"
        ):
            dctx.decompress_content_dict_chain([initial, b"foo" * 8])

        no_size = zstd.ZstdCompressor(write_content_size=False).compress(
            b"foo" * 64
        )

        with self.assertRaisesRegex(
            ValueError, "chunk 1 missing content size in frame"
        ):
            dctx.decompress_content_dict_chain([initial, no_size])

        # Corrupt second frame.
        cctx = zstd.ZstdCompressor(
            dict_data=zstd.ZstdCompressionDict(b"foo" * 64)
        )
        frame = cctx.compress(b"bar" * 64)
        frame = frame[0:12] + frame[15:]

        with self.assertRaisesRegex(
            zstd.ZstdError, "chunk 1 did not decompress full frame"
        ):
            dctx.decompress_content_dict_chain([initial, frame])

    def test_simple(self):
        original = [
            b"foo" * 64,
            b"foobar" * 64,
            b"baz" * 64,
            b"foobaz" * 64,
            b"foobarbaz" * 64,
        ]

        chunks = []
        chunks.append(zstd.ZstdCompressor().compress(original[0]))
        # Each subsequent chunk is compressed using the previous
        # plaintext as its dictionary.
        for i, chunk in enumerate(original[1:]):
            d = zstd.ZstdCompressionDict(original[i])
            cctx = zstd.ZstdCompressor(dict_data=d)
            chunks.append(cctx.compress(chunk))

        # Decompressing a chain of length i yields the i-th plaintext.
        for i in range(1, len(original)):
            chain = chunks[0:i]
            expected = original[i - 1]
            dctx = zstd.ZstdDecompressor()
            decompressed = dctx.decompress_content_dict_chain(chain)
            self.assertEqual(decompressed, expected)
# TODO enable for CFFI
class TestDecompressor_multi_decompress_to_buffer(TestCase):
    """Tests for ZstdDecompressor.multi_decompress_to_buffer().

    The API is not available on every backend, so each test skips when
    the method is absent.
    """

    def test_invalid_inputs(self):
        dctx = zstd.ZstdDecompressor()

        if not hasattr(dctx, "multi_decompress_to_buffer"):
            self.skipTest("multi_decompress_to_buffer not available")

        with self.assertRaises(TypeError):
            dctx.multi_decompress_to_buffer(True)

        with self.assertRaises(TypeError):
            dctx.multi_decompress_to_buffer((1, 2))

        with self.assertRaisesRegex(
            TypeError, "item 0 not a bytes like object"
        ):
            dctx.multi_decompress_to_buffer([u"foo"])

        with self.assertRaisesRegex(
            ValueError, "could not determine decompressed size of item 0"
        ):
            dctx.multi_decompress_to_buffer([b"foobarbaz"])

    def test_list_input(self):
        cctx = zstd.ZstdCompressor()

        original = [b"foo" * 4, b"bar" * 6]
        frames = [cctx.compress(d) for d in original]

        dctx = zstd.ZstdDecompressor()

        if not hasattr(dctx, "multi_decompress_to_buffer"):
            self.skipTest("multi_decompress_to_buffer not available")

        result = dctx.multi_decompress_to_buffer(frames)

        self.assertEqual(len(result), len(frames))
        self.assertEqual(result.size(), sum(map(len, original)))

        for i, data in enumerate(original):
            self.assertEqual(result[i].tobytes(), data)

        # Segments are laid out contiguously in the result buffer.
        self.assertEqual(result[0].offset, 0)
        self.assertEqual(len(result[0]), 12)
        self.assertEqual(result[1].offset, 12)
        self.assertEqual(len(result[1]), 18)

    def test_list_input_frame_sizes(self):
        cctx = zstd.ZstdCompressor()

        original = [b"foo" * 4, b"bar" * 6, b"baz" * 8]
        frames = [cctx.compress(d) for d in original]
        # Explicit decompressed sizes: one native-order 64-bit value per frame.
        sizes = struct.pack("=" + "Q" * len(original), *map(len, original))

        dctx = zstd.ZstdDecompressor()

        if not hasattr(dctx, "multi_decompress_to_buffer"):
            self.skipTest("multi_decompress_to_buffer not available")

        result = dctx.multi_decompress_to_buffer(
            frames, decompressed_sizes=sizes
        )

        self.assertEqual(len(result), len(frames))
        self.assertEqual(result.size(), sum(map(len, original)))

        for i, data in enumerate(original):
            self.assertEqual(result[i].tobytes(), data)

    def test_buffer_with_segments_input(self):
        cctx = zstd.ZstdCompressor()

        original = [b"foo" * 4, b"bar" * 6]
        frames = [cctx.compress(d) for d in original]

        dctx = zstd.ZstdDecompressor()

        if not hasattr(dctx, "multi_decompress_to_buffer"):
            self.skipTest("multi_decompress_to_buffer not available")

        # Segment table: (offset, length) pairs for each frame.
        segments = struct.pack(
            "=QQQQ", 0, len(frames[0]), len(frames[0]), len(frames[1])
        )
        b = zstd.BufferWithSegments(b"".join(frames), segments)

        result = dctx.multi_decompress_to_buffer(b)

        self.assertEqual(len(result), len(frames))
        self.assertEqual(result[0].offset, 0)
        self.assertEqual(len(result[0]), 12)
        self.assertEqual(result[1].offset, 12)
        self.assertEqual(len(result[1]), 18)

    def test_buffer_with_segments_sizes(self):
        # Frames carry no content size, so sizes are supplied explicitly.
        cctx = zstd.ZstdCompressor(write_content_size=False)
        original = [b"foo" * 4, b"bar" * 6, b"baz" * 8]
        frames = [cctx.compress(d) for d in original]
        sizes = struct.pack("=" + "Q" * len(original), *map(len, original))

        dctx = zstd.ZstdDecompressor()

        if not hasattr(dctx, "multi_decompress_to_buffer"):
            self.skipTest("multi_decompress_to_buffer not available")

        # Segment table: (offset, length) pairs for each frame.
        segments = struct.pack(
            "=QQQQQQ",
            0,
            len(frames[0]),
            len(frames[0]),
            len(frames[1]),
            len(frames[0]) + len(frames[1]),
            len(frames[2]),
        )
        b = zstd.BufferWithSegments(b"".join(frames), segments)

        result = dctx.multi_decompress_to_buffer(b, decompressed_sizes=sizes)

        self.assertEqual(len(result), len(frames))
        self.assertEqual(result.size(), sum(map(len, original)))

        for i, data in enumerate(original):
            self.assertEqual(result[i].tobytes(), data)

    def test_buffer_with_segments_collection_input(self):
        cctx = zstd.ZstdCompressor()

        original = [
            b"foo0" * 2,
            b"foo1" * 3,
            b"foo2" * 4,
            b"foo3" * 5,
            b"foo4" * 6,
        ]

        if not hasattr(cctx, "multi_compress_to_buffer"):
            self.skipTest("multi_compress_to_buffer not available")

        frames = cctx.multi_compress_to_buffer(original)

        # Check round trip.
        dctx = zstd.ZstdDecompressor()

        decompressed = dctx.multi_decompress_to_buffer(frames, threads=3)

        self.assertEqual(len(decompressed), len(original))

        for i, data in enumerate(original):
            self.assertEqual(data, decompressed[i].tobytes())

        # And a manual mode.
        b = b"".join([frames[0].tobytes(), frames[1].tobytes()])
        b1 = zstd.BufferWithSegments(
            b,
            struct.pack(
                "=QQQQ", 0, len(frames[0]), len(frames[0]), len(frames[1])
            ),
        )

        b = b"".join(
            [frames[2].tobytes(), frames[3].tobytes(), frames[4].tobytes()]
        )
        b2 = zstd.BufferWithSegments(
            b,
            struct.pack(
                "=QQQQQQ",
                0,
                len(frames[2]),
                len(frames[2]),
                len(frames[3]),
                len(frames[2]) + len(frames[3]),
                len(frames[4]),
            ),
        )

        c = zstd.BufferWithSegmentsCollection(b1, b2)

        dctx = zstd.ZstdDecompressor()
        decompressed = dctx.multi_decompress_to_buffer(c)

        self.assertEqual(len(decompressed), 5)
        for i in range(5):
            self.assertEqual(decompressed[i].tobytes(), original[i])

    def test_dict(self):
        # Round trip through a trained dictionary.
        d = zstd.train_dictionary(16384, generate_samples(), k=64, d=16)

        cctx = zstd.ZstdCompressor(dict_data=d, level=1)
        frames = [cctx.compress(s) for s in generate_samples()]

        dctx = zstd.ZstdDecompressor(dict_data=d)

        if not hasattr(dctx, "multi_decompress_to_buffer"):
            self.skipTest("multi_decompress_to_buffer not available")

        result = dctx.multi_decompress_to_buffer(frames)

        self.assertEqual([o.tobytes() for o in result], generate_samples())

    def test_multiple_threads(self):
        cctx = zstd.ZstdCompressor()

        frames = []
        frames.extend(cctx.compress(b"x" * 64) for i in range(256))
        frames.extend(cctx.compress(b"y" * 64) for i in range(256))

        dctx = zstd.ZstdDecompressor()

        if not hasattr(dctx, "multi_decompress_to_buffer"):
            self.skipTest("multi_decompress_to_buffer not available")

        # threads=-1 lets the backend pick the thread count.
        result = dctx.multi_decompress_to_buffer(frames, threads=-1)

        self.assertEqual(len(result), len(frames))
        self.assertEqual(result.size(), 2 * 64 * 256)
        self.assertEqual(result[0].tobytes(), b"x" * 64)
        self.assertEqual(result[256].tobytes(), b"y" * 64)

    def test_item_failure(self):
        # A corrupt frame mid-batch reports the failing item index,
        # both single- and multi-threaded.
        cctx = zstd.ZstdCompressor()
        frames = [cctx.compress(b"x" * 128), cctx.compress(b"y" * 128)]

        frames[1] = frames[1][0:15] + b"extra" + frames[1][15:]

        dctx = zstd.ZstdDecompressor()

        if not hasattr(dctx, "multi_decompress_to_buffer"):
            self.skipTest("multi_decompress_to_buffer not available")

        with self.assertRaisesRegex(
            zstd.ZstdError,
            "error decompressing item 1: ("
            "Corrupted block|"
            "Destination buffer is too small)",
        ):
            dctx.multi_decompress_to_buffer(frames)

        with self.assertRaisesRegex(
            zstd.ZstdError,
            "error decompressing item 1: ("
            "Corrupted block|"
            "Destination buffer is too small)",
        ):
            dctx.multi_decompress_to_buffer(frames, threads=2)