/**
* Copyright (c) 2018-present, Gregory Szorc
* All rights reserved.
*
* This software may be modified and distributed under the terms
* of the BSD license. See the LICENSE file for details.
*/
#include "python-zstandard.h"
extern PyObject* ZstdError;
PyDoc_STRVAR(ZstdCompressionChunkerIterator__doc__,
"Iterator of output chunks from ZstdCompressionChunker.\n"
);
static void ZstdCompressionChunkerIterator_dealloc(ZstdCompressionChunkerIterator* self) {
Py_XDECREF(self->chunker);
PyObject_Del(self);
}
static PyObject* ZstdCompressionChunkerIterator_iter(PyObject* self) {
Py_INCREF(self);
return self;
}
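/* Drives the chunker state machine: consumes any pending input, emits a
 * full output chunk whenever the output buffer fills, and in flush/finish
 * mode drains whatever data remains. Returning NULL without setting an
 * exception signals StopIteration to the interpreter. */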
static PyObject* ZstdCompressionChunkerIterator_iternext(ZstdCompressionChunkerIterator* self) {
size_t zresult;
PyObject* chunk;
ZstdCompressionChunker* chunker = self->chunker;
ZSTD_EndDirective zFlushMode;
if (self->mode != compressionchunker_mode_normal && chunker->input.pos != chunker->input.size) {
PyErr_SetString(ZstdError, "input should have been fully consumed before calling flush() or finish()");
return NULL;
}
if (chunker->finished) {
return NULL;
}
/* If we have data left in the input, consume it. */
while (chunker->input.pos < chunker->input.size) {
Py_BEGIN_ALLOW_THREADS
zresult = ZSTD_compressStream2(chunker->compressor->cctx, &chunker->output,
&chunker->input, ZSTD_e_continue);
Py_END_ALLOW_THREADS
/* Input is fully consumed. */
if (chunker->input.pos == chunker->input.size) {
chunker->input.src = NULL;
chunker->input.pos = 0;
chunker->input.size = 0;
PyBuffer_Release(&chunker->inBuffer);
}
if (ZSTD_isError(zresult)) {
PyErr_Format(ZstdError, "zstd compress error: %s", ZSTD_getErrorName(zresult));
return NULL;
}
/* If it produced a full output chunk, emit it. */
if (chunker->output.pos == chunker->output.size) {
chunk = PyBytes_FromStringAndSize(chunker->output.dst, chunker->output.pos);
if (!chunk) {
return NULL;
}
chunker->output.pos = 0;
return chunk;
}
/* Else continue to compress available input data. */
}
/* We also need this here for the special case of an empty input buffer. */
if (chunker->input.pos == chunker->input.size) {
chunker->input.src = NULL;
chunker->input.pos = 0;
chunker->input.size = 0;
PyBuffer_Release(&chunker->inBuffer);
}
/* No more input data. A partial chunk may be in chunker->output.
* If we're in normal compression mode, we're done. Otherwise if we're in
* flush or finish mode, we need to emit what data remains.
*/
if (self->mode == compressionchunker_mode_normal) {
/* We don't need to set StopIteration. */
return NULL;
}
if (self->mode == compressionchunker_mode_flush) {
zFlushMode = ZSTD_e_flush;
}
else if (self->mode == compressionchunker_mode_finish) {
zFlushMode = ZSTD_e_end;
}
else {
PyErr_SetString(ZstdError, "unhandled compression mode; this should never happen");
return NULL;
}
Py_BEGIN_ALLOW_THREADS
zresult = ZSTD_compressStream2(chunker->compressor->cctx, &chunker->output,
&chunker->input, zFlushMode);
Py_END_ALLOW_THREADS
if (ZSTD_isError(zresult)) {
PyErr_Format(ZstdError, "zstd compress error: %s",
ZSTD_getErrorName(zresult));
return NULL;
}
if (!zresult && chunker->output.pos == 0) {
return NULL;
}
chunk = PyBytes_FromStringAndSize(chunker->output.dst, chunker->output.pos);
if (!chunk) {
return NULL;
}
chunker->output.pos = 0;
if (!zresult && self->mode == compressionchunker_mode_finish) {
chunker->finished = 1;
}
return chunk;
}
PyTypeObject ZstdCompressionChunkerIteratorType = {
PyVarObject_HEAD_INIT(NULL, 0)
"zstd.ZstdCompressionChunkerIterator", /* tp_name */
sizeof(ZstdCompressionChunkerIterator), /* tp_basicsize */
0, /* tp_itemsize */
(destructor)ZstdCompressionChunkerIterator_dealloc, /* tp_dealloc */
0, /* tp_print */
0, /* tp_getattr */
0, /* tp_setattr */
0, /* tp_compare */
0, /* tp_repr */
0, /* tp_as_number */
0, /* tp_as_sequence */
0, /* tp_as_mapping */
0, /* tp_hash */
0, /* tp_call */
0, /* tp_str */
0, /* tp_getattro */
0, /* tp_setattro */
0, /* tp_as_buffer */
Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE, /* tp_flags */
ZstdCompressionChunkerIterator__doc__, /* tp_doc */
0, /* tp_traverse */
0, /* tp_clear */
0, /* tp_richcompare */
0, /* tp_weaklistoffset */
ZstdCompressionChunkerIterator_iter, /* tp_iter */
(iternextfunc)ZstdCompressionChunkerIterator_iternext, /* tp_iternext */
0, /* tp_methods */
0, /* tp_members */
0, /* tp_getset */
0, /* tp_base */
0, /* tp_dict */
0, /* tp_descr_get */
0, /* tp_descr_set */
0, /* tp_dictoffset */
0, /* tp_init */
0, /* tp_alloc */
PyType_GenericNew, /* tp_new */
};
PyDoc_STRVAR(ZstdCompressionChunker__doc__,
"Compress chunks iteratively into exact chunk sizes.\n"
);
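/*
 * Rough Python-level usage sketch (assumes the public python-zstandard API,
 * where ZstdCompressor.chunker() constructs this object; the chunk_size
 * value below is illustrative):
 *
 *   cctx = zstd.ZstdCompressor()
 *   chunker = cctx.chunker(chunk_size=32768)
 *   for chunk in chunker.compress(data):
 *       write(chunk)
 *   for chunk in chunker.finish():
 *       write(chunk)
 */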
static void ZstdCompressionChunker_dealloc(ZstdCompressionChunker* self) {
PyBuffer_Release(&self->inBuffer);
self->input.src = NULL;
PyMem_Free(self->output.dst);
self->output.dst = NULL;
Py_XDECREF(self->compressor);
PyObject_Del(self);
}
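/* chunker.compress(data): record a view of the input buffer on the chunker
 * and return an iterator that yields completed chunks in normal mode. */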
static ZstdCompressionChunkerIterator* ZstdCompressionChunker_compress(ZstdCompressionChunker* self, PyObject* args, PyObject* kwargs) {
static char* kwlist[] = {
"data",
NULL
};
ZstdCompressionChunkerIterator* result;
if (self->finished) {
PyErr_SetString(ZstdError, "cannot call compress() after compression finished");
return NULL;
}
if (self->inBuffer.obj) {
PyErr_SetString(ZstdError,
"cannot perform operation before consuming output from previous operation");
return NULL;
}
#if PY_MAJOR_VERSION >= 3
if (!PyArg_ParseTupleAndKeywords(args, kwargs, "y*:compress",
#else
if (!PyArg_ParseTupleAndKeywords(args, kwargs, "s*:compress",
#endif
kwlist, &self->inBuffer)) {
return NULL;
}
if (!PyBuffer_IsContiguous(&self->inBuffer, 'C') || self->inBuffer.ndim > 1) {
PyErr_SetString(PyExc_ValueError,
"data buffer should be contiguous and have at most one dimension");
PyBuffer_Release(&self->inBuffer);
return NULL;
}
result = (ZstdCompressionChunkerIterator*)PyObject_CallObject((PyObject*)&ZstdCompressionChunkerIteratorType, NULL);
if (!result) {
PyBuffer_Release(&self->inBuffer);
return NULL;
}
self->input.src = self->inBuffer.buf;
self->input.size = self->inBuffer.len;
self->input.pos = 0;
result->chunker = self;
Py_INCREF(result->chunker);
result->mode = compressionchunker_mode_normal;
return result;
}
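/* chunker.finish(): return an iterator that drains remaining buffered data
 * and ends the zstd frame (ZSTD_e_end in the iterator). */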
static ZstdCompressionChunkerIterator* ZstdCompressionChunker_finish(ZstdCompressionChunker* self) {
ZstdCompressionChunkerIterator* result;
if (self->finished) {
PyErr_SetString(ZstdError, "cannot call finish() after compression finished");
return NULL;
}
if (self->inBuffer.obj) {
PyErr_SetString(ZstdError,
"cannot call finish() before consuming output from previous operation");
return NULL;
}
result = (ZstdCompressionChunkerIterator*)PyObject_CallObject((PyObject*)&ZstdCompressionChunkerIteratorType, NULL);
if (!result) {
return NULL;
}
result->chunker = self;
Py_INCREF(result->chunker);
result->mode = compressionchunker_mode_finish;
return result;
}
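/* chunker.flush(): return an iterator that flushes buffered data without
 * ending the frame (ZSTD_e_flush in the iterator). */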
static ZstdCompressionChunkerIterator* ZstdCompressionChunker_flush(ZstdCompressionChunker* self, PyObject* args, PyObject* kwargs) {
ZstdCompressionChunkerIterator* result;
if (self->finished) {
PyErr_SetString(ZstdError, "cannot call flush() after compression finished");
return NULL;
}
if (self->inBuffer.obj) {
PyErr_SetString(ZstdError,
"cannot call flush() before consuming output from previous operation");
return NULL;
}
result = (ZstdCompressionChunkerIterator*)PyObject_CallObject((PyObject*)&ZstdCompressionChunkerIteratorType, NULL);
if (!result) {
return NULL;
}
result->chunker = self;
Py_INCREF(result->chunker);
result->mode = compressionchunker_mode_flush;
return result;
}
static PyMethodDef ZstdCompressionChunker_methods[] = {
{ "compress", (PyCFunction)ZstdCompressionChunker_compress, METH_VARARGS | METH_KEYWORDS,
PyDoc_STR("compress data") },
{ "finish", (PyCFunction)ZstdCompressionChunker_finish, METH_NOARGS,
PyDoc_STR("finish compression operation") },
{ "flush", (PyCFunction)ZstdCompressionChunker_flush, METH_VARARGS | METH_KEYWORDS,
PyDoc_STR("finish compression operation") },
{ NULL, NULL }
};
PyTypeObject ZstdCompressionChunkerType = {
PyVarObject_HEAD_INIT(NULL, 0)
"zstd.ZstdCompressionChunkerType", /* tp_name */
sizeof(ZstdCompressionChunker), /* tp_basicsize */
0, /* tp_itemsize */
(destructor)ZstdCompressionChunker_dealloc, /* tp_dealloc */
0, /* tp_print */
0, /* tp_getattr */
0, /* tp_setattr */
0, /* tp_compare */
0, /* tp_repr */
0, /* tp_as_number */
0, /* tp_as_sequence */
0, /* tp_as_mapping */
0, /* tp_hash */
0, /* tp_call */
0, /* tp_str */
0, /* tp_getattro */
0, /* tp_setattro */
0, /* tp_as_buffer */
Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE, /* tp_flags */
ZstdCompressionChunker__doc__, /* tp_doc */
0, /* tp_traverse */
0, /* tp_clear */
0, /* tp_richcompare */
0, /* tp_weaklistoffset */
0, /* tp_iter */
0, /* tp_iternext */
ZstdCompressionChunker_methods, /* tp_methods */
0, /* tp_members */
0, /* tp_getset */
0, /* tp_base */
0, /* tp_dict */
0, /* tp_descr_get */
0, /* tp_descr_set */
0, /* tp_dictoffset */
0, /* tp_init */
0, /* tp_alloc */
PyType_GenericNew, /* tp_new */
};
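/* Called during module initialization to ready both extension types. */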
void compressionchunker_module_init(PyObject* module) {
Py_TYPE(&ZstdCompressionChunkerIteratorType) = &PyType_Type;
if (PyType_Ready(&ZstdCompressionChunkerIteratorType) < 0) {
return;
}
Py_TYPE(&ZstdCompressionChunkerType) = &PyType_Type;
if (PyType_Ready(&ZstdCompressionChunkerType) < 0) {
return;
}
}