dirstate-entry: add a `state` property (and use it)...
marmoute
r48301:769037a2 default
mercurial/cext/parsers.c
@@ -1,795 +1,805 @@
/*
 parsers.c - efficient content parsing

 Copyright 2008 Olivia Mackall <olivia@selenic.com> and others

 This software may be used and distributed according to the terms of
 the GNU General Public License, incorporated herein by reference.
*/

#define PY_SSIZE_T_CLEAN
#include <Python.h>
#include <ctype.h>
#include <stddef.h>
#include <string.h>

#include "bitmanipulation.h"
#include "charencode.h"
#include "util.h"

#ifdef IS_PY3K
/* The mapping of Python types is meant to be temporary to get Python
 * 3 to compile. We should remove this once Python 3 support is fully
 * supported and proper types are used in the extensions themselves. */
#define PyInt_Check PyLong_Check
#define PyInt_FromLong PyLong_FromLong
#define PyInt_FromSsize_t PyLong_FromSsize_t
#define PyInt_AsLong PyLong_AsLong
#endif

static const char *const versionerrortext = "Python minor version mismatch";

static PyObject *dict_new_presized(PyObject *self, PyObject *args)
{
        Py_ssize_t expected_size;

        if (!PyArg_ParseTuple(args, "n:make_presized_dict", &expected_size)) {
                return NULL;
        }

        return _dict_new_presized(expected_size);
}

static inline dirstateTupleObject *make_dirstate_tuple(char state, int mode,
                                                       int size, int mtime)
{
        dirstateTupleObject *t =
            PyObject_New(dirstateTupleObject, &dirstateTupleType);
        if (!t) {
                return NULL;
        }
        t->state = state;
        t->mode = mode;
        t->size = size;
        t->mtime = mtime;
        return t;
}

static PyObject *dirstate_tuple_new(PyTypeObject *subtype, PyObject *args,
                                    PyObject *kwds)
{
        /* We do all the initialization here and not a tp_init function because
         * dirstate_tuple is immutable. */
        dirstateTupleObject *t;
        char state;
        int size, mode, mtime;
        if (!PyArg_ParseTuple(args, "ciii", &state, &mode, &size, &mtime)) {
                return NULL;
        }

        t = (dirstateTupleObject *)subtype->tp_alloc(subtype, 1);
        if (!t) {
                return NULL;
        }
        t->state = state;
        t->mode = mode;
        t->size = size;
        t->mtime = mtime;

        return (PyObject *)t;
}

static void dirstate_tuple_dealloc(PyObject *o)
{
        PyObject_Del(o);
}

static Py_ssize_t dirstate_tuple_length(PyObject *o)
{
        return 4;
}

static PyObject *dirstate_tuple_item(PyObject *o, Py_ssize_t i)
{
        dirstateTupleObject *t = (dirstateTupleObject *)o;
        switch (i) {
        case 0:
                return PyBytes_FromStringAndSize(&t->state, 1);
        case 1:
                return PyInt_FromLong(t->mode);
        case 2:
                return PyInt_FromLong(t->size);
        case 3:
                return PyInt_FromLong(t->mtime);
        default:
                PyErr_SetString(PyExc_IndexError, "index out of range");
                return NULL;
        }
}

static PySequenceMethods dirstate_tuple_sq = {
        dirstate_tuple_length, /* sq_length */
        0,                     /* sq_concat */
        0,                     /* sq_repeat */
        dirstate_tuple_item,   /* sq_item */
        0,                     /* sq_ass_item */
        0,                     /* sq_contains */
        0,                     /* sq_inplace_concat */
        0                      /* sq_inplace_repeat */
};

static PyObject *dirstatetuple_v1_state(dirstateTupleObject *self)
{
        return PyBytes_FromStringAndSize(&self->state, 1);
};

static PyObject *dirstatetuple_v1_mode(dirstateTupleObject *self)
{
        return PyInt_FromLong(self->mode);
};

static PyObject *dirstatetuple_v1_size(dirstateTupleObject *self)
{
        return PyInt_FromLong(self->size);
};

static PyObject *dirstatetuple_v1_mtime(dirstateTupleObject *self)
{
        return PyInt_FromLong(self->mtime);
};

static PyMethodDef dirstatetuple_methods[] = {
        {"v1_state", (PyCFunction)dirstatetuple_v1_state, METH_NOARGS,
         "return a \"state\" suitable for v1 serialization"},
        {"v1_mode", (PyCFunction)dirstatetuple_v1_mode, METH_NOARGS,
         "return a \"mode\" suitable for v1 serialization"},
        {"v1_size", (PyCFunction)dirstatetuple_v1_size, METH_NOARGS,
         "return a \"size\" suitable for v1 serialization"},
        {"v1_mtime", (PyCFunction)dirstatetuple_v1_mtime, METH_NOARGS,
         "return a \"mtime\" suitable for v1 serialization"},
        {NULL} /* Sentinel */
};

+static PyObject *dirstatetuple_get_state(dirstateTupleObject *self)
+{
+        return PyBytes_FromStringAndSize(&self->state, 1);
+};
+
+static PyGetSetDef dirstatetuple_getset[] = {
+        {"state", (getter)dirstatetuple_get_state, NULL, "state", NULL},
+        {NULL} /* Sentinel */
+};
+
PyTypeObject dirstateTupleType = {
        PyVarObject_HEAD_INIT(NULL, 0) /* header */
        "dirstate_tuple",                   /* tp_name */
        sizeof(dirstateTupleObject),        /* tp_basicsize */
        0,                                  /* tp_itemsize */
        (destructor)dirstate_tuple_dealloc, /* tp_dealloc */
        0,                                  /* tp_print */
        0,                                  /* tp_getattr */
        0,                                  /* tp_setattr */
        0,                                  /* tp_compare */
        0,                                  /* tp_repr */
        0,                                  /* tp_as_number */
        &dirstate_tuple_sq,                 /* tp_as_sequence */
        0,                                  /* tp_as_mapping */
        0,                                  /* tp_hash */
        0,                                  /* tp_call */
        0,                                  /* tp_str */
        0,                                  /* tp_getattro */
        0,                                  /* tp_setattro */
        0,                                  /* tp_as_buffer */
        Py_TPFLAGS_DEFAULT,                 /* tp_flags */
        "dirstate tuple",                   /* tp_doc */
        0,                                  /* tp_traverse */
        0,                                  /* tp_clear */
        0,                                  /* tp_richcompare */
        0,                                  /* tp_weaklistoffset */
        0,                                  /* tp_iter */
        0,                                  /* tp_iternext */
        dirstatetuple_methods,              /* tp_methods */
        0,                                  /* tp_members */
-        0,                                  /* tp_getset */
+        dirstatetuple_getset,               /* tp_getset */
        0,                                  /* tp_base */
        0,                                  /* tp_dict */
        0,                                  /* tp_descr_get */
        0,                                  /* tp_descr_set */
        0,                                  /* tp_dictoffset */
        0,                                  /* tp_init */
        0,                                  /* tp_alloc */
        dirstate_tuple_new,                 /* tp_new */
};

static PyObject *parse_dirstate(PyObject *self, PyObject *args)
{
        PyObject *dmap, *cmap, *parents = NULL, *ret = NULL;
        PyObject *fname = NULL, *cname = NULL, *entry = NULL;
        char state, *cur, *str, *cpos;
        int mode, size, mtime;
        unsigned int flen, pos = 40;
        Py_ssize_t len = 40;
        Py_ssize_t readlen;

        if (!PyArg_ParseTuple(
                args, PY23("O!O!s#:parse_dirstate", "O!O!y#:parse_dirstate"),
                &PyDict_Type, &dmap, &PyDict_Type, &cmap, &str, &readlen)) {
                goto quit;
        }

        len = readlen;

        /* read parents */
        if (len < 40) {
                PyErr_SetString(PyExc_ValueError,
                                "too little data for parents");
                goto quit;
        }

        parents = Py_BuildValue(PY23("s#s#", "y#y#"), str, (Py_ssize_t)20,
                                str + 20, (Py_ssize_t)20);
        if (!parents) {
                goto quit;
        }

        /* read filenames */
        while (pos >= 40 && pos < len) {
                if (pos + 17 > len) {
                        PyErr_SetString(PyExc_ValueError,
                                        "overflow in dirstate");
                        goto quit;
                }
                cur = str + pos;
                /* unpack header */
                state = *cur;
                mode = getbe32(cur + 1);
                size = getbe32(cur + 5);
                mtime = getbe32(cur + 9);
                flen = getbe32(cur + 13);
                pos += 17;
                cur += 17;
                if (flen > len - pos) {
                        PyErr_SetString(PyExc_ValueError,
                                        "overflow in dirstate");
                        goto quit;
                }

                entry =
                    (PyObject *)make_dirstate_tuple(state, mode, size, mtime);
                cpos = memchr(cur, 0, flen);
                if (cpos) {
                        fname = PyBytes_FromStringAndSize(cur, cpos - cur);
                        cname = PyBytes_FromStringAndSize(
                            cpos + 1, flen - (cpos - cur) - 1);
                        if (!fname || !cname ||
                            PyDict_SetItem(cmap, fname, cname) == -1 ||
                            PyDict_SetItem(dmap, fname, entry) == -1) {
                                goto quit;
                        }
                        Py_DECREF(cname);
                } else {
                        fname = PyBytes_FromStringAndSize(cur, flen);
                        if (!fname ||
                            PyDict_SetItem(dmap, fname, entry) == -1) {
                                goto quit;
                        }
                }
                Py_DECREF(fname);
                Py_DECREF(entry);
                fname = cname = entry = NULL;
                pos += flen;
        }

        ret = parents;
        Py_INCREF(ret);
quit:
        Py_XDECREF(fname);
        Py_XDECREF(cname);
        Py_XDECREF(entry);
        Py_XDECREF(parents);
        return ret;
}

/*
 * Build a set of non-normal and other parent entries from the dirstate dmap
 */
static PyObject *nonnormalotherparententries(PyObject *self, PyObject *args)
{
        PyObject *dmap, *fname, *v;
        PyObject *nonnset = NULL, *otherpset = NULL, *result = NULL;
        Py_ssize_t pos;

        if (!PyArg_ParseTuple(args, "O!:nonnormalentries", &PyDict_Type,
                              &dmap)) {
                goto bail;
        }

        nonnset = PySet_New(NULL);
        if (nonnset == NULL) {
                goto bail;
        }

        otherpset = PySet_New(NULL);
        if (otherpset == NULL) {
                goto bail;
        }

        pos = 0;
        while (PyDict_Next(dmap, &pos, &fname, &v)) {
                dirstateTupleObject *t;
                if (!dirstate_tuple_check(v)) {
                        PyErr_SetString(PyExc_TypeError,
                                        "expected a dirstate tuple");
                        goto bail;
                }
                t = (dirstateTupleObject *)v;

                if (t->state == 'n' && t->size == -2) {
                        if (PySet_Add(otherpset, fname) == -1) {
                                goto bail;
                        }
                }

                if (t->state == 'n' && t->mtime != -1) {
                        continue;
                }
                if (PySet_Add(nonnset, fname) == -1) {
                        goto bail;
                }
        }

        result = Py_BuildValue("(OO)", nonnset, otherpset);
        if (result == NULL) {
                goto bail;
        }
        Py_DECREF(nonnset);
        Py_DECREF(otherpset);
        return result;
bail:
        Py_XDECREF(nonnset);
        Py_XDECREF(otherpset);
        Py_XDECREF(result);
        return NULL;
}

/*
 * Efficiently pack a dirstate object into its on-disk format.
 */
static PyObject *pack_dirstate(PyObject *self, PyObject *args)
{
        PyObject *packobj = NULL;
        PyObject *map, *copymap, *pl, *mtime_unset = NULL;
        Py_ssize_t nbytes, pos, l;
        PyObject *k, *v = NULL, *pn;
        char *p, *s;
        int now;

        if (!PyArg_ParseTuple(args, "O!O!O!i:pack_dirstate", &PyDict_Type, &map,
                              &PyDict_Type, &copymap, &PyTuple_Type, &pl,
                              &now)) {
                return NULL;
        }

        if (PyTuple_Size(pl) != 2) {
                PyErr_SetString(PyExc_TypeError, "expected 2-element tuple");
                return NULL;
        }

        /* Figure out how much we need to allocate. */
        for (nbytes = 40, pos = 0; PyDict_Next(map, &pos, &k, &v);) {
                PyObject *c;
                if (!PyBytes_Check(k)) {
                        PyErr_SetString(PyExc_TypeError, "expected string key");
                        goto bail;
                }
                nbytes += PyBytes_GET_SIZE(k) + 17;
                c = PyDict_GetItem(copymap, k);
                if (c) {
                        if (!PyBytes_Check(c)) {
                                PyErr_SetString(PyExc_TypeError,
                                                "expected string key");
                                goto bail;
                        }
                        nbytes += PyBytes_GET_SIZE(c) + 1;
                }
        }

        packobj = PyBytes_FromStringAndSize(NULL, nbytes);
        if (packobj == NULL) {
                goto bail;
        }

        p = PyBytes_AS_STRING(packobj);

        pn = PyTuple_GET_ITEM(pl, 0);
        if (PyBytes_AsStringAndSize(pn, &s, &l) == -1 || l != 20) {
                PyErr_SetString(PyExc_TypeError, "expected a 20-byte hash");
                goto bail;
        }
        memcpy(p, s, l);
        p += 20;
        pn = PyTuple_GET_ITEM(pl, 1);
        if (PyBytes_AsStringAndSize(pn, &s, &l) == -1 || l != 20) {
                PyErr_SetString(PyExc_TypeError, "expected a 20-byte hash");
                goto bail;
        }
        memcpy(p, s, l);
        p += 20;

        for (pos = 0; PyDict_Next(map, &pos, &k, &v);) {
                dirstateTupleObject *tuple;
                char state;
                int mode, size, mtime;
                Py_ssize_t len, l;
                PyObject *o;
                char *t;

                if (!dirstate_tuple_check(v)) {
                        PyErr_SetString(PyExc_TypeError,
                                        "expected a dirstate tuple");
                        goto bail;
                }
                tuple = (dirstateTupleObject *)v;

                state = tuple->state;
                mode = tuple->mode;
                size = tuple->size;
                mtime = tuple->mtime;
                if (state == 'n' && mtime == now) {
                        /* See pure/parsers.py:pack_dirstate for why we do
                         * this. */
                        mtime = -1;
                        mtime_unset = (PyObject *)make_dirstate_tuple(
                            state, mode, size, mtime);
                        if (!mtime_unset) {
                                goto bail;
                        }
                        if (PyDict_SetItem(map, k, mtime_unset) == -1) {
                                goto bail;
                        }
                        Py_DECREF(mtime_unset);
                        mtime_unset = NULL;
                }
                *p++ = state;
                putbe32((uint32_t)mode, p);
                putbe32((uint32_t)size, p + 4);
                putbe32((uint32_t)mtime, p + 8);
                t = p + 12;
                p += 16;
                len = PyBytes_GET_SIZE(k);
                memcpy(p, PyBytes_AS_STRING(k), len);
                p += len;
                o = PyDict_GetItem(copymap, k);
                if (o) {
                        *p++ = '\0';
                        l = PyBytes_GET_SIZE(o);
                        memcpy(p, PyBytes_AS_STRING(o), l);
                        p += l;
                        len += l + 1;
                }
                putbe32((uint32_t)len, t);
        }

        pos = p - PyBytes_AS_STRING(packobj);
        if (pos != nbytes) {
                PyErr_Format(PyExc_SystemError, "bad dirstate size: %ld != %ld",
                             (long)pos, (long)nbytes);
                goto bail;
        }

        return packobj;
bail:
        Py_XDECREF(mtime_unset);
        Py_XDECREF(packobj);
        Py_XDECREF(v);
        return NULL;
}

#define BUMPED_FIX 1
#define USING_SHA_256 2
#define FM1_HEADER_SIZE (4 + 8 + 2 + 2 + 1 + 1 + 1)

static PyObject *readshas(const char *source, unsigned char num,
                          Py_ssize_t hashwidth)
{
        int i;
        PyObject *list = PyTuple_New(num);
        if (list == NULL) {
                return NULL;
        }
        for (i = 0; i < num; i++) {
                PyObject *hash = PyBytes_FromStringAndSize(source, hashwidth);
                if (hash == NULL) {
                        Py_DECREF(list);
                        return NULL;
                }
                PyTuple_SET_ITEM(list, i, hash);
                source += hashwidth;
        }
        return list;
}

static PyObject *fm1readmarker(const char *databegin, const char *dataend,
                               uint32_t *msize)
{
        const char *data = databegin;
        const char *meta;

        double mtime;
        int16_t tz;
        uint16_t flags;
        unsigned char nsuccs, nparents, nmetadata;
        Py_ssize_t hashwidth = 20;

        PyObject *prec = NULL, *parents = NULL, *succs = NULL;
        PyObject *metadata = NULL, *ret = NULL;
        int i;

        if (data + FM1_HEADER_SIZE > dataend) {
                goto overflow;
        }

        *msize = getbe32(data);
        data += 4;
        mtime = getbefloat64(data);
        data += 8;
        tz = getbeint16(data);
        data += 2;
        flags = getbeuint16(data);
        data += 2;

        if (flags & USING_SHA_256) {
                hashwidth = 32;
        }

        nsuccs = (unsigned char)(*data++);
        nparents = (unsigned char)(*data++);
        nmetadata = (unsigned char)(*data++);

        if (databegin + *msize > dataend) {
                goto overflow;
        }
        dataend = databegin + *msize; /* narrow down to marker size */

        if (data + hashwidth > dataend) {
                goto overflow;
        }
        prec = PyBytes_FromStringAndSize(data, hashwidth);
        data += hashwidth;
        if (prec == NULL) {
                goto bail;
        }

        if (data + nsuccs * hashwidth > dataend) {
                goto overflow;
        }
        succs = readshas(data, nsuccs, hashwidth);
        if (succs == NULL) {
                goto bail;
        }
        data += nsuccs * hashwidth;

        if (nparents == 1 || nparents == 2) {
                if (data + nparents * hashwidth > dataend) {
                        goto overflow;
                }
                parents = readshas(data, nparents, hashwidth);
                if (parents == NULL) {
                        goto bail;
                }
                data += nparents * hashwidth;
        } else {
                parents = Py_None;
                Py_INCREF(parents);
        }

        if (data + 2 * nmetadata > dataend) {
                goto overflow;
        }
        meta = data + (2 * nmetadata);
        metadata = PyTuple_New(nmetadata);
        if (metadata == NULL) {
                goto bail;
        }
        for (i = 0; i < nmetadata; i++) {
                PyObject *tmp, *left = NULL, *right = NULL;
                Py_ssize_t leftsize = (unsigned char)(*data++);
                Py_ssize_t rightsize = (unsigned char)(*data++);
                if (meta + leftsize + rightsize > dataend) {
                        goto overflow;
                }
                left = PyBytes_FromStringAndSize(meta, leftsize);
                meta += leftsize;
                right = PyBytes_FromStringAndSize(meta, rightsize);
                meta += rightsize;
                tmp = PyTuple_New(2);
                if (!left || !right || !tmp) {
                        Py_XDECREF(left);
                        Py_XDECREF(right);
                        Py_XDECREF(tmp);
                        goto bail;
                }
                PyTuple_SET_ITEM(tmp, 0, left);
                PyTuple_SET_ITEM(tmp, 1, right);
                PyTuple_SET_ITEM(metadata, i, tmp);
        }
        ret = Py_BuildValue("(OOHO(di)O)", prec, succs, flags, metadata, mtime,
                            (int)tz * 60, parents);
        goto bail; /* return successfully */

overflow:
        PyErr_SetString(PyExc_ValueError, "overflow in obsstore");
bail:
        Py_XDECREF(prec);
        Py_XDECREF(succs);
        Py_XDECREF(metadata);
        Py_XDECREF(parents);
        return ret;
}

static PyObject *fm1readmarkers(PyObject *self, PyObject *args)
{
        const char *data, *dataend;
        Py_ssize_t datalen, offset, stop;
        PyObject *markers = NULL;

        if (!PyArg_ParseTuple(args, PY23("s#nn", "y#nn"), &data, &datalen,
                              &offset, &stop)) {
                return NULL;
        }
        if (offset < 0) {
                PyErr_SetString(PyExc_ValueError,
                                "invalid negative offset in fm1readmarkers");
                return NULL;
        }
        if (stop > datalen) {
                PyErr_SetString(
                    PyExc_ValueError,
                    "stop longer than data length in fm1readmarkers");
                return NULL;
        }
        dataend = data + datalen;
        data += offset;
        markers = PyList_New(0);
        if (!markers) {
                return NULL;
        }
        while (offset < stop) {
                uint32_t msize;
                int error;
                PyObject *record = fm1readmarker(data, dataend, &msize);
                if (!record) {
                        goto bail;
                }
                error = PyList_Append(markers, record);
                Py_DECREF(record);
                if (error) {
                        goto bail;
                }
                data += msize;
                offset += msize;
        }
        return markers;
bail:
        Py_DECREF(markers);
        return NULL;
}

static char parsers_doc[] = "Efficient content parsing.";

PyObject *encodedir(PyObject *self, PyObject *args);
PyObject *pathencode(PyObject *self, PyObject *args);
PyObject *lowerencode(PyObject *self, PyObject *args);
PyObject *parse_index2(PyObject *self, PyObject *args, PyObject *kwargs);

static PyMethodDef methods[] = {
    {"pack_dirstate", pack_dirstate, METH_VARARGS, "pack a dirstate\n"},
    {"nonnormalotherparententries", nonnormalotherparententries, METH_VARARGS,
     "create a set containing non-normal and other parent entries of given "
     "dirstate\n"},
    {"parse_dirstate", parse_dirstate, METH_VARARGS, "parse a dirstate\n"},
    {"parse_index2", (PyCFunction)parse_index2, METH_VARARGS | METH_KEYWORDS,
     "parse a revlog index\n"},
    {"isasciistr", isasciistr, METH_VARARGS, "check if an ASCII string\n"},
    {"asciilower", asciilower, METH_VARARGS, "lowercase an ASCII string\n"},
    {"asciiupper", asciiupper, METH_VARARGS, "uppercase an ASCII string\n"},
    {"dict_new_presized", dict_new_presized, METH_VARARGS,
     "construct a dict with an expected size\n"},
    {"make_file_foldmap", make_file_foldmap, METH_VARARGS,
     "make file foldmap\n"},
    {"jsonescapeu8fast", jsonescapeu8fast, METH_VARARGS,
     "escape a UTF-8 byte string to JSON (fast path)\n"},
    {"encodedir", encodedir, METH_VARARGS, "encodedir a path\n"},
    {"pathencode", pathencode, METH_VARARGS, "fncache-encode a path\n"},
    {"lowerencode", lowerencode, METH_VARARGS, "lower-encode a path\n"},
    {"fm1readmarkers", fm1readmarkers, METH_VARARGS,
     "parse v1 obsolete markers\n"},
    {NULL, NULL}};

void dirs_module_init(PyObject *mod);
void manifest_module_init(PyObject *mod);
void revlog_module_init(PyObject *mod);

static const int version = 20;

static void module_init(PyObject *mod)
{
        PyObject *capsule = NULL;
        PyModule_AddIntConstant(mod, "version", version);

        /* This module constant has two purposes. First, it lets us unit test
         * the ImportError raised without hard-coding any error text. This
         * means we can change the text in the future without breaking tests,
         * even across changesets without a recompile. Second, its presence
         * can be used to determine whether the version-checking logic is
         * present, which also helps in testing across changesets without a
         * recompile. Note that this means the pure-Python version of parsers
         * should not have this module constant. */
        PyModule_AddStringConstant(mod, "versionerrortext", versionerrortext);

        dirs_module_init(mod);
        manifest_module_init(mod);
        revlog_module_init(mod);

        capsule = PyCapsule_New(
            make_dirstate_tuple,
            "mercurial.cext.parsers.make_dirstate_tuple_CAPI", NULL);
        if (capsule != NULL)
                PyModule_AddObject(mod, "make_dirstate_tuple_CAPI", capsule);

        if (PyType_Ready(&dirstateTupleType) < 0) {
                return;
        }
        Py_INCREF(&dirstateTupleType);
        PyModule_AddObject(mod, "dirstatetuple",
                           (PyObject *)&dirstateTupleType);
}

static int check_python_version(void)
{
        PyObject *sys = PyImport_ImportModule("sys"), *ver;
        long hexversion;
        if (!sys) {
                return -1;
        }
        ver = PyObject_GetAttrString(sys, "hexversion");
        Py_DECREF(sys);
        if (!ver) {
                return -1;
        }
        hexversion = PyInt_AsLong(ver);
        Py_DECREF(ver);
        /* sys.hexversion is a 32-bit number by default, so the -1 case
         * should only occur in unusual circumstances (e.g. if sys.hexversion
         * is manually set to an invalid value). */
        if ((hexversion == -1) || (hexversion >> 16 != PY_VERSION_HEX >> 16)) {
                PyErr_Format(PyExc_ImportError,
                             "%s: The Mercurial extension "
                             "modules were compiled with Python " PY_VERSION
                             ", but "
                             "Mercurial is currently using Python with "
                             "sys.hexversion=%ld: "
                             "Python %s\n at: %s",
                             versionerrortext, hexversion, Py_GetVersion(),
                             Py_GetProgramFullPath());
                return -1;
        }
        return 0;
}

#ifdef IS_PY3K
static struct PyModuleDef parsers_module = {PyModuleDef_HEAD_INIT, "parsers",
                                            parsers_doc, -1, methods};

PyMODINIT_FUNC PyInit_parsers(void)
{
        PyObject *mod;

        if (check_python_version() == -1)
                return NULL;
        mod = PyModule_Create(&parsers_module);
        module_init(mod);
        return mod;
}
#else
PyMODINIT_FUNC initparsers(void)
{
        PyObject *mod;

        if (check_python_version() == -1) {
                return;
        }
        mod = Py_InitModule3("parsers", methods, parsers_doc);
        module_init(mod);
}
#endif
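
The C side of the change is the new dirstatetuple_get_state getter and the PyGetSetDef table wired into tp_getset, which exposes state as a read-only attribute on dirstate_tuple objects alongside the existing v1_state() method. A minimal sketch of what this enables from Python, assuming the C extension is built and importable; the constructor arguments are illustrative values, not data from this changeset:

    from mercurial.cext import parsers

    # dirstatetuple takes (state, mode, size, mtime), matching the "ciii"
    # format accepted by dirstate_tuple_new above.
    entry = parsers.dirstatetuple(b'n', 0o644, 1024, 1600000000)

    # The new getset property returns the state as a one-byte bytes object,
    # exactly like the existing v1_state() method.
    assert entry.state == b'n'
    assert entry.v1_state() == entry.state

    # The sequence protocol (sq_item) is unchanged, so legacy tuple-style
    # access keeps working during the migration.
    assert entry[0] == b'n'

Exposing the attribute through tp_getset rather than a method keeps the C type in step with the pure-Python and Rust implementations, which can offer state as a plain property.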
mercurial/dirstate.py
@@ -1,1444 +1,1455 @@
# dirstate.py - working directory tracking for mercurial
#
# Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import collections
import contextlib
import errno
import os
import stat

from .i18n import _
from .pycompat import delattr

from hgdemandimport import tracing

from . import (
    dirstatemap,
    encoding,
    error,
    match as matchmod,
    pathutil,
    policy,
    pycompat,
    scmutil,
    sparse,
    util,
)

from .interfaces import (
    dirstate as intdirstate,
    util as interfaceutil,
)

parsers = policy.importmod('parsers')
rustmod = policy.importrust('dirstate')

SUPPORTS_DIRSTATE_V2 = rustmod is not None

propertycache = util.propertycache
filecache = scmutil.filecache
_rangemask = 0x7FFFFFFF

dirstatetuple = parsers.dirstatetuple


# a special value used internally for `size` if the file comes from the other parent
FROM_P2 = dirstatemap.FROM_P2

# a special value used internally for `size` if the file is modified/merged/added
NONNORMAL = dirstatemap.NONNORMAL

# a special value used internally for `time` if the time is ambiguous
AMBIGUOUS_TIME = dirstatemap.AMBIGUOUS_TIME


class repocache(filecache):
    """filecache for files in .hg/"""

    def join(self, obj, fname):
        return obj._opener.join(fname)


class rootcache(filecache):
    """filecache for files in the repository root"""

    def join(self, obj, fname):
        return obj._join(fname)


def _getfsnow(vfs):
    '''Get "now" timestamp on filesystem'''
    tmpfd, tmpname = vfs.mkstemp()
    try:
        return os.fstat(tmpfd)[stat.ST_MTIME]
    finally:
        os.close(tmpfd)
        vfs.unlink(tmpname)


@interfaceutil.implementer(intdirstate.idirstate)
class dirstate(object):
    def __init__(
        self,
        opener,
        ui,
        root,
        validate,
        sparsematchfn,
        nodeconstants,
        use_dirstate_v2,
    ):
        """Create a new dirstate object.

        opener is an open()-like callable that can be used to open the
        dirstate file; root is the root of the directory tracked by
        the dirstate.
        """
        self._use_dirstate_v2 = use_dirstate_v2
        self._nodeconstants = nodeconstants
        self._opener = opener
        self._validate = validate
        self._root = root
        self._sparsematchfn = sparsematchfn
        # ntpath.join(root, '') of Python 2.7.9 does not add sep if root is
        # UNC path pointing to root share (issue4557)
        self._rootdir = pathutil.normasprefix(root)
        self._dirty = False
        self._lastnormaltime = 0
        self._ui = ui
        self._filecache = {}
        self._parentwriters = 0
        self._filename = b'dirstate'
        self._pendingfilename = b'%s.pending' % self._filename
        self._plchangecallbacks = {}
        self._origpl = None
        self._updatedfiles = set()
        self._mapcls = dirstatemap.dirstatemap
        # Access and cache cwd early, so we don't access it for the first time
        # after a working-copy update caused it to not exist (accessing it then
        # raises an exception).
        self._cwd

    def prefetch_parents(self):
        """make sure the parents are loaded

        Used to avoid a race condition.
        """
        self._pl

    @contextlib.contextmanager
    def parentchange(self):
        """Context manager for handling dirstate parents.

        If an exception occurs in the scope of the context manager,
        the incoherent dirstate won't be written when wlock is
        released.
        """
        self._parentwriters += 1
        yield
        # Typically we want the "undo" step of a context manager in a
        # finally block so it happens even when an exception
        # occurs. In this case, however, we only want to decrement
        # parentwriters if the code in the with statement exits
        # normally, so we don't have a try/finally here on purpose.
        self._parentwriters -= 1

    def pendingparentchange(self):
        """Returns true if the dirstate is in the middle of a set of changes
        that modify the dirstate parent.
        """
        return self._parentwriters > 0

    @propertycache
    def _map(self):
        """Return the dirstate contents (see documentation for dirstatemap)."""
        self._map = self._mapcls(
            self._ui,
            self._opener,
            self._root,
            self._nodeconstants,
            self._use_dirstate_v2,
        )
        return self._map

    @property
    def _sparsematcher(self):
        """The matcher for the sparse checkout.

        The working directory may not include every file from a manifest. The
        matcher obtained by this property will match a path if it is to be
        included in the working directory.
        """
        # TODO there is potential to cache this property. For now, the matcher
        # is resolved on every access. (But the called function does use a
        # cache to keep the lookup fast.)
        return self._sparsematchfn()

    @repocache(b'branch')
    def _branch(self):
        try:
            return self._opener.read(b"branch").strip() or b"default"
        except IOError as inst:
            if inst.errno != errno.ENOENT:
                raise
            return b"default"

    @property
    def _pl(self):
        return self._map.parents()

    def hasdir(self, d):
        return self._map.hastrackeddir(d)

    @rootcache(b'.hgignore')
    def _ignore(self):
        files = self._ignorefiles()
        if not files:
            return matchmod.never()

        pats = [b'include:%s' % f for f in files]
        return matchmod.match(self._root, b'', [], pats, warn=self._ui.warn)

    @propertycache
    def _slash(self):
        return self._ui.configbool(b'ui', b'slash') and pycompat.ossep != b'/'

    @propertycache
    def _checklink(self):
        return util.checklink(self._root)

    @propertycache
    def _checkexec(self):
        return bool(util.checkexec(self._root))

    @propertycache
    def _checkcase(self):
        return not util.fscasesensitive(self._join(b'.hg'))

    def _join(self, f):
        # much faster than os.path.join()
        # it's safe because f is always a relative path
        return self._rootdir + f

    def flagfunc(self, buildfallback):
        if self._checklink and self._checkexec:

            def f(x):
                try:
                    st = os.lstat(self._join(x))
                    if util.statislink(st):
                        return b'l'
                    if util.statisexec(st):
                        return b'x'
                except OSError:
                    pass
                return b''

            return f

        fallback = buildfallback()
        if self._checklink:

            def f(x):
                if os.path.islink(self._join(x)):
                    return b'l'
                if b'x' in fallback(x):
                    return b'x'
                return b''

            return f
        if self._checkexec:

            def f(x):
                if b'l' in fallback(x):
                    return b'l'
                if util.isexec(self._join(x)):
                    return b'x'
                return b''

            return f
        else:
            return fallback
268
268
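An illustrative sketch of flagfunc(), assuming a hypothetical `repo` handle and a fallback that resolves flags from the first parent's manifest when the filesystem cannot express symlinks or the exec bit:

    def buildfallback():
        mf = repo[b'.'].manifest()
        return lambda x: mf.flags(x)

    flags = repo.dirstate.flagfunc(buildfallback)
    if flags(b'bin/run') == b'x':
        pass  # executable, per lstat() or per the fallback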
269 @propertycache
269 @propertycache
270 def _cwd(self):
270 def _cwd(self):
271 # internal config: ui.forcecwd
271 # internal config: ui.forcecwd
272 forcecwd = self._ui.config(b'ui', b'forcecwd')
272 forcecwd = self._ui.config(b'ui', b'forcecwd')
273 if forcecwd:
273 if forcecwd:
274 return forcecwd
274 return forcecwd
275 return encoding.getcwd()
275 return encoding.getcwd()
276
276
277 def getcwd(self):
277 def getcwd(self):
278 """Return the path from which a canonical path is calculated.
278 """Return the path from which a canonical path is calculated.
279
279
280 This path should be used to resolve file patterns or to convert
280 This path should be used to resolve file patterns or to convert
281 canonical paths back to file paths for display. It shouldn't be
281 canonical paths back to file paths for display. It shouldn't be
282 used to get real file paths. Use vfs functions instead.
282 used to get real file paths. Use vfs functions instead.
283 """
283 """
284 cwd = self._cwd
284 cwd = self._cwd
285 if cwd == self._root:
285 if cwd == self._root:
286 return b''
286 return b''
287 # self._root ends with a path separator if self._root is '/' or 'C:\'
287 # self._root ends with a path separator if self._root is '/' or 'C:\'
288 rootsep = self._root
288 rootsep = self._root
289 if not util.endswithsep(rootsep):
289 if not util.endswithsep(rootsep):
290 rootsep += pycompat.ossep
290 rootsep += pycompat.ossep
291 if cwd.startswith(rootsep):
291 if cwd.startswith(rootsep):
292 return cwd[len(rootsep) :]
292 return cwd[len(rootsep) :]
293 else:
293 else:
294 # we're outside the repo. return an absolute path.
294 # we're outside the repo. return an absolute path.
295 return cwd
295 return cwd
296
296
297 def pathto(self, f, cwd=None):
297 def pathto(self, f, cwd=None):
298 if cwd is None:
298 if cwd is None:
299 cwd = self.getcwd()
299 cwd = self.getcwd()
300 path = util.pathto(self._root, cwd, f)
300 path = util.pathto(self._root, cwd, f)
301 if self._slash:
301 if self._slash:
302 return util.pconvert(path)
302 return util.pconvert(path)
303 return path
303 return path
304
304
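A short sketch of the display-path round trip, assuming a hypothetical `repo` whose process cwd sits somewhere inside the working copy:

    ds = repo.dirstate
    rel = ds.getcwd()                 # b'' when cwd is the repo root
    shown = ds.pathto(b'src/main.c')  # canonical path as shown to the user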
305 def __getitem__(self, key):
305 def __getitem__(self, key):
306 """Return the current state of key (a filename) in the dirstate.
306 """Return the current state of key (a filename) in the dirstate.
307
307
308 States are:
308 States are:
309 n normal
309 n normal
310 m needs merging
310 m needs merging
311 r marked for removal
311 r marked for removal
312 a marked for addition
312 a marked for addition
313 ? not tracked
313 ? not tracked
314
315 XXX The "state" is a bit obscure to be in the "public" API. We should
316 consider migrating all users of this to go through the dirstate entry
317 instead.
314 """
318 """
315 return self._map.get(key, (b"?",))[0]
319 entry = self._map.get(key)
320 if entry is not None:
321 return entry.state
322 return b'?'
316
323
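A minimal lookup sketch for the state API above, assuming a hypothetical open repository handle `repo`; the entry-based form is the migration the XXX note points toward:

    ds = repo.dirstate
    state = ds[b'README']           # one of b'n', b'm', b'r', b'a', b'?'
    entry = ds._map.get(b'README')  # preferred: read the entry directly
    if entry is not None:
        state = entry.state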
317 def __contains__(self, key):
324 def __contains__(self, key):
318 return key in self._map
325 return key in self._map
319
326
320 def __iter__(self):
327 def __iter__(self):
321 return iter(sorted(self._map))
328 return iter(sorted(self._map))
322
329
323 def items(self):
330 def items(self):
324 return pycompat.iteritems(self._map)
331 return pycompat.iteritems(self._map)
325
332
326 iteritems = items
333 iteritems = items
327
334
328 def directories(self):
335 def directories(self):
329 return self._map.directories()
336 return self._map.directories()
330
337
331 def parents(self):
338 def parents(self):
332 return [self._validate(p) for p in self._pl]
339 return [self._validate(p) for p in self._pl]
333
340
334 def p1(self):
341 def p1(self):
335 return self._validate(self._pl[0])
342 return self._validate(self._pl[0])
336
343
337 def p2(self):
344 def p2(self):
338 return self._validate(self._pl[1])
345 return self._validate(self._pl[1])
339
346
340 @property
347 @property
341 def in_merge(self):
348 def in_merge(self):
342 """True if a merge is in progress"""
349 """True if a merge is in progress"""
343 return self._pl[1] != self._nodeconstants.nullid
350 return self._pl[1] != self._nodeconstants.nullid
344
351
345 def branch(self):
352 def branch(self):
346 return encoding.tolocal(self._branch)
353 return encoding.tolocal(self._branch)
347
354
348 def setparents(self, p1, p2=None):
355 def setparents(self, p1, p2=None):
349 """Set dirstate parents to p1 and p2.
356 """Set dirstate parents to p1 and p2.
350
357
351 When moving from two parents to one, 'm' merged entries are
358 When moving from two parents to one, 'm' merged entries are
352 adjusted to normal and previous copy records are discarded and
359 adjusted to normal and previous copy records are discarded and
353 returned by the call.
360 returned by the call.
354
361
355 See localrepo.setparents()
362 See localrepo.setparents()
356 """
363 """
357 if p2 is None:
364 if p2 is None:
358 p2 = self._nodeconstants.nullid
365 p2 = self._nodeconstants.nullid
359 if self._parentwriters == 0:
366 if self._parentwriters == 0:
360 raise ValueError(
367 raise ValueError(
361 b"cannot set dirstate parent outside of "
368 b"cannot set dirstate parent outside of "
362 b"dirstate.parentchange context manager"
369 b"dirstate.parentchange context manager"
363 )
370 )
364
371
365 self._dirty = True
372 self._dirty = True
366 oldp2 = self._pl[1]
373 oldp2 = self._pl[1]
367 if self._origpl is None:
374 if self._origpl is None:
368 self._origpl = self._pl
375 self._origpl = self._pl
369 self._map.setparents(p1, p2)
376 self._map.setparents(p1, p2)
370 copies = {}
377 copies = {}
371 if (
378 if (
372 oldp2 != self._nodeconstants.nullid
379 oldp2 != self._nodeconstants.nullid
373 and p2 == self._nodeconstants.nullid
380 and p2 == self._nodeconstants.nullid
374 ):
381 ):
375 candidatefiles = self._map.non_normal_or_other_parent_paths()
382 candidatefiles = self._map.non_normal_or_other_parent_paths()
376
383
377 for f in candidatefiles:
384 for f in candidatefiles:
378 s = self._map.get(f)
385 s = self._map.get(f)
379 if s is None:
386 if s is None:
380 continue
387 continue
381
388
382 # Discard 'm' markers when moving away from a merge state
389 # Discard 'm' markers when moving away from a merge state
383 if s[0] == b'm':
390 if s.state == b'm':
384 source = self._map.copymap.get(f)
391 source = self._map.copymap.get(f)
385 if source:
392 if source:
386 copies[f] = source
393 copies[f] = source
387 self.normallookup(f)
394 self.normallookup(f)
388 # Also fix up otherparent markers
395 # Also fix up otherparent markers
389 elif s[0] == b'n' and s[2] == FROM_P2:
396 elif s.state == b'n' and s[2] == FROM_P2:
390 source = self._map.copymap.get(f)
397 source = self._map.copymap.get(f)
391 if source:
398 if source:
392 copies[f] = source
399 copies[f] = source
393 self.add(f)
400 self.add(f)
394 return copies
401 return copies
395
402
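setparents() must run inside the parentchange context manager, otherwise the ValueError above fires; a minimal sketch with a hypothetical node `p1`:

    with repo.dirstate.parentchange():
        # collapsing from two parents to one hands back the copy records
        # attached to the discarded 'm'/otherparent entries
        copies = repo.dirstate.setparents(p1)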
396 def setbranch(self, branch):
403 def setbranch(self, branch):
397 self.__class__._branch.set(self, encoding.fromlocal(branch))
404 self.__class__._branch.set(self, encoding.fromlocal(branch))
398 f = self._opener(b'branch', b'w', atomictemp=True, checkambig=True)
405 f = self._opener(b'branch', b'w', atomictemp=True, checkambig=True)
399 try:
406 try:
400 f.write(self._branch + b'\n')
407 f.write(self._branch + b'\n')
401 f.close()
408 f.close()
402
409
403 # make sure filecache has the correct stat info for _branch after
410 # make sure filecache has the correct stat info for _branch after
404 # replacing the underlying file
411 # replacing the underlying file
405 ce = self._filecache[b'_branch']
412 ce = self._filecache[b'_branch']
406 if ce:
413 if ce:
407 ce.refresh()
414 ce.refresh()
408 except: # re-raises
415 except: # re-raises
409 f.discard()
416 f.discard()
410 raise
417 raise
411
418
412 def invalidate(self):
419 def invalidate(self):
413 """Causes the next access to reread the dirstate.
420 """Causes the next access to reread the dirstate.
414
421
415 This is different from localrepo.invalidatedirstate() because it always
422 This is different from localrepo.invalidatedirstate() because it always
416 rereads the dirstate. Use localrepo.invalidatedirstate() if you want to
423 rereads the dirstate. Use localrepo.invalidatedirstate() if you want to
417 check whether the dirstate has changed before rereading it."""
424 check whether the dirstate has changed before rereading it."""
418
425
419 for a in ("_map", "_branch", "_ignore"):
426 for a in ("_map", "_branch", "_ignore"):
420 if a in self.__dict__:
427 if a in self.__dict__:
421 delattr(self, a)
428 delattr(self, a)
422 self._lastnormaltime = 0
429 self._lastnormaltime = 0
423 self._dirty = False
430 self._dirty = False
424 self._updatedfiles.clear()
431 self._updatedfiles.clear()
425 self._parentwriters = 0
432 self._parentwriters = 0
426 self._origpl = None
433 self._origpl = None
427
434
428 def copy(self, source, dest):
435 def copy(self, source, dest):
429 """Mark dest as a copy of source. Unmark dest if source is None."""
436 """Mark dest as a copy of source. Unmark dest if source is None."""
430 if source == dest:
437 if source == dest:
431 return
438 return
432 self._dirty = True
439 self._dirty = True
433 if source is not None:
440 if source is not None:
434 self._map.copymap[dest] = source
441 self._map.copymap[dest] = source
435 self._updatedfiles.add(source)
442 self._updatedfiles.add(source)
436 self._updatedfiles.add(dest)
443 self._updatedfiles.add(dest)
437 elif self._map.copymap.pop(dest, None):
444 elif self._map.copymap.pop(dest, None):
438 self._updatedfiles.add(dest)
445 self._updatedfiles.add(dest)
439
446
440 def copied(self, file):
447 def copied(self, file):
441 return self._map.copymap.get(file, None)
448 return self._map.copymap.get(file, None)
442
449
443 def copies(self):
450 def copies(self):
444 return self._map.copymap
451 return self._map.copymap
445
452
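A sketch of the copy-record API above, with hypothetical filenames:

    ds = repo.dirstate
    ds.copy(b'old.txt', b'new.txt')  # record new.txt as copied from old.txt
    assert ds.copied(b'new.txt') == b'old.txt'
    ds.copy(None, b'new.txt')        # unmark the copy again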
446 def _addpath(
453 def _addpath(
447 self,
454 self,
448 f,
455 f,
449 state,
456 state,
450 mode,
457 mode,
451 size=NONNORMAL,
458 size=NONNORMAL,
452 mtime=AMBIGUOUS_TIME,
459 mtime=AMBIGUOUS_TIME,
453 from_p2=False,
460 from_p2=False,
454 possibly_dirty=False,
461 possibly_dirty=False,
455 ):
462 ):
456 oldstate = self[f]
463 oldstate = self[f]
457 if state == b'a' or oldstate == b'r':
464 if state == b'a' or oldstate == b'r':
458 scmutil.checkfilename(f)
465 scmutil.checkfilename(f)
459 if self._map.hastrackeddir(f):
466 if self._map.hastrackeddir(f):
460 msg = _(b'directory %r already in dirstate')
467 msg = _(b'directory %r already in dirstate')
461 msg %= pycompat.bytestr(f)
468 msg %= pycompat.bytestr(f)
462 raise error.Abort(msg)
469 raise error.Abort(msg)
463 # shadows
470 # shadows
464 for d in pathutil.finddirs(f):
471 for d in pathutil.finddirs(f):
465 if self._map.hastrackeddir(d):
472 if self._map.hastrackeddir(d):
466 break
473 break
467 entry = self._map.get(d)
474 entry = self._map.get(d)
468 if entry is not None and entry[0] != b'r':
475 if entry is not None and entry.state != b'r':
469 msg = _(b'file %r in dirstate clashes with %r')
476 msg = _(b'file %r in dirstate clashes with %r')
470 msg %= (pycompat.bytestr(d), pycompat.bytestr(f))
477 msg %= (pycompat.bytestr(d), pycompat.bytestr(f))
471 raise error.Abort(msg)
478 raise error.Abort(msg)
472 if state == b'a':
479 if state == b'a':
473 assert not possibly_dirty
480 assert not possibly_dirty
474 assert not from_p2
481 assert not from_p2
475 size = NONNORMAL
482 size = NONNORMAL
476 mtime = AMBIGUOUS_TIME
483 mtime = AMBIGUOUS_TIME
477 elif from_p2:
484 elif from_p2:
478 assert not possibly_dirty
485 assert not possibly_dirty
479 size = FROM_P2
486 size = FROM_P2
480 mtime = AMBIGUOUS_TIME
487 mtime = AMBIGUOUS_TIME
481 elif possibly_dirty:
488 elif possibly_dirty:
482 mtime = AMBIGUOUS_TIME
489 mtime = AMBIGUOUS_TIME
483 else:
490 else:
484 assert size != FROM_P2
491 assert size != FROM_P2
485 assert size != NONNORMAL
492 assert size != NONNORMAL
486 size = size & _rangemask
493 size = size & _rangemask
487 mtime = mtime & _rangemask
494 mtime = mtime & _rangemask
488 self._dirty = True
495 self._dirty = True
489 self._updatedfiles.add(f)
496 self._updatedfiles.add(f)
490 self._map.addfile(f, oldstate, state, mode, size, mtime)
497 self._map.addfile(f, oldstate, state, mode, size, mtime)
491
498
492 def normal(self, f, parentfiledata=None):
499 def normal(self, f, parentfiledata=None):
493 """Mark a file normal and clean.
500 """Mark a file normal and clean.
494
501
495 parentfiledata: (mode, size, mtime) of the clean file
502 parentfiledata: (mode, size, mtime) of the clean file
496
503
497 parentfiledata should be computed from memory (for mode,
504 parentfiledata should be computed from memory (for mode,
498 size), at or as close as possible to the point where we
505 size), at or as close as possible to the point where we
499 determined the file was clean, to limit the risk of the
506 determined the file was clean, to limit the risk of the
500 file having been changed by an external process between the
507 file having been changed by an external process between the
501 moment where the file was determined to be clean and now."""
508 moment where the file was determined to be clean and now."""
502 if parentfiledata:
509 if parentfiledata:
503 (mode, size, mtime) = parentfiledata
510 (mode, size, mtime) = parentfiledata
504 else:
511 else:
505 s = os.lstat(self._join(f))
512 s = os.lstat(self._join(f))
506 mode = s.st_mode
513 mode = s.st_mode
507 size = s.st_size
514 size = s.st_size
508 mtime = s[stat.ST_MTIME]
515 mtime = s[stat.ST_MTIME]
509 self._addpath(f, b'n', mode, size, mtime)
516 self._addpath(f, b'n', mode, size, mtime)
510 self._map.copymap.pop(f, None)
517 self._map.copymap.pop(f, None)
511 if f in self._map.nonnormalset:
518 if f in self._map.nonnormalset:
512 self._map.nonnormalset.remove(f)
519 self._map.nonnormalset.remove(f)
513 if mtime > self._lastnormaltime:
520 if mtime > self._lastnormaltime:
514 # Remember the most recent modification timeslot for status(),
521 # Remember the most recent modification timeslot for status(),
515 # to make sure we won't miss future size-preserving file content
522 # to make sure we won't miss future size-preserving file content
516 # modifications that happen within the same timeslot.
523 # modifications that happen within the same timeslot.
517 self._lastnormaltime = mtime
524 self._lastnormaltime = mtime
518
525
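A sketch of the pattern the docstring asks for: stat the file right after it is known to be clean and pass that data to normal(), instead of letting normal() stat it again later (`full_path` and `filename` are hypothetical):

    import os
    import stat

    st = os.lstat(full_path)  # taken when the file was verified clean
    repo.dirstate.normal(
        filename, parentfiledata=(st.st_mode, st.st_size, st[stat.ST_MTIME])
    )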
519 def normallookup(self, f):
526 def normallookup(self, f):
520 '''Mark a file normal, but possibly dirty.'''
527 '''Mark a file normal, but possibly dirty.'''
521 if self.in_merge:
528 if self.in_merge:
522 # if there is a merge going on and the file was either
529 # if there is a merge going on and the file was either
523 # in state 'm' (-1) or coming from other parent (-2) before
530 # in state 'm' (-1) or coming from other parent (-2) before
524 # being removed, restore that state.
531 # being removed, restore that state.
525 entry = self._map.get(f)
532 entry = self._map.get(f)
526 if entry is not None:
533 if entry is not None:
527 if entry[0] == b'r' and entry[2] in (NONNORMAL, FROM_P2):
534 if entry.state == b'r' and entry[2] in (NONNORMAL, FROM_P2):
528 source = self._map.copymap.get(f)
535 source = self._map.copymap.get(f)
529 if entry[2] == NONNORMAL:
536 if entry[2] == NONNORMAL:
530 self.merge(f)
537 self.merge(f)
531 elif entry[2] == FROM_P2:
538 elif entry[2] == FROM_P2:
532 self.otherparent(f)
539 self.otherparent(f)
533 if source:
540 if source:
534 self.copy(source, f)
541 self.copy(source, f)
535 return
542 return
536 if entry[0] == b'm' or entry[0] == b'n' and entry[2] == FROM_P2:
543 if (
544 entry.state == b'm'
545 or entry.state == b'n'
546 and entry[2] == FROM_P2
547 ):
537 return
548 return
538 self._addpath(f, b'n', 0, possibly_dirty=True)
549 self._addpath(f, b'n', 0, possibly_dirty=True)
539 self._map.copymap.pop(f, None)
550 self._map.copymap.pop(f, None)
540
551
541 def otherparent(self, f):
552 def otherparent(self, f):
542 '''Mark as coming from the other parent, always dirty.'''
553 '''Mark as coming from the other parent, always dirty.'''
543 if not self.in_merge:
554 if not self.in_merge:
544 msg = _(b"setting %r to other parent only allowed in merges") % f
555 msg = _(b"setting %r to other parent only allowed in merges") % f
545 raise error.Abort(msg)
556 raise error.Abort(msg)
546 if f in self and self[f] == b'n':
557 if f in self and self[f] == b'n':
547 # merge-like
558 # merge-like
548 self._addpath(f, b'm', 0, from_p2=True)
559 self._addpath(f, b'm', 0, from_p2=True)
549 else:
560 else:
550 # add-like
561 # add-like
551 self._addpath(f, b'n', 0, from_p2=True)
562 self._addpath(f, b'n', 0, from_p2=True)
552 self._map.copymap.pop(f, None)
563 self._map.copymap.pop(f, None)
553
564
554 def add(self, f):
565 def add(self, f):
555 '''Mark a file added.'''
566 '''Mark a file added.'''
556 self._addpath(f, b'a', 0)
567 self._addpath(f, b'a', 0)
557 self._map.copymap.pop(f, None)
568 self._map.copymap.pop(f, None)
558
569
559 def remove(self, f):
570 def remove(self, f):
560 '''Mark a file removed.'''
571 '''Mark a file removed.'''
561 self._dirty = True
572 self._dirty = True
562 self._updatedfiles.add(f)
573 self._updatedfiles.add(f)
563 self._map.removefile(f, in_merge=self.in_merge)
574 self._map.removefile(f, in_merge=self.in_merge)
564
575
565 def merge(self, f):
576 def merge(self, f):
566 '''Mark a file merged.'''
577 '''Mark a file merged.'''
567 if not self.in_merge:
578 if not self.in_merge:
568 return self.normallookup(f)
579 return self.normallookup(f)
569 return self.otherparent(f)
580 return self.otherparent(f)
570
581
571 def drop(self, f):
582 def drop(self, f):
572 '''Drop a file from the dirstate'''
583 '''Drop a file from the dirstate'''
573 oldstate = self[f]
584 oldstate = self[f]
574 if self._map.dropfile(f, oldstate):
585 if self._map.dropfile(f, oldstate):
575 self._dirty = True
586 self._dirty = True
576 self._updatedfiles.add(f)
587 self._updatedfiles.add(f)
577 self._map.copymap.pop(f, None)
588 self._map.copymap.pop(f, None)
578
589
579 def _discoverpath(self, path, normed, ignoremissing, exists, storemap):
590 def _discoverpath(self, path, normed, ignoremissing, exists, storemap):
580 if exists is None:
591 if exists is None:
581 exists = os.path.lexists(os.path.join(self._root, path))
592 exists = os.path.lexists(os.path.join(self._root, path))
582 if not exists:
593 if not exists:
583 # Maybe a path component exists
594 # Maybe a path component exists
584 if not ignoremissing and b'/' in path:
595 if not ignoremissing and b'/' in path:
585 d, f = path.rsplit(b'/', 1)
596 d, f = path.rsplit(b'/', 1)
586 d = self._normalize(d, False, ignoremissing, None)
597 d = self._normalize(d, False, ignoremissing, None)
587 folded = d + b"/" + f
598 folded = d + b"/" + f
588 else:
599 else:
589 # No path components, preserve original case
600 # No path components, preserve original case
590 folded = path
601 folded = path
591 else:
602 else:
592 # recursively normalize leading directory components
603 # recursively normalize leading directory components
593 # against dirstate
604 # against dirstate
594 if b'/' in normed:
605 if b'/' in normed:
595 d, f = normed.rsplit(b'/', 1)
606 d, f = normed.rsplit(b'/', 1)
596 d = self._normalize(d, False, ignoremissing, True)
607 d = self._normalize(d, False, ignoremissing, True)
597 r = self._root + b"/" + d
608 r = self._root + b"/" + d
598 folded = d + b"/" + util.fspath(f, r)
609 folded = d + b"/" + util.fspath(f, r)
599 else:
610 else:
600 folded = util.fspath(normed, self._root)
611 folded = util.fspath(normed, self._root)
601 storemap[normed] = folded
612 storemap[normed] = folded
602
613
603 return folded
614 return folded
604
615
605 def _normalizefile(self, path, isknown, ignoremissing=False, exists=None):
616 def _normalizefile(self, path, isknown, ignoremissing=False, exists=None):
606 normed = util.normcase(path)
617 normed = util.normcase(path)
607 folded = self._map.filefoldmap.get(normed, None)
618 folded = self._map.filefoldmap.get(normed, None)
608 if folded is None:
619 if folded is None:
609 if isknown:
620 if isknown:
610 folded = path
621 folded = path
611 else:
622 else:
612 folded = self._discoverpath(
623 folded = self._discoverpath(
613 path, normed, ignoremissing, exists, self._map.filefoldmap
624 path, normed, ignoremissing, exists, self._map.filefoldmap
614 )
625 )
615 return folded
626 return folded
616
627
617 def _normalize(self, path, isknown, ignoremissing=False, exists=None):
628 def _normalize(self, path, isknown, ignoremissing=False, exists=None):
618 normed = util.normcase(path)
629 normed = util.normcase(path)
619 folded = self._map.filefoldmap.get(normed, None)
630 folded = self._map.filefoldmap.get(normed, None)
620 if folded is None:
631 if folded is None:
621 folded = self._map.dirfoldmap.get(normed, None)
632 folded = self._map.dirfoldmap.get(normed, None)
622 if folded is None:
633 if folded is None:
623 if isknown:
634 if isknown:
624 folded = path
635 folded = path
625 else:
636 else:
626 # store discovered result in dirfoldmap so that future
637 # store discovered result in dirfoldmap so that future
627 # normalizefile calls don't start matching directories
638 # normalizefile calls don't start matching directories
628 folded = self._discoverpath(
639 folded = self._discoverpath(
629 path, normed, ignoremissing, exists, self._map.dirfoldmap
640 path, normed, ignoremissing, exists, self._map.dirfoldmap
630 )
641 )
631 return folded
642 return folded
632
643
633 def normalize(self, path, isknown=False, ignoremissing=False):
644 def normalize(self, path, isknown=False, ignoremissing=False):
634 """
645 """
635 normalize the case of a pathname when on a casefolding filesystem
646 normalize the case of a pathname when on a casefolding filesystem
636
647
637 isknown specifies whether the filename came from walking the
648 isknown specifies whether the filename came from walking the
638 disk, to avoid extra filesystem access.
649 disk, to avoid extra filesystem access.
639
650
640 If ignoremissing is True, missing paths are returned
651 If ignoremissing is True, missing paths are returned
641 unchanged. Otherwise, we try harder to normalize possibly
652 unchanged. Otherwise, we try harder to normalize possibly
642 existing path components.
653 existing path components.
643
654
644 The normalized case is determined based on the following precedence:
655 The normalized case is determined based on the following precedence:
645
656
646 - version of name already stored in the dirstate
657 - version of name already stored in the dirstate
647 - version of name stored on disk
658 - version of name stored on disk
648 - version provided via command arguments
659 - version provided via command arguments
649 """
660 """
650
661
651 if self._checkcase:
662 if self._checkcase:
652 return self._normalize(path, isknown, ignoremissing)
663 return self._normalize(path, isknown, ignoremissing)
653 return path
664 return path
654
665
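On a case-folding filesystem, normalize() maps user-typed case back to the case the dirstate (or the disk) already knows; a hypothetical sketch:

    # the user typed b'SRC/Main.C' but the dirstate tracks b'src/main.c'
    nf = repo.dirstate.normalize(b'SRC/Main.C')
    # nf == b'src/main.c' when case needs checking, unchanged otherwise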
655 def clear(self):
666 def clear(self):
656 self._map.clear()
667 self._map.clear()
657 self._lastnormaltime = 0
668 self._lastnormaltime = 0
658 self._updatedfiles.clear()
669 self._updatedfiles.clear()
659 self._dirty = True
670 self._dirty = True
660
671
661 def rebuild(self, parent, allfiles, changedfiles=None):
672 def rebuild(self, parent, allfiles, changedfiles=None):
662 if changedfiles is None:
673 if changedfiles is None:
663 # Rebuild entire dirstate
674 # Rebuild entire dirstate
664 to_lookup = allfiles
675 to_lookup = allfiles
665 to_drop = []
676 to_drop = []
666 lastnormaltime = self._lastnormaltime
677 lastnormaltime = self._lastnormaltime
667 self.clear()
678 self.clear()
668 self._lastnormaltime = lastnormaltime
679 self._lastnormaltime = lastnormaltime
669 elif len(changedfiles) < 10:
680 elif len(changedfiles) < 10:
670 # Avoid turning allfiles into a set, which can be expensive if it's
681 # Avoid turning allfiles into a set, which can be expensive if it's
671 # large.
682 # large.
672 to_lookup = []
683 to_lookup = []
673 to_drop = []
684 to_drop = []
674 for f in changedfiles:
685 for f in changedfiles:
675 if f in allfiles:
686 if f in allfiles:
676 to_lookup.append(f)
687 to_lookup.append(f)
677 else:
688 else:
678 to_drop.append(f)
689 to_drop.append(f)
679 else:
690 else:
680 changedfilesset = set(changedfiles)
691 changedfilesset = set(changedfiles)
681 to_lookup = changedfilesset & set(allfiles)
692 to_lookup = changedfilesset & set(allfiles)
682 to_drop = changedfilesset - to_lookup
693 to_drop = changedfilesset - to_lookup
683
694
684 if self._origpl is None:
695 if self._origpl is None:
685 self._origpl = self._pl
696 self._origpl = self._pl
686 self._map.setparents(parent, self._nodeconstants.nullid)
697 self._map.setparents(parent, self._nodeconstants.nullid)
687
698
688 for f in to_lookup:
699 for f in to_lookup:
689 self.normallookup(f)
700 self.normallookup(f)
690 for f in to_drop:
701 for f in to_drop:
691 self.drop(f)
702 self.drop(f)
692
703
693 self._dirty = True
704 self._dirty = True
694
705
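A sketch of a full rebuild from a changeset, the same shape as what `hg debugrebuilddirstate` does (`repo` and `ctx` are hypothetical):

    ctx = repo[b'.']
    with repo.wlock():
        repo.dirstate.rebuild(ctx.node(), ctx.manifest())
        repo.dirstate.write(repo.currenttransaction())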
695 def identity(self):
706 def identity(self):
696 """Return identity of dirstate itself to detect changing in storage
707 """Return identity of dirstate itself to detect changing in storage
697
708
698 If identity of previous dirstate is equal to this, writing
709 If identity of previous dirstate is equal to this, writing
699 changes based on the former dirstate out can keep consistency.
710 changes based on the former dirstate out can keep consistency.
700 """
711 """
701 return self._map.identity
712 return self._map.identity
702
713
703 def write(self, tr):
714 def write(self, tr):
704 if not self._dirty:
715 if not self._dirty:
705 return
716 return
706
717
707 filename = self._filename
718 filename = self._filename
708 if tr:
719 if tr:
709 # 'dirstate.write()' is not only for writing in-memory
720 # 'dirstate.write()' is not only for writing in-memory
710 # changes out, but also for dropping ambiguous timestamps.
721 # changes out, but also for dropping ambiguous timestamps.
711 # delayed writing re-raises the "ambiguous timestamp" issue.
722 # delayed writing re-raises the "ambiguous timestamp" issue.
712 # See also the wiki page below for detail:
723 # See also the wiki page below for detail:
713 # https://www.mercurial-scm.org/wiki/DirstateTransactionPlan
724 # https://www.mercurial-scm.org/wiki/DirstateTransactionPlan
714
725
715 # emulate dropping timestamp in 'parsers.pack_dirstate'
726 # emulate dropping timestamp in 'parsers.pack_dirstate'
716 now = _getfsnow(self._opener)
727 now = _getfsnow(self._opener)
717 self._map.clearambiguoustimes(self._updatedfiles, now)
728 self._map.clearambiguoustimes(self._updatedfiles, now)
718
729
719 # emulate that all 'dirstate.normal' results are written out
730 # emulate that all 'dirstate.normal' results are written out
720 self._lastnormaltime = 0
731 self._lastnormaltime = 0
721 self._updatedfiles.clear()
732 self._updatedfiles.clear()
722
733
723 # delay writing in-memory changes out
734 # delay writing in-memory changes out
724 tr.addfilegenerator(
735 tr.addfilegenerator(
725 b'dirstate',
736 b'dirstate',
726 (self._filename,),
737 (self._filename,),
727 self._writedirstate,
738 self._writedirstate,
728 location=b'plain',
739 location=b'plain',
729 )
740 )
730 return
741 return
731
742
732 st = self._opener(filename, b"w", atomictemp=True, checkambig=True)
743 st = self._opener(filename, b"w", atomictemp=True, checkambig=True)
733 self._writedirstate(st)
744 self._writedirstate(st)
734
745
735 def addparentchangecallback(self, category, callback):
746 def addparentchangecallback(self, category, callback):
736 """add a callback to be called when the wd parents are changed
747 """add a callback to be called when the wd parents are changed
737
748
738 Callback will be called with the following arguments:
749 Callback will be called with the following arguments:
739 dirstate, (oldp1, oldp2), (newp1, newp2)
750 dirstate, (oldp1, oldp2), (newp1, newp2)
740
751
741 Category is a unique identifier to allow overwriting an old callback
752 Category is a unique identifier to allow overwriting an old callback
742 with a newer callback.
753 with a newer callback.
743 """
754 """
744 self._plchangecallbacks[category] = callback
755 self._plchangecallbacks[category] = callback
745
756
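A sketch of registering a parent-change observer; the category string only namespaces the callback so a later registration can replace it (`repo` hypothetical):

    def logparents(dirstate, old, new):
        # old and new are (p1, p2) pairs of binary nodes
        print(old, new)

    repo.dirstate.addparentchangecallback(b'my-ext', logparents)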
746 def _writedirstate(self, st):
757 def _writedirstate(self, st):
747 # notify callbacks about parents change
758 # notify callbacks about parents change
748 if self._origpl is not None and self._origpl != self._pl:
759 if self._origpl is not None and self._origpl != self._pl:
749 for c, callback in sorted(
760 for c, callback in sorted(
750 pycompat.iteritems(self._plchangecallbacks)
761 pycompat.iteritems(self._plchangecallbacks)
751 ):
762 ):
752 callback(self, self._origpl, self._pl)
763 callback(self, self._origpl, self._pl)
753 self._origpl = None
764 self._origpl = None
754 # use the modification time of the newly created temporary file as the
765 # use the modification time of the newly created temporary file as the
755 # filesystem's notion of 'now'
766 # filesystem's notion of 'now'
756 now = util.fstat(st)[stat.ST_MTIME] & _rangemask
767 now = util.fstat(st)[stat.ST_MTIME] & _rangemask
757
768
758 # enough 'delaywrite' prevents 'pack_dirstate' from dropping
769 # enough 'delaywrite' prevents 'pack_dirstate' from dropping
759 # the timestamp of each entry in the dirstate, because of 'now > mtime'
770 # the timestamp of each entry in the dirstate, because of 'now > mtime'
760 delaywrite = self._ui.configint(b'debug', b'dirstate.delaywrite')
771 delaywrite = self._ui.configint(b'debug', b'dirstate.delaywrite')
761 if delaywrite > 0:
772 if delaywrite > 0:
762 # do we have any files to delay for?
773 # do we have any files to delay for?
763 for f, e in pycompat.iteritems(self._map):
774 for f, e in pycompat.iteritems(self._map):
764 if e[0] == b'n' and e[3] == now:
775 if e.state == b'n' and e[3] == now:
765 import time # to avoid useless import
776 import time # to avoid useless import
766
777
767 # rather than sleep n seconds, sleep until the next
778 # rather than sleep n seconds, sleep until the next
768 # multiple of n seconds
779 # multiple of n seconds
769 clock = time.time()
780 clock = time.time()
770 start = int(clock) - (int(clock) % delaywrite)
781 start = int(clock) - (int(clock) % delaywrite)
771 end = start + delaywrite
782 end = start + delaywrite
772 time.sleep(end - clock)
783 time.sleep(end - clock)
773 now = end # trust our estimate that the end is near now
784 now = end # trust our estimate that the end is near now
774 break
785 break
775
786
776 self._map.write(st, now)
787 self._map.write(st, now)
777 self._lastnormaltime = 0
788 self._lastnormaltime = 0
778 self._dirty = False
789 self._dirty = False
779
790
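The delaywrite loop above sleeps to the next multiple of `delaywrite` seconds rather than for a fixed duration; a worked sketch of that arithmetic with hypothetical values:

    import time

    delaywrite = 2
    clock = time.time()                             # say 12.3
    start = int(clock) - (int(clock) % delaywrite)  # 12
    end = start + delaywrite                        # 14
    time.sleep(end - clock)                         # sleeps 1.7s, not 2.0s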
780 def _dirignore(self, f):
791 def _dirignore(self, f):
781 if self._ignore(f):
792 if self._ignore(f):
782 return True
793 return True
783 for p in pathutil.finddirs(f):
794 for p in pathutil.finddirs(f):
784 if self._ignore(p):
795 if self._ignore(p):
785 return True
796 return True
786 return False
797 return False
787
798
788 def _ignorefiles(self):
799 def _ignorefiles(self):
789 files = []
800 files = []
790 if os.path.exists(self._join(b'.hgignore')):
801 if os.path.exists(self._join(b'.hgignore')):
791 files.append(self._join(b'.hgignore'))
802 files.append(self._join(b'.hgignore'))
792 for name, path in self._ui.configitems(b"ui"):
803 for name, path in self._ui.configitems(b"ui"):
793 if name == b'ignore' or name.startswith(b'ignore.'):
804 if name == b'ignore' or name.startswith(b'ignore.'):
794 # we need to use os.path.join here rather than self._join
805 # we need to use os.path.join here rather than self._join
795 # because path is arbitrary and user-specified
806 # because path is arbitrary and user-specified
796 files.append(os.path.join(self._rootdir, util.expandpath(path)))
807 files.append(os.path.join(self._rootdir, util.expandpath(path)))
797 return files
808 return files
798
809
799 def _ignorefileandline(self, f):
810 def _ignorefileandline(self, f):
800 files = collections.deque(self._ignorefiles())
811 files = collections.deque(self._ignorefiles())
801 visited = set()
812 visited = set()
802 while files:
813 while files:
803 i = files.popleft()
814 i = files.popleft()
804 patterns = matchmod.readpatternfile(
815 patterns = matchmod.readpatternfile(
805 i, self._ui.warn, sourceinfo=True
816 i, self._ui.warn, sourceinfo=True
806 )
817 )
807 for pattern, lineno, line in patterns:
818 for pattern, lineno, line in patterns:
808 kind, p = matchmod._patsplit(pattern, b'glob')
819 kind, p = matchmod._patsplit(pattern, b'glob')
809 if kind == b"subinclude":
820 if kind == b"subinclude":
810 if p not in visited:
821 if p not in visited:
811 files.append(p)
822 files.append(p)
812 continue
823 continue
813 m = matchmod.match(
824 m = matchmod.match(
814 self._root, b'', [], [pattern], warn=self._ui.warn
825 self._root, b'', [], [pattern], warn=self._ui.warn
815 )
826 )
816 if m(f):
827 if m(f):
817 return (i, lineno, line)
828 return (i, lineno, line)
818 visited.add(i)
829 visited.add(i)
819 return (None, -1, b"")
830 return (None, -1, b"")
820
831
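This helper backs `hg debugignore`; a hypothetical lookup of which ignore rule matched a path:

    f, lineno, line = repo.dirstate._ignorefileandline(b'build/out.o')
    if f is not None:
        print(f, lineno, line)  # ignore file, line number, raw pattern line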
821 def _walkexplicit(self, match, subrepos):
832 def _walkexplicit(self, match, subrepos):
822 """Get stat data about the files explicitly specified by match.
833 """Get stat data about the files explicitly specified by match.
823
834
824 Return a triple (results, dirsfound, dirsnotfound).
835 Return a triple (results, dirsfound, dirsnotfound).
825 - results is a mapping from filename to stat result. It also contains
836 - results is a mapping from filename to stat result. It also contains
826 listings mapping subrepos and .hg to None.
837 listings mapping subrepos and .hg to None.
827 - dirsfound is a list of files found to be directories.
838 - dirsfound is a list of files found to be directories.
828 - dirsnotfound is a list of files that the dirstate thinks are
839 - dirsnotfound is a list of files that the dirstate thinks are
829 directories and that were not found."""
840 directories and that were not found."""
830
841
831 def badtype(mode):
842 def badtype(mode):
832 kind = _(b'unknown')
843 kind = _(b'unknown')
833 if stat.S_ISCHR(mode):
844 if stat.S_ISCHR(mode):
834 kind = _(b'character device')
845 kind = _(b'character device')
835 elif stat.S_ISBLK(mode):
846 elif stat.S_ISBLK(mode):
836 kind = _(b'block device')
847 kind = _(b'block device')
837 elif stat.S_ISFIFO(mode):
848 elif stat.S_ISFIFO(mode):
838 kind = _(b'fifo')
849 kind = _(b'fifo')
839 elif stat.S_ISSOCK(mode):
850 elif stat.S_ISSOCK(mode):
840 kind = _(b'socket')
851 kind = _(b'socket')
841 elif stat.S_ISDIR(mode):
852 elif stat.S_ISDIR(mode):
842 kind = _(b'directory')
853 kind = _(b'directory')
843 return _(b'unsupported file type (type is %s)') % kind
854 return _(b'unsupported file type (type is %s)') % kind
844
855
845 badfn = match.bad
856 badfn = match.bad
846 dmap = self._map
857 dmap = self._map
847 lstat = os.lstat
858 lstat = os.lstat
848 getkind = stat.S_IFMT
859 getkind = stat.S_IFMT
849 dirkind = stat.S_IFDIR
860 dirkind = stat.S_IFDIR
850 regkind = stat.S_IFREG
861 regkind = stat.S_IFREG
851 lnkkind = stat.S_IFLNK
862 lnkkind = stat.S_IFLNK
852 join = self._join
863 join = self._join
853 dirsfound = []
864 dirsfound = []
854 foundadd = dirsfound.append
865 foundadd = dirsfound.append
855 dirsnotfound = []
866 dirsnotfound = []
856 notfoundadd = dirsnotfound.append
867 notfoundadd = dirsnotfound.append
857
868
858 if not match.isexact() and self._checkcase:
869 if not match.isexact() and self._checkcase:
859 normalize = self._normalize
870 normalize = self._normalize
860 else:
871 else:
861 normalize = None
872 normalize = None
862
873
863 files = sorted(match.files())
874 files = sorted(match.files())
864 subrepos.sort()
875 subrepos.sort()
865 i, j = 0, 0
876 i, j = 0, 0
866 while i < len(files) and j < len(subrepos):
877 while i < len(files) and j < len(subrepos):
867 subpath = subrepos[j] + b"/"
878 subpath = subrepos[j] + b"/"
868 if files[i] < subpath:
879 if files[i] < subpath:
869 i += 1
880 i += 1
870 continue
881 continue
871 while i < len(files) and files[i].startswith(subpath):
882 while i < len(files) and files[i].startswith(subpath):
872 del files[i]
883 del files[i]
873 j += 1
884 j += 1
874
885
875 if not files or b'' in files:
886 if not files or b'' in files:
876 files = [b'']
887 files = [b'']
877 # constructing the foldmap is expensive, so don't do it for the
888 # constructing the foldmap is expensive, so don't do it for the
878 # common case where files is ['']
889 # common case where files is ['']
879 normalize = None
890 normalize = None
880 results = dict.fromkeys(subrepos)
891 results = dict.fromkeys(subrepos)
881 results[b'.hg'] = None
892 results[b'.hg'] = None
882
893
883 for ff in files:
894 for ff in files:
884 if normalize:
895 if normalize:
885 nf = normalize(ff, False, True)
896 nf = normalize(ff, False, True)
886 else:
897 else:
887 nf = ff
898 nf = ff
888 if nf in results:
899 if nf in results:
889 continue
900 continue
890
901
891 try:
902 try:
892 st = lstat(join(nf))
903 st = lstat(join(nf))
893 kind = getkind(st.st_mode)
904 kind = getkind(st.st_mode)
894 if kind == dirkind:
905 if kind == dirkind:
895 if nf in dmap:
906 if nf in dmap:
896 # file replaced by dir on disk but still in dirstate
907 # file replaced by dir on disk but still in dirstate
897 results[nf] = None
908 results[nf] = None
898 foundadd((nf, ff))
909 foundadd((nf, ff))
899 elif kind == regkind or kind == lnkkind:
910 elif kind == regkind or kind == lnkkind:
900 results[nf] = st
911 results[nf] = st
901 else:
912 else:
902 badfn(ff, badtype(kind))
913 badfn(ff, badtype(kind))
903 if nf in dmap:
914 if nf in dmap:
904 results[nf] = None
915 results[nf] = None
905 except OSError as inst: # nf not found on disk - it is dirstate only
916 except OSError as inst: # nf not found on disk - it is dirstate only
906 if nf in dmap: # does it exactly match a missing file?
917 if nf in dmap: # does it exactly match a missing file?
907 results[nf] = None
918 results[nf] = None
908 else: # does it match a missing directory?
919 else: # does it match a missing directory?
909 if self._map.hasdir(nf):
920 if self._map.hasdir(nf):
910 notfoundadd(nf)
921 notfoundadd(nf)
911 else:
922 else:
912 badfn(ff, encoding.strtolocal(inst.strerror))
923 badfn(ff, encoding.strtolocal(inst.strerror))
913
924
914 # match.files() may contain explicitly-specified paths that shouldn't
925 # match.files() may contain explicitly-specified paths that shouldn't
915 # be taken; drop them from the list of files found. dirsfound/notfound
926 # be taken; drop them from the list of files found. dirsfound/notfound
916 # aren't filtered here because they will be tested later.
927 # aren't filtered here because they will be tested later.
917 if match.anypats():
928 if match.anypats():
918 for f in list(results):
929 for f in list(results):
919 if f == b'.hg' or f in subrepos:
930 if f == b'.hg' or f in subrepos:
920 # keep sentinel to disable further out-of-repo walks
931 # keep sentinel to disable further out-of-repo walks
921 continue
932 continue
922 if not match(f):
933 if not match(f):
923 del results[f]
934 del results[f]
924
935
925 # Case insensitive filesystems cannot rely on lstat() failing to detect
936 # Case insensitive filesystems cannot rely on lstat() failing to detect
926 # a case-only rename. Prune the stat object for any file that does not
937 # a case-only rename. Prune the stat object for any file that does not
927 # match the case in the filesystem, if there are multiple files that
938 # match the case in the filesystem, if there are multiple files that
928 # normalize to the same path.
939 # normalize to the same path.
929 if match.isexact() and self._checkcase:
940 if match.isexact() and self._checkcase:
930 normed = {}
941 normed = {}
931
942
932 for f, st in pycompat.iteritems(results):
943 for f, st in pycompat.iteritems(results):
933 if st is None:
944 if st is None:
934 continue
945 continue
935
946
936 nc = util.normcase(f)
947 nc = util.normcase(f)
937 paths = normed.get(nc)
948 paths = normed.get(nc)
938
949
939 if paths is None:
950 if paths is None:
940 paths = set()
951 paths = set()
941 normed[nc] = paths
952 normed[nc] = paths
942
953
943 paths.add(f)
954 paths.add(f)
944
955
945 for norm, paths in pycompat.iteritems(normed):
956 for norm, paths in pycompat.iteritems(normed):
946 if len(paths) > 1:
957 if len(paths) > 1:
947 for path in paths:
958 for path in paths:
948 folded = self._discoverpath(
959 folded = self._discoverpath(
949 path, norm, True, None, self._map.dirfoldmap
960 path, norm, True, None, self._map.dirfoldmap
950 )
961 )
951 if path != folded:
962 if path != folded:
952 results[path] = None
963 results[path] = None
953
964
954 return results, dirsfound, dirsnotfound
965 return results, dirsfound, dirsnotfound
955
966
956 def walk(self, match, subrepos, unknown, ignored, full=True):
967 def walk(self, match, subrepos, unknown, ignored, full=True):
957 """
968 """
958 Walk recursively through the directory tree, finding all files
969 Walk recursively through the directory tree, finding all files
959 matched by match.
970 matched by match.
960
971
961 If full is False, maybe skip some known-clean files.
972 If full is False, maybe skip some known-clean files.
962
973
963 Return a dict mapping filename to stat-like object (either
974 Return a dict mapping filename to stat-like object (either
964 mercurial.osutil.stat instance or return value of os.stat()).
975 mercurial.osutil.stat instance or return value of os.stat()).
965
976
966 """
977 """
967 # full is a flag that extensions that hook into walk can use -- this
978 # full is a flag that extensions that hook into walk can use -- this
968 # implementation doesn't use it at all. This satisfies the contract
979 # implementation doesn't use it at all. This satisfies the contract
969 # because we only guarantee a "maybe".
980 # because we only guarantee a "maybe".
970
981
971 if ignored:
982 if ignored:
972 ignore = util.never
983 ignore = util.never
973 dirignore = util.never
984 dirignore = util.never
974 elif unknown:
985 elif unknown:
975 ignore = self._ignore
986 ignore = self._ignore
976 dirignore = self._dirignore
987 dirignore = self._dirignore
977 else:
988 else:
978 # if not unknown and not ignored, drop dir recursion and step 2
989 # if not unknown and not ignored, drop dir recursion and step 2
979 ignore = util.always
990 ignore = util.always
980 dirignore = util.always
991 dirignore = util.always
981
992
982 matchfn = match.matchfn
993 matchfn = match.matchfn
983 matchalways = match.always()
994 matchalways = match.always()
984 matchtdir = match.traversedir
995 matchtdir = match.traversedir
985 dmap = self._map
996 dmap = self._map
986 listdir = util.listdir
997 listdir = util.listdir
987 lstat = os.lstat
998 lstat = os.lstat
988 dirkind = stat.S_IFDIR
999 dirkind = stat.S_IFDIR
989 regkind = stat.S_IFREG
1000 regkind = stat.S_IFREG
990 lnkkind = stat.S_IFLNK
1001 lnkkind = stat.S_IFLNK
991 join = self._join
1002 join = self._join
992
1003
993 exact = skipstep3 = False
1004 exact = skipstep3 = False
994 if match.isexact(): # match.exact
1005 if match.isexact(): # match.exact
995 exact = True
1006 exact = True
996 dirignore = util.always # skip step 2
1007 dirignore = util.always # skip step 2
997 elif match.prefix(): # match.match, no patterns
1008 elif match.prefix(): # match.match, no patterns
998 skipstep3 = True
1009 skipstep3 = True
999
1010
1000 if not exact and self._checkcase:
1011 if not exact and self._checkcase:
1001 normalize = self._normalize
1012 normalize = self._normalize
1002 normalizefile = self._normalizefile
1013 normalizefile = self._normalizefile
1003 skipstep3 = False
1014 skipstep3 = False
1004 else:
1015 else:
1005 normalize = self._normalize
1016 normalize = self._normalize
1006 normalizefile = None
1017 normalizefile = None
1007
1018
1008 # step 1: find all explicit files
1019 # step 1: find all explicit files
1009 results, work, dirsnotfound = self._walkexplicit(match, subrepos)
1020 results, work, dirsnotfound = self._walkexplicit(match, subrepos)
1010 if matchtdir:
1021 if matchtdir:
1011 for d in work:
1022 for d in work:
1012 matchtdir(d[0])
1023 matchtdir(d[0])
1013 for d in dirsnotfound:
1024 for d in dirsnotfound:
1014 matchtdir(d)
1025 matchtdir(d)
1015
1026
1016 skipstep3 = skipstep3 and not (work or dirsnotfound)
1027 skipstep3 = skipstep3 and not (work or dirsnotfound)
1017 work = [d for d in work if not dirignore(d[0])]
1028 work = [d for d in work if not dirignore(d[0])]
1018
1029
1019 # step 2: visit subdirectories
1030 # step 2: visit subdirectories
1020 def traverse(work, alreadynormed):
1031 def traverse(work, alreadynormed):
1021 wadd = work.append
1032 wadd = work.append
1022 while work:
1033 while work:
1023 tracing.counter('dirstate.walk work', len(work))
1034 tracing.counter('dirstate.walk work', len(work))
1024 nd = work.pop()
1035 nd = work.pop()
1025 visitentries = match.visitchildrenset(nd)
1036 visitentries = match.visitchildrenset(nd)
1026 if not visitentries:
1037 if not visitentries:
1027 continue
1038 continue
1028 if visitentries == b'this' or visitentries == b'all':
1039 if visitentries == b'this' or visitentries == b'all':
1029 visitentries = None
1040 visitentries = None
1030 skip = None
1041 skip = None
1031 if nd != b'':
1042 if nd != b'':
1032 skip = b'.hg'
1043 skip = b'.hg'
1033 try:
1044 try:
1034 with tracing.log('dirstate.walk.traverse listdir %s', nd):
1045 with tracing.log('dirstate.walk.traverse listdir %s', nd):
1035 entries = listdir(join(nd), stat=True, skip=skip)
1046 entries = listdir(join(nd), stat=True, skip=skip)
1036 except OSError as inst:
1047 except OSError as inst:
1037 if inst.errno in (errno.EACCES, errno.ENOENT):
1048 if inst.errno in (errno.EACCES, errno.ENOENT):
1038 match.bad(
1049 match.bad(
1039 self.pathto(nd), encoding.strtolocal(inst.strerror)
1050 self.pathto(nd), encoding.strtolocal(inst.strerror)
1040 )
1051 )
1041 continue
1052 continue
1042 raise
1053 raise
1043 for f, kind, st in entries:
1054 for f, kind, st in entries:
1044 # Some matchers may return files in the visitentries set,
1055 # Some matchers may return files in the visitentries set,
1045 # instead of 'this', if the matcher explicitly mentions them
1056 # instead of 'this', if the matcher explicitly mentions them
1046 # and is not an exactmatcher. This is acceptable; we do not
1057 # and is not an exactmatcher. This is acceptable; we do not
1047 # make any hard assumptions about file-or-directory below
1058 # make any hard assumptions about file-or-directory below
1048 # based on the presence of `f` in visitentries. If
1059 # based on the presence of `f` in visitentries. If
1049 # visitchildrenset returned a set, we can always skip the
1060 # visitchildrenset returned a set, we can always skip the
1050 # entries *not* in the set it provided regardless of whether
1061 # entries *not* in the set it provided regardless of whether
1051 # they're actually a file or a directory.
1062 # they're actually a file or a directory.
1052 if visitentries and f not in visitentries:
1063 if visitentries and f not in visitentries:
1053 continue
1064 continue
1054 if normalizefile:
1065 if normalizefile:
1055 # even though f might be a directory, we're only
1066 # even though f might be a directory, we're only
1056 # interested in comparing it to files currently in the
1067 # interested in comparing it to files currently in the
1057 # dmap -- therefore normalizefile is enough
1068 # dmap -- therefore normalizefile is enough
1058 nf = normalizefile(
1069 nf = normalizefile(
1059 nd and (nd + b"/" + f) or f, True, True
1070 nd and (nd + b"/" + f) or f, True, True
1060 )
1071 )
1061 else:
1072 else:
1062 nf = nd and (nd + b"/" + f) or f
1073 nf = nd and (nd + b"/" + f) or f
1063 if nf not in results:
1074 if nf not in results:
1064 if kind == dirkind:
1075 if kind == dirkind:
1065 if not ignore(nf):
1076 if not ignore(nf):
1066 if matchtdir:
1077 if matchtdir:
1067 matchtdir(nf)
1078 matchtdir(nf)
1068 wadd(nf)
1079 wadd(nf)
1069 if nf in dmap and (matchalways or matchfn(nf)):
1080 if nf in dmap and (matchalways or matchfn(nf)):
1070 results[nf] = None
1081 results[nf] = None
1071 elif kind == regkind or kind == lnkkind:
1082 elif kind == regkind or kind == lnkkind:
1072 if nf in dmap:
1083 if nf in dmap:
1073 if matchalways or matchfn(nf):
1084 if matchalways or matchfn(nf):
1074 results[nf] = st
1085 results[nf] = st
1075 elif (matchalways or matchfn(nf)) and not ignore(
1086 elif (matchalways or matchfn(nf)) and not ignore(
1076 nf
1087 nf
1077 ):
1088 ):
1078 # unknown file -- normalize if necessary
1089 # unknown file -- normalize if necessary
1079 if not alreadynormed:
1090 if not alreadynormed:
1080 nf = normalize(nf, False, True)
1091 nf = normalize(nf, False, True)
1081 results[nf] = st
1092 results[nf] = st
1082 elif nf in dmap and (matchalways or matchfn(nf)):
1093 elif nf in dmap and (matchalways or matchfn(nf)):
1083 results[nf] = None
1094 results[nf] = None
1084
1095
1085 for nd, d in work:
1096 for nd, d in work:
1086 # alreadynormed means that processwork doesn't have to do any
1097 # alreadynormed means that processwork doesn't have to do any
1087 # expensive directory normalization
1098 # expensive directory normalization
1088 alreadynormed = not normalize or nd == d
1099 alreadynormed = not normalize or nd == d
1089 traverse([d], alreadynormed)
1100 traverse([d], alreadynormed)
1090
1101
1091 for s in subrepos:
1102 for s in subrepos:
1092 del results[s]
1103 del results[s]
1093 del results[b'.hg']
1104 del results[b'.hg']
1094
1105
1095 # step 3: visit remaining files from dmap
1106 # step 3: visit remaining files from dmap
1096 if not skipstep3 and not exact:
1107 if not skipstep3 and not exact:
1097 # If a dmap file is not in results yet, it was either
1108 # If a dmap file is not in results yet, it was either
1098 # a) not matching matchfn, b) ignored, c) missing, or d) under a
1109 # a) not matching matchfn, b) ignored, c) missing, or d) under a
1099 # symlink directory.
1110 # symlink directory.
1100 if not results and matchalways:
1111 if not results and matchalways:
1101 visit = [f for f in dmap]
1112 visit = [f for f in dmap]
1102 else:
1113 else:
1103 visit = [f for f in dmap if f not in results and matchfn(f)]
1114 visit = [f for f in dmap if f not in results and matchfn(f)]
1104 visit.sort()
1115 visit.sort()
1105
1116
1106 if unknown:
1117 if unknown:
1107 # unknown == True means we walked all dirs under the roots
1118 # unknown == True means we walked all dirs under the roots
1108 # that weren't ignored, and everything that matched was stat'ed
1119 # that weren't ignored, and everything that matched was stat'ed
1109 # and is already in results.
1120 # and is already in results.
1110 # The rest must thus be ignored or under a symlink.
1121 # The rest must thus be ignored or under a symlink.
1111 audit_path = pathutil.pathauditor(self._root, cached=True)
1122 audit_path = pathutil.pathauditor(self._root, cached=True)
1112
1123
1113 for nf in iter(visit):
1124 for nf in iter(visit):
1114 # If a stat for the same file was already added with a
1125 # If a stat for the same file was already added with a
1115 # different case, don't add one for this, since that would
1126 # different case, don't add one for this, since that would
1116 # make it appear as if the file exists under both names
1127 # make it appear as if the file exists under both names
1117 # on disk.
1128 # on disk.
1118 if (
1129 if (
1119 normalizefile
1130 normalizefile
1120 and normalizefile(nf, True, True) in results
1131 and normalizefile(nf, True, True) in results
1121 ):
1132 ):
1122 results[nf] = None
1133 results[nf] = None
1123 # Report ignored items in the dmap as long as they are not
1134 # Report ignored items in the dmap as long as they are not
1124 # under a symlink directory.
1135 # under a symlink directory.
1125 elif audit_path.check(nf):
1136 elif audit_path.check(nf):
1126 try:
1137 try:
1127 results[nf] = lstat(join(nf))
1138 results[nf] = lstat(join(nf))
1128 # file was just ignored, no links, and exists
1139 # file was just ignored, no links, and exists
1129 except OSError:
1140 except OSError:
1130 # file doesn't exist
1141 # file doesn't exist
1131 results[nf] = None
1142 results[nf] = None
1132 else:
1143 else:
1133 # It's either missing or under a symlink directory
1144 # It's either missing or under a symlink directory
1134 # which we in this case report as missing
1145 # which we in this case report as missing
1135 results[nf] = None
1146 results[nf] = None
1136 else:
1147 else:
1137 # We may not have walked the full directory tree above,
1148 # We may not have walked the full directory tree above,
1138 # so stat and check everything we missed.
1149 # so stat and check everything we missed.
1139 iv = iter(visit)
1150 iv = iter(visit)
1140 for st in util.statfiles([join(i) for i in visit]):
1151 for st in util.statfiles([join(i) for i in visit]):
1141 results[next(iv)] = st
1152 results[next(iv)] = st
1142 return results
1153 return results
1143
1154
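A sketch of driving walk() directly with an always-matcher (real callers normally go through status(); `repo` is hypothetical):

    from mercurial import match as matchmod

    m = matchmod.always()
    results = repo.dirstate.walk(m, subrepos=[], unknown=True, ignored=False)
    for fn, st in sorted(results.items()):
        missing = st is None  # tracked but absent on disk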
1144 def _rust_status(self, matcher, list_clean, list_ignored, list_unknown):
1155 def _rust_status(self, matcher, list_clean, list_ignored, list_unknown):
1145 # Force Rayon (Rust parallelism library) to respect the number of
1156 # Force Rayon (Rust parallelism library) to respect the number of
1146 # workers. This is a temporary workaround until Rust code knows
1157 # workers. This is a temporary workaround until Rust code knows
1147 # how to read the config file.
1158 # how to read the config file.
1148 numcpus = self._ui.configint(b"worker", b"numcpus")
1159 numcpus = self._ui.configint(b"worker", b"numcpus")
1149 if numcpus is not None:
1160 if numcpus is not None:
1150 encoding.environ.setdefault(b'RAYON_NUM_THREADS', b'%d' % numcpus)
1161 encoding.environ.setdefault(b'RAYON_NUM_THREADS', b'%d' % numcpus)
1151
1162
1152 workers_enabled = self._ui.configbool(b"worker", b"enabled", True)
1163 workers_enabled = self._ui.configbool(b"worker", b"enabled", True)
1153 if not workers_enabled:
1164 if not workers_enabled:
1154 encoding.environ[b"RAYON_NUM_THREADS"] = b"1"
1165 encoding.environ[b"RAYON_NUM_THREADS"] = b"1"
1155
1166
1156 (
1167 (
1157 lookup,
1168 lookup,
1158 modified,
1169 modified,
1159 added,
1170 added,
1160 removed,
1171 removed,
1161 deleted,
1172 deleted,
1162 clean,
1173 clean,
1163 ignored,
1174 ignored,
1164 unknown,
1175 unknown,
1165 warnings,
1176 warnings,
1166 bad,
1177 bad,
1167 traversed,
1178 traversed,
1168 dirty,
1179 dirty,
1169 ) = rustmod.status(
1180 ) = rustmod.status(
1170 self._map._rustmap,
1181 self._map._rustmap,
1171 matcher,
1182 matcher,
1172 self._rootdir,
1183 self._rootdir,
1173 self._ignorefiles(),
1184 self._ignorefiles(),
1174 self._checkexec,
1185 self._checkexec,
1175 self._lastnormaltime,
1186 self._lastnormaltime,
1176 bool(list_clean),
1187 bool(list_clean),
1177 bool(list_ignored),
1188 bool(list_ignored),
1178 bool(list_unknown),
1189 bool(list_unknown),
1179 bool(matcher.traversedir),
1190 bool(matcher.traversedir),
1180 )
1191 )
1181
1192
1182 self._dirty |= dirty
1193 self._dirty |= dirty
1183
1194
1184 if matcher.traversedir:
1195 if matcher.traversedir:
1185 for dir in traversed:
1196 for dir in traversed:
1186 matcher.traversedir(dir)
1197 matcher.traversedir(dir)
1187
1198
1188 if self._ui.warn:
1199 if self._ui.warn:
1189 for item in warnings:
1200 for item in warnings:
1190 if isinstance(item, tuple):
1201 if isinstance(item, tuple):
1191 file_path, syntax = item
1202 file_path, syntax = item
1192 msg = _(b"%s: ignoring invalid syntax '%s'\n") % (
1203 msg = _(b"%s: ignoring invalid syntax '%s'\n") % (
1193 file_path,
1204 file_path,
1194 syntax,
1205 syntax,
1195 )
1206 )
1196 self._ui.warn(msg)
1207 self._ui.warn(msg)
1197 else:
1208 else:
1198 msg = _(b"skipping unreadable pattern file '%s': %s\n")
1209 msg = _(b"skipping unreadable pattern file '%s': %s\n")
1199 self._ui.warn(
1210 self._ui.warn(
1200 msg
1211 msg
1201 % (
1212 % (
1202 pathutil.canonpath(
1213 pathutil.canonpath(
1203 self._rootdir, self._rootdir, item
1214 self._rootdir, self._rootdir, item
1204 ),
1215 ),
1205 b"No such file or directory",
1216 b"No such file or directory",
1206 )
1217 )
1207 )
1218 )
1208
1219
1209 for (fn, message) in bad:
1220 for (fn, message) in bad:
1210 matcher.bad(fn, encoding.strtolocal(message))
1221 matcher.bad(fn, encoding.strtolocal(message))
1211
1222
1212 status = scmutil.status(
1223 status = scmutil.status(
1213 modified=modified,
1224 modified=modified,
1214 added=added,
1225 added=added,
1215 removed=removed,
1226 removed=removed,
1216 deleted=deleted,
1227 deleted=deleted,
1217 unknown=unknown,
1228 unknown=unknown,
1218 ignored=ignored,
1229 ignored=ignored,
1219 clean=clean,
1230 clean=clean,
1220 )
1231 )
1221 return (lookup, status)
1232 return (lookup, status)
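Worth noting: the Rayon workaround above is pure environment plumbing, since Rayon sizes its global thread pool from RAYON_NUM_THREADS the first time the pool is used. A minimal standalone sketch of the same gating (the helper name and arguments are illustrative, not Mercurial API):

    import os

    def configure_rust_parallelism(numcpus, workers_enabled):
        # Mirror the gating above: Rayon reads RAYON_NUM_THREADS when its
        # global thread pool is first used, so setting it early is enough.
        if numcpus is not None:
            # setdefault: a value already set explicitly by the user wins
            os.environ.setdefault("RAYON_NUM_THREADS", "%d" % numcpus)
        if not workers_enabled:
            # disabling workers overrides everything, including the user
            os.environ["RAYON_NUM_THREADS"] = "1"

    configure_rust_parallelism(numcpus=4, workers_enabled=True)

Note the asymmetry: `worker.numcpus` only supplies a default, while `worker.enabled=False` clamps the Rust side to one thread unconditionally.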

    def status(self, match, subrepos, ignored, clean, unknown):
        """Determine the status of the working copy relative to the
        dirstate and return a pair of (unsure, status), where status is of type
        scmutil.status and:

          unsure:
            files that might have been modified since the dirstate was
            written, but need to be read to be sure (size is the same
            but mtime differs)
          status.modified:
            files that have definitely been modified since the dirstate
            was written (different size or mode)
          status.clean:
            files that have definitely not been modified since the
            dirstate was written
        """
        listignored, listclean, listunknown = ignored, clean, unknown
        lookup, modified, added, unknown, ignored = [], [], [], [], []
        removed, deleted, clean = [], [], []

        dmap = self._map
        dmap.preload()

        use_rust = True

        allowed_matchers = (
            matchmod.alwaysmatcher,
            matchmod.exactmatcher,
            matchmod.includematcher,
        )

        if rustmod is None:
            use_rust = False
        elif self._checkcase:
            # Case-insensitive filesystems are not handled yet
            use_rust = False
        elif subrepos:
            use_rust = False
        elif sparse.enabled:
            use_rust = False
        elif not isinstance(match, allowed_matchers):
            # Some matchers have yet to be implemented
            use_rust = False

        if use_rust:
            try:
                return self._rust_status(
                    match, listclean, listignored, listunknown
                )
            except rustmod.FallbackError:
                pass

        def noop(f):
            pass

        dcontains = dmap.__contains__
        dget = dmap.__getitem__
        ladd = lookup.append  # aka "unsure"
        madd = modified.append
        aadd = added.append
        uadd = unknown.append if listunknown else noop
        iadd = ignored.append if listignored else noop
        radd = removed.append
        dadd = deleted.append
        cadd = clean.append if listclean else noop
        mexact = match.exact
        dirignore = self._dirignore
        checkexec = self._checkexec
        copymap = self._map.copymap
        lastnormaltime = self._lastnormaltime

        # We need to do full walks when either
        # - we're listing all clean files, or
        # - match.traversedir does something, because match.traversedir should
        #   be called for every dir in the working dir
        full = listclean or match.traversedir is not None
        for fn, st in pycompat.iteritems(
            self.walk(match, subrepos, listunknown, listignored, full=full)
        ):
            if not dcontains(fn):
                if (listignored or mexact(fn)) and dirignore(fn):
                    if listignored:
                        iadd(fn)
                else:
                    uadd(fn)
                continue

            # This is equivalent to 'state, mode, size, time = dmap[fn]' but not
            # written like that for performance reasons. dmap[fn] is not a
            # Python tuple in compiled builds. The CPython UNPACK_SEQUENCE
            # opcode has fast paths when the value to be unpacked is a tuple or
            # a list, but falls back to creating a full-fledged iterator in
            # general. That is much slower than simply accessing and storing the
            # tuple members one by one.
            t = dget(fn)
-           state = t[0]
+           state = t.state
            mode = t[1]
            size = t[2]
            time = t[3]

            if not st and state in b"nma":
                dadd(fn)
            elif state == b'n':
                if (
                    size >= 0
                    and (
                        (size != st.st_size and size != st.st_size & _rangemask)
                        or ((mode ^ st.st_mode) & 0o100 and checkexec)
                    )
                    or size == FROM_P2  # other parent
                    or fn in copymap
                ):
                    if stat.S_ISLNK(st.st_mode) and size != st.st_size:
                        # issue6456: Size returned may be longer due to
                        # encryption on EXT-4 fscrypt, undecided.
                        ladd(fn)
                    else:
                        madd(fn)
                elif (
                    time != st[stat.ST_MTIME]
                    and time != st[stat.ST_MTIME] & _rangemask
                ):
                    ladd(fn)
                elif st[stat.ST_MTIME] == lastnormaltime:
                    # fn may have just been marked as normal and it may have
                    # changed in the same second without changing its size.
                    # This can happen if we quickly do multiple commits.
                    # Force lookup, so we don't miss such a racy file change.
                    ladd(fn)
                elif listclean:
                    cadd(fn)
            elif state == b'm':
                madd(fn)
            elif state == b'a':
                aadd(fn)
            elif state == b'r':
                radd(fn)
        status = scmutil.status(
            modified, added, removed, deleted, unknown, ignored, clean
        )
        return (lookup, status)
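The paired `!=` checks against `_rangemask` above exist because dirstate v1 stores size and mtime in 31-bit fields, so a large on-disk value only matches its recorded counterpart after masking. A small sketch under that assumption (`_rangemask = 0x7FFFFFFF` is defined at module level in this file):

    _rangemask = 0x7FFFFFFF  # dirstate v1 keeps size and mtime in 31 bits

    def sizes_may_match(recorded, on_disk):
        # A value over 2**31 - 1 was stored truncated at write time, so it
        # can only match the live stat result after masking.
        return recorded == on_disk or recorded == on_disk & _rangemask

    assert sizes_may_match(5, 5)
    assert sizes_may_match(5, (1 << 31) | 5)  # stored truncated, still clean
    assert not sizes_may_match(6, 5)          # genuinely different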

    def matches(self, match):
        """
        return files in the dirstate (in whatever state) filtered by match
        """
        dmap = self._map
        if rustmod is not None:
            dmap = self._map._rustmap

        if match.always():
            return dmap.keys()
        files = match.files()
        if match.isexact():
            # fast path -- filter the other way around, since typically files is
            # much smaller than dmap
            return [f for f in files if f in dmap]
        if match.prefix() and all(fn in dmap for fn in files):
            # fast path -- all the values are known to be files, so just return
            # that
            return list(files)
        return [f for f in dmap if match(f)]
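The fast paths above matter because dmap can hold every tracked file in the repository, while an exact matcher usually names only a handful. A toy model of the same asymmetry, with a plain dict standing in for the dirstate map:

    def matches(dmap, files, isexact):
        if isexact:
            # filter the handful of requested files against the big map,
            # not the big map against the requested files
            return [f for f in files if f in dmap]
        return [f for f in dmap if f in files]

    dmap = {b"a": 1, b"b": 2, b"c": 3}
    assert matches(dmap, [b"b", b"zzz"], isexact=True) == [b"b"]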

    def _actualfilename(self, tr):
        if tr:
            return self._pendingfilename
        else:
            return self._filename

    def savebackup(self, tr, backupname):
        '''Save current dirstate into backup file'''
        filename = self._actualfilename(tr)
        assert backupname != filename

        # use '_writedirstate' instead of 'write' to make sure changes are
        # written out, because the latter skips writing while a transaction
        # is running. The output file is then used to create a backup of the
        # dirstate at this point.
        if self._dirty or not self._opener.exists(filename):
            self._writedirstate(
                self._opener(filename, b"w", atomictemp=True, checkambig=True)
            )

        if tr:
            # ensure that subsequent tr.writepending returns True for
            # changes written out above, even if dirstate is never
            # changed after this
            tr.addfilegenerator(
                b'dirstate',
                (self._filename,),
                self._writedirstate,
                location=b'plain',
            )

            # ensure that pending file written above is unlinked at
            # failure, even if tr.writepending isn't invoked until the
            # end of this transaction
            tr.registertmp(filename, location=b'plain')

        self._opener.tryunlink(backupname)
        # hardlink backup is okay because _writedirstate is always called
        # with an "atomictemp=True" file.
        util.copyfile(
            self._opener.join(filename),
            self._opener.join(backupname),
            hardlink=True,
        )

    def restorebackup(self, tr, backupname):
        '''Restore dirstate by backup file'''
        # this "invalidate()" prevents "wlock.release()" from writing
        # changes of dirstate out after restoring from backup file
        self.invalidate()
        filename = self._actualfilename(tr)
        o = self._opener
        if util.samefile(o.join(backupname), o.join(filename)):
            o.unlink(backupname)
        else:
            o.rename(backupname, filename, checkambig=True)

    def clearbackup(self, tr, backupname):
        '''Clear backup file'''
        self._opener.unlink(backupname)
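The hardlink backup at the end of savebackup is safe only because _writedirstate always goes through an atomic-temp rename: the old inode is never rewritten in place, so a hardlink to it keeps the old content. A self-contained sketch of that invariant using only the standard library:

    import os
    import tempfile
    from pathlib import Path

    def atomic_write(path, data):
        # Write to a temp file in the same directory, then rename it over
        # the target. The previous inode is left untouched, so any
        # hardlink to it still sees the old content.
        fd, tmp = tempfile.mkstemp(dir=os.path.dirname(path))
        with os.fdopen(fd, "wb") as fp:
            fp.write(data)
        os.replace(tmp, path)

    with tempfile.TemporaryDirectory() as d:
        target = os.path.join(d, "dirstate")
        backup = os.path.join(d, "dirstate.backup")
        atomic_write(target, b"v1")
        os.link(target, backup)      # the cheap hardlink "copy"
        atomic_write(target, b"v2")  # rename swaps in a brand new inode
        assert Path(backup).read_bytes() == b"v1"
        assert Path(target).read_bytes() == b"v2"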
@@ -1,479 +1,494 @@
# parsers.py - Python implementation of parsers.c
#
# Copyright 2009 Olivia Mackall <olivia@selenic.com> and others
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import struct
import zlib

from ..node import (
    nullrev,
    sha1nodeconstants,
)
from .. import (
    error,
    pycompat,
    revlogutils,
    util,
)

from ..revlogutils import nodemap as nodemaputil
from ..revlogutils import constants as revlog_constants

stringio = pycompat.bytesio


_pack = struct.pack
_unpack = struct.unpack
_compress = zlib.compress
_decompress = zlib.decompress


class dirstatetuple(object):
    """represent a dirstate entry

    It contains:

    - state (one of 'n', 'a', 'r', 'm')
    - mode,
    - size,
    - mtime,
    """

    __slots__ = ('_state', '_mode', '_size', '_mtime')

    def __init__(self, state, mode, size, mtime):
        self._state = state
        self._mode = mode
        self._size = size
        self._mtime = mtime

    def __getitem__(self, idx):
        if idx == 0 or idx == -4:
            return self._state
        elif idx == 1 or idx == -3:
            return self._mode
        elif idx == 2 or idx == -2:
            return self._size
        elif idx == 3 or idx == -1:
            return self._mtime
        else:
            raise IndexError(idx)

+    @property
+    def state(self):
+        """
+        States are:
+          n  normal
+          m  needs merging
+          r  marked for removal
+          a  marked for addition
+
+        XXX This "state" is a bit obscure and mostly a direct expression of the
+        dirstatev1 format. It would make sense to ultimately deprecate it in
+        favor of the more "semantic" attributes.
+        """
+        return self._state
+
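A quick illustration of what the new property buys callers such as the status() loop in dirstate.py above, which can now write `t.state` instead of `t[0]`, using the class defined here:

    entry = dirstatetuple(b'n', 0o644, 42, 1600000000)
    assert entry.state == b'n'       # the new, explicit spelling
    assert entry[0] == entry.state   # tuple-style access still agrees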
    def v1_state(self):
        """return a "state" suitable for v1 serialization"""
        return self._state

    def v1_mode(self):
        """return a "mode" suitable for v1 serialization"""
        return self._mode

    def v1_size(self):
        """return a "size" suitable for v1 serialization"""
        return self._size

    def v1_mtime(self):
        """return an "mtime" suitable for v1 serialization"""
        return self._mtime


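Since dirstatetuple replaced a plain 4-tuple, __getitem__ mirrors tuple indexing, including negative indices. For example:

    e = dirstatetuple(b'a', 0o755, -1, 0)
    assert (e[0], e[1], e[2], e[3]) == (b'a', 0o755, -1, 0)
    assert e[-1] == e[3] and e[-4] == e[0]  # negative indices mirror a tuple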
def gettype(q):
    return int(q & 0xFFFF)


class BaseIndexObject(object):
    # Can this index be passed to an algorithm implemented in Rust?
    rust_ext_compat = 0
    # Format of an index entry according to Python's `struct` language
    index_format = revlog_constants.INDEX_ENTRY_V1
    # Size of a C unsigned long long int, platform independent
    big_int_size = struct.calcsize(b'>Q')
    # Size of a C long int, platform independent
    int_size = struct.calcsize(b'>i')
    # An empty index entry, used as a default value to be overridden, or nullrev
    null_item = (
        0,
        0,
        0,
        -1,
        -1,
        -1,
        -1,
        sha1nodeconstants.nullid,
        0,
        0,
        revlog_constants.COMP_MODE_INLINE,
        revlog_constants.COMP_MODE_INLINE,
    )

    @util.propertycache
    def entry_size(self):
        return self.index_format.size

    @property
    def nodemap(self):
        msg = b"index.nodemap is deprecated, use index.[has_node|rev|get_rev]"
        util.nouideprecwarn(msg, b'5.3', stacklevel=2)
        return self._nodemap

    @util.propertycache
    def _nodemap(self):
        nodemap = nodemaputil.NodeMap({sha1nodeconstants.nullid: nullrev})
        for r in range(0, len(self)):
            n = self[r][7]
            nodemap[n] = r
        return nodemap

    def has_node(self, node):
        """return True if the node exists in the index"""
        return node in self._nodemap

    def rev(self, node):
        """return a revision for a node

        If the node is unknown, raise a RevlogError"""
        return self._nodemap[node]

    def get_rev(self, node):
        """return a revision for a node

        If the node is unknown, return None"""
        return self._nodemap.get(node)
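`_nodemap` above is a propertycache: the node-to-rev table is built by one linear scan (field 7 of each entry is the node hash) on first use, then memoized until clearcaches(). The same pattern in miniature, with functools.cached_property standing in for util.propertycache:

    from functools import cached_property

    class TinyIndex:
        def __init__(self, entries):
            self._entries = entries  # each entry keeps its node at index 7

        @cached_property
        def _nodemap(self):
            # one O(len(index)) pass, paid only on the first lookup
            return {e[7]: r for r, e in enumerate(self._entries)}

        def get_rev(self, node):
            return self._nodemap.get(node)

    idx = TinyIndex([(0,) * 7 + (b"\x11" * 20,)])
    assert idx.get_rev(b"\x11" * 20) == 0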

    def _stripnodes(self, start):
        if '_nodemap' in vars(self):
            for r in range(start, len(self)):
                n = self[r][7]
                del self._nodemap[n]

    def clearcaches(self):
        self.__dict__.pop('_nodemap', None)

    def __len__(self):
        return self._lgt + len(self._extra)

    def append(self, tup):
        if '_nodemap' in vars(self):
            self._nodemap[tup[7]] = len(self)
        data = self._pack_entry(len(self), tup)
        self._extra.append(data)

    def _pack_entry(self, rev, entry):
        assert entry[8] == 0
        assert entry[9] == 0
        return self.index_format.pack(*entry[:8])

    def _check_index(self, i):
        if not isinstance(i, int):
            raise TypeError(b"expecting int indexes")
        if i < 0 or i >= len(self):
            raise IndexError

    def __getitem__(self, i):
        if i == -1:
            return self.null_item
        self._check_index(i)
        if i >= self._lgt:
            data = self._extra[i - self._lgt]
        else:
            index = self._calculate_index(i)
            data = self._data[index : index + self.entry_size]
        r = self._unpack_entry(i, data)
        if self._lgt and i == 0:
            offset = revlogutils.offset_type(0, gettype(r[0]))
            r = (offset,) + r[1:]
        return r

    def _unpack_entry(self, rev, data):
        r = self.index_format.unpack(data)
        r = r + (
            0,
            0,
            revlog_constants.COMP_MODE_INLINE,
            revlog_constants.COMP_MODE_INLINE,
        )
        return r

    def pack_header(self, header):
        """pack header information as binary"""
        v_fmt = revlog_constants.INDEX_HEADER
        return v_fmt.pack(header)

    def entry_binary(self, rev):
        """return the raw binary string representing a revision"""
        entry = self[rev]
        p = revlog_constants.INDEX_ENTRY_V1.pack(*entry[:8])
        if rev == 0:
            p = p[revlog_constants.INDEX_HEADER.size :]
        return p
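A concrete view of what index_format packs here, assuming the historical v1 entry layout b">Qiiiiii20s12x" (offset/flags, compressed and uncompressed lengths, delta base, link rev, both parents, 20-byte node, 12 bytes of padding); the exact constant lives in revlog_constants, so treat this spelling as an assumption of the sketch:

    import struct

    # Assumed stand-in for revlog_constants.INDEX_ENTRY_V1
    INDEX_ENTRY_V1 = struct.Struct(b">Qiiiiii20s12x")

    entry = (0, 11, 25, 0, 0, -1, -1, b"\xaa" * 20)
    raw = INDEX_ENTRY_V1.pack(*entry)
    assert len(raw) == 64                       # fixed 64-byte records
    assert INDEX_ENTRY_V1.unpack(raw) == entry  # lossless round trip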


class IndexObject(BaseIndexObject):
    def __init__(self, data):
        assert len(data) % self.entry_size == 0, (
            len(data),
            self.entry_size,
            len(data) % self.entry_size,
        )
        self._data = data
        self._lgt = len(data) // self.entry_size
        self._extra = []

    def _calculate_index(self, i):
        return i * self.entry_size

    def __delitem__(self, i):
        if not isinstance(i, slice) or not i.stop == -1 or i.step is not None:
            raise ValueError(b"deleting slices only supports a:-1 with step 1")
        i = i.start
        self._check_index(i)
        self._stripnodes(i)
        if i < self._lgt:
            self._data = self._data[: i * self.entry_size]
            self._lgt = i
            self._extra = []
        else:
            self._extra = self._extra[: i - self._lgt]


class PersistentNodeMapIndexObject(IndexObject):
    """a debug-oriented class to test the persistent nodemap

    We need a simple python object to test API and higher level behavior. See
    the Rust implementation for more serious usage. This should be used only
    through the dedicated `devel.persistent-nodemap` config.
    """

    def nodemap_data_all(self):
        """Return bytes containing a full serialization of a nodemap

        The nodemap should be valid for the full set of revisions in the
        index."""
        return nodemaputil.persistent_data(self)

    def nodemap_data_incremental(self):
        """Return bytes containing an incremental update to the persistent nodemap

        This contains the data for an append-only update of the data provided
        in the last call to `update_nodemap_data`.
        """
        if self._nm_root is None:
            return None
        docket = self._nm_docket
        changed, data = nodemaputil.update_persistent_data(
            self, self._nm_root, self._nm_max_idx, self._nm_docket.tip_rev
        )

        self._nm_root = self._nm_max_idx = self._nm_docket = None
        return docket, changed, data

    def update_nodemap_data(self, docket, nm_data):
        """provide a full block of persisted binary data for a nodemap

        The data are expected to come from disk. See `nodemap_data_all` for a
        producer of such data."""
        if nm_data is not None:
            self._nm_root, self._nm_max_idx = nodemaputil.parse_data(nm_data)
            if self._nm_root:
                self._nm_docket = docket
            else:
                self._nm_root = self._nm_max_idx = self._nm_docket = None


class InlinedIndexObject(BaseIndexObject):
    def __init__(self, data, inline=0):
        self._data = data
        self._lgt = self._inline_scan(None)
        self._inline_scan(self._lgt)
        self._extra = []

    def _inline_scan(self, lgt):
        off = 0
        if lgt is not None:
            self._offsets = [0] * lgt
        count = 0
        while off <= len(self._data) - self.entry_size:
            start = off + self.big_int_size
            (s,) = struct.unpack(
                b'>i',
                self._data[start : start + self.int_size],
            )
            if lgt is not None:
                self._offsets[count] = off
            count += 1
            off += self.entry_size + s
        if off != len(self._data):
            raise ValueError(b"corrupted data")
        return count

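In an inline revlog the fixed-size index entries are interleaved with their variable-length data chunks, and each chunk's compressed length is readable at a fixed offset inside its entry, which is why _inline_scan can hop from entry to entry. A toy version over a deliberately simplified record format (not the real entry layout):

    import struct

    # simplified record header: (offset_flags, compressed_length)
    ENTRY = struct.Struct(b">Qi")

    def inline_offsets(data):
        # Collect the starting offset of every entry, skipping each
        # entry's trailing data chunk, exactly as _inline_scan does.
        offsets, off = [], 0
        while off <= len(data) - ENTRY.size:
            _, clen = ENTRY.unpack(data[off : off + ENTRY.size])
            offsets.append(off)
            off += ENTRY.size + clen
        if off != len(data):
            raise ValueError("corrupted data")
        return offsets

    blob = ENTRY.pack(0, 3) + b"abc" + ENTRY.pack(0, 1) + b"z"
    assert inline_offsets(blob) == [0, 15]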
    def __delitem__(self, i):
        if not isinstance(i, slice) or not i.stop == -1 or i.step is not None:
            raise ValueError(b"deleting slices only supports a:-1 with step 1")
        i = i.start
        self._check_index(i)
        self._stripnodes(i)
        if i < self._lgt:
            self._offsets = self._offsets[:i]
            self._lgt = i
            self._extra = []
        else:
            self._extra = self._extra[: i - self._lgt]

    def _calculate_index(self, i):
        return self._offsets[i]


def parse_index2(data, inline, revlogv2=False):
    if not inline:
        cls = IndexObject2 if revlogv2 else IndexObject
        return cls(data), None
    cls = InlinedIndexObject
    return cls(data, inline), (0, data)


def parse_index_cl_v2(data):
    return IndexChangelogV2(data), None


class IndexObject2(IndexObject):
    index_format = revlog_constants.INDEX_ENTRY_V2

    def replace_sidedata_info(
        self,
        rev,
        sidedata_offset,
        sidedata_length,
        offset_flags,
        compression_mode,
    ):
        """
        Replace an existing index entry's sidedata offset and length with new
        ones.
        This cannot be used outside of the context of sidedata rewriting,
        inside the transaction that creates the revision `rev`.
        """
        if rev < 0:
            raise KeyError
        self._check_index(rev)
        if rev < self._lgt:
            msg = b"cannot rewrite entries outside of this transaction"
            raise KeyError(msg)
        else:
            entry = list(self[rev])
            entry[0] = offset_flags
            entry[8] = sidedata_offset
            entry[9] = sidedata_length
            entry[11] = compression_mode
            entry = tuple(entry)
            new = self._pack_entry(rev, entry)
            self._extra[rev - self._lgt] = new

    def _unpack_entry(self, rev, data):
        data = self.index_format.unpack(data)
        entry = data[:10]
        data_comp = data[10] & 3
        sidedata_comp = (data[10] & (3 << 2)) >> 2
        return entry + (data_comp, sidedata_comp)

    def _pack_entry(self, rev, entry):
        data = entry[:10]
        data_comp = entry[10] & 3
        sidedata_comp = (entry[11] & 3) << 2
        data += (data_comp | sidedata_comp,)

        return self.index_format.pack(*data)
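The two compression modes, data and sidedata, share one byte: two bits each. Isolated from the surrounding entry handling, the packing used by _pack_entry and _unpack_entry looks like this:

    def pack_comp_modes(data_comp, sidedata_comp):
        # two 2-bit fields in one byte: bits 0-1 data, bits 2-3 sidedata
        return (data_comp & 3) | ((sidedata_comp & 3) << 2)

    def unpack_comp_modes(byte):
        return byte & 3, (byte >> 2) & 3

    for d in range(4):
        for s in range(4):
            assert unpack_comp_modes(pack_comp_modes(d, s)) == (d, s)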

    def entry_binary(self, rev):
        """return the raw binary string representing a revision"""
        entry = self[rev]
        return self._pack_entry(rev, entry)

    def pack_header(self, header):
        """pack header information as binary"""
        msg = 'version header should go in the docket, not the index: %d'
        msg %= header
        raise error.ProgrammingError(msg)


class IndexChangelogV2(IndexObject2):
    index_format = revlog_constants.INDEX_ENTRY_CL_V2

    def _unpack_entry(self, rev, data, r=True):
        items = self.index_format.unpack(data)
        entry = items[:3] + (rev, rev) + items[3:8]
        data_comp = items[8] & 3
        sidedata_comp = (items[8] >> 2) & 3
        return entry + (data_comp, sidedata_comp)

    def _pack_entry(self, rev, entry):
        assert entry[3] == rev, entry[3]
        assert entry[4] == rev, entry[4]
        data = entry[:3] + entry[5:10]
        data_comp = entry[10] & 3
        sidedata_comp = (entry[11] & 3) << 2
        data += (data_comp | sidedata_comp,)
        return self.index_format.pack(*data)


def parse_index_devel_nodemap(data, inline):
    """like parse_index2, but always return a PersistentNodeMapIndexObject"""
    return PersistentNodeMapIndexObject(data), None


def parse_dirstate(dmap, copymap, st):
    parents = [st[:20], st[20:40]]
    # dereference fields so they will be local in loop
    format = b">cllll"
    e_size = struct.calcsize(format)
    pos1 = 40
    l = len(st)

    # the inner loop
    while pos1 < l:
        pos2 = pos1 + e_size
        e = _unpack(b">cllll", st[pos1:pos2])  # a literal here is faster
        pos1 = pos2 + e[4]
        f = st[pos2:pos1]
        if b'\0' in f:
            f, c = f.split(b'\0')
            copymap[f] = c
        dmap[f] = dirstatetuple(*e[:4])
    return parents
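Each v1 dirstate record parsed above is a fixed >cllll header (state, mode, size, mtime, filename length) followed by the filename, with an optional NUL-separated copy source, all laid back to back after the two 20-byte parents. A self-contained round trip of one record:

    import struct

    HEADER = struct.Struct(b">cllll")  # state, mode, size, mtime, len(name)

    def build_record(state, mode, size, mtime, fname, copied=None):
        if copied:
            fname = fname + b"\0" + copied
        return HEADER.pack(state, mode, size, mtime, len(fname)) + fname

    raw = build_record(b'n', 0o644, 12, 1600000000, b"a.txt", copied=b"b.txt")
    state, mode, size, mtime, flen = HEADER.unpack(raw[: HEADER.size])
    name = raw[HEADER.size : HEADER.size + flen]
    assert name.split(b"\0") == [b"a.txt", b"b.txt"]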


def pack_dirstate(dmap, copymap, pl, now):
    now = int(now)
    cs = stringio()
    write = cs.write
    write(b"".join(pl))
    for f, e in pycompat.iteritems(dmap):
        if e[0] == b'n' and e[3] == now:
            # The file was last modified "simultaneously" with the current
            # write to dirstate (i.e. within the same second for file-
            # systems with a granularity of 1 sec). This commonly happens
            # for at least a couple of files on 'update'.
            # The user could change the file without changing its size
            # within the same second. Invalidate the file's mtime in
            # dirstate, forcing future 'status' calls to compare the
            # contents of the file if the size is the same. This prevents
            # mistakenly treating such files as clean.
            e = dirstatetuple(e[0], e[1], e[2], -1)
            dmap[f] = e

        if f in copymap:
            f = b"%s\0%s" % (f, copymap[f])
        e = _pack(
            b">cllll",
            e.v1_state(),
            e.v1_mode(),
            e.v1_size(),
            e.v1_mtime(),
            len(f),
        )
        write(e)
        write(f)
    return cs.getvalue()
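The `e[3] == now` branch is the write-side half of the race guard that status() handles with lastnormaltime: any entry whose mtime equals the second in which the dirstate itself is written gets mtime -1 on disk, forcing the next status run to compare file contents. Schematically:

    def normalize_mtime(recorded_mtime, now):
        # A file touched in the same second the dirstate is written could
        # change again within that second without changing size; storing
        # -1 forces the next status run to look at the file itself.
        return -1 if recorded_mtime == int(now) else recorded_mtime

    assert normalize_mtime(1600000000, 1600000000.7) == -1
    assert normalize_mtime(1599999999, 1600000000.0) == 1599999999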