dirstate-entry: restrict `from_p2` property to tracked file...
marmoute
r48306:28632eb3 default
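
This changeset tightens the `from_p2` property of the `dirstate_tuple` type in the C `parsers` extension. Previously the property answered true for any entry whose `size` field carried the internal "from other parent" marker (`dirstate_v1_from_p2`, i.e. -2); it now also requires the entry to be tracked (state `'n'`), leaving removed-but-from-p2 entries to the separate `from_p2_removed` property. A minimal pure-Python sketch of the before/after predicates (the helper names here are illustrative, not part of the module):

    # Sketch of the v1 semantics touched by this change. An entry is
    # (state, mode, size, mtime); size == -2 is the internal marker for
    # "came from the other parent" (dirstate_v1_from_p2 in parsers.c).
    DIRSTATE_V1_FROM_P2 = -2

    def from_p2_old(state, size):
        # before: any entry carrying the marker, even removed ('r') ones
        return size == DIRSTATE_V1_FROM_P2

    def from_p2_new(state, size):
        # after: only tracked ('n') entries qualify; removed entries are
        # reported by the separate `from_p2_removed` property instead
        return state == b'n' and size == DIRSTATE_V1_FROM_P2

    assert from_p2_old(b'r', -2) and not from_p2_new(b'r', -2)
    assert from_p2_new(b'n', -2)

With the extension built, the same distinction should be observable on the real type: `parsers.dirstatetuple(b'n', 0, -2, 0).from_p2` is true, while for `parsers.dirstatetuple(b'r', 0, -2, 0)` only `from_p2_removed` is.
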
diff --git a/mercurial/cext/parsers.c b/mercurial/cext/parsers.c
--- a/mercurial/cext/parsers.c
+++ b/mercurial/cext/parsers.c
@@ -1,860 +1,860 @@
 /*
  parsers.c - efficient content parsing

  Copyright 2008 Olivia Mackall <olivia@selenic.com> and others

  This software may be used and distributed according to the terms of
  the GNU General Public License, incorporated herein by reference.
 */

 #define PY_SSIZE_T_CLEAN
 #include <Python.h>
 #include <ctype.h>
 #include <stddef.h>
 #include <string.h>

 #include "bitmanipulation.h"
 #include "charencode.h"
 #include "util.h"

 #ifdef IS_PY3K
 /* The mapping of Python types is meant to be temporary to get Python
  * 3 to compile. We should remove this once Python 3 support is fully
  * supported and proper types are used in the extensions themselves. */
 #define PyInt_Check PyLong_Check
 #define PyInt_FromLong PyLong_FromLong
 #define PyInt_FromSsize_t PyLong_FromSsize_t
 #define PyInt_AsLong PyLong_AsLong
 #endif

 static const char *const versionerrortext = "Python minor version mismatch";

 static const int dirstate_v1_from_p2 = -2;
 static const int dirstate_v1_nonnormal = -1;

 static PyObject *dict_new_presized(PyObject *self, PyObject *args)
 {
     Py_ssize_t expected_size;

     if (!PyArg_ParseTuple(args, "n:make_presized_dict", &expected_size)) {
         return NULL;
     }

     return _dict_new_presized(expected_size);
 }

 static inline dirstateTupleObject *make_dirstate_tuple(char state, int mode,
                                                        int size, int mtime)
 {
     dirstateTupleObject *t =
         PyObject_New(dirstateTupleObject, &dirstateTupleType);
     if (!t) {
         return NULL;
     }
     t->state = state;
     t->mode = mode;
     t->size = size;
     t->mtime = mtime;
     return t;
 }

 static PyObject *dirstate_tuple_new(PyTypeObject *subtype, PyObject *args,
                                     PyObject *kwds)
 {
     /* We do all the initialization here and not a tp_init function because
      * dirstate_tuple is immutable. */
     dirstateTupleObject *t;
     char state;
     int size, mode, mtime;
     if (!PyArg_ParseTuple(args, "ciii", &state, &mode, &size, &mtime)) {
         return NULL;
     }

     t = (dirstateTupleObject *)subtype->tp_alloc(subtype, 1);
     if (!t) {
         return NULL;
     }
     t->state = state;
     t->mode = mode;
     t->size = size;
     t->mtime = mtime;

     return (PyObject *)t;
 }

 static void dirstate_tuple_dealloc(PyObject *o)
 {
     PyObject_Del(o);
 }

 static Py_ssize_t dirstate_tuple_length(PyObject *o)
 {
     return 4;
 }

 static PyObject *dirstate_tuple_item(PyObject *o, Py_ssize_t i)
 {
     dirstateTupleObject *t = (dirstateTupleObject *)o;
     switch (i) {
     case 0:
         return PyBytes_FromStringAndSize(&t->state, 1);
     case 1:
         return PyInt_FromLong(t->mode);
     case 2:
         return PyInt_FromLong(t->size);
     case 3:
         return PyInt_FromLong(t->mtime);
     default:
         PyErr_SetString(PyExc_IndexError, "index out of range");
         return NULL;
     }
 }

 static PySequenceMethods dirstate_tuple_sq = {
     dirstate_tuple_length, /* sq_length */
     0,                     /* sq_concat */
     0,                     /* sq_repeat */
     dirstate_tuple_item,   /* sq_item */
     0,                     /* sq_ass_item */
     0,                     /* sq_contains */
     0,                     /* sq_inplace_concat */
     0                      /* sq_inplace_repeat */
 };

 static PyObject *dirstatetuple_v1_state(dirstateTupleObject *self)
 {
     return PyBytes_FromStringAndSize(&self->state, 1);
 };

 static PyObject *dirstatetuple_v1_mode(dirstateTupleObject *self)
 {
     return PyInt_FromLong(self->mode);
 };

 static PyObject *dirstatetuple_v1_size(dirstateTupleObject *self)
 {
     return PyInt_FromLong(self->size);
 };

 static PyObject *dirstatetuple_v1_mtime(dirstateTupleObject *self)
 {
     return PyInt_FromLong(self->mtime);
 };

 static PyMethodDef dirstatetuple_methods[] = {
     {"v1_state", (PyCFunction)dirstatetuple_v1_state, METH_NOARGS,
      "return a \"state\" suitable for v1 serialization"},
     {"v1_mode", (PyCFunction)dirstatetuple_v1_mode, METH_NOARGS,
      "return a \"mode\" suitable for v1 serialization"},
     {"v1_size", (PyCFunction)dirstatetuple_v1_size, METH_NOARGS,
      "return a \"size\" suitable for v1 serialization"},
     {"v1_mtime", (PyCFunction)dirstatetuple_v1_mtime, METH_NOARGS,
      "return a \"mtime\" suitable for v1 serialization"},
     {NULL} /* Sentinel */
 };

 static PyObject *dirstatetuple_get_state(dirstateTupleObject *self)
 {
     return PyBytes_FromStringAndSize(&self->state, 1);
 };

 static PyObject *dirstatetuple_get_merged(dirstateTupleObject *self)
 {
     if (self->state == 'm') {
         Py_RETURN_TRUE;
     } else {
         Py_RETURN_FALSE;
     }
 };

 static PyObject *dirstatetuple_get_merged_removed(dirstateTupleObject *self)
 {
     if (self->state == 'r' && self->size == dirstate_v1_nonnormal) {
         Py_RETURN_TRUE;
     } else {
         Py_RETURN_FALSE;
     }
 };

 static PyObject *dirstatetuple_get_from_p2(dirstateTupleObject *self)
 {
-    if (self->size == dirstate_v1_from_p2) {
+    if (self->state == 'n' && self->size == dirstate_v1_from_p2) {
         Py_RETURN_TRUE;
     } else {
         Py_RETURN_FALSE;
     }
 };

 static PyObject *dirstatetuple_get_from_p2_removed(dirstateTupleObject *self)
 {
     if (self->state == 'r' && self->size == dirstate_v1_from_p2) {
         Py_RETURN_TRUE;
     } else {
         Py_RETURN_FALSE;
     }
 };

 static PyObject *dirstatetuple_get_removed(dirstateTupleObject *self)
 {
     if (self->state == 'r') {
         Py_RETURN_TRUE;
     } else {
         Py_RETURN_FALSE;
     }
 };

 static PyGetSetDef dirstatetuple_getset[] = {
     {"state", (getter)dirstatetuple_get_state, NULL, "state", NULL},
     {"merged_removed", (getter)dirstatetuple_get_merged_removed, NULL,
      "merged_removed", NULL},
     {"merged", (getter)dirstatetuple_get_merged, NULL, "merged", NULL},
     {"from_p2_removed", (getter)dirstatetuple_get_from_p2_removed, NULL,
      "from_p2_removed", NULL},
     {"from_p2", (getter)dirstatetuple_get_from_p2, NULL, "from_p2", NULL},
     {"removed", (getter)dirstatetuple_get_removed, NULL, "removed", NULL},
     {NULL} /* Sentinel */
 };

 PyTypeObject dirstateTupleType = {
     PyVarObject_HEAD_INIT(NULL, 0)      /* header */
     "dirstate_tuple",                   /* tp_name */
     sizeof(dirstateTupleObject),        /* tp_basicsize */
     0,                                  /* tp_itemsize */
     (destructor)dirstate_tuple_dealloc, /* tp_dealloc */
     0,                                  /* tp_print */
     0,                                  /* tp_getattr */
     0,                                  /* tp_setattr */
     0,                                  /* tp_compare */
     0,                                  /* tp_repr */
     0,                                  /* tp_as_number */
     &dirstate_tuple_sq,                 /* tp_as_sequence */
     0,                                  /* tp_as_mapping */
     0,                                  /* tp_hash */
     0,                                  /* tp_call */
     0,                                  /* tp_str */
     0,                                  /* tp_getattro */
     0,                                  /* tp_setattro */
     0,                                  /* tp_as_buffer */
     Py_TPFLAGS_DEFAULT,                 /* tp_flags */
     "dirstate tuple",                   /* tp_doc */
     0,                                  /* tp_traverse */
     0,                                  /* tp_clear */
     0,                                  /* tp_richcompare */
     0,                                  /* tp_weaklistoffset */
     0,                                  /* tp_iter */
     0,                                  /* tp_iternext */
     dirstatetuple_methods,              /* tp_methods */
     0,                                  /* tp_members */
     dirstatetuple_getset,               /* tp_getset */
     0,                                  /* tp_base */
     0,                                  /* tp_dict */
     0,                                  /* tp_descr_get */
     0,                                  /* tp_descr_set */
     0,                                  /* tp_dictoffset */
     0,                                  /* tp_init */
     0,                                  /* tp_alloc */
     dirstate_tuple_new,                 /* tp_new */
 };

 static PyObject *parse_dirstate(PyObject *self, PyObject *args)
 {
     PyObject *dmap, *cmap, *parents = NULL, *ret = NULL;
     PyObject *fname = NULL, *cname = NULL, *entry = NULL;
     char state, *cur, *str, *cpos;
     int mode, size, mtime;
     unsigned int flen, pos = 40;
     Py_ssize_t len = 40;
     Py_ssize_t readlen;

     if (!PyArg_ParseTuple(
             args, PY23("O!O!s#:parse_dirstate", "O!O!y#:parse_dirstate"),
             &PyDict_Type, &dmap, &PyDict_Type, &cmap, &str, &readlen)) {
         goto quit;
     }

     len = readlen;

     /* read parents */
     if (len < 40) {
         PyErr_SetString(PyExc_ValueError,
                         "too little data for parents");
         goto quit;
     }

     parents = Py_BuildValue(PY23("s#s#", "y#y#"), str, (Py_ssize_t)20,
                             str + 20, (Py_ssize_t)20);
     if (!parents) {
         goto quit;
     }

     /* read filenames */
     while (pos >= 40 && pos < len) {
         if (pos + 17 > len) {
             PyErr_SetString(PyExc_ValueError,
                             "overflow in dirstate");
             goto quit;
         }
         cur = str + pos;
         /* unpack header */
         state = *cur;
         mode = getbe32(cur + 1);
         size = getbe32(cur + 5);
         mtime = getbe32(cur + 9);
         flen = getbe32(cur + 13);
         pos += 17;
         cur += 17;
         if (flen > len - pos) {
             PyErr_SetString(PyExc_ValueError,
                             "overflow in dirstate");
             goto quit;
         }

         entry =
             (PyObject *)make_dirstate_tuple(state, mode, size, mtime);
         cpos = memchr(cur, 0, flen);
         if (cpos) {
             fname = PyBytes_FromStringAndSize(cur, cpos - cur);
             cname = PyBytes_FromStringAndSize(
                 cpos + 1, flen - (cpos - cur) - 1);
             if (!fname || !cname ||
                 PyDict_SetItem(cmap, fname, cname) == -1 ||
                 PyDict_SetItem(dmap, fname, entry) == -1) {
                 goto quit;
             }
             Py_DECREF(cname);
         } else {
             fname = PyBytes_FromStringAndSize(cur, flen);
             if (!fname ||
                 PyDict_SetItem(dmap, fname, entry) == -1) {
                 goto quit;
             }
         }
         Py_DECREF(fname);
         Py_DECREF(entry);
         fname = cname = entry = NULL;
         pos += flen;
     }

     ret = parents;
     Py_INCREF(ret);
 quit:
     Py_XDECREF(fname);
     Py_XDECREF(cname);
     Py_XDECREF(entry);
     Py_XDECREF(parents);
     return ret;
 }

 /*
  * Build a set of non-normal and other parent entries from the dirstate dmap
  */
 static PyObject *nonnormalotherparententries(PyObject *self, PyObject *args)
 {
     PyObject *dmap, *fname, *v;
     PyObject *nonnset = NULL, *otherpset = NULL, *result = NULL;
     Py_ssize_t pos;

     if (!PyArg_ParseTuple(args, "O!:nonnormalentries", &PyDict_Type,
                           &dmap)) {
         goto bail;
     }

     nonnset = PySet_New(NULL);
     if (nonnset == NULL) {
         goto bail;
     }

     otherpset = PySet_New(NULL);
     if (otherpset == NULL) {
         goto bail;
     }

     pos = 0;
     while (PyDict_Next(dmap, &pos, &fname, &v)) {
         dirstateTupleObject *t;
         if (!dirstate_tuple_check(v)) {
             PyErr_SetString(PyExc_TypeError,
                             "expected a dirstate tuple");
             goto bail;
         }
         t = (dirstateTupleObject *)v;

         if (t->state == 'n' && t->size == -2) {
             if (PySet_Add(otherpset, fname) == -1) {
                 goto bail;
             }
         }

         if (t->state == 'n' && t->mtime != -1) {
             continue;
         }
         if (PySet_Add(nonnset, fname) == -1) {
             goto bail;
         }
     }

     result = Py_BuildValue("(OO)", nonnset, otherpset);
     if (result == NULL) {
         goto bail;
     }
     Py_DECREF(nonnset);
     Py_DECREF(otherpset);
     return result;
 bail:
     Py_XDECREF(nonnset);
     Py_XDECREF(otherpset);
     Py_XDECREF(result);
     return NULL;
 }

 /*
  * Efficiently pack a dirstate object into its on-disk format.
  */
 static PyObject *pack_dirstate(PyObject *self, PyObject *args)
 {
     PyObject *packobj = NULL;
     PyObject *map, *copymap, *pl, *mtime_unset = NULL;
     Py_ssize_t nbytes, pos, l;
     PyObject *k, *v = NULL, *pn;
     char *p, *s;
     int now;

     if (!PyArg_ParseTuple(args, "O!O!O!i:pack_dirstate", &PyDict_Type, &map,
                           &PyDict_Type, &copymap, &PyTuple_Type, &pl,
                           &now)) {
         return NULL;
     }

     if (PyTuple_Size(pl) != 2) {
         PyErr_SetString(PyExc_TypeError, "expected 2-element tuple");
         return NULL;
     }

     /* Figure out how much we need to allocate. */
     for (nbytes = 40, pos = 0; PyDict_Next(map, &pos, &k, &v);) {
         PyObject *c;
         if (!PyBytes_Check(k)) {
             PyErr_SetString(PyExc_TypeError, "expected string key");
             goto bail;
         }
         nbytes += PyBytes_GET_SIZE(k) + 17;
         c = PyDict_GetItem(copymap, k);
         if (c) {
             if (!PyBytes_Check(c)) {
                 PyErr_SetString(PyExc_TypeError,
                                 "expected string key");
                 goto bail;
             }
             nbytes += PyBytes_GET_SIZE(c) + 1;
         }
     }

     packobj = PyBytes_FromStringAndSize(NULL, nbytes);
     if (packobj == NULL) {
         goto bail;
     }

     p = PyBytes_AS_STRING(packobj);

     pn = PyTuple_GET_ITEM(pl, 0);
     if (PyBytes_AsStringAndSize(pn, &s, &l) == -1 || l != 20) {
         PyErr_SetString(PyExc_TypeError, "expected a 20-byte hash");
         goto bail;
     }
     memcpy(p, s, l);
     p += 20;
     pn = PyTuple_GET_ITEM(pl, 1);
     if (PyBytes_AsStringAndSize(pn, &s, &l) == -1 || l != 20) {
         PyErr_SetString(PyExc_TypeError, "expected a 20-byte hash");
         goto bail;
     }
     memcpy(p, s, l);
     p += 20;

     for (pos = 0; PyDict_Next(map, &pos, &k, &v);) {
         dirstateTupleObject *tuple;
         char state;
         int mode, size, mtime;
         Py_ssize_t len, l;
         PyObject *o;
         char *t;

         if (!dirstate_tuple_check(v)) {
             PyErr_SetString(PyExc_TypeError,
                             "expected a dirstate tuple");
             goto bail;
         }
         tuple = (dirstateTupleObject *)v;

         state = tuple->state;
         mode = tuple->mode;
         size = tuple->size;
         mtime = tuple->mtime;
         if (state == 'n' && mtime == now) {
             /* See pure/parsers.py:pack_dirstate for why we do
              * this. */
             mtime = -1;
             mtime_unset = (PyObject *)make_dirstate_tuple(
                 state, mode, size, mtime);
             if (!mtime_unset) {
                 goto bail;
             }
             if (PyDict_SetItem(map, k, mtime_unset) == -1) {
                 goto bail;
             }
             Py_DECREF(mtime_unset);
             mtime_unset = NULL;
         }
         *p++ = state;
         putbe32((uint32_t)mode, p);
         putbe32((uint32_t)size, p + 4);
         putbe32((uint32_t)mtime, p + 8);
         t = p + 12;
         p += 16;
         len = PyBytes_GET_SIZE(k);
         memcpy(p, PyBytes_AS_STRING(k), len);
         p += len;
         o = PyDict_GetItem(copymap, k);
         if (o) {
             *p++ = '\0';
             l = PyBytes_GET_SIZE(o);
             memcpy(p, PyBytes_AS_STRING(o), l);
             p += l;
             len += l + 1;
         }
         putbe32((uint32_t)len, t);
     }

     pos = p - PyBytes_AS_STRING(packobj);
     if (pos != nbytes) {
         PyErr_Format(PyExc_SystemError, "bad dirstate size: %ld != %ld",
                      (long)pos, (long)nbytes);
         goto bail;
     }

     return packobj;
 bail:
     Py_XDECREF(mtime_unset);
     Py_XDECREF(packobj);
     Py_XDECREF(v);
     return NULL;
 }

 #define BUMPED_FIX 1
 #define USING_SHA_256 2
 #define FM1_HEADER_SIZE (4 + 8 + 2 + 2 + 1 + 1 + 1)

 static PyObject *readshas(const char *source, unsigned char num,
                           Py_ssize_t hashwidth)
 {
     int i;
     PyObject *list = PyTuple_New(num);
     if (list == NULL) {
         return NULL;
     }
     for (i = 0; i < num; i++) {
         PyObject *hash = PyBytes_FromStringAndSize(source, hashwidth);
         if (hash == NULL) {
             Py_DECREF(list);
             return NULL;
         }
         PyTuple_SET_ITEM(list, i, hash);
         source += hashwidth;
     }
     return list;
 }

 static PyObject *fm1readmarker(const char *databegin, const char *dataend,
                                uint32_t *msize)
 {
     const char *data = databegin;
     const char *meta;

     double mtime;
     int16_t tz;
     uint16_t flags;
     unsigned char nsuccs, nparents, nmetadata;
     Py_ssize_t hashwidth = 20;

     PyObject *prec = NULL, *parents = NULL, *succs = NULL;
     PyObject *metadata = NULL, *ret = NULL;
     int i;

     if (data + FM1_HEADER_SIZE > dataend) {
         goto overflow;
     }

     *msize = getbe32(data);
     data += 4;
     mtime = getbefloat64(data);
     data += 8;
     tz = getbeint16(data);
     data += 2;
     flags = getbeuint16(data);
     data += 2;

     if (flags & USING_SHA_256) {
         hashwidth = 32;
     }

     nsuccs = (unsigned char)(*data++);
     nparents = (unsigned char)(*data++);
     nmetadata = (unsigned char)(*data++);

     if (databegin + *msize > dataend) {
         goto overflow;
     }
     dataend = databegin + *msize; /* narrow down to marker size */

     if (data + hashwidth > dataend) {
         goto overflow;
     }
     prec = PyBytes_FromStringAndSize(data, hashwidth);
     data += hashwidth;
     if (prec == NULL) {
         goto bail;
     }

     if (data + nsuccs * hashwidth > dataend) {
         goto overflow;
     }
     succs = readshas(data, nsuccs, hashwidth);
     if (succs == NULL) {
         goto bail;
     }
     data += nsuccs * hashwidth;

     if (nparents == 1 || nparents == 2) {
         if (data + nparents * hashwidth > dataend) {
             goto overflow;
         }
         parents = readshas(data, nparents, hashwidth);
         if (parents == NULL) {
             goto bail;
         }
         data += nparents * hashwidth;
     } else {
         parents = Py_None;
         Py_INCREF(parents);
     }

     if (data + 2 * nmetadata > dataend) {
         goto overflow;
     }
     meta = data + (2 * nmetadata);
     metadata = PyTuple_New(nmetadata);
     if (metadata == NULL) {
         goto bail;
     }
     for (i = 0; i < nmetadata; i++) {
         PyObject *tmp, *left = NULL, *right = NULL;
         Py_ssize_t leftsize = (unsigned char)(*data++);
         Py_ssize_t rightsize = (unsigned char)(*data++);
         if (meta + leftsize + rightsize > dataend) {
             goto overflow;
         }
         left = PyBytes_FromStringAndSize(meta, leftsize);
         meta += leftsize;
         right = PyBytes_FromStringAndSize(meta, rightsize);
         meta += rightsize;
         tmp = PyTuple_New(2);
         if (!left || !right || !tmp) {
             Py_XDECREF(left);
             Py_XDECREF(right);
             Py_XDECREF(tmp);
             goto bail;
         }
         PyTuple_SET_ITEM(tmp, 0, left);
         PyTuple_SET_ITEM(tmp, 1, right);
         PyTuple_SET_ITEM(metadata, i, tmp);
     }
     ret = Py_BuildValue("(OOHO(di)O)", prec, succs, flags, metadata, mtime,
                         (int)tz * 60, parents);
     goto bail; /* return successfully */

 overflow:
     PyErr_SetString(PyExc_ValueError, "overflow in obsstore");
 bail:
     Py_XDECREF(prec);
     Py_XDECREF(succs);
     Py_XDECREF(metadata);
     Py_XDECREF(parents);
     return ret;
 }

 static PyObject *fm1readmarkers(PyObject *self, PyObject *args)
 {
     const char *data, *dataend;
     Py_ssize_t datalen, offset, stop;
     PyObject *markers = NULL;

     if (!PyArg_ParseTuple(args, PY23("s#nn", "y#nn"), &data, &datalen,
                           &offset, &stop)) {
         return NULL;
     }
     if (offset < 0) {
         PyErr_SetString(PyExc_ValueError,
                         "invalid negative offset in fm1readmarkers");
         return NULL;
     }
     if (stop > datalen) {
         PyErr_SetString(
             PyExc_ValueError,
             "stop longer than data length in fm1readmarkers");
         return NULL;
     }
     dataend = data + datalen;
     data += offset;
     markers = PyList_New(0);
     if (!markers) {
         return NULL;
     }
     while (offset < stop) {
         uint32_t msize;
         int error;
         PyObject *record = fm1readmarker(data, dataend, &msize);
         if (!record) {
             goto bail;
         }
         error = PyList_Append(markers, record);
         Py_DECREF(record);
         if (error) {
             goto bail;
         }
         data += msize;
         offset += msize;
     }
     return markers;
 bail:
     Py_DECREF(markers);
     return NULL;
 }

 static char parsers_doc[] = "Efficient content parsing.";

 PyObject *encodedir(PyObject *self, PyObject *args);
 PyObject *pathencode(PyObject *self, PyObject *args);
 PyObject *lowerencode(PyObject *self, PyObject *args);
 PyObject *parse_index2(PyObject *self, PyObject *args, PyObject *kwargs);

 static PyMethodDef methods[] = {
     {"pack_dirstate", pack_dirstate, METH_VARARGS, "pack a dirstate\n"},
     {"nonnormalotherparententries", nonnormalotherparententries, METH_VARARGS,
      "create a set containing non-normal and other parent entries of given "
      "dirstate\n"},
     {"parse_dirstate", parse_dirstate, METH_VARARGS, "parse a dirstate\n"},
     {"parse_index2", (PyCFunction)parse_index2, METH_VARARGS | METH_KEYWORDS,
      "parse a revlog index\n"},
     {"isasciistr", isasciistr, METH_VARARGS, "check if an ASCII string\n"},
     {"asciilower", asciilower, METH_VARARGS, "lowercase an ASCII string\n"},
     {"asciiupper", asciiupper, METH_VARARGS, "uppercase an ASCII string\n"},
     {"dict_new_presized", dict_new_presized, METH_VARARGS,
      "construct a dict with an expected size\n"},
     {"make_file_foldmap", make_file_foldmap, METH_VARARGS,
      "make file foldmap\n"},
     {"jsonescapeu8fast", jsonescapeu8fast, METH_VARARGS,
      "escape a UTF-8 byte string to JSON (fast path)\n"},
     {"encodedir", encodedir, METH_VARARGS, "encodedir a path\n"},
     {"pathencode", pathencode, METH_VARARGS, "fncache-encode a path\n"},
     {"lowerencode", lowerencode, METH_VARARGS, "lower-encode a path\n"},
     {"fm1readmarkers", fm1readmarkers, METH_VARARGS,
      "parse v1 obsolete markers\n"},
     {NULL, NULL}};

 void dirs_module_init(PyObject *mod);
 void manifest_module_init(PyObject *mod);
 void revlog_module_init(PyObject *mod);

 static const int version = 20;

 static void module_init(PyObject *mod)
 {
     PyObject *capsule = NULL;
     PyModule_AddIntConstant(mod, "version", version);

     /* This module constant has two purposes. First, it lets us unit test
      * the ImportError raised without hard-coding any error text. This
      * means we can change the text in the future without breaking tests,
      * even across changesets without a recompile. Second, its presence
      * can be used to determine whether the version-checking logic is
      * present, which also helps in testing across changesets without a
      * recompile. Note that this means the pure-Python version of parsers
      * should not have this module constant. */
     PyModule_AddStringConstant(mod, "versionerrortext", versionerrortext);

     dirs_module_init(mod);
     manifest_module_init(mod);
     revlog_module_init(mod);

     capsule = PyCapsule_New(
         make_dirstate_tuple,
         "mercurial.cext.parsers.make_dirstate_tuple_CAPI", NULL);
     if (capsule != NULL)
         PyModule_AddObject(mod, "make_dirstate_tuple_CAPI", capsule);

     if (PyType_Ready(&dirstateTupleType) < 0) {
         return;
     }
     Py_INCREF(&dirstateTupleType);
     PyModule_AddObject(mod, "dirstatetuple",
                        (PyObject *)&dirstateTupleType);
 }

 static int check_python_version(void)
 {
     PyObject *sys = PyImport_ImportModule("sys"), *ver;
     long hexversion;
     if (!sys) {
         return -1;
     }
     ver = PyObject_GetAttrString(sys, "hexversion");
     Py_DECREF(sys);
     if (!ver) {
         return -1;
     }
     hexversion = PyInt_AsLong(ver);
     Py_DECREF(ver);
     /* sys.hexversion is a 32-bit number by default, so the -1 case
      * should only occur in unusual circumstances (e.g. if sys.hexversion
      * is manually set to an invalid value). */
     if ((hexversion == -1) || (hexversion >> 16 != PY_VERSION_HEX >> 16)) {
         PyErr_Format(PyExc_ImportError,
                      "%s: The Mercurial extension "
                      "modules were compiled with Python " PY_VERSION
                      ", but "
                      "Mercurial is currently using Python with "
                      "sys.hexversion=%ld: "
                      "Python %s\n at: %s",
                      versionerrortext, hexversion, Py_GetVersion(),
                      Py_GetProgramFullPath());
         return -1;
     }
     return 0;
 }

 #ifdef IS_PY3K
 static struct PyModuleDef parsers_module = {PyModuleDef_HEAD_INIT, "parsers",
                                             parsers_doc, -1, methods};

 PyMODINIT_FUNC PyInit_parsers(void)
 {
     PyObject *mod;

     if (check_python_version() == -1)
         return NULL;
     mod = PyModule_Create(&parsers_module);
     module_init(mod);
     return mod;
 }
 #else
 PyMODINIT_FUNC initparsers(void)
 {
     PyObject *mod;

     if (check_python_version() == -1) {
         return;
     }
     mod = Py_InitModule3("parsers", methods, parsers_doc);
     module_init(mod);
 }
 #endif
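
For reference, `parse_dirstate` above walks a flat byte string: two 20-byte parent hashes, then repeated records made of a 17-byte header (a state byte, three big-endian 32-bit integers, and a name length) followed by the file name, with an optional NUL-separated copy source appended. A rough pure-Python equivalent of that read loop (a sketch only; Mercurial's real fallback lives in the pure-Python parsers module):

    import struct

    def parse_dirstate_v1(data):
        # Sketch of the on-disk v1 layout handled by parse_dirstate above.
        parents = (data[:20], data[20:40])  # p1 and p2 hashes
        pos, dmap, copymap = 40, {}, {}
        while pos < len(data):
            # 17-byte record header: state, mode, size, mtime, name length
            state, mode, size, mtime, flen = struct.unpack_from(
                '>ciiii', data, pos
            )
            pos += 17
            name = data[pos:pos + flen]
            pos += flen
            zero = name.find(b'\x00')
            if zero >= 0:
                # "<name>\0<source>" records a copy in the copy map
                copymap[name[:zero]] = name[zero + 1:]
                name = name[:zero]
            dmap[name] = (state, mode, size, mtime)
        return parents, dmap, copymap

The integer fields are decoded as signed here so the -1 (`dirstate_v1_nonnormal`) and -2 (`dirstate_v1_from_p2`) marker values round-trip, mirroring the `getbe32` results being stored into C `int`s above.
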
diff --git a/mercurial/dirstate.py b/mercurial/dirstate.py
--- a/mercurial/dirstate.py
+++ b/mercurial/dirstate.py
@@ -1,1453 +1,1453 @@
1 # dirstate.py - working directory tracking for mercurial
1 # dirstate.py - working directory tracking for mercurial
2 #
2 #
3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import collections
10 import collections
11 import contextlib
11 import contextlib
12 import errno
12 import errno
13 import os
13 import os
14 import stat
14 import stat
15
15
16 from .i18n import _
16 from .i18n import _
17 from .pycompat import delattr
17 from .pycompat import delattr
18
18
19 from hgdemandimport import tracing
19 from hgdemandimport import tracing
20
20
21 from . import (
21 from . import (
22 dirstatemap,
22 dirstatemap,
23 encoding,
23 encoding,
24 error,
24 error,
25 match as matchmod,
25 match as matchmod,
26 pathutil,
26 pathutil,
27 policy,
27 policy,
28 pycompat,
28 pycompat,
29 scmutil,
29 scmutil,
30 sparse,
30 sparse,
31 util,
31 util,
32 )
32 )
33
33
34 from .interfaces import (
34 from .interfaces import (
35 dirstate as intdirstate,
35 dirstate as intdirstate,
36 util as interfaceutil,
36 util as interfaceutil,
37 )
37 )
38
38
39 parsers = policy.importmod('parsers')
39 parsers = policy.importmod('parsers')
40 rustmod = policy.importrust('dirstate')
40 rustmod = policy.importrust('dirstate')
41
41
42 SUPPORTS_DIRSTATE_V2 = rustmod is not None
42 SUPPORTS_DIRSTATE_V2 = rustmod is not None
43
43
44 propertycache = util.propertycache
44 propertycache = util.propertycache
45 filecache = scmutil.filecache
45 filecache = scmutil.filecache
46 _rangemask = 0x7FFFFFFF
46 _rangemask = 0x7FFFFFFF
47
47
48 dirstatetuple = parsers.dirstatetuple
48 dirstatetuple = parsers.dirstatetuple
49
49
50
50
51 # a special value used internally for `size` if the file come from the other parent
51 # a special value used internally for `size` if the file come from the other parent
52 FROM_P2 = dirstatemap.FROM_P2
52 FROM_P2 = dirstatemap.FROM_P2
53
53
54 # a special value used internally for `size` if the file is modified/merged/added
54 # a special value used internally for `size` if the file is modified/merged/added
55 NONNORMAL = dirstatemap.NONNORMAL
55 NONNORMAL = dirstatemap.NONNORMAL
56
56
57 # a special value used internally for `time` if the time is ambigeous
57 # a special value used internally for `time` if the time is ambigeous
58 AMBIGUOUS_TIME = dirstatemap.AMBIGUOUS_TIME
58 AMBIGUOUS_TIME = dirstatemap.AMBIGUOUS_TIME
59
59
60
60
61 class repocache(filecache):
61 class repocache(filecache):
62 """filecache for files in .hg/"""
62 """filecache for files in .hg/"""
63
63
64 def join(self, obj, fname):
64 def join(self, obj, fname):
65 return obj._opener.join(fname)
65 return obj._opener.join(fname)
66
66
67
67
68 class rootcache(filecache):
68 class rootcache(filecache):
69 """filecache for files in the repository root"""
69 """filecache for files in the repository root"""
70
70
71 def join(self, obj, fname):
71 def join(self, obj, fname):
72 return obj._join(fname)
72 return obj._join(fname)
73
73
74
74
75 def _getfsnow(vfs):
75 def _getfsnow(vfs):
76 '''Get "now" timestamp on filesystem'''
76 '''Get "now" timestamp on filesystem'''
77 tmpfd, tmpname = vfs.mkstemp()
77 tmpfd, tmpname = vfs.mkstemp()
78 try:
78 try:
79 return os.fstat(tmpfd)[stat.ST_MTIME]
79 return os.fstat(tmpfd)[stat.ST_MTIME]
80 finally:
80 finally:
81 os.close(tmpfd)
81 os.close(tmpfd)
82 vfs.unlink(tmpname)
82 vfs.unlink(tmpname)
83
83
84
84
85 @interfaceutil.implementer(intdirstate.idirstate)
85 @interfaceutil.implementer(intdirstate.idirstate)
86 class dirstate(object):
86 class dirstate(object):
87 def __init__(
87 def __init__(
88 self,
88 self,
89 opener,
89 opener,
90 ui,
90 ui,
91 root,
91 root,
92 validate,
92 validate,
93 sparsematchfn,
93 sparsematchfn,
94 nodeconstants,
94 nodeconstants,
95 use_dirstate_v2,
95 use_dirstate_v2,
96 ):
96 ):
97 """Create a new dirstate object.
97 """Create a new dirstate object.
98
98
99 opener is an open()-like callable that can be used to open the
99 opener is an open()-like callable that can be used to open the
100 dirstate file; root is the root of the directory tracked by
100 dirstate file; root is the root of the directory tracked by
101 the dirstate.
101 the dirstate.
102 """
102 """
103 self._use_dirstate_v2 = use_dirstate_v2
103 self._use_dirstate_v2 = use_dirstate_v2
104 self._nodeconstants = nodeconstants
104 self._nodeconstants = nodeconstants
105 self._opener = opener
105 self._opener = opener
106 self._validate = validate
106 self._validate = validate
107 self._root = root
107 self._root = root
108 self._sparsematchfn = sparsematchfn
108 self._sparsematchfn = sparsematchfn
109 # ntpath.join(root, '') of Python 2.7.9 does not add sep if root is
109 # ntpath.join(root, '') of Python 2.7.9 does not add sep if root is
110 # UNC path pointing to root share (issue4557)
110 # UNC path pointing to root share (issue4557)
111 self._rootdir = pathutil.normasprefix(root)
111 self._rootdir = pathutil.normasprefix(root)
112 self._dirty = False
112 self._dirty = False
113 self._lastnormaltime = 0
113 self._lastnormaltime = 0
114 self._ui = ui
114 self._ui = ui
115 self._filecache = {}
115 self._filecache = {}
116 self._parentwriters = 0
116 self._parentwriters = 0
117 self._filename = b'dirstate'
117 self._filename = b'dirstate'
118 self._pendingfilename = b'%s.pending' % self._filename
118 self._pendingfilename = b'%s.pending' % self._filename
119 self._plchangecallbacks = {}
119 self._plchangecallbacks = {}
120 self._origpl = None
120 self._origpl = None
121 self._updatedfiles = set()
121 self._updatedfiles = set()
122 self._mapcls = dirstatemap.dirstatemap
122 self._mapcls = dirstatemap.dirstatemap
123 # Access and cache cwd early, so we don't access it for the first time
123 # Access and cache cwd early, so we don't access it for the first time
124 # after a working-copy update caused it to not exist (accessing it then
124 # after a working-copy update caused it to not exist (accessing it then
125 # raises an exception).
125 # raises an exception).
126 self._cwd
126 self._cwd
127
127
128 def prefetch_parents(self):
128 def prefetch_parents(self):
129 """make sure the parents are loaded
129 """make sure the parents are loaded
130
130
131 Used to avoid a race condition.
131 Used to avoid a race condition.
132 """
132 """
133 self._pl
133 self._pl
134
134
135 @contextlib.contextmanager
135 @contextlib.contextmanager
136 def parentchange(self):
136 def parentchange(self):
137 """Context manager for handling dirstate parents.
137 """Context manager for handling dirstate parents.
138
138
139 If an exception occurs in the scope of the context manager,
139 If an exception occurs in the scope of the context manager,
140 the incoherent dirstate won't be written when wlock is
140 the incoherent dirstate won't be written when wlock is
141 released.
141 released.
142 """
142 """
143 self._parentwriters += 1
143 self._parentwriters += 1
144 yield
144 yield
145 # Typically we want the "undo" step of a context manager in a
145 # Typically we want the "undo" step of a context manager in a
146 # finally block so it happens even when an exception
146 # finally block so it happens even when an exception
147 # occurs. In this case, however, we only want to decrement
147 # occurs. In this case, however, we only want to decrement
148 # parentwriters if the code in the with statement exits
148 # parentwriters if the code in the with statement exits
149 # normally, so we don't have a try/finally here on purpose.
149 # normally, so we don't have a try/finally here on purpose.
150 self._parentwriters -= 1
150 self._parentwriters -= 1
151
151
152 def pendingparentchange(self):
152 def pendingparentchange(self):
153 """Returns true if the dirstate is in the middle of a set of changes
153 """Returns true if the dirstate is in the middle of a set of changes
154 that modify the dirstate parent.
154 that modify the dirstate parent.
155 """
155 """
156 return self._parentwriters > 0
156 return self._parentwriters > 0
157
157
158 @propertycache
158 @propertycache
159 def _map(self):
159 def _map(self):
160 """Return the dirstate contents (see documentation for dirstatemap)."""
160 """Return the dirstate contents (see documentation for dirstatemap)."""
161 self._map = self._mapcls(
161 self._map = self._mapcls(
162 self._ui,
162 self._ui,
163 self._opener,
163 self._opener,
164 self._root,
164 self._root,
165 self._nodeconstants,
165 self._nodeconstants,
166 self._use_dirstate_v2,
166 self._use_dirstate_v2,
167 )
167 )
168 return self._map
168 return self._map
169
169
170 @property
170 @property
171 def _sparsematcher(self):
171 def _sparsematcher(self):
172 """The matcher for the sparse checkout.
172 """The matcher for the sparse checkout.
173
173
174 The working directory may not include every file from a manifest. The
174 The working directory may not include every file from a manifest. The
175 matcher obtained by this property will match a path if it is to be
175 matcher obtained by this property will match a path if it is to be
176 included in the working directory.
176 included in the working directory.
177 """
177 """
178 # TODO there is potential to cache this property. For now, the matcher
178 # TODO there is potential to cache this property. For now, the matcher
179 # is resolved on every access. (But the called function does use a
179 # is resolved on every access. (But the called function does use a
180 # cache to keep the lookup fast.)
180 # cache to keep the lookup fast.)
181 return self._sparsematchfn()
181 return self._sparsematchfn()
182
182
183 @repocache(b'branch')
183 @repocache(b'branch')
184 def _branch(self):
184 def _branch(self):
185 try:
185 try:
186 return self._opener.read(b"branch").strip() or b"default"
186 return self._opener.read(b"branch").strip() or b"default"
187 except IOError as inst:
187 except IOError as inst:
188 if inst.errno != errno.ENOENT:
188 if inst.errno != errno.ENOENT:
189 raise
189 raise
190 return b"default"
190 return b"default"
191
191
192 @property
192 @property
193 def _pl(self):
193 def _pl(self):
194 return self._map.parents()
194 return self._map.parents()
195
195
196 def hasdir(self, d):
196 def hasdir(self, d):
197 return self._map.hastrackeddir(d)
197 return self._map.hastrackeddir(d)
198
198
199 @rootcache(b'.hgignore')
199 @rootcache(b'.hgignore')
200 def _ignore(self):
200 def _ignore(self):
201 files = self._ignorefiles()
201 files = self._ignorefiles()
202 if not files:
202 if not files:
203 return matchmod.never()
203 return matchmod.never()
204
204
205 pats = [b'include:%s' % f for f in files]
205 pats = [b'include:%s' % f for f in files]
206 return matchmod.match(self._root, b'', [], pats, warn=self._ui.warn)
206 return matchmod.match(self._root, b'', [], pats, warn=self._ui.warn)
207
207
208 @propertycache
208 @propertycache
209 def _slash(self):
209 def _slash(self):
210 return self._ui.configbool(b'ui', b'slash') and pycompat.ossep != b'/'
210 return self._ui.configbool(b'ui', b'slash') and pycompat.ossep != b'/'
211
211
212 @propertycache
212 @propertycache
213 def _checklink(self):
213 def _checklink(self):
214 return util.checklink(self._root)
214 return util.checklink(self._root)
215
215
216 @propertycache
216 @propertycache
217 def _checkexec(self):
217 def _checkexec(self):
218 return bool(util.checkexec(self._root))
218 return bool(util.checkexec(self._root))
219
219
220 @propertycache
220 @propertycache
221 def _checkcase(self):
221 def _checkcase(self):
222 return not util.fscasesensitive(self._join(b'.hg'))
222 return not util.fscasesensitive(self._join(b'.hg'))
223
223
224 def _join(self, f):
224 def _join(self, f):
225 # much faster than os.path.join()
225 # much faster than os.path.join()
226 # it's safe because f is always a relative path
226 # it's safe because f is always a relative path
227 return self._rootdir + f
227 return self._rootdir + f
228
228
229 def flagfunc(self, buildfallback):
229 def flagfunc(self, buildfallback):
230 if self._checklink and self._checkexec:
230 if self._checklink and self._checkexec:
231
231
232 def f(x):
232 def f(x):
233 try:
233 try:
234 st = os.lstat(self._join(x))
234 st = os.lstat(self._join(x))
235 if util.statislink(st):
235 if util.statislink(st):
236 return b'l'
236 return b'l'
237 if util.statisexec(st):
237 if util.statisexec(st):
238 return b'x'
238 return b'x'
239 except OSError:
239 except OSError:
240 pass
240 pass
241 return b''
241 return b''
242
242
243 return f
243 return f
244
244
245 fallback = buildfallback()
245 fallback = buildfallback()
246 if self._checklink:
246 if self._checklink:
247
247
248 def f(x):
248 def f(x):
249 if os.path.islink(self._join(x)):
249 if os.path.islink(self._join(x)):
250 return b'l'
250 return b'l'
251 if b'x' in fallback(x):
251 if b'x' in fallback(x):
252 return b'x'
252 return b'x'
253 return b''
253 return b''
254
254
255 return f
255 return f
256 if self._checkexec:
256 if self._checkexec:
257
257
258 def f(x):
258 def f(x):
259 if b'l' in fallback(x):
259 if b'l' in fallback(x):
260 return b'l'
260 return b'l'
261 if util.isexec(self._join(x)):
261 if util.isexec(self._join(x)):
262 return b'x'
262 return b'x'
263 return b''
263 return b''
264
264
265 return f
265 return f
266 else:
266 else:
267 return fallback
267 return fallback
268
268
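flagfunc() picks one of three strategies depending on what the filesystem can answer; when only one of symlink/exec support is reliable, the fallback supplies the other bit. A sketch of that selection under stated assumptions (`fsflags` and `fallback` are hypothetical callables standing in for the lstat-based probe and buildfallback()):

    def make_flagfunc(checklink, checkexec, fsflags, fallback):
        """Toy mirror of flagfunc's strategy selection.

        fsflags(path) returns b'l', b'x' or b'' straight from the
        filesystem; fallback(path) returns flags recorded elsewhere
        (e.g. in a manifest).
        """
        if checklink and checkexec:
            return fsflags                 # trust the filesystem for both
        if checklink:
            def f(x):                      # links from disk, exec from fallback
                if fsflags(x) == b'l':
                    return b'l'
                return b'x' if b'x' in fallback(x) else b''
            return f
        if checkexec:
            def f(x):                      # links from fallback, exec from disk
                if b'l' in fallback(x):
                    return b'l'
                return b'x' if fsflags(x) == b'x' else b''
            return f
        return fallback                    # filesystem supports neither

    ff = make_flagfunc(True, False, lambda p: b'', lambda p: b'x')
    assert ff(b'script.sh') == b'x'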
269 @propertycache
269 @propertycache
270 def _cwd(self):
270 def _cwd(self):
271 # internal config: ui.forcecwd
271 # internal config: ui.forcecwd
272 forcecwd = self._ui.config(b'ui', b'forcecwd')
272 forcecwd = self._ui.config(b'ui', b'forcecwd')
273 if forcecwd:
273 if forcecwd:
274 return forcecwd
274 return forcecwd
275 return encoding.getcwd()
275 return encoding.getcwd()
276
276
277 def getcwd(self):
277 def getcwd(self):
278 """Return the path from which a canonical path is calculated.
278 """Return the path from which a canonical path is calculated.
279
279
280 This path should be used to resolve file patterns or to convert
280 This path should be used to resolve file patterns or to convert
281 canonical paths back to file paths for display. It shouldn't be
281 canonical paths back to file paths for display. It shouldn't be
282 used to get real file paths. Use vfs functions instead.
282 used to get real file paths. Use vfs functions instead.
283 """
283 """
284 cwd = self._cwd
284 cwd = self._cwd
285 if cwd == self._root:
285 if cwd == self._root:
286 return b''
286 return b''
287 # self._root ends with a path separator if self._root is '/' or 'C:\'
287 # self._root ends with a path separator if self._root is '/' or 'C:\'
288 rootsep = self._root
288 rootsep = self._root
289 if not util.endswithsep(rootsep):
289 if not util.endswithsep(rootsep):
290 rootsep += pycompat.ossep
290 rootsep += pycompat.ossep
291 if cwd.startswith(rootsep):
291 if cwd.startswith(rootsep):
292 return cwd[len(rootsep) :]
292 return cwd[len(rootsep) :]
293 else:
293 else:
294 # we're outside the repo. return an absolute path.
294 # we're outside the repo. return an absolute path.
295 return cwd
295 return cwd
296
296
297 def pathto(self, f, cwd=None):
297 def pathto(self, f, cwd=None):
298 if cwd is None:
298 if cwd is None:
299 cwd = self.getcwd()
299 cwd = self.getcwd()
300 path = util.pathto(self._root, cwd, f)
300 path = util.pathto(self._root, cwd, f)
301 if self._slash:
301 if self._slash:
302 return util.pconvert(path)
302 return util.pconvert(path)
303 return path
303 return path
304
304
305 def __getitem__(self, key):
305 def __getitem__(self, key):
306 """Return the current state of key (a filename) in the dirstate.
306 """Return the current state of key (a filename) in the dirstate.
307
307
308 States are:
308 States are:
309 n normal
309 n normal
310 m needs merging
310 m needs merging
311 r marked for removal
311 r marked for removal
312 a marked for addition
312 a marked for addition
313 ? not tracked
313 ? not tracked
314
314
315 XXX The "state" is a bit obscure to be in the "public" API. we should
315 XXX The "state" is a bit obscure to be in the "public" API. we should
316 consider migrating all user of this to going through the dirstate entry
316 consider migrating all user of this to going through the dirstate entry
317 instead.
317 instead.
318 """
318 """
319 entry = self._map.get(key)
319 entry = self._map.get(key)
320 if entry is not None:
320 if entry is not None:
321 return entry.state
321 return entry.state
322 return b'?'
322 return b'?'
323
323
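To make the state table above concrete, here is a toy lookup with the same b'?' default for untracked files (a stand-in dict, not the real dirstate map):

    class _StateDemo(object):
        """Toy model of __getitem__'s default (not the real class)."""

        def __init__(self, entries):
            self._entries = entries  # mapping of path -> one-byte state

        def __getitem__(self, key):
            return self._entries.get(key, b'?')

    d = _StateDemo({b'kept.txt': b'n', b'new.txt': b'a', b'gone.txt': b'r'})
    assert d[b'new.txt'] == b'a'    # marked for addition
    assert d[b'unrelated'] == b'?'  # not tracked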
324 def __contains__(self, key):
324 def __contains__(self, key):
325 return key in self._map
325 return key in self._map
326
326
327 def __iter__(self):
327 def __iter__(self):
328 return iter(sorted(self._map))
328 return iter(sorted(self._map))
329
329
330 def items(self):
330 def items(self):
331 return pycompat.iteritems(self._map)
331 return pycompat.iteritems(self._map)
332
332
333 iteritems = items
333 iteritems = items
334
334
335 def directories(self):
335 def directories(self):
336 return self._map.directories()
336 return self._map.directories()
337
337
338 def parents(self):
338 def parents(self):
339 return [self._validate(p) for p in self._pl]
339 return [self._validate(p) for p in self._pl]
340
340
341 def p1(self):
341 def p1(self):
342 return self._validate(self._pl[0])
342 return self._validate(self._pl[0])
343
343
344 def p2(self):
344 def p2(self):
345 return self._validate(self._pl[1])
345 return self._validate(self._pl[1])
346
346
347 @property
347 @property
348 def in_merge(self):
348 def in_merge(self):
349 """True if a merge is in progress"""
349 """True if a merge is in progress"""
350 return self._pl[1] != self._nodeconstants.nullid
350 return self._pl[1] != self._nodeconstants.nullid
351
351
352 def branch(self):
352 def branch(self):
353 return encoding.tolocal(self._branch)
353 return encoding.tolocal(self._branch)
354
354
355 def setparents(self, p1, p2=None):
355 def setparents(self, p1, p2=None):
356 """Set dirstate parents to p1 and p2.
356 """Set dirstate parents to p1 and p2.
357
357
358 When moving from two parents to one, "merged" entries are
358 When moving from two parents to one, "merged" entries are
359 adjusted to normal and previous copy records are discarded and
359 adjusted to normal and previous copy records are discarded and
360 returned by the call.
360 returned by the call.
361
361
362 See localrepo.setparents()
362 See localrepo.setparents()
363 """
363 """
364 if p2 is None:
364 if p2 is None:
365 p2 = self._nodeconstants.nullid
365 p2 = self._nodeconstants.nullid
366 if self._parentwriters == 0:
366 if self._parentwriters == 0:
367 raise ValueError(
367 raise ValueError(
368 b"cannot set dirstate parent outside of "
368 b"cannot set dirstate parent outside of "
369 b"dirstate.parentchange context manager"
369 b"dirstate.parentchange context manager"
370 )
370 )
371
371
372 self._dirty = True
372 self._dirty = True
373 oldp2 = self._pl[1]
373 oldp2 = self._pl[1]
374 if self._origpl is None:
374 if self._origpl is None:
375 self._origpl = self._pl
375 self._origpl = self._pl
376 self._map.setparents(p1, p2)
376 self._map.setparents(p1, p2)
377 copies = {}
377 copies = {}
378 if (
378 if (
379 oldp2 != self._nodeconstants.nullid
379 oldp2 != self._nodeconstants.nullid
380 and p2 == self._nodeconstants.nullid
380 and p2 == self._nodeconstants.nullid
381 ):
381 ):
382 candidatefiles = self._map.non_normal_or_other_parent_paths()
382 candidatefiles = self._map.non_normal_or_other_parent_paths()
383
383
384 for f in candidatefiles:
384 for f in candidatefiles:
385 s = self._map.get(f)
385 s = self._map.get(f)
386 if s is None:
386 if s is None:
387 continue
387 continue
388
388
389 # Discard "merged" markers when moving away from a merge state
389 # Discard "merged" markers when moving away from a merge state
390 if s.merged:
390 if s.merged:
391 source = self._map.copymap.get(f)
391 source = self._map.copymap.get(f)
392 if source:
392 if source:
393 copies[f] = source
393 copies[f] = source
394 self.normallookup(f)
394 self.normallookup(f)
395 # Also fix up otherparent markers
395 # Also fix up otherparent markers
396 elif s.state == b'n' and s.from_p2:
396 elif s.from_p2:
397 source = self._map.copymap.get(f)
397 source = self._map.copymap.get(f)
398 if source:
398 if source:
399 copies[f] = source
399 copies[f] = source
400 self.add(f)
400 self.add(f)
401 return copies
401 return copies
402
402
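The changed branch above (dropping the explicit `state == b'n'` test before `s.from_p2`) relies on this changeset's point: `from_p2` is now restricted to tracked files, so merged entries are caught by the first branch and anything else carrying the from-p2 sentinel must be a normal entry, which gets demoted via add(). A toy model of the v1 encoding involved (FROM_P2 = -2 matches `dirstate_v1_from_p2` in the C parser; 'm' is the merged state):

    FROM_P2 = -2  # value of dirstate_v1_from_p2 in parsers.c

    def demote_entry(state, size, copysource):
        """Toy model of the p2 -> p1 demotion in setparents(): returns
        the action taken and the copy record recovered for the caller."""
        if state == b'm':                      # "merged"
            return 'normallookup', copysource
        if state == b'n' and size == FROM_P2:  # tracked, from other parent
            return 'add', copysource
        return None, None

    assert demote_entry(b'm', 0, b'old/name') == ('normallookup', b'old/name')
    assert demote_entry(b'n', FROM_P2, None) == ('add', None)
    assert demote_entry(b'n', 512, None) == (None, None)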
403 def setbranch(self, branch):
403 def setbranch(self, branch):
404 self.__class__._branch.set(self, encoding.fromlocal(branch))
404 self.__class__._branch.set(self, encoding.fromlocal(branch))
405 f = self._opener(b'branch', b'w', atomictemp=True, checkambig=True)
405 f = self._opener(b'branch', b'w', atomictemp=True, checkambig=True)
406 try:
406 try:
407 f.write(self._branch + b'\n')
407 f.write(self._branch + b'\n')
408 f.close()
408 f.close()
409
409
410 # make sure filecache has the correct stat info for _branch after
410 # make sure filecache has the correct stat info for _branch after
411 # replacing the underlying file
411 # replacing the underlying file
412 ce = self._filecache[b'_branch']
412 ce = self._filecache[b'_branch']
413 if ce:
413 if ce:
414 ce.refresh()
414 ce.refresh()
415 except: # re-raises
415 except: # re-raises
416 f.discard()
416 f.discard()
417 raise
417 raise
418
418
419 def invalidate(self):
419 def invalidate(self):
420 """Causes the next access to reread the dirstate.
420 """Causes the next access to reread the dirstate.
421
421
422 This is different from localrepo.invalidatedirstate() because it always
422 This is different from localrepo.invalidatedirstate() because it always
423 rereads the dirstate. Use localrepo.invalidatedirstate() if you want to
423 rereads the dirstate. Use localrepo.invalidatedirstate() if you want to
424 check whether the dirstate has changed before rereading it."""
424 check whether the dirstate has changed before rereading it."""
425
425
426 for a in ("_map", "_branch", "_ignore"):
426 for a in ("_map", "_branch", "_ignore"):
427 if a in self.__dict__:
427 if a in self.__dict__:
428 delattr(self, a)
428 delattr(self, a)
429 self._lastnormaltime = 0
429 self._lastnormaltime = 0
430 self._dirty = False
430 self._dirty = False
431 self._updatedfiles.clear()
431 self._updatedfiles.clear()
432 self._parentwriters = 0
432 self._parentwriters = 0
433 self._origpl = None
433 self._origpl = None
434
434
435 def copy(self, source, dest):
435 def copy(self, source, dest):
436 """Mark dest as a copy of source. Unmark dest if source is None."""
436 """Mark dest as a copy of source. Unmark dest if source is None."""
437 if source == dest:
437 if source == dest:
438 return
438 return
439 self._dirty = True
439 self._dirty = True
440 if source is not None:
440 if source is not None:
441 self._map.copymap[dest] = source
441 self._map.copymap[dest] = source
442 self._updatedfiles.add(source)
442 self._updatedfiles.add(source)
443 self._updatedfiles.add(dest)
443 self._updatedfiles.add(dest)
444 elif self._map.copymap.pop(dest, None):
444 elif self._map.copymap.pop(dest, None):
445 self._updatedfiles.add(dest)
445 self._updatedfiles.add(dest)
446
446
447 def copied(self, file):
447 def copied(self, file):
448 return self._map.copymap.get(file, None)
448 return self._map.copymap.get(file, None)
449
449
450 def copies(self):
450 def copies(self):
451 return self._map.copymap
451 return self._map.copymap
452
452
453 def _addpath(
453 def _addpath(
454 self,
454 self,
455 f,
455 f,
456 state,
456 state,
457 mode,
457 mode,
458 size=NONNORMAL,
458 size=NONNORMAL,
459 mtime=AMBIGUOUS_TIME,
459 mtime=AMBIGUOUS_TIME,
460 from_p2=False,
460 from_p2=False,
461 possibly_dirty=False,
461 possibly_dirty=False,
462 ):
462 ):
463 oldstate = self[f]
463 oldstate = self[f]
464 if state == b'a' or oldstate == b'r':
464 if state == b'a' or oldstate == b'r':
465 scmutil.checkfilename(f)
465 scmutil.checkfilename(f)
466 if self._map.hastrackeddir(f):
466 if self._map.hastrackeddir(f):
467 msg = _(b'directory %r already in dirstate')
467 msg = _(b'directory %r already in dirstate')
468 msg %= pycompat.bytestr(f)
468 msg %= pycompat.bytestr(f)
469 raise error.Abort(msg)
469 raise error.Abort(msg)
470 # shadows
470 # shadows
471 for d in pathutil.finddirs(f):
471 for d in pathutil.finddirs(f):
472 if self._map.hastrackeddir(d):
472 if self._map.hastrackeddir(d):
473 break
473 break
474 entry = self._map.get(d)
474 entry = self._map.get(d)
475 if entry is not None and not entry.removed:
475 if entry is not None and not entry.removed:
476 msg = _(b'file %r in dirstate clashes with %r')
476 msg = _(b'file %r in dirstate clashes with %r')
477 msg %= (pycompat.bytestr(d), pycompat.bytestr(f))
477 msg %= (pycompat.bytestr(d), pycompat.bytestr(f))
478 raise error.Abort(msg)
478 raise error.Abort(msg)
479 if state == b'a':
479 if state == b'a':
480 assert not possibly_dirty
480 assert not possibly_dirty
481 assert not from_p2
481 assert not from_p2
482 size = NONNORMAL
482 size = NONNORMAL
483 mtime = AMBIGUOUS_TIME
483 mtime = AMBIGUOUS_TIME
484 elif from_p2:
484 elif from_p2:
485 assert not possibly_dirty
485 assert not possibly_dirty
486 size = FROM_P2
486 size = FROM_P2
487 mtime = AMBIGUOUS_TIME
487 mtime = AMBIGUOUS_TIME
488 elif possibly_dirty:
488 elif possibly_dirty:
489 mtime = AMBIGUOUS_TIME
489 mtime = AMBIGUOUS_TIME
490 else:
490 else:
491 assert size != FROM_P2
491 assert size != FROM_P2
492 assert size != NONNORMAL
492 assert size != NONNORMAL
493 size = size & _rangemask
493 size = size & _rangemask
494 mtime = mtime & _rangemask
494 mtime = mtime & _rangemask
495 self._dirty = True
495 self._dirty = True
496 self._updatedfiles.add(f)
496 self._updatedfiles.add(f)
497 self._map.addfile(f, oldstate, state, mode, size, mtime)
497 self._map.addfile(f, oldstate, state, mode, size, mtime)
498
498
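The branch ladder in _addpath() is effectively a truth table from (state, from_p2, possibly_dirty) to the stored (size, mtime) sentinels. A standalone reproduction, assuming AMBIGUOUS_TIME is -1 and the usual 31-bit _rangemask of the v1 format (NONNORMAL = -1 and FROM_P2 = -2 match the C parser's constants):

    NONNORMAL = -1
    FROM_P2 = -2
    AMBIGUOUS_TIME = -1          # assumed value
    _rangemask = 0x7FFFFFFF      # assumed 31-bit on-disk mask

    def pick_size_mtime(state, size, mtime, from_p2=False,
                        possibly_dirty=False):
        """Toy reproduction of _addpath's sentinel selection."""
        if state == b'a':
            return NONNORMAL, AMBIGUOUS_TIME   # added: size/mtime unknown
        if from_p2:
            return FROM_P2, AMBIGUOUS_TIME     # other-parent marker
        if possibly_dirty:
            return size, AMBIGUOUS_TIME        # force a later content check
        return size & _rangemask, mtime & _rangemask

    assert pick_size_mtime(b'a', 0, 0) == (NONNORMAL, AMBIGUOUS_TIME)
    assert pick_size_mtime(b'n', 10, 20, from_p2=True) == (FROM_P2,
                                                           AMBIGUOUS_TIME)
    assert pick_size_mtime(b'n', 10, 20) == (10, 20)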
499 def normal(self, f, parentfiledata=None):
499 def normal(self, f, parentfiledata=None):
500 """Mark a file normal and clean.
500 """Mark a file normal and clean.
501
501
502 parentfiledata: (mode, size, mtime) of the clean file
502 parentfiledata: (mode, size, mtime) of the clean file
503
503
504 parentfiledata should be computed from memory (for mode,
504 parentfiledata should be computed from memory (for mode,
505 size), at or as close as possible to the point where we
505 size), at or as close as possible to the point where we
506 determined the file was clean, to limit the risk of the
506 determined the file was clean, to limit the risk of the
507 file having been changed by an external process between the
507 file having been changed by an external process between the
508 moment where the file was determined to be clean and now."""
508 moment where the file was determined to be clean and now."""
509 if parentfiledata:
509 if parentfiledata:
510 (mode, size, mtime) = parentfiledata
510 (mode, size, mtime) = parentfiledata
511 else:
511 else:
512 s = os.lstat(self._join(f))
512 s = os.lstat(self._join(f))
513 mode = s.st_mode
513 mode = s.st_mode
514 size = s.st_size
514 size = s.st_size
515 mtime = s[stat.ST_MTIME]
515 mtime = s[stat.ST_MTIME]
516 self._addpath(f, b'n', mode, size, mtime)
516 self._addpath(f, b'n', mode, size, mtime)
517 self._map.copymap.pop(f, None)
517 self._map.copymap.pop(f, None)
518 if f in self._map.nonnormalset:
518 if f in self._map.nonnormalset:
519 self._map.nonnormalset.remove(f)
519 self._map.nonnormalset.remove(f)
520 if mtime > self._lastnormaltime:
520 if mtime > self._lastnormaltime:
521 # Remember the most recent modification timeslot for status(),
521 # Remember the most recent modification timeslot for status(),
522 # to make sure we won't miss future size-preserving file content
522 # to make sure we won't miss future size-preserving file content
523 # modifications that happen within the same timeslot.
523 # modifications that happen within the same timeslot.
524 self._lastnormaltime = mtime
524 self._lastnormaltime = mtime
525
525
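Callers that already hold fresh stat data pass it as parentfiledata; when they do not, normal() falls back to lstat. A hedged sketch of building that triple the same way the fallback does:

    import os
    import stat

    def parentfiledata_for(path):
        """Sketch: build the (mode, size, mtime) triple normal() accepts,
        computed the way normal() does when the argument is None."""
        st = os.lstat(path)
        return st.st_mode, st.st_size, st[stat.ST_MTIME]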
526 def normallookup(self, f):
526 def normallookup(self, f):
527 '''Mark a file normal, but possibly dirty.'''
527 '''Mark a file normal, but possibly dirty.'''
528 if self.in_merge:
528 if self.in_merge:
529 # if there is a merge going on and the file was either
529 # if there is a merge going on and the file was either
530 # "merged" or coming from other parent (-2) before
530 # "merged" or coming from other parent (-2) before
531 # being removed, restore that state.
531 # being removed, restore that state.
532 entry = self._map.get(f)
532 entry = self._map.get(f)
533 if entry is not None:
533 if entry is not None:
534 # XXX this should probably be dealt with at a lower level
534 # XXX this should probably be dealt with at a lower level
535 # (see `merged_removed` and `from_p2_removed`)
535 # (see `merged_removed` and `from_p2_removed`)
536 if entry.merged_removed or entry.from_p2_removed:
536 if entry.merged_removed or entry.from_p2_removed:
537 source = self._map.copymap.get(f)
537 source = self._map.copymap.get(f)
538 if entry.merged_removed:
538 if entry.merged_removed:
539 self.merge(f)
539 self.merge(f)
540 elif entry.from_p2_removed:
540 elif entry.from_p2_removed:
541 self.otherparent(f)
541 self.otherparent(f)
542 if source is not None:
542 if source is not None:
543 self.copy(source, f)
543 self.copy(source, f)
544 return
544 return
545 elif entry.merged or entry.state == b'n' and entry.from_p2:
545 elif entry.merged or entry.from_p2:
546 return
546 return
547 self._addpath(f, b'n', 0, possibly_dirty=True)
547 self._addpath(f, b'n', 0, possibly_dirty=True)
548 self._map.copymap.pop(f, None)
548 self._map.copymap.pop(f, None)
549
549
550 def otherparent(self, f):
550 def otherparent(self, f):
551 '''Mark as coming from the other parent, always dirty.'''
551 '''Mark as coming from the other parent, always dirty.'''
552 if not self.in_merge:
552 if not self.in_merge:
553 msg = _(b"setting %r to other parent only allowed in merges") % f
553 msg = _(b"setting %r to other parent only allowed in merges") % f
554 raise error.Abort(msg)
554 raise error.Abort(msg)
555 if f in self and self[f] == b'n':
555 if f in self and self[f] == b'n':
556 # merge-like
556 # merge-like
557 self._addpath(f, b'm', 0, from_p2=True)
557 self._addpath(f, b'm', 0, from_p2=True)
558 else:
558 else:
559 # add-like
559 # add-like
560 self._addpath(f, b'n', 0, from_p2=True)
560 self._addpath(f, b'n', 0, from_p2=True)
561 self._map.copymap.pop(f, None)
561 self._map.copymap.pop(f, None)
562
562
563 def add(self, f):
563 def add(self, f):
564 '''Mark a file added.'''
564 '''Mark a file added.'''
565 self._addpath(f, b'a', 0)
565 self._addpath(f, b'a', 0)
566 self._map.copymap.pop(f, None)
566 self._map.copymap.pop(f, None)
567
567
568 def remove(self, f):
568 def remove(self, f):
569 '''Mark a file removed.'''
569 '''Mark a file removed.'''
570 self._dirty = True
570 self._dirty = True
571 self._updatedfiles.add(f)
571 self._updatedfiles.add(f)
572 self._map.removefile(f, in_merge=self.in_merge)
572 self._map.removefile(f, in_merge=self.in_merge)
573
573
574 def merge(self, f):
574 def merge(self, f):
575 '''Mark a file merged.'''
575 '''Mark a file merged.'''
576 if not self.in_merge:
576 if not self.in_merge:
577 return self.normallookup(f)
577 return self.normallookup(f)
578 return self.otherparent(f)
578 return self.otherparent(f)
579
579
580 def drop(self, f):
580 def drop(self, f):
581 '''Drop a file from the dirstate'''
581 '''Drop a file from the dirstate'''
582 oldstate = self[f]
582 oldstate = self[f]
583 if self._map.dropfile(f, oldstate):
583 if self._map.dropfile(f, oldstate):
584 self._dirty = True
584 self._dirty = True
585 self._updatedfiles.add(f)
585 self._updatedfiles.add(f)
586 self._map.copymap.pop(f, None)
586 self._map.copymap.pop(f, None)
587
587
588 def _discoverpath(self, path, normed, ignoremissing, exists, storemap):
588 def _discoverpath(self, path, normed, ignoremissing, exists, storemap):
589 if exists is None:
589 if exists is None:
590 exists = os.path.lexists(os.path.join(self._root, path))
590 exists = os.path.lexists(os.path.join(self._root, path))
591 if not exists:
591 if not exists:
592 # Maybe a path component exists
592 # Maybe a path component exists
593 if not ignoremissing and b'/' in path:
593 if not ignoremissing and b'/' in path:
594 d, f = path.rsplit(b'/', 1)
594 d, f = path.rsplit(b'/', 1)
595 d = self._normalize(d, False, ignoremissing, None)
595 d = self._normalize(d, False, ignoremissing, None)
596 folded = d + b"/" + f
596 folded = d + b"/" + f
597 else:
597 else:
598 # No path components, preserve original case
598 # No path components, preserve original case
599 folded = path
599 folded = path
600 else:
600 else:
601 # recursively normalize leading directory components
601 # recursively normalize leading directory components
602 # against dirstate
602 # against dirstate
603 if b'/' in normed:
603 if b'/' in normed:
604 d, f = normed.rsplit(b'/', 1)
604 d, f = normed.rsplit(b'/', 1)
605 d = self._normalize(d, False, ignoremissing, True)
605 d = self._normalize(d, False, ignoremissing, True)
606 r = self._root + b"/" + d
606 r = self._root + b"/" + d
607 folded = d + b"/" + util.fspath(f, r)
607 folded = d + b"/" + util.fspath(f, r)
608 else:
608 else:
609 folded = util.fspath(normed, self._root)
609 folded = util.fspath(normed, self._root)
610 storemap[normed] = folded
610 storemap[normed] = folded
611
611
612 return folded
612 return folded
613
613
614 def _normalizefile(self, path, isknown, ignoremissing=False, exists=None):
614 def _normalizefile(self, path, isknown, ignoremissing=False, exists=None):
615 normed = util.normcase(path)
615 normed = util.normcase(path)
616 folded = self._map.filefoldmap.get(normed, None)
616 folded = self._map.filefoldmap.get(normed, None)
617 if folded is None:
617 if folded is None:
618 if isknown:
618 if isknown:
619 folded = path
619 folded = path
620 else:
620 else:
621 folded = self._discoverpath(
621 folded = self._discoverpath(
622 path, normed, ignoremissing, exists, self._map.filefoldmap
622 path, normed, ignoremissing, exists, self._map.filefoldmap
623 )
623 )
624 return folded
624 return folded
625
625
626 def _normalize(self, path, isknown, ignoremissing=False, exists=None):
626 def _normalize(self, path, isknown, ignoremissing=False, exists=None):
627 normed = util.normcase(path)
627 normed = util.normcase(path)
628 folded = self._map.filefoldmap.get(normed, None)
628 folded = self._map.filefoldmap.get(normed, None)
629 if folded is None:
629 if folded is None:
630 folded = self._map.dirfoldmap.get(normed, None)
630 folded = self._map.dirfoldmap.get(normed, None)
631 if folded is None:
631 if folded is None:
632 if isknown:
632 if isknown:
633 folded = path
633 folded = path
634 else:
634 else:
635 # store discovered result in dirfoldmap so that future
635 # store discovered result in dirfoldmap so that future
636 # normalizefile calls don't start matching directories
636 # normalizefile calls don't start matching directories
637 folded = self._discoverpath(
637 folded = self._discoverpath(
638 path, normed, ignoremissing, exists, self._map.dirfoldmap
638 path, normed, ignoremissing, exists, self._map.dirfoldmap
639 )
639 )
640 return folded
640 return folded
641
641
642 def normalize(self, path, isknown=False, ignoremissing=False):
642 def normalize(self, path, isknown=False, ignoremissing=False):
643 """
643 """
644 normalize the case of a pathname when on a casefolding filesystem
644 normalize the case of a pathname when on a casefolding filesystem
645
645
646 isknown specifies whether the filename came from walking the
646 isknown specifies whether the filename came from walking the
647 disk, to avoid extra filesystem access.
647 disk, to avoid extra filesystem access.
648
648
649 If ignoremissing is True, missing paths are returned
649 If ignoremissing is True, missing paths are returned
650 unchanged. Otherwise, we try harder to normalize possibly
650 unchanged. Otherwise, we try harder to normalize possibly
651 existing path components.
651 existing path components.
652
652
653 The normalized case is determined based on the following precedence:
653 The normalized case is determined based on the following precedence:
654
654
655 - version of name already stored in the dirstate
655 - version of name already stored in the dirstate
656 - version of name stored on disk
656 - version of name stored on disk
657 - version provided via command arguments
657 - version provided via command arguments
658 """
658 """
659
659
660 if self._checkcase:
660 if self._checkcase:
661 return self._normalize(path, isknown, ignoremissing)
661 return self._normalize(path, isknown, ignoremissing)
662 return path
662 return path
663
663
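A toy of the precedence list in the docstring above, with plain dicts standing in for filefoldmap/dirfoldmap and the on-disk probe (all names here are stand-ins, not the real maps):

    def normalize_case(path, foldmap, disk_case, isknown=False):
        """Toy precedence: name stored in the dirstate, then the on-disk
        case, then the caller's spelling.

        foldmap maps normcased -> dirstate case; disk_case maps
        normcased -> on-disk case."""
        normed = path.lower()      # stand-in for util.normcase
        if normed in foldmap:
            return foldmap[normed]
        if isknown:                # came from a disk walk: already canonical
            return path
        return disk_case.get(normed, path)

    assert normalize_case('ReadMe', {'readme': 'README'}, {}) == 'README'
    assert normalize_case('ReadMe', {}, {'readme': 'Readme'}) == 'Readme'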
664 def clear(self):
664 def clear(self):
665 self._map.clear()
665 self._map.clear()
666 self._lastnormaltime = 0
666 self._lastnormaltime = 0
667 self._updatedfiles.clear()
667 self._updatedfiles.clear()
668 self._dirty = True
668 self._dirty = True
669
669
670 def rebuild(self, parent, allfiles, changedfiles=None):
670 def rebuild(self, parent, allfiles, changedfiles=None):
671 if changedfiles is None:
671 if changedfiles is None:
672 # Rebuild entire dirstate
672 # Rebuild entire dirstate
673 to_lookup = allfiles
673 to_lookup = allfiles
674 to_drop = []
674 to_drop = []
675 lastnormaltime = self._lastnormaltime
675 lastnormaltime = self._lastnormaltime
676 self.clear()
676 self.clear()
677 self._lastnormaltime = lastnormaltime
677 self._lastnormaltime = lastnormaltime
678 elif len(changedfiles) < 10:
678 elif len(changedfiles) < 10:
679 # Avoid turning allfiles into a set, which can be expensive if it's
679 # Avoid turning allfiles into a set, which can be expensive if it's
680 # large.
680 # large.
681 to_lookup = []
681 to_lookup = []
682 to_drop = []
682 to_drop = []
683 for f in changedfiles:
683 for f in changedfiles:
684 if f in allfiles:
684 if f in allfiles:
685 to_lookup.append(f)
685 to_lookup.append(f)
686 else:
686 else:
687 to_drop.append(f)
687 to_drop.append(f)
688 else:
688 else:
689 changedfilesset = set(changedfiles)
689 changedfilesset = set(changedfiles)
690 to_lookup = changedfilesset & set(allfiles)
690 to_lookup = changedfilesset & set(allfiles)
691 to_drop = changedfilesset - to_lookup
691 to_drop = changedfilesset - to_lookup
692
692
693 if self._origpl is None:
693 if self._origpl is None:
694 self._origpl = self._pl
694 self._origpl = self._pl
695 self._map.setparents(parent, self._nodeconstants.nullid)
695 self._map.setparents(parent, self._nodeconstants.nullid)
696
696
697 for f in to_lookup:
697 for f in to_lookup:
698 self.normallookup(f)
698 self.normallookup(f)
699 for f in to_drop:
699 for f in to_drop:
700 self.drop(f)
700 self.drop(f)
701
701
702 self._dirty = True
702 self._dirty = True
703
703
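The threshold in rebuild() trades building a set of allfiles against a handful of linear membership tests. A standalone version of that split (the constant 10 is the one hard-coded above):

    def split_changed(allfiles, changedfiles):
        """Toy version of rebuild()'s lookup/drop split."""
        if changedfiles is None:
            return list(allfiles), []      # full rebuild: look everything up
        if len(changedfiles) < 10:         # same constant as above
            to_lookup, to_drop = [], []
            for f in changedfiles:
                (to_lookup if f in allfiles else to_drop).append(f)
            return to_lookup, to_drop
        changed = set(changedfiles)
        to_lookup = changed & set(allfiles)
        return sorted(to_lookup), sorted(changed - to_lookup)

    assert split_changed([b'a', b'b'], [b'a', b'c']) == ([b'a'], [b'c'])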
704 def identity(self):
704 def identity(self):
705 """Return identity of dirstate itself to detect changing in storage
705 """Return identity of dirstate itself to detect changing in storage
706
706
707 If the identity of the previous dirstate is equal to this one, writing
707 If the identity of the previous dirstate is equal to this one, writing
708 out changes based on the former dirstate can keep consistency.
708 out changes based on the former dirstate can keep consistency.
709 """
709 """
710 return self._map.identity
710 return self._map.identity
711
711
712 def write(self, tr):
712 def write(self, tr):
713 if not self._dirty:
713 if not self._dirty:
714 return
714 return
715
715
716 filename = self._filename
716 filename = self._filename
717 if tr:
717 if tr:
718 # 'dirstate.write()' is not only for writing in-memory
718 # 'dirstate.write()' is not only for writing in-memory
719 # changes out, but also for dropping ambiguous timestamps.
719 # changes out, but also for dropping ambiguous timestamps.
720 # delayed writing would re-raise the "ambiguous timestamp issue".
720 # delayed writing would re-raise the "ambiguous timestamp issue".
721 # See also the wiki page below for detail:
721 # See also the wiki page below for detail:
722 # https://www.mercurial-scm.org/wiki/DirstateTransactionPlan
722 # https://www.mercurial-scm.org/wiki/DirstateTransactionPlan
723
723
724 # emulate dropping timestamp in 'parsers.pack_dirstate'
724 # emulate dropping timestamp in 'parsers.pack_dirstate'
725 now = _getfsnow(self._opener)
725 now = _getfsnow(self._opener)
726 self._map.clearambiguoustimes(self._updatedfiles, now)
726 self._map.clearambiguoustimes(self._updatedfiles, now)
727
727
728 # emulate that all 'dirstate.normal' results are written out
728 # emulate that all 'dirstate.normal' results are written out
729 self._lastnormaltime = 0
729 self._lastnormaltime = 0
730 self._updatedfiles.clear()
730 self._updatedfiles.clear()
731
731
732 # delay writing in-memory changes out
732 # delay writing in-memory changes out
733 tr.addfilegenerator(
733 tr.addfilegenerator(
734 b'dirstate',
734 b'dirstate',
735 (self._filename,),
735 (self._filename,),
736 self._writedirstate,
736 self._writedirstate,
737 location=b'plain',
737 location=b'plain',
738 )
738 )
739 return
739 return
740
740
741 st = self._opener(filename, b"w", atomictemp=True, checkambig=True)
741 st = self._opener(filename, b"w", atomictemp=True, checkambig=True)
742 self._writedirstate(st)
742 self._writedirstate(st)
743
743
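When a transaction is active the actual write is deferred, but ambiguous timestamps are cleared immediately. A toy model of what that clearing does to a normal entry written in the current second (the real work happens in the dirstate map's clearambiguoustimes / pack_dirstate; the tuple layout here is illustrative):

    AMBIGUOUS_TIME = -1  # assumed value

    def clear_ambiguous_times(entries, updated, now):
        """Toy model of the timestamp drop: a 'normal' entry whose mtime
        equals `now` cannot be trusted, so its mtime is unset and the
        next status() re-checks the file's contents."""
        for f in updated:
            state, mode, size, mtime = entries[f]
            if state == b'n' and mtime == now:
                entries[f] = (state, mode, size, AMBIGUOUS_TIME)

    entries = {b'f': (b'n', 0o644, 12, 42)}
    clear_ambiguous_times(entries, [b'f'], now=42)
    assert entries[b'f'][3] == AMBIGUOUS_TIME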
744 def addparentchangecallback(self, category, callback):
744 def addparentchangecallback(self, category, callback):
745 """add a callback to be called when the wd parents are changed
745 """add a callback to be called when the wd parents are changed
746
746
747 Callback will be called with the following arguments:
747 Callback will be called with the following arguments:
748 dirstate, (oldp1, oldp2), (newp1, newp2)
748 dirstate, (oldp1, oldp2), (newp1, newp2)
749
749
750 Category is a unique identifier to allow overwriting an old callback
750 Category is a unique identifier to allow overwriting an old callback
751 with a newer callback.
751 with a newer callback.
752 """
752 """
753 self._plchangecallbacks[category] = callback
753 self._plchangecallbacks[category] = callback
754
754
755 def _writedirstate(self, st):
755 def _writedirstate(self, st):
756 # notify callbacks about parents change
756 # notify callbacks about parents change
757 if self._origpl is not None and self._origpl != self._pl:
757 if self._origpl is not None and self._origpl != self._pl:
758 for c, callback in sorted(
758 for c, callback in sorted(
759 pycompat.iteritems(self._plchangecallbacks)
759 pycompat.iteritems(self._plchangecallbacks)
760 ):
760 ):
761 callback(self, self._origpl, self._pl)
761 callback(self, self._origpl, self._pl)
762 self._origpl = None
762 self._origpl = None
763 # use the modification time of the newly created temporary file as the
763 # use the modification time of the newly created temporary file as the
764 # filesystem's notion of 'now'
764 # filesystem's notion of 'now'
765 now = util.fstat(st)[stat.ST_MTIME] & _rangemask
765 now = util.fstat(st)[stat.ST_MTIME] & _rangemask
766
766
767 # enough 'delaywrite' prevents 'pack_dirstate' from dropping
767 # enough 'delaywrite' prevents 'pack_dirstate' from dropping
768 # the timestamp of each entry in the dirstate, because of 'now > mtime'
768 # the timestamp of each entry in the dirstate, because of 'now > mtime'
769 delaywrite = self._ui.configint(b'debug', b'dirstate.delaywrite')
769 delaywrite = self._ui.configint(b'debug', b'dirstate.delaywrite')
770 if delaywrite > 0:
770 if delaywrite > 0:
771 # do we have any files to delay for?
771 # do we have any files to delay for?
772 for f, e in pycompat.iteritems(self._map):
772 for f, e in pycompat.iteritems(self._map):
773 if e.state == b'n' and e[3] == now:
773 if e.state == b'n' and e[3] == now:
774 import time # to avoid useless import
774 import time # to avoid useless import
775
775
776 # rather than sleep n seconds, sleep until the next
776 # rather than sleep n seconds, sleep until the next
777 # multiple of n seconds
777 # multiple of n seconds
778 clock = time.time()
778 clock = time.time()
779 start = int(clock) - (int(clock) % delaywrite)
779 start = int(clock) - (int(clock) % delaywrite)
780 end = start + delaywrite
780 end = start + delaywrite
781 time.sleep(end - clock)
781 time.sleep(end - clock)
782 now = end # trust our estimate that the end is near now
782 now = end # trust our estimate that the end is near now
783 break
783 break
784
784
785 self._map.write(st, now)
785 self._map.write(st, now)
786 self._lastnormaltime = 0
786 self._lastnormaltime = 0
787 self._dirty = False
787 self._dirty = False
788
788
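The delaywrite loop sleeps to the next multiple of the configured interval rather than for a fixed duration; the arithmetic, reproduced standalone:

    def delay_until_boundary(clock, delaywrite):
        """Reproduce the sleep computed above: wait until the next
        multiple of `delaywrite` seconds."""
        start = int(clock) - (int(clock) % delaywrite)
        end = start + delaywrite
        return end - clock

    assert abs(delay_until_boundary(100.25, 2) - 1.75) < 1e-9
    assert abs(delay_until_boundary(101.0, 2) - 1.0) < 1e-9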
789 def _dirignore(self, f):
789 def _dirignore(self, f):
790 if self._ignore(f):
790 if self._ignore(f):
791 return True
791 return True
792 for p in pathutil.finddirs(f):
792 for p in pathutil.finddirs(f):
793 if self._ignore(p):
793 if self._ignore(p):
794 return True
794 return True
795 return False
795 return False
796
796
797 def _ignorefiles(self):
797 def _ignorefiles(self):
798 files = []
798 files = []
799 if os.path.exists(self._join(b'.hgignore')):
799 if os.path.exists(self._join(b'.hgignore')):
800 files.append(self._join(b'.hgignore'))
800 files.append(self._join(b'.hgignore'))
801 for name, path in self._ui.configitems(b"ui"):
801 for name, path in self._ui.configitems(b"ui"):
802 if name == b'ignore' or name.startswith(b'ignore.'):
802 if name == b'ignore' or name.startswith(b'ignore.'):
803 # we need to use os.path.join here rather than self._join
803 # we need to use os.path.join here rather than self._join
804 # because path is arbitrary and user-specified
804 # because path is arbitrary and user-specified
805 files.append(os.path.join(self._rootdir, util.expandpath(path)))
805 files.append(os.path.join(self._rootdir, util.expandpath(path)))
806 return files
806 return files
807
807
808 def _ignorefileandline(self, f):
808 def _ignorefileandline(self, f):
809 files = collections.deque(self._ignorefiles())
809 files = collections.deque(self._ignorefiles())
810 visited = set()
810 visited = set()
811 while files:
811 while files:
812 i = files.popleft()
812 i = files.popleft()
813 patterns = matchmod.readpatternfile(
813 patterns = matchmod.readpatternfile(
814 i, self._ui.warn, sourceinfo=True
814 i, self._ui.warn, sourceinfo=True
815 )
815 )
816 for pattern, lineno, line in patterns:
816 for pattern, lineno, line in patterns:
817 kind, p = matchmod._patsplit(pattern, b'glob')
817 kind, p = matchmod._patsplit(pattern, b'glob')
818 if kind == b"subinclude":
818 if kind == b"subinclude":
819 if p not in visited:
819 if p not in visited:
820 files.append(p)
820 files.append(p)
821 continue
821 continue
822 m = matchmod.match(
822 m = matchmod.match(
823 self._root, b'', [], [pattern], warn=self._ui.warn
823 self._root, b'', [], [pattern], warn=self._ui.warn
824 )
824 )
825 if m(f):
825 if m(f):
826 return (i, lineno, line)
826 return (i, lineno, line)
827 visited.add(i)
827 visited.add(i)
828 return (None, -1, b"")
828 return (None, -1, b"")
829
829
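The loop in _ignorefileandline() is a breadth-first walk over ignore files in which subinclude patterns enqueue further files and the visited set guards against cycles. A toy mirror with hypothetical helper callables (`read_patterns` and `matches` stand in for readpatternfile and the matcher):

    import collections

    def first_ignore_match(files, read_patterns, matches):
        """Toy mirror of the queue walk above.

        read_patterns(f) yields (kind, payload, lineno, line);
        matches(payload) says whether the target hits the pattern."""
        queue = collections.deque(files)
        visited = set()
        while queue:
            i = queue.popleft()
            for kind, payload, lineno, line in read_patterns(i):
                if kind == b'subinclude':
                    if payload not in visited:
                        queue.append(payload)  # descend into nested file
                    continue
                if matches(payload):
                    return (i, lineno, line)
            visited.add(i)
        return (None, -1, b'')

    pats = {
        b'.hgignore': [(b'subinclude', b'sub/.hgignore', 0, b''),
                       (b'glob', b'*.tmp', 1, b'*.tmp')],
        b'sub/.hgignore': [(b'glob', b'*.log', 1, b'*.log')],
    }
    hit = first_ignore_match([b'.hgignore'], pats.__getitem__,
                             lambda p: p == b'*.log')
    assert hit == (b'sub/.hgignore', 1, b'*.log')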
830 def _walkexplicit(self, match, subrepos):
830 def _walkexplicit(self, match, subrepos):
831 """Get stat data about the files explicitly specified by match.
831 """Get stat data about the files explicitly specified by match.
832
832
833 Return a triple (results, dirsfound, dirsnotfound).
833 Return a triple (results, dirsfound, dirsnotfound).
834 - results is a mapping from filename to stat result. It also contains
834 - results is a mapping from filename to stat result. It also contains
835 listings mapping subrepos and .hg to None.
835 listings mapping subrepos and .hg to None.
836 - dirsfound is a list of files found to be directories.
836 - dirsfound is a list of files found to be directories.
837 - dirsnotfound is a list of files that the dirstate thinks are
837 - dirsnotfound is a list of files that the dirstate thinks are
838 directories and that were not found."""
838 directories and that were not found."""
839
839
840 def badtype(mode):
840 def badtype(mode):
841 kind = _(b'unknown')
841 kind = _(b'unknown')
842 if stat.S_ISCHR(mode):
842 if stat.S_ISCHR(mode):
843 kind = _(b'character device')
843 kind = _(b'character device')
844 elif stat.S_ISBLK(mode):
844 elif stat.S_ISBLK(mode):
845 kind = _(b'block device')
845 kind = _(b'block device')
846 elif stat.S_ISFIFO(mode):
846 elif stat.S_ISFIFO(mode):
847 kind = _(b'fifo')
847 kind = _(b'fifo')
848 elif stat.S_ISSOCK(mode):
848 elif stat.S_ISSOCK(mode):
849 kind = _(b'socket')
849 kind = _(b'socket')
850 elif stat.S_ISDIR(mode):
850 elif stat.S_ISDIR(mode):
851 kind = _(b'directory')
851 kind = _(b'directory')
852 return _(b'unsupported file type (type is %s)') % kind
852 return _(b'unsupported file type (type is %s)') % kind
853
853
854 badfn = match.bad
854 badfn = match.bad
855 dmap = self._map
855 dmap = self._map
856 lstat = os.lstat
856 lstat = os.lstat
857 getkind = stat.S_IFMT
857 getkind = stat.S_IFMT
858 dirkind = stat.S_IFDIR
858 dirkind = stat.S_IFDIR
859 regkind = stat.S_IFREG
859 regkind = stat.S_IFREG
860 lnkkind = stat.S_IFLNK
860 lnkkind = stat.S_IFLNK
861 join = self._join
861 join = self._join
862 dirsfound = []
862 dirsfound = []
863 foundadd = dirsfound.append
863 foundadd = dirsfound.append
864 dirsnotfound = []
864 dirsnotfound = []
865 notfoundadd = dirsnotfound.append
865 notfoundadd = dirsnotfound.append
866
866
867 if not match.isexact() and self._checkcase:
867 if not match.isexact() and self._checkcase:
868 normalize = self._normalize
868 normalize = self._normalize
869 else:
869 else:
870 normalize = None
870 normalize = None
871
871
872 files = sorted(match.files())
872 files = sorted(match.files())
873 subrepos.sort()
873 subrepos.sort()
874 i, j = 0, 0
874 i, j = 0, 0
875 while i < len(files) and j < len(subrepos):
875 while i < len(files) and j < len(subrepos):
876 subpath = subrepos[j] + b"/"
876 subpath = subrepos[j] + b"/"
877 if files[i] < subpath:
877 if files[i] < subpath:
878 i += 1
878 i += 1
879 continue
879 continue
880 while i < len(files) and files[i].startswith(subpath):
880 while i < len(files) and files[i].startswith(subpath):
881 del files[i]
881 del files[i]
882 j += 1
882 j += 1
883
883
884 if not files or b'' in files:
884 if not files or b'' in files:
885 files = [b'']
885 files = [b'']
886 # constructing the foldmap is expensive, so don't do it for the
886 # constructing the foldmap is expensive, so don't do it for the
887 # common case where files is ['']
887 # common case where files is ['']
888 normalize = None
888 normalize = None
889 results = dict.fromkeys(subrepos)
889 results = dict.fromkeys(subrepos)
890 results[b'.hg'] = None
890 results[b'.hg'] = None
891
891
892 for ff in files:
892 for ff in files:
893 if normalize:
893 if normalize:
894 nf = normalize(ff, False, True)
894 nf = normalize(ff, False, True)
895 else:
895 else:
896 nf = ff
896 nf = ff
897 if nf in results:
897 if nf in results:
898 continue
898 continue
899
899
900 try:
900 try:
901 st = lstat(join(nf))
901 st = lstat(join(nf))
902 kind = getkind(st.st_mode)
902 kind = getkind(st.st_mode)
903 if kind == dirkind:
903 if kind == dirkind:
904 if nf in dmap:
904 if nf in dmap:
905 # file replaced by dir on disk but still in dirstate
905 # file replaced by dir on disk but still in dirstate
906 results[nf] = None
906 results[nf] = None
907 foundadd((nf, ff))
907 foundadd((nf, ff))
908 elif kind == regkind or kind == lnkkind:
908 elif kind == regkind or kind == lnkkind:
909 results[nf] = st
909 results[nf] = st
910 else:
910 else:
911 badfn(ff, badtype(kind))
911 badfn(ff, badtype(kind))
912 if nf in dmap:
912 if nf in dmap:
913 results[nf] = None
913 results[nf] = None
914 except OSError as inst: # nf not found on disk - it is dirstate only
914 except OSError as inst: # nf not found on disk - it is dirstate only
915 if nf in dmap: # does it exactly match a missing file?
915 if nf in dmap: # does it exactly match a missing file?
916 results[nf] = None
916 results[nf] = None
917 else: # does it match a missing directory?
917 else: # does it match a missing directory?
918 if self._map.hasdir(nf):
918 if self._map.hasdir(nf):
919 notfoundadd(nf)
919 notfoundadd(nf)
920 else:
920 else:
921 badfn(ff, encoding.strtolocal(inst.strerror))
921 badfn(ff, encoding.strtolocal(inst.strerror))
922
922
923 # match.files() may contain explicitly-specified paths that shouldn't
923 # match.files() may contain explicitly-specified paths that shouldn't
924 # be taken; drop them from the list of files found. dirsfound/notfound
924 # be taken; drop them from the list of files found. dirsfound/notfound
925 # aren't filtered here because they will be tested later.
925 # aren't filtered here because they will be tested later.
926 if match.anypats():
926 if match.anypats():
927 for f in list(results):
927 for f in list(results):
928 if f == b'.hg' or f in subrepos:
928 if f == b'.hg' or f in subrepos:
929 # keep sentinel to disable further out-of-repo walks
929 # keep sentinel to disable further out-of-repo walks
930 continue
930 continue
931 if not match(f):
931 if not match(f):
932 del results[f]
932 del results[f]
933
933
934 # Case insensitive filesystems cannot rely on lstat() failing to detect
934 # Case insensitive filesystems cannot rely on lstat() failing to detect
935 # a case-only rename. Prune the stat object for any file that does not
935 # a case-only rename. Prune the stat object for any file that does not
936 # match the case in the filesystem, if there are multiple files that
936 # match the case in the filesystem, if there are multiple files that
937 # normalize to the same path.
937 # normalize to the same path.
938 if match.isexact() and self._checkcase:
938 if match.isexact() and self._checkcase:
939 normed = {}
939 normed = {}
940
940
941 for f, st in pycompat.iteritems(results):
941 for f, st in pycompat.iteritems(results):
942 if st is None:
942 if st is None:
943 continue
943 continue
944
944
945 nc = util.normcase(f)
945 nc = util.normcase(f)
946 paths = normed.get(nc)
946 paths = normed.get(nc)
947
947
948 if paths is None:
948 if paths is None:
949 paths = set()
949 paths = set()
950 normed[nc] = paths
950 normed[nc] = paths
951
951
952 paths.add(f)
952 paths.add(f)
953
953
954 for norm, paths in pycompat.iteritems(normed):
954 for norm, paths in pycompat.iteritems(normed):
955 if len(paths) > 1:
955 if len(paths) > 1:
956 for path in paths:
956 for path in paths:
957 folded = self._discoverpath(
957 folded = self._discoverpath(
958 path, norm, True, None, self._map.dirfoldmap
958 path, norm, True, None, self._map.dirfoldmap
959 )
959 )
960 if path != folded:
960 if path != folded:
961 results[path] = None
961 results[path] = None
962
962
963 return results, dirsfound, dirsnotfound
963 return results, dirsfound, dirsnotfound
964
964
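The two-pointer pass near the top of _walkexplicit() drops explicit files that live inside a subrepo, relying on both lists being sorted. Isolated, with a quick check:

    def prune_subrepo_paths(files, subrepos):
        """Toy version of the sorted two-pointer pruning above."""
        files = sorted(files)
        subrepos = sorted(subrepos)
        i, j = 0, 0
        while i < len(files) and j < len(subrepos):
            subpath = subrepos[j] + b'/'
            if files[i] < subpath:
                i += 1
                continue
            while i < len(files) and files[i].startswith(subpath):
                del files[i]
            j += 1
        return files

    assert prune_subrepo_paths(
        [b'a', b'sub/x', b'sub/y', b'z'], [b'sub']
    ) == [b'a', b'z']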
965 def walk(self, match, subrepos, unknown, ignored, full=True):
965 def walk(self, match, subrepos, unknown, ignored, full=True):
966 """
966 """
967 Walk recursively through the directory tree, finding all files
967 Walk recursively through the directory tree, finding all files
968 matched by match.
968 matched by match.
969
969
970 If full is False, maybe skip some known-clean files.
970 If full is False, maybe skip some known-clean files.
971
971
972 Return a dict mapping filename to stat-like object (either
972 Return a dict mapping filename to stat-like object (either
973 mercurial.osutil.stat instance or return value of os.stat()).
973 mercurial.osutil.stat instance or return value of os.stat()).
974
974
975 """
975 """
976 # full is a flag that extensions that hook into walk can use -- this
976 # full is a flag that extensions that hook into walk can use -- this
977 # implementation doesn't use it at all. This satisfies the contract
977 # implementation doesn't use it at all. This satisfies the contract
978 # because we only guarantee a "maybe".
978 # because we only guarantee a "maybe".
979
979
980 if ignored:
980 if ignored:
981 ignore = util.never
981 ignore = util.never
982 dirignore = util.never
982 dirignore = util.never
983 elif unknown:
983 elif unknown:
984 ignore = self._ignore
984 ignore = self._ignore
985 dirignore = self._dirignore
985 dirignore = self._dirignore
986 else:
986 else:
987 # if not unknown and not ignored, drop dir recursion and step 2
987 # if not unknown and not ignored, drop dir recursion and step 2
988 ignore = util.always
988 ignore = util.always
989 dirignore = util.always
989 dirignore = util.always
990
990
991 matchfn = match.matchfn
991 matchfn = match.matchfn
992 matchalways = match.always()
992 matchalways = match.always()
993 matchtdir = match.traversedir
993 matchtdir = match.traversedir
994 dmap = self._map
994 dmap = self._map
995 listdir = util.listdir
995 listdir = util.listdir
996 lstat = os.lstat
996 lstat = os.lstat
997 dirkind = stat.S_IFDIR
997 dirkind = stat.S_IFDIR
998 regkind = stat.S_IFREG
998 regkind = stat.S_IFREG
999 lnkkind = stat.S_IFLNK
999 lnkkind = stat.S_IFLNK
1000 join = self._join
1000 join = self._join
1001
1001
1002 exact = skipstep3 = False
1002 exact = skipstep3 = False
1003 if match.isexact(): # match.exact
1003 if match.isexact(): # match.exact
1004 exact = True
1004 exact = True
1005 dirignore = util.always # skip step 2
1005 dirignore = util.always # skip step 2
1006 elif match.prefix(): # match.match, no patterns
1006 elif match.prefix(): # match.match, no patterns
1007 skipstep3 = True
1007 skipstep3 = True
1008
1008
1009 if not exact and self._checkcase:
1009 if not exact and self._checkcase:
1010 normalize = self._normalize
1010 normalize = self._normalize
1011 normalizefile = self._normalizefile
1011 normalizefile = self._normalizefile
1012 skipstep3 = False
1012 skipstep3 = False
1013 else:
1013 else:
1014 normalize = self._normalize
1014 normalize = self._normalize
1015 normalizefile = None
1015 normalizefile = None
1016
1016
1017 # step 1: find all explicit files
1017 # step 1: find all explicit files
1018 results, work, dirsnotfound = self._walkexplicit(match, subrepos)
1018 results, work, dirsnotfound = self._walkexplicit(match, subrepos)
1019 if matchtdir:
1019 if matchtdir:
1020 for d in work:
1020 for d in work:
1021 matchtdir(d[0])
1021 matchtdir(d[0])
1022 for d in dirsnotfound:
1022 for d in dirsnotfound:
1023 matchtdir(d)
1023 matchtdir(d)
1024
1024
1025 skipstep3 = skipstep3 and not (work or dirsnotfound)
1025 skipstep3 = skipstep3 and not (work or dirsnotfound)
1026 work = [d for d in work if not dirignore(d[0])]
1026 work = [d for d in work if not dirignore(d[0])]
1027
1027
1028 # step 2: visit subdirectories
1028 # step 2: visit subdirectories
1029 def traverse(work, alreadynormed):
1029 def traverse(work, alreadynormed):
1030 wadd = work.append
1030 wadd = work.append
1031 while work:
1031 while work:
1032 tracing.counter('dirstate.walk work', len(work))
1032 tracing.counter('dirstate.walk work', len(work))
1033 nd = work.pop()
1033 nd = work.pop()
1034 visitentries = match.visitchildrenset(nd)
1034 visitentries = match.visitchildrenset(nd)
1035 if not visitentries:
1035 if not visitentries:
1036 continue
1036 continue
1037 if visitentries == b'this' or visitentries == b'all':
1037 if visitentries == b'this' or visitentries == b'all':
1038 visitentries = None
1038 visitentries = None
1039 skip = None
1039 skip = None
1040 if nd != b'':
1040 if nd != b'':
1041 skip = b'.hg'
1041 skip = b'.hg'
1042 try:
1042 try:
1043 with tracing.log('dirstate.walk.traverse listdir %s', nd):
1043 with tracing.log('dirstate.walk.traverse listdir %s', nd):
1044 entries = listdir(join(nd), stat=True, skip=skip)
1044 entries = listdir(join(nd), stat=True, skip=skip)
1045 except OSError as inst:
1045 except OSError as inst:
1046 if inst.errno in (errno.EACCES, errno.ENOENT):
1046 if inst.errno in (errno.EACCES, errno.ENOENT):
1047 match.bad(
1047 match.bad(
1048 self.pathto(nd), encoding.strtolocal(inst.strerror)
1048 self.pathto(nd), encoding.strtolocal(inst.strerror)
1049 )
1049 )
1050 continue
1050 continue
1051 raise
1051 raise
1052 for f, kind, st in entries:
1052 for f, kind, st in entries:
1053 # Some matchers may return files in the visitentries set,
1053 # Some matchers may return files in the visitentries set,
1054 # instead of 'this', if the matcher explicitly mentions them
1054 # instead of 'this', if the matcher explicitly mentions them
1055 # and is not an exactmatcher. This is acceptable; we do not
1055 # and is not an exactmatcher. This is acceptable; we do not
1056 # make any hard assumptions about file-or-directory below
1056 # make any hard assumptions about file-or-directory below
1057 # based on the presence of `f` in visitentries. If
1057 # based on the presence of `f` in visitentries. If
1058 # visitchildrenset returned a set, we can always skip the
1058 # visitchildrenset returned a set, we can always skip the
1059 # entries *not* in the set it provided regardless of whether
1059 # entries *not* in the set it provided regardless of whether
1060 # they're actually a file or a directory.
1060 # they're actually a file or a directory.
1061 if visitentries and f not in visitentries:
1061 if visitentries and f not in visitentries:
1062 continue
1062 continue
1063 if normalizefile:
1063 if normalizefile:
1064 # even though f might be a directory, we're only
1064 # even though f might be a directory, we're only
1065 # interested in comparing it to files currently in the
1065 # interested in comparing it to files currently in the
1066 # dmap -- therefore normalizefile is enough
1066 # dmap -- therefore normalizefile is enough
1067 nf = normalizefile(
1067 nf = normalizefile(
1068 nd and (nd + b"/" + f) or f, True, True
1068 nd and (nd + b"/" + f) or f, True, True
1069 )
1069 )
1070 else:
1070 else:
1071 nf = nd and (nd + b"/" + f) or f
1071 nf = nd and (nd + b"/" + f) or f
1072 if nf not in results:
1072 if nf not in results:
1073 if kind == dirkind:
1073 if kind == dirkind:
1074 if not ignore(nf):
1074 if not ignore(nf):
1075 if matchtdir:
1075 if matchtdir:
1076 matchtdir(nf)
1076 matchtdir(nf)
1077 wadd(nf)
1077 wadd(nf)
1078 if nf in dmap and (matchalways or matchfn(nf)):
1078 if nf in dmap and (matchalways or matchfn(nf)):
1079 results[nf] = None
1079 results[nf] = None
1080 elif kind == regkind or kind == lnkkind:
1080 elif kind == regkind or kind == lnkkind:
1081 if nf in dmap:
1081 if nf in dmap:
1082 if matchalways or matchfn(nf):
1082 if matchalways or matchfn(nf):
1083 results[nf] = st
1083 results[nf] = st
1084 elif (matchalways or matchfn(nf)) and not ignore(
1084 elif (matchalways or matchfn(nf)) and not ignore(
1085 nf
1085 nf
1086 ):
1086 ):
1087 # unknown file -- normalize if necessary
1087 # unknown file -- normalize if necessary
1088 if not alreadynormed:
1088 if not alreadynormed:
1089 nf = normalize(nf, False, True)
1089 nf = normalize(nf, False, True)
1090 results[nf] = st
1090 results[nf] = st
1091 elif nf in dmap and (matchalways or matchfn(nf)):
1091 elif nf in dmap and (matchalways or matchfn(nf)):
1092 results[nf] = None
1092 results[nf] = None
1093
1093
1094 for nd, d in work:
1094 for nd, d in work:
1095 # alreadynormed means that processwork doesn't have to do any
1095 # alreadynormed means that processwork doesn't have to do any
1096 # expensive directory normalization
1096 # expensive directory normalization
1097 alreadynormed = not normalize or nd == d
1097 alreadynormed = not normalize or nd == d
1098 traverse([d], alreadynormed)
1098 traverse([d], alreadynormed)
1099
1099
1100 for s in subrepos:
1100 for s in subrepos:
1101 del results[s]
1101 del results[s]
1102 del results[b'.hg']
1102 del results[b'.hg']
1103
1103
1104 # step 3: visit remaining files from dmap
1104 # step 3: visit remaining files from dmap
1105 if not skipstep3 and not exact:
1105 if not skipstep3 and not exact:
1106 # If a dmap file is not in results yet, it was either
1106 # If a dmap file is not in results yet, it was either
1107 # a) not matching matchfn, b) ignored, c) missing, or d) under a
1107 # a) not matching matchfn, b) ignored, c) missing, or d) under a
1108 # symlink directory.
1108 # symlink directory.
1109 if not results and matchalways:
1109 if not results and matchalways:
1110 visit = [f for f in dmap]
1110 visit = [f for f in dmap]
1111 else:
1111 else:
1112 visit = [f for f in dmap if f not in results and matchfn(f)]
1112 visit = [f for f in dmap if f not in results and matchfn(f)]
1113 visit.sort()
1113 visit.sort()
1114
1114
1115 if unknown:
1115 if unknown:
1116 # unknown == True means we walked all dirs under the roots
1116 # unknown == True means we walked all dirs under the roots
1117 # that weren't ignored, and everything that matched was stat'ed
1117 # that weren't ignored, and everything that matched was stat'ed
1118 # and is already in results.
1118 # and is already in results.
1119 # The rest must thus be ignored or under a symlink.
1119 # The rest must thus be ignored or under a symlink.
1120 audit_path = pathutil.pathauditor(self._root, cached=True)
1120 audit_path = pathutil.pathauditor(self._root, cached=True)
1121
1121
1122 for nf in iter(visit):
1122 for nf in iter(visit):
1123 # If a stat for the same file was already added with a
1123 # If a stat for the same file was already added with a
1124 # different case, don't add one for this, since that would
1124 # different case, don't add one for this, since that would
1125 # make it appear as if the file exists under both names
1125 # make it appear as if the file exists under both names
1126 # on disk.
1126 # on disk.
1127 if (
1127 if (
1128 normalizefile
1128 normalizefile
1129 and normalizefile(nf, True, True) in results
1129 and normalizefile(nf, True, True) in results
1130 ):
1130 ):
1131 results[nf] = None
1131 results[nf] = None
1132 # Report ignored items in the dmap as long as they are not
1132 # Report ignored items in the dmap as long as they are not
1133 # under a symlink directory.
1133 # under a symlink directory.
1134 elif audit_path.check(nf):
1134 elif audit_path.check(nf):
1135 try:
1135 try:
1136 results[nf] = lstat(join(nf))
1136 results[nf] = lstat(join(nf))
1137 # file was just ignored, no links, and exists
1137 # file was just ignored, no links, and exists
1138 except OSError:
1138 except OSError:
1139 # file doesn't exist
1139 # file doesn't exist
1140 results[nf] = None
1140 results[nf] = None
1141 else:
1141 else:
1142 # It's either missing or under a symlink directory
1142 # It's either missing or under a symlink directory
1143 # which we in this case report as missing
1143 # which we in this case report as missing
1144 results[nf] = None
1144 results[nf] = None
1145 else:
1145 else:
1146 # We may not have walked the full directory tree above,
1146 # We may not have walked the full directory tree above,
1147 # so stat and check everything we missed.
1147 # so stat and check everything we missed.
1148 iv = iter(visit)
1148 iv = iter(visit)
1149 for st in util.statfiles([join(i) for i in visit]):
1149 for st in util.statfiles([join(i) for i in visit]):
1150 results[next(iv)] = st
1150 results[next(iv)] = st
1151 return results
1151 return results
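
(As a sketch of what the fallback branch above relies on: util.statfiles stats a batch of paths and yields None for paths that cannot be lstat'ed, so missing files naturally land in results as None. Paths below are hypothetical.)

    from mercurial import util

    files = [b'present.txt', b'missing.txt']
    stats = util.statfiles(files)       # one result per input path
    results = dict(zip(files, stats))   # None marks a missing file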

    def _rust_status(self, matcher, list_clean, list_ignored, list_unknown):
        # Force Rayon (the Rust parallelism library) to respect the number
        # of workers. This is a temporary workaround until the Rust code
        # knows how to read the config file.
        numcpus = self._ui.configint(b"worker", b"numcpus")
        if numcpus is not None:
            encoding.environ.setdefault(b'RAYON_NUM_THREADS', b'%d' % numcpus)

        workers_enabled = self._ui.configbool(b"worker", b"enabled", True)
        if not workers_enabled:
            encoding.environ[b"RAYON_NUM_THREADS"] = b"1"
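
(Rayon sizes its global thread pool from RAYON_NUM_THREADS once, the first time the pool is built, which is why the variable must be set before the first call into the Rust extension. A minimal stand-alone sketch:)

    import os

    # must happen before any Rayon-backed extension runs;
    # '1' effectively disables parallelism
    os.environ.setdefault('RAYON_NUM_THREADS', '4')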

        (
            lookup,
            modified,
            added,
            removed,
            deleted,
            clean,
            ignored,
            unknown,
            warnings,
            bad,
            traversed,
            dirty,
        ) = rustmod.status(
            self._map._rustmap,
            matcher,
            self._rootdir,
            self._ignorefiles(),
            self._checkexec,
            self._lastnormaltime,
            bool(list_clean),
            bool(list_ignored),
            bool(list_unknown),
            bool(matcher.traversedir),
        )

        self._dirty |= dirty

        if matcher.traversedir:
            for dir in traversed:
                matcher.traversedir(dir)

        if self._ui.warn:
            for item in warnings:
                if isinstance(item, tuple):
                    file_path, syntax = item
                    msg = _(b"%s: ignoring invalid syntax '%s'\n") % (
                        file_path,
                        syntax,
                    )
                    self._ui.warn(msg)
                else:
                    msg = _(b"skipping unreadable pattern file '%s': %s\n")
                    self._ui.warn(
                        msg
                        % (
                            pathutil.canonpath(
                                self._rootdir, self._rootdir, item
                            ),
                            b"No such file or directory",
                        )
                    )

        for (fn, message) in bad:
            matcher.bad(fn, encoding.strtolocal(message))

        status = scmutil.status(
            modified=modified,
            added=added,
            removed=removed,
            deleted=deleted,
            unknown=unknown,
            ignored=ignored,
            clean=clean,
        )
        return (lookup, status)

    def status(self, match, subrepos, ignored, clean, unknown):
        """Determine the status of the working copy relative to the
        dirstate and return a pair of (unsure, status), where status is of
        type scmutil.status and:

          unsure:
            files that might have been modified since the dirstate was
            written, but need to be read to be sure (size is the same
            but mtime differs)
          status.modified:
            files that have definitely been modified since the dirstate
            was written (different size or mode)
          status.clean:
            files that have definitely not been modified since the
            dirstate was written
        """
        listignored, listclean, listunknown = ignored, clean, unknown
        lookup, modified, added, unknown, ignored = [], [], [], [], []
        removed, deleted, clean = [], [], []

        dmap = self._map
        dmap.preload()

        use_rust = True

        allowed_matchers = (
            matchmod.alwaysmatcher,
            matchmod.exactmatcher,
            matchmod.includematcher,
        )

        if rustmod is None:
            use_rust = False
        elif self._checkcase:
            # Case-insensitive filesystems are not handled yet
            use_rust = False
        elif subrepos:
            use_rust = False
        elif sparse.enabled:
            use_rust = False
        elif not isinstance(match, allowed_matchers):
            # Some matchers have yet to be implemented
            use_rust = False

        if use_rust:
            try:
                return self._rust_status(
                    match, listclean, listignored, listunknown
                )
            except rustmod.FallbackError:
                pass

        def noop(f):
            pass

        dcontains = dmap.__contains__
        dget = dmap.__getitem__
        ladd = lookup.append  # aka "unsure"
        madd = modified.append
        aadd = added.append
        uadd = unknown.append if listunknown else noop
        iadd = ignored.append if listignored else noop
        radd = removed.append
        dadd = deleted.append
        cadd = clean.append if listclean else noop
        mexact = match.exact
        dirignore = self._dirignore
        checkexec = self._checkexec
        copymap = self._map.copymap
        lastnormaltime = self._lastnormaltime

        # We need to do full walks when either
        # - we're listing all clean files, or
        # - match.traversedir does something, because match.traversedir
        #   should be called for every dir in the working dir
        full = listclean or match.traversedir is not None
        for fn, st in pycompat.iteritems(
            self.walk(match, subrepos, listunknown, listignored, full=full)
        ):
            if not dcontains(fn):
                if (listignored or mexact(fn)) and dirignore(fn):
                    if listignored:
                        iadd(fn)
                else:
                    uadd(fn)
                continue

            # This is equivalent to 'state, mode, size, time = dmap[fn]' but
            # not written like that for performance reasons. dmap[fn] is not
            # a Python tuple in compiled builds. The CPython UNPACK_SEQUENCE
            # opcode has fast paths when the value to be unpacked is a tuple
            # or a list, but falls back to creating a full-fledged iterator
            # in general. That is much slower than simply accessing and
            # storing the tuple members one by one.
            t = dget(fn)
            state = t.state
            mode = t[1]
            size = t[2]
            time = t[3]
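            # (Illustration, not part of the original source: the generic
            #  form this avoids would be
            #      state, mode, size, time = t
            #  which sends UNPACK_SEQUENCE down its iterator fallback for
            #  the non-tuple entry type used in compiled builds.)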

            if not st and state in b"nma":
                dadd(fn)
            elif state == b'n':
                if (
                    size >= 0
                    and (
                        (size != st.st_size and size != st.st_size & _rangemask)
                        or ((mode ^ st.st_mode) & 0o100 and checkexec)
                    )
                    or t.from_p2
                    or fn in copymap
                ):
                    if stat.S_ISLNK(st.st_mode) and size != st.st_size:
                        # issue6456: Size returned may be longer due to
                        # encryption on EXT-4 fscrypt, undecided.
                        ladd(fn)
                    else:
                        madd(fn)
                elif (
                    time != st[stat.ST_MTIME]
                    and time != st[stat.ST_MTIME] & _rangemask
                ):
                    ladd(fn)
                elif st[stat.ST_MTIME] == lastnormaltime:
                    # fn may have just been marked as normal and it may have
                    # changed in the same second without changing its size.
                    # This can happen if we quickly do multiple commits.
                    # Force lookup, so we don't miss such a racy file change.
                    ladd(fn)
                elif listclean:
                    cadd(fn)
                elif t.merged:
                    madd(fn)
            elif state == b'a':
                aadd(fn)
            elif t.removed:
                radd(fn)
        status = scmutil.status(
            modified, added, removed, deleted, unknown, ignored, clean
        )
        return (lookup, status)
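
(A usage sketch for the method above, assuming a loaded repository object; the path and flags are hypothetical:)

    from mercurial import hg, match as matchmod, ui as uimod

    repo = hg.repository(uimod.ui.load(), b'.')
    unsure, st = repo.dirstate.status(
        matchmod.always(), subrepos=[], ignored=False, clean=False, unknown=True
    )
    # 'unsure' holds files whose size/mtime alone cannot settle the question;
    # callers re-read their contents to classify them as modified or clean.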

    def matches(self, match):
        """
        return files in the dirstate (in whatever state) filtered by match
        """
        dmap = self._map
        if rustmod is not None:
            dmap = self._map._rustmap

        if match.always():
            return dmap.keys()
        files = match.files()
        if match.isexact():
            # fast path -- filter the other way around, since typically
            # files is much smaller than dmap
            return [f for f in files if f in dmap]
        if match.prefix() and all(fn in dmap for fn in files):
            # fast path -- all the values are known to be files, so just
            # return that
            return list(files)
        return [f for f in dmap if match(f)]

    def _actualfilename(self, tr):
        if tr:
            return self._pendingfilename
        else:
            return self._filename

    def savebackup(self, tr, backupname):
        '''Save current dirstate into backup file'''
        filename = self._actualfilename(tr)
        assert backupname != filename

        # use '_writedirstate' instead of 'write' to make sure changes are
        # actually written out, because the latter skips writing while a
        # transaction is running. The output file will be used to create a
        # backup of the dirstate at this point.
        if self._dirty or not self._opener.exists(filename):
            self._writedirstate(
                self._opener(filename, b"w", atomictemp=True, checkambig=True)
            )

        if tr:
            # ensure that subsequent tr.writepending returns True for
            # changes written out above, even if dirstate is never
            # changed after this
            tr.addfilegenerator(
                b'dirstate',
                (self._filename,),
                self._writedirstate,
                location=b'plain',
            )

            # ensure that the pending file written above is unlinked on
            # failure, even if tr.writepending isn't invoked until the
            # end of this transaction
            tr.registertmp(filename, location=b'plain')

        self._opener.tryunlink(backupname)
        # hardlink backup is okay because _writedirstate is always called
        # with an "atomictemp=True" file.
        util.copyfile(
            self._opener.join(filename),
            self._opener.join(backupname),
            hardlink=True,
        )

    def restorebackup(self, tr, backupname):
        '''Restore dirstate from backup file'''
        # this "invalidate()" prevents "wlock.release()" from writing
        # changes of dirstate out after restoring from backup file
        self.invalidate()
        filename = self._actualfilename(tr)
        o = self._opener
        if util.samefile(o.join(backupname), o.join(filename)):
            o.unlink(backupname)
        else:
            o.rename(backupname, filename, checkambig=True)

    def clearbackup(self, tr, backupname):
        '''Clear backup file'''
        self._opener.unlink(backupname)
@@ -1,540 +1,542 @@
# parsers.py - Python implementation of parsers.c
#
# Copyright 2009 Olivia Mackall <olivia@selenic.com> and others
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import struct
import zlib

from ..node import (
    nullrev,
    sha1nodeconstants,
)
from .. import (
    error,
    pycompat,
    revlogutils,
    util,
)

from ..revlogutils import nodemap as nodemaputil
from ..revlogutils import constants as revlog_constants

stringio = pycompat.bytesio


_pack = struct.pack
_unpack = struct.unpack
_compress = zlib.compress
_decompress = zlib.decompress


# a special value used internally for `size` if the file comes from the
# other parent
FROM_P2 = -2

# a special value used internally for `size` if the file is
# modified/merged/added
NONNORMAL = -1


class dirstatetuple(object):
    """represent a dirstate entry

    It contains:

    - state (one of 'n', 'a', 'r', 'm')
    - mode,
    - size,
    - mtime,
    """

    __slots__ = ('_state', '_mode', '_size', '_mtime')

    def __init__(self, state, mode, size, mtime):
        self._state = state
        self._mode = mode
        self._size = size
        self._mtime = mtime

    def __getitem__(self, idx):
        if idx == 0 or idx == -4:
            return self._state
        elif idx == 1 or idx == -3:
            return self._mode
        elif idx == 2 or idx == -2:
            return self._size
        elif idx == 3 or idx == -1:
            return self._mtime
        else:
            raise IndexError(idx)

    @property
    def state(self):
        """
        States are:
          n  normal
          m  needs merging
          r  marked for removal
          a  marked for addition

        XXX This "state" is a bit obscure and mostly a direct expression of
        the dirstatev1 format. It would make sense to ultimately deprecate
        it in favor of the more "semantic" attributes.
        """
        return self._state

    @property
    def merged(self):
        """True if the file has been merged

        Should only be set if a merge is in progress in the dirstate
        """
        return self._state == b'm'

    @property
    def from_p2(self):
        """True if the file has been fetched from p2 during the current merge

+        This is only True if the file is currently tracked.
+
        Should only be set if a merge is in progress in the dirstate
        """
-        return self._size == FROM_P2
+        return self._state == b'n' and self._size == FROM_P2

    @property
    def from_p2_removed(self):
        """True if the file has been removed, but was "from_p2" initially

        This property seems like an abstraction leakage and should probably
        be dealt with in this class (or maybe the dirstatemap) directly.
        """
        return self._state == b'r' and self._size == FROM_P2

    @property
    def removed(self):
        """True if the file has been removed"""
        return self._state == b'r'

    @property
    def merged_removed(self):
        """True if the file has been removed, but was "merged" initially

        This property seems like an abstraction leakage and should probably
        be dealt with in this class (or maybe the dirstatemap) directly.
        """
        return self._state == b'r' and self._size == NONNORMAL

    def v1_state(self):
        """return a "state" suitable for v1 serialization"""
        return self._state

    def v1_mode(self):
        """return a "mode" suitable for v1 serialization"""
        return self._mode

    def v1_size(self):
        """return a "size" suitable for v1 serialization"""
        return self._size

    def v1_mtime(self):
        """return an "mtime" suitable for v1 serialization"""
        return self._mtime

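(A quick illustration of what the tightened `from_p2` property changes, using hypothetical entries built from the class above:)

    tracked = dirstatetuple(b'n', 0, FROM_P2, 0)
    removed = dirstatetuple(b'r', 0, FROM_P2, 0)

    assert tracked.from_p2            # tracked and fetched from p2
    assert not removed.from_p2        # untracked: excluded by the new check
    assert removed.from_p2_removed    # the removed case has its own property
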
def gettype(q):
    return int(q & 0xFFFF)


class BaseIndexObject(object):
    # Can I be passed to an algorithm implemented in Rust?
    rust_ext_compat = 0
    # Format of an index entry according to Python's `struct` language
    index_format = revlog_constants.INDEX_ENTRY_V1
    # Size of a C unsigned long long int, platform independent
    big_int_size = struct.calcsize(b'>Q')
    # Size of a C long int, platform independent
    int_size = struct.calcsize(b'>i')
    # An empty index entry, used as a default value to be overridden, or
    # nullrev
    null_item = (
        0,
        0,
        0,
        -1,
        -1,
        -1,
        -1,
        sha1nodeconstants.nullid,
        0,
        0,
        revlog_constants.COMP_MODE_INLINE,
        revlog_constants.COMP_MODE_INLINE,
    )

    @util.propertycache
    def entry_size(self):
        return self.index_format.size

    @property
    def nodemap(self):
        msg = b"index.nodemap is deprecated, use index.[has_node|rev|get_rev]"
        util.nouideprecwarn(msg, b'5.3', stacklevel=2)
        return self._nodemap

    @util.propertycache
    def _nodemap(self):
        nodemap = nodemaputil.NodeMap({sha1nodeconstants.nullid: nullrev})
        for r in range(0, len(self)):
            n = self[r][7]
            nodemap[n] = r
        return nodemap

    def has_node(self, node):
        """return True if the node exists in the index"""
        return node in self._nodemap

    def rev(self, node):
        """return a revision for a node

        If the node is unknown, raise a RevlogError"""
        return self._nodemap[node]

    def get_rev(self, node):
        """return a revision for a node

        If the node is unknown, return None"""
        return self._nodemap.get(node)

    def _stripnodes(self, start):
        if '_nodemap' in vars(self):
            for r in range(start, len(self)):
                n = self[r][7]
                del self._nodemap[n]

    def clearcaches(self):
        self.__dict__.pop('_nodemap', None)

    def __len__(self):
        return self._lgt + len(self._extra)

    def append(self, tup):
        if '_nodemap' in vars(self):
            self._nodemap[tup[7]] = len(self)
        data = self._pack_entry(len(self), tup)
        self._extra.append(data)

    def _pack_entry(self, rev, entry):
        assert entry[8] == 0
        assert entry[9] == 0
        return self.index_format.pack(*entry[:8])

    def _check_index(self, i):
        if not isinstance(i, int):
            raise TypeError(b"expecting int indexes")
        if i < 0 or i >= len(self):
            raise IndexError

    def __getitem__(self, i):
        if i == -1:
            return self.null_item
        self._check_index(i)
        if i >= self._lgt:
            data = self._extra[i - self._lgt]
        else:
            index = self._calculate_index(i)
            data = self._data[index : index + self.entry_size]
        r = self._unpack_entry(i, data)
        if self._lgt and i == 0:
            offset = revlogutils.offset_type(0, gettype(r[0]))
            r = (offset,) + r[1:]
        return r

    def _unpack_entry(self, rev, data):
        r = self.index_format.unpack(data)
        r = r + (
            0,
            0,
            revlog_constants.COMP_MODE_INLINE,
            revlog_constants.COMP_MODE_INLINE,
        )
        return r

    def pack_header(self, header):
        """pack header information as binary"""
        v_fmt = revlog_constants.INDEX_HEADER
        return v_fmt.pack(header)

    def entry_binary(self, rev):
        """return the raw binary string representing a revision"""
        entry = self[rev]
        p = revlog_constants.INDEX_ENTRY_V1.pack(*entry[:8])
        if rev == 0:
            p = p[revlog_constants.INDEX_HEADER.size :]
        return p

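(A sketch of the lookup API the cache above backs; `index` and the nodes are hypothetical:)

    node = index[0][7]                 # field 7 of an entry is the node hash
    assert index.has_node(node)
    assert index.rev(node) == 0        # unknown nodes raise RevlogError
    assert index.get_rev(b'\x01' * 20) is None   # unknown nodes return None
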
class IndexObject(BaseIndexObject):
    def __init__(self, data):
        assert len(data) % self.entry_size == 0, (
            len(data),
            self.entry_size,
            len(data) % self.entry_size,
        )
        self._data = data
        self._lgt = len(data) // self.entry_size
        self._extra = []

    def _calculate_index(self, i):
        return i * self.entry_size

    def __delitem__(self, i):
        if not isinstance(i, slice) or not i.stop == -1 or i.step is not None:
            raise ValueError(b"deleting slices only supports a:-1 with step 1")
        i = i.start
        self._check_index(i)
        self._stripnodes(i)
        if i < self._lgt:
            self._data = self._data[: i * self.entry_size]
            self._lgt = i
            self._extra = []
        else:
            self._extra = self._extra[: i - self._lgt]


class PersistentNodeMapIndexObject(IndexObject):
    """a debug-oriented class to test the persistent nodemap

    We need a simple python object to test the API and higher-level
    behavior. See the Rust implementation for more serious usage. This
    should be used only through the dedicated `devel.persistent-nodemap`
    config.
    """

    def nodemap_data_all(self):
        """Return bytes containing a full serialization of a nodemap

        The nodemap should be valid for the full set of revisions in the
        index."""
        return nodemaputil.persistent_data(self)

    def nodemap_data_incremental(self):
        """Return bytes containing an incremental update to the persistent
        nodemap

        This contains the data for an append-only update of the data
        provided in the last call to `update_nodemap_data`.
        """
        if self._nm_root is None:
            return None
        docket = self._nm_docket
        changed, data = nodemaputil.update_persistent_data(
            self, self._nm_root, self._nm_max_idx, self._nm_docket.tip_rev
        )

        self._nm_root = self._nm_max_idx = self._nm_docket = None
        return docket, changed, data

    def update_nodemap_data(self, docket, nm_data):
        """provide a full block of persisted binary data for a nodemap

        The data are expected to come from disk. See `nodemap_data_all` for
        a producer of such data."""
        if nm_data is not None:
            self._nm_root, self._nm_max_idx = nodemaputil.parse_data(nm_data)
            if self._nm_root:
                self._nm_docket = docket
            else:
                self._nm_root = self._nm_max_idx = self._nm_docket = None


class InlinedIndexObject(BaseIndexObject):
    def __init__(self, data, inline=0):
        self._data = data
        self._lgt = self._inline_scan(None)
        self._inline_scan(self._lgt)
        self._extra = []

    def _inline_scan(self, lgt):
        off = 0
        if lgt is not None:
            self._offsets = [0] * lgt
        count = 0
        while off <= len(self._data) - self.entry_size:
            start = off + self.big_int_size
            (s,) = struct.unpack(
                b'>i',
                self._data[start : start + self.int_size],
            )
            if lgt is not None:
                self._offsets[count] = off
            count += 1
            off += self.entry_size + s
        if off != len(self._data):
            raise ValueError(b"corrupted data")
        return count

    def __delitem__(self, i):
        if not isinstance(i, slice) or not i.stop == -1 or i.step is not None:
            raise ValueError(b"deleting slices only supports a:-1 with step 1")
        i = i.start
        self._check_index(i)
        self._stripnodes(i)
        if i < self._lgt:
            self._offsets = self._offsets[:i]
            self._lgt = i
            self._extra = []
        else:
            self._extra = self._extra[: i - self._lgt]

    def _calculate_index(self, i):
        return self._offsets[i]


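(A worked sketch of the layout `_inline_scan` walks: each 64-byte v1 entry is followed by its compressed data chunk, whose length is the big-endian int at bytes 8..12 of the entry. Chunk sizes below are hypothetical:)

    entry_size = 64                # struct size of a v1 index entry
    chunk_sizes = [100, 37]        # hypothetical compressed chunk lengths
    off, offsets = 0, []
    for s in chunk_sizes:
        offsets.append(off)        # what _inline_scan records per revision
        off += entry_size + s      # skip the entry plus its inline chunk
    assert offsets == [0, 164]
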
def parse_index2(data, inline, revlogv2=False):
    if not inline:
        cls = IndexObject2 if revlogv2 else IndexObject
        return cls(data), None
    cls = InlinedIndexObject
    return cls(data, inline), (0, data)


def parse_index_cl_v2(data):
    return IndexChangelogV2(data), None


class IndexObject2(IndexObject):
    index_format = revlog_constants.INDEX_ENTRY_V2

    def replace_sidedata_info(
        self,
        rev,
        sidedata_offset,
        sidedata_length,
        offset_flags,
        compression_mode,
    ):
        """
        Replace an existing index entry's sidedata offset and length with
        new ones.
        This cannot be used outside of the context of sidedata rewriting,
        inside the transaction that creates the revision `rev`.
        """
        if rev < 0:
            raise KeyError
        self._check_index(rev)
        if rev < self._lgt:
            msg = b"cannot rewrite entries outside of this transaction"
            raise KeyError(msg)
        else:
            entry = list(self[rev])
            entry[0] = offset_flags
            entry[8] = sidedata_offset
            entry[9] = sidedata_length
            entry[11] = compression_mode
            entry = tuple(entry)
            new = self._pack_entry(rev, entry)
            self._extra[rev - self._lgt] = new

    def _unpack_entry(self, rev, data):
        data = self.index_format.unpack(data)
        entry = data[:10]
        data_comp = data[10] & 3
        sidedata_comp = (data[10] & (3 << 2)) >> 2
        return entry + (data_comp, sidedata_comp)

    def _pack_entry(self, rev, entry):
        data = entry[:10]
        data_comp = entry[10] & 3
        sidedata_comp = (entry[11] & 3) << 2
        data += (data_comp | sidedata_comp,)

        return self.index_format.pack(*data)

    def entry_binary(self, rev):
        """return the raw binary string representing a revision"""
        entry = self[rev]
        return self._pack_entry(rev, entry)

    def pack_header(self, header):
        """pack header information as binary"""
        msg = 'version header should go in the docket, not the index: %d'
        msg %= header
        raise error.ProgrammingError(msg)


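(The two 2-bit compression modes are folded into a single byte, mirroring `_pack_entry`/`_unpack_entry` above; the values are hypothetical:)

    data_comp, sidedata_comp = 2, 1
    packed = (data_comp & 3) | ((sidedata_comp & 3) << 2)
    assert packed & 3 == data_comp            # low two bits: data mode
    assert (packed >> 2) & 3 == sidedata_comp # next two bits: sidedata mode
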
class IndexChangelogV2(IndexObject2):
    index_format = revlog_constants.INDEX_ENTRY_CL_V2

    def _unpack_entry(self, rev, data, r=True):
        items = self.index_format.unpack(data)
        entry = items[:3] + (rev, rev) + items[3:8]
        data_comp = items[8] & 3
        sidedata_comp = (items[8] >> 2) & 3
        return entry + (data_comp, sidedata_comp)

    def _pack_entry(self, rev, entry):
        assert entry[3] == rev, entry[3]
        assert entry[4] == rev, entry[4]
        data = entry[:3] + entry[5:10]
        data_comp = entry[10] & 3
        sidedata_comp = (entry[11] & 3) << 2
        data += (data_comp | sidedata_comp,)
        return self.index_format.pack(*data)


def parse_index_devel_nodemap(data, inline):
    """like parse_index2, but always return a PersistentNodeMapIndexObject"""
    return PersistentNodeMapIndexObject(data), None


def parse_dirstate(dmap, copymap, st):
    parents = [st[:20], st[20:40]]
    # dereference fields so they will be local in loop
    format = b">cllll"
    e_size = struct.calcsize(format)
    pos1 = 40
    l = len(st)

    # the inner loop
    while pos1 < l:
        pos2 = pos1 + e_size
        e = _unpack(b">cllll", st[pos1:pos2])  # a literal here is faster
        pos1 = pos2 + e[4]
        f = st[pos2:pos1]
        if b'\0' in f:
            f, c = f.split(b'\0')
            copymap[f] = c
        dmap[f] = dirstatetuple(*e[:4])
    return parents


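(One v1 record in the format parsed above, as a sketch with a hypothetical entry: after the two 20-byte parents, `>cllll` packs state, mode, size, mtime and the filename length, and the name follows immediately, optionally as b'name\0copysource':)

    import struct

    f = b'renamed.txt'
    rec = struct.pack(b'>cllll', b'n', 0o644, 11, 1600000000, len(f)) + f
    e = struct.unpack(b'>cllll', rec[: struct.calcsize(b'>cllll')])
    assert e[0] == b'n' and e[4] == len(f)   # e[4] tells where the name ends
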
def pack_dirstate(dmap, copymap, pl, now):
    now = int(now)
    cs = stringio()
    write = cs.write
    write(b"".join(pl))
    for f, e in pycompat.iteritems(dmap):
        if e[0] == b'n' and e[3] == now:
            # The file was last modified "simultaneously" with the current
            # write to dirstate (i.e. within the same second for file-
            # systems with a granularity of 1 sec). This commonly happens
            # for at least a couple of files on 'update'.
            # The user could change the file without changing its size
            # within the same second. Invalidate the file's mtime in
            # dirstate, forcing future 'status' calls to compare the
            # contents of the file if the size is the same. This prevents
            # mistakenly treating such files as clean.
            e = dirstatetuple(e[0], e[1], e[2], -1)
            dmap[f] = e

        if f in copymap:
            f = b"%s\0%s" % (f, copymap[f])
        e = _pack(
            b">cllll",
            e.v1_state(),
            e.v1_mode(),
            e.v1_size(),
            e.v1_mtime(),
            len(f),
        )
        write(e)
        write(f)
    return cs.getvalue()
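
(A sketch of the race-avoidance rewrite described in the comment above, with hypothetical values: an entry written in the same second as `now` gets its mtime replaced by -1, which can never match a real stat, so the next status re-reads the file:)

    now = 1600000000
    e = dirstatetuple(b'n', 0o644, 3, now)   # modified within the same second
    e = dirstatetuple(e[0], e[1], e[2], -1)  # what pack_dirstate stores instead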