dirstate-entry: add a `need_delay` method...
marmoute
r48321:ccbabaee default
@@ -1,880 +1,896
/*
 parsers.c - efficient content parsing

 Copyright 2008 Olivia Mackall <olivia@selenic.com> and others

 This software may be used and distributed according to the terms of
 the GNU General Public License, incorporated herein by reference.
*/

#define PY_SSIZE_T_CLEAN
#include <Python.h>
#include <ctype.h>
#include <stddef.h>
#include <string.h>

#include "bitmanipulation.h"
#include "charencode.h"
#include "util.h"

#ifdef IS_PY3K
/* The mapping of Python types is meant to be temporary to get Python
 * 3 to compile. We should remove this once Python 3 support is fully
 * supported and proper types are used in the extensions themselves. */
#define PyInt_Check PyLong_Check
#define PyInt_FromLong PyLong_FromLong
#define PyInt_FromSsize_t PyLong_FromSsize_t
#define PyInt_AsLong PyLong_AsLong
#endif

static const char *const versionerrortext = "Python minor version mismatch";

static const int dirstate_v1_from_p2 = -2;
static const int dirstate_v1_nonnormal = -1;

static PyObject *dict_new_presized(PyObject *self, PyObject *args)
{
        Py_ssize_t expected_size;

        if (!PyArg_ParseTuple(args, "n:make_presized_dict", &expected_size)) {
                return NULL;
        }

        return _dict_new_presized(expected_size);
}

static inline dirstateTupleObject *make_dirstate_tuple(char state, int mode,
                                                       int size, int mtime)
{
        dirstateTupleObject *t =
            PyObject_New(dirstateTupleObject, &dirstateTupleType);
        if (!t) {
                return NULL;
        }
        t->state = state;
        t->mode = mode;
        t->size = size;
        t->mtime = mtime;
        return t;
}

static PyObject *dirstate_tuple_new(PyTypeObject *subtype, PyObject *args,
                                    PyObject *kwds)
{
        /* We do all the initialization here and not a tp_init function because
         * dirstate_tuple is immutable. */
        dirstateTupleObject *t;
        char state;
        int size, mode, mtime;
        if (!PyArg_ParseTuple(args, "ciii", &state, &mode, &size, &mtime)) {
                return NULL;
        }

        t = (dirstateTupleObject *)subtype->tp_alloc(subtype, 1);
        if (!t) {
                return NULL;
        }
        t->state = state;
        t->mode = mode;
        t->size = size;
        t->mtime = mtime;

        return (PyObject *)t;
}

static void dirstate_tuple_dealloc(PyObject *o)
{
        PyObject_Del(o);
}

static Py_ssize_t dirstate_tuple_length(PyObject *o)
{
        return 4;
}

static PyObject *dirstate_tuple_item(PyObject *o, Py_ssize_t i)
{
        dirstateTupleObject *t = (dirstateTupleObject *)o;
        switch (i) {
        case 0:
                return PyBytes_FromStringAndSize(&t->state, 1);
        case 1:
                return PyInt_FromLong(t->mode);
        case 2:
                return PyInt_FromLong(t->size);
        case 3:
                return PyInt_FromLong(t->mtime);
        default:
                PyErr_SetString(PyExc_IndexError, "index out of range");
                return NULL;
        }
}

static PySequenceMethods dirstate_tuple_sq = {
    dirstate_tuple_length, /* sq_length */
    0,                     /* sq_concat */
    0,                     /* sq_repeat */
    dirstate_tuple_item,   /* sq_item */
    0,                     /* sq_ass_item */
    0,                     /* sq_contains */
    0,                     /* sq_inplace_concat */
    0                      /* sq_inplace_repeat */
};

static PyObject *dirstatetuple_v1_state(dirstateTupleObject *self)
{
        return PyBytes_FromStringAndSize(&self->state, 1);
};

static PyObject *dirstatetuple_v1_mode(dirstateTupleObject *self)
{
        return PyInt_FromLong(self->mode);
};

static PyObject *dirstatetuple_v1_size(dirstateTupleObject *self)
{
        return PyInt_FromLong(self->size);
};

static PyObject *dirstatetuple_v1_mtime(dirstateTupleObject *self)
{
        return PyInt_FromLong(self->mtime);
};

static PyObject *dirstatetuple_need_delay(dirstateTupleObject *self,
                                          PyObject *value)
{
        long now;
        if (!pylong_to_long(value, &now)) {
                return NULL;
        }
        if (self->state == 'n' && self->mtime == now) {
                Py_RETURN_TRUE;
        } else {
                Py_RETURN_FALSE;
        }
};

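The new `need_delay` method above is the point of this changeset: a clean (`'n'`) entry whose stored mtime equals the current filesystem timestamp cannot be trusted, because the file could be modified again within the same second without its mtime changing. A minimal sketch of how a caller might use it from Python; the helper function is hypothetical, only `need_delay(now)` itself comes from this patch:

```python
import time

def entries_needing_delay(dmap, now=None):
    # dmap maps paths to dirstatetuple objects; `now` is an integer
    # timestamp, ideally sampled from the filesystem, not the wall clock.
    if now is None:
        now = int(time.time())
    for path, entry in dmap.items():
        # need_delay() is METH_O: it takes a single integer argument and
        # returns True for 'n' entries whose mtime equals `now`.
        if entry.need_delay(now):
            yield path
```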
static PyMethodDef dirstatetuple_methods[] = {
    {"v1_state", (PyCFunction)dirstatetuple_v1_state, METH_NOARGS,
     "return a \"state\" suitable for v1 serialization"},
    {"v1_mode", (PyCFunction)dirstatetuple_v1_mode, METH_NOARGS,
     "return a \"mode\" suitable for v1 serialization"},
    {"v1_size", (PyCFunction)dirstatetuple_v1_size, METH_NOARGS,
     "return a \"size\" suitable for v1 serialization"},
    {"v1_mtime", (PyCFunction)dirstatetuple_v1_mtime, METH_NOARGS,
     "return a \"mtime\" suitable for v1 serialization"},
    {"need_delay", (PyCFunction)dirstatetuple_need_delay, METH_O,
     "True if the stored mtime would be ambiguous with the current time"},
    {NULL} /* Sentinel */
};

static PyObject *dirstatetuple_get_state(dirstateTupleObject *self)
{
        return PyBytes_FromStringAndSize(&self->state, 1);
};

static PyObject *dirstatetuple_get_tracked(dirstateTupleObject *self)
{
        if (self->state == 'a' || self->state == 'm' || self->state == 'n') {
                Py_RETURN_TRUE;
        } else {
                Py_RETURN_FALSE;
        }
};

static PyObject *dirstatetuple_get_added(dirstateTupleObject *self)
{
        if (self->state == 'a') {
                Py_RETURN_TRUE;
        } else {
                Py_RETURN_FALSE;
        }
};

static PyObject *dirstatetuple_get_merged(dirstateTupleObject *self)
{
        if (self->state == 'm') {
                Py_RETURN_TRUE;
        } else {
                Py_RETURN_FALSE;
        }
};

static PyObject *dirstatetuple_get_merged_removed(dirstateTupleObject *self)
{
        if (self->state == 'r' && self->size == dirstate_v1_nonnormal) {
                Py_RETURN_TRUE;
        } else {
                Py_RETURN_FALSE;
        }
};

static PyObject *dirstatetuple_get_from_p2(dirstateTupleObject *self)
{
        if (self->state == 'n' && self->size == dirstate_v1_from_p2) {
                Py_RETURN_TRUE;
        } else {
                Py_RETURN_FALSE;
        }
};

static PyObject *dirstatetuple_get_from_p2_removed(dirstateTupleObject *self)
{
        if (self->state == 'r' && self->size == dirstate_v1_from_p2) {
                Py_RETURN_TRUE;
        } else {
                Py_RETURN_FALSE;
        }
};

static PyObject *dirstatetuple_get_removed(dirstateTupleObject *self)
{
        if (self->state == 'r') {
                Py_RETURN_TRUE;
        } else {
                Py_RETURN_FALSE;
        }
};

static PyGetSetDef dirstatetuple_getset[] = {
    {"state", (getter)dirstatetuple_get_state, NULL, "state", NULL},
    {"tracked", (getter)dirstatetuple_get_tracked, NULL, "tracked", NULL},
    {"added", (getter)dirstatetuple_get_added, NULL, "added", NULL},
    {"merged_removed", (getter)dirstatetuple_get_merged_removed, NULL,
     "merged_removed", NULL},
    {"merged", (getter)dirstatetuple_get_merged, NULL, "merged", NULL},
    {"from_p2_removed", (getter)dirstatetuple_get_from_p2_removed, NULL,
     "from_p2_removed", NULL},
    {"from_p2", (getter)dirstatetuple_get_from_p2, NULL, "from_p2", NULL},
    {"removed", (getter)dirstatetuple_get_removed, NULL, "removed", NULL},
    {NULL} /* Sentinel */
};

PyTypeObject dirstateTupleType = {
    PyVarObject_HEAD_INIT(NULL, 0)      /* header */
    "dirstate_tuple",                   /* tp_name */
    sizeof(dirstateTupleObject),        /* tp_basicsize */
    0,                                  /* tp_itemsize */
    (destructor)dirstate_tuple_dealloc, /* tp_dealloc */
    0,                                  /* tp_print */
    0,                                  /* tp_getattr */
    0,                                  /* tp_setattr */
    0,                                  /* tp_compare */
    0,                                  /* tp_repr */
    0,                                  /* tp_as_number */
    &dirstate_tuple_sq,                 /* tp_as_sequence */
    0,                                  /* tp_as_mapping */
    0,                                  /* tp_hash */
    0,                                  /* tp_call */
    0,                                  /* tp_str */
    0,                                  /* tp_getattro */
    0,                                  /* tp_setattro */
    0,                                  /* tp_as_buffer */
    Py_TPFLAGS_DEFAULT,                 /* tp_flags */
    "dirstate tuple",                   /* tp_doc */
    0,                                  /* tp_traverse */
    0,                                  /* tp_clear */
    0,                                  /* tp_richcompare */
    0,                                  /* tp_weaklistoffset */
    0,                                  /* tp_iter */
    0,                                  /* tp_iternext */
    dirstatetuple_methods,              /* tp_methods */
    0,                                  /* tp_members */
    dirstatetuple_getset,               /* tp_getset */
    0,                                  /* tp_base */
    0,                                  /* tp_dict */
    0,                                  /* tp_descr_get */
    0,                                  /* tp_descr_set */
    0,                                  /* tp_dictoffset */
    0,                                  /* tp_init */
    0,                                  /* tp_alloc */
    dirstate_tuple_new,                 /* tp_new */
};

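The getters above decode the v1 `state`/`size` pair into the boolean properties listed in `dirstatetuple_getset`. As a rough truth table, here is a hypothetical Python sketch (not part of this file) using the sequence protocol defined earlier, where `entry[0]` is the state byte and `entry[2]` the v1 size field:

```python
def describe(entry):
    # Mirrors the C getters: size -1 is dirstate_v1_nonnormal and
    # size -2 is dirstate_v1_from_p2.
    state, size = entry[0], entry[2]
    return {
        'tracked': state in (b'a', b'm', b'n'),
        'added': state == b'a',
        'merged': state == b'm',
        'removed': state == b'r',
        'merged_removed': state == b'r' and size == -1,
        'from_p2': state == b'n' and size == -2,
        'from_p2_removed': state == b'r' and size == -2,
    }
```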
static PyObject *parse_dirstate(PyObject *self, PyObject *args)
{
        PyObject *dmap, *cmap, *parents = NULL, *ret = NULL;
        PyObject *fname = NULL, *cname = NULL, *entry = NULL;
        char state, *cur, *str, *cpos;
        int mode, size, mtime;
        unsigned int flen, pos = 40;
        Py_ssize_t len = 40;
        Py_ssize_t readlen;

        if (!PyArg_ParseTuple(
                args, PY23("O!O!s#:parse_dirstate", "O!O!y#:parse_dirstate"),
                &PyDict_Type, &dmap, &PyDict_Type, &cmap, &str, &readlen)) {
                goto quit;
        }

        len = readlen;

        /* read parents */
        if (len < 40) {
                PyErr_SetString(PyExc_ValueError,
                                "too little data for parents");
                goto quit;
        }

        parents = Py_BuildValue(PY23("s#s#", "y#y#"), str, (Py_ssize_t)20,
                                str + 20, (Py_ssize_t)20);
        if (!parents) {
                goto quit;
        }

        /* read filenames */
        while (pos >= 40 && pos < len) {
                if (pos + 17 > len) {
                        PyErr_SetString(PyExc_ValueError,
                                        "overflow in dirstate");
                        goto quit;
                }
                cur = str + pos;
                /* unpack header */
                state = *cur;
                mode = getbe32(cur + 1);
                size = getbe32(cur + 5);
                mtime = getbe32(cur + 9);
                flen = getbe32(cur + 13);
                pos += 17;
                cur += 17;
                if (flen > len - pos) {
                        PyErr_SetString(PyExc_ValueError,
                                        "overflow in dirstate");
                        goto quit;
                }

                entry =
                    (PyObject *)make_dirstate_tuple(state, mode, size, mtime);
                cpos = memchr(cur, 0, flen);
                if (cpos) {
                        fname = PyBytes_FromStringAndSize(cur, cpos - cur);
                        cname = PyBytes_FromStringAndSize(
                            cpos + 1, flen - (cpos - cur) - 1);
                        if (!fname || !cname ||
                            PyDict_SetItem(cmap, fname, cname) == -1 ||
                            PyDict_SetItem(dmap, fname, entry) == -1) {
                                goto quit;
                        }
                        Py_DECREF(cname);
                } else {
                        fname = PyBytes_FromStringAndSize(cur, flen);
                        if (!fname ||
                            PyDict_SetItem(dmap, fname, entry) == -1) {
                                goto quit;
                        }
                }
                Py_DECREF(fname);
                Py_DECREF(entry);
                fname = cname = entry = NULL;
                pos += flen;
        }

        ret = parents;
        Py_INCREF(ret);
quit:
        Py_XDECREF(fname);
        Py_XDECREF(cname);
        Py_XDECREF(entry);
        Py_XDECREF(parents);
        return ret;
}

383
368 /*
384 /*
369 * Build a set of non-normal and other parent entries from the dirstate dmap
385 * Build a set of non-normal and other parent entries from the dirstate dmap
370 */
386 */
371 static PyObject *nonnormalotherparententries(PyObject *self, PyObject *args)
387 static PyObject *nonnormalotherparententries(PyObject *self, PyObject *args)
372 {
388 {
373 PyObject *dmap, *fname, *v;
389 PyObject *dmap, *fname, *v;
374 PyObject *nonnset = NULL, *otherpset = NULL, *result = NULL;
390 PyObject *nonnset = NULL, *otherpset = NULL, *result = NULL;
375 Py_ssize_t pos;
391 Py_ssize_t pos;
376
392
377 if (!PyArg_ParseTuple(args, "O!:nonnormalentries", &PyDict_Type,
393 if (!PyArg_ParseTuple(args, "O!:nonnormalentries", &PyDict_Type,
378 &dmap)) {
394 &dmap)) {
379 goto bail;
395 goto bail;
380 }
396 }
381
397
382 nonnset = PySet_New(NULL);
398 nonnset = PySet_New(NULL);
383 if (nonnset == NULL) {
399 if (nonnset == NULL) {
384 goto bail;
400 goto bail;
385 }
401 }
386
402
387 otherpset = PySet_New(NULL);
403 otherpset = PySet_New(NULL);
388 if (otherpset == NULL) {
404 if (otherpset == NULL) {
389 goto bail;
405 goto bail;
390 }
406 }
391
407
392 pos = 0;
408 pos = 0;
393 while (PyDict_Next(dmap, &pos, &fname, &v)) {
409 while (PyDict_Next(dmap, &pos, &fname, &v)) {
394 dirstateTupleObject *t;
410 dirstateTupleObject *t;
395 if (!dirstate_tuple_check(v)) {
411 if (!dirstate_tuple_check(v)) {
396 PyErr_SetString(PyExc_TypeError,
412 PyErr_SetString(PyExc_TypeError,
397 "expected a dirstate tuple");
413 "expected a dirstate tuple");
398 goto bail;
414 goto bail;
399 }
415 }
400 t = (dirstateTupleObject *)v;
416 t = (dirstateTupleObject *)v;
401
417
402 if (t->state == 'n' && t->size == -2) {
418 if (t->state == 'n' && t->size == -2) {
403 if (PySet_Add(otherpset, fname) == -1) {
419 if (PySet_Add(otherpset, fname) == -1) {
404 goto bail;
420 goto bail;
405 }
421 }
406 }
422 }
407
423
408 if (t->state == 'n' && t->mtime != -1) {
424 if (t->state == 'n' && t->mtime != -1) {
409 continue;
425 continue;
410 }
426 }
411 if (PySet_Add(nonnset, fname) == -1) {
427 if (PySet_Add(nonnset, fname) == -1) {
412 goto bail;
428 goto bail;
413 }
429 }
414 }
430 }
415
431
416 result = Py_BuildValue("(OO)", nonnset, otherpset);
432 result = Py_BuildValue("(OO)", nonnset, otherpset);
417 if (result == NULL) {
433 if (result == NULL) {
418 goto bail;
434 goto bail;
419 }
435 }
420 Py_DECREF(nonnset);
436 Py_DECREF(nonnset);
421 Py_DECREF(otherpset);
437 Py_DECREF(otherpset);
422 return result;
438 return result;
423 bail:
439 bail:
424 Py_XDECREF(nonnset);
440 Py_XDECREF(nonnset);
425 Py_XDECREF(otherpset);
441 Py_XDECREF(otherpset);
426 Py_XDECREF(result);
442 Py_XDECREF(result);
427 return NULL;
443 return NULL;
428 }
444 }
429
445
430 /*
446 /*
431 * Efficiently pack a dirstate object into its on-disk format.
447 * Efficiently pack a dirstate object into its on-disk format.
432 */
448 */
433 static PyObject *pack_dirstate(PyObject *self, PyObject *args)
449 static PyObject *pack_dirstate(PyObject *self, PyObject *args)
434 {
450 {
435 PyObject *packobj = NULL;
451 PyObject *packobj = NULL;
436 PyObject *map, *copymap, *pl, *mtime_unset = NULL;
452 PyObject *map, *copymap, *pl, *mtime_unset = NULL;
437 Py_ssize_t nbytes, pos, l;
453 Py_ssize_t nbytes, pos, l;
438 PyObject *k, *v = NULL, *pn;
454 PyObject *k, *v = NULL, *pn;
439 char *p, *s;
455 char *p, *s;
440 int now;
456 int now;
441
457
442 if (!PyArg_ParseTuple(args, "O!O!O!i:pack_dirstate", &PyDict_Type, &map,
458 if (!PyArg_ParseTuple(args, "O!O!O!i:pack_dirstate", &PyDict_Type, &map,
443 &PyDict_Type, &copymap, &PyTuple_Type, &pl,
459 &PyDict_Type, &copymap, &PyTuple_Type, &pl,
444 &now)) {
460 &now)) {
445 return NULL;
461 return NULL;
446 }
462 }
447
463
448 if (PyTuple_Size(pl) != 2) {
464 if (PyTuple_Size(pl) != 2) {
449 PyErr_SetString(PyExc_TypeError, "expected 2-element tuple");
465 PyErr_SetString(PyExc_TypeError, "expected 2-element tuple");
450 return NULL;
466 return NULL;
451 }
467 }
452
468
453 /* Figure out how much we need to allocate. */
469 /* Figure out how much we need to allocate. */
454 for (nbytes = 40, pos = 0; PyDict_Next(map, &pos, &k, &v);) {
470 for (nbytes = 40, pos = 0; PyDict_Next(map, &pos, &k, &v);) {
455 PyObject *c;
471 PyObject *c;
456 if (!PyBytes_Check(k)) {
472 if (!PyBytes_Check(k)) {
457 PyErr_SetString(PyExc_TypeError, "expected string key");
473 PyErr_SetString(PyExc_TypeError, "expected string key");
458 goto bail;
474 goto bail;
459 }
475 }
460 nbytes += PyBytes_GET_SIZE(k) + 17;
476 nbytes += PyBytes_GET_SIZE(k) + 17;
461 c = PyDict_GetItem(copymap, k);
477 c = PyDict_GetItem(copymap, k);
462 if (c) {
478 if (c) {
463 if (!PyBytes_Check(c)) {
479 if (!PyBytes_Check(c)) {
464 PyErr_SetString(PyExc_TypeError,
480 PyErr_SetString(PyExc_TypeError,
465 "expected string key");
481 "expected string key");
466 goto bail;
482 goto bail;
467 }
483 }
468 nbytes += PyBytes_GET_SIZE(c) + 1;
484 nbytes += PyBytes_GET_SIZE(c) + 1;
469 }
485 }
470 }
486 }
471
487
472 packobj = PyBytes_FromStringAndSize(NULL, nbytes);
488 packobj = PyBytes_FromStringAndSize(NULL, nbytes);
473 if (packobj == NULL) {
489 if (packobj == NULL) {
474 goto bail;
490 goto bail;
475 }
491 }
476
492
477 p = PyBytes_AS_STRING(packobj);
493 p = PyBytes_AS_STRING(packobj);
478
494
479 pn = PyTuple_GET_ITEM(pl, 0);
495 pn = PyTuple_GET_ITEM(pl, 0);
480 if (PyBytes_AsStringAndSize(pn, &s, &l) == -1 || l != 20) {
496 if (PyBytes_AsStringAndSize(pn, &s, &l) == -1 || l != 20) {
481 PyErr_SetString(PyExc_TypeError, "expected a 20-byte hash");
497 PyErr_SetString(PyExc_TypeError, "expected a 20-byte hash");
482 goto bail;
498 goto bail;
483 }
499 }
484 memcpy(p, s, l);
500 memcpy(p, s, l);
485 p += 20;
501 p += 20;
486 pn = PyTuple_GET_ITEM(pl, 1);
502 pn = PyTuple_GET_ITEM(pl, 1);
487 if (PyBytes_AsStringAndSize(pn, &s, &l) == -1 || l != 20) {
503 if (PyBytes_AsStringAndSize(pn, &s, &l) == -1 || l != 20) {
488 PyErr_SetString(PyExc_TypeError, "expected a 20-byte hash");
504 PyErr_SetString(PyExc_TypeError, "expected a 20-byte hash");
489 goto bail;
505 goto bail;
490 }
506 }
491 memcpy(p, s, l);
507 memcpy(p, s, l);
492 p += 20;
508 p += 20;
493
509
494 for (pos = 0; PyDict_Next(map, &pos, &k, &v);) {
510 for (pos = 0; PyDict_Next(map, &pos, &k, &v);) {
495 dirstateTupleObject *tuple;
511 dirstateTupleObject *tuple;
496 char state;
512 char state;
497 int mode, size, mtime;
513 int mode, size, mtime;
498 Py_ssize_t len, l;
514 Py_ssize_t len, l;
499 PyObject *o;
515 PyObject *o;
500 char *t;
516 char *t;
501
517
502 if (!dirstate_tuple_check(v)) {
518 if (!dirstate_tuple_check(v)) {
503 PyErr_SetString(PyExc_TypeError,
519 PyErr_SetString(PyExc_TypeError,
504 "expected a dirstate tuple");
520 "expected a dirstate tuple");
505 goto bail;
521 goto bail;
506 }
522 }
507 tuple = (dirstateTupleObject *)v;
523 tuple = (dirstateTupleObject *)v;
508
524
509 state = tuple->state;
525 state = tuple->state;
510 mode = tuple->mode;
526 mode = tuple->mode;
511 size = tuple->size;
527 size = tuple->size;
512 mtime = tuple->mtime;
528 mtime = tuple->mtime;
513 if (state == 'n' && mtime == now) {
529 if (state == 'n' && mtime == now) {
514 /* See pure/parsers.py:pack_dirstate for why we do
530 /* See pure/parsers.py:pack_dirstate for why we do
515 * this. */
531 * this. */
516 mtime = -1;
532 mtime = -1;
517 mtime_unset = (PyObject *)make_dirstate_tuple(
533 mtime_unset = (PyObject *)make_dirstate_tuple(
518 state, mode, size, mtime);
534 state, mode, size, mtime);
519 if (!mtime_unset) {
535 if (!mtime_unset) {
520 goto bail;
536 goto bail;
521 }
537 }
522 if (PyDict_SetItem(map, k, mtime_unset) == -1) {
538 if (PyDict_SetItem(map, k, mtime_unset) == -1) {
523 goto bail;
539 goto bail;
524 }
540 }
525 Py_DECREF(mtime_unset);
541 Py_DECREF(mtime_unset);
526 mtime_unset = NULL;
542 mtime_unset = NULL;
527 }
543 }
528 *p++ = state;
544 *p++ = state;
529 putbe32((uint32_t)mode, p);
545 putbe32((uint32_t)mode, p);
530 putbe32((uint32_t)size, p + 4);
546 putbe32((uint32_t)size, p + 4);
531 putbe32((uint32_t)mtime, p + 8);
547 putbe32((uint32_t)mtime, p + 8);
532 t = p + 12;
548 t = p + 12;
533 p += 16;
549 p += 16;
534 len = PyBytes_GET_SIZE(k);
550 len = PyBytes_GET_SIZE(k);
535 memcpy(p, PyBytes_AS_STRING(k), len);
551 memcpy(p, PyBytes_AS_STRING(k), len);
536 p += len;
552 p += len;
537 o = PyDict_GetItem(copymap, k);
553 o = PyDict_GetItem(copymap, k);
538 if (o) {
554 if (o) {
539 *p++ = '\0';
555 *p++ = '\0';
540 l = PyBytes_GET_SIZE(o);
556 l = PyBytes_GET_SIZE(o);
541 memcpy(p, PyBytes_AS_STRING(o), l);
557 memcpy(p, PyBytes_AS_STRING(o), l);
542 p += l;
558 p += l;
543 len += l + 1;
559 len += l + 1;
544 }
560 }
545 putbe32((uint32_t)len, t);
561 putbe32((uint32_t)len, t);
546 }
562 }
547
563
548 pos = p - PyBytes_AS_STRING(packobj);
564 pos = p - PyBytes_AS_STRING(packobj);
549 if (pos != nbytes) {
565 if (pos != nbytes) {
550 PyErr_Format(PyExc_SystemError, "bad dirstate size: %ld != %ld",
566 PyErr_Format(PyExc_SystemError, "bad dirstate size: %ld != %ld",
551 (long)pos, (long)nbytes);
567 (long)pos, (long)nbytes);
552 goto bail;
568 goto bail;
553 }
569 }
554
570
555 return packobj;
571 return packobj;
556 bail:
572 bail:
557 Py_XDECREF(mtime_unset);
573 Py_XDECREF(mtime_unset);
558 Py_XDECREF(packobj);
574 Py_XDECREF(packobj);
559 Py_XDECREF(v);
575 Py_XDECREF(v);
560 return NULL;
576 return NULL;
561 }
577 }
562
578
563 #define BUMPED_FIX 1
#define BUMPED_FIX 1
#define USING_SHA_256 2
#define FM1_HEADER_SIZE (4 + 8 + 2 + 2 + 1 + 1 + 1)

static PyObject *readshas(const char *source, unsigned char num,
                          Py_ssize_t hashwidth)
{
        int i;
        PyObject *list = PyTuple_New(num);
        if (list == NULL) {
                return NULL;
        }
        for (i = 0; i < num; i++) {
                PyObject *hash = PyBytes_FromStringAndSize(source, hashwidth);
                if (hash == NULL) {
                        Py_DECREF(list);
                        return NULL;
                }
                PyTuple_SET_ITEM(list, i, hash);
                source += hashwidth;
        }
        return list;
}

static PyObject *fm1readmarker(const char *databegin, const char *dataend,
                               uint32_t *msize)
{
        const char *data = databegin;
        const char *meta;

        double mtime;
        int16_t tz;
        uint16_t flags;
        unsigned char nsuccs, nparents, nmetadata;
        Py_ssize_t hashwidth = 20;

        PyObject *prec = NULL, *parents = NULL, *succs = NULL;
        PyObject *metadata = NULL, *ret = NULL;
        int i;

        if (data + FM1_HEADER_SIZE > dataend) {
                goto overflow;
        }

        *msize = getbe32(data);
        data += 4;
        mtime = getbefloat64(data);
        data += 8;
        tz = getbeint16(data);
        data += 2;
        flags = getbeuint16(data);
        data += 2;

        if (flags & USING_SHA_256) {
                hashwidth = 32;
        }

        nsuccs = (unsigned char)(*data++);
        nparents = (unsigned char)(*data++);
        nmetadata = (unsigned char)(*data++);

        if (databegin + *msize > dataend) {
                goto overflow;
        }
        dataend = databegin + *msize; /* narrow down to marker size */

        if (data + hashwidth > dataend) {
                goto overflow;
        }
        prec = PyBytes_FromStringAndSize(data, hashwidth);
        data += hashwidth;
        if (prec == NULL) {
                goto bail;
        }

        if (data + nsuccs * hashwidth > dataend) {
                goto overflow;
        }
        succs = readshas(data, nsuccs, hashwidth);
        if (succs == NULL) {
                goto bail;
        }
        data += nsuccs * hashwidth;

        if (nparents == 1 || nparents == 2) {
                if (data + nparents * hashwidth > dataend) {
                        goto overflow;
                }
                parents = readshas(data, nparents, hashwidth);
                if (parents == NULL) {
                        goto bail;
                }
                data += nparents * hashwidth;
        } else {
                parents = Py_None;
                Py_INCREF(parents);
        }

        if (data + 2 * nmetadata > dataend) {
                goto overflow;
        }
        meta = data + (2 * nmetadata);
        metadata = PyTuple_New(nmetadata);
        if (metadata == NULL) {
                goto bail;
        }
        for (i = 0; i < nmetadata; i++) {
                PyObject *tmp, *left = NULL, *right = NULL;
                Py_ssize_t leftsize = (unsigned char)(*data++);
                Py_ssize_t rightsize = (unsigned char)(*data++);
                if (meta + leftsize + rightsize > dataend) {
                        goto overflow;
                }
                left = PyBytes_FromStringAndSize(meta, leftsize);
                meta += leftsize;
                right = PyBytes_FromStringAndSize(meta, rightsize);
                meta += rightsize;
                tmp = PyTuple_New(2);
                if (!left || !right || !tmp) {
                        Py_XDECREF(left);
                        Py_XDECREF(right);
                        Py_XDECREF(tmp);
                        goto bail;
                }
                PyTuple_SET_ITEM(tmp, 0, left);
                PyTuple_SET_ITEM(tmp, 1, right);
                PyTuple_SET_ITEM(metadata, i, tmp);
        }
        ret = Py_BuildValue("(OOHO(di)O)", prec, succs, flags, metadata, mtime,
                            (int)tz * 60, parents);
        goto bail; /* return successfully */

overflow:
        PyErr_SetString(PyExc_ValueError, "overflow in obsstore");
bail:
        Py_XDECREF(prec);
        Py_XDECREF(succs);
        Py_XDECREF(metadata);
        Py_XDECREF(parents);
        return ret;
}

static PyObject *fm1readmarkers(PyObject *self, PyObject *args)
{
        const char *data, *dataend;
        Py_ssize_t datalen, offset, stop;
        PyObject *markers = NULL;

        if (!PyArg_ParseTuple(args, PY23("s#nn", "y#nn"), &data, &datalen,
                              &offset, &stop)) {
                return NULL;
        }
        if (offset < 0) {
                PyErr_SetString(PyExc_ValueError,
                                "invalid negative offset in fm1readmarkers");
                return NULL;
        }
        if (stop > datalen) {
                PyErr_SetString(
                    PyExc_ValueError,
                    "stop longer than data length in fm1readmarkers");
                return NULL;
        }
        dataend = data + datalen;
        data += offset;
        markers = PyList_New(0);
        if (!markers) {
                return NULL;
        }
        while (offset < stop) {
                uint32_t msize;
                int error;
                PyObject *record = fm1readmarker(data, dataend, &msize);
                if (!record) {
                        goto bail;
                }
                error = PyList_Append(markers, record);
                Py_DECREF(record);
                if (error) {
                        goto bail;
                }
                data += msize;
                offset += msize;
        }
        return markers;
bail:
        Py_DECREF(markers);
        return NULL;
}

static char parsers_doc[] = "Efficient content parsing.";

PyObject *encodedir(PyObject *self, PyObject *args);
PyObject *pathencode(PyObject *self, PyObject *args);
PyObject *lowerencode(PyObject *self, PyObject *args);
PyObject *parse_index2(PyObject *self, PyObject *args, PyObject *kwargs);

static PyMethodDef methods[] = {
    {"pack_dirstate", pack_dirstate, METH_VARARGS, "pack a dirstate\n"},
    {"nonnormalotherparententries", nonnormalotherparententries, METH_VARARGS,
     "create a set containing non-normal and other parent entries of given "
     "dirstate\n"},
    {"parse_dirstate", parse_dirstate, METH_VARARGS, "parse a dirstate\n"},
    {"parse_index2", (PyCFunction)parse_index2, METH_VARARGS | METH_KEYWORDS,
     "parse a revlog index\n"},
    {"isasciistr", isasciistr, METH_VARARGS, "check if an ASCII string\n"},
    {"asciilower", asciilower, METH_VARARGS, "lowercase an ASCII string\n"},
    {"asciiupper", asciiupper, METH_VARARGS, "uppercase an ASCII string\n"},
    {"dict_new_presized", dict_new_presized, METH_VARARGS,
     "construct a dict with an expected size\n"},
    {"make_file_foldmap", make_file_foldmap, METH_VARARGS,
     "make file foldmap\n"},
    {"jsonescapeu8fast", jsonescapeu8fast, METH_VARARGS,
     "escape a UTF-8 byte string to JSON (fast path)\n"},
    {"encodedir", encodedir, METH_VARARGS, "encodedir a path\n"},
    {"pathencode", pathencode, METH_VARARGS, "fncache-encode a path\n"},
    {"lowerencode", lowerencode, METH_VARARGS, "lower-encode a path\n"},
    {"fm1readmarkers", fm1readmarkers, METH_VARARGS,
     "parse v1 obsolete markers\n"},
    {NULL, NULL}};

void dirs_module_init(PyObject *mod);
void manifest_module_init(PyObject *mod);
void revlog_module_init(PyObject *mod);

static const int version = 20;

static void module_init(PyObject *mod)
{
        PyObject *capsule = NULL;
        PyModule_AddIntConstant(mod, "version", version);

        /* This module constant has two purposes. First, it lets us unit test
         * the ImportError raised without hard-coding any error text. This
         * means we can change the text in the future without breaking tests,
         * even across changesets without a recompile. Second, its presence
         * can be used to determine whether the version-checking logic is
         * present, which also helps in testing across changesets without a
         * recompile. Note that this means the pure-Python version of parsers
         * should not have this module constant. */
        PyModule_AddStringConstant(mod, "versionerrortext", versionerrortext);

        dirs_module_init(mod);
        manifest_module_init(mod);
        revlog_module_init(mod);

        capsule = PyCapsule_New(
            make_dirstate_tuple,
            "mercurial.cext.parsers.make_dirstate_tuple_CAPI", NULL);
        if (capsule != NULL)
                PyModule_AddObject(mod, "make_dirstate_tuple_CAPI", capsule);

        if (PyType_Ready(&dirstateTupleType) < 0) {
                return;
        }
        Py_INCREF(&dirstateTupleType);
        PyModule_AddObject(mod, "dirstatetuple",
                           (PyObject *)&dirstateTupleType);
}

static int check_python_version(void)
{
        PyObject *sys = PyImport_ImportModule("sys"), *ver;
        long hexversion;
        if (!sys) {
                return -1;
        }
        ver = PyObject_GetAttrString(sys, "hexversion");
        Py_DECREF(sys);
        if (!ver) {
                return -1;
        }
        hexversion = PyInt_AsLong(ver);
        Py_DECREF(ver);
        /* sys.hexversion is a 32-bit number by default, so the -1 case
         * should only occur in unusual circumstances (e.g. if sys.hexversion
         * is manually set to an invalid value). */
        if ((hexversion == -1) || (hexversion >> 16 != PY_VERSION_HEX >> 16)) {
                PyErr_Format(PyExc_ImportError,
                             "%s: The Mercurial extension "
                             "modules were compiled with Python " PY_VERSION
                             ", but "
                             "Mercurial is currently using Python with "
                             "sys.hexversion=%ld: "
                             "Python %s\n at: %s",
                             versionerrortext, hexversion, Py_GetVersion(),
                             Py_GetProgramFullPath());
                return -1;
        }
        return 0;
}

#ifdef IS_PY3K
static struct PyModuleDef parsers_module = {PyModuleDef_HEAD_INIT, "parsers",
                                            parsers_doc, -1, methods};

PyMODINIT_FUNC PyInit_parsers(void)
{
        PyObject *mod;

        if (check_python_version() == -1)
                return NULL;
        mod = PyModule_Create(&parsers_module);
        module_init(mod);
        return mod;
}
#else
PyMODINIT_FUNC initparsers(void)
{
        PyObject *mod;

        if (check_python_version() == -1) {
                return;
        }
        mod = Py_InitModule3("parsers", methods, parsers_doc);
        module_init(mod);
}
#endif
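As the comment in `module_init` explains, both constants are observable from Python, which is what the test suite relies on. Illustrative:

```python
from mercurial.cext import parsers

# `version` (20 here) is bumped whenever the C/Python contract changes;
# `versionerrortext` is the substring tests look for in the ImportError.
print(parsers.version)           # -> 20
print(parsers.versionerrortext)  # -> Python minor version mismatch
```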
@@ -1,1437 +1,1437
# dirstate.py - working directory tracking for mercurial
#
# Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import collections
import contextlib
import errno
import os
import stat

from .i18n import _
from .pycompat import delattr

from hgdemandimport import tracing

from . import (
    dirstatemap,
    encoding,
    error,
    match as matchmod,
    pathutil,
    policy,
    pycompat,
    scmutil,
    sparse,
    util,
)

from .interfaces import (
    dirstate as intdirstate,
    util as interfaceutil,
)

parsers = policy.importmod('parsers')
rustmod = policy.importrust('dirstate')

SUPPORTS_DIRSTATE_V2 = rustmod is not None

propertycache = util.propertycache
filecache = scmutil.filecache
_rangemask = dirstatemap.rangemask

dirstatetuple = parsers.dirstatetuple


class repocache(filecache):
    """filecache for files in .hg/"""

    def join(self, obj, fname):
        return obj._opener.join(fname)


class rootcache(filecache):
    """filecache for files in the repository root"""

    def join(self, obj, fname):
        return obj._join(fname)


def _getfsnow(vfs):
    '''Get "now" timestamp on filesystem'''
    tmpfd, tmpname = vfs.mkstemp()
    try:
        return os.fstat(tmpfd)[stat.ST_MTIME]
    finally:
        os.close(tmpfd)
        vfs.unlink(tmpname)

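Note that `_getfsnow` deliberately stats a freshly created temporary file instead of calling `time.time()`: the dirstate's mtime comparisons, including the new `need_delay`, must use the clock of the filesystem that stamps the files, which may differ from the local system clock (e.g. on network mounts). The same trick in standalone form (hypothetical helper, not from this file):

```python
import os
import stat
import tempfile

def fs_now(directory):
    # Create a file and read its mtime back, so we observe the clock of
    # the filesystem holding `directory`, not the local wall clock.
    fd, name = tempfile.mkstemp(dir=directory)
    try:
        return os.fstat(fd)[stat.ST_MTIME]
    finally:
        os.close(fd)
        os.unlink(name)
```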

@interfaceutil.implementer(intdirstate.idirstate)
class dirstate(object):
    def __init__(
        self,
        opener,
        ui,
        root,
        validate,
        sparsematchfn,
        nodeconstants,
        use_dirstate_v2,
    ):
        """Create a new dirstate object.

        opener is an open()-like callable that can be used to open the
        dirstate file; root is the root of the directory tracked by
        the dirstate.
        """
        self._use_dirstate_v2 = use_dirstate_v2
        self._nodeconstants = nodeconstants
        self._opener = opener
        self._validate = validate
        self._root = root
        self._sparsematchfn = sparsematchfn
        # ntpath.join(root, '') of Python 2.7.9 does not add sep if root is
        # UNC path pointing to root share (issue4557)
        self._rootdir = pathutil.normasprefix(root)
        self._dirty = False
        self._lastnormaltime = 0
        self._ui = ui
        self._filecache = {}
        self._parentwriters = 0
        self._filename = b'dirstate'
        self._pendingfilename = b'%s.pending' % self._filename
        self._plchangecallbacks = {}
        self._origpl = None
        self._updatedfiles = set()
        self._mapcls = dirstatemap.dirstatemap
        # Access and cache cwd early, so we don't access it for the first time
        # after a working-copy update caused it to not exist (accessing it then
        # raises an exception).
        self._cwd

    def prefetch_parents(self):
        """make sure the parents are loaded

        Used to avoid a race condition.
        """
        self._pl

    @contextlib.contextmanager
    def parentchange(self):
        """Context manager for handling dirstate parents.

        If an exception occurs in the scope of the context manager,
        the incoherent dirstate won't be written when wlock is
        released.
        """
        self._parentwriters += 1
        yield
        # Typically we want the "undo" step of a context manager in a
        # finally block so it happens even when an exception
        # occurs. In this case, however, we only want to decrement
        # parentwriters if the code in the with statement exits
        # normally, so we don't have a try/finally here on purpose.
        self._parentwriters -= 1

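Typical call pattern (illustrative; `repo`, `newnode`, and `setparents` are outside this excerpt): any code that rewrites the working-copy parents wraps the whole update, so an exception aborts the pending dirstate write instead of persisting a half-updated state.

```python
with repo.dirstate.parentchange():
    repo.dirstate.setparents(newnode)
    # ... adjust individual file entries here ...
```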
    def pendingparentchange(self):
        """Returns true if the dirstate is in the middle of a set of changes
        that modify the dirstate parent.
        """
        return self._parentwriters > 0

    @propertycache
    def _map(self):
        """Return the dirstate contents (see documentation for dirstatemap)."""
        self._map = self._mapcls(
            self._ui,
            self._opener,
            self._root,
            self._nodeconstants,
            self._use_dirstate_v2,
        )
        return self._map

    @property
    def _sparsematcher(self):
        """The matcher for the sparse checkout.

        The working directory may not include every file from a manifest. The
        matcher obtained by this property will match a path if it is to be
        included in the working directory.
        """
        # TODO there is potential to cache this property. For now, the matcher
        # is resolved on every access. (But the called function does use a
        # cache to keep the lookup fast.)
        return self._sparsematchfn()

    @repocache(b'branch')
    def _branch(self):
        try:
            return self._opener.read(b"branch").strip() or b"default"
        except IOError as inst:
            if inst.errno != errno.ENOENT:
                raise
            return b"default"

    @property
    def _pl(self):
        return self._map.parents()

    def hasdir(self, d):
        return self._map.hastrackeddir(d)

    @rootcache(b'.hgignore')
    def _ignore(self):
        files = self._ignorefiles()
        if not files:
            return matchmod.never()

        pats = [b'include:%s' % f for f in files]
        return matchmod.match(self._root, b'', [], pats, warn=self._ui.warn)

    @propertycache
    def _slash(self):
        return self._ui.configbool(b'ui', b'slash') and pycompat.ossep != b'/'

    @propertycache
    def _checklink(self):
        return util.checklink(self._root)

    @propertycache
    def _checkexec(self):
        return bool(util.checkexec(self._root))

    @propertycache
    def _checkcase(self):
        return not util.fscasesensitive(self._join(b'.hg'))

    def _join(self, f):
        # much faster than os.path.join()
        # it's safe because f is always a relative path
        return self._rootdir + f

    def flagfunc(self, buildfallback):
        if self._checklink and self._checkexec:

            def f(x):
                try:
                    st = os.lstat(self._join(x))
                    if util.statislink(st):
                        return b'l'
                    if util.statisexec(st):
                        return b'x'
                except OSError:
                    pass
                return b''

            return f

        fallback = buildfallback()
235 fallback = buildfallback()
236 if self._checklink:
236 if self._checklink:
237
237
238 def f(x):
238 def f(x):
239 if os.path.islink(self._join(x)):
239 if os.path.islink(self._join(x)):
240 return b'l'
240 return b'l'
241 if b'x' in fallback(x):
241 if b'x' in fallback(x):
242 return b'x'
242 return b'x'
243 return b''
243 return b''
244
244
245 return f
245 return f
246 if self._checkexec:
246 if self._checkexec:
247
247
248 def f(x):
248 def f(x):
249 if b'l' in fallback(x):
249 if b'l' in fallback(x):
250 return b'l'
250 return b'l'
251 if util.isexec(self._join(x)):
251 if util.isexec(self._join(x)):
252 return b'x'
252 return b'x'
253 return b''
253 return b''
254
254
255 return f
255 return f
256 else:
256 else:
257 return fallback
257 return fallback
258
258
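A sketch of how a caller might supply `buildfallback` (the manifest data is
hypothetical); when the filesystem supports both symlinks and the exec bit,
the fallback is never built or consulted::

    def buildfallback():
        manifest_flags = {b'tools/run.sh': b'x'}  # hypothetical data
        return lambda path: manifest_flags.get(path, b'')

    flagof = dirstate.flagfunc(buildfallback)
    flagof(b'tools/run.sh')  # b'l', b'x' or b'', depending on disk state
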
259 @propertycache
259 @propertycache
260 def _cwd(self):
260 def _cwd(self):
261 # internal config: ui.forcecwd
261 # internal config: ui.forcecwd
262 forcecwd = self._ui.config(b'ui', b'forcecwd')
262 forcecwd = self._ui.config(b'ui', b'forcecwd')
263 if forcecwd:
263 if forcecwd:
264 return forcecwd
264 return forcecwd
265 return encoding.getcwd()
265 return encoding.getcwd()
266
266
267 def getcwd(self):
267 def getcwd(self):
268 """Return the path from which a canonical path is calculated.
268 """Return the path from which a canonical path is calculated.
269
269
270 This path should be used to resolve file patterns or to convert
270 This path should be used to resolve file patterns or to convert
271 canonical paths back to file paths for display. It shouldn't be
271 canonical paths back to file paths for display. It shouldn't be
272 used to get real file paths. Use vfs functions instead.
272 used to get real file paths. Use vfs functions instead.
273 """
273 """
274 cwd = self._cwd
274 cwd = self._cwd
275 if cwd == self._root:
275 if cwd == self._root:
276 return b''
276 return b''
277 # self._root ends with a path separator if self._root is '/' or 'C:\'
277 # self._root ends with a path separator if self._root is '/' or 'C:\'
278 rootsep = self._root
278 rootsep = self._root
279 if not util.endswithsep(rootsep):
279 if not util.endswithsep(rootsep):
280 rootsep += pycompat.ossep
280 rootsep += pycompat.ossep
281 if cwd.startswith(rootsep):
281 if cwd.startswith(rootsep):
282 return cwd[len(rootsep) :]
282 return cwd[len(rootsep) :]
283 else:
283 else:
284 # we're outside the repo. return an absolute path.
284 # we're outside the repo. return an absolute path.
285 return cwd
285 return cwd
286
286
287 def pathto(self, f, cwd=None):
287 def pathto(self, f, cwd=None):
288 if cwd is None:
288 if cwd is None:
289 cwd = self.getcwd()
289 cwd = self.getcwd()
290 path = util.pathto(self._root, cwd, f)
290 path = util.pathto(self._root, cwd, f)
291 if self._slash:
291 if self._slash:
292 return util.pconvert(path)
292 return util.pconvert(path)
293 return path
293 return path
294
294
295 def __getitem__(self, key):
295 def __getitem__(self, key):
296 """Return the current state of key (a filename) in the dirstate.
296 """Return the current state of key (a filename) in the dirstate.
297
297
298 States are:
298 States are:
299 n normal
299 n normal
300 m needs merging
300 m needs merging
301 r marked for removal
301 r marked for removal
302 a marked for addition
302 a marked for addition
303 ? not tracked
303 ? not tracked
304
304
305 XXX The "state" is a bit obscure to be in the "public" API. we should
305 XXX The "state" is a bit obscure to be in the "public" API. we should
306 consider migrating all user of this to going through the dirstate entry
306 consider migrating all user of this to going through the dirstate entry
307 instead.
307 instead.
308 """
308 """
309 entry = self._map.get(key)
309 entry = self._map.get(key)
310 if entry is not None:
310 if entry is not None:
311 return entry.state
311 return entry.state
312 return b'?'
312 return b'?'
313
313
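For illustration, assuming the dirstate tracks a clean file `a.txt`
(hypothetical name), lookups never raise KeyError::

    dirstate[b'a.txt']    # b'n' (normal)
    dirstate[b'nothere']  # b'?' (not tracked)
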
314 def __contains__(self, key):
314 def __contains__(self, key):
315 return key in self._map
315 return key in self._map
316
316
317 def __iter__(self):
317 def __iter__(self):
318 return iter(sorted(self._map))
318 return iter(sorted(self._map))
319
319
320 def items(self):
320 def items(self):
321 return pycompat.iteritems(self._map)
321 return pycompat.iteritems(self._map)
322
322
323 iteritems = items
323 iteritems = items
324
324
325 def directories(self):
325 def directories(self):
326 return self._map.directories()
326 return self._map.directories()
327
327
328 def parents(self):
328 def parents(self):
329 return [self._validate(p) for p in self._pl]
329 return [self._validate(p) for p in self._pl]
330
330
331 def p1(self):
331 def p1(self):
332 return self._validate(self._pl[0])
332 return self._validate(self._pl[0])
333
333
334 def p2(self):
334 def p2(self):
335 return self._validate(self._pl[1])
335 return self._validate(self._pl[1])
336
336
337 @property
337 @property
338 def in_merge(self):
338 def in_merge(self):
339 """True if a merge is in progress"""
339 """True if a merge is in progress"""
340 return self._pl[1] != self._nodeconstants.nullid
340 return self._pl[1] != self._nodeconstants.nullid
341
341
342 def branch(self):
342 def branch(self):
343 return encoding.tolocal(self._branch)
343 return encoding.tolocal(self._branch)
344
344
345 def setparents(self, p1, p2=None):
345 def setparents(self, p1, p2=None):
346 """Set dirstate parents to p1 and p2.
346 """Set dirstate parents to p1 and p2.
347
347
348 When moving from two parents to one, "merged" entries are
348 When moving from two parents to one, "merged" entries are
349 adjusted to normal, and previous copy records are discarded and
349 adjusted to normal, and previous copy records are discarded and
350 returned by the call.
350 returned by the call.
351
351
352 See localrepo.setparents()
352 See localrepo.setparents()
353 """
353 """
354 if p2 is None:
354 if p2 is None:
355 p2 = self._nodeconstants.nullid
355 p2 = self._nodeconstants.nullid
356 if self._parentwriters == 0:
356 if self._parentwriters == 0:
357 raise ValueError(
357 raise ValueError(
358 b"cannot set dirstate parent outside of "
358 b"cannot set dirstate parent outside of "
359 b"dirstate.parentchange context manager"
359 b"dirstate.parentchange context manager"
360 )
360 )
361
361
362 self._dirty = True
362 self._dirty = True
363 oldp2 = self._pl[1]
363 oldp2 = self._pl[1]
364 if self._origpl is None:
364 if self._origpl is None:
365 self._origpl = self._pl
365 self._origpl = self._pl
366 self._map.setparents(p1, p2)
366 self._map.setparents(p1, p2)
367 copies = {}
367 copies = {}
368 if (
368 if (
369 oldp2 != self._nodeconstants.nullid
369 oldp2 != self._nodeconstants.nullid
370 and p2 == self._nodeconstants.nullid
370 and p2 == self._nodeconstants.nullid
371 ):
371 ):
372 candidatefiles = self._map.non_normal_or_other_parent_paths()
372 candidatefiles = self._map.non_normal_or_other_parent_paths()
373
373
374 for f in candidatefiles:
374 for f in candidatefiles:
375 s = self._map.get(f)
375 s = self._map.get(f)
376 if s is None:
376 if s is None:
377 continue
377 continue
378
378
379 # Discard "merged" markers when moving away from a merge state
379 # Discard "merged" markers when moving away from a merge state
380 if s.merged:
380 if s.merged:
381 source = self._map.copymap.get(f)
381 source = self._map.copymap.get(f)
382 if source:
382 if source:
383 copies[f] = source
383 copies[f] = source
384 self.normallookup(f)
384 self.normallookup(f)
385 # Also fix up otherparent markers
385 # Also fix up otherparent markers
386 elif s.from_p2:
386 elif s.from_p2:
387 source = self._map.copymap.get(f)
387 source = self._map.copymap.get(f)
388 if source:
388 if source:
389 copies[f] = source
389 copies[f] = source
390 self.add(f)
390 self.add(f)
391 return copies
391 return copies
392
392
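A sketch of the merge-to-single-parent transition described above (node
values hypothetical)::

    with repo.dirstate.parentchange():
        copies = repo.dirstate.setparents(p1)  # implicit p2 = nullid
    # entries that were "merged" or "from_p2" have been downgraded to
    # normallookup/add, and their copy sources are returned in `copies`
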
393 def setbranch(self, branch):
393 def setbranch(self, branch):
394 self.__class__._branch.set(self, encoding.fromlocal(branch))
394 self.__class__._branch.set(self, encoding.fromlocal(branch))
395 f = self._opener(b'branch', b'w', atomictemp=True, checkambig=True)
395 f = self._opener(b'branch', b'w', atomictemp=True, checkambig=True)
396 try:
396 try:
397 f.write(self._branch + b'\n')
397 f.write(self._branch + b'\n')
398 f.close()
398 f.close()
399
399
400 # make sure filecache has the correct stat info for _branch after
400 # make sure filecache has the correct stat info for _branch after
401 # replacing the underlying file
401 # replacing the underlying file
402 ce = self._filecache[b'_branch']
402 ce = self._filecache[b'_branch']
403 if ce:
403 if ce:
404 ce.refresh()
404 ce.refresh()
405 except: # re-raises
405 except: # re-raises
406 f.discard()
406 f.discard()
407 raise
407 raise
408
408
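Round-trip sketch: `setbranch` persists the name to `.hg/branch`, and
`branch()` reads it back in local encoding (branch name hypothetical)::

    dirstate.setbranch(b'stable')
    dirstate.branch()  # b'stable'; b'default' when the file is absent
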
409 def invalidate(self):
409 def invalidate(self):
410 """Causes the next access to reread the dirstate.
410 """Causes the next access to reread the dirstate.
411
411
412 This is different from localrepo.invalidatedirstate() because it always
412 This is different from localrepo.invalidatedirstate() because it always
413 rereads the dirstate. Use localrepo.invalidatedirstate() if you want to
413 rereads the dirstate. Use localrepo.invalidatedirstate() if you want to
414 check whether the dirstate has changed before rereading it."""
414 check whether the dirstate has changed before rereading it."""
415
415
416 for a in ("_map", "_branch", "_ignore"):
416 for a in ("_map", "_branch", "_ignore"):
417 if a in self.__dict__:
417 if a in self.__dict__:
418 delattr(self, a)
418 delattr(self, a)
419 self._lastnormaltime = 0
419 self._lastnormaltime = 0
420 self._dirty = False
420 self._dirty = False
421 self._updatedfiles.clear()
421 self._updatedfiles.clear()
422 self._parentwriters = 0
422 self._parentwriters = 0
423 self._origpl = None
423 self._origpl = None
424
424
425 def copy(self, source, dest):
425 def copy(self, source, dest):
426 """Mark dest as a copy of source. Unmark dest if source is None."""
426 """Mark dest as a copy of source. Unmark dest if source is None."""
427 if source == dest:
427 if source == dest:
428 return
428 return
429 self._dirty = True
429 self._dirty = True
430 if source is not None:
430 if source is not None:
431 self._map.copymap[dest] = source
431 self._map.copymap[dest] = source
432 self._updatedfiles.add(source)
432 self._updatedfiles.add(source)
433 self._updatedfiles.add(dest)
433 self._updatedfiles.add(dest)
434 elif self._map.copymap.pop(dest, None):
434 elif self._map.copymap.pop(dest, None):
435 self._updatedfiles.add(dest)
435 self._updatedfiles.add(dest)
436
436
437 def copied(self, file):
437 def copied(self, file):
438 return self._map.copymap.get(file, None)
438 return self._map.copymap.get(file, None)
439
439
440 def copies(self):
440 def copies(self):
441 return self._map.copymap
441 return self._map.copymap
442
442
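A small sketch of the copy-tracking API above (file names hypothetical)::

    dirstate.copy(b'orig.txt', b'renamed.txt')  # record the copy
    dirstate.copied(b'renamed.txt')             # b'orig.txt'
    dirstate.copy(None, b'renamed.txt')         # unmark it again
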
443 def _addpath(
443 def _addpath(
444 self,
444 self,
445 f,
445 f,
446 mode=0,
446 mode=0,
447 size=None,
447 size=None,
448 mtime=None,
448 mtime=None,
449 added=False,
449 added=False,
450 merged=False,
450 merged=False,
451 from_p2=False,
451 from_p2=False,
452 possibly_dirty=False,
452 possibly_dirty=False,
453 ):
453 ):
454 entry = self._map.get(f)
454 entry = self._map.get(f)
455 if added or entry is not None and entry.removed:
455 if added or entry is not None and entry.removed:
456 scmutil.checkfilename(f)
456 scmutil.checkfilename(f)
457 if self._map.hastrackeddir(f):
457 if self._map.hastrackeddir(f):
458 msg = _(b'directory %r already in dirstate')
458 msg = _(b'directory %r already in dirstate')
459 msg %= pycompat.bytestr(f)
459 msg %= pycompat.bytestr(f)
460 raise error.Abort(msg)
460 raise error.Abort(msg)
461 # shadows
461 # shadows
462 for d in pathutil.finddirs(f):
462 for d in pathutil.finddirs(f):
463 if self._map.hastrackeddir(d):
463 if self._map.hastrackeddir(d):
464 break
464 break
465 entry = self._map.get(d)
465 entry = self._map.get(d)
466 if entry is not None and not entry.removed:
466 if entry is not None and not entry.removed:
467 msg = _(b'file %r in dirstate clashes with %r')
467 msg = _(b'file %r in dirstate clashes with %r')
468 msg %= (pycompat.bytestr(d), pycompat.bytestr(f))
468 msg %= (pycompat.bytestr(d), pycompat.bytestr(f))
469 raise error.Abort(msg)
469 raise error.Abort(msg)
470 self._dirty = True
470 self._dirty = True
471 self._updatedfiles.add(f)
471 self._updatedfiles.add(f)
472 self._map.addfile(
472 self._map.addfile(
473 f,
473 f,
474 mode=mode,
474 mode=mode,
475 size=size,
475 size=size,
476 mtime=mtime,
476 mtime=mtime,
477 added=added,
477 added=added,
478 merged=merged,
478 merged=merged,
479 from_p2=from_p2,
479 from_p2=from_p2,
480 possibly_dirty=possibly_dirty,
480 possibly_dirty=possibly_dirty,
481 )
481 )
482
482
483 def normal(self, f, parentfiledata=None):
483 def normal(self, f, parentfiledata=None):
484 """Mark a file normal and clean.
484 """Mark a file normal and clean.
485
485
486 parentfiledata: (mode, size, mtime) of the clean file
486 parentfiledata: (mode, size, mtime) of the clean file
487
487
488 parentfiledata should be computed from memory (for mode,
488 parentfiledata should be computed from memory (for mode,
489 size), as close as possible to the point where we
489 size), as close as possible to the point where we
490 determined the file was clean, to limit the risk of the
490 determined the file was clean, to limit the risk of the
491 file having been changed by an external process between the
491 file having been changed by an external process between the
492 moment where the file was determined to be clean and now."""
492 moment where the file was determined to be clean and now."""
493 if parentfiledata:
493 if parentfiledata:
494 (mode, size, mtime) = parentfiledata
494 (mode, size, mtime) = parentfiledata
495 else:
495 else:
496 s = os.lstat(self._join(f))
496 s = os.lstat(self._join(f))
497 mode = s.st_mode
497 mode = s.st_mode
498 size = s.st_size
498 size = s.st_size
499 mtime = s[stat.ST_MTIME]
499 mtime = s[stat.ST_MTIME]
500 self._addpath(f, mode=mode, size=size, mtime=mtime)
500 self._addpath(f, mode=mode, size=size, mtime=mtime)
501 self._map.copymap.pop(f, None)
501 self._map.copymap.pop(f, None)
502 if f in self._map.nonnormalset:
502 if f in self._map.nonnormalset:
503 self._map.nonnormalset.remove(f)
503 self._map.nonnormalset.remove(f)
504 if mtime > self._lastnormaltime:
504 if mtime > self._lastnormaltime:
505 # Remember the most recent modification timeslot for status(),
505 # Remember the most recent modification timeslot for status(),
506 # to make sure we won't miss future size-preserving file content
506 # to make sure we won't miss future size-preserving file content
507 # modifications that happen within the same timeslot.
507 # modifications that happen within the same timeslot.
508 self._lastnormaltime = mtime
508 self._lastnormaltime = mtime
509
509
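A sketch of supplying `parentfiledata`, using the module's existing `os` and
`stat` imports (file name hypothetical); the tuple should be captured as
close as possible to the content comparison::

    st = os.lstat(dirstate._join(b'a.txt'))
    parentfiledata = (st.st_mode, st.st_size, st[stat.ST_MTIME])
    # ... compare the file's content against the parent here ...
    dirstate.normal(b'a.txt', parentfiledata=parentfiledata)
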
510 def normallookup(self, f):
510 def normallookup(self, f):
511 '''Mark a file normal, but possibly dirty.'''
511 '''Mark a file normal, but possibly dirty.'''
512 if self.in_merge:
512 if self.in_merge:
513 # if there is a merge going on and the file was either
513 # if there is a merge going on and the file was either
514 # "merged" or coming from other parent (-2) before
514 # "merged" or coming from other parent (-2) before
515 # being removed, restore that state.
515 # being removed, restore that state.
516 entry = self._map.get(f)
516 entry = self._map.get(f)
517 if entry is not None:
517 if entry is not None:
518 # XXX this should probably be dealt with at a lower level
518 # XXX this should probably be dealt with at a lower level
519 # (see `merged_removed` and `from_p2_removed`)
519 # (see `merged_removed` and `from_p2_removed`)
520 if entry.merged_removed or entry.from_p2_removed:
520 if entry.merged_removed or entry.from_p2_removed:
521 source = self._map.copymap.get(f)
521 source = self._map.copymap.get(f)
522 if entry.merged_removed:
522 if entry.merged_removed:
523 self.merge(f)
523 self.merge(f)
524 elif entry.from_p2_removed:
524 elif entry.from_p2_removed:
525 self.otherparent(f)
525 self.otherparent(f)
526 if source is not None:
526 if source is not None:
527 self.copy(source, f)
527 self.copy(source, f)
528 return
528 return
529 elif entry.merged or entry.from_p2:
529 elif entry.merged or entry.from_p2:
530 return
530 return
531 self._addpath(f, possibly_dirty=True)
531 self._addpath(f, possibly_dirty=True)
532 self._map.copymap.pop(f, None)
532 self._map.copymap.pop(f, None)
533
533
534 def otherparent(self, f):
534 def otherparent(self, f):
535 '''Mark as coming from the other parent, always dirty.'''
535 '''Mark as coming from the other parent, always dirty.'''
536 if not self.in_merge:
536 if not self.in_merge:
537 msg = _(b"setting %r to other parent only allowed in merges") % f
537 msg = _(b"setting %r to other parent only allowed in merges") % f
538 raise error.Abort(msg)
538 raise error.Abort(msg)
539 if f in self and self[f] == b'n':
539 if f in self and self[f] == b'n':
540 # merge-like
540 # merge-like
541 self._addpath(f, merged=True)
541 self._addpath(f, merged=True)
542 else:
542 else:
543 # add-like
543 # add-like
544 self._addpath(f, from_p2=True)
544 self._addpath(f, from_p2=True)
545 self._map.copymap.pop(f, None)
545 self._map.copymap.pop(f, None)
546
546
547 def add(self, f):
547 def add(self, f):
548 '''Mark a file added.'''
548 '''Mark a file added.'''
549 self._addpath(f, added=True)
549 self._addpath(f, added=True)
550 self._map.copymap.pop(f, None)
550 self._map.copymap.pop(f, None)
551
551
552 def remove(self, f):
552 def remove(self, f):
553 '''Mark a file removed.'''
553 '''Mark a file removed.'''
554 self._dirty = True
554 self._dirty = True
555 self._updatedfiles.add(f)
555 self._updatedfiles.add(f)
556 self._map.removefile(f, in_merge=self.in_merge)
556 self._map.removefile(f, in_merge=self.in_merge)
557
557
558 def merge(self, f):
558 def merge(self, f):
559 '''Mark a file merged.'''
559 '''Mark a file merged.'''
560 if not self.in_merge:
560 if not self.in_merge:
561 return self.normallookup(f)
561 return self.normallookup(f)
562 return self.otherparent(f)
562 return self.otherparent(f)
563
563
564 def drop(self, f):
564 def drop(self, f):
565 '''Drop a file from the dirstate'''
565 '''Drop a file from the dirstate'''
566 oldstate = self[f]
566 oldstate = self[f]
567 if self._map.dropfile(f, oldstate):
567 if self._map.dropfile(f, oldstate):
568 self._dirty = True
568 self._dirty = True
569 self._updatedfiles.add(f)
569 self._updatedfiles.add(f)
570 self._map.copymap.pop(f, None)
570 self._map.copymap.pop(f, None)
571
571
572 def _discoverpath(self, path, normed, ignoremissing, exists, storemap):
572 def _discoverpath(self, path, normed, ignoremissing, exists, storemap):
573 if exists is None:
573 if exists is None:
574 exists = os.path.lexists(os.path.join(self._root, path))
574 exists = os.path.lexists(os.path.join(self._root, path))
575 if not exists:
575 if not exists:
576 # Maybe a path component exists
576 # Maybe a path component exists
577 if not ignoremissing and b'/' in path:
577 if not ignoremissing and b'/' in path:
578 d, f = path.rsplit(b'/', 1)
578 d, f = path.rsplit(b'/', 1)
579 d = self._normalize(d, False, ignoremissing, None)
579 d = self._normalize(d, False, ignoremissing, None)
580 folded = d + b"/" + f
580 folded = d + b"/" + f
581 else:
581 else:
582 # No path components, preserve original case
582 # No path components, preserve original case
583 folded = path
583 folded = path
584 else:
584 else:
585 # recursively normalize leading directory components
585 # recursively normalize leading directory components
586 # against dirstate
586 # against dirstate
587 if b'/' in normed:
587 if b'/' in normed:
588 d, f = normed.rsplit(b'/', 1)
588 d, f = normed.rsplit(b'/', 1)
589 d = self._normalize(d, False, ignoremissing, True)
589 d = self._normalize(d, False, ignoremissing, True)
590 r = self._root + b"/" + d
590 r = self._root + b"/" + d
591 folded = d + b"/" + util.fspath(f, r)
591 folded = d + b"/" + util.fspath(f, r)
592 else:
592 else:
593 folded = util.fspath(normed, self._root)
593 folded = util.fspath(normed, self._root)
594 storemap[normed] = folded
594 storemap[normed] = folded
595
595
596 return folded
596 return folded
597
597
598 def _normalizefile(self, path, isknown, ignoremissing=False, exists=None):
598 def _normalizefile(self, path, isknown, ignoremissing=False, exists=None):
599 normed = util.normcase(path)
599 normed = util.normcase(path)
600 folded = self._map.filefoldmap.get(normed, None)
600 folded = self._map.filefoldmap.get(normed, None)
601 if folded is None:
601 if folded is None:
602 if isknown:
602 if isknown:
603 folded = path
603 folded = path
604 else:
604 else:
605 folded = self._discoverpath(
605 folded = self._discoverpath(
606 path, normed, ignoremissing, exists, self._map.filefoldmap
606 path, normed, ignoremissing, exists, self._map.filefoldmap
607 )
607 )
608 return folded
608 return folded
609
609
610 def _normalize(self, path, isknown, ignoremissing=False, exists=None):
610 def _normalize(self, path, isknown, ignoremissing=False, exists=None):
611 normed = util.normcase(path)
611 normed = util.normcase(path)
612 folded = self._map.filefoldmap.get(normed, None)
612 folded = self._map.filefoldmap.get(normed, None)
613 if folded is None:
613 if folded is None:
614 folded = self._map.dirfoldmap.get(normed, None)
614 folded = self._map.dirfoldmap.get(normed, None)
615 if folded is None:
615 if folded is None:
616 if isknown:
616 if isknown:
617 folded = path
617 folded = path
618 else:
618 else:
619 # store discovered result in dirfoldmap so that future
619 # store discovered result in dirfoldmap so that future
620 # normalizefile calls don't start matching directories
620 # normalizefile calls don't start matching directories
621 folded = self._discoverpath(
621 folded = self._discoverpath(
622 path, normed, ignoremissing, exists, self._map.dirfoldmap
622 path, normed, ignoremissing, exists, self._map.dirfoldmap
623 )
623 )
624 return folded
624 return folded
625
625
626 def normalize(self, path, isknown=False, ignoremissing=False):
626 def normalize(self, path, isknown=False, ignoremissing=False):
627 """
627 """
628 normalize the case of a pathname when on a casefolding filesystem
628 normalize the case of a pathname when on a casefolding filesystem
629
629
630 isknown specifies whether the filename came from walking the
630 isknown specifies whether the filename came from walking the
631 disk, to avoid extra filesystem access.
631 disk, to avoid extra filesystem access.
632
632
633 If ignoremissing is True, missing paths are returned
633 If ignoremissing is True, missing paths are returned
634 unchanged. Otherwise, we try harder to normalize possibly
634 unchanged. Otherwise, we try harder to normalize possibly
635 existing path components.
635 existing path components.
636
636
637 The normalized case is determined based on the following precedence:
637 The normalized case is determined based on the following precedence:
638
638
639 - version of name already stored in the dirstate
639 - version of name already stored in the dirstate
640 - version of name stored on disk
640 - version of name stored on disk
641 - version provided via command arguments
641 - version provided via command arguments
642 """
642 """
643
643
644 if self._checkcase:
644 if self._checkcase:
645 return self._normalize(path, isknown, ignoremissing)
645 return self._normalize(path, isknown, ignoremissing)
646 return path
646 return path
647
647
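On a case-insensitive filesystem (`_checkcase` is true), and assuming the
dirstate already tracks `readme.txt` (hypothetical name)::

    dirstate.normalize(b'README.TXT')  # -> b'readme.txt'
    dirstate.normalize(b'new-file')    # unchanged: nothing to match
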
648 def clear(self):
648 def clear(self):
649 self._map.clear()
649 self._map.clear()
650 self._lastnormaltime = 0
650 self._lastnormaltime = 0
651 self._updatedfiles.clear()
651 self._updatedfiles.clear()
652 self._dirty = True
652 self._dirty = True
653
653
654 def rebuild(self, parent, allfiles, changedfiles=None):
654 def rebuild(self, parent, allfiles, changedfiles=None):
655 if changedfiles is None:
655 if changedfiles is None:
656 # Rebuild entire dirstate
656 # Rebuild entire dirstate
657 to_lookup = allfiles
657 to_lookup = allfiles
658 to_drop = []
658 to_drop = []
659 lastnormaltime = self._lastnormaltime
659 lastnormaltime = self._lastnormaltime
660 self.clear()
660 self.clear()
661 self._lastnormaltime = lastnormaltime
661 self._lastnormaltime = lastnormaltime
662 elif len(changedfiles) < 10:
662 elif len(changedfiles) < 10:
663 # Avoid turning allfiles into a set, which can be expensive if it's
663 # Avoid turning allfiles into a set, which can be expensive if it's
664 # large.
664 # large.
665 to_lookup = []
665 to_lookup = []
666 to_drop = []
666 to_drop = []
667 for f in changedfiles:
667 for f in changedfiles:
668 if f in allfiles:
668 if f in allfiles:
669 to_lookup.append(f)
669 to_lookup.append(f)
670 else:
670 else:
671 to_drop.append(f)
671 to_drop.append(f)
672 else:
672 else:
673 changedfilesset = set(changedfiles)
673 changedfilesset = set(changedfiles)
674 to_lookup = changedfilesset & set(allfiles)
674 to_lookup = changedfilesset & set(allfiles)
675 to_drop = changedfilesset - to_lookup
675 to_drop = changedfilesset - to_lookup
676
676
677 if self._origpl is None:
677 if self._origpl is None:
678 self._origpl = self._pl
678 self._origpl = self._pl
679 self._map.setparents(parent, self._nodeconstants.nullid)
679 self._map.setparents(parent, self._nodeconstants.nullid)
680
680
681 for f in to_lookup:
681 for f in to_lookup:
682 self.normallookup(f)
682 self.normallookup(f)
683 for f in to_drop:
683 for f in to_drop:
684 self.drop(f)
684 self.drop(f)
685
685
686 self._dirty = True
686 self._dirty = True
687
687
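Usage sketch (parent node and file collections hypothetical); passing
`changedfiles` keeps the rebuild incremental::

    dirstate.rebuild(
        parentnode,
        allfiles=manifest,                 # everything that should be tracked
        changedfiles=[b'a.txt', b'gone'],  # only these are (re)examined
    )
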
688 def identity(self):
688 def identity(self):
689 """Return identity of dirstate itself to detect changing in storage
689 """Return identity of dirstate itself to detect changing in storage
690
690
691 If the identity of the previous dirstate is equal to this one, writing
691 If the identity of the previous dirstate is equal to this one, writing
692 out changes based on the former dirstate can keep consistency.
692 out changes based on the former dirstate can keep consistency.
693 """
693 """
694 return self._map.identity
694 return self._map.identity
695
695
696 def write(self, tr):
696 def write(self, tr):
697 if not self._dirty:
697 if not self._dirty:
698 return
698 return
699
699
700 filename = self._filename
700 filename = self._filename
701 if tr:
701 if tr:
702 # 'dirstate.write()' is not only for writing in-memory
702 # 'dirstate.write()' is not only for writing in-memory
703 # changes out, but also for dropping ambiguous timestamps;
703 # changes out, but also for dropping ambiguous timestamps;
704 # delayed writing re-raises the "ambiguous timestamp issue".
704 # delayed writing re-raises the "ambiguous timestamp issue".
705 # See also the wiki page below for detail:
705 # See also the wiki page below for detail:
706 # https://www.mercurial-scm.org/wiki/DirstateTransactionPlan
706 # https://www.mercurial-scm.org/wiki/DirstateTransactionPlan
707
707
708 # emulate dropping timestamp in 'parsers.pack_dirstate'
708 # emulate dropping timestamp in 'parsers.pack_dirstate'
709 now = _getfsnow(self._opener)
709 now = _getfsnow(self._opener)
710 self._map.clearambiguoustimes(self._updatedfiles, now)
710 self._map.clearambiguoustimes(self._updatedfiles, now)
711
711
712 # emulate that all 'dirstate.normal' results are written out
712 # emulate that all 'dirstate.normal' results are written out
713 self._lastnormaltime = 0
713 self._lastnormaltime = 0
714 self._updatedfiles.clear()
714 self._updatedfiles.clear()
715
715
716 # delay writing in-memory changes out
716 # delay writing in-memory changes out
717 tr.addfilegenerator(
717 tr.addfilegenerator(
718 b'dirstate',
718 b'dirstate',
719 (self._filename,),
719 (self._filename,),
720 self._writedirstate,
720 self._writedirstate,
721 location=b'plain',
721 location=b'plain',
722 )
722 )
723 return
723 return
724
724
725 st = self._opener(filename, b"w", atomictemp=True, checkambig=True)
725 st = self._opener(filename, b"w", atomictemp=True, checkambig=True)
726 self._writedirstate(st)
726 self._writedirstate(st)
727
727
728 def addparentchangecallback(self, category, callback):
728 def addparentchangecallback(self, category, callback):
729 """add a callback to be called when the wd parents are changed
729 """add a callback to be called when the wd parents are changed
730
730
731 Callback will be called with the following arguments:
731 Callback will be called with the following arguments:
732 dirstate, (oldp1, oldp2), (newp1, newp2)
732 dirstate, (oldp1, oldp2), (newp1, newp2)
733
733
734 Category is a unique identifier to allow overwriting an old callback
734 Category is a unique identifier to allow overwriting an old callback
735 with a newer callback.
735 with a newer callback.
736 """
736 """
737 self._plchangecallbacks[category] = callback
737 self._plchangecallbacks[category] = callback
738
738
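Registration sketch (the category name and callback body are hypothetical);
re-registering the same category replaces the earlier callback::

    def logmove(ds, oldparents, newparents):
        ds._ui.debug(b'dirstate parents moved\n')

    dirstate.addparentchangecallback(b'myext-log', logmove)
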
739 def _writedirstate(self, st):
739 def _writedirstate(self, st):
740 # notify callbacks about parents change
740 # notify callbacks about parents change
741 if self._origpl is not None and self._origpl != self._pl:
741 if self._origpl is not None and self._origpl != self._pl:
742 for c, callback in sorted(
742 for c, callback in sorted(
743 pycompat.iteritems(self._plchangecallbacks)
743 pycompat.iteritems(self._plchangecallbacks)
744 ):
744 ):
745 callback(self, self._origpl, self._pl)
745 callback(self, self._origpl, self._pl)
746 self._origpl = None
746 self._origpl = None
747 # use the modification time of the newly created temporary file as the
747 # use the modification time of the newly created temporary file as the
748 # filesystem's notion of 'now'
748 # filesystem's notion of 'now'
749 now = util.fstat(st)[stat.ST_MTIME] & _rangemask
749 now = util.fstat(st)[stat.ST_MTIME] & _rangemask
750
750
751 # a large enough 'delaywrite' prevents 'pack_dirstate' from dropping
751 # a large enough 'delaywrite' prevents 'pack_dirstate' from dropping
752 # the timestamp of each entry in the dirstate, because of 'now > mtime'
752 # the timestamp of each entry in the dirstate, because of 'now > mtime'
753 delaywrite = self._ui.configint(b'debug', b'dirstate.delaywrite')
753 delaywrite = self._ui.configint(b'debug', b'dirstate.delaywrite')
754 if delaywrite > 0:
754 if delaywrite > 0:
755 # do we have any files to delay for?
755 # do we have any files to delay for?
756 for f, e in pycompat.iteritems(self._map):
756 for f, e in pycompat.iteritems(self._map):
757 if e.state == b'n' and e[3] == now:
757 if e.need_delay(now):
758 import time # to avoid useless import
758 import time # to avoid useless import
759
759
760 # rather than sleep n seconds, sleep until the next
760 # rather than sleep n seconds, sleep until the next
761 # multiple of n seconds
761 # multiple of n seconds
762 clock = time.time()
762 clock = time.time()
763 start = int(clock) - (int(clock) % delaywrite)
763 start = int(clock) - (int(clock) % delaywrite)
764 end = start + delaywrite
764 end = start + delaywrite
765 time.sleep(end - clock)
765 time.sleep(end - clock)
766 now = end # trust our estimate that the end is near now
766 now = end # trust our estimate that the end is near now
767 break
767 break
768
768
769 self._map.write(st, now)
769 self._map.write(st, now)
770 self._lastnormaltime = 0
770 self._lastnormaltime = 0
771 self._dirty = False
771 self._dirty = False
772
772
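A worked example of the sleep-to-boundary arithmetic above, with
hypothetical values: for `delaywrite = 2` and `clock = 103.4`::

    start = int(103.4) - (int(103.4) % 2)  # 102
    end = start + 2                        # 104
    end - 103.4                            # sleep ~0.6s, then now = 104
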
773 def _dirignore(self, f):
773 def _dirignore(self, f):
774 if self._ignore(f):
774 if self._ignore(f):
775 return True
775 return True
776 for p in pathutil.finddirs(f):
776 for p in pathutil.finddirs(f):
777 if self._ignore(p):
777 if self._ignore(p):
778 return True
778 return True
779 return False
779 return False
780
780
781 def _ignorefiles(self):
781 def _ignorefiles(self):
782 files = []
782 files = []
783 if os.path.exists(self._join(b'.hgignore')):
783 if os.path.exists(self._join(b'.hgignore')):
784 files.append(self._join(b'.hgignore'))
784 files.append(self._join(b'.hgignore'))
785 for name, path in self._ui.configitems(b"ui"):
785 for name, path in self._ui.configitems(b"ui"):
786 if name == b'ignore' or name.startswith(b'ignore.'):
786 if name == b'ignore' or name.startswith(b'ignore.'):
787 # we need to use os.path.join here rather than self._join
787 # we need to use os.path.join here rather than self._join
788 # because path is arbitrary and user-specified
788 # because path is arbitrary and user-specified
789 files.append(os.path.join(self._rootdir, util.expandpath(path)))
789 files.append(os.path.join(self._rootdir, util.expandpath(path)))
790 return files
790 return files
791
791
792 def _ignorefileandline(self, f):
792 def _ignorefileandline(self, f):
793 files = collections.deque(self._ignorefiles())
793 files = collections.deque(self._ignorefiles())
794 visited = set()
794 visited = set()
795 while files:
795 while files:
796 i = files.popleft()
796 i = files.popleft()
797 patterns = matchmod.readpatternfile(
797 patterns = matchmod.readpatternfile(
798 i, self._ui.warn, sourceinfo=True
798 i, self._ui.warn, sourceinfo=True
799 )
799 )
800 for pattern, lineno, line in patterns:
800 for pattern, lineno, line in patterns:
801 kind, p = matchmod._patsplit(pattern, b'glob')
801 kind, p = matchmod._patsplit(pattern, b'glob')
802 if kind == b"subinclude":
802 if kind == b"subinclude":
803 if p not in visited:
803 if p not in visited:
804 files.append(p)
804 files.append(p)
805 continue
805 continue
806 m = matchmod.match(
806 m = matchmod.match(
807 self._root, b'', [], [pattern], warn=self._ui.warn
807 self._root, b'', [], [pattern], warn=self._ui.warn
808 )
808 )
809 if m(f):
809 if m(f):
810 return (i, lineno, line)
810 return (i, lineno, line)
811 visited.add(i)
811 visited.add(i)
812 return (None, -1, b"")
812 return (None, -1, b"")
813
813
814 def _walkexplicit(self, match, subrepos):
814 def _walkexplicit(self, match, subrepos):
815 """Get stat data about the files explicitly specified by match.
815 """Get stat data about the files explicitly specified by match.
816
816
817 Return a triple (results, dirsfound, dirsnotfound).
817 Return a triple (results, dirsfound, dirsnotfound).
818 - results is a mapping from filename to stat result. It also contains
818 - results is a mapping from filename to stat result. It also contains
819 listings mapping subrepos and .hg to None.
819 listings mapping subrepos and .hg to None.
820 - dirsfound is a list of files found to be directories.
820 - dirsfound is a list of files found to be directories.
821 - dirsnotfound is a list of files that the dirstate thinks are
821 - dirsnotfound is a list of files that the dirstate thinks are
822 directories and that were not found."""
822 directories and that were not found."""
823
823
824 def badtype(mode):
824 def badtype(mode):
825 kind = _(b'unknown')
825 kind = _(b'unknown')
826 if stat.S_ISCHR(mode):
826 if stat.S_ISCHR(mode):
827 kind = _(b'character device')
827 kind = _(b'character device')
828 elif stat.S_ISBLK(mode):
828 elif stat.S_ISBLK(mode):
829 kind = _(b'block device')
829 kind = _(b'block device')
830 elif stat.S_ISFIFO(mode):
830 elif stat.S_ISFIFO(mode):
831 kind = _(b'fifo')
831 kind = _(b'fifo')
832 elif stat.S_ISSOCK(mode):
832 elif stat.S_ISSOCK(mode):
833 kind = _(b'socket')
833 kind = _(b'socket')
834 elif stat.S_ISDIR(mode):
834 elif stat.S_ISDIR(mode):
835 kind = _(b'directory')
835 kind = _(b'directory')
836 return _(b'unsupported file type (type is %s)') % kind
836 return _(b'unsupported file type (type is %s)') % kind
837
837
838 badfn = match.bad
838 badfn = match.bad
839 dmap = self._map
839 dmap = self._map
840 lstat = os.lstat
840 lstat = os.lstat
841 getkind = stat.S_IFMT
841 getkind = stat.S_IFMT
842 dirkind = stat.S_IFDIR
842 dirkind = stat.S_IFDIR
843 regkind = stat.S_IFREG
843 regkind = stat.S_IFREG
844 lnkkind = stat.S_IFLNK
844 lnkkind = stat.S_IFLNK
845 join = self._join
845 join = self._join
846 dirsfound = []
846 dirsfound = []
847 foundadd = dirsfound.append
847 foundadd = dirsfound.append
848 dirsnotfound = []
848 dirsnotfound = []
849 notfoundadd = dirsnotfound.append
849 notfoundadd = dirsnotfound.append
850
850
851 if not match.isexact() and self._checkcase:
851 if not match.isexact() and self._checkcase:
852 normalize = self._normalize
852 normalize = self._normalize
853 else:
853 else:
854 normalize = None
854 normalize = None
855
855
856 files = sorted(match.files())
856 files = sorted(match.files())
857 subrepos.sort()
857 subrepos.sort()
858 i, j = 0, 0
858 i, j = 0, 0
859 while i < len(files) and j < len(subrepos):
859 while i < len(files) and j < len(subrepos):
860 subpath = subrepos[j] + b"/"
860 subpath = subrepos[j] + b"/"
861 if files[i] < subpath:
861 if files[i] < subpath:
862 i += 1
862 i += 1
863 continue
863 continue
864 while i < len(files) and files[i].startswith(subpath):
864 while i < len(files) and files[i].startswith(subpath):
865 del files[i]
865 del files[i]
866 j += 1
866 j += 1
867
867
868 if not files or b'' in files:
868 if not files or b'' in files:
869 files = [b'']
869 files = [b'']
870 # constructing the foldmap is expensive, so don't do it for the
870 # constructing the foldmap is expensive, so don't do it for the
871 # common case where files is ['']
871 # common case where files is ['']
872 normalize = None
872 normalize = None
873 results = dict.fromkeys(subrepos)
873 results = dict.fromkeys(subrepos)
874 results[b'.hg'] = None
874 results[b'.hg'] = None
875
875
876 for ff in files:
876 for ff in files:
877 if normalize:
877 if normalize:
878 nf = normalize(ff, False, True)
878 nf = normalize(ff, False, True)
879 else:
879 else:
880 nf = ff
880 nf = ff
881 if nf in results:
881 if nf in results:
882 continue
882 continue
883
883
884 try:
884 try:
885 st = lstat(join(nf))
885 st = lstat(join(nf))
886 kind = getkind(st.st_mode)
886 kind = getkind(st.st_mode)
887 if kind == dirkind:
887 if kind == dirkind:
888 if nf in dmap:
888 if nf in dmap:
889 # file replaced by dir on disk but still in dirstate
889 # file replaced by dir on disk but still in dirstate
890 results[nf] = None
890 results[nf] = None
891 foundadd((nf, ff))
891 foundadd((nf, ff))
892 elif kind == regkind or kind == lnkkind:
892 elif kind == regkind or kind == lnkkind:
893 results[nf] = st
893 results[nf] = st
894 else:
894 else:
895 badfn(ff, badtype(kind))
895 badfn(ff, badtype(kind))
896 if nf in dmap:
896 if nf in dmap:
897 results[nf] = None
897 results[nf] = None
898 except OSError as inst: # nf not found on disk - it is dirstate only
898 except OSError as inst: # nf not found on disk - it is dirstate only
899 if nf in dmap: # does it exactly match a missing file?
899 if nf in dmap: # does it exactly match a missing file?
900 results[nf] = None
900 results[nf] = None
901 else: # does it match a missing directory?
901 else: # does it match a missing directory?
902 if self._map.hasdir(nf):
902 if self._map.hasdir(nf):
903 notfoundadd(nf)
903 notfoundadd(nf)
904 else:
904 else:
905 badfn(ff, encoding.strtolocal(inst.strerror))
905 badfn(ff, encoding.strtolocal(inst.strerror))
906
906
907 # match.files() may contain explicitly-specified paths that shouldn't
907 # match.files() may contain explicitly-specified paths that shouldn't
908 # be taken; drop them from the list of files found. dirsfound/notfound
908 # be taken; drop them from the list of files found. dirsfound/notfound
909 # aren't filtered here because they will be tested later.
909 # aren't filtered here because they will be tested later.
910 if match.anypats():
910 if match.anypats():
911 for f in list(results):
911 for f in list(results):
912 if f == b'.hg' or f in subrepos:
912 if f == b'.hg' or f in subrepos:
913 # keep sentinel to disable further out-of-repo walks
913 # keep sentinel to disable further out-of-repo walks
914 continue
914 continue
915 if not match(f):
915 if not match(f):
916 del results[f]
916 del results[f]
917
917
918 # Case insensitive filesystems cannot rely on lstat() failing to detect
918 # Case insensitive filesystems cannot rely on lstat() failing to detect
919 # a case-only rename. Prune the stat object for any file that does not
919 # a case-only rename. Prune the stat object for any file that does not
920 # match the case in the filesystem, if there are multiple files that
920 # match the case in the filesystem, if there are multiple files that
921 # normalize to the same path.
921 # normalize to the same path.
922 if match.isexact() and self._checkcase:
922 if match.isexact() and self._checkcase:
923 normed = {}
923 normed = {}
924
924
925 for f, st in pycompat.iteritems(results):
925 for f, st in pycompat.iteritems(results):
926 if st is None:
926 if st is None:
927 continue
927 continue
928
928
929 nc = util.normcase(f)
929 nc = util.normcase(f)
930 paths = normed.get(nc)
930 paths = normed.get(nc)
931
931
932 if paths is None:
932 if paths is None:
933 paths = set()
933 paths = set()
934 normed[nc] = paths
934 normed[nc] = paths
935
935
936 paths.add(f)
936 paths.add(f)
937
937
938 for norm, paths in pycompat.iteritems(normed):
938 for norm, paths in pycompat.iteritems(normed):
939 if len(paths) > 1:
939 if len(paths) > 1:
940 for path in paths:
940 for path in paths:
941 folded = self._discoverpath(
941 folded = self._discoverpath(
942 path, norm, True, None, self._map.dirfoldmap
942 path, norm, True, None, self._map.dirfoldmap
943 )
943 )
944 if path != folded:
944 if path != folded:
945 results[path] = None
945 results[path] = None
946
946
947 return results, dirsfound, dirsnotfound
947 return results, dirsfound, dirsnotfound
948
948
949 def walk(self, match, subrepos, unknown, ignored, full=True):
949 def walk(self, match, subrepos, unknown, ignored, full=True):
950 """
950 """
951 Walk recursively through the directory tree, finding all files
951 Walk recursively through the directory tree, finding all files
952 matched by match.
952 matched by match.
953
953
954 If full is False, maybe skip some known-clean files.
954 If full is False, maybe skip some known-clean files.
955
955
956 Return a dict mapping filename to stat-like object (either
956 Return a dict mapping filename to stat-like object (either
957 mercurial.osutil.stat instance or return value of os.stat()).
957 mercurial.osutil.stat instance or return value of os.stat()).
958
958
959 """
959 """
960 # full is a flag that extensions that hook into walk can use -- this
960 # full is a flag that extensions that hook into walk can use -- this
961 # implementation doesn't use it at all. This satisfies the contract
961 # implementation doesn't use it at all. This satisfies the contract
962 # because we only guarantee a "maybe".
962 # because we only guarantee a "maybe".
963
963
964 if ignored:
964 if ignored:
965 ignore = util.never
965 ignore = util.never
966 dirignore = util.never
966 dirignore = util.never
967 elif unknown:
967 elif unknown:
968 ignore = self._ignore
968 ignore = self._ignore
969 dirignore = self._dirignore
969 dirignore = self._dirignore
970 else:
970 else:
971 # if not unknown and not ignored, drop dir recursion and step 2
971 # if not unknown and not ignored, drop dir recursion and step 2
972 ignore = util.always
972 ignore = util.always
973 dirignore = util.always
973 dirignore = util.always
974
974
975 matchfn = match.matchfn
975 matchfn = match.matchfn
976 matchalways = match.always()
976 matchalways = match.always()
977 matchtdir = match.traversedir
977 matchtdir = match.traversedir
978 dmap = self._map
978 dmap = self._map
979 listdir = util.listdir
979 listdir = util.listdir
980 lstat = os.lstat
980 lstat = os.lstat
981 dirkind = stat.S_IFDIR
981 dirkind = stat.S_IFDIR
982 regkind = stat.S_IFREG
982 regkind = stat.S_IFREG
983 lnkkind = stat.S_IFLNK
983 lnkkind = stat.S_IFLNK
984 join = self._join
984 join = self._join
985
985
986 exact = skipstep3 = False
986 exact = skipstep3 = False
987 if match.isexact(): # match.exact
987 if match.isexact(): # match.exact
988 exact = True
988 exact = True
989 dirignore = util.always # skip step 2
989 dirignore = util.always # skip step 2
990 elif match.prefix(): # match.match, no patterns
990 elif match.prefix(): # match.match, no patterns
991 skipstep3 = True
991 skipstep3 = True
992
992
993 if not exact and self._checkcase:
993 if not exact and self._checkcase:
994 normalize = self._normalize
994 normalize = self._normalize
995 normalizefile = self._normalizefile
995 normalizefile = self._normalizefile
996 skipstep3 = False
996 skipstep3 = False
997 else:
997 else:
998 normalize = self._normalize
998 normalize = self._normalize
999 normalizefile = None
999 normalizefile = None
1000
1000
1001 # step 1: find all explicit files
1001 # step 1: find all explicit files
1002 results, work, dirsnotfound = self._walkexplicit(match, subrepos)
1002 results, work, dirsnotfound = self._walkexplicit(match, subrepos)
1003 if matchtdir:
1003 if matchtdir:
1004 for d in work:
1004 for d in work:
1005 matchtdir(d[0])
1005 matchtdir(d[0])
1006 for d in dirsnotfound:
1006 for d in dirsnotfound:
1007 matchtdir(d)
1007 matchtdir(d)
1008
1008
1009 skipstep3 = skipstep3 and not (work or dirsnotfound)
1009 skipstep3 = skipstep3 and not (work or dirsnotfound)
1010 work = [d for d in work if not dirignore(d[0])]
1010 work = [d for d in work if not dirignore(d[0])]
1011
1011
1012 # step 2: visit subdirectories
1012 # step 2: visit subdirectories
1013 def traverse(work, alreadynormed):
1013 def traverse(work, alreadynormed):
1014 wadd = work.append
1014 wadd = work.append
1015 while work:
1015 while work:
1016 tracing.counter('dirstate.walk work', len(work))
1016 tracing.counter('dirstate.walk work', len(work))
1017 nd = work.pop()
1017 nd = work.pop()
1018 visitentries = match.visitchildrenset(nd)
1018 visitentries = match.visitchildrenset(nd)
1019 if not visitentries:
1019 if not visitentries:
1020 continue
1020 continue
1021 if visitentries == b'this' or visitentries == b'all':
1021 if visitentries == b'this' or visitentries == b'all':
1022 visitentries = None
1022 visitentries = None
1023 skip = None
1023 skip = None
1024 if nd != b'':
1024 if nd != b'':
1025 skip = b'.hg'
1025 skip = b'.hg'
1026 try:
1026 try:
1027 with tracing.log('dirstate.walk.traverse listdir %s', nd):
1027 with tracing.log('dirstate.walk.traverse listdir %s', nd):
1028 entries = listdir(join(nd), stat=True, skip=skip)
1028 entries = listdir(join(nd), stat=True, skip=skip)
1029 except OSError as inst:
1029 except OSError as inst:
1030 if inst.errno in (errno.EACCES, errno.ENOENT):
1030 if inst.errno in (errno.EACCES, errno.ENOENT):
1031 match.bad(
1031 match.bad(
1032 self.pathto(nd), encoding.strtolocal(inst.strerror)
1032 self.pathto(nd), encoding.strtolocal(inst.strerror)
1033 )
1033 )
1034 continue
1034 continue
1035 raise
1035 raise
1036 for f, kind, st in entries:
1036 for f, kind, st in entries:
1037 # Some matchers may return files in the visitentries set,
1037 # Some matchers may return files in the visitentries set,
1038 # instead of 'this', if the matcher explicitly mentions them
1038 # instead of 'this', if the matcher explicitly mentions them
1039 # and is not an exactmatcher. This is acceptable; we do not
1039 # and is not an exactmatcher. This is acceptable; we do not
1040 # make any hard assumptions about file-or-directory below
1040 # make any hard assumptions about file-or-directory below
1041 # based on the presence of `f` in visitentries. If
1041 # based on the presence of `f` in visitentries. If
1042 # visitchildrenset returned a set, we can always skip the
1042 # visitchildrenset returned a set, we can always skip the
1043 # entries *not* in the set it provided regardless of whether
1043 # entries *not* in the set it provided regardless of whether
1044 # they're actually a file or a directory.
1044 # they're actually a file or a directory.
1045 if visitentries and f not in visitentries:
1045 if visitentries and f not in visitentries:
1046 continue
1046 continue
1047 if normalizefile:
1047 if normalizefile:
1048 # even though f might be a directory, we're only
1048 # even though f might be a directory, we're only
1049 # interested in comparing it to files currently in the
1049 # interested in comparing it to files currently in the
1050 # dmap -- therefore normalizefile is enough
1050 # dmap -- therefore normalizefile is enough
1051 nf = normalizefile(
1051 nf = normalizefile(
1052 nd and (nd + b"/" + f) or f, True, True
1052 nd and (nd + b"/" + f) or f, True, True
1053 )
1053 )
1054 else:
1054 else:
1055 nf = nd and (nd + b"/" + f) or f
1055 nf = nd and (nd + b"/" + f) or f
1056 if nf not in results:
1056 if nf not in results:
1057 if kind == dirkind:
1057 if kind == dirkind:
1058 if not ignore(nf):
1058 if not ignore(nf):
1059 if matchtdir:
1059 if matchtdir:
1060 matchtdir(nf)
1060 matchtdir(nf)
1061 wadd(nf)
1061 wadd(nf)
1062 if nf in dmap and (matchalways or matchfn(nf)):
1062 if nf in dmap and (matchalways or matchfn(nf)):
1063 results[nf] = None
1063 results[nf] = None
1064 elif kind == regkind or kind == lnkkind:
1064 elif kind == regkind or kind == lnkkind:
1065 if nf in dmap:
1065 if nf in dmap:
1066 if matchalways or matchfn(nf):
1066 if matchalways or matchfn(nf):
1067 results[nf] = st
1067 results[nf] = st
1068 elif (matchalways or matchfn(nf)) and not ignore(
1068 elif (matchalways or matchfn(nf)) and not ignore(
1069 nf
1069 nf
1070 ):
1070 ):
1071 # unknown file -- normalize if necessary
1071 # unknown file -- normalize if necessary
1072 if not alreadynormed:
1072 if not alreadynormed:
1073 nf = normalize(nf, False, True)
1073 nf = normalize(nf, False, True)
1074 results[nf] = st
1074 results[nf] = st
1075 elif nf in dmap and (matchalways or matchfn(nf)):
1075 elif nf in dmap and (matchalways or matchfn(nf)):
1076 results[nf] = None
1076 results[nf] = None
1077
1077
1078 for nd, d in work:
1078 for nd, d in work:
1079 # alreadynormed means that processwork doesn't have to do any
1079 # alreadynormed means that processwork doesn't have to do any
1080 # expensive directory normalization
1080 # expensive directory normalization
1081 alreadynormed = not normalize or nd == d
1081 alreadynormed = not normalize or nd == d
1082 traverse([d], alreadynormed)
1082 traverse([d], alreadynormed)
1083
1083
1084 for s in subrepos:
1084 for s in subrepos:
1085 del results[s]
1085 del results[s]
1086 del results[b'.hg']
1086 del results[b'.hg']
1087
1087
1088 # step 3: visit remaining files from dmap
1088 # step 3: visit remaining files from dmap
1089 if not skipstep3 and not exact:
1089 if not skipstep3 and not exact:
1090 # If a dmap file is not in results yet, it was either
1090 # If a dmap file is not in results yet, it was either
1091 # a) not matching matchfn, b) ignored, c) missing, or d) under a
1091 # a) not matching matchfn, b) ignored, c) missing, or d) under a
1092 # symlink directory.
1092 # symlink directory.
1093 if not results and matchalways:
1093 if not results and matchalways:
1094 visit = [f for f in dmap]
1094 visit = [f for f in dmap]
1095 else:
1095 else:
1096 visit = [f for f in dmap if f not in results and matchfn(f)]
1096 visit = [f for f in dmap if f not in results and matchfn(f)]
1097 visit.sort()
1097 visit.sort()
1098
1098
1099 if unknown:
1099 if unknown:
1100 # unknown == True means we walked all dirs under the roots
1100 # unknown == True means we walked all dirs under the roots
1101 # that weren't ignored, and everything that matched was stat'ed
1101 # that weren't ignored, and everything that matched was stat'ed
1102 # and is already in results.
1102 # and is already in results.
1103 # The rest must thus be ignored or under a symlink.
1103 # The rest must thus be ignored or under a symlink.
1104 audit_path = pathutil.pathauditor(self._root, cached=True)
1104 audit_path = pathutil.pathauditor(self._root, cached=True)
1105
1105
1106 for nf in iter(visit):
1106 for nf in iter(visit):
1107 # If a stat for the same file was already added with a
1107 # If a stat for the same file was already added with a
1108 # different case, don't add one for this, since that would
1108 # different case, don't add one for this, since that would
1109 # make it appear as if the file exists under both names
1109 # make it appear as if the file exists under both names
1110 # on disk.
1110 # on disk.
1111 if (
1111 if (
1112 normalizefile
1112 normalizefile
1113 and normalizefile(nf, True, True) in results
1113 and normalizefile(nf, True, True) in results
1114 ):
1114 ):
1115 results[nf] = None
1115 results[nf] = None
1116 # Report ignored items in the dmap as long as they are not
1116 # Report ignored items in the dmap as long as they are not
1117 # under a symlink directory.
1117 # under a symlink directory.
1118 elif audit_path.check(nf):
1118 elif audit_path.check(nf):
1119 try:
1119 try:
1120 results[nf] = lstat(join(nf))
1120 results[nf] = lstat(join(nf))
1121 # file was just ignored, no links, and exists
1121 # file was just ignored, no links, and exists
1122 except OSError:
1122 except OSError:
1123 # file doesn't exist
1123 # file doesn't exist
1124 results[nf] = None
1124 results[nf] = None
1125 else:
1125 else:
1126 # It's either missing or under a symlink directory
1126 # It's either missing or under a symlink directory
1127 # which we in this case report as missing
1127 # which we in this case report as missing
1128 results[nf] = None
1128 results[nf] = None
1129 else:
1129 else:
1130 # We may not have walked the full directory tree above,
1130 # We may not have walked the full directory tree above,
1131 # so stat and check everything we missed.
1131 # so stat and check everything we missed.
1132 iv = iter(visit)
1132 iv = iter(visit)
1133 for st in util.statfiles([join(i) for i in visit]):
1133 for st in util.statfiles([join(i) for i in visit]):
1134 results[next(iv)] = st
1134 results[next(iv)] = st
1135 return results
1135 return results
1136
1136
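The `iv = iter(visit)` pairing at the end of the walk is easy to miss: `util.statfiles` returns one result per input path, in input order (None for missing files), and the parallel iterator re-associates each result with its filename. A minimal standalone sketch of the same idiom, with a plain `os.lstat` loop standing in for Mercurial's `util.statfiles` (an assumption; the real helper is batched in C where available):

    import os

    def statfiles(paths):
        # stand-in for util.statfiles: one entry per input, None if missing
        results = []
        for p in paths:
            try:
                results.append(os.lstat(p))
            except OSError:
                results.append(None)
        return results

    visit = sorted([b'a.txt', b'b.txt'])
    iv = iter(visit)
    found = {}
    for st in statfiles(visit):
        # next(iv) yields the filename this stat result belongs to
        found[next(iv)] = st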
1137 def _rust_status(self, matcher, list_clean, list_ignored, list_unknown):
1138 # Force Rayon (Rust parallelism library) to respect the number of
1139 # workers. This is a temporary workaround until Rust code knows
1140 # how to read the config file.
1141 numcpus = self._ui.configint(b"worker", b"numcpus")
1142 if numcpus is not None:
1143 encoding.environ.setdefault(b'RAYON_NUM_THREADS', b'%d' % numcpus)
1144
1145 workers_enabled = self._ui.configbool(b"worker", b"enabled", True)
1146 if not workers_enabled:
1147 encoding.environ[b"RAYON_NUM_THREADS"] = b"1"
1148
1149 (
1150 lookup,
1151 modified,
1152 added,
1153 removed,
1154 deleted,
1155 clean,
1156 ignored,
1157 unknown,
1158 warnings,
1159 bad,
1160 traversed,
1161 dirty,
1162 ) = rustmod.status(
1163 self._map._rustmap,
1164 matcher,
1165 self._rootdir,
1166 self._ignorefiles(),
1167 self._checkexec,
1168 self._lastnormaltime,
1169 bool(list_clean),
1170 bool(list_ignored),
1171 bool(list_unknown),
1172 bool(matcher.traversedir),
1173 )
1174
1175 self._dirty |= dirty
1176
1177 if matcher.traversedir:
1178 for dir in traversed:
1179 matcher.traversedir(dir)
1180
1181 if self._ui.warn:
1182 for item in warnings:
1183 if isinstance(item, tuple):
1184 file_path, syntax = item
1185 msg = _(b"%s: ignoring invalid syntax '%s'\n") % (
1186 file_path,
1187 syntax,
1188 )
1189 self._ui.warn(msg)
1190 else:
1191 msg = _(b"skipping unreadable pattern file '%s': %s\n")
1192 self._ui.warn(
1193 msg
1194 % (
1195 pathutil.canonpath(
1196 self._rootdir, self._rootdir, item
1197 ),
1198 b"No such file or directory",
1199 )
1200 )
1201
1202 for (fn, message) in bad:
1203 matcher.bad(fn, encoding.strtolocal(message))
1204
1205 status = scmutil.status(
1206 modified=modified,
1207 added=added,
1208 removed=removed,
1209 deleted=deleted,
1210 unknown=unknown,
1211 ignored=ignored,
1212 clean=clean,
1213 )
1214 return (lookup, status)
1215
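Note the asymmetry in the Rayon workaround above: `worker.numcpus` only provides a default, so a RAYON_NUM_THREADS value already present in the environment wins, while disabling workers overwrites the variable unconditionally. A sketch of the same precedence rule using plain `os.environ` (hypothetical wrapper, not Mercurial's API):

    import os

    def configure_rayon(numcpus, workers_enabled):
        # a configured CPU count is only a default; an existing
        # RAYON_NUM_THREADS in the environment takes precedence
        if numcpus is not None:
            os.environ.setdefault('RAYON_NUM_THREADS', '%d' % numcpus)
        # disabling workers is a hard override: force a single thread
        if not workers_enabled:
            os.environ['RAYON_NUM_THREADS'] = '1'

    configure_rayon(numcpus=4, workers_enabled=True)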
1216 def status(self, match, subrepos, ignored, clean, unknown):
1217 """Determine the status of the working copy relative to the
1218 dirstate and return a pair of (unsure, status), where status is of type
1219 scmutil.status and:
1220
1221 unsure:
1222 files that might have been modified since the dirstate was
1223 written, but need to be read to be sure (size is the same
1224 but mtime differs)
1225 status.modified:
1226 files that have definitely been modified since the dirstate
1227 was written (different size or mode)
1228 status.clean:
1229 files that have definitely not been modified since the
1230 dirstate was written
1231 """
1232 listignored, listclean, listunknown = ignored, clean, unknown
1233 lookup, modified, added, unknown, ignored = [], [], [], [], []
1234 removed, deleted, clean = [], [], []
1235
1236 dmap = self._map
1237 dmap.preload()
1238
1239 use_rust = True
1240
1241 allowed_matchers = (
1242 matchmod.alwaysmatcher,
1243 matchmod.exactmatcher,
1244 matchmod.includematcher,
1245 )
1246
1247 if rustmod is None:
1248 use_rust = False
1249 elif self._checkcase:
1250 # Case-insensitive filesystems are not handled yet
1251 use_rust = False
1252 elif subrepos:
1253 use_rust = False
1254 elif sparse.enabled:
1255 use_rust = False
1256 elif not isinstance(match, allowed_matchers):
1257 # Some matchers have yet to be implemented
1258 use_rust = False
1259
1260 if use_rust:
1261 try:
1262 return self._rust_status(
1263 match, listclean, listignored, listunknown
1264 )
1265 except rustmod.FallbackError:
1266 pass
1267
1268 def noop(f):
1269 pass
1270
1271 dcontains = dmap.__contains__
1272 dget = dmap.__getitem__
1273 ladd = lookup.append # aka "unsure"
1274 madd = modified.append
1275 aadd = added.append
1276 uadd = unknown.append if listunknown else noop
1277 iadd = ignored.append if listignored else noop
1278 radd = removed.append
1279 dadd = deleted.append
1280 cadd = clean.append if listclean else noop
1281 mexact = match.exact
1282 dirignore = self._dirignore
1283 checkexec = self._checkexec
1284 copymap = self._map.copymap
1285 lastnormaltime = self._lastnormaltime
1286
1287 # We need to do full walks when either
1288 # - we're listing all clean files, or
1289 # - match.traversedir does something, because match.traversedir should
1290 # be called for every dir in the working dir
1291 full = listclean or match.traversedir is not None
1292 for fn, st in pycompat.iteritems(
1293 self.walk(match, subrepos, listunknown, listignored, full=full)
1294 ):
1295 if not dcontains(fn):
1296 if (listignored or mexact(fn)) and dirignore(fn):
1297 if listignored:
1298 iadd(fn)
1299 else:
1300 uadd(fn)
1301 continue
1302
1303 # This is equivalent to 'state, mode, size, time = dmap[fn]' but not
1304 # written like that for performance reasons. dmap[fn] is not a
1305 # Python tuple in compiled builds. The CPython UNPACK_SEQUENCE
1306 # opcode has fast paths when the value to be unpacked is a tuple or
1307 # a list, but falls back to creating a full-fledged iterator in
1308 # general. That is much slower than simply accessing and storing the
1309 # tuple members one by one.
1310 t = dget(fn)
1311 state = t.state
1312 mode = t[1]
1313 size = t[2]
1314 time = t[3]
1315
1316 if not st and t.tracked:
1317 dadd(fn)
1318 elif state == b'n':
1319 if (
1320 size >= 0
1321 and (
1322 (size != st.st_size and size != st.st_size & _rangemask)
1323 or ((mode ^ st.st_mode) & 0o100 and checkexec)
1324 )
1325 or t.from_p2
1326 or fn in copymap
1327 ):
1328 if stat.S_ISLNK(st.st_mode) and size != st.st_size:
1329 # issue6456: the size returned may be longer due to
1330 # EXT4 fscrypt encryption (undecided), so force a lookup.
1331 ladd(fn)
1332 else:
1333 madd(fn)
1334 elif (
1335 time != st[stat.ST_MTIME]
1336 and time != st[stat.ST_MTIME] & _rangemask
1337 ):
1338 ladd(fn)
1339 elif st[stat.ST_MTIME] == lastnormaltime:
1340 # fn may have just been marked as normal and it may have
1341 # changed in the same second without changing its size.
1342 # This can happen if we quickly do multiple commits.
1343 # Force lookup, so we don't miss such a racy file change.
1344 ladd(fn)
1345 elif listclean:
1346 cadd(fn)
1347 elif t.merged:
1348 madd(fn)
1349 elif t.added:
1350 aadd(fn)
1351 elif t.removed:
1352 radd(fn)
1353 status = scmutil.status(
1354 modified, added, removed, deleted, unknown, ignored, clean
1355 )
1356 return (lookup, status)
1357
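The `state == b'n'` branch above is the heart of status: a size or mode mismatch means definitely modified, an mtime-only mismatch means "unsure" (added to `lookup`), and everything else is clean. A condensed, self-contained sketch of that decision table, simplified to ignore `_rangemask` truncation, the exec bit, symlinks and the `lastnormaltime` race:

    def classify(entry_size, entry_mtime, st_size, st_mtime):
        # mirrors the 'n' (normal) branch of dirstate.status, simplified
        if entry_size >= 0 and entry_size != st_size:
            return 'modified'  # size differs: definitely changed
        if entry_mtime != st_mtime:
            return 'unsure'    # same size, different mtime: must read contents
        return 'clean'

    assert classify(12, 100, 13, 100) == 'modified'
    assert classify(12, 100, 12, 101) == 'unsure'
    assert classify(12, 100, 12, 100) == 'clean'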
1358 def matches(self, match):
1359 """
1360 return files in the dirstate (in whatever state) filtered by match
1361 """
1362 dmap = self._map
1363 if rustmod is not None:
1364 dmap = self._map._rustmap
1365
1366 if match.always():
1367 return dmap.keys()
1368 files = match.files()
1369 if match.isexact():
1370 # fast path -- filter the other way around, since typically files is
1371 # much smaller than dmap
1372 return [f for f in files if f in dmap]
1373 if match.prefix() and all(fn in dmap for fn in files):
1374 # fast path -- all the values are known to be files, so just return
1375 # that
1376 return list(files)
1377 return [f for f in dmap if match(f)]
1378
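The two fast paths in `matches` differ only in which side drives the loop: with an exact matcher, the (typically small) file list is filtered through O(1) membership tests against the (typically large) dmap, instead of scanning every tracked file and invoking the matcher. A toy illustration of the direction (sizes are illustrative, not a benchmark):

    dmap = {b'%d' % i: None for i in range(100000)}  # many tracked files
    files = [b'5', b'42', b'nope']                   # small exact set

    # fast path: len(files) dictionary lookups
    exact = [f for f in files if f in dmap]

    # generic path: iterate all of dmap and call the matcher each time
    match = set(files).__contains__
    scanned = [f for f in dmap if match(f)]

    assert sorted(exact) == sorted(scanned)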
1379 def _actualfilename(self, tr):
1380 if tr:
1381 return self._pendingfilename
1382 else:
1383 return self._filename
1384
1385 def savebackup(self, tr, backupname):
1386 '''Save current dirstate into backup file'''
1387 filename = self._actualfilename(tr)
1388 assert backupname != filename
1389
1390 # use '_writedirstate' instead of 'write' to make sure changes are
1391 # actually written out, because the latter skips writing while a
1392 # transaction is running. The output file is used to create the backup.
1393 if self._dirty or not self._opener.exists(filename):
1394 self._writedirstate(
1395 self._opener(filename, b"w", atomictemp=True, checkambig=True)
1396 )
1397
1398 if tr:
1399 # ensure that subsequent tr.writepending returns True for
1400 # changes written out above, even if dirstate is never
1401 # changed after this
1402 tr.addfilegenerator(
1403 b'dirstate',
1404 (self._filename,),
1405 self._writedirstate,
1406 location=b'plain',
1407 )
1408
1409 # ensure that the pending file written above is unlinked on
1410 # failure, even if tr.writepending isn't invoked until the
1411 # end of this transaction
1412 tr.registertmp(filename, location=b'plain')
1413
1414 self._opener.tryunlink(backupname)
1415 # hardlink backup is okay because _writedirstate is always called
1416 # with an "atomictemp=True" file.
1417 util.copyfile(
1418 self._opener.join(filename),
1419 self._opener.join(backupname),
1420 hardlink=True,
1421 )
1422
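The hardlink is safe here only because `_writedirstate` always replaces the dirstate file atomically (writing a temp file and renaming it onto a fresh inode) rather than mutating it in place, so the backup can never observe a half-written state. A minimal sketch of what a hardlink-with-copy-fallback helper has to do, under the assumption that `util.copyfile(..., hardlink=True)` behaves roughly like this (the real helper handles more edge cases):

    import os
    import shutil

    def backup(src, dst, hardlink=True):
        # drop any stale backup first, ignoring a missing file
        try:
            os.unlink(dst)
        except OSError:
            pass
        if hardlink:
            try:
                os.link(src, dst)  # instant; shares the now-immutable inode
                return
            except OSError:
                pass  # cross-device link or unsupported FS: fall back
        shutil.copyfile(src, dst)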
1423 def restorebackup(self, tr, backupname):
1424 '''Restore dirstate from backup file'''
1425 # this "invalidate()" prevents "wlock.release()" from writing out
1426 # dirstate changes after restoring from the backup file
1427 self.invalidate()
1428 filename = self._actualfilename(tr)
1429 o = self._opener
1430 if util.samefile(o.join(backupname), o.join(filename)):
1431 o.unlink(backupname)
1432 else:
1433 o.rename(backupname, filename, checkambig=True)
1434
1435 def clearbackup(self, tr, backupname):
1436 '''Clear backup file'''
1437 self._opener.unlink(backupname)
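Taken together, `savebackup`, `restorebackup` and `clearbackup` form a small transaction-scoped protocol: snapshot the dirstate, attempt a risky working-copy operation, then either roll back or discard the snapshot. A hedged usage sketch (the driver function and its arguments are hypothetical; only the three method names come from this file):

    def with_dirstate_backup(dirstate, tr, op):
        backupname = b'dirstate.backup'
        dirstate.savebackup(tr, backupname)
        try:
            op()  # the risky operation
        except Exception:
            dirstate.restorebackup(tr, backupname)  # roll back
            raise
        else:
            dirstate.clearbackup(tr, backupname)  # success: drop snapshot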
@@ -1,552 +1,556
1 # parsers.py - Python implementation of parsers.c
2 #
3 # Copyright 2009 Olivia Mackall <olivia@selenic.com> and others
4 #
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
7
8 from __future__ import absolute_import
9
10 import struct
11 import zlib
12
13 from ..node import (
14 nullrev,
15 sha1nodeconstants,
16 )
17 from .. import (
18 error,
19 pycompat,
20 revlogutils,
21 util,
22 )
23
24 from ..revlogutils import nodemap as nodemaputil
25 from ..revlogutils import constants as revlog_constants
26
27 stringio = pycompat.bytesio
28
29
30 _pack = struct.pack
31 _unpack = struct.unpack
32 _compress = zlib.compress
33 _decompress = zlib.decompress
34
35
36 # a special value used internally for `size` if the file comes from the other parent
37 FROM_P2 = -2
38
39 # a special value used internally for `size` if the file is modified/merged/added
40 NONNORMAL = -1
41
42
43 class dirstatetuple(object):
44 """represent a dirstate entry
45
46 It contains:
47
48 - state (one of 'n', 'a', 'r', 'm')
49 - mode
50 - size
51 - mtime
52 """
53
54 __slots__ = ('_state', '_mode', '_size', '_mtime')
55
56 def __init__(self, state, mode, size, mtime):
57 self._state = state
58 self._mode = mode
59 self._size = size
60 self._mtime = mtime
61
62 def __getitem__(self, idx):
63 if idx == 0 or idx == -4:
64 return self._state
65 elif idx == 1 or idx == -3:
66 return self._mode
67 elif idx == 2 or idx == -2:
68 return self._size
69 elif idx == 3 or idx == -1:
70 return self._mtime
71 else:
72 raise IndexError(idx)
73
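`__getitem__` keeps dirstatetuple compatible with older code that still indexes entries like 4-tuples: positions 0/-4 through 3/-1 behave exactly as they would on a real tuple of length four, and anything else raises IndexError. A doctest-style sketch, using the class defined above:

    t = dirstatetuple(b'n', 0o644, 12, 42)
    assert t[0] == t[-4] == b'n'   # state
    assert t[1] == t[-3] == 0o644  # mode
    assert t[2] == t[-2] == 12     # size
    assert t[3] == t[-1] == 42     # mtime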
74 @property
75 def state(self):
76 """
77 States are:
78 n normal
79 m needs merging
80 r marked for removal
81 a marked for addition
82
83 XXX This "state" is a bit obscure and mostly a direct expression of the
84 dirstatev1 format. It would make sense to ultimately deprecate it in
85 favor of the more "semantic" attributes.
86 """
87 return self._state
88
89 @property
90 def tracked(self):
91 """True if the file is tracked in the working copy"""
92 return self._state in b"nma"
93
94 @property
95 def added(self):
96 """True if the file has been added"""
97 return self._state == b'a'
98
99 @property
100 def merged(self):
101 """True if the file has been merged
102
103 Should only be set if a merge is in progress in the dirstate
104 """
105 return self._state == b'm'
106
107 @property
108 def from_p2(self):
109 """True if the file has been fetched from p2 during the current merge
110
111 This is only True if the file is currently tracked.
112
113 Should only be set if a merge is in progress in the dirstate
114 """
115 return self._state == b'n' and self._size == FROM_P2
116
117 @property
118 def from_p2_removed(self):
119 """True if the file has been removed, but was "from_p2" initially
120
121 This property seems like an abstraction leakage and should probably be
122 dealt with in this class (or maybe the dirstatemap) directly.
123 """
124 return self._state == b'r' and self._size == FROM_P2
125
126 @property
127 def removed(self):
128 """True if the file has been removed"""
129 return self._state == b'r'
130
131 @property
132 def merged_removed(self):
133 """True if the file has been removed, but was "merged" initially
134
135 This property seems like an abstraction leakage and should probably be
136 dealt with in this class (or maybe the dirstatemap) directly.
137 """
138 return self._state == b'r' and self._size == NONNORMAL
139
140 def v1_state(self):
141 """return a "state" suitable for v1 serialization"""
142 return self._state
143
144 def v1_mode(self):
145 """return a "mode" suitable for v1 serialization"""
146 return self._mode
147
148 def v1_size(self):
149 """return a "size" suitable for v1 serialization"""
150 return self._size
151
152 def v1_mtime(self):
153 """return a "mtime" suitable for v1 serialization"""
154 return self._mtime
155
156 def need_delay(self, now):
157 """True if the stored mtime would be ambiguous with the current time"""
158 return self._state == b'n' and self._mtime == now
159
160
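`need_delay` is the method this changeset adds: an entry in state 'n' whose recorded mtime equals the dirstate write time `now` is ambiguous, since the file could still change within the same clock tick without altering size or mtime. Callers can now ask the entry itself instead of open-coding the `state == b'n' and mtime == now` test (compare `pack_dirstate` at the bottom of this file). A small sketch using the class above:

    now = 1000
    entries = {
        b'a': dirstatetuple(b'n', 0o644, 12, 1000),  # written "now": ambiguous
        b'b': dirstatetuple(b'n', 0o644, 12, 999),   # older mtime: safe
        b'c': dirstatetuple(b'a', 0, -1, 1000),      # not 'n': never delayed
    }
    ambiguous = [f for f, e in entries.items() if e.need_delay(now)]
    assert ambiguous == [b'a']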
161 def gettype(q):
162 return int(q & 0xFFFF)
163
164
165 class BaseIndexObject(object):
166 # Can I be passed to an algorithm implemented in Rust?
167 rust_ext_compat = 0
168 # Format of an index entry according to Python's `struct` language
169 index_format = revlog_constants.INDEX_ENTRY_V1
170 # Size of a C unsigned long long int, platform independent
171 big_int_size = struct.calcsize(b'>Q')
172 # Size of a C long int, platform independent
173 int_size = struct.calcsize(b'>i')
174 # An empty index entry, used as a default value to be overridden, or nullrev
175 null_item = (
176 0,
177 0,
178 0,
179 -1,
180 -1,
181 -1,
182 -1,
183 sha1nodeconstants.nullid,
184 0,
185 0,
186 revlog_constants.COMP_MODE_INLINE,
187 revlog_constants.COMP_MODE_INLINE,
188 )
189
190 @util.propertycache
191 def entry_size(self):
192 return self.index_format.size
193
194 @property
195 def nodemap(self):
196 msg = b"index.nodemap is deprecated, use index.[has_node|rev|get_rev]"
197 util.nouideprecwarn(msg, b'5.3', stacklevel=2)
198 return self._nodemap
199
200 @util.propertycache
201 def _nodemap(self):
202 nodemap = nodemaputil.NodeMap({sha1nodeconstants.nullid: nullrev})
203 for r in range(0, len(self)):
204 n = self[r][7]
205 nodemap[n] = r
206 return nodemap
207
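`util.propertycache` computes `_nodemap` once on first access and stores the result in the instance `__dict__`, which is why `clearcaches` can drop it with a plain `__dict__.pop` and why `append`/`_stripnodes` check `'_nodemap' in vars(self)` before doing incremental maintenance. A minimal stand-in for such a descriptor (an assumption about its behavior, not necessarily Mercurial's exact implementation):

    class propertycache(object):
        def __init__(self, func):
            self.func = func
            self.name = func.__name__

        def __get__(self, obj, objtype=None):
            # compute once, then shadow this descriptor via the instance
            # dict, so later reads are plain attribute lookups
            value = self.func(obj)
            obj.__dict__[self.name] = value
            return value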
208 def has_node(self, node):
209 """return True if the node exists in the index"""
210 return node in self._nodemap
211
212 def rev(self, node):
213 """return a revision for a node
214
215 If the node is unknown, raise a RevlogError"""
216 return self._nodemap[node]
217
218 def get_rev(self, node):
219 """return a revision for a node
220
221 If the node is unknown, return None"""
222 return self._nodemap.get(node)
223
224 def _stripnodes(self, start):
225 if '_nodemap' in vars(self):
226 for r in range(start, len(self)):
227 n = self[r][7]
228 del self._nodemap[n]
229
230 def clearcaches(self):
231 self.__dict__.pop('_nodemap', None)
232
233 def __len__(self):
234 return self._lgt + len(self._extra)
235
236 def append(self, tup):
237 if '_nodemap' in vars(self):
238 self._nodemap[tup[7]] = len(self)
239 data = self._pack_entry(len(self), tup)
240 self._extra.append(data)
241
242 def _pack_entry(self, rev, entry):
243 assert entry[8] == 0
244 assert entry[9] == 0
245 return self.index_format.pack(*entry[:8])
246
247 def _check_index(self, i):
248 if not isinstance(i, int):
249 raise TypeError(b"expecting int indexes")
250 if i < 0 or i >= len(self):
251 raise IndexError
252
253 def __getitem__(self, i):
254 if i == -1:
255 return self.null_item
256 self._check_index(i)
257 if i >= self._lgt:
258 data = self._extra[i - self._lgt]
259 else:
260 index = self._calculate_index(i)
261 data = self._data[index : index + self.entry_size]
262 r = self._unpack_entry(i, data)
263 if self._lgt and i == 0:
264 offset = revlogutils.offset_type(0, gettype(r[0]))
265 r = (offset,) + r[1:]
266 return r
267
268 def _unpack_entry(self, rev, data):
269 r = self.index_format.unpack(data)
270 r = r + (
271 0,
272 0,
273 revlog_constants.COMP_MODE_INLINE,
274 revlog_constants.COMP_MODE_INLINE,
275 )
276 return r
277
278 def pack_header(self, header):
279 """pack header information as binary"""
280 v_fmt = revlog_constants.INDEX_HEADER
281 return v_fmt.pack(header)
282
283 def entry_binary(self, rev):
284 """return the raw binary string representing a revision"""
285 entry = self[rev]
286 p = revlog_constants.INDEX_ENTRY_V1.pack(*entry[:8])
287 if rev == 0:
288 p = p[revlog_constants.INDEX_HEADER.size :]
289 return p
290
291
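Everything in this base class reduces to fixed-size `struct` records: `index_format.pack`/`unpack` convert between the eight on-disk fields and Python tuples, with sidedata fields and compression modes appended as constants for v1. A self-contained sketch with an assumed v1-like layout (the real format is defined in revlogutils.constants and may differ):

    import struct

    # assumed layout: offset+flags, compressed length, uncompressed length,
    # base rev, link rev, p1, p2, 20-byte node hash, padded to 64 bytes
    INDEX_ENTRY_V1 = struct.Struct(b'>Qiiiiii20s12x')

    entry = (0, 11, 24, 0, 0, -1, -1, b'\x00' * 20)
    data = INDEX_ENTRY_V1.pack(*entry)
    assert INDEX_ENTRY_V1.size == len(data) == 64
    assert INDEX_ENTRY_V1.unpack(data) == entry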
292 class IndexObject(BaseIndexObject):
293 def __init__(self, data):
294 assert len(data) % self.entry_size == 0, (
295 len(data),
296 self.entry_size,
297 len(data) % self.entry_size,
298 )
299 self._data = data
300 self._lgt = len(data) // self.entry_size
301 self._extra = []
302
303 def _calculate_index(self, i):
304 return i * self.entry_size
305
306 def __delitem__(self, i):
307 if not isinstance(i, slice) or not i.stop == -1 or i.step is not None:
308 raise ValueError(b"deleting slices only supports a:-1 with step 1")
309 i = i.start
310 self._check_index(i)
311 self._stripnodes(i)
312 if i < self._lgt:
313 self._data = self._data[: i * self.entry_size]
314 self._lgt = i
315 self._extra = []
316 else:
317 self._extra = self._extra[: i - self._lgt]
318
319
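`__delitem__` deliberately supports only one shape of deletion, `del index[rev:-1]`, i.e. truncating from `rev` to the end, which is what stripping revisions needs: on-disk entries are dropped by slicing `_data`, in-memory appends by slicing `_extra`. A toy list-backed analog of that restricted contract:

    class TruncOnly(list):
        # mimics IndexObject.__delitem__: only 'del x[start:-1]' is allowed
        def __delitem__(self, i):
            if not isinstance(i, slice) or i.stop != -1 or i.step is not None:
                raise ValueError("deleting slices only supports a:-1 with step 1")
            list.__delitem__(self, slice(i.start, None))  # truncate the tail

    x = TruncOnly(range(8))
    del x[5:-1]  # keep items 0..4, drop 5 and everything after it
    assert list(x) == [0, 1, 2, 3, 4]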
320 class PersistentNodeMapIndexObject(IndexObject):
321 """a debug-oriented class to test persistent nodemap
322
323 We need a simple python object to test API and higher level behavior. See
324 the Rust implementation for more serious usage. This should be used only
325 through the dedicated `devel.persistent-nodemap` config.
326 """
327
328 def nodemap_data_all(self):
329 """Return bytes containing a full serialization of a nodemap
330
331 The nodemap should be valid for the full set of revisions in the
332 index."""
333 return nodemaputil.persistent_data(self)
334
335 def nodemap_data_incremental(self):
336 """Return bytes containing an incremental update to the persistent nodemap
337
338 This contains the data for an append-only update of the data provided
339 in the last call to `update_nodemap_data`.
340 """
341 if self._nm_root is None:
342 return None
343 docket = self._nm_docket
344 changed, data = nodemaputil.update_persistent_data(
345 self, self._nm_root, self._nm_max_idx, self._nm_docket.tip_rev
346 )
347
348 self._nm_root = self._nm_max_idx = self._nm_docket = None
349 return docket, changed, data
350
351 def update_nodemap_data(self, docket, nm_data):
352 """provide full block of persisted binary data for a nodemap
353
354 The data are expected to come from disk. See `nodemap_data_all` for a
355 producer of such data."""
356 if nm_data is not None:
357 self._nm_root, self._nm_max_idx = nodemaputil.parse_data(nm_data)
358 if self._nm_root:
359 self._nm_docket = docket
360 else:
361 self._nm_root = self._nm_max_idx = self._nm_docket = None
362
363
364 class InlinedIndexObject(BaseIndexObject):
365 def __init__(self, data, inline=0):
366 self._data = data
367 self._lgt = self._inline_scan(None)
368 self._inline_scan(self._lgt)
369 self._extra = []
370
371 def _inline_scan(self, lgt):
372 off = 0
373 if lgt is not None:
374 self._offsets = [0] * lgt
375 count = 0
376 while off <= len(self._data) - self.entry_size:
377 start = off + self.big_int_size
378 (s,) = struct.unpack(
379 b'>i',
380 self._data[start : start + self.int_size],
381 )
382 if lgt is not None:
383 self._offsets[count] = off
384 count += 1
385 off += self.entry_size + s
386 if off != len(self._data):
387 raise ValueError(b"corrupted data")
388 return count
389
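An inline revlog interleaves each fixed-size index entry with that revision's variable-length data chunk, so offsets cannot be computed by multiplication as in `IndexObject`; `_inline_scan` walks the stream, reading the compressed-length field that sits right after the 8-byte offset word to know how far to skip. A self-contained sketch of the same scan over a made-up 12-byte entry format:

    import struct

    ENTRY_SIZE = 12   # toy format: 8-byte header word + 4-byte chunk length
    BIG_INT_SIZE = 8

    def inline_offsets(data):
        offsets, off = [], 0
        while off <= len(data) - ENTRY_SIZE:
            start = off + BIG_INT_SIZE
            (s,) = struct.unpack(b'>i', data[start:start + 4])
            offsets.append(off)
            off += ENTRY_SIZE + s  # skip the entry plus its inline chunk
        if off != len(data):
            raise ValueError('corrupted data')
        return offsets

    # two entries carrying 3 and 0 bytes of inline chunk data
    blob = struct.pack(b'>Qi', 0, 3) + b'abc' + struct.pack(b'>Qi', 0, 0)
    assert inline_offsets(blob) == [0, 15]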
390 def __delitem__(self, i):
391 if not isinstance(i, slice) or not i.stop == -1 or i.step is not None:
392 raise ValueError(b"deleting slices only supports a:-1 with step 1")
393 i = i.start
394 self._check_index(i)
395 self._stripnodes(i)
396 if i < self._lgt:
397 self._offsets = self._offsets[:i]
398 self._lgt = i
399 self._extra = []
400 else:
401 self._extra = self._extra[: i - self._lgt]
402
403 def _calculate_index(self, i):
404 return self._offsets[i]
405
406
407 def parse_index2(data, inline, revlogv2=False):
408 if not inline:
409 cls = IndexObject2 if revlogv2 else IndexObject
410 return cls(data), None
411 cls = InlinedIndexObject
412 return cls(data, inline), (0, data)
413
414
415 def parse_index_cl_v2(data):
416 return IndexChangelogV2(data), None
417
418
419 class IndexObject2(IndexObject):
420 index_format = revlog_constants.INDEX_ENTRY_V2
421
422 def replace_sidedata_info(
423 self,
424 rev,
425 sidedata_offset,
426 sidedata_length,
427 offset_flags,
428 compression_mode,
429 ):
430 """
431 Replace an existing index entry's sidedata offset and length with new
432 ones.
433 This cannot be used outside of the context of sidedata rewriting,
434 inside the transaction that creates the revision `rev`.
435 """
436 if rev < 0:
437 raise KeyError
438 self._check_index(rev)
439 if rev < self._lgt:
440 msg = b"cannot rewrite entries outside of this transaction"
441 raise KeyError(msg)
442 else:
443 entry = list(self[rev])
444 entry[0] = offset_flags
445 entry[8] = sidedata_offset
446 entry[9] = sidedata_length
447 entry[11] = compression_mode
448 entry = tuple(entry)
449 new = self._pack_entry(rev, entry)
450 self._extra[rev - self._lgt] = new
451
452 def _unpack_entry(self, rev, data):
453 data = self.index_format.unpack(data)
454 entry = data[:10]
455 data_comp = data[10] & 3
456 sidedata_comp = (data[10] & (3 << 2)) >> 2
457 return entry + (data_comp, sidedata_comp)
458
459 def _pack_entry(self, rev, entry):
460 data = entry[:10]
461 data_comp = entry[10] & 3
462 sidedata_comp = (entry[11] & 3) << 2
463 data += (data_comp | sidedata_comp,)
464
465 return self.index_format.pack(*data)
466
467 def entry_binary(self, rev):
468 """return the raw binary string representing a revision"""
469 entry = self[rev]
470 return self._pack_entry(rev, entry)
471
472 def pack_header(self, header):
473 """pack header information as binary"""
474 msg = 'version header should go in the docket, not the index: %d'
475 msg %= header
476 raise error.ProgrammingError(msg)
477
478
479 class IndexChangelogV2(IndexObject2):
480 index_format = revlog_constants.INDEX_ENTRY_CL_V2
481
482 def _unpack_entry(self, rev, data, r=True):
483 items = self.index_format.unpack(data)
484 entry = items[:3] + (rev, rev) + items[3:8]
485 data_comp = items[8] & 3
486 sidedata_comp = (items[8] >> 2) & 3
487 return entry + (data_comp, sidedata_comp)
488
489 def _pack_entry(self, rev, entry):
490 assert entry[3] == rev, entry[3]
491 assert entry[4] == rev, entry[4]
492 data = entry[:3] + entry[5:10]
493 data_comp = entry[10] & 3
494 sidedata_comp = (entry[11] & 3) << 2
495 data += (data_comp | sidedata_comp,)
496 return self.index_format.pack(*data)
497
498
499 def parse_index_devel_nodemap(data, inline):
500 """like parse_index2, but always return a PersistentNodeMapIndexObject"""
501 return PersistentNodeMapIndexObject(data), None
502
503
504 def parse_dirstate(dmap, copymap, st):
505 parents = [st[:20], st[20:40]]
506 # dereference fields so they will be local in loop
507 format = b">cllll"
508 e_size = struct.calcsize(format)
509 pos1 = 40
510 l = len(st)
511
512 # the inner loop
513 while pos1 < l:
514 pos2 = pos1 + e_size
515 e = _unpack(b">cllll", st[pos1:pos2]) # a literal here is faster
516 pos1 = pos2 + e[4]
517 f = st[pos2:pos1]
518 if b'\0' in f:
519 f, c = f.split(b'\0')
520 copymap[f] = c
521 dmap[f] = dirstatetuple(*e[:4])
522 return parents
523
524
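A v1 dirstate is a flat byte stream: two 20-byte parent hashes, then repeated records made of a `>cllll` header (state, mode, size, mtime, name length) followed by the file name, with an optional NUL-separated copy source folded into the name field. A stdlib-only round trip of a single record, mirroring the loop above:

    import struct

    HEADER = b'>cllll'
    state, mode, size, mtime = b'n', 0o644, 12, 1000
    name = b'dir/file.txt\0old-name.txt'  # rename: copy source after NUL

    record = struct.pack(HEADER, state, mode, size, mtime, len(name)) + name

    # decode it back the way parse_dirstate does
    hsize = struct.calcsize(HEADER)
    e = struct.unpack(HEADER, record[:hsize])
    f = record[hsize:hsize + e[4]]
    f, copysource = f.split(b'\0')
    assert (e[0], f, copysource) == (b'n', b'dir/file.txt', b'old-name.txt')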
525 def pack_dirstate(dmap, copymap, pl, now):
526 now = int(now)
527 cs = stringio()
528 write = cs.write
529 write(b"".join(pl))
530 for f, e in pycompat.iteritems(dmap):
531 if e[0] == b'n' and e[3] == now:
532 # The file was last modified "simultaneously" with the current
533 # write to dirstate (i.e. within the same second for file-
534 # systems with a granularity of 1 sec). This commonly happens
535 # for at least a couple of files on 'update'.
536 # The user could change the file without changing its size
537 # within the same second. Invalidate the file's mtime in
538 # dirstate, forcing future 'status' calls to compare the
539 # contents of the file if the size is the same. This prevents
540 # mistakenly treating such files as clean.
541 e = dirstatetuple(e[0], e[1], e[2], -1)
542 dmap[f] = e
543
544 if f in copymap:
545 f = b"%s\0%s" % (f, copymap[f])
546 e = _pack(
547 b">cllll",
548 e.v1_state(),
549 e.v1_mode(),
550 e.v1_size(),
551 e.v1_mtime(),
552 len(f),
553 )
554 write(e)
555 write(f)
556 return cs.getvalue()
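The `e[0] == b'n' and e[3] == now` test in the loop above is exactly the predicate that the new `dirstatetuple.need_delay(now)` encapsulates, so a follow-up could rewrite the condition as `e.need_delay(now)`. A tiny sketch of the invalidation effect, using the class defined earlier in this file (values hypothetical):

    now = 1000
    e = dirstatetuple(b'n', 0o644, 12, 1000)
    assert e.need_delay(now)       # same-second write: mtime is ambiguous

    # what pack_dirstate does to such an entry before serializing it:
    e = dirstatetuple(e[0], e[1], e[2], -1)
    assert e.v1_mtime() == -1      # future status calls must re-check contents
    assert not e.need_delay(now)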