dirstatemap: replace `removefile` by an explicit `entry.set_untracked()`...

Author:    marmoute
Changeset: r48701:3853e6ee (branch: default)
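The commit replaces the dirstatemap-level `removefile` call with an explicit mutation of the entry itself, backed by the new C method added below. A minimal sketch of the before/after call pattern, assuming a dirstatemap `dmap` holding an entry for a file `f` (illustrative only; `removefile`'s exact signature is not shown in this changeset):

    # before (hypothetical call site): the map-level method did the work
    dmap.removefile(f)

    # after: the caller flips the entry explicitly
    entry = dmap[f]
    entry.set_untracked()  # implemented below as dirstate_item_set_untracked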
--- a/mercurial/cext/parsers.c
+++ b/mercurial/cext/parsers.c
@@ -1,989 +1,1006 @@
/*
 parsers.c - efficient content parsing

 Copyright 2008 Olivia Mackall <olivia@selenic.com> and others

 This software may be used and distributed according to the terms of
 the GNU General Public License, incorporated herein by reference.
*/

#define PY_SSIZE_T_CLEAN
#include <Python.h>
#include <ctype.h>
#include <stddef.h>
#include <string.h>

#include "bitmanipulation.h"
#include "charencode.h"
#include "util.h"

#ifdef IS_PY3K
/* The mapping of Python types is meant to be temporary to get Python
 * 3 to compile. We should remove this once Python 3 support is fully
 * supported and proper types are used in the extensions themselves. */
#define PyInt_Check PyLong_Check
#define PyInt_FromLong PyLong_FromLong
#define PyInt_FromSsize_t PyLong_FromSsize_t
#define PyInt_AsLong PyLong_AsLong
#endif

static const char *const versionerrortext = "Python minor version mismatch";

static const int dirstate_v1_from_p2 = -2;
static const int dirstate_v1_nonnormal = -1;
static const int ambiguous_time = -1;

static PyObject *dict_new_presized(PyObject *self, PyObject *args)
{
        Py_ssize_t expected_size;

        if (!PyArg_ParseTuple(args, "n:make_presized_dict", &expected_size)) {
                return NULL;
        }

        return _dict_new_presized(expected_size);
}

static inline dirstateItemObject *make_dirstate_item(char state, int mode,
                                                     int size, int mtime)
{
        dirstateItemObject *t =
            PyObject_New(dirstateItemObject, &dirstateItemType);
        if (!t) {
                return NULL;
        }
        t->state = state;
        t->mode = mode;
        t->size = size;
        t->mtime = mtime;
        return t;
}

static PyObject *dirstate_item_new(PyTypeObject *subtype, PyObject *args,
                                   PyObject *kwds)
{
        /* We do all the initialization here and not a tp_init function because
         * dirstate_item is immutable. */
        dirstateItemObject *t;
        char state;
        int size, mode, mtime;
        if (!PyArg_ParseTuple(args, "ciii", &state, &mode, &size, &mtime)) {
                return NULL;
        }

        t = (dirstateItemObject *)subtype->tp_alloc(subtype, 1);
        if (!t) {
                return NULL;
        }
        t->state = state;
        t->mode = mode;
        t->size = size;
        t->mtime = mtime;

        return (PyObject *)t;
}

static void dirstate_item_dealloc(PyObject *o)
{
        PyObject_Del(o);
}

static Py_ssize_t dirstate_item_length(PyObject *o)
{
        return 4;
}

static PyObject *dirstate_item_item(PyObject *o, Py_ssize_t i)
{
        dirstateItemObject *t = (dirstateItemObject *)o;
        switch (i) {
        case 0:
                return PyBytes_FromStringAndSize(&t->state, 1);
        case 1:
                return PyInt_FromLong(t->mode);
        case 2:
                return PyInt_FromLong(t->size);
        case 3:
                return PyInt_FromLong(t->mtime);
        default:
                PyErr_SetString(PyExc_IndexError, "index out of range");
                return NULL;
        }
}

static PySequenceMethods dirstate_item_sq = {
    dirstate_item_length, /* sq_length */
    0,                    /* sq_concat */
    0,                    /* sq_repeat */
    dirstate_item_item,   /* sq_item */
    0,                    /* sq_ass_item */
    0,                    /* sq_contains */
    0,                    /* sq_inplace_concat */
    0                     /* sq_inplace_repeat */
};

static PyObject *dirstate_item_v1_state(dirstateItemObject *self)
{
        return PyBytes_FromStringAndSize(&self->state, 1);
};

static PyObject *dirstate_item_v1_mode(dirstateItemObject *self)
{
        return PyInt_FromLong(self->mode);
};

static PyObject *dirstate_item_v1_size(dirstateItemObject *self)
{
        return PyInt_FromLong(self->size);
};

static PyObject *dirstate_item_v1_mtime(dirstateItemObject *self)
{
        return PyInt_FromLong(self->mtime);
};

static PyObject *dm_nonnormal(dirstateItemObject *self)
{
        if (self->state != 'n' || self->mtime == ambiguous_time) {
                Py_RETURN_TRUE;
        } else {
                Py_RETURN_FALSE;
        }
};
static PyObject *dm_otherparent(dirstateItemObject *self)
{
        if (self->size == dirstate_v1_from_p2) {
                Py_RETURN_TRUE;
        } else {
                Py_RETURN_FALSE;
        }
};

static PyObject *dirstate_item_need_delay(dirstateItemObject *self,
                                          PyObject *value)
{
        long now;
        if (!pylong_to_long(value, &now)) {
                return NULL;
        }
        if (self->state == 'n' && self->mtime == now) {
                Py_RETURN_TRUE;
        } else {
                Py_RETURN_FALSE;
        }
};

/* This will never change since it's bound to V1, unlike `make_dirstate_item`
 */
static inline dirstateItemObject *
dirstate_item_from_v1_data(char state, int mode, int size, int mtime)
{
        dirstateItemObject *t =
            PyObject_New(dirstateItemObject, &dirstateItemType);
        if (!t) {
                return NULL;
        }
        t->state = state;
        t->mode = mode;
        t->size = size;
        t->mtime = mtime;
        return t;
}

/* This will never change since it's bound to V1, unlike `dirstate_item_new` */
static PyObject *dirstate_item_from_v1_meth(PyTypeObject *subtype,
                                            PyObject *args)
{
        /* We do all the initialization here and not a tp_init function because
         * dirstate_item is immutable. */
        dirstateItemObject *t;
        char state;
        int size, mode, mtime;
        if (!PyArg_ParseTuple(args, "ciii", &state, &mode, &size, &mtime)) {
                return NULL;
        }

        t = (dirstateItemObject *)subtype->tp_alloc(subtype, 1);
        if (!t) {
                return NULL;
        }
        t->state = state;
        t->mode = mode;
        t->size = size;
        t->mtime = mtime;

        return (PyObject *)t;
};

/* This means the next status call will have to actually check its content
   to make sure it is correct. */
static PyObject *dirstate_item_set_possibly_dirty(dirstateItemObject *self)
{
        self->mtime = ambiguous_time;
        Py_RETURN_NONE;
}

+static PyObject *dirstate_item_set_untracked(dirstateItemObject *self)
+{
+        if (self->state == 'm') {
+                self->size = dirstate_v1_nonnormal;
+        } else if (self->state == 'n' && self->size == dirstate_v1_from_p2) {
+                self->size = dirstate_v1_from_p2;
+        } else {
+                self->size = 0;
+        }
+        self->state = 'r';
+        self->mode = 0;
+        self->mtime = 0;
+        Py_RETURN_NONE;
+}
+
static PyMethodDef dirstate_item_methods[] = {
    {"v1_state", (PyCFunction)dirstate_item_v1_state, METH_NOARGS,
     "return a \"state\" suitable for v1 serialization"},
    {"v1_mode", (PyCFunction)dirstate_item_v1_mode, METH_NOARGS,
     "return a \"mode\" suitable for v1 serialization"},
    {"v1_size", (PyCFunction)dirstate_item_v1_size, METH_NOARGS,
     "return a \"size\" suitable for v1 serialization"},
    {"v1_mtime", (PyCFunction)dirstate_item_v1_mtime, METH_NOARGS,
     "return a \"mtime\" suitable for v1 serialization"},
    {"need_delay", (PyCFunction)dirstate_item_need_delay, METH_O,
     "True if the stored mtime would be ambiguous with the current time"},
    {"from_v1_data", (PyCFunction)dirstate_item_from_v1_meth, METH_O,
     "build a new DirstateItem object from V1 data"},
    {"set_possibly_dirty", (PyCFunction)dirstate_item_set_possibly_dirty,
     METH_NOARGS, "mark a file as \"possibly dirty\""},
256 {"set_untracked", (PyCFunction)dirstate_item_set_untracked, METH_NOARGS,
257 "mark a file as \"untracked\""},
241 {"dm_nonnormal", (PyCFunction)dm_nonnormal, METH_NOARGS,
258 {"dm_nonnormal", (PyCFunction)dm_nonnormal, METH_NOARGS,
242 "True is the entry is non-normal in the dirstatemap sense"},
259 "True is the entry is non-normal in the dirstatemap sense"},
243 {"dm_otherparent", (PyCFunction)dm_otherparent, METH_NOARGS,
260 {"dm_otherparent", (PyCFunction)dm_otherparent, METH_NOARGS,
244 "True is the entry is `otherparent` in the dirstatemap sense"},
261 "True is the entry is `otherparent` in the dirstatemap sense"},
    {NULL} /* Sentinel */
};

static PyObject *dirstate_item_get_mode(dirstateItemObject *self)
{
        return PyInt_FromLong(self->mode);
};

static PyObject *dirstate_item_get_size(dirstateItemObject *self)
{
        return PyInt_FromLong(self->size);
};

static PyObject *dirstate_item_get_mtime(dirstateItemObject *self)
{
        return PyInt_FromLong(self->mtime);
};

static PyObject *dirstate_item_get_state(dirstateItemObject *self)
{
        return PyBytes_FromStringAndSize(&self->state, 1);
};

static PyObject *dirstate_item_get_tracked(dirstateItemObject *self)
{
        if (self->state == 'a' || self->state == 'm' || self->state == 'n') {
                Py_RETURN_TRUE;
        } else {
                Py_RETURN_FALSE;
        }
};

static PyObject *dirstate_item_get_added(dirstateItemObject *self)
{
        if (self->state == 'a') {
                Py_RETURN_TRUE;
        } else {
                Py_RETURN_FALSE;
        }
};

static PyObject *dirstate_item_get_merged(dirstateItemObject *self)
{
        if (self->state == 'm') {
                Py_RETURN_TRUE;
        } else {
                Py_RETURN_FALSE;
        }
};

static PyObject *dirstate_item_get_merged_removed(dirstateItemObject *self)
{
        if (self->state == 'r' && self->size == dirstate_v1_nonnormal) {
                Py_RETURN_TRUE;
        } else {
                Py_RETURN_FALSE;
        }
};

static PyObject *dirstate_item_get_from_p2(dirstateItemObject *self)
{
        if (self->state == 'n' && self->size == dirstate_v1_from_p2) {
                Py_RETURN_TRUE;
        } else {
                Py_RETURN_FALSE;
        }
};

static PyObject *dirstate_item_get_from_p2_removed(dirstateItemObject *self)
{
        if (self->state == 'r' && self->size == dirstate_v1_from_p2) {
                Py_RETURN_TRUE;
        } else {
                Py_RETURN_FALSE;
        }
};

static PyObject *dirstate_item_get_removed(dirstateItemObject *self)
{
        if (self->state == 'r') {
                Py_RETURN_TRUE;
        } else {
                Py_RETURN_FALSE;
        }
};

static PyGetSetDef dirstate_item_getset[] = {
    {"mode", (getter)dirstate_item_get_mode, NULL, "mode", NULL},
    {"size", (getter)dirstate_item_get_size, NULL, "size", NULL},
    {"mtime", (getter)dirstate_item_get_mtime, NULL, "mtime", NULL},
    {"state", (getter)dirstate_item_get_state, NULL, "state", NULL},
    {"tracked", (getter)dirstate_item_get_tracked, NULL, "tracked", NULL},
    {"added", (getter)dirstate_item_get_added, NULL, "added", NULL},
    {"merged_removed", (getter)dirstate_item_get_merged_removed, NULL,
     "merged_removed", NULL},
    {"merged", (getter)dirstate_item_get_merged, NULL, "merged", NULL},
    {"from_p2_removed", (getter)dirstate_item_get_from_p2_removed, NULL,
     "from_p2_removed", NULL},
    {"from_p2", (getter)dirstate_item_get_from_p2, NULL, "from_p2", NULL},
    {"removed", (getter)dirstate_item_get_removed, NULL, "removed", NULL},
    {NULL} /* Sentinel */
};

PyTypeObject dirstateItemType = {
    PyVarObject_HEAD_INIT(NULL, 0)     /* header */
    "dirstate_tuple",                  /* tp_name */
    sizeof(dirstateItemObject),        /* tp_basicsize */
    0,                                 /* tp_itemsize */
    (destructor)dirstate_item_dealloc, /* tp_dealloc */
    0,                                 /* tp_print */
    0,                                 /* tp_getattr */
    0,                                 /* tp_setattr */
    0,                                 /* tp_compare */
    0,                                 /* tp_repr */
    0,                                 /* tp_as_number */
    &dirstate_item_sq,                 /* tp_as_sequence */
    0,                                 /* tp_as_mapping */
    0,                                 /* tp_hash */
    0,                                 /* tp_call */
    0,                                 /* tp_str */
    0,                                 /* tp_getattro */
    0,                                 /* tp_setattro */
    0,                                 /* tp_as_buffer */
    Py_TPFLAGS_DEFAULT,                /* tp_flags */
    "dirstate tuple",                  /* tp_doc */
    0,                                 /* tp_traverse */
    0,                                 /* tp_clear */
    0,                                 /* tp_richcompare */
    0,                                 /* tp_weaklistoffset */
    0,                                 /* tp_iter */
    0,                                 /* tp_iternext */
    dirstate_item_methods,             /* tp_methods */
    0,                                 /* tp_members */
    dirstate_item_getset,              /* tp_getset */
    0,                                 /* tp_base */
    0,                                 /* tp_dict */
    0,                                 /* tp_descr_get */
    0,                                 /* tp_descr_set */
    0,                                 /* tp_dictoffset */
    0,                                 /* tp_init */
    0,                                 /* tp_alloc */
    dirstate_item_new,                 /* tp_new */
};

static PyObject *parse_dirstate(PyObject *self, PyObject *args)
{
        PyObject *dmap, *cmap, *parents = NULL, *ret = NULL;
        PyObject *fname = NULL, *cname = NULL, *entry = NULL;
        char state, *cur, *str, *cpos;
        int mode, size, mtime;
        unsigned int flen, pos = 40;
        Py_ssize_t len = 40;
        Py_ssize_t readlen;

        if (!PyArg_ParseTuple(
                args, PY23("O!O!s#:parse_dirstate", "O!O!y#:parse_dirstate"),
                &PyDict_Type, &dmap, &PyDict_Type, &cmap, &str, &readlen)) {
                goto quit;
        }

        len = readlen;

        /* read parents */
        if (len < 40) {
                PyErr_SetString(PyExc_ValueError,
                                "too little data for parents");
                goto quit;
        }

        parents = Py_BuildValue(PY23("s#s#", "y#y#"), str, (Py_ssize_t)20,
                                str + 20, (Py_ssize_t)20);
        if (!parents) {
                goto quit;
        }

        /* read filenames */
        while (pos >= 40 && pos < len) {
                if (pos + 17 > len) {
                        PyErr_SetString(PyExc_ValueError,
                                        "overflow in dirstate");
                        goto quit;
                }
                cur = str + pos;
                /* unpack header */
                state = *cur;
                mode = getbe32(cur + 1);
                size = getbe32(cur + 5);
                mtime = getbe32(cur + 9);
                flen = getbe32(cur + 13);
                pos += 17;
                cur += 17;
                if (flen > len - pos) {
                        PyErr_SetString(PyExc_ValueError,
                                        "overflow in dirstate");
                        goto quit;
                }

                entry = (PyObject *)dirstate_item_from_v1_data(state, mode,
                                                               size, mtime);
                cpos = memchr(cur, 0, flen);
                if (cpos) {
                        fname = PyBytes_FromStringAndSize(cur, cpos - cur);
                        cname = PyBytes_FromStringAndSize(
                            cpos + 1, flen - (cpos - cur) - 1);
                        if (!fname || !cname ||
                            PyDict_SetItem(cmap, fname, cname) == -1 ||
                            PyDict_SetItem(dmap, fname, entry) == -1) {
                                goto quit;
                        }
                        Py_DECREF(cname);
                } else {
                        fname = PyBytes_FromStringAndSize(cur, flen);
                        if (!fname ||
                            PyDict_SetItem(dmap, fname, entry) == -1) {
                                goto quit;
                        }
                }
                Py_DECREF(fname);
                Py_DECREF(entry);
                fname = cname = entry = NULL;
                pos += flen;
        }

        ret = parents;
        Py_INCREF(ret);
quit:
        Py_XDECREF(fname);
        Py_XDECREF(cname);
        Py_XDECREF(entry);
        Py_XDECREF(parents);
        return ret;
}

/*
 * Build a set of non-normal and other parent entries from the dirstate dmap
 */
static PyObject *nonnormalotherparententries(PyObject *self, PyObject *args)
{
        PyObject *dmap, *fname, *v;
        PyObject *nonnset = NULL, *otherpset = NULL, *result = NULL;
        Py_ssize_t pos;

        if (!PyArg_ParseTuple(args, "O!:nonnormalentries", &PyDict_Type,
                              &dmap)) {
                goto bail;
        }

        nonnset = PySet_New(NULL);
        if (nonnset == NULL) {
                goto bail;
        }

        otherpset = PySet_New(NULL);
        if (otherpset == NULL) {
                goto bail;
        }

        pos = 0;
        while (PyDict_Next(dmap, &pos, &fname, &v)) {
                dirstateItemObject *t;
                if (!dirstate_tuple_check(v)) {
                        PyErr_SetString(PyExc_TypeError,
                                        "expected a dirstate tuple");
                        goto bail;
                }
                t = (dirstateItemObject *)v;

                if (t->state == 'n' && t->size == -2) {
                        if (PySet_Add(otherpset, fname) == -1) {
                                goto bail;
                        }
                }

                if (t->state == 'n' && t->mtime != -1) {
                        continue;
                }
                if (PySet_Add(nonnset, fname) == -1) {
                        goto bail;
                }
        }

        result = Py_BuildValue("(OO)", nonnset, otherpset);
        if (result == NULL) {
                goto bail;
        }
        Py_DECREF(nonnset);
        Py_DECREF(otherpset);
        return result;
bail:
        Py_XDECREF(nonnset);
        Py_XDECREF(otherpset);
        Py_XDECREF(result);
        return NULL;
}

/*
 * Efficiently pack a dirstate object into its on-disk format.
 */
static PyObject *pack_dirstate(PyObject *self, PyObject *args)
{
        PyObject *packobj = NULL;
        PyObject *map, *copymap, *pl, *mtime_unset = NULL;
        Py_ssize_t nbytes, pos, l;
        PyObject *k, *v = NULL, *pn;
        char *p, *s;
        int now;

        if (!PyArg_ParseTuple(args, "O!O!O!i:pack_dirstate", &PyDict_Type, &map,
                              &PyDict_Type, &copymap, &PyTuple_Type, &pl,
                              &now)) {
                return NULL;
        }

        if (PyTuple_Size(pl) != 2) {
                PyErr_SetString(PyExc_TypeError, "expected 2-element tuple");
                return NULL;
        }

        /* Figure out how much we need to allocate. */
        for (nbytes = 40, pos = 0; PyDict_Next(map, &pos, &k, &v);) {
                PyObject *c;
                if (!PyBytes_Check(k)) {
                        PyErr_SetString(PyExc_TypeError, "expected string key");
                        goto bail;
                }
                nbytes += PyBytes_GET_SIZE(k) + 17;
                c = PyDict_GetItem(copymap, k);
                if (c) {
                        if (!PyBytes_Check(c)) {
                                PyErr_SetString(PyExc_TypeError,
                                                "expected string key");
                                goto bail;
                        }
                        nbytes += PyBytes_GET_SIZE(c) + 1;
                }
        }

        packobj = PyBytes_FromStringAndSize(NULL, nbytes);
        if (packobj == NULL) {
                goto bail;
        }

        p = PyBytes_AS_STRING(packobj);

        pn = PyTuple_GET_ITEM(pl, 0);
        if (PyBytes_AsStringAndSize(pn, &s, &l) == -1 || l != 20) {
                PyErr_SetString(PyExc_TypeError, "expected a 20-byte hash");
                goto bail;
        }
        memcpy(p, s, l);
        p += 20;
        pn = PyTuple_GET_ITEM(pl, 1);
        if (PyBytes_AsStringAndSize(pn, &s, &l) == -1 || l != 20) {
                PyErr_SetString(PyExc_TypeError, "expected a 20-byte hash");
                goto bail;
        }
        memcpy(p, s, l);
        p += 20;

        for (pos = 0; PyDict_Next(map, &pos, &k, &v);) {
                dirstateItemObject *tuple;
                char state;
                int mode, size, mtime;
                Py_ssize_t len, l;
                PyObject *o;
                char *t;

                if (!dirstate_tuple_check(v)) {
                        PyErr_SetString(PyExc_TypeError,
                                        "expected a dirstate tuple");
                        goto bail;
                }
                tuple = (dirstateItemObject *)v;

                state = tuple->state;
                mode = tuple->mode;
                size = tuple->size;
                mtime = tuple->mtime;
                if (state == 'n' && mtime == now) {
                        /* See pure/parsers.py:pack_dirstate for why we do
                         * this. */
                        mtime = -1;
                        mtime_unset = (PyObject *)make_dirstate_item(
                            state, mode, size, mtime);
                        if (!mtime_unset) {
                                goto bail;
                        }
                        if (PyDict_SetItem(map, k, mtime_unset) == -1) {
                                goto bail;
                        }
                        Py_DECREF(mtime_unset);
                        mtime_unset = NULL;
                }
                *p++ = state;
                putbe32((uint32_t)mode, p);
                putbe32((uint32_t)size, p + 4);
                putbe32((uint32_t)mtime, p + 8);
                t = p + 12;
                p += 16;
                len = PyBytes_GET_SIZE(k);
                memcpy(p, PyBytes_AS_STRING(k), len);
                p += len;
                o = PyDict_GetItem(copymap, k);
                if (o) {
                        *p++ = '\0';
                        l = PyBytes_GET_SIZE(o);
                        memcpy(p, PyBytes_AS_STRING(o), l);
                        p += l;
                        len += l + 1;
                }
                putbe32((uint32_t)len, t);
        }

        pos = p - PyBytes_AS_STRING(packobj);
        if (pos != nbytes) {
                PyErr_Format(PyExc_SystemError, "bad dirstate size: %ld != %ld",
                             (long)pos, (long)nbytes);
                goto bail;
        }

        return packobj;
bail:
        Py_XDECREF(mtime_unset);
        Py_XDECREF(packobj);
        Py_XDECREF(v);
        return NULL;
}

#define BUMPED_FIX 1
#define USING_SHA_256 2
#define FM1_HEADER_SIZE (4 + 8 + 2 + 2 + 1 + 1 + 1)

static PyObject *readshas(const char *source, unsigned char num,
                          Py_ssize_t hashwidth)
{
        int i;
        PyObject *list = PyTuple_New(num);
        if (list == NULL) {
                return NULL;
        }
        for (i = 0; i < num; i++) {
                PyObject *hash = PyBytes_FromStringAndSize(source, hashwidth);
                if (hash == NULL) {
                        Py_DECREF(list);
                        return NULL;
                }
                PyTuple_SET_ITEM(list, i, hash);
                source += hashwidth;
        }
        return list;
}

static PyObject *fm1readmarker(const char *databegin, const char *dataend,
                               uint32_t *msize)
{
        const char *data = databegin;
        const char *meta;

        double mtime;
        int16_t tz;
        uint16_t flags;
        unsigned char nsuccs, nparents, nmetadata;
        Py_ssize_t hashwidth = 20;

        PyObject *prec = NULL, *parents = NULL, *succs = NULL;
        PyObject *metadata = NULL, *ret = NULL;
        int i;

        if (data + FM1_HEADER_SIZE > dataend) {
                goto overflow;
        }

        *msize = getbe32(data);
        data += 4;
        mtime = getbefloat64(data);
        data += 8;
        tz = getbeint16(data);
        data += 2;
        flags = getbeuint16(data);
        data += 2;

        if (flags & USING_SHA_256) {
                hashwidth = 32;
        }

        nsuccs = (unsigned char)(*data++);
        nparents = (unsigned char)(*data++);
        nmetadata = (unsigned char)(*data++);

        if (databegin + *msize > dataend) {
                goto overflow;
        }
        dataend = databegin + *msize; /* narrow down to marker size */

        if (data + hashwidth > dataend) {
                goto overflow;
        }
        prec = PyBytes_FromStringAndSize(data, hashwidth);
        data += hashwidth;
        if (prec == NULL) {
                goto bail;
        }

        if (data + nsuccs * hashwidth > dataend) {
                goto overflow;
        }
        succs = readshas(data, nsuccs, hashwidth);
        if (succs == NULL) {
                goto bail;
        }
        data += nsuccs * hashwidth;

        if (nparents == 1 || nparents == 2) {
                if (data + nparents * hashwidth > dataend) {
                        goto overflow;
                }
                parents = readshas(data, nparents, hashwidth);
                if (parents == NULL) {
                        goto bail;
                }
                data += nparents * hashwidth;
        } else {
                parents = Py_None;
                Py_INCREF(parents);
        }

        if (data + 2 * nmetadata > dataend) {
                goto overflow;
        }
        meta = data + (2 * nmetadata);
        metadata = PyTuple_New(nmetadata);
        if (metadata == NULL) {
                goto bail;
        }
        for (i = 0; i < nmetadata; i++) {
                PyObject *tmp, *left = NULL, *right = NULL;
                Py_ssize_t leftsize = (unsigned char)(*data++);
                Py_ssize_t rightsize = (unsigned char)(*data++);
                if (meta + leftsize + rightsize > dataend) {
                        goto overflow;
                }
                left = PyBytes_FromStringAndSize(meta, leftsize);
                meta += leftsize;
                right = PyBytes_FromStringAndSize(meta, rightsize);
                meta += rightsize;
                tmp = PyTuple_New(2);
                if (!left || !right || !tmp) {
                        Py_XDECREF(left);
                        Py_XDECREF(right);
                        Py_XDECREF(tmp);
                        goto bail;
                }
                PyTuple_SET_ITEM(tmp, 0, left);
                PyTuple_SET_ITEM(tmp, 1, right);
                PyTuple_SET_ITEM(metadata, i, tmp);
        }
        ret = Py_BuildValue("(OOHO(di)O)", prec, succs, flags, metadata, mtime,
                            (int)tz * 60, parents);
        goto bail; /* return successfully */

overflow:
        PyErr_SetString(PyExc_ValueError, "overflow in obsstore");
bail:
        Py_XDECREF(prec);
        Py_XDECREF(succs);
        Py_XDECREF(metadata);
        Py_XDECREF(parents);
        return ret;
}

static PyObject *fm1readmarkers(PyObject *self, PyObject *args)
{
        const char *data, *dataend;
        Py_ssize_t datalen, offset, stop;
        PyObject *markers = NULL;

        if (!PyArg_ParseTuple(args, PY23("s#nn", "y#nn"), &data, &datalen,
                              &offset, &stop)) {
                return NULL;
        }
        if (offset < 0) {
                PyErr_SetString(PyExc_ValueError,
                                "invalid negative offset in fm1readmarkers");
                return NULL;
        }
        if (stop > datalen) {
                PyErr_SetString(
                    PyExc_ValueError,
                    "stop longer than data length in fm1readmarkers");
                return NULL;
        }
        dataend = data + datalen;
        data += offset;
        markers = PyList_New(0);
        if (!markers) {
                return NULL;
        }
        while (offset < stop) {
                uint32_t msize;
                int error;
                PyObject *record = fm1readmarker(data, dataend, &msize);
                if (!record) {
                        goto bail;
                }
                error = PyList_Append(markers, record);
                Py_DECREF(record);
                if (error) {
                        goto bail;
                }
                data += msize;
                offset += msize;
        }
        return markers;
bail:
        Py_DECREF(markers);
        return NULL;
}

static char parsers_doc[] = "Efficient content parsing.";

PyObject *encodedir(PyObject *self, PyObject *args);
PyObject *pathencode(PyObject *self, PyObject *args);
PyObject *lowerencode(PyObject *self, PyObject *args);
PyObject *parse_index2(PyObject *self, PyObject *args, PyObject *kwargs);

static PyMethodDef methods[] = {
    {"pack_dirstate", pack_dirstate, METH_VARARGS, "pack a dirstate\n"},
    {"nonnormalotherparententries", nonnormalotherparententries, METH_VARARGS,
     "create a set containing non-normal and other parent entries of given "
     "dirstate\n"},
    {"parse_dirstate", parse_dirstate, METH_VARARGS, "parse a dirstate\n"},
    {"parse_index2", (PyCFunction)parse_index2, METH_VARARGS | METH_KEYWORDS,
     "parse a revlog index\n"},
    {"isasciistr", isasciistr, METH_VARARGS, "check if an ASCII string\n"},
    {"asciilower", asciilower, METH_VARARGS, "lowercase an ASCII string\n"},
    {"asciiupper", asciiupper, METH_VARARGS, "uppercase an ASCII string\n"},
    {"dict_new_presized", dict_new_presized, METH_VARARGS,
     "construct a dict with an expected size\n"},
    {"make_file_foldmap", make_file_foldmap, METH_VARARGS,
     "make file foldmap\n"},
    {"jsonescapeu8fast", jsonescapeu8fast, METH_VARARGS,
     "escape a UTF-8 byte string to JSON (fast path)\n"},
    {"encodedir", encodedir, METH_VARARGS, "encodedir a path\n"},
    {"pathencode", pathencode, METH_VARARGS, "fncache-encode a path\n"},
    {"lowerencode", lowerencode, METH_VARARGS, "lower-encode a path\n"},
    {"fm1readmarkers", fm1readmarkers, METH_VARARGS,
     "parse v1 obsolete markers\n"},
    {NULL, NULL}};

void dirs_module_init(PyObject *mod);
void manifest_module_init(PyObject *mod);
void revlog_module_init(PyObject *mod);

static const int version = 20;

static void module_init(PyObject *mod)
{
        PyObject *capsule = NULL;
        PyModule_AddIntConstant(mod, "version", version);

        /* This module constant has two purposes. First, it lets us unit test
         * the ImportError raised without hard-coding any error text. This
         * means we can change the text in the future without breaking tests,
         * even across changesets without a recompile. Second, its presence
         * can be used to determine whether the version-checking logic is
         * present, which also helps in testing across changesets without a
         * recompile. Note that this means the pure-Python version of parsers
         * should not have this module constant. */
        PyModule_AddStringConstant(mod, "versionerrortext", versionerrortext);

        dirs_module_init(mod);
        manifest_module_init(mod);
        revlog_module_init(mod);

        capsule = PyCapsule_New(
            make_dirstate_item,
            "mercurial.cext.parsers.make_dirstate_item_CAPI", NULL);
        if (capsule != NULL)
                PyModule_AddObject(mod, "make_dirstate_item_CAPI", capsule);

        if (PyType_Ready(&dirstateItemType) < 0) {
                return;
        }
        Py_INCREF(&dirstateItemType);
        PyModule_AddObject(mod, "DirstateItem", (PyObject *)&dirstateItemType);
}

static int check_python_version(void)
{
        PyObject *sys = PyImport_ImportModule("sys"), *ver;
        long hexversion;
        if (!sys) {
                return -1;
        }
        ver = PyObject_GetAttrString(sys, "hexversion");
        Py_DECREF(sys);
        if (!ver) {
                return -1;
        }
        hexversion = PyInt_AsLong(ver);
        Py_DECREF(ver);
        /* sys.hexversion is a 32-bit number by default, so the -1 case
         * should only occur in unusual circumstances (e.g. if sys.hexversion
         * is manually set to an invalid value). */
        if ((hexversion == -1) || (hexversion >> 16 != PY_VERSION_HEX >> 16)) {
                PyErr_Format(PyExc_ImportError,
                             "%s: The Mercurial extension "
                             "modules were compiled with Python " PY_VERSION
                             ", but "
                             "Mercurial is currently using Python with "
                             "sys.hexversion=%ld: "
                             "Python %s\n at: %s",
                             versionerrortext, hexversion, Py_GetVersion(),
                             Py_GetProgramFullPath());
                return -1;
        }
        return 0;
}

#ifdef IS_PY3K
static struct PyModuleDef parsers_module = {PyModuleDef_HEAD_INIT, "parsers",
                                            parsers_doc, -1, methods};

PyMODINIT_FUNC PyInit_parsers(void)
{
        PyObject *mod;

        if (check_python_version() == -1)
                return NULL;
        mod = PyModule_Create(&parsers_module);
        module_init(mod);
        return mod;
}
#else
PyMODINIT_FUNC initparsers(void)
{
        PyObject *mod;

        if (check_python_version() == -1) {
                return;
        }
        mod = Py_InitModule3("parsers", methods, parsers_doc);
        module_init(mod);
}
#endif
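
To make the new `set_untracked` semantics concrete, here is a minimal sketch exercising the C implementation above through the module's `DirstateItem` type (an illustration, assuming the extension is built and importable as `mercurial.cext.parsers`; the expected values follow directly from the three branches of `dirstate_item_set_untracked`):

    from mercurial.cext import parsers

    # merged ('m') entries become removed, with size flagged as
    # dirstate_v1_nonnormal (-1), which `merged_removed` reports
    item = parsers.DirstateItem(b'm', 0, 0, 0)
    item.set_untracked()
    assert item.state == b'r' and item.merged_removed

    # normal entries coming from p2 keep the from-p2 marker (size == -2)
    item = parsers.DirstateItem(b'n', 0o644, -2, 0)
    item.set_untracked()
    assert item.from_p2_removed

    # everything else is plainly removed: state 'r', mode/size/mtime zeroed
    item = parsers.DirstateItem(b'n', 0o644, 1234, 5678)
    item.set_untracked()
    assert item.removed and item.size == 0 and item.mode == 0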
--- a/mercurial/dirstate.py
+++ b/mercurial/dirstate.py
@@ -1,1783 +1,1783 @@
1 # dirstate.py - working directory tracking for mercurial
1 # dirstate.py - working directory tracking for mercurial
2 #
2 #
3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import collections
10 import collections
11 import contextlib
11 import contextlib
12 import errno
12 import errno
13 import os
13 import os
14 import stat
14 import stat
15
15
16 from .i18n import _
16 from .i18n import _
17 from .pycompat import delattr
17 from .pycompat import delattr
18
18
19 from hgdemandimport import tracing
19 from hgdemandimport import tracing
20
20
21 from . import (
21 from . import (
22 dirstatemap,
22 dirstatemap,
23 encoding,
23 encoding,
24 error,
24 error,
25 match as matchmod,
25 match as matchmod,
26 pathutil,
26 pathutil,
27 policy,
27 policy,
28 pycompat,
28 pycompat,
29 scmutil,
29 scmutil,
30 sparse,
30 sparse,
31 util,
31 util,
32 )
32 )
33
33
34 from .interfaces import (
34 from .interfaces import (
35 dirstate as intdirstate,
35 dirstate as intdirstate,
36 util as interfaceutil,
36 util as interfaceutil,
37 )
37 )
38
38
39 parsers = policy.importmod('parsers')
39 parsers = policy.importmod('parsers')
40 rustmod = policy.importrust('dirstate')
40 rustmod = policy.importrust('dirstate')
41
41
42 SUPPORTS_DIRSTATE_V2 = rustmod is not None
42 SUPPORTS_DIRSTATE_V2 = rustmod is not None
43
43
44 propertycache = util.propertycache
44 propertycache = util.propertycache
45 filecache = scmutil.filecache
45 filecache = scmutil.filecache
46 _rangemask = dirstatemap.rangemask
46 _rangemask = dirstatemap.rangemask
47
47
48 DirstateItem = parsers.DirstateItem
48 DirstateItem = parsers.DirstateItem
49
49
50
50
51 class repocache(filecache):
51 class repocache(filecache):
52 """filecache for files in .hg/"""
52 """filecache for files in .hg/"""
53
53
54 def join(self, obj, fname):
54 def join(self, obj, fname):
55 return obj._opener.join(fname)
55 return obj._opener.join(fname)
56
56
57
57
58 class rootcache(filecache):
58 class rootcache(filecache):
59 """filecache for files in the repository root"""
59 """filecache for files in the repository root"""
60
60
61 def join(self, obj, fname):
61 def join(self, obj, fname):
62 return obj._join(fname)
62 return obj._join(fname)
63
63
64
64
65 def _getfsnow(vfs):
65 def _getfsnow(vfs):
66 '''Get "now" timestamp on filesystem'''
66 '''Get "now" timestamp on filesystem'''
67 tmpfd, tmpname = vfs.mkstemp()
67 tmpfd, tmpname = vfs.mkstemp()
68 try:
68 try:
69 return os.fstat(tmpfd)[stat.ST_MTIME]
69 return os.fstat(tmpfd)[stat.ST_MTIME]
70 finally:
70 finally:
71 os.close(tmpfd)
71 os.close(tmpfd)
72 vfs.unlink(tmpname)
72 vfs.unlink(tmpname)
73
73
74
74
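The `_getfsnow()` helper above exists because the filesystem's notion of "now" (its clock and its timestamp granularity) can differ from `time.time()`. A standalone sketch of the same trick, using `tempfile` instead of an hg vfs (assumption: any writable directory):

import os
import stat
import tempfile

def fs_now(directory):
    # create a scratch file and read its mtime back: that is "now"
    # as the filesystem itself records it
    fd, name = tempfile.mkstemp(dir=directory)
    try:
        return os.fstat(fd)[stat.ST_MTIME]
    finally:
        os.close(fd)
        os.unlink(name)

print(fs_now(tempfile.gettempdir()))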
75 def requires_parents_change(func):
75 def requires_parents_change(func):
76 def wrap(self, *args, **kwargs):
76 def wrap(self, *args, **kwargs):
77 if not self.pendingparentchange():
77 if not self.pendingparentchange():
78 msg = 'calling `%s` outside of a parentchange context'
78 msg = 'calling `%s` outside of a parentchange context'
79 msg %= func.__name__
79 msg %= func.__name__
80 raise error.ProgrammingError(msg)
80 raise error.ProgrammingError(msg)
81 return func(self, *args, **kwargs)
81 return func(self, *args, **kwargs)
82
82
83 return wrap
83 return wrap
84
84
85
85
86 def requires_no_parents_change(func):
86 def requires_no_parents_change(func):
87 def wrap(self, *args, **kwargs):
87 def wrap(self, *args, **kwargs):
88 if self.pendingparentchange():
88 if self.pendingparentchange():
89 msg = 'calling `%s` inside of a parentchange context'
89 msg = 'calling `%s` inside of a parentchange context'
90 msg %= func.__name__
90 msg %= func.__name__
91 raise error.ProgrammingError(msg)
91 raise error.ProgrammingError(msg)
92 return func(self, *args, **kwargs)
92 return func(self, *args, **kwargs)
93
93
94 return wrap
94 return wrap
95
95
96
96
97 @interfaceutil.implementer(intdirstate.idirstate)
97 @interfaceutil.implementer(intdirstate.idirstate)
98 class dirstate(object):
98 class dirstate(object):
99 def __init__(
99 def __init__(
100 self,
100 self,
101 opener,
101 opener,
102 ui,
102 ui,
103 root,
103 root,
104 validate,
104 validate,
105 sparsematchfn,
105 sparsematchfn,
106 nodeconstants,
106 nodeconstants,
107 use_dirstate_v2,
107 use_dirstate_v2,
108 ):
108 ):
109 """Create a new dirstate object.
109 """Create a new dirstate object.
110
110
111 opener is an open()-like callable that can be used to open the
111 opener is an open()-like callable that can be used to open the
112 dirstate file; root is the root of the directory tracked by
112 dirstate file; root is the root of the directory tracked by
113 the dirstate.
113 the dirstate.
114 """
114 """
115 self._use_dirstate_v2 = use_dirstate_v2
115 self._use_dirstate_v2 = use_dirstate_v2
116 self._nodeconstants = nodeconstants
116 self._nodeconstants = nodeconstants
117 self._opener = opener
117 self._opener = opener
118 self._validate = validate
118 self._validate = validate
119 self._root = root
119 self._root = root
120 self._sparsematchfn = sparsematchfn
120 self._sparsematchfn = sparsematchfn
121 # ntpath.join(root, '') of Python 2.7.9 does not add sep if root is
121 # ntpath.join(root, '') of Python 2.7.9 does not add sep if root is
122 # UNC path pointing to root share (issue4557)
122 # UNC path pointing to root share (issue4557)
123 self._rootdir = pathutil.normasprefix(root)
123 self._rootdir = pathutil.normasprefix(root)
124 self._dirty = False
124 self._dirty = False
125 self._lastnormaltime = 0
125 self._lastnormaltime = 0
126 self._ui = ui
126 self._ui = ui
127 self._filecache = {}
127 self._filecache = {}
128 self._parentwriters = 0
128 self._parentwriters = 0
129 self._filename = b'dirstate'
129 self._filename = b'dirstate'
130 self._pendingfilename = b'%s.pending' % self._filename
130 self._pendingfilename = b'%s.pending' % self._filename
131 self._plchangecallbacks = {}
131 self._plchangecallbacks = {}
132 self._origpl = None
132 self._origpl = None
133 self._updatedfiles = set()
133 self._updatedfiles = set()
134 self._mapcls = dirstatemap.dirstatemap
134 self._mapcls = dirstatemap.dirstatemap
135 # Access and cache cwd early, so we don't access it for the first time
135 # Access and cache cwd early, so we don't access it for the first time
136 # after a working-copy update caused it to not exist (accessing it then
136 # after a working-copy update caused it to not exist (accessing it then
137 # raises an exception).
137 # raises an exception).
138 self._cwd
138 self._cwd
139
139
140 def prefetch_parents(self):
140 def prefetch_parents(self):
141 """make sure the parents are loaded
141 """make sure the parents are loaded
142
142
143 Used to avoid a race condition.
143 Used to avoid a race condition.
144 """
144 """
145 self._pl
145 self._pl
146
146
147 @contextlib.contextmanager
147 @contextlib.contextmanager
148 def parentchange(self):
148 def parentchange(self):
149 """Context manager for handling dirstate parents.
149 """Context manager for handling dirstate parents.
150
150
151 If an exception occurs in the scope of the context manager,
151 If an exception occurs in the scope of the context manager,
152 the incoherent dirstate won't be written when wlock is
152 the incoherent dirstate won't be written when wlock is
153 released.
153 released.
154 """
154 """
155 self._parentwriters += 1
155 self._parentwriters += 1
156 yield
156 yield
157 # Typically we want the "undo" step of a context manager in a
157 # Typically we want the "undo" step of a context manager in a
158 # finally block so it happens even when an exception
158 # finally block so it happens even when an exception
159 # occurs. In this case, however, we only want to decrement
159 # occurs. In this case, however, we only want to decrement
160 # parentwriters if the code in the with statement exits
160 # parentwriters if the code in the with statement exits
161 # normally, so we don't have a try/finally here on purpose.
161 # normally, so we don't have a try/finally here on purpose.
162 self._parentwriters -= 1
162 self._parentwriters -= 1
163
163
164 def pendingparentchange(self):
164 def pendingparentchange(self):
165 """Returns true if the dirstate is in the middle of a set of changes
165 """Returns true if the dirstate is in the middle of a set of changes
166 that modify the dirstate parent.
166 that modify the dirstate parent.
167 """
167 """
168 return self._parentwriters > 0
168 return self._parentwriters > 0
169
169
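For readers new to this API: the `requires_parents_change` / `requires_no_parents_change` decorators above and the `parentchange()` counter combine into a simple re-entrancy guard. A minimal, self-contained sketch of the pattern (`MiniDirstate` is a hypothetical stand-in, not Mercurial's class):

import contextlib

class MiniDirstate(object):
    def __init__(self):
        self._parentwriters = 0

    @contextlib.contextmanager
    def parentchange(self):
        self._parentwriters += 1
        yield
        # decremented only on normal exit, mirroring the code above
        self._parentwriters -= 1

    def pendingparentchange(self):
        return self._parentwriters > 0

ds = MiniDirstate()
with ds.parentchange():
    assert ds.pendingparentchange()   # guarded operations allowed here
assert not ds.pendingparentchange()   # counter drops back to zero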
170 @propertycache
170 @propertycache
171 def _map(self):
171 def _map(self):
172 """Return the dirstate contents (see documentation for dirstatemap)."""
172 """Return the dirstate contents (see documentation for dirstatemap)."""
173 self._map = self._mapcls(
173 self._map = self._mapcls(
174 self._ui,
174 self._ui,
175 self._opener,
175 self._opener,
176 self._root,
176 self._root,
177 self._nodeconstants,
177 self._nodeconstants,
178 self._use_dirstate_v2,
178 self._use_dirstate_v2,
179 )
179 )
180 return self._map
180 return self._map
181
181
182 @property
182 @property
183 def _sparsematcher(self):
183 def _sparsematcher(self):
184 """The matcher for the sparse checkout.
184 """The matcher for the sparse checkout.
185
185
186 The working directory may not include every file from a manifest. The
186 The working directory may not include every file from a manifest. The
187 matcher obtained by this property will match a path if it is to be
187 matcher obtained by this property will match a path if it is to be
188 included in the working directory.
188 included in the working directory.
189 """
189 """
190 # TODO there is potential to cache this property. For now, the matcher
190 # TODO there is potential to cache this property. For now, the matcher
191 # is resolved on every access. (But the called function does use a
191 # is resolved on every access. (But the called function does use a
192 # cache to keep the lookup fast.)
192 # cache to keep the lookup fast.)
193 return self._sparsematchfn()
193 return self._sparsematchfn()
194
194
195 @repocache(b'branch')
195 @repocache(b'branch')
196 def _branch(self):
196 def _branch(self):
197 try:
197 try:
198 return self._opener.read(b"branch").strip() or b"default"
198 return self._opener.read(b"branch").strip() or b"default"
199 except IOError as inst:
199 except IOError as inst:
200 if inst.errno != errno.ENOENT:
200 if inst.errno != errno.ENOENT:
201 raise
201 raise
202 return b"default"
202 return b"default"
203
203
204 @property
204 @property
205 def _pl(self):
205 def _pl(self):
206 return self._map.parents()
206 return self._map.parents()
207
207
208 def hasdir(self, d):
208 def hasdir(self, d):
209 return self._map.hastrackeddir(d)
209 return self._map.hastrackeddir(d)
210
210
211 @rootcache(b'.hgignore')
211 @rootcache(b'.hgignore')
212 def _ignore(self):
212 def _ignore(self):
213 files = self._ignorefiles()
213 files = self._ignorefiles()
214 if not files:
214 if not files:
215 return matchmod.never()
215 return matchmod.never()
216
216
217 pats = [b'include:%s' % f for f in files]
217 pats = [b'include:%s' % f for f in files]
218 return matchmod.match(self._root, b'', [], pats, warn=self._ui.warn)
218 return matchmod.match(self._root, b'', [], pats, warn=self._ui.warn)
219
219
220 @propertycache
220 @propertycache
221 def _slash(self):
221 def _slash(self):
222 return self._ui.configbool(b'ui', b'slash') and pycompat.ossep != b'/'
222 return self._ui.configbool(b'ui', b'slash') and pycompat.ossep != b'/'
223
223
224 @propertycache
224 @propertycache
225 def _checklink(self):
225 def _checklink(self):
226 return util.checklink(self._root)
226 return util.checklink(self._root)
227
227
228 @propertycache
228 @propertycache
229 def _checkexec(self):
229 def _checkexec(self):
230 return bool(util.checkexec(self._root))
230 return bool(util.checkexec(self._root))
231
231
232 @propertycache
232 @propertycache
233 def _checkcase(self):
233 def _checkcase(self):
234 return not util.fscasesensitive(self._join(b'.hg'))
234 return not util.fscasesensitive(self._join(b'.hg'))
235
235
236 def _join(self, f):
236 def _join(self, f):
237 # much faster than os.path.join()
237 # much faster than os.path.join()
238 # it's safe because f is always a relative path
238 # it's safe because f is always a relative path
239 return self._rootdir + f
239 return self._rootdir + f
240
240
241 def flagfunc(self, buildfallback):
241 def flagfunc(self, buildfallback):
242 if self._checklink and self._checkexec:
242 if self._checklink and self._checkexec:
243
243
244 def f(x):
244 def f(x):
245 try:
245 try:
246 st = os.lstat(self._join(x))
246 st = os.lstat(self._join(x))
247 if util.statislink(st):
247 if util.statislink(st):
248 return b'l'
248 return b'l'
249 if util.statisexec(st):
249 if util.statisexec(st):
250 return b'x'
250 return b'x'
251 except OSError:
251 except OSError:
252 pass
252 pass
253 return b''
253 return b''
254
254
255 return f
255 return f
256
256
257 fallback = buildfallback()
257 fallback = buildfallback()
258 if self._checklink:
258 if self._checklink:
259
259
260 def f(x):
260 def f(x):
261 if os.path.islink(self._join(x)):
261 if os.path.islink(self._join(x)):
262 return b'l'
262 return b'l'
263 if b'x' in fallback(x):
263 if b'x' in fallback(x):
264 return b'x'
264 return b'x'
265 return b''
265 return b''
266
266
267 return f
267 return f
268 if self._checkexec:
268 if self._checkexec:
269
269
270 def f(x):
270 def f(x):
271 if b'l' in fallback(x):
271 if b'l' in fallback(x):
272 return b'l'
272 return b'l'
273 if util.isexec(self._join(x)):
273 if util.isexec(self._join(x)):
274 return b'x'
274 return b'x'
275 return b''
275 return b''
276
276
277 return f
277 return f
278 else:
278 else:
279 return fallback
279 return fallback
280
280
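As an illustration of the fast path in `flagfunc` (the filesystem supports both symlinks and the exec bit), here is a self-contained equivalent using plain `os`/`stat` rather than Mercurial's `util` helpers; treat it as a sketch, not the shipped code:

import os
import stat

def flags(path):
    try:
        st = os.lstat(path)
    except OSError:
        return b''
    if stat.S_ISLNK(st.st_mode):
        return b'l'              # symbolic link
    if st.st_mode & 0o111:       # any exec bit set
        return b'x'              # executable
    return b''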
281 @propertycache
281 @propertycache
282 def _cwd(self):
282 def _cwd(self):
283 # internal config: ui.forcecwd
283 # internal config: ui.forcecwd
284 forcecwd = self._ui.config(b'ui', b'forcecwd')
284 forcecwd = self._ui.config(b'ui', b'forcecwd')
285 if forcecwd:
285 if forcecwd:
286 return forcecwd
286 return forcecwd
287 return encoding.getcwd()
287 return encoding.getcwd()
288
288
289 def getcwd(self):
289 def getcwd(self):
290 """Return the path from which a canonical path is calculated.
290 """Return the path from which a canonical path is calculated.
291
291
292 This path should be used to resolve file patterns or to convert
292 This path should be used to resolve file patterns or to convert
293 canonical paths back to file paths for display. It shouldn't be
293 canonical paths back to file paths for display. It shouldn't be
294 used to get real file paths. Use vfs functions instead.
294 used to get real file paths. Use vfs functions instead.
295 """
295 """
296 cwd = self._cwd
296 cwd = self._cwd
297 if cwd == self._root:
297 if cwd == self._root:
298 return b''
298 return b''
299 # self._root ends with a path separator if self._root is '/' or 'C:\'
299 # self._root ends with a path separator if self._root is '/' or 'C:\'
300 rootsep = self._root
300 rootsep = self._root
301 if not util.endswithsep(rootsep):
301 if not util.endswithsep(rootsep):
302 rootsep += pycompat.ossep
302 rootsep += pycompat.ossep
303 if cwd.startswith(rootsep):
303 if cwd.startswith(rootsep):
304 return cwd[len(rootsep) :]
304 return cwd[len(rootsep) :]
305 else:
305 else:
306 # we're outside the repo. return an absolute path.
306 # we're outside the repo. return an absolute path.
307 return cwd
307 return cwd
308
308
309 def pathto(self, f, cwd=None):
309 def pathto(self, f, cwd=None):
310 if cwd is None:
310 if cwd is None:
311 cwd = self.getcwd()
311 cwd = self.getcwd()
312 path = util.pathto(self._root, cwd, f)
312 path = util.pathto(self._root, cwd, f)
313 if self._slash:
313 if self._slash:
314 return util.pconvert(path)
314 return util.pconvert(path)
315 return path
315 return path
316
316
317 def __getitem__(self, key):
317 def __getitem__(self, key):
318 """Return the current state of key (a filename) in the dirstate.
318 """Return the current state of key (a filename) in the dirstate.
319
319
320 States are:
320 States are:
321 n normal
321 n normal
322 m needs merging
322 m needs merging
323 r marked for removal
323 r marked for removal
324 a marked for addition
324 a marked for addition
325 ? not tracked
325 ? not tracked
326
326
327 XXX The "state" is a bit obscure to be in the "public" API. We should
327 XXX The "state" is a bit obscure to be in the "public" API. We should
328 consider migrating all users of this to go through the dirstate entry
328 consider migrating all users of this to go through the dirstate entry
329 instead.
329 instead.
330 """
330 """
331 entry = self._map.get(key)
331 entry = self._map.get(key)
332 if entry is not None:
332 if entry is not None:
333 return entry.state
333 return entry.state
334 return b'?'
334 return b'?'
335
335
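The single-character states returned by `__getitem__` can be summarized with a small lookup table; this is an illustrative stand-in, not a Mercurial API:

states = {
    b'n': b'normal',
    b'm': b'needs merging',
    b'r': b'marked for removal',
    b'a': b'marked for addition',
    b'?': b'not tracked',
}

def describe(state):
    return states.get(state, b'unknown')

assert describe(b'a') == b'marked for addition'
assert describe(b'?') == b'not tracked'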
336 def __contains__(self, key):
336 def __contains__(self, key):
337 return key in self._map
337 return key in self._map
338
338
339 def __iter__(self):
339 def __iter__(self):
340 return iter(sorted(self._map))
340 return iter(sorted(self._map))
341
341
342 def items(self):
342 def items(self):
343 return pycompat.iteritems(self._map)
343 return pycompat.iteritems(self._map)
344
344
345 iteritems = items
345 iteritems = items
346
346
347 def directories(self):
347 def directories(self):
348 return self._map.directories()
348 return self._map.directories()
349
349
350 def parents(self):
350 def parents(self):
351 return [self._validate(p) for p in self._pl]
351 return [self._validate(p) for p in self._pl]
352
352
353 def p1(self):
353 def p1(self):
354 return self._validate(self._pl[0])
354 return self._validate(self._pl[0])
355
355
356 def p2(self):
356 def p2(self):
357 return self._validate(self._pl[1])
357 return self._validate(self._pl[1])
358
358
359 @property
359 @property
360 def in_merge(self):
360 def in_merge(self):
361 """True if a merge is in progress"""
361 """True if a merge is in progress"""
362 return self._pl[1] != self._nodeconstants.nullid
362 return self._pl[1] != self._nodeconstants.nullid
363
363
364 def branch(self):
364 def branch(self):
365 return encoding.tolocal(self._branch)
365 return encoding.tolocal(self._branch)
366
366
367 def setparents(self, p1, p2=None):
367 def setparents(self, p1, p2=None):
368 """Set dirstate parents to p1 and p2.
368 """Set dirstate parents to p1 and p2.
369
369
370 When moving from two parents to one, "merged" entries are
370 When moving from two parents to one, "merged" entries are
371 adjusted to normal; previous copy records are discarded and
371 adjusted to normal; previous copy records are discarded and
372 returned by the call.
372 returned by the call.
373
373
374 See localrepo.setparents()
374 See localrepo.setparents()
375 """
375 """
376 if p2 is None:
376 if p2 is None:
377 p2 = self._nodeconstants.nullid
377 p2 = self._nodeconstants.nullid
378 if self._parentwriters == 0:
378 if self._parentwriters == 0:
379 raise ValueError(
379 raise ValueError(
380 b"cannot set dirstate parent outside of "
380 b"cannot set dirstate parent outside of "
381 b"dirstate.parentchange context manager"
381 b"dirstate.parentchange context manager"
382 )
382 )
383
383
384 self._dirty = True
384 self._dirty = True
385 oldp2 = self._pl[1]
385 oldp2 = self._pl[1]
386 if self._origpl is None:
386 if self._origpl is None:
387 self._origpl = self._pl
387 self._origpl = self._pl
388 self._map.setparents(p1, p2)
388 self._map.setparents(p1, p2)
389 copies = {}
389 copies = {}
390 if (
390 if (
391 oldp2 != self._nodeconstants.nullid
391 oldp2 != self._nodeconstants.nullid
392 and p2 == self._nodeconstants.nullid
392 and p2 == self._nodeconstants.nullid
393 ):
393 ):
394 candidatefiles = self._map.non_normal_or_other_parent_paths()
394 candidatefiles = self._map.non_normal_or_other_parent_paths()
395
395
396 for f in candidatefiles:
396 for f in candidatefiles:
397 s = self._map.get(f)
397 s = self._map.get(f)
398 if s is None:
398 if s is None:
399 continue
399 continue
400
400
401 # Discard "merged" markers when moving away from a merge state
401 # Discard "merged" markers when moving away from a merge state
402 if s.merged:
402 if s.merged:
403 source = self._map.copymap.get(f)
403 source = self._map.copymap.get(f)
404 if source:
404 if source:
405 copies[f] = source
405 copies[f] = source
406 self._normallookup(f)
406 self._normallookup(f)
407 # Also fix up otherparent markers
407 # Also fix up otherparent markers
408 elif s.from_p2:
408 elif s.from_p2:
409 source = self._map.copymap.get(f)
409 source = self._map.copymap.get(f)
410 if source:
410 if source:
411 copies[f] = source
411 copies[f] = source
412 self._add(f)
412 self._add(f)
413 return copies
413 return copies
414
414
415 def setbranch(self, branch):
415 def setbranch(self, branch):
416 self.__class__._branch.set(self, encoding.fromlocal(branch))
416 self.__class__._branch.set(self, encoding.fromlocal(branch))
417 f = self._opener(b'branch', b'w', atomictemp=True, checkambig=True)
417 f = self._opener(b'branch', b'w', atomictemp=True, checkambig=True)
418 try:
418 try:
419 f.write(self._branch + b'\n')
419 f.write(self._branch + b'\n')
420 f.close()
420 f.close()
421
421
422 # make sure filecache has the correct stat info for _branch after
422 # make sure filecache has the correct stat info for _branch after
423 # replacing the underlying file
423 # replacing the underlying file
424 ce = self._filecache[b'_branch']
424 ce = self._filecache[b'_branch']
425 if ce:
425 if ce:
426 ce.refresh()
426 ce.refresh()
427 except: # re-raises
427 except: # re-raises
428 f.discard()
428 f.discard()
429 raise
429 raise
430
430
431 def invalidate(self):
431 def invalidate(self):
432 """Causes the next access to reread the dirstate.
432 """Causes the next access to reread the dirstate.
433
433
434 This is different from localrepo.invalidatedirstate() because it always
434 This is different from localrepo.invalidatedirstate() because it always
435 rereads the dirstate. Use localrepo.invalidatedirstate() if you want to
435 rereads the dirstate. Use localrepo.invalidatedirstate() if you want to
436 check whether the dirstate has changed before rereading it."""
436 check whether the dirstate has changed before rereading it."""
437
437
438 for a in ("_map", "_branch", "_ignore"):
438 for a in ("_map", "_branch", "_ignore"):
439 if a in self.__dict__:
439 if a in self.__dict__:
440 delattr(self, a)
440 delattr(self, a)
441 self._lastnormaltime = 0
441 self._lastnormaltime = 0
442 self._dirty = False
442 self._dirty = False
443 self._updatedfiles.clear()
443 self._updatedfiles.clear()
444 self._parentwriters = 0
444 self._parentwriters = 0
445 self._origpl = None
445 self._origpl = None
446
446
447 def copy(self, source, dest):
447 def copy(self, source, dest):
448 """Mark dest as a copy of source. Unmark dest if source is None."""
448 """Mark dest as a copy of source. Unmark dest if source is None."""
449 if source == dest:
449 if source == dest:
450 return
450 return
451 self._dirty = True
451 self._dirty = True
452 if source is not None:
452 if source is not None:
453 self._map.copymap[dest] = source
453 self._map.copymap[dest] = source
454 self._updatedfiles.add(source)
454 self._updatedfiles.add(source)
455 self._updatedfiles.add(dest)
455 self._updatedfiles.add(dest)
456 elif self._map.copymap.pop(dest, None):
456 elif self._map.copymap.pop(dest, None):
457 self._updatedfiles.add(dest)
457 self._updatedfiles.add(dest)
458
458
459 def copied(self, file):
459 def copied(self, file):
460 return self._map.copymap.get(file, None)
460 return self._map.copymap.get(file, None)
461
461
462 def copies(self):
462 def copies(self):
463 return self._map.copymap
463 return self._map.copymap
464
464
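The `copy()`/`copied()` pair above maintains a simple dest -> source mapping. A toy sketch of that bookkeeping (plain dict, not the real dirstatemap):

copymap = {}

def copy(source, dest):
    if source == dest:
        return
    if source is not None:
        copymap[dest] = source    # mark dest as a copy of source
    else:
        copymap.pop(dest, None)   # source=None unmarks dest

copy(b'a.txt', b'b.txt')
assert copymap.get(b'b.txt') == b'a.txt'
copy(None, b'b.txt')
assert b'b.txt' not in copymap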
465 @requires_no_parents_change
465 @requires_no_parents_change
466 def set_tracked(self, filename):
466 def set_tracked(self, filename):
467 """a "public" method for generic code to mark a file as tracked
467 """a "public" method for generic code to mark a file as tracked
468
468
469 This function is to be called outside of the "update/merge" case. For
469 This function is to be called outside of the "update/merge" case. For
470 example by a command like `hg add X`.
470 example by a command like `hg add X`.
471
471
472 return True if the file was previously untracked, False otherwise.
472 return True if the file was previously untracked, False otherwise.
473 """
473 """
474 entry = self._map.get(filename)
474 entry = self._map.get(filename)
475 if entry is None:
475 if entry is None:
476 self._add(filename)
476 self._add(filename)
477 return True
477 return True
478 elif not entry.tracked:
478 elif not entry.tracked:
479 self._normallookup(filename)
479 self._normallookup(filename)
480 return True
480 return True
481 # XXX This is probably overkill for most cases, but we need this to
481 # XXX This is probably overkill for most cases, but we need this to
482 # fully replace the `normallookup` call with the `set_tracked` one.
482 # fully replace the `normallookup` call with the `set_tracked` one.
483 # Consider smoothing this in the future.
483 # Consider smoothing this in the future.
484 self.set_possibly_dirty(filename)
484 self.set_possibly_dirty(filename)
485 return False
485 return False
486
486
487 @requires_no_parents_change
487 @requires_no_parents_change
488 def set_untracked(self, filename):
488 def set_untracked(self, filename):
489 """a "public" method for generic code to mark a file as untracked
489 """a "public" method for generic code to mark a file as untracked
490
490
491 This function is to be called outside of the "update/merge" case. For
491 This function is to be called outside of the "update/merge" case. For
492 example by a command like `hg remove X`.
492 example by a command like `hg remove X`.
493
493
494 return True if the file was previously tracked, False otherwise.
494 return True if the file was previously tracked, False otherwise.
495 """
495 """
496 entry = self._map.get(filename)
496 entry = self._map.get(filename)
497 if entry is None:
497 if entry is None:
498 return False
498 return False
499 elif entry.added:
499 elif entry.added:
500 self._drop(filename)
500 self._drop(filename)
501 return True
501 return True
502 else:
502 else:
503 self._dirty = True
503 self._dirty = True
504 self._updatedfiles.add(filename)
504 self._updatedfiles.add(filename)
505 self._map.removefile(filename, in_merge=self.in_merge)
505 self._map.set_untracked(filename)
506 return True
506 return True
507
507
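To make the intent of the new "public" API concrete, here is a hedged sketch of how command-level code might drive `set_tracked`/`set_untracked`; `repo`, `wlock()` and `ui.warn` are assumed from context, and the helper names are hypothetical:

def add_files(repo, filenames):
    with repo.wlock():
        ds = repo.dirstate
        for f in filenames:
            if not ds.set_tracked(f):
                repo.ui.warn(b"%s already tracked\n" % f)

def remove_files(repo, filenames):
    with repo.wlock():
        ds = repo.dirstate
        for f in filenames:
            if not ds.set_untracked(f):
                repo.ui.warn(b"%s not tracked\n" % f)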
508 @requires_no_parents_change
508 @requires_no_parents_change
509 def set_clean(self, filename, parentfiledata=None):
509 def set_clean(self, filename, parentfiledata=None):
510 """record that the current state of the file on disk is known to be clean"""
510 """record that the current state of the file on disk is known to be clean"""
511 self._dirty = True
511 self._dirty = True
512 self._updatedfiles.add(filename)
512 self._updatedfiles.add(filename)
513 self._normal(filename, parentfiledata=parentfiledata)
513 self._normal(filename, parentfiledata=parentfiledata)
514
514
515 @requires_no_parents_change
515 @requires_no_parents_change
516 def set_possibly_dirty(self, filename):
516 def set_possibly_dirty(self, filename):
517 """record that the current state of the file on disk is unknown"""
517 """record that the current state of the file on disk is unknown"""
518 self._dirty = True
518 self._dirty = True
519 self._updatedfiles.add(filename)
519 self._updatedfiles.add(filename)
520 self._map.set_possibly_dirty(filename)
520 self._map.set_possibly_dirty(filename)
521
521
522 @requires_parents_change
522 @requires_parents_change
523 def update_file_p1(
523 def update_file_p1(
524 self,
524 self,
525 filename,
525 filename,
526 p1_tracked,
526 p1_tracked,
527 ):
527 ):
528 """Set a file as tracked in the parent (or not)
528 """Set a file as tracked in the parent (or not)
529
529
530 This is to be called when adjusting the dirstate to a new parent after a
530 This is to be called when adjusting the dirstate to a new parent after a
531 history rewriting operation.
531 history rewriting operation.
532
532
533 It should not be called during a merge (p2 != nullid) and only within
533 It should not be called during a merge (p2 != nullid) and only within
534 a `with dirstate.parentchange():` context.
534 a `with dirstate.parentchange():` context.
535 """
535 """
536 if self.in_merge:
536 if self.in_merge:
537 msg = b'update_file_reference should not be called when merging'
537 msg = b'update_file_reference should not be called when merging'
538 raise error.ProgrammingError(msg)
538 raise error.ProgrammingError(msg)
539 entry = self._map.get(filename)
539 entry = self._map.get(filename)
540 if entry is None:
540 if entry is None:
541 wc_tracked = False
541 wc_tracked = False
542 else:
542 else:
543 wc_tracked = entry.tracked
543 wc_tracked = entry.tracked
544 possibly_dirty = False
544 possibly_dirty = False
545 if p1_tracked and wc_tracked:
545 if p1_tracked and wc_tracked:
546 # the underlying reference might have changed, we will have to
546 # the underlying reference might have changed, we will have to
547 # check it.
547 # check it.
548 possibly_dirty = True
548 possibly_dirty = True
549 elif not (p1_tracked or wc_tracked):
549 elif not (p1_tracked or wc_tracked):
550 # the file is no longer relevant to anyone
550 # the file is no longer relevant to anyone
551 self._drop(filename)
551 self._drop(filename)
552 elif (not p1_tracked) and wc_tracked:
552 elif (not p1_tracked) and wc_tracked:
553 if entry is not None and entry.added:
553 if entry is not None and entry.added:
554 return # avoid dropping copy information (maybe?)
554 return # avoid dropping copy information (maybe?)
555 elif p1_tracked and not wc_tracked:
555 elif p1_tracked and not wc_tracked:
556 pass
556 pass
557 else:
557 else:
558 assert False, 'unreachable'
558 assert False, 'unreachable'
559
559
560 # this means we are making a call for a file whose data (eg: added or
560 # this means we are making a call for a file whose data (eg: added or
561 # removed) we do not really care about; however this should be a minor
561 # removed) we do not really care about; however this should be a minor
562 # overhead compared to the overall update process calling this.
562 # overhead compared to the overall update process calling this.
563 parentfiledata = None
563 parentfiledata = None
564 if wc_tracked:
564 if wc_tracked:
565 parentfiledata = self._get_filedata(filename)
565 parentfiledata = self._get_filedata(filename)
566
566
567 self._updatedfiles.add(filename)
567 self._updatedfiles.add(filename)
568 self._map.reset_state(
568 self._map.reset_state(
569 filename,
569 filename,
570 wc_tracked,
570 wc_tracked,
571 p1_tracked,
571 p1_tracked,
572 possibly_dirty=possibly_dirty,
572 possibly_dirty=possibly_dirty,
573 parentfiledata=parentfiledata,
573 parentfiledata=parentfiledata,
574 )
574 )
575 if (
575 if (
576 parentfiledata is not None
576 parentfiledata is not None
577 and parentfiledata[2] > self._lastnormaltime
577 and parentfiledata[2] > self._lastnormaltime
578 ):
578 ):
579 # Remember the most recent modification timeslot for status(),
579 # Remember the most recent modification timeslot for status(),
580 # to make sure we won't miss future size-preserving file content
580 # to make sure we won't miss future size-preserving file content
581 # modifications that happen within the same timeslot.
581 # modifications that happen within the same timeslot.
582 self._lastnormaltime = parentfiledata[2]
582 self._lastnormaltime = parentfiledata[2]
583
583
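The branch analysis in `update_file_p1` reduces to a four-case truth table over (p1_tracked, wc_tracked). A standalone sketch of that classification (names are illustrative only, not part of this patch):

def classify(p1_tracked, wc_tracked, entry_added=False):
    if p1_tracked and wc_tracked:
        return 'recheck'     # content may differ: mark possibly dirty
    if not p1_tracked and not wc_tracked:
        return 'drop'        # the file is relevant to no one
    if not p1_tracked and wc_tracked:
        # keep an "added" entry so copy information is not lost
        return 'keep-added' if entry_added else 'reset'
    return 'reset'           # p1_tracked and not wc_tracked

assert classify(True, True) == 'recheck'
assert classify(False, False) == 'drop'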
584 @requires_parents_change
584 @requires_parents_change
585 def update_file(
585 def update_file(
586 self,
586 self,
587 filename,
587 filename,
588 wc_tracked,
588 wc_tracked,
589 p1_tracked,
589 p1_tracked,
590 p2_tracked=False,
590 p2_tracked=False,
591 merged=False,
591 merged=False,
592 clean_p1=False,
592 clean_p1=False,
593 clean_p2=False,
593 clean_p2=False,
594 possibly_dirty=False,
594 possibly_dirty=False,
595 parentfiledata=None,
595 parentfiledata=None,
596 ):
596 ):
597 """update the information about a file in the dirstate
597 """update the information about a file in the dirstate
598
598
599 This is to be called when the dirstate's parents change, to keep track
599 This is to be called when the dirstate's parents change, to keep track
600 of the file's situation in regard to the working copy and its parent.
600 of the file's situation in regard to the working copy and its parent.
601
601
602 This function must be called within a `dirstate.parentchange` context.
602 This function must be called within a `dirstate.parentchange` context.
603
603
604 note: the API is at an early stage and we might need to adjust it
604 note: the API is at an early stage and we might need to adjust it
605 depending on what information ends up being relevant and useful to
605 depending on what information ends up being relevant and useful to
606 other processing.
606 other processing.
607 """
607 """
608 if merged and (clean_p1 or clean_p2):
608 if merged and (clean_p1 or clean_p2):
609 msg = b'`merged` argument incompatible with `clean_p1`/`clean_p2`'
609 msg = b'`merged` argument incompatible with `clean_p1`/`clean_p2`'
610 raise error.ProgrammingError(msg)
610 raise error.ProgrammingError(msg)
611
611
612 # note: I do not think we need to double check name clash here since we
612 # note: I do not think we need to double check name clash here since we
613 # are in an update/merge case that should already have taken care of
613 # are in an update/merge case that should already have taken care of
614 # this. The test agrees
614 # this. The test agrees
615
615
616 self._dirty = True
616 self._dirty = True
617 self._updatedfiles.add(filename)
617 self._updatedfiles.add(filename)
618
618
619 need_parent_file_data = (
619 need_parent_file_data = (
620 not (possibly_dirty or clean_p2 or merged)
620 not (possibly_dirty or clean_p2 or merged)
621 and wc_tracked
621 and wc_tracked
622 and p1_tracked
622 and p1_tracked
623 )
623 )
624
624
625 # this means we are making a call for a file whose data (eg: added or
625 # this means we are making a call for a file whose data (eg: added or
626 # removed) we do not really care about; however this should be a minor
626 # removed) we do not really care about; however this should be a minor
627 # overhead compared to the overall update process calling this.
627 # overhead compared to the overall update process calling this.
628 if need_parent_file_data:
628 if need_parent_file_data:
629 if parentfiledata is None:
629 if parentfiledata is None:
630 parentfiledata = self._get_filedata(filename)
630 parentfiledata = self._get_filedata(filename)
631 mtime = parentfiledata[2]
631 mtime = parentfiledata[2]
632
632
633 if mtime > self._lastnormaltime:
633 if mtime > self._lastnormaltime:
634 # Remember the most recent modification timeslot for
634 # Remember the most recent modification timeslot for
635 # status(), to make sure we won't miss future
635 # status(), to make sure we won't miss future
636 # size-preserving file content modifications that happen
636 # size-preserving file content modifications that happen
637 # within the same timeslot.
637 # within the same timeslot.
638 self._lastnormaltime = mtime
638 self._lastnormaltime = mtime
639
639
640 self._map.reset_state(
640 self._map.reset_state(
641 filename,
641 filename,
642 wc_tracked,
642 wc_tracked,
643 p1_tracked,
643 p1_tracked,
644 p2_tracked=p2_tracked,
644 p2_tracked=p2_tracked,
645 merged=merged,
645 merged=merged,
646 clean_p1=clean_p1,
646 clean_p1=clean_p1,
647 clean_p2=clean_p2,
647 clean_p2=clean_p2,
648 possibly_dirty=possibly_dirty,
648 possibly_dirty=possibly_dirty,
649 parentfiledata=parentfiledata,
649 parentfiledata=parentfiledata,
650 )
650 )
651 if (
651 if (
652 parentfiledata is not None
652 parentfiledata is not None
653 and parentfiledata[2] > self._lastnormaltime
653 and parentfiledata[2] > self._lastnormaltime
654 ):
654 ):
655 # Remember the most recent modification timeslot for status(),
655 # Remember the most recent modification timeslot for status(),
656 # to make sure we won't miss future size-preserving file content
656 # to make sure we won't miss future size-preserving file content
657 # modifications that happen within the same timeslot.
657 # modifications that happen within the same timeslot.
658 self._lastnormaltime = parentfiledata[2]
658 self._lastnormaltime = parentfiledata[2]
659
659
660 def _addpath(
660 def _addpath(
661 self,
661 self,
662 f,
662 f,
663 mode=0,
663 mode=0,
664 size=None,
664 size=None,
665 mtime=None,
665 mtime=None,
666 added=False,
666 added=False,
667 merged=False,
667 merged=False,
668 from_p2=False,
668 from_p2=False,
669 possibly_dirty=False,
669 possibly_dirty=False,
670 ):
670 ):
671 entry = self._map.get(f)
671 entry = self._map.get(f)
672 if added or entry is not None and entry.removed:
672 if added or entry is not None and entry.removed:
673 scmutil.checkfilename(f)
673 scmutil.checkfilename(f)
674 if self._map.hastrackeddir(f):
674 if self._map.hastrackeddir(f):
675 msg = _(b'directory %r already in dirstate')
675 msg = _(b'directory %r already in dirstate')
676 msg %= pycompat.bytestr(f)
676 msg %= pycompat.bytestr(f)
677 raise error.Abort(msg)
677 raise error.Abort(msg)
678 # shadows
678 # shadows
679 for d in pathutil.finddirs(f):
679 for d in pathutil.finddirs(f):
680 if self._map.hastrackeddir(d):
680 if self._map.hastrackeddir(d):
681 break
681 break
682 entry = self._map.get(d)
682 entry = self._map.get(d)
683 if entry is not None and not entry.removed:
683 if entry is not None and not entry.removed:
684 msg = _(b'file %r in dirstate clashes with %r')
684 msg = _(b'file %r in dirstate clashes with %r')
685 msg %= (pycompat.bytestr(d), pycompat.bytestr(f))
685 msg %= (pycompat.bytestr(d), pycompat.bytestr(f))
686 raise error.Abort(msg)
686 raise error.Abort(msg)
687 self._dirty = True
687 self._dirty = True
688 self._updatedfiles.add(f)
688 self._updatedfiles.add(f)
689 self._map.addfile(
689 self._map.addfile(
690 f,
690 f,
691 mode=mode,
691 mode=mode,
692 size=size,
692 size=size,
693 mtime=mtime,
693 mtime=mtime,
694 added=added,
694 added=added,
695 merged=merged,
695 merged=merged,
696 from_p2=from_p2,
696 from_p2=from_p2,
697 possibly_dirty=possibly_dirty,
697 possibly_dirty=possibly_dirty,
698 )
698 )
699
699
700 def _get_filedata(self, filename):
700 def _get_filedata(self, filename):
701 """returns"""
701 """returns"""
702 s = os.lstat(self._join(filename))
702 s = os.lstat(self._join(filename))
703 mode = s.st_mode
703 mode = s.st_mode
704 size = s.st_size
704 size = s.st_size
705 mtime = s[stat.ST_MTIME]
705 mtime = s[stat.ST_MTIME]
706 return (mode, size, mtime)
706 return (mode, size, mtime)
707
707
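`_get_filedata` packs the lstat results into the (mode, size, mtime) triple used throughout this file. Equivalent standalone code, assuming only that the path exists:

import os
import stat

def filedata(path):
    s = os.lstat(path)
    return (s.st_mode, s.st_size, s[stat.ST_MTIME])

mode, size, mtime = filedata(__file__)  # any existing path works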
708 def normal(self, f, parentfiledata=None):
708 def normal(self, f, parentfiledata=None):
709 """Mark a file normal and clean.
709 """Mark a file normal and clean.
710
710
711 parentfiledata: (mode, size, mtime) of the clean file
711 parentfiledata: (mode, size, mtime) of the clean file
712
712
713 parentfiledata should be computed from memory (for mode,
713 parentfiledata should be computed from memory (for mode,
714 size), as or close as possible from the point where we
714 size), as or close as possible from the point where we
715 determined the file was clean, to limit the risk of the
715 determined the file was clean, to limit the risk of the
716 file having been changed by an external process between the
716 file having been changed by an external process between the
717 moment where the file was determined to be clean and now."""
717 moment where the file was determined to be clean and now."""
718 if self.pendingparentchange():
718 if self.pendingparentchange():
719 util.nouideprecwarn(
719 util.nouideprecwarn(
720 b"do not use `normal` inside of update/merge context."
720 b"do not use `normal` inside of update/merge context."
721 b" Use `update_file` or `update_file_p1`",
721 b" Use `update_file` or `update_file_p1`",
722 b'6.0',
722 b'6.0',
723 stacklevel=2,
723 stacklevel=2,
724 )
724 )
725 else:
725 else:
726 util.nouideprecwarn(
726 util.nouideprecwarn(
727 b"do not use `normal` outside of update/merge context."
727 b"do not use `normal` outside of update/merge context."
728 b" Use `set_tracked`",
728 b" Use `set_tracked`",
729 b'6.0',
729 b'6.0',
730 stacklevel=2,
730 stacklevel=2,
731 )
731 )
732 self._normal(f, parentfiledata=parentfiledata)
732 self._normal(f, parentfiledata=parentfiledata)
733
733
734 def _normal(self, f, parentfiledata=None):
734 def _normal(self, f, parentfiledata=None):
735 if parentfiledata:
735 if parentfiledata:
736 (mode, size, mtime) = parentfiledata
736 (mode, size, mtime) = parentfiledata
737 else:
737 else:
738 (mode, size, mtime) = self._get_filedata(f)
738 (mode, size, mtime) = self._get_filedata(f)
739 self._addpath(f, mode=mode, size=size, mtime=mtime)
739 self._addpath(f, mode=mode, size=size, mtime=mtime)
740 self._map.copymap.pop(f, None)
740 self._map.copymap.pop(f, None)
741 if f in self._map.nonnormalset:
741 if f in self._map.nonnormalset:
742 self._map.nonnormalset.remove(f)
742 self._map.nonnormalset.remove(f)
743 if mtime > self._lastnormaltime:
743 if mtime > self._lastnormaltime:
744 # Remember the most recent modification timeslot for status(),
744 # Remember the most recent modification timeslot for status(),
745 # to make sure we won't miss future size-preserving file content
745 # to make sure we won't miss future size-preserving file content
746 # modifications that happen within the same timeslot.
746 # modifications that happen within the same timeslot.
747 self._lastnormaltime = mtime
747 self._lastnormaltime = mtime
748
748
749 def normallookup(self, f):
749 def normallookup(self, f):
750 '''Mark a file normal, but possibly dirty.'''
750 '''Mark a file normal, but possibly dirty.'''
751 if self.pendingparentchange():
751 if self.pendingparentchange():
752 util.nouideprecwarn(
752 util.nouideprecwarn(
753 b"do not use `normallookup` inside of update/merge context."
753 b"do not use `normallookup` inside of update/merge context."
754 b" Use `update_file` or `update_file_p1`",
754 b" Use `update_file` or `update_file_p1`",
755 b'6.0',
755 b'6.0',
756 stacklevel=2,
756 stacklevel=2,
757 )
757 )
758 else:
758 else:
759 util.nouideprecwarn(
759 util.nouideprecwarn(
760 b"do not use `normallookup` outside of update/merge context."
760 b"do not use `normallookup` outside of update/merge context."
761 b" Use `set_possibly_dirty` or `set_tracked`",
761 b" Use `set_possibly_dirty` or `set_tracked`",
762 b'6.0',
762 b'6.0',
763 stacklevel=2,
763 stacklevel=2,
764 )
764 )
765 self._normallookup(f)
765 self._normallookup(f)
766
766
767 def _normallookup(self, f):
767 def _normallookup(self, f):
768 '''Mark a file normal, but possibly dirty.'''
768 '''Mark a file normal, but possibly dirty.'''
769 if self.in_merge:
769 if self.in_merge:
770 # if there is a merge going on and the file was either
770 # if there is a merge going on and the file was either
771 # "merged" or coming from other parent (-2) before
771 # "merged" or coming from other parent (-2) before
772 # being removed, restore that state.
772 # being removed, restore that state.
773 entry = self._map.get(f)
773 entry = self._map.get(f)
774 if entry is not None:
774 if entry is not None:
775 # XXX this should probably be dealt with at a lower level
775 # XXX this should probably be dealt with at a lower level
776 # (see `merged_removed` and `from_p2_removed`)
776 # (see `merged_removed` and `from_p2_removed`)
777 if entry.merged_removed or entry.from_p2_removed:
777 if entry.merged_removed or entry.from_p2_removed:
778 source = self._map.copymap.get(f)
778 source = self._map.copymap.get(f)
779 if entry.merged_removed:
779 if entry.merged_removed:
780 self._merge(f)
780 self._merge(f)
781 elif entry.from_p2_removed:
781 elif entry.from_p2_removed:
782 self._otherparent(f)
782 self._otherparent(f)
783 if source is not None:
783 if source is not None:
784 self.copy(source, f)
784 self.copy(source, f)
785 return
785 return
786 elif entry.merged or entry.from_p2:
786 elif entry.merged or entry.from_p2:
787 return
787 return
788 self._addpath(f, possibly_dirty=True)
788 self._addpath(f, possibly_dirty=True)
789 self._map.copymap.pop(f, None)
789 self._map.copymap.pop(f, None)
790
790
791 def otherparent(self, f):
791 def otherparent(self, f):
792 '''Mark as coming from the other parent, always dirty.'''
792 '''Mark as coming from the other parent, always dirty.'''
793 if self.pendingparentchange():
793 if self.pendingparentchange():
794 util.nouideprecwarn(
794 util.nouideprecwarn(
795 b"do not use `otherparent` inside of update/merge context."
795 b"do not use `otherparent` inside of update/merge context."
796 b" Use `update_file` or `update_file_p1`",
796 b" Use `update_file` or `update_file_p1`",
797 b'6.0',
797 b'6.0',
798 stacklevel=2,
798 stacklevel=2,
799 )
799 )
800 else:
800 else:
801 util.nouideprecwarn(
801 util.nouideprecwarn(
802 b"do not use `otherparent` outside of update/merge context."
802 b"do not use `otherparent` outside of update/merge context."
803 b"It should have been set by the update/merge code",
803 b"It should have been set by the update/merge code",
804 b'6.0',
804 b'6.0',
805 stacklevel=2,
805 stacklevel=2,
806 )
806 )
807 self._otherparent(f)
807 self._otherparent(f)
808
808
809 def _otherparent(self, f):
809 def _otherparent(self, f):
810 if not self.in_merge:
810 if not self.in_merge:
811 msg = _(b"setting %r to other parent only allowed in merges") % f
811 msg = _(b"setting %r to other parent only allowed in merges") % f
812 raise error.Abort(msg)
812 raise error.Abort(msg)
813 entry = self._map.get(f)
813 entry = self._map.get(f)
814 if entry is not None and entry.tracked:
814 if entry is not None and entry.tracked:
815 # merge-like
815 # merge-like
816 self._addpath(f, merged=True)
816 self._addpath(f, merged=True)
817 else:
817 else:
818 # add-like
818 # add-like
819 self._addpath(f, from_p2=True)
819 self._addpath(f, from_p2=True)
820 self._map.copymap.pop(f, None)
820 self._map.copymap.pop(f, None)
821
821
822 def add(self, f):
822 def add(self, f):
823 '''Mark a file added.'''
823 '''Mark a file added.'''
824 if self.pendingparentchange():
824 if self.pendingparentchange():
825 util.nouideprecwarn(
825 util.nouideprecwarn(
826 b"do not use `add` inside of update/merge context."
826 b"do not use `add` inside of update/merge context."
827 b" Use `update_file`",
827 b" Use `update_file`",
828 b'6.0',
828 b'6.0',
829 stacklevel=2,
829 stacklevel=2,
830 )
830 )
831 else:
831 else:
832 util.nouideprecwarn(
832 util.nouideprecwarn(
833 b"do not use `add` outside of update/merge context."
833 b"do not use `add` outside of update/merge context."
834 b" Use `set_tracked`",
834 b" Use `set_tracked`",
835 b'6.0',
835 b'6.0',
836 stacklevel=2,
836 stacklevel=2,
837 )
837 )
838 self._add(f)
838 self._add(f)
839
839
840 def _add(self, filename):
840 def _add(self, filename):
841 """internal function to mark a file as added"""
841 """internal function to mark a file as added"""
842 self._addpath(filename, added=True)
842 self._addpath(filename, added=True)
843 self._map.copymap.pop(filename, None)
843 self._map.copymap.pop(filename, None)
844
844
845 def remove(self, f):
845 def remove(self, f):
846 '''Mark a file removed'''
846 '''Mark a file removed'''
847 if self.pendingparentchange():
847 if self.pendingparentchange():
848 util.nouideprecwarn(
848 util.nouideprecwarn(
849 b"do not use `remove` insde of update/merge context."
849 b"do not use `remove` insde of update/merge context."
850 b" Use `update_file` or `update_file_p1`",
850 b" Use `update_file` or `update_file_p1`",
851 b'6.0',
851 b'6.0',
852 stacklevel=2,
852 stacklevel=2,
853 )
853 )
854 else:
854 else:
855 util.nouideprecwarn(
855 util.nouideprecwarn(
856 b"do not use `remove` outside of update/merge context."
856 b"do not use `remove` outside of update/merge context."
857 b" Use `set_untracked`",
857 b" Use `set_untracked`",
858 b'6.0',
858 b'6.0',
859 stacklevel=2,
859 stacklevel=2,
860 )
860 )
861 self._dirty = True
861 self._dirty = True
862 self._updatedfiles.add(f)
862 self._updatedfiles.add(f)
863 entry = self._map.get(f)
863 entry = self._map.get(f)
864 if entry is None:
864 if entry is None:
865 # Assuming we are in an update/merge case
865 # Assuming we are in an update/merge case
866 self.update_file(f, p1_tracked=True, wc_tracked=False)
866 self.update_file(f, p1_tracked=True, wc_tracked=False)
867 else:
867 else:
868 self.set_untracked(f)
868 self.set_untracked(f)
869
869
870 def merge(self, f):
870 def merge(self, f):
871 '''Mark a file merged.'''
871 '''Mark a file merged.'''
872 if self.pendingparentchange():
872 if self.pendingparentchange():
873 util.nouideprecwarn(
873 util.nouideprecwarn(
874 b"do not use `merge` inside of update/merge context."
874 b"do not use `merge` inside of update/merge context."
875 b" Use `update_file`",
875 b" Use `update_file`",
876 b'6.0',
876 b'6.0',
877 stacklevel=2,
877 stacklevel=2,
878 )
878 )
879 else:
879 else:
880 util.nouideprecwarn(
880 util.nouideprecwarn(
881 b"do not use `merge` outside of update/merge context."
881 b"do not use `merge` outside of update/merge context."
882 b"It should have been set by the update/merge code",
882 b"It should have been set by the update/merge code",
883 b'6.0',
883 b'6.0',
884 stacklevel=2,
884 stacklevel=2,
885 )
885 )
886 self._merge(f)
886 self._merge(f)
887
887
888 def _merge(self, f):
888 def _merge(self, f):
889 if not self.in_merge:
889 if not self.in_merge:
890 return self._normallookup(f)
890 return self._normallookup(f)
891 return self._otherparent(f)
891 return self._otherparent(f)
892
892
893 def drop(self, f):
893 def drop(self, f):
894 '''Drop a file from the dirstate'''
894 '''Drop a file from the dirstate'''
895 if self.pendingparentchange():
895 if self.pendingparentchange():
896 util.nouideprecwarn(
896 util.nouideprecwarn(
897 b"do not use `drop` inside of update/merge context."
897 b"do not use `drop` inside of update/merge context."
898 b" Use `update_file`",
898 b" Use `update_file`",
899 b'6.0',
899 b'6.0',
900 stacklevel=2,
900 stacklevel=2,
901 )
901 )
902 else:
902 else:
903 util.nouideprecwarn(
903 util.nouideprecwarn(
904 b"do not use `drop` outside of update/merge context."
904 b"do not use `drop` outside of update/merge context."
905 b" Use `set_untracked`",
905 b" Use `set_untracked`",
906 b'6.0',
906 b'6.0',
907 stacklevel=2,
907 stacklevel=2,
908 )
908 )
909 self._drop(f)
909 self._drop(f)
910
910
911 def _drop(self, filename):
911 def _drop(self, filename):
912 """internal function to drop a file from the dirstate"""
912 """internal function to drop a file from the dirstate"""
913 if self._map.dropfile(filename):
913 if self._map.dropfile(filename):
914 self._dirty = True
914 self._dirty = True
915 self._updatedfiles.add(filename)
915 self._updatedfiles.add(filename)
916 self._map.copymap.pop(filename, None)
916 self._map.copymap.pop(filename, None)
917
917
918 def _discoverpath(self, path, normed, ignoremissing, exists, storemap):
918 def _discoverpath(self, path, normed, ignoremissing, exists, storemap):
919 if exists is None:
919 if exists is None:
920 exists = os.path.lexists(os.path.join(self._root, path))
920 exists = os.path.lexists(os.path.join(self._root, path))
921 if not exists:
921 if not exists:
922 # Maybe a path component exists
922 # Maybe a path component exists
923 if not ignoremissing and b'/' in path:
923 if not ignoremissing and b'/' in path:
924 d, f = path.rsplit(b'/', 1)
924 d, f = path.rsplit(b'/', 1)
925 d = self._normalize(d, False, ignoremissing, None)
925 d = self._normalize(d, False, ignoremissing, None)
926 folded = d + b"/" + f
926 folded = d + b"/" + f
927 else:
927 else:
928 # No path components, preserve original case
928 # No path components, preserve original case
929 folded = path
929 folded = path
930 else:
930 else:
931 # recursively normalize leading directory components
931 # recursively normalize leading directory components
932 # against dirstate
932 # against dirstate
933 if b'/' in normed:
933 if b'/' in normed:
934 d, f = normed.rsplit(b'/', 1)
934 d, f = normed.rsplit(b'/', 1)
935 d = self._normalize(d, False, ignoremissing, True)
935 d = self._normalize(d, False, ignoremissing, True)
936 r = self._root + b"/" + d
936 r = self._root + b"/" + d
937 folded = d + b"/" + util.fspath(f, r)
937 folded = d + b"/" + util.fspath(f, r)
938 else:
938 else:
939 folded = util.fspath(normed, self._root)
939 folded = util.fspath(normed, self._root)
940 storemap[normed] = folded
940 storemap[normed] = folded
941
941
942 return folded
942 return folded
943
943
944 def _normalizefile(self, path, isknown, ignoremissing=False, exists=None):
944 def _normalizefile(self, path, isknown, ignoremissing=False, exists=None):
945 normed = util.normcase(path)
945 normed = util.normcase(path)
946 folded = self._map.filefoldmap.get(normed, None)
946 folded = self._map.filefoldmap.get(normed, None)
947 if folded is None:
947 if folded is None:
948 if isknown:
948 if isknown:
949 folded = path
949 folded = path
950 else:
950 else:
951 folded = self._discoverpath(
951 folded = self._discoverpath(
952 path, normed, ignoremissing, exists, self._map.filefoldmap
952 path, normed, ignoremissing, exists, self._map.filefoldmap
953 )
953 )
954 return folded
954 return folded
955
955
956 def _normalize(self, path, isknown, ignoremissing=False, exists=None):
956 def _normalize(self, path, isknown, ignoremissing=False, exists=None):
957 normed = util.normcase(path)
957 normed = util.normcase(path)
958 folded = self._map.filefoldmap.get(normed, None)
958 folded = self._map.filefoldmap.get(normed, None)
959 if folded is None:
959 if folded is None:
960 folded = self._map.dirfoldmap.get(normed, None)
960 folded = self._map.dirfoldmap.get(normed, None)
961 if folded is None:
961 if folded is None:
962 if isknown:
962 if isknown:
963 folded = path
963 folded = path
964 else:
964 else:
965 # store discovered result in dirfoldmap so that future
965 # store discovered result in dirfoldmap so that future
966 # normalizefile calls don't start matching directories
966 # normalizefile calls don't start matching directories
967 folded = self._discoverpath(
967 folded = self._discoverpath(
968 path, normed, ignoremissing, exists, self._map.dirfoldmap
968 path, normed, ignoremissing, exists, self._map.dirfoldmap
969 )
969 )
970 return folded
970 return folded
971
971
972 def normalize(self, path, isknown=False, ignoremissing=False):
972 def normalize(self, path, isknown=False, ignoremissing=False):
973 """
973 """
974 normalize the case of a pathname when on a casefolding filesystem
974 normalize the case of a pathname when on a casefolding filesystem
975
975
976 isknown specifies whether the filename came from walking the
976 isknown specifies whether the filename came from walking the
977 disk, to avoid extra filesystem access.
977 disk, to avoid extra filesystem access.
978
978
979 If ignoremissing is True, missing paths are returned
979 If ignoremissing is True, missing paths are returned
980 unchanged. Otherwise, we try harder to normalize possibly
980 unchanged. Otherwise, we try harder to normalize possibly
981 existing path components.
981 existing path components.
982
982
983 The normalized case is determined based on the following precedence:
983 The normalized case is determined based on the following precedence:
984
984
985 - version of name already stored in the dirstate
985 - version of name already stored in the dirstate
986 - version of name stored on disk
986 - version of name stored on disk
987 - version provided via command arguments
987 - version provided via command arguments
988 """
988 """
989
989
990 if self._checkcase:
990 if self._checkcase:
991 return self._normalize(path, isknown, ignoremissing)
991 return self._normalize(path, isknown, ignoremissing)
992 return path
992 return path
993
993
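The precedence list in the `normalize` docstring can be sketched as a tiny lookup chain; `foldmap` and `on_disk` are hypothetical stand-ins for the dirstate fold maps and the spelling found on disk:

def normalize(path, foldmap, on_disk):
    key = path.lower()        # stand-in for util.normcase
    if key in foldmap:        # 1. spelling already stored in the dirstate
        return foldmap[key]
    if key in on_disk:        # 2. spelling stored on disk
        return on_disk[key]
    return path               # 3. spelling provided on the command line

assert normalize(b'readme', {b'readme': b'README'}, {}) == b'README'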
994 def clear(self):
994 def clear(self):
995 self._map.clear()
995 self._map.clear()
996 self._lastnormaltime = 0
996 self._lastnormaltime = 0
997 self._updatedfiles.clear()
997 self._updatedfiles.clear()
998 self._dirty = True
998 self._dirty = True
999
999
1000 def rebuild(self, parent, allfiles, changedfiles=None):
1000 def rebuild(self, parent, allfiles, changedfiles=None):
1001 if changedfiles is None:
1001 if changedfiles is None:
1002 # Rebuild entire dirstate
1002 # Rebuild entire dirstate
1003 to_lookup = allfiles
1003 to_lookup = allfiles
1004 to_drop = []
1004 to_drop = []
1005 lastnormaltime = self._lastnormaltime
1005 lastnormaltime = self._lastnormaltime
1006 self.clear()
1006 self.clear()
1007 self._lastnormaltime = lastnormaltime
1007 self._lastnormaltime = lastnormaltime
1008 elif len(changedfiles) < 10:
1008 elif len(changedfiles) < 10:
1009 # Avoid turning allfiles into a set, which can be expensive if it's
1009 # Avoid turning allfiles into a set, which can be expensive if it's
1010 # large.
1010 # large.
1011 to_lookup = []
1011 to_lookup = []
1012 to_drop = []
1012 to_drop = []
1013 for f in changedfiles:
1013 for f in changedfiles:
1014 if f in allfiles:
1014 if f in allfiles:
1015 to_lookup.append(f)
1015 to_lookup.append(f)
1016 else:
1016 else:
1017 to_drop.append(f)
1017 to_drop.append(f)
1018 else:
1018 else:
1019 changedfilesset = set(changedfiles)
1019 changedfilesset = set(changedfiles)
1020 to_lookup = changedfilesset & set(allfiles)
1020 to_lookup = changedfilesset & set(allfiles)
1021 to_drop = changedfilesset - to_lookup
1021 to_drop = changedfilesset - to_lookup
1022
1022
1023 if self._origpl is None:
1023 if self._origpl is None:
1024 self._origpl = self._pl
1024 self._origpl = self._pl
1025 self._map.setparents(parent, self._nodeconstants.nullid)
1025 self._map.setparents(parent, self._nodeconstants.nullid)
1026
1026
1027 for f in to_lookup:
1027 for f in to_lookup:
1028 self._normallookup(f)
1028 self._normallookup(f)
1029 for f in to_drop:
1029 for f in to_drop:
1030 self._drop(f)
1030 self._drop(f)
1031
1031
1032 self._dirty = True
1032 self._dirty = True
1033
1033
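`rebuild()` picks between two partitioning strategies based on the size of `changedfiles`: below a small threshold it scans the list directly, so it never has to build a set out of a potentially huge `allfiles`; above it, set arithmetic wins. A self-contained sketch:

def partition(allfiles, changedfiles, threshold=10):
    if len(changedfiles) < threshold:
        to_lookup, to_drop = [], []
        for f in changedfiles:
            # membership test against allfiles as-is, no set() built
            (to_lookup if f in allfiles else to_drop).append(f)
        return to_lookup, to_drop
    changedset = set(changedfiles)
    to_lookup = changedset & set(allfiles)
    return to_lookup, changedset - to_lookup

assert partition([b'a', b'b'], [b'a', b'c']) == ([b'a'], [b'c'])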
1034 def identity(self):
1034 def identity(self):
1035 """Return identity of dirstate itself to detect changing in storage
1035 """Return identity of dirstate itself to detect changing in storage
1036
1036
1037 If identity of previous dirstate is equal to this, writing
1037 If identity of previous dirstate is equal to this, writing
1038 changes based on the former dirstate out can keep consistency.
1038 changes based on the former dirstate out can keep consistency.
1039 """
1039 """
1040 return self._map.identity
1040 return self._map.identity
1041
1041
1042 def write(self, tr):
1042 def write(self, tr):
1043 if not self._dirty:
1043 if not self._dirty:
1044 return
1044 return
1045
1045
1046 filename = self._filename
1046 filename = self._filename
1047 if tr:
1047 if tr:
1048 # 'dirstate.write()' is not only for writing in-memory
1048 # 'dirstate.write()' is not only for writing in-memory
1049 # changes out, but also for dropping ambiguous timestamp.
1049 # changes out, but also for dropping ambiguous timestamp.
1050 # delayed writing re-raise "ambiguous timestamp issue".
1050 # delayed writing re-raise "ambiguous timestamp issue".
1051 # See also the wiki page below for detail:
1051 # See also the wiki page below for detail:
1052 # https://www.mercurial-scm.org/wiki/DirstateTransactionPlan
1052 # https://www.mercurial-scm.org/wiki/DirstateTransactionPlan
1053
1053
1054 # emulate dropping timestamp in 'parsers.pack_dirstate'
1054 # emulate dropping timestamp in 'parsers.pack_dirstate'
1055 now = _getfsnow(self._opener)
1055 now = _getfsnow(self._opener)
1056 self._map.clearambiguoustimes(self._updatedfiles, now)
1056 self._map.clearambiguoustimes(self._updatedfiles, now)
1057
1057
1058 # emulate that all 'dirstate.normal' results are written out
1058 # emulate that all 'dirstate.normal' results are written out
1059 self._lastnormaltime = 0
1059 self._lastnormaltime = 0
1060 self._updatedfiles.clear()
1060 self._updatedfiles.clear()
1061
1061
1062 # delay writing in-memory changes out
1062 # delay writing in-memory changes out
1063 tr.addfilegenerator(
1063 tr.addfilegenerator(
1064 b'dirstate',
1064 b'dirstate',
1065 (self._filename,),
1065 (self._filename,),
1066 lambda f: self._writedirstate(tr, f),
1066 lambda f: self._writedirstate(tr, f),
1067 location=b'plain',
1067 location=b'plain',
1068 )
1068 )
1069 return
1069 return
1070
1070
1071 st = self._opener(filename, b"w", atomictemp=True, checkambig=True)
1071 st = self._opener(filename, b"w", atomictemp=True, checkambig=True)
1072 self._writedirstate(tr, st)
1072 self._writedirstate(tr, st)
1073
1073
1074 def addparentchangecallback(self, category, callback):
1075 """add a callback to be called when the working directory parents change
1076
1077 Callback will be called with the following arguments:
1078 dirstate, (oldp1, oldp2), (newp1, newp2)
1079
1080 Category is a unique identifier to allow overwriting an old callback
1081 with a newer callback.
1082 """
1083 self._plchangecallbacks[category] = callback
1084
1085 def _writedirstate(self, tr, st):
1086 # notify callbacks about parents change
1087 if self._origpl is not None and self._origpl != self._pl:
1088 for c, callback in sorted(
1089 pycompat.iteritems(self._plchangecallbacks)
1090 ):
1091 callback(self, self._origpl, self._pl)
1092 self._origpl = None
1093 # use the modification time of the newly created temporary file as the
1094 # filesystem's notion of 'now'
1095 now = util.fstat(st)[stat.ST_MTIME] & _rangemask
1096
1097 # a large enough 'delaywrite' prevents 'pack_dirstate' from dropping
1098 # the timestamp of each entry in the dirstate, because of 'now > mtime'
1099 delaywrite = self._ui.configint(b'debug', b'dirstate.delaywrite')
1100 if delaywrite > 0:
1101 # do we have any files to delay for?
1102 for f, e in pycompat.iteritems(self._map):
1103 if e.need_delay(now):
1104 import time # lazy import to avoid it when unused
1105
1106 # rather than sleep n seconds, sleep until the next
1107 # multiple of n seconds
1108 clock = time.time()
1109 start = int(clock) - (int(clock) % delaywrite)
1110 end = start + delaywrite
1111 time.sleep(end - clock)
1112 now = end # trust our estimate that the end is near now
1113 break
1114
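# Worked example (illustrative, not part of the original file): with
# delaywrite = 2 and clock = 7.3, start = 7 - (7 % 2) = 6 and end = 8,
# so the loop sleeps 0.7s and then treats now = 8 -- the write lands on
# a fresh second boundary, past every entry that needed the delay.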
1115 self._map.write(tr, st, now)
1116 self._lastnormaltime = 0
1117 self._dirty = False
1118
1119 def _dirignore(self, f):
1120 if self._ignore(f):
1121 return True
1122 for p in pathutil.finddirs(f):
1123 if self._ignore(p):
1124 return True
1125 return False
1126
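# Illustrative note (not part of the original file): _dirignore(b'a/b/c')
# is True when b'a/b/c' itself or any ancestor directory (b'a/b', then
# b'a') matches the ignore rules, so an ignored directory shadows
# everything beneath it.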
1127 def _ignorefiles(self):
1128 files = []
1129 if os.path.exists(self._join(b'.hgignore')):
1130 files.append(self._join(b'.hgignore'))
1131 for name, path in self._ui.configitems(b"ui"):
1132 if name == b'ignore' or name.startswith(b'ignore.'):
1133 # we need to use os.path.join here rather than self._join
1134 # because path is arbitrary and user-specified
1135 files.append(os.path.join(self._rootdir, util.expandpath(path)))
1136 return files
1137
1138 def _ignorefileandline(self, f):
1139 files = collections.deque(self._ignorefiles())
1140 visited = set()
1141 while files:
1142 i = files.popleft()
1143 patterns = matchmod.readpatternfile(
1144 i, self._ui.warn, sourceinfo=True
1145 )
1146 for pattern, lineno, line in patterns:
1147 kind, p = matchmod._patsplit(pattern, b'glob')
1148 if kind == b"subinclude":
1149 if p not in visited:
1150 files.append(p)
1151 continue
1152 m = matchmod.match(
1153 self._root, b'', [], [pattern], warn=self._ui.warn
1154 )
1155 if m(f):
1156 return (i, lineno, line)
1157 visited.add(i)
1158 return (None, -1, b"")
1159
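# Illustrative usage sketch (hypothetical values, not part of the original
# file): asking which ignore rule matched a given path:
# ignorefile, lineno, line = dirstate._ignorefileandline(b'build/out.o')
# # -> (b'/repo/.hgignore', 3, b'build/**\n') when that rule matched,
# # or (None, -1, b'') when nothing ignores the file.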
1160 def _walkexplicit(self, match, subrepos):
1161 """Get stat data about the files explicitly specified by match.
1162
1163 Return a triple (results, dirsfound, dirsnotfound).
1164 - results is a mapping from filename to stat result. It also contains
1165 listings mapping subrepos and .hg to None.
1166 - dirsfound is a list of files found to be directories.
1167 - dirsnotfound is a list of files that the dirstate thinks are
1168 directories and that were not found."""
1169
1170 def badtype(mode):
1171 kind = _(b'unknown')
1172 if stat.S_ISCHR(mode):
1173 kind = _(b'character device')
1174 elif stat.S_ISBLK(mode):
1175 kind = _(b'block device')
1176 elif stat.S_ISFIFO(mode):
1177 kind = _(b'fifo')
1178 elif stat.S_ISSOCK(mode):
1179 kind = _(b'socket')
1180 elif stat.S_ISDIR(mode):
1181 kind = _(b'directory')
1182 return _(b'unsupported file type (type is %s)') % kind
1183
1184 badfn = match.bad
1185 dmap = self._map
1186 lstat = os.lstat
1187 getkind = stat.S_IFMT
1188 dirkind = stat.S_IFDIR
1189 regkind = stat.S_IFREG
1190 lnkkind = stat.S_IFLNK
1191 join = self._join
1192 dirsfound = []
1193 foundadd = dirsfound.append
1194 dirsnotfound = []
1195 notfoundadd = dirsnotfound.append
1196
1197 if not match.isexact() and self._checkcase:
1198 normalize = self._normalize
1199 else:
1200 normalize = None
1201
1202 files = sorted(match.files())
1203 subrepos.sort()
1204 i, j = 0, 0
1205 while i < len(files) and j < len(subrepos):
1206 subpath = subrepos[j] + b"/"
1207 if files[i] < subpath:
1208 i += 1
1209 continue
1210 while i < len(files) and files[i].startswith(subpath):
1211 del files[i]
1212 j += 1
1213
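# Worked example (illustrative, not part of the original file): with
# files = [b'a', b'sub/x', b'sub/y', b'z'] and subrepos = [b'sub'], the
# merge-style scan above deletes b'sub/x' and b'sub/y', leaving
# [b'a', b'z'] -- explicit paths inside a subrepo are left to the
# subrepo's own walk.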
1214 if not files or b'' in files:
1215 files = [b'']
1216 # constructing the foldmap is expensive, so don't do it for the
1217 # common case where files is ['']
1218 normalize = None
1219 results = dict.fromkeys(subrepos)
1220 results[b'.hg'] = None
1221
1222 for ff in files:
1223 if normalize:
1224 nf = normalize(ff, False, True)
1225 else:
1226 nf = ff
1227 if nf in results:
1228 continue
1229
1230 try:
1231 st = lstat(join(nf))
1232 kind = getkind(st.st_mode)
1233 if kind == dirkind:
1234 if nf in dmap:
1235 # file replaced by dir on disk but still in dirstate
1236 results[nf] = None
1237 foundadd((nf, ff))
1238 elif kind == regkind or kind == lnkkind:
1239 results[nf] = st
1240 else:
1241 badfn(ff, badtype(kind))
1242 if nf in dmap:
1243 results[nf] = None
1244 except OSError as inst: # nf not found on disk - it is dirstate only
1245 if nf in dmap: # does it exactly match a missing file?
1246 results[nf] = None
1247 else: # does it match a missing directory?
1248 if self._map.hasdir(nf):
1249 notfoundadd(nf)
1250 else:
1251 badfn(ff, encoding.strtolocal(inst.strerror))
1252
1253 # match.files() may contain explicitly-specified paths that shouldn't
1254 # be taken; drop them from the list of files found. dirsfound/notfound
1255 # aren't filtered here because they will be tested later.
1256 if match.anypats():
1257 for f in list(results):
1258 if f == b'.hg' or f in subrepos:
1259 # keep sentinel to disable further out-of-repo walks
1260 continue
1261 if not match(f):
1262 del results[f]
1263
1264 # Case insensitive filesystems cannot rely on lstat() failing to detect
1265 # a case-only rename. Prune the stat object for any file that does not
1266 # match the case in the filesystem, if there are multiple files that
1267 # normalize to the same path.
1268 if match.isexact() and self._checkcase:
1269 normed = {}
1270
1271 for f, st in pycompat.iteritems(results):
1272 if st is None:
1273 continue
1274
1275 nc = util.normcase(f)
1276 paths = normed.get(nc)
1277
1278 if paths is None:
1279 paths = set()
1280 normed[nc] = paths
1281
1282 paths.add(f)
1283
1284 for norm, paths in pycompat.iteritems(normed):
1285 if len(paths) > 1:
1286 for path in paths:
1287 folded = self._discoverpath(
1288 path, norm, True, None, self._map.dirfoldmap
1289 )
1290 if path != folded:
1291 results[path] = None
1292
1293 return results, dirsfound, dirsnotfound
1294
1295 def walk(self, match, subrepos, unknown, ignored, full=True):
1296 """
1297 Walk recursively through the directory tree, finding all files
1298 matched by match.
1299
1300 If full is False, maybe skip some known-clean files.
1301
1302 Return a dict mapping filename to stat-like object (either
1303 mercurial.osutil.stat instance or return value of os.stat()).
1304
1305 """
1306 # full is a flag that extensions that hook into walk can use -- this
1307 # implementation doesn't use it at all. This satisfies the contract
1308 # because we only guarantee a "maybe".
1309
1310 if ignored:
1311 ignore = util.never
1312 dirignore = util.never
1313 elif unknown:
1314 ignore = self._ignore
1315 dirignore = self._dirignore
1316 else:
1317 # if not unknown and not ignored, drop dir recursion and step 2
1318 ignore = util.always
1319 dirignore = util.always
1320
1321 matchfn = match.matchfn
1322 matchalways = match.always()
1323 matchtdir = match.traversedir
1324 dmap = self._map
1325 listdir = util.listdir
1326 lstat = os.lstat
1327 dirkind = stat.S_IFDIR
1328 regkind = stat.S_IFREG
1329 lnkkind = stat.S_IFLNK
1330 join = self._join
1331
1332 exact = skipstep3 = False
1333 if match.isexact(): # match.exact
1334 exact = True
1335 dirignore = util.always # skip step 2
1336 elif match.prefix(): # match.match, no patterns
1337 skipstep3 = True
1338
1339 if not exact and self._checkcase:
1340 normalize = self._normalize
1341 normalizefile = self._normalizefile
1342 skipstep3 = False
1343 else:
1344 normalize = self._normalize
1345 normalizefile = None
1346
1347 # step 1: find all explicit files
1348 results, work, dirsnotfound = self._walkexplicit(match, subrepos)
1349 if matchtdir:
1350 for d in work:
1351 matchtdir(d[0])
1352 for d in dirsnotfound:
1353 matchtdir(d)
1354
1355 skipstep3 = skipstep3 and not (work or dirsnotfound)
1356 work = [d for d in work if not dirignore(d[0])]
1357
1358 # step 2: visit subdirectories
1359 def traverse(work, alreadynormed):
1360 wadd = work.append
1361 while work:
1362 tracing.counter('dirstate.walk work', len(work))
1363 nd = work.pop()
1364 visitentries = match.visitchildrenset(nd)
1365 if not visitentries:
1366 continue
1367 if visitentries == b'this' or visitentries == b'all':
1368 visitentries = None
1369 skip = None
1370 if nd != b'':
1371 skip = b'.hg'
1372 try:
1373 with tracing.log('dirstate.walk.traverse listdir %s', nd):
1374 entries = listdir(join(nd), stat=True, skip=skip)
1375 except OSError as inst:
1376 if inst.errno in (errno.EACCES, errno.ENOENT):
1377 match.bad(
1378 self.pathto(nd), encoding.strtolocal(inst.strerror)
1379 )
1380 continue
1381 raise
1382 for f, kind, st in entries:
1383 # Some matchers may return files in the visitentries set,
1384 # instead of 'this', if the matcher explicitly mentions them
1385 # and is not an exactmatcher. This is acceptable; we do not
1386 # make any hard assumptions about file-or-directory below
1387 # based on the presence of `f` in visitentries. If
1388 # visitchildrenset returned a set, we can always skip the
1389 # entries *not* in the set it provided regardless of whether
1390 # they're actually a file or a directory.
1391 if visitentries and f not in visitentries:
1392 continue
1393 if normalizefile:
1394 # even though f might be a directory, we're only
1395 # interested in comparing it to files currently in the
1396 # dmap -- therefore normalizefile is enough
1397 nf = normalizefile(
1398 nd and (nd + b"/" + f) or f, True, True
1399 )
1400 else:
1401 nf = nd and (nd + b"/" + f) or f
1402 if nf not in results:
1403 if kind == dirkind:
1404 if not ignore(nf):
1405 if matchtdir:
1406 matchtdir(nf)
1407 wadd(nf)
1408 if nf in dmap and (matchalways or matchfn(nf)):
1409 results[nf] = None
1410 elif kind == regkind or kind == lnkkind:
1411 if nf in dmap:
1412 if matchalways or matchfn(nf):
1413 results[nf] = st
1414 elif (matchalways or matchfn(nf)) and not ignore(
1415 nf
1416 ):
1417 # unknown file -- normalize if necessary
1418 if not alreadynormed:
1419 nf = normalize(nf, False, True)
1420 results[nf] = st
1421 elif nf in dmap and (matchalways or matchfn(nf)):
1422 results[nf] = None
1423
1424 for nd, d in work:
1425 # alreadynormed means that processwork doesn't have to do any
1426 # expensive directory normalization
1427 alreadynormed = not normalize or nd == d
1428 traverse([d], alreadynormed)
1429
1430 for s in subrepos:
1431 del results[s]
1432 del results[b'.hg']
1433
1434 # step 3: visit remaining files from dmap
1435 if not skipstep3 and not exact:
1436 # If a dmap file is not in results yet, it was either
1437 # a) not matching matchfn, b) ignored, c) missing, or d) under a
1438 # symlink directory.
1439 if not results and matchalways:
1440 visit = [f for f in dmap]
1441 else:
1442 visit = [f for f in dmap if f not in results and matchfn(f)]
1443 visit.sort()
1444
1445 if unknown:
1446 # unknown == True means we walked all dirs under the roots
1447 # that weren't ignored, and everything that matched was stat'ed
1448 # and is already in results.
1449 # The rest must thus be ignored or under a symlink.
1450 audit_path = pathutil.pathauditor(self._root, cached=True)
1451
1452 for nf in iter(visit):
1453 # If a stat for the same file was already added with a
1454 # different case, don't add one for this, since that would
1455 # make it appear as if the file exists under both names
1456 # on disk.
1457 if (
1458 normalizefile
1459 and normalizefile(nf, True, True) in results
1460 ):
1461 results[nf] = None
1462 # Report ignored items in the dmap as long as they are not
1463 # under a symlink directory.
1464 elif audit_path.check(nf):
1465 try:
1466 results[nf] = lstat(join(nf))
1467 # file was just ignored, no links, and exists
1468 except OSError:
1469 # file doesn't exist
1470 results[nf] = None
1471 else:
1472 # It's either missing or under a symlink directory
1473 # which we in this case report as missing
1474 results[nf] = None
1475 else:
1476 # We may not have walked the full directory tree above,
1477 # so stat and check everything we missed.
1478 iv = iter(visit)
1479 for st in util.statfiles([join(i) for i in visit]):
1480 results[next(iv)] = st
1481 return results
1482
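# Illustrative note (not part of the original file): in the final branch
# above, util.statfiles() returns one stat-or-None per input path, so
# pairing it with iter(visit) fills `results` positionally, e.g. with
# visit = [b'a', b'b'] the first stat goes to results[b'a'] and the
# second to results[b'b'].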
1483 def _rust_status(self, matcher, list_clean, list_ignored, list_unknown):
1484 # Force Rayon (Rust parallelism library) to respect the number of
1485 # workers. This is a temporary workaround until Rust code knows
1486 # how to read the config file.
1487 numcpus = self._ui.configint(b"worker", b"numcpus")
1488 if numcpus is not None:
1489 encoding.environ.setdefault(b'RAYON_NUM_THREADS', b'%d' % numcpus)
1490
1491 workers_enabled = self._ui.configbool(b"worker", b"enabled", True)
1492 if not workers_enabled:
1493 encoding.environ[b"RAYON_NUM_THREADS"] = b"1"
1494
1495 (
1496 lookup,
1497 modified,
1498 added,
1499 removed,
1500 deleted,
1501 clean,
1502 ignored,
1503 unknown,
1504 warnings,
1505 bad,
1506 traversed,
1507 dirty,
1508 ) = rustmod.status(
1509 self._map._rustmap,
1510 matcher,
1511 self._rootdir,
1512 self._ignorefiles(),
1513 self._checkexec,
1514 self._lastnormaltime,
1515 bool(list_clean),
1516 bool(list_ignored),
1517 bool(list_unknown),
1518 bool(matcher.traversedir),
1519 )
1520
1521 self._dirty |= dirty
1522
1523 if matcher.traversedir:
1524 for dir in traversed:
1525 matcher.traversedir(dir)
1526
1527 if self._ui.warn:
1528 for item in warnings:
1529 if isinstance(item, tuple):
1530 file_path, syntax = item
1531 msg = _(b"%s: ignoring invalid syntax '%s'\n") % (
1532 file_path,
1533 syntax,
1534 )
1535 self._ui.warn(msg)
1536 else:
1537 msg = _(b"skipping unreadable pattern file '%s': %s\n")
1538 self._ui.warn(
1539 msg
1540 % (
1541 pathutil.canonpath(
1542 self._rootdir, self._rootdir, item
1543 ),
1544 b"No such file or directory",
1545 )
1546 )
1547
1548 for (fn, message) in bad:
1549 matcher.bad(fn, encoding.strtolocal(message))
1550
1551 status = scmutil.status(
1552 modified=modified,
1553 added=added,
1554 removed=removed,
1555 deleted=deleted,
1556 unknown=unknown,
1557 ignored=ignored,
1558 clean=clean,
1559 )
1560 return (lookup, status)
1561
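# Illustrative note (not part of the original file): because the worker
# settings are exported through the environment above, the Rust status
# path can be pinned to a single thread for debugging, e.g. by running
# RAYON_NUM_THREADS=1 hg status
# in the shell, or by setting worker.enabled=false in the config.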
1562 def status(self, match, subrepos, ignored, clean, unknown):
1563 """Determine the status of the working copy relative to the
1564 dirstate and return a pair of (unsure, status), where status is of type
1565 scmutil.status and:
1566
1567 unsure:
1568 files that might have been modified since the dirstate was
1569 written, but need to be read to be sure (size is the same
1570 but mtime differs)
1571 status.modified:
1572 files that have definitely been modified since the dirstate
1573 was written (different size or mode)
1574 status.clean:
1575 files that have definitely not been modified since the
1576 dirstate was written
1577 """
1578 listignored, listclean, listunknown = ignored, clean, unknown
1579 lookup, modified, added, unknown, ignored = [], [], [], [], []
1580 removed, deleted, clean = [], [], []
1581
1582 dmap = self._map
1583 dmap.preload()
1584
1585 use_rust = True
1586
1587 allowed_matchers = (
1588 matchmod.alwaysmatcher,
1589 matchmod.exactmatcher,
1590 matchmod.includematcher,
1591 )
1592
1593 if rustmod is None:
1594 use_rust = False
1595 elif self._checkcase:
1596 # Case-insensitive filesystems are not handled yet
1597 use_rust = False
1598 elif subrepos:
1599 use_rust = False
1600 elif sparse.enabled:
1601 use_rust = False
1602 elif not isinstance(match, allowed_matchers):
1603 # Some matchers have yet to be implemented
1604 use_rust = False
1605
1606 if use_rust:
1607 try:
1608 return self._rust_status(
1609 match, listclean, listignored, listunknown
1610 )
1611 except rustmod.FallbackError:
1612 pass
1613
1614 def noop(f):
1615 pass
1616
1617 dcontains = dmap.__contains__
1618 dget = dmap.__getitem__
1619 ladd = lookup.append # aka "unsure"
1620 madd = modified.append
1621 aadd = added.append
1622 uadd = unknown.append if listunknown else noop
1623 iadd = ignored.append if listignored else noop
1624 radd = removed.append
1625 dadd = deleted.append
1626 cadd = clean.append if listclean else noop
1627 mexact = match.exact
1628 dirignore = self._dirignore
1629 checkexec = self._checkexec
1630 copymap = self._map.copymap
1631 lastnormaltime = self._lastnormaltime
1632
1633 # We need to do full walks when either
1634 # - we're listing all clean files, or
1635 # - match.traversedir does something, because match.traversedir should
1636 # be called for every dir in the working dir
1637 full = listclean or match.traversedir is not None
1638 for fn, st in pycompat.iteritems(
1639 self.walk(match, subrepos, listunknown, listignored, full=full)
1640 ):
1641 if not dcontains(fn):
1642 if (listignored or mexact(fn)) and dirignore(fn):
1643 if listignored:
1644 iadd(fn)
1645 else:
1646 uadd(fn)
1647 continue
1648
1649 # This is equivalent to 'state, mode, size, time = dmap[fn]' but not
1650 # written like that for performance reasons. dmap[fn] is not a
1651 # Python tuple in compiled builds. The CPython UNPACK_SEQUENCE
1652 # opcode has fast paths when the value to be unpacked is a tuple or
1653 # a list, but falls back to creating a full-fledged iterator in
1654 # general. That is much slower than simply accessing and storing the
1655 # tuple members one by one.
1656 t = dget(fn)
1657 mode = t.mode
1658 size = t.size
1659 time = t.mtime
1660
1661 if not st and t.tracked:
1662 dadd(fn)
1663 elif t.merged:
1664 madd(fn)
1665 elif t.added:
1666 aadd(fn)
1667 elif t.removed:
1668 radd(fn)
1669 elif t.tracked:
1670 if (
1671 size >= 0
1672 and (
1673 (size != st.st_size and size != st.st_size & _rangemask)
1674 or ((mode ^ st.st_mode) & 0o100 and checkexec)
1675 )
1676 or t.from_p2
1677 or fn in copymap
1678 ):
1679 if stat.S_ISLNK(st.st_mode) and size != st.st_size:
1680 # issue6456: Size returned may be longer due to
1681 # encryption on EXT-4 fscrypt, undecided.
1682 ladd(fn)
1683 else:
1684 madd(fn)
1685 elif (
1686 time != st[stat.ST_MTIME]
1687 and time != st[stat.ST_MTIME] & _rangemask
1688 ):
1689 ladd(fn)
1690 elif st[stat.ST_MTIME] == lastnormaltime:
1691 # fn may have just been marked as normal and it may have
1692 # changed in the same second without changing its size.
1693 # This can happen if we quickly do multiple commits.
1694 # Force lookup, so we don't miss such a racy file change.
1695 ladd(fn)
1696 elif listclean:
1697 cadd(fn)
1698 status = scmutil.status(
1699 modified, added, removed, deleted, unknown, ignored, clean
1700 )
1701 return (lookup, status)
1702
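# Worked example (illustrative, not part of the original file): sizes and
# mtimes are stored masked to 31 bits, so for a file whose real size is
# 2**31 + 5 the stored size is 5, and the comparison above accepts it via
# size == st.st_size & _rangemask -- only a genuine size change makes
# both comparisons fail and the file report as modified.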
1703 def matches(self, match):
1704 """
1705 return files in the dirstate (in whatever state) filtered by match
1706 """
1707 dmap = self._map
1708 if rustmod is not None:
1709 dmap = self._map._rustmap
1710
1711 if match.always():
1712 return dmap.keys()
1713 files = match.files()
1714 if match.isexact():
1715 # fast path -- filter the other way around, since typically files is
1716 # much smaller than dmap
1717 return [f for f in files if f in dmap]
1718 if match.prefix() and all(fn in dmap for fn in files):
1719 # fast path -- all the values are known to be files, so just return
1720 # that
1721 return list(files)
1722 return [f for f in dmap if match(f)]
1723
1724 def _actualfilename(self, tr):
1725 if tr:
1726 return self._pendingfilename
1727 else:
1728 return self._filename
1729
1730 def savebackup(self, tr, backupname):
1731 '''Save current dirstate into backup file'''
1732 filename = self._actualfilename(tr)
1733 assert backupname != filename
1734
1735 # use '_writedirstate' instead of 'write' to make sure changes are
1736 # written out, because the latter skips writing while a transaction runs.
1737 # The output file will be used to create a backup of the dirstate here.
1738 if self._dirty or not self._opener.exists(filename):
1739 self._writedirstate(
1740 tr,
1741 self._opener(filename, b"w", atomictemp=True, checkambig=True),
1742 )
1743
1744 if tr:
1745 # ensure that subsequent tr.writepending returns True for
1746 # changes written out above, even if dirstate is never
1747 # changed after this
1748 tr.addfilegenerator(
1749 b'dirstate',
1750 (self._filename,),
1751 lambda f: self._writedirstate(tr, f),
1752 location=b'plain',
1753 )
1754
1755 # ensure that pending file written above is unlinked at
1756 # failure, even if tr.writepending isn't invoked until the
1757 # end of this transaction
1758 tr.registertmp(filename, location=b'plain')
1759
1760 self._opener.tryunlink(backupname)
1761 # hardlink backup is okay because _writedirstate is always called
1762 # with an "atomictemp=True" file.
1763 util.copyfile(
1764 self._opener.join(filename),
1765 self._opener.join(backupname),
1766 hardlink=True,
1767 )
1768
1769 def restorebackup(self, tr, backupname):
1770 '''Restore dirstate by backup file'''
1771 # this "invalidate()" prevents "wlock.release()" from writing
1772 # changes of dirstate out after restoring from backup file
1773 self.invalidate()
1774 filename = self._actualfilename(tr)
1775 o = self._opener
1776 if util.samefile(o.join(backupname), o.join(filename)):
1777 o.unlink(backupname)
1778 else:
1779 o.rename(backupname, filename, checkambig=True)
1780
1781 def clearbackup(self, tr, backupname):
1782 '''Clear backup file'''
1783 self._opener.unlink(backupname)
@@ -1,925 +1,916 b''
1 # dirstatemap.py
2 #
3 # This software may be used and distributed according to the terms of the
4 # GNU General Public License version 2 or any later version.
5
6 from __future__ import absolute_import
7
8 import errno
9
10 from .i18n import _
11
12 from . import (
13 error,
14 pathutil,
15 policy,
16 pycompat,
17 txnutil,
18 util,
19 )
20
21 from .dirstateutils import (
22 docket as docketmod,
23 )
24
25 parsers = policy.importmod('parsers')
26 rustmod = policy.importrust('dirstate')
27
28 propertycache = util.propertycache
29
30 DirstateItem = parsers.DirstateItem
31
32
33 # a special value used internally for `size` if the file comes from the other parent
34 FROM_P2 = -2
35
36 # a special value used internally for `size` if the file is modified/merged/added
37 NONNORMAL = -1
38
39 # a special value used internally for `time` if the time is ambiguous
40 AMBIGUOUS_TIME = -1
41
42 rangemask = 0x7FFFFFFF
43
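# Worked example (illustrative, not part of the original file): rangemask
# keeps stored sizes and mtimes within a signed 32-bit field:
# (2**31 + 5) & rangemask == 5
# while the negative sentinel values FROM_P2 (-2) and NONNORMAL (-1) are
# stored as-is and never collide with masked data, which is always >= 0.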
44
45 class dirstatemap(object):
46 """Map encapsulating the dirstate's contents.
47
48 The dirstate contains the following state:
49
50 - `identity` is the identity of the dirstate file, which can be used to
51 detect when changes have occurred to the dirstate file.
52
53 - `parents` is a pair containing the parents of the working copy. The
54 parents are updated by calling `setparents`.
55
56 - the state map maps filenames to tuples of (state, mode, size, mtime),
57 where state is a single character representing 'normal', 'added',
58 'removed', or 'merged'. It is read by treating the dirstate as a
59 dict. File state is updated by calling the `addfile`, `removefile` and
60 `dropfile` methods.
61
62 - `copymap` maps destination filenames to their source filename.
63
64 The dirstate also provides the following views onto the state:
65
66 - `nonnormalset` is a set of the filenames that have state other
67 than 'normal', or are normal but have an mtime of -1 ('normallookup').
68
69 - `otherparentset` is a set of the filenames that are marked as coming
70 from the second parent when the dirstate is currently being merged.
71
72 - `filefoldmap` is a dict mapping normalized filenames to the denormalized
73 form that they appear as in the dirstate.
74
75 - `dirfoldmap` is a dict mapping normalized directory names to the
76 denormalized form that they appear as in the dirstate.
77 """
78
79 def __init__(self, ui, opener, root, nodeconstants, use_dirstate_v2):
80 self._ui = ui
81 self._opener = opener
82 self._root = root
83 self._filename = b'dirstate'
84 self._nodelen = 20
85 self._nodeconstants = nodeconstants
86 assert (
87 not use_dirstate_v2
88 ), "should have detected unsupported requirement"
89
90 self._parents = None
91 self._dirtyparents = False
92
93 # for consistent view between _pl() and _read() invocations
94 self._pendingmode = None
95
96 @propertycache
97 def _map(self):
98 self._map = {}
99 self.read()
100 return self._map
101
102 @propertycache
103 def copymap(self):
104 self.copymap = {}
105 self._map
106 return self.copymap
107
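# Illustrative note (not part of the original file): both properties above
# rely on the propertycache idiom of replacing the descriptor with the
# computed value. `_map` assigns an empty dict to self._map *before*
# calling read(), so that read(), which populates self._map, does not
# re-enter the property; `copymap` similarly seeds itself and then forces
# self._map to load, since reading the dirstate fills the copy map too.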
108 def clear(self):
109 self._map.clear()
110 self.copymap.clear()
111 self.setparents(self._nodeconstants.nullid, self._nodeconstants.nullid)
112 util.clearcachedproperty(self, b"_dirs")
113 util.clearcachedproperty(self, b"_alldirs")
114 util.clearcachedproperty(self, b"filefoldmap")
115 util.clearcachedproperty(self, b"dirfoldmap")
116 util.clearcachedproperty(self, b"nonnormalset")
117 util.clearcachedproperty(self, b"otherparentset")
118
119 def items(self):
120 return pycompat.iteritems(self._map)
121
122 # forward for python2,3 compat
123 iteritems = items
124
125 debug_iter = items
126
127 def __len__(self):
128 return len(self._map)
129
130 def __iter__(self):
131 return iter(self._map)
132
133 def get(self, key, default=None):
134 return self._map.get(key, default)
135
136 def __contains__(self, key):
137 return key in self._map
138
139 def __getitem__(self, key):
140 return self._map[key]
141
142 def keys(self):
143 return self._map.keys()
144
145 def preload(self):
146 """Loads the underlying data, if it's not already loaded"""
147 self._map
148
149 def _dirs_incr(self, filename, old_entry=None):
150 """increment the dirstate counters if applicable"""
151 if (
152 old_entry is None or old_entry.removed
153 ) and "_dirs" in self.__dict__:
154 self._dirs.addpath(filename)
155 if old_entry is None and "_alldirs" in self.__dict__:
156 self._alldirs.addpath(filename)
157
158 def _dirs_decr(self, filename, old_entry=None, remove_variant=False):
159 """decrement the dirstate counters if applicable"""
160 if old_entry is not None:
161 if "_dirs" in self.__dict__ and not old_entry.removed:
162 self._dirs.delpath(filename)
163 if "_alldirs" in self.__dict__ and not remove_variant:
164 self._alldirs.delpath(filename)
165 elif remove_variant and "_alldirs" in self.__dict__:
166 self._alldirs.addpath(filename)
167 if "filefoldmap" in self.__dict__:
168 normed = util.normcase(filename)
169 self.filefoldmap.pop(normed, None)
170
171 def set_possibly_dirty(self, filename):
171 def set_possibly_dirty(self, filename):
172 """record that the current state of the file on disk is unknown"""
172 """record that the current state of the file on disk is unknown"""
173 self[filename].set_possibly_dirty()
173 self[filename].set_possibly_dirty()
174
174
175 def addfile(
175 def addfile(
176 self,
176 self,
177 f,
177 f,
178 mode=0,
178 mode=0,
179 size=None,
179 size=None,
180 mtime=None,
180 mtime=None,
181 added=False,
181 added=False,
182 merged=False,
182 merged=False,
183 from_p2=False,
183 from_p2=False,
184 possibly_dirty=False,
184 possibly_dirty=False,
185 ):
185 ):
186 """Add a tracked file to the dirstate."""
186 """Add a tracked file to the dirstate."""
187 if added:
187 if added:
188 assert not merged
188 assert not merged
189 assert not possibly_dirty
189 assert not possibly_dirty
190 assert not from_p2
190 assert not from_p2
191 state = b'a'
191 state = b'a'
192 size = NONNORMAL
192 size = NONNORMAL
193 mtime = AMBIGUOUS_TIME
193 mtime = AMBIGUOUS_TIME
194 elif merged:
194 elif merged:
195 assert not possibly_dirty
195 assert not possibly_dirty
196 assert not from_p2
196 assert not from_p2
197 state = b'm'
197 state = b'm'
198 size = FROM_P2
198 size = FROM_P2
199 mtime = AMBIGUOUS_TIME
199 mtime = AMBIGUOUS_TIME
200 elif from_p2:
200 elif from_p2:
201 assert not possibly_dirty
201 assert not possibly_dirty
202 state = b'n'
202 state = b'n'
203 size = FROM_P2
203 size = FROM_P2
204 mtime = AMBIGUOUS_TIME
204 mtime = AMBIGUOUS_TIME
205 elif possibly_dirty:
205 elif possibly_dirty:
206 state = b'n'
206 state = b'n'
207 size = NONNORMAL
207 size = NONNORMAL
208 mtime = AMBIGUOUS_TIME
208 mtime = AMBIGUOUS_TIME
209 else:
209 else:
210 assert size != FROM_P2
210 assert size != FROM_P2
211 assert size != NONNORMAL
211 assert size != NONNORMAL
212 assert size is not None
212 assert size is not None
213 assert mtime is not None
213 assert mtime is not None
214
214
215 state = b'n'
215 state = b'n'
216 size = size & rangemask
216 size = size & rangemask
217 mtime = mtime & rangemask
217 mtime = mtime & rangemask
218 assert state is not None
218 assert state is not None
219 assert size is not None
219 assert size is not None
220 assert mtime is not None
220 assert mtime is not None
221 old_entry = self.get(f)
221 old_entry = self.get(f)
222 self._dirs_incr(f, old_entry)
222 self._dirs_incr(f, old_entry)
223 e = self._map[f] = DirstateItem(state, mode, size, mtime)
223 e = self._map[f] = DirstateItem(state, mode, size, mtime)
224 if e.dm_nonnormal:
224 if e.dm_nonnormal:
225 self.nonnormalset.add(f)
225 self.nonnormalset.add(f)
226 if e.dm_otherparent:
226 if e.dm_otherparent:
227 self.otherparentset.add(f)
227 self.otherparentset.add(f)
228
228
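To make the flag handling above concrete, here is an illustrative mapping (derived from the branches above, using the sentinels defined in the pure parsers module) from `addfile()` calls to the v1 `(state, mode, size, mtime)` tuple that gets stored:

    # addfile(f, added=True)          -> (b'a', 0, NONNORMAL, AMBIGUOUS_TIME)
    # addfile(f, merged=True)         -> (b'm', 0, FROM_P2,   AMBIGUOUS_TIME)
    # addfile(f, from_p2=True)        -> (b'n', 0, FROM_P2,   AMBIGUOUS_TIME)
    # addfile(f, possibly_dirty=True) -> (b'n', 0, NONNORMAL, AMBIGUOUS_TIME)
    # addfile(f, mode=0o644, size=12, mtime=1000)
    #                                 -> (b'n', 0o644, 12, 1000)

Note that `mode` is passed through unchanged in every branch; only `size` and `mtime` are replaced by sentinels.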
229 def reset_state(
229 def reset_state(
230 self,
230 self,
231 filename,
231 filename,
232 wc_tracked,
232 wc_tracked,
233 p1_tracked,
233 p1_tracked,
234 p2_tracked=False,
234 p2_tracked=False,
235 merged=False,
235 merged=False,
236 clean_p1=False,
236 clean_p1=False,
237 clean_p2=False,
237 clean_p2=False,
238 possibly_dirty=False,
238 possibly_dirty=False,
239 parentfiledata=None,
239 parentfiledata=None,
240 ):
240 ):
241 """Set a entry to a given state, diregarding all previous state
241 """Set a entry to a given state, diregarding all previous state
242
242
243 This is to be used by the part of the dirstate API dedicated to
243 This is to be used by the part of the dirstate API dedicated to
244 adjusting the dirstate after an update/merge.
244 adjusting the dirstate after an update/merge.
245
245
246 note: calling this might result in no entry existing at all if the
246 note: calling this might result in no entry existing at all if the
247 dirstate map does not see any point in having one for this file
247 dirstate map does not see any point in having one for this file
248 anymore.
248 anymore.
249 """
249 """
250 if merged and (clean_p1 or clean_p2):
250 if merged and (clean_p1 or clean_p2):
251 msg = b'`merged` argument incompatible with `clean_p1`/`clean_p2`'
251 msg = b'`merged` argument incompatible with `clean_p1`/`clean_p2`'
252 raise error.ProgrammingError(msg)
252 raise error.ProgrammingError(msg)
253 # copy information is now outdated
253 # copy information is now outdated
254 # (maybe new information should be directly passed to this function)
254 # (maybe new information should be directly passed to this function)
255 self.copymap.pop(filename, None)
255 self.copymap.pop(filename, None)
256
256
257 if not (p1_tracked or p2_tracked or wc_tracked):
257 if not (p1_tracked or p2_tracked or wc_tracked):
258 self.dropfile(filename)
258 self.dropfile(filename)
259 elif merged:
259 elif merged:
260 # XXX might be merged and removed ?
260 # XXX might be merged and removed ?
261 entry = self.get(filename)
261 entry = self.get(filename)
262 if entry is not None and entry.tracked:
262 if entry is not None and entry.tracked:
263 # XXX mostly replicates dirstate.otherparent. We should get
263 # XXX mostly replicates dirstate.otherparent. We should get
264 # the higher layer to pass us more reliable data where `merged`
264 # the higher layer to pass us more reliable data where `merged`
265 # actually means merged. Dropping the else clause will show
265 # actually means merged. Dropping the else clause will show
266 # failure in `test-graft.t`
266 # failure in `test-graft.t`
267 self.addfile(filename, merged=True)
267 self.addfile(filename, merged=True)
268 else:
268 else:
269 self.addfile(filename, from_p2=True)
269 self.addfile(filename, from_p2=True)
270 elif not (p1_tracked or p2_tracked) and wc_tracked:
270 elif not (p1_tracked or p2_tracked) and wc_tracked:
271 self.addfile(filename, added=True, possibly_dirty=possibly_dirty)
271 self.addfile(filename, added=True, possibly_dirty=possibly_dirty)
272 elif (p1_tracked or p2_tracked) and not wc_tracked:
272 elif (p1_tracked or p2_tracked) and not wc_tracked:
273 # XXX might be merged and removed ?
273 # XXX might be merged and removed ?
274 old_entry = self._map.get(filename)
274 old_entry = self._map.get(filename)
275 self._dirs_decr(filename, old_entry=old_entry, remove_variant=True)
275 self._dirs_decr(filename, old_entry=old_entry, remove_variant=True)
276 self._map[filename] = DirstateItem(b'r', 0, 0, 0)
276 self._map[filename] = DirstateItem(b'r', 0, 0, 0)
277 self.nonnormalset.add(filename)
277 self.nonnormalset.add(filename)
278 elif clean_p2 and wc_tracked:
278 elif clean_p2 and wc_tracked:
279 if p1_tracked or self.get(filename) is not None:
279 if p1_tracked or self.get(filename) is not None:
280 # XXX the `self.get` call is catching some case in
280 # XXX the `self.get` call is catching some case in
281 # `test-merge-remove.t` where the file is tracked in p1, but the
281 # `test-merge-remove.t` where the file is tracked in p1, but the
282 # p1_tracked argument is False.
282 # p1_tracked argument is False.
283 #
283 #
284 # In addition, this seems to be a case where the file is marked
284 # In addition, this seems to be a case where the file is marked
285 # as merged without actually being the result of a merge
285 # as merged without actually being the result of a merge
286 # action. So things are not ideal here.
286 # action. So things are not ideal here.
287 self.addfile(filename, merged=True)
287 self.addfile(filename, merged=True)
288 else:
288 else:
289 self.addfile(filename, from_p2=True)
289 self.addfile(filename, from_p2=True)
290 elif not p1_tracked and p2_tracked and wc_tracked:
290 elif not p1_tracked and p2_tracked and wc_tracked:
291 self.addfile(filename, from_p2=True, possibly_dirty=possibly_dirty)
291 self.addfile(filename, from_p2=True, possibly_dirty=possibly_dirty)
292 elif possibly_dirty:
292 elif possibly_dirty:
293 self.addfile(filename, possibly_dirty=possibly_dirty)
293 self.addfile(filename, possibly_dirty=possibly_dirty)
294 elif wc_tracked:
294 elif wc_tracked:
295 # this is a "normal" file
295 # this is a "normal" file
296 if parentfiledata is None:
296 if parentfiledata is None:
297 msg = b'failed to pass parentfiledata for a normal file: %s'
297 msg = b'failed to pass parentfiledata for a normal file: %s'
298 msg %= filename
298 msg %= filename
299 raise error.ProgrammingError(msg)
299 raise error.ProgrammingError(msg)
300 mode, size, mtime = parentfiledata
300 mode, size, mtime = parentfiledata
301 self.addfile(filename, mode=mode, size=size, mtime=mtime)
301 self.addfile(filename, mode=mode, size=size, mtime=mtime)
302 self.nonnormalset.discard(filename)
302 self.nonnormalset.discard(filename)
303 else:
303 else:
304 assert False, 'unreachable'
304 assert False, 'unreachable'
305
305
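A few illustrative calls (a sketch derived from the dispatch above, not an exhaustive matrix):

    # reset_state(f, wc_tracked=False, p1_tracked=False)
    #     -> dropfile(f): no entry is kept at all
    # reset_state(f, wc_tracked=True, p1_tracked=False)
    #     -> addfile(f, added=True): entry ends up in state b'a'
    # reset_state(f, wc_tracked=False, p1_tracked=True)
    #     -> DirstateItem(b'r', 0, 0, 0): marked for removal
    # reset_state(f, wc_tracked=True, p1_tracked=True,
    #             parentfiledata=(mode, size, mtime))
    #     -> addfile(f, mode=mode, size=size, mtime=mtime): clean b'n' entry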
306 def removefile(self, f, in_merge=False):
306 def set_untracked(self, f):
307 """
307 """Mark a file as no longer tracked in the dirstate map"""
308 Mark a file as removed in the dirstate.
308 entry = self[f]
309
309 self._dirs_decr(f, old_entry=entry, remove_variant=True)
310 The `size` parameter is used to store sentinel values that indicate
310 if entry.from_p2:
311 the file's previous state. In the future, we should refactor this
311 self.otherparentset.add(f)
312 to be more explicit about what that state is.
312 elif not entry.merged:
313 """
314 entry = self.get(f)
315 size = 0
316 if in_merge:
317 # XXX we should not be able to have 'm' state and 'FROM_P2' if not
318 # during a merge. So I (marmoute) am not sure we need the
319 # conditional at all. Double-checking this with an assert
320 # would be nice.
321 if entry is not None:
322 # backup the previous state
323 if entry.merged: # merge
324 size = NONNORMAL
325 elif entry.from_p2:
326 size = FROM_P2
327 self.otherparentset.add(f)
328 if entry is not None and not (entry.merged or entry.from_p2):
329 self.copymap.pop(f, None)
313 self.copymap.pop(f, None)
330 self._dirs_decr(f, old_entry=entry, remove_variant=True)
314 entry.set_untracked()
331 self._map[f] = DirstateItem(b'r', 0, size, 0)
332 self.nonnormalset.add(f)
315 self.nonnormalset.add(f)
333
316
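The net effect of the hunk above: the sentinel bookkeeping that `removefile` used to do inline now lives in `DirstateItem.set_untracked()` (added in the parsers.py hunk below), and the map merely updates its own caches. A hedged before/after sketch:

    # before: the map rebuilt the entry and picked the sentinel itself
    #   (and only backed up the previous state when in_merge was True)
    #     size = NONNORMAL if entry.merged else (FROM_P2 if entry.from_p2 else 0)
    #     self._map[f] = DirstateItem(b'r', 0, size, 0)
    # after: the entry mutates itself in place, no reassignment needed
    #     entry = self[f]
    #     entry.set_untracked()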
334 def dropfile(self, f):
317 def dropfile(self, f):
335 """
318 """
336 Remove a file from the dirstate. Returns True if the file was
319 Remove a file from the dirstate. Returns True if the file was
337 previously recorded.
320 previously recorded.
338 """
321 """
339 old_entry = self._map.pop(f, None)
322 old_entry = self._map.pop(f, None)
340 self._dirs_decr(f, old_entry=old_entry)
323 self._dirs_decr(f, old_entry=old_entry)
341 self.nonnormalset.discard(f)
324 self.nonnormalset.discard(f)
342 return old_entry is not None
325 return old_entry is not None
343
326
344 def clearambiguoustimes(self, files, now):
327 def clearambiguoustimes(self, files, now):
345 for f in files:
328 for f in files:
346 e = self.get(f)
329 e = self.get(f)
347 if e is not None and e.need_delay(now):
330 if e is not None and e.need_delay(now):
348 e.set_possibly_dirty()
331 e.set_possibly_dirty()
349 self.nonnormalset.add(f)
332 self.nonnormalset.add(f)
350
333
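A sketch of the intended use (hedged; the exact caller lives in dirstate.py): after packing, the dirstate passes the files it just wrote together with the `now` timestamp, so entries whose stored mtime equals `now` are downgraded to "possibly dirty" and re-checked by the next status run (see `DirstateItem.need_delay` in the parsers.py hunk below).

    # dmap.clearambiguoustimes(updated_files, now)   # hypothetical call shape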
351 def nonnormalentries(self):
334 def nonnormalentries(self):
352 '''Compute the nonnormal dirstate entries from the dmap'''
335 '''Compute the nonnormal dirstate entries from the dmap'''
353 try:
336 try:
354 return parsers.nonnormalotherparententries(self._map)
337 return parsers.nonnormalotherparententries(self._map)
355 except AttributeError:
338 except AttributeError:
356 nonnorm = set()
339 nonnorm = set()
357 otherparent = set()
340 otherparent = set()
358 for fname, e in pycompat.iteritems(self._map):
341 for fname, e in pycompat.iteritems(self._map):
359 if e.dm_nonnormal:
342 if e.dm_nonnormal:
360 nonnorm.add(fname)
343 nonnorm.add(fname)
361 if e.from_p2:
344 if e.from_p2:
362 otherparent.add(fname)
345 otherparent.add(fname)
363 return nonnorm, otherparent
346 return nonnorm, otherparent
364
347
365 @propertycache
348 @propertycache
366 def filefoldmap(self):
349 def filefoldmap(self):
367 """Returns a dictionary mapping normalized case paths to their
350 """Returns a dictionary mapping normalized case paths to their
368 non-normalized versions.
351 non-normalized versions.
369 """
352 """
370 try:
353 try:
371 makefilefoldmap = parsers.make_file_foldmap
354 makefilefoldmap = parsers.make_file_foldmap
372 except AttributeError:
355 except AttributeError:
373 pass
356 pass
374 else:
357 else:
375 return makefilefoldmap(
358 return makefilefoldmap(
376 self._map, util.normcasespec, util.normcasefallback
359 self._map, util.normcasespec, util.normcasefallback
377 )
360 )
378
361
379 f = {}
362 f = {}
380 normcase = util.normcase
363 normcase = util.normcase
381 for name, s in pycompat.iteritems(self._map):
364 for name, s in pycompat.iteritems(self._map):
382 if not s.removed:
365 if not s.removed:
383 f[normcase(name)] = name
366 f[normcase(name)] = name
384 f[b'.'] = b'.' # prevents useless util.fspath() invocation
367 f[b'.'] = b'.' # prevents useless util.fspath() invocation
385 return f
368 return f
386
369
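For instance, on a case-insensitive filesystem this is what lets two spellings of one path converge (illustrative values only):

    # dmap.filefoldmap[util.normcase(b'ReadMe.TXT')]  ->  b'README.txt'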
387 def hastrackeddir(self, d):
370 def hastrackeddir(self, d):
388 """
371 """
389 Returns True if the dirstate contains a tracked (not removed) file
372 Returns True if the dirstate contains a tracked (not removed) file
390 in this directory.
373 in this directory.
391 """
374 """
392 return d in self._dirs
375 return d in self._dirs
393
376
394 def hasdir(self, d):
377 def hasdir(self, d):
395 """
378 """
396 Returns True if the dirstate contains a file (tracked or removed)
379 Returns True if the dirstate contains a file (tracked or removed)
397 in this directory.
380 in this directory.
398 """
381 """
399 return d in self._alldirs
382 return d in self._alldirs
400
383
401 @propertycache
384 @propertycache
402 def _dirs(self):
385 def _dirs(self):
403 return pathutil.dirs(self._map, b'r')
386 return pathutil.dirs(self._map, b'r')
404
387
405 @propertycache
388 @propertycache
406 def _alldirs(self):
389 def _alldirs(self):
407 return pathutil.dirs(self._map)
390 return pathutil.dirs(self._map)
408
391
409 def _opendirstatefile(self):
392 def _opendirstatefile(self):
410 fp, mode = txnutil.trypending(self._root, self._opener, self._filename)
393 fp, mode = txnutil.trypending(self._root, self._opener, self._filename)
411 if self._pendingmode is not None and self._pendingmode != mode:
394 if self._pendingmode is not None and self._pendingmode != mode:
412 fp.close()
395 fp.close()
413 raise error.Abort(
396 raise error.Abort(
414 _(b'working directory state may be changed parallelly')
397 _(b'working directory state may be changed parallelly')
415 )
398 )
416 self._pendingmode = mode
399 self._pendingmode = mode
417 return fp
400 return fp
418
401
419 def parents(self):
402 def parents(self):
420 if not self._parents:
403 if not self._parents:
421 try:
404 try:
422 fp = self._opendirstatefile()
405 fp = self._opendirstatefile()
423 st = fp.read(2 * self._nodelen)
406 st = fp.read(2 * self._nodelen)
424 fp.close()
407 fp.close()
425 except IOError as err:
408 except IOError as err:
426 if err.errno != errno.ENOENT:
409 if err.errno != errno.ENOENT:
427 raise
410 raise
428 # File doesn't exist, so the current state is empty
411 # File doesn't exist, so the current state is empty
429 st = b''
412 st = b''
430
413
431 l = len(st)
414 l = len(st)
432 if l == self._nodelen * 2:
415 if l == self._nodelen * 2:
433 self._parents = (
416 self._parents = (
434 st[: self._nodelen],
417 st[: self._nodelen],
435 st[self._nodelen : 2 * self._nodelen],
418 st[self._nodelen : 2 * self._nodelen],
436 )
419 )
437 elif l == 0:
420 elif l == 0:
438 self._parents = (
421 self._parents = (
439 self._nodeconstants.nullid,
422 self._nodeconstants.nullid,
440 self._nodeconstants.nullid,
423 self._nodeconstants.nullid,
441 )
424 )
442 else:
425 else:
443 raise error.Abort(
426 raise error.Abort(
444 _(b'working directory state appears damaged!')
427 _(b'working directory state appears damaged!')
445 )
428 )
446
429
447 return self._parents
430 return self._parents
448
431
449 def setparents(self, p1, p2):
432 def setparents(self, p1, p2):
450 self._parents = (p1, p2)
433 self._parents = (p1, p2)
451 self._dirtyparents = True
434 self._dirtyparents = True
452
435
453 def read(self):
436 def read(self):
454 # ignore HG_PENDING because identity is used only for writing
437 # ignore HG_PENDING because identity is used only for writing
455 self.identity = util.filestat.frompath(
438 self.identity = util.filestat.frompath(
456 self._opener.join(self._filename)
439 self._opener.join(self._filename)
457 )
440 )
458
441
459 try:
442 try:
460 fp = self._opendirstatefile()
443 fp = self._opendirstatefile()
461 try:
444 try:
462 st = fp.read()
445 st = fp.read()
463 finally:
446 finally:
464 fp.close()
447 fp.close()
465 except IOError as err:
448 except IOError as err:
466 if err.errno != errno.ENOENT:
449 if err.errno != errno.ENOENT:
467 raise
450 raise
468 return
451 return
469 if not st:
452 if not st:
470 return
453 return
471
454
472 if util.safehasattr(parsers, b'dict_new_presized'):
455 if util.safehasattr(parsers, b'dict_new_presized'):
473 # Make an estimate of the number of files in the dirstate based on
456 # Make an estimate of the number of files in the dirstate based on
474 # its size. This trades wasting some memory for avoiding costly
457 # its size. This trades wasting some memory for avoiding costly
475 # resizes. Each entry has a prefix of 17 bytes followed by one or
458 # resizes. Each entry has a prefix of 17 bytes followed by one or
476 # two path names. Studies on various large-scale real-world repositories
459 # two path names. Studies on various large-scale real-world repositories
477 # found 54 bytes a reasonable upper limit for the average path name.
460 # found 54 bytes a reasonable upper limit for the average path name.
478 # Copy entries are ignored for the sake of this estimate.
461 # Copy entries are ignored for the sake of this estimate.
479 self._map = parsers.dict_new_presized(len(st) // 71)
462 self._map = parsers.dict_new_presized(len(st) // 71)
480
463
481 # Python's garbage collector triggers a GC each time a certain number
464 # Python's garbage collector triggers a GC each time a certain number
482 # of container objects (the number being defined by
465 # of container objects (the number being defined by
483 # gc.get_threshold()) are allocated. parse_dirstate creates a tuple
466 # gc.get_threshold()) are allocated. parse_dirstate creates a tuple
484 # for each file in the dirstate. The C version then immediately marks
467 # for each file in the dirstate. The C version then immediately marks
485 # them as not to be tracked by the collector. However, this has no
468 # them as not to be tracked by the collector. However, this has no
486 # effect on when GCs are triggered, only on what objects the GC looks
469 # effect on when GCs are triggered, only on what objects the GC looks
487 # into. This means that O(number of files) GCs are unavoidable.
470 # into. This means that O(number of files) GCs are unavoidable.
488 # Depending on when in the process's lifetime the dirstate is parsed,
471 # Depending on when in the process's lifetime the dirstate is parsed,
489 # this can get very expensive. As a workaround, disable GC while
472 # this can get very expensive. As a workaround, disable GC while
490 # parsing the dirstate.
473 # parsing the dirstate.
491 #
474 #
492 # (we cannot decorate the function directly since it is in a C module)
475 # (we cannot decorate the function directly since it is in a C module)
493 parse_dirstate = util.nogc(parsers.parse_dirstate)
476 parse_dirstate = util.nogc(parsers.parse_dirstate)
494 p = parse_dirstate(self._map, self.copymap, st)
477 p = parse_dirstate(self._map, self.copymap, st)
495 if not self._dirtyparents:
478 if not self._dirtyparents:
496 self.setparents(*p)
479 self.setparents(*p)
497
480
498 # Avoid excess attribute lookups by fast pathing certain checks
481 # Avoid excess attribute lookups by fast pathing certain checks
499 self.__contains__ = self._map.__contains__
482 self.__contains__ = self._map.__contains__
500 self.__getitem__ = self._map.__getitem__
483 self.__getitem__ = self._map.__getitem__
501 self.get = self._map.get
484 self.get = self._map.get
502
485
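The 71-byte divisor in the presizing above is the 17-byte fixed prefix plus the assumed 54-byte average path, so for example (illustrative arithmetic):

    # a ~7.1 MB dirstate file is presized for about 100,000 entries
    # 7100000 // 71 == 100000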
503 def write(self, _tr, st, now):
486 def write(self, _tr, st, now):
504 st.write(
487 st.write(
505 parsers.pack_dirstate(self._map, self.copymap, self.parents(), now)
488 parsers.pack_dirstate(self._map, self.copymap, self.parents(), now)
506 )
489 )
507 st.close()
490 st.close()
508 self._dirtyparents = False
491 self._dirtyparents = False
509 self.nonnormalset, self.otherparentset = self.nonnormalentries()
492 self.nonnormalset, self.otherparentset = self.nonnormalentries()
510
493
511 @propertycache
494 @propertycache
512 def nonnormalset(self):
495 def nonnormalset(self):
513 nonnorm, otherparents = self.nonnormalentries()
496 nonnorm, otherparents = self.nonnormalentries()
514 self.otherparentset = otherparents
497 self.otherparentset = otherparents
515 return nonnorm
498 return nonnorm
516
499
517 @propertycache
500 @propertycache
518 def otherparentset(self):
501 def otherparentset(self):
519 nonnorm, otherparents = self.nonnormalentries()
502 nonnorm, otherparents = self.nonnormalentries()
520 self.nonnormalset = nonnorm
503 self.nonnormalset = nonnorm
521 return otherparents
504 return otherparents
522
505
523 def non_normal_or_other_parent_paths(self):
506 def non_normal_or_other_parent_paths(self):
524 return self.nonnormalset.union(self.otherparentset)
507 return self.nonnormalset.union(self.otherparentset)
525
508
526 @propertycache
509 @propertycache
527 def identity(self):
510 def identity(self):
528 self._map
511 self._map
529 return self.identity
512 return self.identity
530
513
531 @propertycache
514 @propertycache
532 def dirfoldmap(self):
515 def dirfoldmap(self):
533 f = {}
516 f = {}
534 normcase = util.normcase
517 normcase = util.normcase
535 for name in self._dirs:
518 for name in self._dirs:
536 f[normcase(name)] = name
519 f[normcase(name)] = name
537 return f
520 return f
538
521
539
522
540 if rustmod is not None:
523 if rustmod is not None:
541
524
542 class dirstatemap(object):
525 class dirstatemap(object):
543 def __init__(self, ui, opener, root, nodeconstants, use_dirstate_v2):
526 def __init__(self, ui, opener, root, nodeconstants, use_dirstate_v2):
544 self._use_dirstate_v2 = use_dirstate_v2
527 self._use_dirstate_v2 = use_dirstate_v2
545 self._nodeconstants = nodeconstants
528 self._nodeconstants = nodeconstants
546 self._ui = ui
529 self._ui = ui
547 self._opener = opener
530 self._opener = opener
548 self._root = root
531 self._root = root
549 self._filename = b'dirstate'
532 self._filename = b'dirstate'
550 self._nodelen = 20 # Also update Rust code when changing this!
533 self._nodelen = 20 # Also update Rust code when changing this!
551 self._parents = None
534 self._parents = None
552 self._dirtyparents = False
535 self._dirtyparents = False
553 self._docket = None
536 self._docket = None
554
537
555 # for consistent view between _pl() and _read() invocations
538 # for consistent view between _pl() and _read() invocations
556 self._pendingmode = None
539 self._pendingmode = None
557
540
558 self._use_dirstate_tree = self._ui.configbool(
541 self._use_dirstate_tree = self._ui.configbool(
559 b"experimental",
542 b"experimental",
560 b"dirstate-tree.in-memory",
543 b"dirstate-tree.in-memory",
561 False,
544 False,
562 )
545 )
563
546
564 def addfile(
547 def addfile(
565 self,
548 self,
566 f,
549 f,
567 mode=0,
550 mode=0,
568 size=None,
551 size=None,
569 mtime=None,
552 mtime=None,
570 added=False,
553 added=False,
571 merged=False,
554 merged=False,
572 from_p2=False,
555 from_p2=False,
573 possibly_dirty=False,
556 possibly_dirty=False,
574 ):
557 ):
575 return self._rustmap.addfile(
558 return self._rustmap.addfile(
576 f,
559 f,
577 mode,
560 mode,
578 size,
561 size,
579 mtime,
562 mtime,
580 added,
563 added,
581 merged,
564 merged,
582 from_p2,
565 from_p2,
583 possibly_dirty,
566 possibly_dirty,
584 )
567 )
585
568
586 def reset_state(
569 def reset_state(
587 self,
570 self,
588 filename,
571 filename,
589 wc_tracked,
572 wc_tracked,
590 p1_tracked,
573 p1_tracked,
591 p2_tracked=False,
574 p2_tracked=False,
592 merged=False,
575 merged=False,
593 clean_p1=False,
576 clean_p1=False,
594 clean_p2=False,
577 clean_p2=False,
595 possibly_dirty=False,
578 possibly_dirty=False,
596 parentfiledata=None,
579 parentfiledata=None,
597 ):
580 ):
598 """Set a entry to a given state, disregarding all previous state
581 """Set a entry to a given state, disregarding all previous state
599
582
600 This is to be used by the part of the dirstate API dedicated to
583 This is to be used by the part of the dirstate API dedicated to
601 adjusting the dirstate after an update/merge.
584 adjusting the dirstate after an update/merge.
602
585
603 note: calling this might result in no entry existing at all if the
586 note: calling this might result in no entry existing at all if the
604 dirstate map does not see any point in having one for this file
587 dirstate map does not see any point in having one for this file
605 anymore.
588 anymore.
606 """
589 """
607 if merged and (clean_p1 or clean_p2):
590 if merged and (clean_p1 or clean_p2):
608 msg = (
591 msg = (
609 b'`merged` argument incompatible with `clean_p1`/`clean_p2`'
592 b'`merged` argument incompatible with `clean_p1`/`clean_p2`'
610 )
593 )
611 raise error.ProgrammingError(msg)
594 raise error.ProgrammingError(msg)
612 # copy information is now outdated
595 # copy information is now outdated
613 # (maybe new information should be directly passed to this function)
596 # (maybe new information should be directly passed to this function)
614 self.copymap.pop(filename, None)
597 self.copymap.pop(filename, None)
615
598
616 if not (p1_tracked or p2_tracked or wc_tracked):
599 if not (p1_tracked or p2_tracked or wc_tracked):
617 self.dropfile(filename)
600 self.dropfile(filename)
618 elif merged:
601 elif merged:
619 # XXX might be merged and removed ?
602 # XXX might be merged and removed ?
620 entry = self.get(filename)
603 entry = self.get(filename)
621 if entry is not None and entry.tracked:
604 if entry is not None and entry.tracked:
622 # XXX mostly replicates dirstate.otherparent. We should get
605 # XXX mostly replicates dirstate.otherparent. We should get
623 # the higher layer to pass us more reliable data where `merged`
606 # the higher layer to pass us more reliable data where `merged`
624 # actually means merged. Dropping the else clause will show
607 # actually means merged. Dropping the else clause will show
625 # failure in `test-graft.t`
608 # failure in `test-graft.t`
626 self.addfile(filename, merged=True)
609 self.addfile(filename, merged=True)
627 else:
610 else:
628 self.addfile(filename, from_p2=True)
611 self.addfile(filename, from_p2=True)
629 elif not (p1_tracked or p2_tracked) and wc_tracked:
612 elif not (p1_tracked or p2_tracked) and wc_tracked:
630 self.addfile(
613 self.addfile(
631 filename, added=True, possibly_dirty=possibly_dirty
614 filename, added=True, possibly_dirty=possibly_dirty
632 )
615 )
633 elif (p1_tracked or p2_tracked) and not wc_tracked:
616 elif (p1_tracked or p2_tracked) and not wc_tracked:
634 # XXX might be merged and removed ?
617 # XXX might be merged and removed ?
635 self[filename] = DirstateItem(b'r', 0, 0, 0)
618 self[filename] = DirstateItem(b'r', 0, 0, 0)
636 self.nonnormalset.add(filename)
619 self.nonnormalset.add(filename)
637 elif clean_p2 and wc_tracked:
620 elif clean_p2 and wc_tracked:
638 if p1_tracked or self.get(filename) is not None:
621 if p1_tracked or self.get(filename) is not None:
639 # XXX the `self.get` call is catching some case in
622 # XXX the `self.get` call is catching some case in
640 # `test-merge-remove.t` where the file is tracked in p1, but the
623 # `test-merge-remove.t` where the file is tracked in p1, but the
641 # p1_tracked argument is False.
624 # p1_tracked argument is False.
642 #
625 #
643 # In addition, this seems to be a case where the file is marked
626 # In addition, this seems to be a case where the file is marked
644 # as merged without actually being the result of a merge
627 # as merged without actually being the result of a merge
645 # action. So things are not ideal here.
628 # action. So things are not ideal here.
646 self.addfile(filename, merged=True)
629 self.addfile(filename, merged=True)
647 else:
630 else:
648 self.addfile(filename, from_p2=True)
631 self.addfile(filename, from_p2=True)
649 elif not p1_tracked and p2_tracked and wc_tracked:
632 elif not p1_tracked and p2_tracked and wc_tracked:
650 self.addfile(
633 self.addfile(
651 filename, from_p2=True, possibly_dirty=possibly_dirty
634 filename, from_p2=True, possibly_dirty=possibly_dirty
652 )
635 )
653 elif possibly_dirty:
636 elif possibly_dirty:
654 self.addfile(filename, possibly_dirty=possibly_dirty)
637 self.addfile(filename, possibly_dirty=possibly_dirty)
655 elif wc_tracked:
638 elif wc_tracked:
656 # this is a "normal" file
639 # this is a "normal" file
657 if parentfiledata is None:
640 if parentfiledata is None:
658 msg = b'failed to pass parentfiledata for a normal file: %s'
641 msg = b'failed to pass parentfiledata for a normal file: %s'
659 msg %= filename
642 msg %= filename
660 raise error.ProgrammingError(msg)
643 raise error.ProgrammingError(msg)
661 mode, size, mtime = parentfiledata
644 mode, size, mtime = parentfiledata
662 self.addfile(filename, mode=mode, size=size, mtime=mtime)
645 self.addfile(filename, mode=mode, size=size, mtime=mtime)
663 self.nonnormalset.discard(filename)
646 self.nonnormalset.discard(filename)
664 else:
647 else:
665 assert False, 'unreachable'
648 assert False, 'unreachable'
666
649
650 def set_untracked(self, f):
651 """Mark a file as no longer tracked in the dirstate map"""
652 # in_merge only triggers more logic, so it is fine to always pass it.
653 #
654 # the inner rust dirstate map code needs to be adjusted once the API
655 # for dirstate/dirstatemap/DirstateItem is a bit more settled
656 self._rustmap.removefile(f, in_merge=True)
657
667 def removefile(self, *args, **kwargs):
658 def removefile(self, *args, **kwargs):
668 return self._rustmap.removefile(*args, **kwargs)
659 return self._rustmap.removefile(*args, **kwargs)
669
660
670 def dropfile(self, *args, **kwargs):
661 def dropfile(self, *args, **kwargs):
671 return self._rustmap.dropfile(*args, **kwargs)
662 return self._rustmap.dropfile(*args, **kwargs)
672
663
673 def clearambiguoustimes(self, *args, **kwargs):
664 def clearambiguoustimes(self, *args, **kwargs):
674 return self._rustmap.clearambiguoustimes(*args, **kwargs)
665 return self._rustmap.clearambiguoustimes(*args, **kwargs)
675
666
676 def nonnormalentries(self):
667 def nonnormalentries(self):
677 return self._rustmap.nonnormalentries()
668 return self._rustmap.nonnormalentries()
678
669
679 def get(self, *args, **kwargs):
670 def get(self, *args, **kwargs):
680 return self._rustmap.get(*args, **kwargs)
671 return self._rustmap.get(*args, **kwargs)
681
672
682 @property
673 @property
683 def copymap(self):
674 def copymap(self):
684 return self._rustmap.copymap()
675 return self._rustmap.copymap()
685
676
686 def directories(self):
677 def directories(self):
687 return self._rustmap.directories()
678 return self._rustmap.directories()
688
679
689 def debug_iter(self):
680 def debug_iter(self):
690 return self._rustmap.debug_iter()
681 return self._rustmap.debug_iter()
691
682
692 def preload(self):
683 def preload(self):
693 self._rustmap
684 self._rustmap
694
685
695 def clear(self):
686 def clear(self):
696 self._rustmap.clear()
687 self._rustmap.clear()
697 self.setparents(
688 self.setparents(
698 self._nodeconstants.nullid, self._nodeconstants.nullid
689 self._nodeconstants.nullid, self._nodeconstants.nullid
699 )
690 )
700 util.clearcachedproperty(self, b"_dirs")
691 util.clearcachedproperty(self, b"_dirs")
701 util.clearcachedproperty(self, b"_alldirs")
692 util.clearcachedproperty(self, b"_alldirs")
702 util.clearcachedproperty(self, b"dirfoldmap")
693 util.clearcachedproperty(self, b"dirfoldmap")
703
694
704 def items(self):
695 def items(self):
705 return self._rustmap.items()
696 return self._rustmap.items()
706
697
707 def keys(self):
698 def keys(self):
708 return iter(self._rustmap)
699 return iter(self._rustmap)
709
700
710 def __contains__(self, key):
701 def __contains__(self, key):
711 return key in self._rustmap
702 return key in self._rustmap
712
703
713 def __getitem__(self, item):
704 def __getitem__(self, item):
714 return self._rustmap[item]
705 return self._rustmap[item]
715
706
716 def __len__(self):
707 def __len__(self):
717 return len(self._rustmap)
708 return len(self._rustmap)
718
709
719 def __iter__(self):
710 def __iter__(self):
720 return iter(self._rustmap)
711 return iter(self._rustmap)
721
712
722 # forward for python2,3 compat
713 # forward for python2,3 compat
723 iteritems = items
714 iteritems = items
724
715
725 def _opendirstatefile(self):
716 def _opendirstatefile(self):
726 fp, mode = txnutil.trypending(
717 fp, mode = txnutil.trypending(
727 self._root, self._opener, self._filename
718 self._root, self._opener, self._filename
728 )
719 )
729 if self._pendingmode is not None and self._pendingmode != mode:
720 if self._pendingmode is not None and self._pendingmode != mode:
730 fp.close()
721 fp.close()
731 raise error.Abort(
722 raise error.Abort(
732 _(b'working directory state may be changed parallelly')
723 _(b'working directory state may be changed parallelly')
733 )
724 )
734 self._pendingmode = mode
725 self._pendingmode = mode
735 return fp
726 return fp
736
727
737 def _readdirstatefile(self, size=-1):
728 def _readdirstatefile(self, size=-1):
738 try:
729 try:
739 with self._opendirstatefile() as fp:
730 with self._opendirstatefile() as fp:
740 return fp.read(size)
731 return fp.read(size)
741 except IOError as err:
732 except IOError as err:
742 if err.errno != errno.ENOENT:
733 if err.errno != errno.ENOENT:
743 raise
734 raise
744 # File doesn't exist, so the current state is empty
735 # File doesn't exist, so the current state is empty
745 return b''
736 return b''
746
737
747 def setparents(self, p1, p2):
738 def setparents(self, p1, p2):
748 self._parents = (p1, p2)
739 self._parents = (p1, p2)
749 self._dirtyparents = True
740 self._dirtyparents = True
750
741
751 def parents(self):
742 def parents(self):
752 if not self._parents:
743 if not self._parents:
753 if self._use_dirstate_v2:
744 if self._use_dirstate_v2:
754 self._parents = self.docket.parents
745 self._parents = self.docket.parents
755 else:
746 else:
756 read_len = self._nodelen * 2
747 read_len = self._nodelen * 2
757 st = self._readdirstatefile(read_len)
748 st = self._readdirstatefile(read_len)
758 l = len(st)
749 l = len(st)
759 if l == read_len:
750 if l == read_len:
760 self._parents = (
751 self._parents = (
761 st[: self._nodelen],
752 st[: self._nodelen],
762 st[self._nodelen : 2 * self._nodelen],
753 st[self._nodelen : 2 * self._nodelen],
763 )
754 )
764 elif l == 0:
755 elif l == 0:
765 self._parents = (
756 self._parents = (
766 self._nodeconstants.nullid,
757 self._nodeconstants.nullid,
767 self._nodeconstants.nullid,
758 self._nodeconstants.nullid,
768 )
759 )
769 else:
760 else:
770 raise error.Abort(
761 raise error.Abort(
771 _(b'working directory state appears damaged!')
762 _(b'working directory state appears damaged!')
772 )
763 )
773
764
774 return self._parents
765 return self._parents
775
766
776 @property
767 @property
777 def docket(self):
768 def docket(self):
778 if not self._docket:
769 if not self._docket:
779 if not self._use_dirstate_v2:
770 if not self._use_dirstate_v2:
780 raise error.ProgrammingError(
771 raise error.ProgrammingError(
781 b'dirstate only has a docket in v2 format'
772 b'dirstate only has a docket in v2 format'
782 )
773 )
783 self._docket = docketmod.DirstateDocket.parse(
774 self._docket = docketmod.DirstateDocket.parse(
784 self._readdirstatefile(), self._nodeconstants
775 self._readdirstatefile(), self._nodeconstants
785 )
776 )
786 return self._docket
777 return self._docket
787
778
788 @propertycache
779 @propertycache
789 def _rustmap(self):
780 def _rustmap(self):
790 """
781 """
791 Fills the DirstateMap when called.
782 Fills the DirstateMap when called.
792 """
783 """
793 # ignore HG_PENDING because identity is used only for writing
784 # ignore HG_PENDING because identity is used only for writing
794 self.identity = util.filestat.frompath(
785 self.identity = util.filestat.frompath(
795 self._opener.join(self._filename)
786 self._opener.join(self._filename)
796 )
787 )
797
788
798 if self._use_dirstate_v2:
789 if self._use_dirstate_v2:
799 if self.docket.uuid:
790 if self.docket.uuid:
800 # TODO: use mmap when possible
791 # TODO: use mmap when possible
801 data = self._opener.read(self.docket.data_filename())
792 data = self._opener.read(self.docket.data_filename())
802 else:
793 else:
803 data = b''
794 data = b''
804 self._rustmap = rustmod.DirstateMap.new_v2(
795 self._rustmap = rustmod.DirstateMap.new_v2(
805 data, self.docket.data_size, self.docket.tree_metadata
796 data, self.docket.data_size, self.docket.tree_metadata
806 )
797 )
807 parents = self.docket.parents
798 parents = self.docket.parents
808 else:
799 else:
809 self._rustmap, parents = rustmod.DirstateMap.new_v1(
800 self._rustmap, parents = rustmod.DirstateMap.new_v1(
810 self._use_dirstate_tree, self._readdirstatefile()
801 self._use_dirstate_tree, self._readdirstatefile()
811 )
802 )
812
803
813 if parents and not self._dirtyparents:
804 if parents and not self._dirtyparents:
814 self.setparents(*parents)
805 self.setparents(*parents)
815
806
816 self.__contains__ = self._rustmap.__contains__
807 self.__contains__ = self._rustmap.__contains__
817 self.__getitem__ = self._rustmap.__getitem__
808 self.__getitem__ = self._rustmap.__getitem__
818 self.get = self._rustmap.get
809 self.get = self._rustmap.get
819 return self._rustmap
810 return self._rustmap
820
811
821 def write(self, tr, st, now):
812 def write(self, tr, st, now):
822 if not self._use_dirstate_v2:
813 if not self._use_dirstate_v2:
823 p1, p2 = self.parents()
814 p1, p2 = self.parents()
824 packed = self._rustmap.write_v1(p1, p2, now)
815 packed = self._rustmap.write_v1(p1, p2, now)
825 st.write(packed)
816 st.write(packed)
826 st.close()
817 st.close()
827 self._dirtyparents = False
818 self._dirtyparents = False
828 return
819 return
829
820
830 # We can only append to an existing data file if there is one
821 # We can only append to an existing data file if there is one
831 can_append = self.docket.uuid is not None
822 can_append = self.docket.uuid is not None
832 packed, meta, append = self._rustmap.write_v2(now, can_append)
823 packed, meta, append = self._rustmap.write_v2(now, can_append)
833 if append:
824 if append:
834 docket = self.docket
825 docket = self.docket
835 data_filename = docket.data_filename()
826 data_filename = docket.data_filename()
836 if tr:
827 if tr:
837 tr.add(data_filename, docket.data_size)
828 tr.add(data_filename, docket.data_size)
838 with self._opener(data_filename, b'r+b') as fp:
829 with self._opener(data_filename, b'r+b') as fp:
839 fp.seek(docket.data_size)
830 fp.seek(docket.data_size)
840 assert fp.tell() == docket.data_size
831 assert fp.tell() == docket.data_size
841 written = fp.write(packed)
832 written = fp.write(packed)
842 if written is not None: # py2 may return None
833 if written is not None: # py2 may return None
843 assert written == len(packed), (written, len(packed))
834 assert written == len(packed), (written, len(packed))
844 docket.data_size += len(packed)
835 docket.data_size += len(packed)
845 docket.parents = self.parents()
836 docket.parents = self.parents()
846 docket.tree_metadata = meta
837 docket.tree_metadata = meta
847 st.write(docket.serialize())
838 st.write(docket.serialize())
848 st.close()
839 st.close()
849 else:
840 else:
850 old_docket = self.docket
841 old_docket = self.docket
851 new_docket = docketmod.DirstateDocket.with_new_uuid(
842 new_docket = docketmod.DirstateDocket.with_new_uuid(
852 self.parents(), len(packed), meta
843 self.parents(), len(packed), meta
853 )
844 )
854 data_filename = new_docket.data_filename()
845 data_filename = new_docket.data_filename()
855 if tr:
846 if tr:
856 tr.add(data_filename, 0)
847 tr.add(data_filename, 0)
857 self._opener.write(data_filename, packed)
848 self._opener.write(data_filename, packed)
858 # Write the new docket after the new data file has been
849 # Write the new docket after the new data file has been
859 # written. Because `st` was opened with `atomictemp=True`,
850 # written. Because `st` was opened with `atomictemp=True`,
860 # the actual `.hg/dirstate` file is only affected on close.
851 # the actual `.hg/dirstate` file is only affected on close.
861 st.write(new_docket.serialize())
852 st.write(new_docket.serialize())
862 st.close()
853 st.close()
863 # Remove the old data file after the new docket pointing to
854 # Remove the old data file after the new docket pointing to
864 # the new data file was written.
855 # the new data file was written.
865 if old_docket.uuid:
856 if old_docket.uuid:
866 data_filename = old_docket.data_filename()
857 data_filename = old_docket.data_filename()
867 unlink = lambda _tr=None: self._opener.unlink(data_filename)
858 unlink = lambda _tr=None: self._opener.unlink(data_filename)
868 if tr:
859 if tr:
869 category = b"dirstate-v2-clean-" + old_docket.uuid
860 category = b"dirstate-v2-clean-" + old_docket.uuid
870 tr.addpostclose(category, unlink)
861 tr.addpostclose(category, unlink)
871 else:
862 else:
872 unlink()
863 unlink()
873 self._docket = new_docket
864 self._docket = new_docket
874 # Reload from the newly-written file
865 # Reload from the newly-written file
875 util.clearcachedproperty(self, b"_rustmap")
866 util.clearcachedproperty(self, b"_rustmap")
876 self._dirtyparents = False
867 self._dirtyparents = False
877
868
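Summarizing the dirstate-v2 write protocol implemented above (a condensed restatement, not additional behavior):

    # 1. can_append is True only when the docket already names a data file.
    # 2. append path: seek to docket.data_size, append the packed bytes,
    #    grow data_size, then rewrite the docket in place.
    # 3. rewrite path: write a full data file under a fresh uuid, point a
    #    new docket at it (the atomictemp close of `st` is the commit
    #    point), then unlink the old data file, deferring to the
    #    transaction's post-close hook when a transaction is active.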
878 @propertycache
869 @propertycache
879 def filefoldmap(self):
870 def filefoldmap(self):
880 """Returns a dictionary mapping normalized case paths to their
871 """Returns a dictionary mapping normalized case paths to their
881 non-normalized versions.
872 non-normalized versions.
882 """
873 """
883 return self._rustmap.filefoldmapasdict()
874 return self._rustmap.filefoldmapasdict()
884
875
885 def hastrackeddir(self, d):
876 def hastrackeddir(self, d):
886 return self._rustmap.hastrackeddir(d)
877 return self._rustmap.hastrackeddir(d)
887
878
888 def hasdir(self, d):
879 def hasdir(self, d):
889 return self._rustmap.hasdir(d)
880 return self._rustmap.hasdir(d)
890
881
891 @propertycache
882 @propertycache
892 def identity(self):
883 def identity(self):
893 self._rustmap
884 self._rustmap
894 return self.identity
885 return self.identity
895
886
896 @property
887 @property
897 def nonnormalset(self):
888 def nonnormalset(self):
898 nonnorm = self._rustmap.non_normal_entries()
889 nonnorm = self._rustmap.non_normal_entries()
899 return nonnorm
890 return nonnorm
900
891
901 @propertycache
892 @propertycache
902 def otherparentset(self):
893 def otherparentset(self):
903 otherparents = self._rustmap.other_parent_entries()
894 otherparents = self._rustmap.other_parent_entries()
904 return otherparents
895 return otherparents
905
896
906 def non_normal_or_other_parent_paths(self):
897 def non_normal_or_other_parent_paths(self):
907 return self._rustmap.non_normal_or_other_parent_paths()
898 return self._rustmap.non_normal_or_other_parent_paths()
908
899
909 @propertycache
900 @propertycache
910 def dirfoldmap(self):
901 def dirfoldmap(self):
911 f = {}
902 f = {}
912 normcase = util.normcase
903 normcase = util.normcase
913 for name in self._rustmap.tracked_dirs():
904 for name in self._rustmap.tracked_dirs():
914 f[normcase(name)] = name
905 f[normcase(name)] = name
915 return f
906 return f
916
907
917 def set_possibly_dirty(self, filename):
908 def set_possibly_dirty(self, filename):
918 """record that the current state of the file on disk is unknown"""
909 """record that the current state of the file on disk is unknown"""
919 entry = self[filename]
910 entry = self[filename]
920 entry.set_possibly_dirty()
911 entry.set_possibly_dirty()
921 self._rustmap.set_v1(filename, entry)
912 self._rustmap.set_v1(filename, entry)
922
913
923 def __setitem__(self, key, value):
914 def __setitem__(self, key, value):
924 assert isinstance(value, DirstateItem)
915 assert isinstance(value, DirstateItem)
925 self._rustmap.set_v1(key, value)
916 self._rustmap.set_v1(key, value)
@@ -1,621 +1,637 b''
1 # parsers.py - Python implementation of parsers.c
1 # parsers.py - Python implementation of parsers.c
2 #
2 #
3 # Copyright 2009 Olivia Mackall <olivia@selenic.com> and others
3 # Copyright 2009 Olivia Mackall <olivia@selenic.com> and others
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import struct
10 import struct
11 import zlib
11 import zlib
12
12
13 from ..node import (
13 from ..node import (
14 nullrev,
14 nullrev,
15 sha1nodeconstants,
15 sha1nodeconstants,
16 )
16 )
17 from ..thirdparty import attr
17 from ..thirdparty import attr
18 from .. import (
18 from .. import (
19 error,
19 error,
20 pycompat,
20 pycompat,
21 revlogutils,
21 revlogutils,
22 util,
22 util,
23 )
23 )
24
24
25 from ..revlogutils import nodemap as nodemaputil
25 from ..revlogutils import nodemap as nodemaputil
26 from ..revlogutils import constants as revlog_constants
26 from ..revlogutils import constants as revlog_constants
27
27
28 stringio = pycompat.bytesio
28 stringio = pycompat.bytesio
29
29
30
30
31 _pack = struct.pack
31 _pack = struct.pack
32 _unpack = struct.unpack
32 _unpack = struct.unpack
33 _compress = zlib.compress
33 _compress = zlib.compress
34 _decompress = zlib.decompress
34 _decompress = zlib.decompress
35
35
36
36
37 # a special value used internally for `size` if the file comes from the other parent
37 # a special value used internally for `size` if the file comes from the other parent
38 FROM_P2 = -2
38 FROM_P2 = -2
39
39
40 # a special value used internally for `size` if the file is modified/merged/added
40 # a special value used internally for `size` if the file is modified/merged/added
41 NONNORMAL = -1
41 NONNORMAL = -1
42
42
43 # a special value used internally for `time` if the time is ambiguous
43 # a special value used internally for `time` if the time is ambiguous
44 AMBIGUOUS_TIME = -1
44 AMBIGUOUS_TIME = -1
45
45
46
46
47 @attr.s(slots=True, init=False)
47 @attr.s(slots=True, init=False)
48 class DirstateItem(object):
48 class DirstateItem(object):
49 """represent a dirstate entry
49 """represent a dirstate entry
50
50
51 It contains:
51 It contains:
52
52
53 - state (one of 'n', 'a', 'r', 'm')
53 - state (one of 'n', 'a', 'r', 'm')
54 - mode,
54 - mode,
55 - size,
55 - size,
56 - mtime,
56 - mtime,
57 """
57 """
58
58
59 _state = attr.ib()
59 _state = attr.ib()
60 _mode = attr.ib()
60 _mode = attr.ib()
61 _size = attr.ib()
61 _size = attr.ib()
62 _mtime = attr.ib()
62 _mtime = attr.ib()
63
63
64 def __init__(self, state, mode, size, mtime):
64 def __init__(self, state, mode, size, mtime):
65 self._state = state
65 self._state = state
66 self._mode = mode
66 self._mode = mode
67 self._size = size
67 self._size = size
68 self._mtime = mtime
68 self._mtime = mtime
69
69
70 @classmethod
70 @classmethod
71 def from_v1_data(cls, state, mode, size, mtime):
71 def from_v1_data(cls, state, mode, size, mtime):
72 """Build a new DirstateItem object from V1 data
72 """Build a new DirstateItem object from V1 data
73
73
74 Since the dirstate-v1 format is frozen, the signature of this function
74 Since the dirstate-v1 format is frozen, the signature of this function
75 is not expected to change, unlike the __init__ one.
75 is not expected to change, unlike the __init__ one.
76 """
76 """
77 return cls(
77 return cls(
78 state=state,
78 state=state,
79 mode=mode,
79 mode=mode,
80 size=size,
80 size=size,
81 mtime=mtime,
81 mtime=mtime,
82 )
82 )
83
83
84 def set_possibly_dirty(self):
84 def set_possibly_dirty(self):
85 """Mark a file as "possibly dirty"
85 """Mark a file as "possibly dirty"
86
86
87 This means the next status call will have to actually check its content
87 This means the next status call will have to actually check its content
88 to make sure it is correct.
88 to make sure it is correct.
89 """
89 """
90 self._mtime = AMBIGUOUS_TIME
90 self._mtime = AMBIGUOUS_TIME
91
91
92 def set_untracked(self):
93 """mark a file as untracked in the working copy
94
95 This will ultimately be called by commands like `hg remove`.
96 """
97 # backup the previous state (useful for merge)
98 size = 0
99 if self.merged: # merge
100 size = NONNORMAL
101 elif self.from_p2:
102 size = FROM_P2
103 self._state = b'r'
104 self._mode = 0
105 self._size = size
106 self._mtime = 0
107
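Illustrative transitions for the new method (derived directly from the code above):

    # DirstateItem(b'n', 0o644, 12, 1000).set_untracked()
    #     -> state b'r', mode 0, size 0, mtime 0      (plain tracked file)
    # DirstateItem(b'm', 0, 0, 0).set_untracked()
    #     -> state b'r', size NONNORMAL               (was merged)
    # DirstateItem(b'n', 0, FROM_P2, 0).set_untracked()
    #     -> state b'r', size FROM_P2                 (came from p2)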
92 def __getitem__(self, idx):
108 def __getitem__(self, idx):
93 if idx == 0 or idx == -4:
109 if idx == 0 or idx == -4:
94 msg = b"do not use item[x], use item.state"
110 msg = b"do not use item[x], use item.state"
95 util.nouideprecwarn(msg, b'6.0', stacklevel=2)
111 util.nouideprecwarn(msg, b'6.0', stacklevel=2)
96 return self._state
112 return self._state
97 elif idx == 1 or idx == -3:
113 elif idx == 1 or idx == -3:
98 msg = b"do not use item[x], use item.mode"
114 msg = b"do not use item[x], use item.mode"
99 util.nouideprecwarn(msg, b'6.0', stacklevel=2)
115 util.nouideprecwarn(msg, b'6.0', stacklevel=2)
100 return self._mode
116 return self._mode
101 elif idx == 2 or idx == -2:
117 elif idx == 2 or idx == -2:
102 msg = b"do not use item[x], use item.size"
118 msg = b"do not use item[x], use item.size"
103 util.nouideprecwarn(msg, b'6.0', stacklevel=2)
119 util.nouideprecwarn(msg, b'6.0', stacklevel=2)
104 return self._size
120 return self._size
105 elif idx == 3 or idx == -1:
121 elif idx == 3 or idx == -1:
106 msg = b"do not use item[x], use item.mtime"
122 msg = b"do not use item[x], use item.mtime"
107 util.nouideprecwarn(msg, b'6.0', stacklevel=2)
123 util.nouideprecwarn(msg, b'6.0', stacklevel=2)
108 return self._mtime
124 return self._mtime
109 else:
125 else:
110 raise IndexError(idx)
126 raise IndexError(idx)
111
127
112 @property
128 @property
113 def mode(self):
129 def mode(self):
114 return self._mode
130 return self._mode
115
131
116 @property
132 @property
117 def size(self):
133 def size(self):
118 return self._size
134 return self._size
119
135
120 @property
136 @property
121 def mtime(self):
137 def mtime(self):
122 return self._mtime
138 return self._mtime
123
139
124 @property
140 @property
125 def state(self):
141 def state(self):
126 """
142 """
127 States are:
143 States are:
128 n normal
144 n normal
129 m needs merging
145 m needs merging
130 r marked for removal
146 r marked for removal
131 a marked for addition
147 a marked for addition
132
148
133 XXX This "state" is a bit obscure and mostly a direct expression of the
149 XXX This "state" is a bit obscure and mostly a direct expression of the
134 dirstatev1 format. It would make sense to ultimately deprecate it in
150 dirstatev1 format. It would make sense to ultimately deprecate it in
135 favor of the more "semantic" attributes.
151 favor of the more "semantic" attributes.
136 """
152 """
137 return self._state
153 return self._state
138
154
139 @property
155 @property
140 def tracked(self):
156 def tracked(self):
141 """True is the file is tracked in the working copy"""
157 """True is the file is tracked in the working copy"""
142 return self._state in b"nma"
158 return self._state in b"nma"
143
159
144 @property
160 @property
145 def added(self):
161 def added(self):
146 """True if the file has been added"""
162 """True if the file has been added"""
147 return self._state == b'a'
163 return self._state == b'a'
148
164
149 @property
165 @property
150 def merged(self):
166 def merged(self):
151 """True if the file has been merged
167 """True if the file has been merged
152
168
153 Should only be set if a merge is in progress in the dirstate
169 Should only be set if a merge is in progress in the dirstate
154 """
170 """
155 return self._state == b'm'
171 return self._state == b'm'
156
172
157 @property
173 @property
158 def from_p2(self):
174 def from_p2(self):
159 """True if the file have been fetched from p2 during the current merge
175 """True if the file have been fetched from p2 during the current merge
160
176
161 This is only True is the file is currently tracked.
177 This is only True is the file is currently tracked.
162
178
163 Should only be set if a merge is in progress in the dirstate
179 Should only be set if a merge is in progress in the dirstate
164 """
180 """
165 return self._state == b'n' and self._size == FROM_P2
181 return self._state == b'n' and self._size == FROM_P2
166
182
167 @property
183 @property
168 def from_p2_removed(self):
184 def from_p2_removed(self):
169 """True if the file has been removed, but was "from_p2" initially
185 """True if the file has been removed, but was "from_p2" initially
170
186
171 This property seems like an abstraction leakage and should probably be
187 This property seems like an abstraction leakage and should probably be
172 dealt with in this class (or maybe the dirstatemap) directly.
188 dealt with in this class (or maybe the dirstatemap) directly.
173 """
189 """
174 return self._state == b'r' and self._size == FROM_P2
190 return self._state == b'r' and self._size == FROM_P2
175
191
176 @property
192 @property
177 def removed(self):
193 def removed(self):
178 """True if the file has been removed"""
194 """True if the file has been removed"""
179 return self._state == b'r'
195 return self._state == b'r'
180
196
181 @property
197 @property
182 def merged_removed(self):
198 def merged_removed(self):
183 """True if the file has been removed, but was "merged" initially
199 """True if the file has been removed, but was "merged" initially
184
200
185 This property seems like an abstraction leakage and should probably be
201 This property seems like an abstraction leakage and should probably be
186 dealt with in this class (or maybe the dirstatemap) directly.
202 dealt with in this class (or maybe the dirstatemap) directly.
187 """
203 """
188 return self._state == b'r' and self._size == NONNORMAL
204 return self._state == b'r' and self._size == NONNORMAL
189
205
190 @property
206 @property
191 def dm_nonnormal(self):
207 def dm_nonnormal(self):
192 """True is the entry is non-normal in the dirstatemap sense
208 """True is the entry is non-normal in the dirstatemap sense
193
209
194 There is no reason for any code, but the dirstatemap one to use this.
210 There is no reason for any code, but the dirstatemap one to use this.
195 """
211 """
196 return self.state != b'n' or self.mtime == AMBIGUOUS_TIME
212 return self.state != b'n' or self.mtime == AMBIGUOUS_TIME
197
213
198 @property
214 @property
199 def dm_otherparent(self):
215 def dm_otherparent(self):
200 """True is the entry is `otherparent` in the dirstatemap sense
216 """True is the entry is `otherparent` in the dirstatemap sense
201
217
202 There is no reason for any code, but the dirstatemap one to use this.
218 There is no reason for any code, but the dirstatemap one to use this.
203 """
219 """
204 return self._size == FROM_P2
220 return self._size == FROM_P2
205
221
206 def v1_state(self):
222 def v1_state(self):
207 """return a "state" suitable for v1 serialization"""
223 """return a "state" suitable for v1 serialization"""
208 return self._state
224 return self._state
209
225
210 def v1_mode(self):
226 def v1_mode(self):
211 """return a "mode" suitable for v1 serialization"""
227 """return a "mode" suitable for v1 serialization"""
212 return self._mode
228 return self._mode
213
229
214 def v1_size(self):
230 def v1_size(self):
215 """return a "size" suitable for v1 serialization"""
231 """return a "size" suitable for v1 serialization"""
216 return self._size
232 return self._size
217
233
218 def v1_mtime(self):
234 def v1_mtime(self):
219 """return a "mtime" suitable for v1 serialization"""
235 """return a "mtime" suitable for v1 serialization"""
220 return self._mtime
236 return self._mtime
221
237
222 def need_delay(self, now):
238 def need_delay(self, now):
223 """True if the stored mtime would be ambiguous with the current time"""
239 """True if the stored mtime would be ambiguous with the current time"""
224 return self._state == b'n' and self._mtime == now
240 return self._state == b'n' and self._mtime == now
225
241
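The ambiguity `need_delay` guards against: a file written during the same second the dirstate is packed has a stored mtime equal to `now`, so a later change within that second would be invisible to a pure mode/size/mtime comparison. An illustrative check:

    # e = DirstateItem(b'n', 0o644, 12, now)
    # e.need_delay(now)      -> True   (ambiguous, must be re-checked)
    # e.need_delay(now + 1)  -> False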
226
242
227 def gettype(q):
243 def gettype(q):
228 return int(q & 0xFFFF)
244 return int(q & 0xFFFF)
229
245
230
246
231 class BaseIndexObject(object):
247 class BaseIndexObject(object):
232 # Can I be passed to an algorithm implemented in Rust?
248 # Can I be passed to an algorithm implemented in Rust?
233 rust_ext_compat = 0
249 rust_ext_compat = 0
234 # Format of an index entry according to Python's `struct` language
250 # Format of an index entry according to Python's `struct` language
235 index_format = revlog_constants.INDEX_ENTRY_V1
251 index_format = revlog_constants.INDEX_ENTRY_V1
236 # Size of a C unsigned long long int, platform independent
252 # Size of a C unsigned long long int, platform independent
237 big_int_size = struct.calcsize(b'>Q')
253 big_int_size = struct.calcsize(b'>Q')
238 # Size of a C long int, platform independent
254 # Size of a C long int, platform independent
239 int_size = struct.calcsize(b'>i')
255 int_size = struct.calcsize(b'>i')
240 # An empty index entry, used as a default value to be overridden, or nullrev
256 # An empty index entry, used as a default value to be overridden, or nullrev
241 null_item = (
257 null_item = (
242 0,
258 0,
243 0,
259 0,
244 0,
260 0,
245 -1,
261 -1,
246 -1,
262 -1,
247 -1,
263 -1,
248 -1,
264 -1,
249 sha1nodeconstants.nullid,
265 sha1nodeconstants.nullid,
250 0,
266 0,
251 0,
267 0,
252 revlog_constants.COMP_MODE_INLINE,
268 revlog_constants.COMP_MODE_INLINE,
253 revlog_constants.COMP_MODE_INLINE,
269 revlog_constants.COMP_MODE_INLINE,
254 )
270 )
255
271
256 @util.propertycache
272 @util.propertycache
257 def entry_size(self):
273 def entry_size(self):
258 return self.index_format.size
274 return self.index_format.size
259
275
260 @property
276 @property
261 def nodemap(self):
277 def nodemap(self):
262 msg = b"index.nodemap is deprecated, use index.[has_node|rev|get_rev]"
278 msg = b"index.nodemap is deprecated, use index.[has_node|rev|get_rev]"
263 util.nouideprecwarn(msg, b'5.3', stacklevel=2)
279 util.nouideprecwarn(msg, b'5.3', stacklevel=2)
264 return self._nodemap
280 return self._nodemap
265
281
266 @util.propertycache
282 @util.propertycache
267 def _nodemap(self):
283 def _nodemap(self):
268 nodemap = nodemaputil.NodeMap({sha1nodeconstants.nullid: nullrev})
284 nodemap = nodemaputil.NodeMap({sha1nodeconstants.nullid: nullrev})
269 for r in range(0, len(self)):
285 for r in range(0, len(self)):
270 n = self[r][7]
286 n = self[r][7]
271 nodemap[n] = r
287 nodemap[n] = r
272 return nodemap
288 return nodemap
273
289
274 def has_node(self, node):
290 def has_node(self, node):
275 """return True if the node exist in the index"""
291 """return True if the node exist in the index"""
276 return node in self._nodemap
292 return node in self._nodemap
277
293
278 def rev(self, node):
294 def rev(self, node):
279 """return a revision for a node
295 """return a revision for a node
280
296
281 If the node is unknown, raise a RevlogError"""
297 If the node is unknown, raise a RevlogError"""
282 return self._nodemap[node]
298 return self._nodemap[node]
283
299
284 def get_rev(self, node):
300 def get_rev(self, node):
285 """return a revision for a node
301 """return a revision for a node
286
302
287 If the node is unknown, return None"""
303 If the node is unknown, return None"""
288 return self._nodemap.get(node)
304 return self._nodemap.get(node)
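
    # A hedged usage sketch (not part of the original file): `_nodemap` is
    # built lazily with one O(n) scan, after which lookups are O(1).
    # `index_data` and `some_node` are hypothetical placeholders:
    #
    #   idx, cache = parse_index2(index_data, inline=False)
    #   if idx.has_node(some_node):
    #       r = idx.rev(some_node)       # raises RevlogError when unknown
    #   r = idx.get_rev(some_node)       # returns None when unknown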

    def _stripnodes(self, start):
        if '_nodemap' in vars(self):
            for r in range(start, len(self)):
                n = self[r][7]
                del self._nodemap[n]

    def clearcaches(self):
        self.__dict__.pop('_nodemap', None)

    def __len__(self):
        return self._lgt + len(self._extra)

    def append(self, tup):
        if '_nodemap' in vars(self):
            self._nodemap[tup[7]] = len(self)
        data = self._pack_entry(len(self), tup)
        self._extra.append(data)

    def _pack_entry(self, rev, entry):
        assert entry[8] == 0
        assert entry[9] == 0
        return self.index_format.pack(*entry[:8])

    def _check_index(self, i):
        if not isinstance(i, int):
            raise TypeError(b"expecting int indexes")
        if i < 0 or i >= len(self):
            raise IndexError

    def __getitem__(self, i):
        if i == -1:
            return self.null_item
        self._check_index(i)
        if i >= self._lgt:
            data = self._extra[i - self._lgt]
        else:
            index = self._calculate_index(i)
            data = self._data[index : index + self.entry_size]
        r = self._unpack_entry(i, data)
        if self._lgt and i == 0:
            offset = revlogutils.offset_type(0, gettype(r[0]))
            r = (offset,) + r[1:]
        return r

    def _unpack_entry(self, rev, data):
        r = self.index_format.unpack(data)
        r = r + (
            0,
            0,
            revlog_constants.COMP_MODE_INLINE,
            revlog_constants.COMP_MODE_INLINE,
        )
        return r

    def pack_header(self, header):
        """pack header information as binary"""
        v_fmt = revlog_constants.INDEX_HEADER
        return v_fmt.pack(header)

    def entry_binary(self, rev):
        """return the raw binary string representing a revision"""
        entry = self[rev]
        p = revlog_constants.INDEX_ENTRY_V1.pack(*entry[:8])
        if rev == 0:
            p = p[revlog_constants.INDEX_HEADER.size :]
        return p


class IndexObject(BaseIndexObject):
    def __init__(self, data):
        assert len(data) % self.entry_size == 0, (
            len(data),
            self.entry_size,
            len(data) % self.entry_size,
        )
        self._data = data
        self._lgt = len(data) // self.entry_size
        self._extra = []

    def _calculate_index(self, i):
        return i * self.entry_size

    def __delitem__(self, i):
        if not isinstance(i, slice) or not i.stop == -1 or i.step is not None:
            raise ValueError(b"deleting slices only supports a:-1 with step 1")
        i = i.start
        self._check_index(i)
        self._stripnodes(i)
        if i < self._lgt:
            self._data = self._data[: i * self.entry_size]
            self._lgt = i
            self._extra = []
        else:
            self._extra = self._extra[: i - self._lgt]
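
    # A hedged usage note (not part of the original file): deletion mirrors
    # how strip truncates a revlog, so only an open-ended slice is accepted:
    #
    #   del index[5:-1]  # drop revision 5 and everything after it
    #   del index[5]     # raises ValueError: only `a:-1` slices are allowed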


class PersistentNodeMapIndexObject(IndexObject):
    """a debug-oriented class to test the persistent nodemap

    We need a simple python object to test API and higher level behavior. See
    the Rust implementation for more serious usage. This should be used only
    through the dedicated `devel.persistent-nodemap` config.
    """

    def nodemap_data_all(self):
        """Return bytes containing a full serialization of a nodemap

        The nodemap should be valid for the full set of revisions in the
        index."""
        return nodemaputil.persistent_data(self)

    def nodemap_data_incremental(self):
        """Return bytes containing an incremental update to the persistent nodemap

        This contains the data for an append-only update of the data provided
        in the last call to `update_nodemap_data`.
        """
        if self._nm_root is None:
            return None
        docket = self._nm_docket
        changed, data = nodemaputil.update_persistent_data(
            self, self._nm_root, self._nm_max_idx, self._nm_docket.tip_rev
        )

        self._nm_root = self._nm_max_idx = self._nm_docket = None
        return docket, changed, data

    def update_nodemap_data(self, docket, nm_data):
        """provide a full block of persisted binary data for a nodemap

        The data are expected to come from disk. See `nodemap_data_all` for a
        producer of such data."""
        if nm_data is not None:
            self._nm_root, self._nm_max_idx = nodemaputil.parse_data(nm_data)
            if self._nm_root:
                self._nm_docket = docket
            else:
                self._nm_root = self._nm_max_idx = self._nm_docket = None
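
    # A hedged usage note (not part of the original file): as the docstring
    # says, this class is only meant to be reached through the dedicated
    # devel knob, e.g. in an hgrc (assumed syntax):
    #
    #   [devel]
    #   persistent-nodemap = yes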


class InlinedIndexObject(BaseIndexObject):
    def __init__(self, data, inline=0):
        self._data = data
        self._lgt = self._inline_scan(None)
        self._inline_scan(self._lgt)
        self._extra = []

    def _inline_scan(self, lgt):
        off = 0
        if lgt is not None:
            self._offsets = [0] * lgt
        count = 0
        while off <= len(self._data) - self.entry_size:
            start = off + self.big_int_size
            (s,) = struct.unpack(
                b'>i',
                self._data[start : start + self.int_size],
            )
            if lgt is not None:
                self._offsets[count] = off
                count += 1
            off += self.entry_size + s
        if off != len(self._data):
            raise ValueError(b"corrupted data")
        return count
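
    # A hedged note (not part of the original file): in an inline revlog each
    # index entry is followed by its compressed data chunk, so offsets are
    # irregular. `_inline_scan` therefore runs twice: once with lgt=None to
    # count entries, then again to record each entry's offset. The int read
    # at `off + big_int_size` is the compressed chunk length, giving:
    #
    #   next_off = off + entry_size + chunk_length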

    def __delitem__(self, i):
        if not isinstance(i, slice) or not i.stop == -1 or i.step is not None:
            raise ValueError(b"deleting slices only supports a:-1 with step 1")
        i = i.start
        self._check_index(i)
        self._stripnodes(i)
        if i < self._lgt:
            self._offsets = self._offsets[:i]
            self._lgt = i
            self._extra = []
        else:
            self._extra = self._extra[: i - self._lgt]

    def _calculate_index(self, i):
        return self._offsets[i]


def parse_index2(data, inline, revlogv2=False):
    if not inline:
        cls = IndexObject2 if revlogv2 else IndexObject
        return cls(data), None
    cls = InlinedIndexObject
    return cls(data, inline), (0, data)


def parse_index_cl_v2(data):
    return IndexChangelogV2(data), None


class IndexObject2(IndexObject):
    index_format = revlog_constants.INDEX_ENTRY_V2

    def replace_sidedata_info(
        self,
        rev,
        sidedata_offset,
        sidedata_length,
        offset_flags,
        compression_mode,
    ):
        """
        Replace an existing index entry's sidedata offset and length with new
        ones.
        This cannot be used outside of the context of sidedata rewriting,
        inside the transaction that creates the revision `rev`.
        """
        if rev < 0:
            raise KeyError
        self._check_index(rev)
        if rev < self._lgt:
            msg = b"cannot rewrite entries outside of this transaction"
            raise KeyError(msg)
        else:
            entry = list(self[rev])
            entry[0] = offset_flags
            entry[8] = sidedata_offset
            entry[9] = sidedata_length
            entry[11] = compression_mode
            entry = tuple(entry)
            new = self._pack_entry(rev, entry)
            self._extra[rev - self._lgt] = new

    def _unpack_entry(self, rev, data):
        data = self.index_format.unpack(data)
        entry = data[:10]
        data_comp = data[10] & 3
        sidedata_comp = (data[10] & (3 << 2)) >> 2
        return entry + (data_comp, sidedata_comp)

    def _pack_entry(self, rev, entry):
        data = entry[:10]
        data_comp = entry[10] & 3
        sidedata_comp = (entry[11] & 3) << 2
        data += (data_comp | sidedata_comp,)

        return self.index_format.pack(*data)
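
    # A hedged worked example (not part of the original file): both 2-bit
    # compression modes share a single byte. With data_comp = 2 (0b10) and
    # sidedata_comp = 1 (0b01):
    #
    #   packed = (2 & 3) | ((1 & 3) << 2)     # 0b0110 == 6
    #   packed & 3                            # 2, back to data_comp
    #   (packed & (3 << 2)) >> 2              # 1, back to sidedata_comp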

    def entry_binary(self, rev):
        """return the raw binary string representing a revision"""
        entry = self[rev]
        return self._pack_entry(rev, entry)

    def pack_header(self, header):
        """pack header information as binary"""
        msg = 'version header should go in the docket, not the index: %d'
        msg %= header
        raise error.ProgrammingError(msg)


class IndexChangelogV2(IndexObject2):
    index_format = revlog_constants.INDEX_ENTRY_CL_V2

    def _unpack_entry(self, rev, data, r=True):
        items = self.index_format.unpack(data)
        entry = items[:3] + (rev, rev) + items[3:8]
        data_comp = items[8] & 3
        sidedata_comp = (items[8] >> 2) & 3
        return entry + (data_comp, sidedata_comp)

    def _pack_entry(self, rev, entry):
        assert entry[3] == rev, entry[3]
        assert entry[4] == rev, entry[4]
        data = entry[:3] + entry[5:10]
        data_comp = entry[10] & 3
        sidedata_comp = (entry[11] & 3) << 2
        data += (data_comp | sidedata_comp,)
        return self.index_format.pack(*data)


def parse_index_devel_nodemap(data, inline):
    """like parse_index2, but always return a PersistentNodeMapIndexObject"""
    return PersistentNodeMapIndexObject(data), None


def parse_dirstate(dmap, copymap, st):
    parents = [st[:20], st[20:40]]
    # dereference fields so they will be local in loop
    format = b">cllll"
    e_size = struct.calcsize(format)
    pos1 = 40
    l = len(st)

    # the inner loop
    while pos1 < l:
        pos2 = pos1 + e_size
        e = _unpack(b">cllll", st[pos1:pos2])  # a literal here is faster
        pos1 = pos2 + e[4]
        f = st[pos2:pos1]
        if b'\0' in f:
            f, c = f.split(b'\0')
            copymap[f] = c
        dmap[f] = DirstateItem.from_v1_data(*e[:4])
    return parents
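

# A hedged sketch (not part of the original file) of the v1 record format the
# loop above decodes. `_example_v1_record` is a hypothetical helper for
# illustration only.
def _example_v1_record():
    # 40 bytes of parent hashes come first, then repeated entries of
    # ">cllll": state (1 byte), mode, size, mtime, filename length,
    # each followed by the filename (optionally b'\0' + copy source).
    rec = struct.pack(b'>cllll', b'n', 0o644, 12, 1700000000, len(b'foo.txt'))
    return rec + b'foo.txt'  # or b'foo.txt\0old.txt' for a tracked copy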


def pack_dirstate(dmap, copymap, pl, now):
    now = int(now)
    cs = stringio()
    write = cs.write
    write(b"".join(pl))
    for f, e in pycompat.iteritems(dmap):
        if e.need_delay(now):
            # The file was last modified "simultaneously" with the current
            # write to dirstate (i.e. within the same second for file-
            # systems with a granularity of 1 sec). This commonly happens
            # for at least a couple of files on 'update'.
            # The user could change the file without changing its size
            # within the same second. Invalidate the file's mtime in
            # dirstate, forcing future 'status' calls to compare the
            # contents of the file if the size is the same. This prevents
            # mistakenly treating such files as clean.
            e.set_possibly_dirty()

        if f in copymap:
            f = b"%s\0%s" % (f, copymap[f])
        e = _pack(
            b">cllll",
            e.v1_state(),
            e.v1_mode(),
            e.v1_size(),
            e.v1_mtime(),
            len(f),
        )
        write(e)
        write(f)
    return cs.getvalue()
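

# A hedged round-trip sketch (not part of the original file); assumes `dmap`
# and `copymap` are plain dicts, `p1`/`p2` are 20-byte parent hashes and
# `now` is a timestamp. `_example_roundtrip` is illustrative only.
def _example_roundtrip(dmap, copymap, p1, p2, now):
    data = pack_dirstate(dmap, copymap, [p1, p2], now)
    dmap2, copymap2 = {}, {}
    parents = parse_dirstate(dmap2, copymap2, data)
    assert parents == [p1, p2]
    return dmap2, copymap2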