dirstate: drop all logic around the "non-normal" sets...
marmoute
r48875:060cd909 default
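The hunk below removes nonnormalotherparententries() together with the dm_nonnormal and dm_otherparent getters, i.e. the C-side maintenance of the "non-normal" and "other parent" sets. For orientation, here is a minimal, self-contained C sketch of the per-entry tests that removed code applied. The flag values are illustrative stand-ins only; the real dirstate_flag_* constants live in util.h and are not shown in this diff.

#include <stdbool.h>
#include <stdio.h>

enum {
        WC_TRACKED     = 1 << 0, /* stand-in for dirstate_flag_wc_tracked */
        P1_TRACKED     = 1 << 1, /* stand-in for dirstate_flag_p1_tracked */
        P2_TRACKED     = 1 << 2, /* stand-in for dirstate_flag_p2_tracked */
        POSSIBLY_DIRTY = 1 << 3, /* stand-in for dirstate_flag_possibly_dirty */
        MERGED         = 1 << 4, /* stand-in for dirstate_flag_merged */
        CLEAN_P2       = 1 << 5, /* stand-in for dirstate_flag_clean_p2 */
};

/* An entry lands in the "non-normal" set when it is not tracked in the
 * working copy, tracked in neither parent, or flagged possibly-dirty or
 * merged -- the same test the removed helper ran on every dmap entry. */
static bool is_nonnormal(unsigned flags)
{
        return !(flags & WC_TRACKED) ||
               !(flags & (P1_TRACKED | P2_TRACKED)) ||
               (flags & (POSSIBLY_DIRTY | MERGED));
}

/* An entry lands in the "other parent" set when it is tracked in the
 * working copy and clean relative to p2 (the `from_p2` case). */
static bool is_otherparent(unsigned flags)
{
        return (flags & WC_TRACKED) && (flags & CLEAN_P2);
}

int main(void)
{
        unsigned added = WC_TRACKED;              /* v1 state 'a' */
        unsigned clean = WC_TRACKED | P1_TRACKED; /* clean v1 state 'n' */
        printf("added: nonnormal=%d otherparent=%d\n",
               is_nonnormal(added), is_otherparent(added));
        printf("clean: nonnormal=%d otherparent=%d\n",
               is_nonnormal(clean), is_otherparent(clean));
        return 0;
}

With the precomputed sets gone, this classification is presumably derived on demand from each DirstateItem rather than maintained as separate sets alongside the dirstate map.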
@@ -1,1339 +1,1252 @@
1 /*
1 /*
2 parsers.c - efficient content parsing
2 parsers.c - efficient content parsing
3
3
4 Copyright 2008 Olivia Mackall <olivia@selenic.com> and others
4 Copyright 2008 Olivia Mackall <olivia@selenic.com> and others
5
5
6 This software may be used and distributed according to the terms of
6 This software may be used and distributed according to the terms of
7 the GNU General Public License, incorporated herein by reference.
7 the GNU General Public License, incorporated herein by reference.
8 */
8 */
9
9
10 #define PY_SSIZE_T_CLEAN
10 #define PY_SSIZE_T_CLEAN
11 #include <Python.h>
11 #include <Python.h>
12 #include <ctype.h>
12 #include <ctype.h>
13 #include <stddef.h>
13 #include <stddef.h>
14 #include <string.h>
14 #include <string.h>
15
15
16 #include "bitmanipulation.h"
16 #include "bitmanipulation.h"
17 #include "charencode.h"
17 #include "charencode.h"
18 #include "util.h"
18 #include "util.h"
19
19
20 #ifdef IS_PY3K
20 #ifdef IS_PY3K
21 /* The mapping of Python types is meant to be temporary to get Python
21 /* The mapping of Python types is meant to be temporary to get Python
22 * 3 to compile. We should remove this once Python 3 is fully
22 * 3 to compile. We should remove this once Python 3 is fully
23 * supported and proper types are used in the extensions themselves. */
23 * supported and proper types are used in the extensions themselves. */
24 #define PyInt_Check PyLong_Check
24 #define PyInt_Check PyLong_Check
25 #define PyInt_FromLong PyLong_FromLong
25 #define PyInt_FromLong PyLong_FromLong
26 #define PyInt_FromSsize_t PyLong_FromSsize_t
26 #define PyInt_FromSsize_t PyLong_FromSsize_t
27 #define PyInt_AsLong PyLong_AsLong
27 #define PyInt_AsLong PyLong_AsLong
28 #endif
28 #endif
29
29
30 static const char *const versionerrortext = "Python minor version mismatch";
30 static const char *const versionerrortext = "Python minor version mismatch";
31
31
32 static const int dirstate_v1_from_p2 = -2;
32 static const int dirstate_v1_from_p2 = -2;
33 static const int dirstate_v1_nonnormal = -1;
33 static const int dirstate_v1_nonnormal = -1;
34 static const int ambiguous_time = -1;
34 static const int ambiguous_time = -1;
35
35
36 static PyObject *dict_new_presized(PyObject *self, PyObject *args)
36 static PyObject *dict_new_presized(PyObject *self, PyObject *args)
37 {
37 {
38 Py_ssize_t expected_size;
38 Py_ssize_t expected_size;
39
39
40 if (!PyArg_ParseTuple(args, "n:make_presized_dict", &expected_size)) {
40 if (!PyArg_ParseTuple(args, "n:make_presized_dict", &expected_size)) {
41 return NULL;
41 return NULL;
42 }
42 }
43
43
44 return _dict_new_presized(expected_size);
44 return _dict_new_presized(expected_size);
45 }
45 }
46
46
47 static PyObject *dirstate_item_new(PyTypeObject *subtype, PyObject *args,
47 static PyObject *dirstate_item_new(PyTypeObject *subtype, PyObject *args,
48 PyObject *kwds)
48 PyObject *kwds)
49 {
49 {
50 /* We do all the initialization here and not a tp_init function because
50 /* We do all the initialization here and not a tp_init function because
51 * dirstate_item is immutable. */
51 * dirstate_item is immutable. */
52 dirstateItemObject *t;
52 dirstateItemObject *t;
53 int wc_tracked;
53 int wc_tracked;
54 int p1_tracked;
54 int p1_tracked;
55 int p2_tracked;
55 int p2_tracked;
56 int merged;
56 int merged;
57 int clean_p1;
57 int clean_p1;
58 int clean_p2;
58 int clean_p2;
59 int possibly_dirty;
59 int possibly_dirty;
60 PyObject *parentfiledata;
60 PyObject *parentfiledata;
61 static char *keywords_name[] = {
61 static char *keywords_name[] = {
62 "wc_tracked", "p1_tracked", "p2_tracked",
62 "wc_tracked", "p1_tracked", "p2_tracked",
63 "merged", "clean_p1", "clean_p2",
63 "merged", "clean_p1", "clean_p2",
64 "possibly_dirty", "parentfiledata", NULL,
64 "possibly_dirty", "parentfiledata", NULL,
65 };
65 };
66 wc_tracked = 0;
66 wc_tracked = 0;
67 p1_tracked = 0;
67 p1_tracked = 0;
68 p2_tracked = 0;
68 p2_tracked = 0;
69 merged = 0;
69 merged = 0;
70 clean_p1 = 0;
70 clean_p1 = 0;
71 clean_p2 = 0;
71 clean_p2 = 0;
72 possibly_dirty = 0;
72 possibly_dirty = 0;
73 parentfiledata = Py_None;
73 parentfiledata = Py_None;
74 if (!PyArg_ParseTupleAndKeywords(args, kwds, "iiiiiiiO", keywords_name,
74 if (!PyArg_ParseTupleAndKeywords(args, kwds, "iiiiiiiO", keywords_name,
75 &wc_tracked, &p1_tracked, &p2_tracked,
75 &wc_tracked, &p1_tracked, &p2_tracked,
76 &merged, &clean_p1, &clean_p2,
76 &merged, &clean_p1, &clean_p2,
77 &possibly_dirty, &parentfiledata
77 &possibly_dirty, &parentfiledata
78
78
79 )) {
79 )) {
80 return NULL;
80 return NULL;
81 }
81 }
82 if (merged && (clean_p1 || clean_p2)) {
82 if (merged && (clean_p1 || clean_p2)) {
83 PyErr_SetString(PyExc_RuntimeError,
83 PyErr_SetString(PyExc_RuntimeError,
84 "`merged` argument incompatible with "
84 "`merged` argument incompatible with "
85 "`clean_p1`/`clean_p2`");
85 "`clean_p1`/`clean_p2`");
86 return NULL;
86 return NULL;
87 }
87 }
88 t = (dirstateItemObject *)subtype->tp_alloc(subtype, 1);
88 t = (dirstateItemObject *)subtype->tp_alloc(subtype, 1);
89 if (!t) {
89 if (!t) {
90 return NULL;
90 return NULL;
91 }
91 }
92
92
93 t->flags = 0;
93 t->flags = 0;
94 if (wc_tracked) {
94 if (wc_tracked) {
95 t->flags |= dirstate_flag_wc_tracked;
95 t->flags |= dirstate_flag_wc_tracked;
96 }
96 }
97 if (p1_tracked) {
97 if (p1_tracked) {
98 t->flags |= dirstate_flag_p1_tracked;
98 t->flags |= dirstate_flag_p1_tracked;
99 }
99 }
100 if (p2_tracked) {
100 if (p2_tracked) {
101 t->flags |= dirstate_flag_p2_tracked;
101 t->flags |= dirstate_flag_p2_tracked;
102 }
102 }
103 if (possibly_dirty) {
103 if (possibly_dirty) {
104 t->flags |= dirstate_flag_possibly_dirty;
104 t->flags |= dirstate_flag_possibly_dirty;
105 }
105 }
106 if (merged) {
106 if (merged) {
107 t->flags |= dirstate_flag_merged;
107 t->flags |= dirstate_flag_merged;
108 }
108 }
109 if (clean_p1) {
109 if (clean_p1) {
110 t->flags |= dirstate_flag_clean_p1;
110 t->flags |= dirstate_flag_clean_p1;
111 }
111 }
112 if (clean_p2) {
112 if (clean_p2) {
113 t->flags |= dirstate_flag_clean_p2;
113 t->flags |= dirstate_flag_clean_p2;
114 }
114 }
115 t->mode = 0;
115 t->mode = 0;
116 t->size = dirstate_v1_nonnormal;
116 t->size = dirstate_v1_nonnormal;
117 t->mtime = ambiguous_time;
117 t->mtime = ambiguous_time;
118 if (parentfiledata != Py_None) {
118 if (parentfiledata != Py_None) {
119 if (!PyTuple_CheckExact(parentfiledata)) {
119 if (!PyTuple_CheckExact(parentfiledata)) {
120 PyErr_SetString(
120 PyErr_SetString(
121 PyExc_TypeError,
121 PyExc_TypeError,
122 "parentfiledata should be a Tuple or None");
122 "parentfiledata should be a Tuple or None");
123 return NULL;
123 return NULL;
124 }
124 }
125 t->mode =
125 t->mode =
126 (int)PyLong_AsLong(PyTuple_GetItem(parentfiledata, 0));
126 (int)PyLong_AsLong(PyTuple_GetItem(parentfiledata, 0));
127 t->size =
127 t->size =
128 (int)PyLong_AsLong(PyTuple_GetItem(parentfiledata, 1));
128 (int)PyLong_AsLong(PyTuple_GetItem(parentfiledata, 1));
129 t->mtime =
129 t->mtime =
130 (int)PyLong_AsLong(PyTuple_GetItem(parentfiledata, 2));
130 (int)PyLong_AsLong(PyTuple_GetItem(parentfiledata, 2));
131 }
131 }
132 return (PyObject *)t;
132 return (PyObject *)t;
133 }
133 }
134
134
135 static void dirstate_item_dealloc(PyObject *o)
135 static void dirstate_item_dealloc(PyObject *o)
136 {
136 {
137 PyObject_Del(o);
137 PyObject_Del(o);
138 }
138 }
139
139
140 static inline bool dirstate_item_c_tracked(dirstateItemObject *self)
140 static inline bool dirstate_item_c_tracked(dirstateItemObject *self)
141 {
141 {
142 return (self->flags & dirstate_flag_wc_tracked);
142 return (self->flags & dirstate_flag_wc_tracked);
143 }
143 }
144
144
145 static inline bool dirstate_item_c_added(dirstateItemObject *self)
145 static inline bool dirstate_item_c_added(dirstateItemObject *self)
146 {
146 {
147 unsigned char mask =
147 unsigned char mask =
148 (dirstate_flag_wc_tracked | dirstate_flag_p1_tracked |
148 (dirstate_flag_wc_tracked | dirstate_flag_p1_tracked |
149 dirstate_flag_p2_tracked);
149 dirstate_flag_p2_tracked);
150 unsigned char target = dirstate_flag_wc_tracked;
150 unsigned char target = dirstate_flag_wc_tracked;
151 return (self->flags & mask) == target;
151 return (self->flags & mask) == target;
152 }
152 }
153
153
154 static inline bool dirstate_item_c_removed(dirstateItemObject *self)
154 static inline bool dirstate_item_c_removed(dirstateItemObject *self)
155 {
155 {
156 if (self->flags & dirstate_flag_wc_tracked) {
156 if (self->flags & dirstate_flag_wc_tracked) {
157 return false;
157 return false;
158 }
158 }
159 return (self->flags &
159 return (self->flags &
160 (dirstate_flag_p1_tracked | dirstate_flag_p2_tracked));
160 (dirstate_flag_p1_tracked | dirstate_flag_p2_tracked));
161 }
161 }
162
162
163 static inline bool dirstate_item_c_merged(dirstateItemObject *self)
163 static inline bool dirstate_item_c_merged(dirstateItemObject *self)
164 {
164 {
165 return ((self->flags & dirstate_flag_wc_tracked) &&
165 return ((self->flags & dirstate_flag_wc_tracked) &&
166 (self->flags & dirstate_flag_merged));
166 (self->flags & dirstate_flag_merged));
167 }
167 }
168
168
169 static inline bool dirstate_item_c_merged_removed(dirstateItemObject *self)
169 static inline bool dirstate_item_c_merged_removed(dirstateItemObject *self)
170 {
170 {
171 if (!dirstate_item_c_removed(self)) {
171 if (!dirstate_item_c_removed(self)) {
172 return false;
172 return false;
173 }
173 }
174 return (self->flags & dirstate_flag_merged);
174 return (self->flags & dirstate_flag_merged);
175 }
175 }
176
176
177 static inline bool dirstate_item_c_from_p2(dirstateItemObject *self)
177 static inline bool dirstate_item_c_from_p2(dirstateItemObject *self)
178 {
178 {
179 if (!dirstate_item_c_tracked(self)) {
179 if (!dirstate_item_c_tracked(self)) {
180 return false;
180 return false;
181 }
181 }
182 return (self->flags & dirstate_flag_clean_p2);
182 return (self->flags & dirstate_flag_clean_p2);
183 }
183 }
184
184
185 static inline bool dirstate_item_c_from_p2_removed(dirstateItemObject *self)
185 static inline bool dirstate_item_c_from_p2_removed(dirstateItemObject *self)
186 {
186 {
187 if (!dirstate_item_c_removed(self)) {
187 if (!dirstate_item_c_removed(self)) {
188 return false;
188 return false;
189 }
189 }
190 return (self->flags & dirstate_flag_clean_p2);
190 return (self->flags & dirstate_flag_clean_p2);
191 }
191 }
192
192
193 static inline char dirstate_item_c_v1_state(dirstateItemObject *self)
193 static inline char dirstate_item_c_v1_state(dirstateItemObject *self)
194 {
194 {
195 if (dirstate_item_c_removed(self)) {
195 if (dirstate_item_c_removed(self)) {
196 return 'r';
196 return 'r';
197 } else if (dirstate_item_c_merged(self)) {
197 } else if (dirstate_item_c_merged(self)) {
198 return 'm';
198 return 'm';
199 } else if (dirstate_item_c_added(self)) {
199 } else if (dirstate_item_c_added(self)) {
200 return 'a';
200 return 'a';
201 } else {
201 } else {
202 return 'n';
202 return 'n';
203 }
203 }
204 }
204 }
205
205
206 static inline int dirstate_item_c_v1_mode(dirstateItemObject *self)
206 static inline int dirstate_item_c_v1_mode(dirstateItemObject *self)
207 {
207 {
208 return self->mode;
208 return self->mode;
209 }
209 }
210
210
211 static inline int dirstate_item_c_v1_size(dirstateItemObject *self)
211 static inline int dirstate_item_c_v1_size(dirstateItemObject *self)
212 {
212 {
213 if (dirstate_item_c_merged_removed(self)) {
213 if (dirstate_item_c_merged_removed(self)) {
214 return dirstate_v1_nonnormal;
214 return dirstate_v1_nonnormal;
215 } else if (dirstate_item_c_from_p2_removed(self)) {
215 } else if (dirstate_item_c_from_p2_removed(self)) {
216 return dirstate_v1_from_p2;
216 return dirstate_v1_from_p2;
217 } else if (dirstate_item_c_removed(self)) {
217 } else if (dirstate_item_c_removed(self)) {
218 return 0;
218 return 0;
219 } else if (dirstate_item_c_merged(self)) {
219 } else if (dirstate_item_c_merged(self)) {
220 return dirstate_v1_from_p2;
220 return dirstate_v1_from_p2;
221 } else if (dirstate_item_c_added(self)) {
221 } else if (dirstate_item_c_added(self)) {
222 return dirstate_v1_nonnormal;
222 return dirstate_v1_nonnormal;
223 } else if (dirstate_item_c_from_p2(self)) {
223 } else if (dirstate_item_c_from_p2(self)) {
224 return dirstate_v1_from_p2;
224 return dirstate_v1_from_p2;
225 } else if (self->flags & dirstate_flag_possibly_dirty) {
225 } else if (self->flags & dirstate_flag_possibly_dirty) {
226 return self->size; /* NON NORMAL ? */
226 return self->size; /* NON NORMAL ? */
227 } else {
227 } else {
228 return self->size;
228 return self->size;
229 }
229 }
230 }
230 }
231
231
232 static inline int dirstate_item_c_v1_mtime(dirstateItemObject *self)
232 static inline int dirstate_item_c_v1_mtime(dirstateItemObject *self)
233 {
233 {
234 if (dirstate_item_c_removed(self)) {
234 if (dirstate_item_c_removed(self)) {
235 return 0;
235 return 0;
236 } else if (self->flags & dirstate_flag_possibly_dirty) {
236 } else if (self->flags & dirstate_flag_possibly_dirty) {
237 return ambiguous_time;
237 return ambiguous_time;
238 } else if (dirstate_item_c_merged(self)) {
238 } else if (dirstate_item_c_merged(self)) {
239 return ambiguous_time;
239 return ambiguous_time;
240 } else if (dirstate_item_c_added(self)) {
240 } else if (dirstate_item_c_added(self)) {
241 return ambiguous_time;
241 return ambiguous_time;
242 } else if (dirstate_item_c_from_p2(self)) {
242 } else if (dirstate_item_c_from_p2(self)) {
243 return ambiguous_time;
243 return ambiguous_time;
244 } else {
244 } else {
245 return self->mtime;
245 return self->mtime;
246 }
246 }
247 }
247 }
248
248
249 static PyObject *dirstate_item_v1_state(dirstateItemObject *self)
249 static PyObject *dirstate_item_v1_state(dirstateItemObject *self)
250 {
250 {
251 char state = dirstate_item_c_v1_state(self);
251 char state = dirstate_item_c_v1_state(self);
252 return PyBytes_FromStringAndSize(&state, 1);
252 return PyBytes_FromStringAndSize(&state, 1);
253 };
253 };
254
254
255 static PyObject *dirstate_item_v1_mode(dirstateItemObject *self)
255 static PyObject *dirstate_item_v1_mode(dirstateItemObject *self)
256 {
256 {
257 return PyInt_FromLong(dirstate_item_c_v1_mode(self));
257 return PyInt_FromLong(dirstate_item_c_v1_mode(self));
258 };
258 };
259
259
260 static PyObject *dirstate_item_v1_size(dirstateItemObject *self)
260 static PyObject *dirstate_item_v1_size(dirstateItemObject *self)
261 {
261 {
262 return PyInt_FromLong(dirstate_item_c_v1_size(self));
262 return PyInt_FromLong(dirstate_item_c_v1_size(self));
263 };
263 };
264
264
265 static PyObject *dirstate_item_v1_mtime(dirstateItemObject *self)
265 static PyObject *dirstate_item_v1_mtime(dirstateItemObject *self)
266 {
266 {
267 return PyInt_FromLong(dirstate_item_c_v1_mtime(self));
267 return PyInt_FromLong(dirstate_item_c_v1_mtime(self));
268 };
268 };
269
269
270 static PyObject *dirstate_item_need_delay(dirstateItemObject *self,
270 static PyObject *dirstate_item_need_delay(dirstateItemObject *self,
271 PyObject *value)
271 PyObject *value)
272 {
272 {
273 long now;
273 long now;
274 if (!pylong_to_long(value, &now)) {
274 if (!pylong_to_long(value, &now)) {
275 return NULL;
275 return NULL;
276 }
276 }
277 if (dirstate_item_c_v1_state(self) == 'n' &&
277 if (dirstate_item_c_v1_state(self) == 'n' &&
278 dirstate_item_c_v1_mtime(self) == now) {
278 dirstate_item_c_v1_mtime(self) == now) {
279 Py_RETURN_TRUE;
279 Py_RETURN_TRUE;
280 } else {
280 } else {
281 Py_RETURN_FALSE;
281 Py_RETURN_FALSE;
282 }
282 }
283 };
283 };
284
284
285 /* This will never change since it's bound to V1
285 /* This will never change since it's bound to V1
286 */
286 */
287 static inline dirstateItemObject *
287 static inline dirstateItemObject *
288 dirstate_item_from_v1_data(char state, int mode, int size, int mtime)
288 dirstate_item_from_v1_data(char state, int mode, int size, int mtime)
289 {
289 {
290 dirstateItemObject *t =
290 dirstateItemObject *t =
291 PyObject_New(dirstateItemObject, &dirstateItemType);
291 PyObject_New(dirstateItemObject, &dirstateItemType);
292 if (!t) {
292 if (!t) {
293 return NULL;
293 return NULL;
294 }
294 }
295
295
296 if (state == 'm') {
296 if (state == 'm') {
297 t->flags =
297 t->flags =
298 (dirstate_flag_wc_tracked | dirstate_flag_p1_tracked |
298 (dirstate_flag_wc_tracked | dirstate_flag_p1_tracked |
299 dirstate_flag_p2_tracked | dirstate_flag_merged);
299 dirstate_flag_p2_tracked | dirstate_flag_merged);
300 t->mode = 0;
300 t->mode = 0;
301 t->size = dirstate_v1_from_p2;
301 t->size = dirstate_v1_from_p2;
302 t->mtime = ambiguous_time;
302 t->mtime = ambiguous_time;
303 } else if (state == 'a') {
303 } else if (state == 'a') {
304 t->flags = dirstate_flag_wc_tracked;
304 t->flags = dirstate_flag_wc_tracked;
305 t->mode = 0;
305 t->mode = 0;
306 t->size = dirstate_v1_nonnormal;
306 t->size = dirstate_v1_nonnormal;
307 t->mtime = ambiguous_time;
307 t->mtime = ambiguous_time;
308 } else if (state == 'r') {
308 } else if (state == 'r') {
309 t->mode = 0;
309 t->mode = 0;
310 t->size = 0;
310 t->size = 0;
311 t->mtime = 0;
311 t->mtime = 0;
312 if (size == dirstate_v1_nonnormal) {
312 if (size == dirstate_v1_nonnormal) {
313 t->flags =
313 t->flags =
314 (dirstate_flag_p1_tracked |
314 (dirstate_flag_p1_tracked |
315 dirstate_flag_p2_tracked | dirstate_flag_merged);
315 dirstate_flag_p2_tracked | dirstate_flag_merged);
316 } else if (size == dirstate_v1_from_p2) {
316 } else if (size == dirstate_v1_from_p2) {
317 t->flags =
317 t->flags =
318 (dirstate_flag_p2_tracked | dirstate_flag_clean_p2);
318 (dirstate_flag_p2_tracked | dirstate_flag_clean_p2);
319 } else {
319 } else {
320 t->flags = dirstate_flag_p1_tracked;
320 t->flags = dirstate_flag_p1_tracked;
321 }
321 }
322 } else if (state == 'n') {
322 } else if (state == 'n') {
323 if (size == dirstate_v1_from_p2) {
323 if (size == dirstate_v1_from_p2) {
324 t->flags =
324 t->flags =
325 (dirstate_flag_wc_tracked |
325 (dirstate_flag_wc_tracked |
326 dirstate_flag_p2_tracked | dirstate_flag_clean_p2);
326 dirstate_flag_p2_tracked | dirstate_flag_clean_p2);
327 t->mode = 0;
327 t->mode = 0;
328 t->size = dirstate_v1_from_p2;
328 t->size = dirstate_v1_from_p2;
329 t->mtime = ambiguous_time;
329 t->mtime = ambiguous_time;
330 } else if (size == dirstate_v1_nonnormal) {
330 } else if (size == dirstate_v1_nonnormal) {
331 t->flags = (dirstate_flag_wc_tracked |
331 t->flags = (dirstate_flag_wc_tracked |
332 dirstate_flag_p1_tracked |
332 dirstate_flag_p1_tracked |
333 dirstate_flag_possibly_dirty);
333 dirstate_flag_possibly_dirty);
334 t->mode = 0;
334 t->mode = 0;
335 t->size = dirstate_v1_nonnormal;
335 t->size = dirstate_v1_nonnormal;
336 t->mtime = ambiguous_time;
336 t->mtime = ambiguous_time;
337 } else if (mtime == ambiguous_time) {
337 } else if (mtime == ambiguous_time) {
338 t->flags = (dirstate_flag_wc_tracked |
338 t->flags = (dirstate_flag_wc_tracked |
339 dirstate_flag_p1_tracked |
339 dirstate_flag_p1_tracked |
340 dirstate_flag_possibly_dirty);
340 dirstate_flag_possibly_dirty);
341 t->mode = mode;
341 t->mode = mode;
342 t->size = size;
342 t->size = size;
343 t->mtime = 0;
343 t->mtime = 0;
344 } else {
344 } else {
345 t->flags = (dirstate_flag_wc_tracked |
345 t->flags = (dirstate_flag_wc_tracked |
346 dirstate_flag_p1_tracked);
346 dirstate_flag_p1_tracked);
347 t->mode = mode;
347 t->mode = mode;
348 t->size = size;
348 t->size = size;
349 t->mtime = mtime;
349 t->mtime = mtime;
350 }
350 }
351 } else {
351 } else {
352 PyErr_Format(PyExc_RuntimeError,
352 PyErr_Format(PyExc_RuntimeError,
353 "unknown state: `%c` (%d, %d, %d)", state, mode,
353 "unknown state: `%c` (%d, %d, %d)", state, mode,
354 size, mtime, NULL);
354 size, mtime, NULL);
355 Py_DECREF(t);
355 Py_DECREF(t);
356 return NULL;
356 return NULL;
357 }
357 }
358
358
359 return t;
359 return t;
360 }
360 }
361
361
362 /* This will never change since it's bound to V1, unlike `dirstate_item_new` */
362 /* This will never change since it's bound to V1, unlike `dirstate_item_new` */
363 static PyObject *dirstate_item_from_v1_meth(PyTypeObject *subtype,
363 static PyObject *dirstate_item_from_v1_meth(PyTypeObject *subtype,
364 PyObject *args)
364 PyObject *args)
365 {
365 {
366 /* We do all the initialization here and not a tp_init function because
366 /* We do all the initialization here and not a tp_init function because
367 * dirstate_item is immutable. */
367 * dirstate_item is immutable. */
368 char state;
368 char state;
369 int size, mode, mtime;
369 int size, mode, mtime;
370 if (!PyArg_ParseTuple(args, "ciii", &state, &mode, &size, &mtime)) {
370 if (!PyArg_ParseTuple(args, "ciii", &state, &mode, &size, &mtime)) {
371 return NULL;
371 return NULL;
372 }
372 }
373 return (PyObject *)dirstate_item_from_v1_data(state, mode, size, mtime);
373 return (PyObject *)dirstate_item_from_v1_data(state, mode, size, mtime);
374 };
374 };
375
375
376 /* constructor to help legacy API to build a new "added" item
376 /* constructor to help legacy API to build a new "added" item
377
377
378 Should eventually be removed */
378 Should eventually be removed */
379 static PyObject *dirstate_item_new_added(PyTypeObject *subtype)
379 static PyObject *dirstate_item_new_added(PyTypeObject *subtype)
380 {
380 {
381 dirstateItemObject *t;
381 dirstateItemObject *t;
382 t = (dirstateItemObject *)subtype->tp_alloc(subtype, 1);
382 t = (dirstateItemObject *)subtype->tp_alloc(subtype, 1);
383 if (!t) {
383 if (!t) {
384 return NULL;
384 return NULL;
385 }
385 }
386 t->flags = dirstate_flag_wc_tracked;
386 t->flags = dirstate_flag_wc_tracked;
387 t->mode = 0;
387 t->mode = 0;
388 t->size = dirstate_v1_nonnormal;
388 t->size = dirstate_v1_nonnormal;
389 t->mtime = ambiguous_time;
389 t->mtime = ambiguous_time;
390 return (PyObject *)t;
390 return (PyObject *)t;
391 };
391 };
392
392
393 /* constructor to help legacy API to build a new "merged" item
393 /* constructor to help legacy API to build a new "merged" item
394
394
395 Should eventually be removed */
395 Should eventually be removed */
396 static PyObject *dirstate_item_new_merged(PyTypeObject *subtype)
396 static PyObject *dirstate_item_new_merged(PyTypeObject *subtype)
397 {
397 {
398 dirstateItemObject *t;
398 dirstateItemObject *t;
399 t = (dirstateItemObject *)subtype->tp_alloc(subtype, 1);
399 t = (dirstateItemObject *)subtype->tp_alloc(subtype, 1);
400 if (!t) {
400 if (!t) {
401 return NULL;
401 return NULL;
402 }
402 }
403 t->flags = (dirstate_flag_wc_tracked | dirstate_flag_p1_tracked |
403 t->flags = (dirstate_flag_wc_tracked | dirstate_flag_p1_tracked |
404 dirstate_flag_p2_tracked | dirstate_flag_merged);
404 dirstate_flag_p2_tracked | dirstate_flag_merged);
405 t->mode = 0;
405 t->mode = 0;
406 t->size = dirstate_v1_from_p2;
406 t->size = dirstate_v1_from_p2;
407 t->mtime = ambiguous_time;
407 t->mtime = ambiguous_time;
408 return (PyObject *)t;
408 return (PyObject *)t;
409 };
409 };
410
410
411 /* constructor to help legacy API to build a new "from_p2" item
411 /* constructor to help legacy API to build a new "from_p2" item
412
412
413 Should eventually be removed */
413 Should eventually be removed */
414 static PyObject *dirstate_item_new_from_p2(PyTypeObject *subtype)
414 static PyObject *dirstate_item_new_from_p2(PyTypeObject *subtype)
415 {
415 {
416 /* We do all the initialization here and not a tp_init function because
416 /* We do all the initialization here and not a tp_init function because
417 * dirstate_item is immutable. */
417 * dirstate_item is immutable. */
418 dirstateItemObject *t;
418 dirstateItemObject *t;
419 t = (dirstateItemObject *)subtype->tp_alloc(subtype, 1);
419 t = (dirstateItemObject *)subtype->tp_alloc(subtype, 1);
420 if (!t) {
420 if (!t) {
421 return NULL;
421 return NULL;
422 }
422 }
423 t->flags = (dirstate_flag_wc_tracked | dirstate_flag_p2_tracked |
423 t->flags = (dirstate_flag_wc_tracked | dirstate_flag_p2_tracked |
424 dirstate_flag_clean_p2);
424 dirstate_flag_clean_p2);
425 t->mode = 0;
425 t->mode = 0;
426 t->size = dirstate_v1_from_p2;
426 t->size = dirstate_v1_from_p2;
427 t->mtime = ambiguous_time;
427 t->mtime = ambiguous_time;
428 return (PyObject *)t;
428 return (PyObject *)t;
429 };
429 };
430
430
431 /* constructor to help legacy API to build a new "possibly_dirty" item
431 /* constructor to help legacy API to build a new "possibly_dirty" item
432
432
433 Should eventually be removed */
433 Should eventually be removed */
434 static PyObject *dirstate_item_new_possibly_dirty(PyTypeObject *subtype)
434 static PyObject *dirstate_item_new_possibly_dirty(PyTypeObject *subtype)
435 {
435 {
436 /* We do all the initialization here and not a tp_init function because
436 /* We do all the initialization here and not a tp_init function because
437 * dirstate_item is immutable. */
437 * dirstate_item is immutable. */
438 dirstateItemObject *t;
438 dirstateItemObject *t;
439 t = (dirstateItemObject *)subtype->tp_alloc(subtype, 1);
439 t = (dirstateItemObject *)subtype->tp_alloc(subtype, 1);
440 if (!t) {
440 if (!t) {
441 return NULL;
441 return NULL;
442 }
442 }
443 t->flags = (dirstate_flag_wc_tracked | dirstate_flag_p1_tracked |
443 t->flags = (dirstate_flag_wc_tracked | dirstate_flag_p1_tracked |
444 dirstate_flag_possibly_dirty);
444 dirstate_flag_possibly_dirty);
445 t->mode = 0;
445 t->mode = 0;
446 t->size = dirstate_v1_nonnormal;
446 t->size = dirstate_v1_nonnormal;
447 t->mtime = ambiguous_time;
447 t->mtime = ambiguous_time;
448 return (PyObject *)t;
448 return (PyObject *)t;
449 };
449 };
450
450
451 /* constructor to help legacy API to build a new "normal" item
451 /* constructor to help legacy API to build a new "normal" item
452
452
453 Should eventually be removed */
453 Should eventually be removed */
454 static PyObject *dirstate_item_new_normal(PyTypeObject *subtype, PyObject *args)
454 static PyObject *dirstate_item_new_normal(PyTypeObject *subtype, PyObject *args)
455 {
455 {
456 /* We do all the initialization here and not a tp_init function because
456 /* We do all the initialization here and not a tp_init function because
457 * dirstate_item is immutable. */
457 * dirstate_item is immutable. */
458 dirstateItemObject *t;
458 dirstateItemObject *t;
459 int size, mode, mtime;
459 int size, mode, mtime;
460 if (!PyArg_ParseTuple(args, "iii", &mode, &size, &mtime)) {
460 if (!PyArg_ParseTuple(args, "iii", &mode, &size, &mtime)) {
461 return NULL;
461 return NULL;
462 }
462 }
463
463
464 t = (dirstateItemObject *)subtype->tp_alloc(subtype, 1);
464 t = (dirstateItemObject *)subtype->tp_alloc(subtype, 1);
465 if (!t) {
465 if (!t) {
466 return NULL;
466 return NULL;
467 }
467 }
468 t->flags = (dirstate_flag_wc_tracked | dirstate_flag_p1_tracked);
468 t->flags = (dirstate_flag_wc_tracked | dirstate_flag_p1_tracked);
469 t->mode = mode;
469 t->mode = mode;
470 t->size = size;
470 t->size = size;
471 t->mtime = mtime;
471 t->mtime = mtime;
472 return (PyObject *)t;
472 return (PyObject *)t;
473 };
473 };
474
474
475 /* This means the next status call will have to actually check its content
475 /* This means the next status call will have to actually check its content
476 to make sure it is correct. */
476 to make sure it is correct. */
477 static PyObject *dirstate_item_set_possibly_dirty(dirstateItemObject *self)
477 static PyObject *dirstate_item_set_possibly_dirty(dirstateItemObject *self)
478 {
478 {
479 self->flags |= dirstate_flag_possibly_dirty;
479 self->flags |= dirstate_flag_possibly_dirty;
480 Py_RETURN_NONE;
480 Py_RETURN_NONE;
481 }
481 }
482
482
483 /* See docstring of the python implementation for details */
483 /* See docstring of the python implementation for details */
484 static PyObject *dirstate_item_set_clean(dirstateItemObject *self,
484 static PyObject *dirstate_item_set_clean(dirstateItemObject *self,
485 PyObject *args)
485 PyObject *args)
486 {
486 {
487 int size, mode, mtime;
487 int size, mode, mtime;
488 if (!PyArg_ParseTuple(args, "iii", &mode, &size, &mtime)) {
488 if (!PyArg_ParseTuple(args, "iii", &mode, &size, &mtime)) {
489 return NULL;
489 return NULL;
490 }
490 }
491 self->flags = dirstate_flag_wc_tracked | dirstate_flag_p1_tracked;
491 self->flags = dirstate_flag_wc_tracked | dirstate_flag_p1_tracked;
492 self->mode = mode;
492 self->mode = mode;
493 self->size = size;
493 self->size = size;
494 self->mtime = mtime;
494 self->mtime = mtime;
495 Py_RETURN_NONE;
495 Py_RETURN_NONE;
496 }
496 }
497
497
498 static PyObject *dirstate_item_set_tracked(dirstateItemObject *self)
498 static PyObject *dirstate_item_set_tracked(dirstateItemObject *self)
499 {
499 {
500 self->flags |= dirstate_flag_wc_tracked;
500 self->flags |= dirstate_flag_wc_tracked;
501 self->flags |= dirstate_flag_possibly_dirty;
501 self->flags |= dirstate_flag_possibly_dirty;
502 /* size = None on the python side turns into size = NON_NORMAL when
502 /* size = None on the python side turns into size = NON_NORMAL when
503 * accessed. So the next line is currently required, but some future
503 * accessed. So the next line is currently required, but some future
504 * clean-up would be welcome. */
504 * clean-up would be welcome. */
505 self->size = dirstate_v1_nonnormal;
505 self->size = dirstate_v1_nonnormal;
506 Py_RETURN_NONE;
506 Py_RETURN_NONE;
507 }
507 }
508
508
509 static PyObject *dirstate_item_set_untracked(dirstateItemObject *self)
509 static PyObject *dirstate_item_set_untracked(dirstateItemObject *self)
510 {
510 {
511 self->flags &= ~dirstate_flag_wc_tracked;
511 self->flags &= ~dirstate_flag_wc_tracked;
512 self->mode = 0;
512 self->mode = 0;
513 self->mtime = 0;
513 self->mtime = 0;
514 self->size = 0;
514 self->size = 0;
515 Py_RETURN_NONE;
515 Py_RETURN_NONE;
516 }
516 }
517
517
518 static PyObject *dirstate_item_drop_merge_data(dirstateItemObject *self)
518 static PyObject *dirstate_item_drop_merge_data(dirstateItemObject *self)
519 {
519 {
520 if (dirstate_item_c_merged(self) || dirstate_item_c_from_p2(self)) {
520 if (dirstate_item_c_merged(self) || dirstate_item_c_from_p2(self)) {
521 if (dirstate_item_c_merged(self)) {
521 if (dirstate_item_c_merged(self)) {
522 self->flags |= dirstate_flag_p1_tracked;
522 self->flags |= dirstate_flag_p1_tracked;
523 } else {
523 } else {
524 self->flags &= ~dirstate_flag_p1_tracked;
524 self->flags &= ~dirstate_flag_p1_tracked;
525 }
525 }
526 self->flags &=
526 self->flags &=
527 ~(dirstate_flag_merged | dirstate_flag_clean_p1 |
527 ~(dirstate_flag_merged | dirstate_flag_clean_p1 |
528 dirstate_flag_clean_p2 | dirstate_flag_p2_tracked);
528 dirstate_flag_clean_p2 | dirstate_flag_p2_tracked);
529 self->flags |= dirstate_flag_possibly_dirty;
529 self->flags |= dirstate_flag_possibly_dirty;
530 self->mode = 0;
530 self->mode = 0;
531 self->mtime = 0;
531 self->mtime = 0;
532 /* size = None on the python side turns into size = NON_NORMAL
532 /* size = None on the python side turns into size = NON_NORMAL
533 * when accessed. So the next line is currently required, but
533 * when accessed. So the next line is currently required, but
534 * some future clean-up would be welcome. */
534 * some future clean-up would be welcome. */
535 self->size = dirstate_v1_nonnormal;
535 self->size = dirstate_v1_nonnormal;
536 }
536 }
537 Py_RETURN_NONE;
537 Py_RETURN_NONE;
538 }
538 }
539 static PyMethodDef dirstate_item_methods[] = {
539 static PyMethodDef dirstate_item_methods[] = {
540 {"v1_state", (PyCFunction)dirstate_item_v1_state, METH_NOARGS,
540 {"v1_state", (PyCFunction)dirstate_item_v1_state, METH_NOARGS,
541 "return a \"state\" suitable for v1 serialization"},
541 "return a \"state\" suitable for v1 serialization"},
542 {"v1_mode", (PyCFunction)dirstate_item_v1_mode, METH_NOARGS,
542 {"v1_mode", (PyCFunction)dirstate_item_v1_mode, METH_NOARGS,
543 "return a \"mode\" suitable for v1 serialization"},
543 "return a \"mode\" suitable for v1 serialization"},
544 {"v1_size", (PyCFunction)dirstate_item_v1_size, METH_NOARGS,
544 {"v1_size", (PyCFunction)dirstate_item_v1_size, METH_NOARGS,
545 "return a \"size\" suitable for v1 serialization"},
545 "return a \"size\" suitable for v1 serialization"},
546 {"v1_mtime", (PyCFunction)dirstate_item_v1_mtime, METH_NOARGS,
546 {"v1_mtime", (PyCFunction)dirstate_item_v1_mtime, METH_NOARGS,
547 "return a \"mtime\" suitable for v1 serialization"},
547 "return a \"mtime\" suitable for v1 serialization"},
548 {"need_delay", (PyCFunction)dirstate_item_need_delay, METH_O,
548 {"need_delay", (PyCFunction)dirstate_item_need_delay, METH_O,
549 "True if the stored mtime would be ambiguous with the current time"},
549 "True if the stored mtime would be ambiguous with the current time"},
550 {"from_v1_data", (PyCFunction)dirstate_item_from_v1_meth,
550 {"from_v1_data", (PyCFunction)dirstate_item_from_v1_meth,
551 METH_VARARGS | METH_CLASS, "build a new DirstateItem object from V1 data"},
551 METH_VARARGS | METH_CLASS, "build a new DirstateItem object from V1 data"},
552 {"new_added", (PyCFunction)dirstate_item_new_added,
552 {"new_added", (PyCFunction)dirstate_item_new_added,
553 METH_NOARGS | METH_CLASS,
553 METH_NOARGS | METH_CLASS,
554 "constructor to help legacy API to build a new \"added\" item"},
554 "constructor to help legacy API to build a new \"added\" item"},
555 {"new_merged", (PyCFunction)dirstate_item_new_merged,
555 {"new_merged", (PyCFunction)dirstate_item_new_merged,
556 METH_NOARGS | METH_CLASS,
556 METH_NOARGS | METH_CLASS,
557 "constructor to help legacy API to build a new \"merged\" item"},
557 "constructor to help legacy API to build a new \"merged\" item"},
558 {"new_from_p2", (PyCFunction)dirstate_item_new_from_p2,
558 {"new_from_p2", (PyCFunction)dirstate_item_new_from_p2,
559 METH_NOARGS | METH_CLASS,
559 METH_NOARGS | METH_CLASS,
560 "constructor to help legacy API to build a new \"from_p2\" item"},
560 "constructor to help legacy API to build a new \"from_p2\" item"},
561 {"new_possibly_dirty", (PyCFunction)dirstate_item_new_possibly_dirty,
561 {"new_possibly_dirty", (PyCFunction)dirstate_item_new_possibly_dirty,
562 METH_NOARGS | METH_CLASS,
562 METH_NOARGS | METH_CLASS,
563 "constructor to help legacy API to build a new \"possibly_dirty\" item"},
563 "constructor to help legacy API to build a new \"possibly_dirty\" item"},
564 {"new_normal", (PyCFunction)dirstate_item_new_normal,
564 {"new_normal", (PyCFunction)dirstate_item_new_normal,
565 METH_VARARGS | METH_CLASS,
565 METH_VARARGS | METH_CLASS,
566 "constructor to help legacy API to build a new \"normal\" item"},
566 "constructor to help legacy API to build a new \"normal\" item"},
567 {"set_possibly_dirty", (PyCFunction)dirstate_item_set_possibly_dirty,
567 {"set_possibly_dirty", (PyCFunction)dirstate_item_set_possibly_dirty,
568 METH_NOARGS, "mark a file as \"possibly dirty\""},
568 METH_NOARGS, "mark a file as \"possibly dirty\""},
569 {"set_clean", (PyCFunction)dirstate_item_set_clean, METH_VARARGS,
569 {"set_clean", (PyCFunction)dirstate_item_set_clean, METH_VARARGS,
570 "mark a file as \"clean\""},
570 "mark a file as \"clean\""},
571 {"set_tracked", (PyCFunction)dirstate_item_set_tracked, METH_NOARGS,
571 {"set_tracked", (PyCFunction)dirstate_item_set_tracked, METH_NOARGS,
572 "mark a file as \"tracked\""},
572 "mark a file as \"tracked\""},
573 {"set_untracked", (PyCFunction)dirstate_item_set_untracked, METH_NOARGS,
573 {"set_untracked", (PyCFunction)dirstate_item_set_untracked, METH_NOARGS,
574 "mark a file as \"untracked\""},
574 "mark a file as \"untracked\""},
575 {"drop_merge_data", (PyCFunction)dirstate_item_drop_merge_data, METH_NOARGS,
575 {"drop_merge_data", (PyCFunction)dirstate_item_drop_merge_data, METH_NOARGS,
576 "remove all \"merge-only\" from a DirstateItem"},
576 "remove all \"merge-only\" from a DirstateItem"},
577 {NULL} /* Sentinel */
577 {NULL} /* Sentinel */
578 };
578 };
579
579
580 static PyObject *dirstate_item_get_mode(dirstateItemObject *self)
580 static PyObject *dirstate_item_get_mode(dirstateItemObject *self)
581 {
581 {
582 return PyInt_FromLong(dirstate_item_c_v1_mode(self));
582 return PyInt_FromLong(dirstate_item_c_v1_mode(self));
583 };
583 };
584
584
585 static PyObject *dirstate_item_get_size(dirstateItemObject *self)
585 static PyObject *dirstate_item_get_size(dirstateItemObject *self)
586 {
586 {
587 return PyInt_FromLong(dirstate_item_c_v1_size(self));
587 return PyInt_FromLong(dirstate_item_c_v1_size(self));
588 };
588 };
589
589
590 static PyObject *dirstate_item_get_mtime(dirstateItemObject *self)
590 static PyObject *dirstate_item_get_mtime(dirstateItemObject *self)
591 {
591 {
592 return PyInt_FromLong(dirstate_item_c_v1_mtime(self));
592 return PyInt_FromLong(dirstate_item_c_v1_mtime(self));
593 };
593 };
594
594
595 static PyObject *dirstate_item_get_state(dirstateItemObject *self)
595 static PyObject *dirstate_item_get_state(dirstateItemObject *self)
596 {
596 {
597 char state = dirstate_item_c_v1_state(self);
597 char state = dirstate_item_c_v1_state(self);
598 return PyBytes_FromStringAndSize(&state, 1);
598 return PyBytes_FromStringAndSize(&state, 1);
599 };
599 };
600
600
601 static PyObject *dirstate_item_get_tracked(dirstateItemObject *self)
601 static PyObject *dirstate_item_get_tracked(dirstateItemObject *self)
602 {
602 {
603 if (dirstate_item_c_tracked(self)) {
603 if (dirstate_item_c_tracked(self)) {
604 Py_RETURN_TRUE;
604 Py_RETURN_TRUE;
605 } else {
605 } else {
606 Py_RETURN_FALSE;
606 Py_RETURN_FALSE;
607 }
607 }
608 };
608 };
609
609
610 static PyObject *dirstate_item_get_added(dirstateItemObject *self)
610 static PyObject *dirstate_item_get_added(dirstateItemObject *self)
611 {
611 {
612 if (dirstate_item_c_added(self)) {
612 if (dirstate_item_c_added(self)) {
613 Py_RETURN_TRUE;
613 Py_RETURN_TRUE;
614 } else {
614 } else {
615 Py_RETURN_FALSE;
615 Py_RETURN_FALSE;
616 }
616 }
617 };
617 };
618
618
619 static PyObject *dirstate_item_get_merged(dirstateItemObject *self)
619 static PyObject *dirstate_item_get_merged(dirstateItemObject *self)
620 {
620 {
621 if (dirstate_item_c_merged(self)) {
621 if (dirstate_item_c_merged(self)) {
622 Py_RETURN_TRUE;
622 Py_RETURN_TRUE;
623 } else {
623 } else {
624 Py_RETURN_FALSE;
624 Py_RETURN_FALSE;
625 }
625 }
626 };
626 };
627
627
628 static PyObject *dirstate_item_get_merged_removed(dirstateItemObject *self)
628 static PyObject *dirstate_item_get_merged_removed(dirstateItemObject *self)
629 {
629 {
630 if (dirstate_item_c_merged_removed(self)) {
630 if (dirstate_item_c_merged_removed(self)) {
631 Py_RETURN_TRUE;
631 Py_RETURN_TRUE;
632 } else {
632 } else {
633 Py_RETURN_FALSE;
633 Py_RETURN_FALSE;
634 }
634 }
635 };
635 };
636
636
637 static PyObject *dirstate_item_get_from_p2(dirstateItemObject *self)
637 static PyObject *dirstate_item_get_from_p2(dirstateItemObject *self)
638 {
638 {
639 if (dirstate_item_c_from_p2(self)) {
639 if (dirstate_item_c_from_p2(self)) {
640 Py_RETURN_TRUE;
640 Py_RETURN_TRUE;
641 } else {
641 } else {
642 Py_RETURN_FALSE;
642 Py_RETURN_FALSE;
643 }
643 }
644 };
644 };
645
645
646 static PyObject *dirstate_item_get_from_p2_removed(dirstateItemObject *self)
646 static PyObject *dirstate_item_get_from_p2_removed(dirstateItemObject *self)
647 {
647 {
648 if (dirstate_item_c_from_p2_removed(self)) {
648 if (dirstate_item_c_from_p2_removed(self)) {
649 Py_RETURN_TRUE;
649 Py_RETURN_TRUE;
650 } else {
650 } else {
651 Py_RETURN_FALSE;
651 Py_RETURN_FALSE;
652 }
652 }
653 };
653 };
654
654
655 static PyObject *dirstate_item_get_removed(dirstateItemObject *self)
655 static PyObject *dirstate_item_get_removed(dirstateItemObject *self)
656 {
656 {
657 if (dirstate_item_c_removed(self)) {
657 if (dirstate_item_c_removed(self)) {
658 Py_RETURN_TRUE;
658 Py_RETURN_TRUE;
659 } else {
659 } else {
660 Py_RETURN_FALSE;
660 Py_RETURN_FALSE;
661 }
661 }
662 };
662 };
663
663
664 static PyObject *dm_nonnormal(dirstateItemObject *self)
665 {
666 if ((dirstate_item_c_v1_state(self) != 'n') ||
667 (dirstate_item_c_v1_mtime(self) == ambiguous_time)) {
668 Py_RETURN_TRUE;
669 } else {
670 Py_RETURN_FALSE;
671 }
672 };
673 static PyObject *dm_otherparent(dirstateItemObject *self)
674 {
675 if (dirstate_item_c_v1_mtime(self) == dirstate_v1_from_p2) {
676 Py_RETURN_TRUE;
677 } else {
678 Py_RETURN_FALSE;
679 }
680 };
681
682 static PyGetSetDef dirstate_item_getset[] = {
664 static PyGetSetDef dirstate_item_getset[] = {
683 {"mode", (getter)dirstate_item_get_mode, NULL, "mode", NULL},
665 {"mode", (getter)dirstate_item_get_mode, NULL, "mode", NULL},
684 {"size", (getter)dirstate_item_get_size, NULL, "size", NULL},
666 {"size", (getter)dirstate_item_get_size, NULL, "size", NULL},
685 {"mtime", (getter)dirstate_item_get_mtime, NULL, "mtime", NULL},
667 {"mtime", (getter)dirstate_item_get_mtime, NULL, "mtime", NULL},
686 {"state", (getter)dirstate_item_get_state, NULL, "state", NULL},
668 {"state", (getter)dirstate_item_get_state, NULL, "state", NULL},
687 {"tracked", (getter)dirstate_item_get_tracked, NULL, "tracked", NULL},
669 {"tracked", (getter)dirstate_item_get_tracked, NULL, "tracked", NULL},
688 {"added", (getter)dirstate_item_get_added, NULL, "added", NULL},
670 {"added", (getter)dirstate_item_get_added, NULL, "added", NULL},
689 {"merged_removed", (getter)dirstate_item_get_merged_removed, NULL,
671 {"merged_removed", (getter)dirstate_item_get_merged_removed, NULL,
690 "merged_removed", NULL},
672 "merged_removed", NULL},
691 {"merged", (getter)dirstate_item_get_merged, NULL, "merged", NULL},
673 {"merged", (getter)dirstate_item_get_merged, NULL, "merged", NULL},
692 {"from_p2_removed", (getter)dirstate_item_get_from_p2_removed, NULL,
674 {"from_p2_removed", (getter)dirstate_item_get_from_p2_removed, NULL,
693 "from_p2_removed", NULL},
675 "from_p2_removed", NULL},
694 {"from_p2", (getter)dirstate_item_get_from_p2, NULL, "from_p2", NULL},
676 {"from_p2", (getter)dirstate_item_get_from_p2, NULL, "from_p2", NULL},
695 {"removed", (getter)dirstate_item_get_removed, NULL, "removed", NULL},
677 {"removed", (getter)dirstate_item_get_removed, NULL, "removed", NULL},
696 {"dm_nonnormal", (getter)dm_nonnormal, NULL, "dm_nonnormal", NULL},
697 {"dm_otherparent", (getter)dm_otherparent, NULL, "dm_otherparent", NULL},
698 {NULL} /* Sentinel */
678 {NULL} /* Sentinel */
699 };
679 };
700
680
701 PyTypeObject dirstateItemType = {
681 PyTypeObject dirstateItemType = {
702 PyVarObject_HEAD_INIT(NULL, 0) /* header */
682 PyVarObject_HEAD_INIT(NULL, 0) /* header */
703 "dirstate_tuple", /* tp_name */
683 "dirstate_tuple", /* tp_name */
704 sizeof(dirstateItemObject), /* tp_basicsize */
684 sizeof(dirstateItemObject), /* tp_basicsize */
705 0, /* tp_itemsize */
685 0, /* tp_itemsize */
706 (destructor)dirstate_item_dealloc, /* tp_dealloc */
686 (destructor)dirstate_item_dealloc, /* tp_dealloc */
707 0, /* tp_print */
687 0, /* tp_print */
708 0, /* tp_getattr */
688 0, /* tp_getattr */
709 0, /* tp_setattr */
689 0, /* tp_setattr */
710 0, /* tp_compare */
690 0, /* tp_compare */
711 0, /* tp_repr */
691 0, /* tp_repr */
712 0, /* tp_as_number */
692 0, /* tp_as_number */
713 0, /* tp_as_sequence */
693 0, /* tp_as_sequence */
714 0, /* tp_as_mapping */
694 0, /* tp_as_mapping */
715 0, /* tp_hash */
695 0, /* tp_hash */
716 0, /* tp_call */
696 0, /* tp_call */
717 0, /* tp_str */
697 0, /* tp_str */
718 0, /* tp_getattro */
698 0, /* tp_getattro */
719 0, /* tp_setattro */
699 0, /* tp_setattro */
720 0, /* tp_as_buffer */
700 0, /* tp_as_buffer */
721 Py_TPFLAGS_DEFAULT, /* tp_flags */
701 Py_TPFLAGS_DEFAULT, /* tp_flags */
722 "dirstate tuple", /* tp_doc */
702 "dirstate tuple", /* tp_doc */
723 0, /* tp_traverse */
703 0, /* tp_traverse */
724 0, /* tp_clear */
704 0, /* tp_clear */
725 0, /* tp_richcompare */
705 0, /* tp_richcompare */
726 0, /* tp_weaklistoffset */
706 0, /* tp_weaklistoffset */
727 0, /* tp_iter */
707 0, /* tp_iter */
728 0, /* tp_iternext */
708 0, /* tp_iternext */
729 dirstate_item_methods, /* tp_methods */
709 dirstate_item_methods, /* tp_methods */
730 0, /* tp_members */
710 0, /* tp_members */
731 dirstate_item_getset, /* tp_getset */
711 dirstate_item_getset, /* tp_getset */
732 0, /* tp_base */
712 0, /* tp_base */
733 0, /* tp_dict */
713 0, /* tp_dict */
734 0, /* tp_descr_get */
714 0, /* tp_descr_get */
735 0, /* tp_descr_set */
715 0, /* tp_descr_set */
736 0, /* tp_dictoffset */
716 0, /* tp_dictoffset */
737 0, /* tp_init */
717 0, /* tp_init */
738 0, /* tp_alloc */
718 0, /* tp_alloc */
739 dirstate_item_new, /* tp_new */
719 dirstate_item_new, /* tp_new */
740 };
720 };
741
721
742 static PyObject *parse_dirstate(PyObject *self, PyObject *args)
722 static PyObject *parse_dirstate(PyObject *self, PyObject *args)
743 {
723 {
744 PyObject *dmap, *cmap, *parents = NULL, *ret = NULL;
724 PyObject *dmap, *cmap, *parents = NULL, *ret = NULL;
745 PyObject *fname = NULL, *cname = NULL, *entry = NULL;
725 PyObject *fname = NULL, *cname = NULL, *entry = NULL;
746 char state, *cur, *str, *cpos;
726 char state, *cur, *str, *cpos;
747 int mode, size, mtime;
727 int mode, size, mtime;
748 unsigned int flen, pos = 40;
728 unsigned int flen, pos = 40;
749 Py_ssize_t len = 40;
729 Py_ssize_t len = 40;
750 Py_ssize_t readlen;
730 Py_ssize_t readlen;
751
731
752 if (!PyArg_ParseTuple(
732 if (!PyArg_ParseTuple(
753 args, PY23("O!O!s#:parse_dirstate", "O!O!y#:parse_dirstate"),
733 args, PY23("O!O!s#:parse_dirstate", "O!O!y#:parse_dirstate"),
754 &PyDict_Type, &dmap, &PyDict_Type, &cmap, &str, &readlen)) {
734 &PyDict_Type, &dmap, &PyDict_Type, &cmap, &str, &readlen)) {
755 goto quit;
735 goto quit;
756 }
736 }
757
737
758 len = readlen;
738 len = readlen;
759
739
760 /* read parents */
740 /* read parents */
761 if (len < 40) {
741 if (len < 40) {
762 PyErr_SetString(PyExc_ValueError,
742 PyErr_SetString(PyExc_ValueError,
763 "too little data for parents");
743 "too little data for parents");
764 goto quit;
744 goto quit;
765 }
745 }
766
746
767 parents = Py_BuildValue(PY23("s#s#", "y#y#"), str, (Py_ssize_t)20,
747 parents = Py_BuildValue(PY23("s#s#", "y#y#"), str, (Py_ssize_t)20,
768 str + 20, (Py_ssize_t)20);
748 str + 20, (Py_ssize_t)20);
769 if (!parents) {
749 if (!parents) {
770 goto quit;
750 goto quit;
771 }
751 }
772
752
773 /* read filenames */
753 /* read filenames */
774 while (pos >= 40 && pos < len) {
754 while (pos >= 40 && pos < len) {
775 if (pos + 17 > len) {
755 if (pos + 17 > len) {
776 PyErr_SetString(PyExc_ValueError,
756 PyErr_SetString(PyExc_ValueError,
777 "overflow in dirstate");
757 "overflow in dirstate");
778 goto quit;
758 goto quit;
779 }
759 }
780 cur = str + pos;
760 cur = str + pos;
781 /* unpack header */
761 /* unpack header */
782 state = *cur;
762 state = *cur;
783 mode = getbe32(cur + 1);
763 mode = getbe32(cur + 1);
784 size = getbe32(cur + 5);
764 size = getbe32(cur + 5);
785 mtime = getbe32(cur + 9);
765 mtime = getbe32(cur + 9);
786 flen = getbe32(cur + 13);
766 flen = getbe32(cur + 13);
787 pos += 17;
767 pos += 17;
788 cur += 17;
768 cur += 17;
789 if (flen > len - pos) {
769 if (flen > len - pos) {
790 PyErr_SetString(PyExc_ValueError,
770 PyErr_SetString(PyExc_ValueError,
791 "overflow in dirstate");
771 "overflow in dirstate");
792 goto quit;
772 goto quit;
793 }
773 }
794
774
795 entry = (PyObject *)dirstate_item_from_v1_data(state, mode,
775 entry = (PyObject *)dirstate_item_from_v1_data(state, mode,
796 size, mtime);
776 size, mtime);
797 if (!entry)
777 if (!entry)
798 goto quit;
778 goto quit;
799 cpos = memchr(cur, 0, flen);
779 cpos = memchr(cur, 0, flen);
800 if (cpos) {
780 if (cpos) {
801 fname = PyBytes_FromStringAndSize(cur, cpos - cur);
781 fname = PyBytes_FromStringAndSize(cur, cpos - cur);
802 cname = PyBytes_FromStringAndSize(
782 cname = PyBytes_FromStringAndSize(
803 cpos + 1, flen - (cpos - cur) - 1);
783 cpos + 1, flen - (cpos - cur) - 1);
804 if (!fname || !cname ||
784 if (!fname || !cname ||
805 PyDict_SetItem(cmap, fname, cname) == -1 ||
785 PyDict_SetItem(cmap, fname, cname) == -1 ||
806 PyDict_SetItem(dmap, fname, entry) == -1) {
786 PyDict_SetItem(dmap, fname, entry) == -1) {
807 goto quit;
787 goto quit;
808 }
788 }
809 Py_DECREF(cname);
789 Py_DECREF(cname);
810 } else {
790 } else {
811 fname = PyBytes_FromStringAndSize(cur, flen);
791 fname = PyBytes_FromStringAndSize(cur, flen);
812 if (!fname ||
792 if (!fname ||
813 PyDict_SetItem(dmap, fname, entry) == -1) {
793 PyDict_SetItem(dmap, fname, entry) == -1) {
814 goto quit;
794 goto quit;
815 }
795 }
816 }
796 }
817 Py_DECREF(fname);
797 Py_DECREF(fname);
818 Py_DECREF(entry);
798 Py_DECREF(entry);
819 fname = cname = entry = NULL;
799 fname = cname = entry = NULL;
820 pos += flen;
800 pos += flen;
821 }
801 }
822
802
823 ret = parents;
803 ret = parents;
824 Py_INCREF(ret);
804 Py_INCREF(ret);
825 quit:
805 quit:
826 Py_XDECREF(fname);
806 Py_XDECREF(fname);
827 Py_XDECREF(cname);
807 Py_XDECREF(cname);
828 Py_XDECREF(entry);
808 Py_XDECREF(entry);
829 Py_XDECREF(parents);
809 Py_XDECREF(parents);
830 return ret;
810 return ret;
831 }
811 }
832
812
833 /*
813 /*
834 * Build a set of non-normal and other parent entries from the dirstate dmap
835 */
836 static PyObject *nonnormalotherparententries(PyObject *self, PyObject *args)
837 {
838 PyObject *dmap, *fname, *v;
839 PyObject *nonnset = NULL, *otherpset = NULL, *result = NULL;
840 Py_ssize_t pos;
841
842 if (!PyArg_ParseTuple(args, "O!:nonnormalentries", &PyDict_Type,
843 &dmap)) {
844 goto bail;
845 }
846
847 nonnset = PySet_New(NULL);
848 if (nonnset == NULL) {
849 goto bail;
850 }
851
852 otherpset = PySet_New(NULL);
853 if (otherpset == NULL) {
854 goto bail;
855 }
856
857 pos = 0;
858 while (PyDict_Next(dmap, &pos, &fname, &v)) {
859 dirstateItemObject *t;
860 if (!dirstate_tuple_check(v)) {
861 PyErr_SetString(PyExc_TypeError,
862 "expected a dirstate tuple");
863 goto bail;
864 }
865 t = (dirstateItemObject *)v;
866
867 if (dirstate_item_c_from_p2(t)) {
868 if (PySet_Add(otherpset, fname) == -1) {
869 goto bail;
870 }
871 }
872 if (!(t->flags & dirstate_flag_wc_tracked) ||
873 !(t->flags &
874 (dirstate_flag_p1_tracked | dirstate_flag_p2_tracked)) ||
875 (t->flags &
876 (dirstate_flag_possibly_dirty | dirstate_flag_merged))) {
877 if (PySet_Add(nonnset, fname) == -1) {
878 goto bail;
879 }
880 }
881 }
882
883 result = Py_BuildValue("(OO)", nonnset, otherpset);
884 if (result == NULL) {
885 goto bail;
886 }
887 Py_DECREF(nonnset);
888 Py_DECREF(otherpset);
889 return result;
890 bail:
891 Py_XDECREF(nonnset);
892 Py_XDECREF(otherpset);
893 Py_XDECREF(result);
894 return NULL;
895 }
896
897 /*
898 * Efficiently pack a dirstate object into its on-disk format.
814 * Efficiently pack a dirstate object into its on-disk format.
899 */
815 */
900 static PyObject *pack_dirstate(PyObject *self, PyObject *args)
816 static PyObject *pack_dirstate(PyObject *self, PyObject *args)
901 {
817 {
902 PyObject *packobj = NULL;
818 PyObject *packobj = NULL;
903 PyObject *map, *copymap, *pl, *mtime_unset = NULL;
819 PyObject *map, *copymap, *pl, *mtime_unset = NULL;
904 Py_ssize_t nbytes, pos, l;
820 Py_ssize_t nbytes, pos, l;
905 PyObject *k, *v = NULL, *pn;
821 PyObject *k, *v = NULL, *pn;
906 char *p, *s;
822 char *p, *s;
907 int now;
823 int now;
908
824
909 if (!PyArg_ParseTuple(args, "O!O!O!i:pack_dirstate", &PyDict_Type, &map,
825 if (!PyArg_ParseTuple(args, "O!O!O!i:pack_dirstate", &PyDict_Type, &map,
910 &PyDict_Type, &copymap, &PyTuple_Type, &pl,
826 &PyDict_Type, &copymap, &PyTuple_Type, &pl,
911 &now)) {
827 &now)) {
912 return NULL;
828 return NULL;
913 }
829 }
914
830
915 if (PyTuple_Size(pl) != 2) {
831 if (PyTuple_Size(pl) != 2) {
916 PyErr_SetString(PyExc_TypeError, "expected 2-element tuple");
832 PyErr_SetString(PyExc_TypeError, "expected 2-element tuple");
917 return NULL;
833 return NULL;
918 }
834 }
919
835
920 /* Figure out how much we need to allocate. */
836 /* Figure out how much we need to allocate. */
921 for (nbytes = 40, pos = 0; PyDict_Next(map, &pos, &k, &v);) {
837 for (nbytes = 40, pos = 0; PyDict_Next(map, &pos, &k, &v);) {
922 PyObject *c;
838 PyObject *c;
923 if (!PyBytes_Check(k)) {
839 if (!PyBytes_Check(k)) {
924 PyErr_SetString(PyExc_TypeError, "expected string key");
840 PyErr_SetString(PyExc_TypeError, "expected string key");
925 goto bail;
841 goto bail;
926 }
842 }
927 nbytes += PyBytes_GET_SIZE(k) + 17;
843 nbytes += PyBytes_GET_SIZE(k) + 17;
928 c = PyDict_GetItem(copymap, k);
844 c = PyDict_GetItem(copymap, k);
929 if (c) {
845 if (c) {
930 if (!PyBytes_Check(c)) {
846 if (!PyBytes_Check(c)) {
931 PyErr_SetString(PyExc_TypeError,
847 PyErr_SetString(PyExc_TypeError,
932 "expected string key");
848 "expected string key");
933 goto bail;
849 goto bail;
934 }
850 }
935 nbytes += PyBytes_GET_SIZE(c) + 1;
851 nbytes += PyBytes_GET_SIZE(c) + 1;
936 }
852 }
937 }
853 }
938
854
939 packobj = PyBytes_FromStringAndSize(NULL, nbytes);
855 packobj = PyBytes_FromStringAndSize(NULL, nbytes);
940 if (packobj == NULL) {
856 if (packobj == NULL) {
941 goto bail;
857 goto bail;
942 }
858 }
943
859
944 p = PyBytes_AS_STRING(packobj);
860 p = PyBytes_AS_STRING(packobj);
945
861
946 pn = PyTuple_GET_ITEM(pl, 0);
862 pn = PyTuple_GET_ITEM(pl, 0);
947 if (PyBytes_AsStringAndSize(pn, &s, &l) == -1 || l != 20) {
863 if (PyBytes_AsStringAndSize(pn, &s, &l) == -1 || l != 20) {
948 PyErr_SetString(PyExc_TypeError, "expected a 20-byte hash");
864 PyErr_SetString(PyExc_TypeError, "expected a 20-byte hash");
949 goto bail;
865 goto bail;
950 }
866 }
951 memcpy(p, s, l);
867 memcpy(p, s, l);
952 p += 20;
868 p += 20;
953 pn = PyTuple_GET_ITEM(pl, 1);
869 pn = PyTuple_GET_ITEM(pl, 1);
954 if (PyBytes_AsStringAndSize(pn, &s, &l) == -1 || l != 20) {
870 if (PyBytes_AsStringAndSize(pn, &s, &l) == -1 || l != 20) {
955 PyErr_SetString(PyExc_TypeError, "expected a 20-byte hash");
871 PyErr_SetString(PyExc_TypeError, "expected a 20-byte hash");
956 goto bail;
872 goto bail;
957 }
873 }
958 memcpy(p, s, l);
874 memcpy(p, s, l);
959 p += 20;
875 p += 20;
960
876
961 for (pos = 0; PyDict_Next(map, &pos, &k, &v);) {
877 for (pos = 0; PyDict_Next(map, &pos, &k, &v);) {
962 dirstateItemObject *tuple;
878 dirstateItemObject *tuple;
963 char state;
879 char state;
964 int mode, size, mtime;
880 int mode, size, mtime;
965 Py_ssize_t len, l;
881 Py_ssize_t len, l;
966 PyObject *o;
882 PyObject *o;
967 char *t;
883 char *t;
968
884
969 if (!dirstate_tuple_check(v)) {
885 if (!dirstate_tuple_check(v)) {
970 PyErr_SetString(PyExc_TypeError,
886 PyErr_SetString(PyExc_TypeError,
971 "expected a dirstate tuple");
887 "expected a dirstate tuple");
972 goto bail;
888 goto bail;
973 }
889 }
974 tuple = (dirstateItemObject *)v;
890 tuple = (dirstateItemObject *)v;
975
891
976 state = dirstate_item_c_v1_state(tuple);
892 state = dirstate_item_c_v1_state(tuple);
977 mode = dirstate_item_c_v1_mode(tuple);
893 mode = dirstate_item_c_v1_mode(tuple);
978 size = dirstate_item_c_v1_size(tuple);
894 size = dirstate_item_c_v1_size(tuple);
979 mtime = dirstate_item_c_v1_mtime(tuple);
895 mtime = dirstate_item_c_v1_mtime(tuple);
980 if (state == 'n' && mtime == now) {
896 if (state == 'n' && mtime == now) {
981 /* See pure/parsers.py:pack_dirstate for why we do
897 /* See pure/parsers.py:pack_dirstate for why we do
982 * this. */
898 * this. */
983 mtime = -1;
899 mtime = -1;
984 mtime_unset = (PyObject *)dirstate_item_from_v1_data(
900 mtime_unset = (PyObject *)dirstate_item_from_v1_data(
985 state, mode, size, mtime);
901 state, mode, size, mtime);
986 if (!mtime_unset) {
902 if (!mtime_unset) {
987 goto bail;
903 goto bail;
988 }
904 }
989 if (PyDict_SetItem(map, k, mtime_unset) == -1) {
905 if (PyDict_SetItem(map, k, mtime_unset) == -1) {
990 goto bail;
906 goto bail;
991 }
907 }
992 Py_DECREF(mtime_unset);
908 Py_DECREF(mtime_unset);
993 mtime_unset = NULL;
909 mtime_unset = NULL;
994 }
910 }
995 *p++ = state;
911 *p++ = state;
996 putbe32((uint32_t)mode, p);
912 putbe32((uint32_t)mode, p);
997 putbe32((uint32_t)size, p + 4);
913 putbe32((uint32_t)size, p + 4);
998 putbe32((uint32_t)mtime, p + 8);
914 putbe32((uint32_t)mtime, p + 8);
999 t = p + 12;
915 t = p + 12;
1000 p += 16;
916 p += 16;
1001 len = PyBytes_GET_SIZE(k);
917 len = PyBytes_GET_SIZE(k);
1002 memcpy(p, PyBytes_AS_STRING(k), len);
918 memcpy(p, PyBytes_AS_STRING(k), len);
1003 p += len;
919 p += len;
1004 o = PyDict_GetItem(copymap, k);
920 o = PyDict_GetItem(copymap, k);
1005 if (o) {
921 if (o) {
1006 *p++ = '\0';
922 *p++ = '\0';
1007 l = PyBytes_GET_SIZE(o);
923 l = PyBytes_GET_SIZE(o);
1008 memcpy(p, PyBytes_AS_STRING(o), l);
924 memcpy(p, PyBytes_AS_STRING(o), l);
1009 p += l;
925 p += l;
1010 len += l + 1;
926 len += l + 1;
1011 }
927 }
1012 putbe32((uint32_t)len, t);
928 putbe32((uint32_t)len, t);
1013 }
929 }
1014
930
1015 pos = p - PyBytes_AS_STRING(packobj);
931 pos = p - PyBytes_AS_STRING(packobj);
1016 if (pos != nbytes) {
932 if (pos != nbytes) {
1017 PyErr_Format(PyExc_SystemError, "bad dirstate size: %ld != %ld",
933 PyErr_Format(PyExc_SystemError, "bad dirstate size: %ld != %ld",
1018 (long)pos, (long)nbytes);
934 (long)pos, (long)nbytes);
1019 goto bail;
935 goto bail;
1020 }
936 }
1021
937
1022 return packobj;
938 return packobj;
1023 bail:
939 bail:
1024 Py_XDECREF(mtime_unset);
940 Py_XDECREF(mtime_unset);
1025 Py_XDECREF(packobj);
941 Py_XDECREF(packobj);
1026 Py_XDECREF(v);
942 Py_XDECREF(v);
1027 return NULL;
943 return NULL;
1028 }
944 }
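For orientation, here is a minimal sketch of how the packer above is driven from the Python side; it assumes the C module is loaded through mercurial.policy (as dirstatemap.py does further down) and the helper name is invented for illustration.

    # Hypothetical helper: serialize a dirstate map into the on-disk v1 format.
    from mercurial import policy

    parsers = policy.importmod('parsers')

    def pack(dmap, copymap, parents, now):
        # dmap maps filenames to DirstateItem objects, copymap maps copy
        # destinations to sources, parents is a pair of 20-byte nodes and
        # now is the current time as an integer.  Entries with state 'n'
        # and mtime == now are rewritten with mtime -1 (in both the output
        # and dmap) so they get re-examined on the next status run.
        return parsers.pack_dirstate(dmap, copymap, parents, now)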
1029
945
1030 #define BUMPED_FIX 1
946 #define BUMPED_FIX 1
1031 #define USING_SHA_256 2
947 #define USING_SHA_256 2
1032 #define FM1_HEADER_SIZE (4 + 8 + 2 + 2 + 1 + 1 + 1)
948 #define FM1_HEADER_SIZE (4 + 8 + 2 + 2 + 1 + 1 + 1)
1033
949
1034 static PyObject *readshas(const char *source, unsigned char num,
950 static PyObject *readshas(const char *source, unsigned char num,
1035 Py_ssize_t hashwidth)
951 Py_ssize_t hashwidth)
1036 {
952 {
1037 int i;
953 int i;
1038 PyObject *list = PyTuple_New(num);
954 PyObject *list = PyTuple_New(num);
1039 if (list == NULL) {
955 if (list == NULL) {
1040 return NULL;
956 return NULL;
1041 }
957 }
1042 for (i = 0; i < num; i++) {
958 for (i = 0; i < num; i++) {
1043 PyObject *hash = PyBytes_FromStringAndSize(source, hashwidth);
959 PyObject *hash = PyBytes_FromStringAndSize(source, hashwidth);
1044 if (hash == NULL) {
960 if (hash == NULL) {
1045 Py_DECREF(list);
961 Py_DECREF(list);
1046 return NULL;
962 return NULL;
1047 }
963 }
1048 PyTuple_SET_ITEM(list, i, hash);
964 PyTuple_SET_ITEM(list, i, hash);
1049 source += hashwidth;
965 source += hashwidth;
1050 }
966 }
1051 return list;
967 return list;
1052 }
968 }
1053
969
1054 static PyObject *fm1readmarker(const char *databegin, const char *dataend,
970 static PyObject *fm1readmarker(const char *databegin, const char *dataend,
1055 uint32_t *msize)
971 uint32_t *msize)
1056 {
972 {
1057 const char *data = databegin;
973 const char *data = databegin;
1058 const char *meta;
974 const char *meta;
1059
975
1060 double mtime;
976 double mtime;
1061 int16_t tz;
977 int16_t tz;
1062 uint16_t flags;
978 uint16_t flags;
1063 unsigned char nsuccs, nparents, nmetadata;
979 unsigned char nsuccs, nparents, nmetadata;
1064 Py_ssize_t hashwidth = 20;
980 Py_ssize_t hashwidth = 20;
1065
981
1066 PyObject *prec = NULL, *parents = NULL, *succs = NULL;
982 PyObject *prec = NULL, *parents = NULL, *succs = NULL;
1067 PyObject *metadata = NULL, *ret = NULL;
983 PyObject *metadata = NULL, *ret = NULL;
1068 int i;
984 int i;
1069
985
1070 if (data + FM1_HEADER_SIZE > dataend) {
986 if (data + FM1_HEADER_SIZE > dataend) {
1071 goto overflow;
987 goto overflow;
1072 }
988 }
1073
989
1074 *msize = getbe32(data);
990 *msize = getbe32(data);
1075 data += 4;
991 data += 4;
1076 mtime = getbefloat64(data);
992 mtime = getbefloat64(data);
1077 data += 8;
993 data += 8;
1078 tz = getbeint16(data);
994 tz = getbeint16(data);
1079 data += 2;
995 data += 2;
1080 flags = getbeuint16(data);
996 flags = getbeuint16(data);
1081 data += 2;
997 data += 2;
1082
998
1083 if (flags & USING_SHA_256) {
999 if (flags & USING_SHA_256) {
1084 hashwidth = 32;
1000 hashwidth = 32;
1085 }
1001 }
1086
1002
1087 nsuccs = (unsigned char)(*data++);
1003 nsuccs = (unsigned char)(*data++);
1088 nparents = (unsigned char)(*data++);
1004 nparents = (unsigned char)(*data++);
1089 nmetadata = (unsigned char)(*data++);
1005 nmetadata = (unsigned char)(*data++);
1090
1006
1091 if (databegin + *msize > dataend) {
1007 if (databegin + *msize > dataend) {
1092 goto overflow;
1008 goto overflow;
1093 }
1009 }
1094 dataend = databegin + *msize; /* narrow down to marker size */
1010 dataend = databegin + *msize; /* narrow down to marker size */
1095
1011
1096 if (data + hashwidth > dataend) {
1012 if (data + hashwidth > dataend) {
1097 goto overflow;
1013 goto overflow;
1098 }
1014 }
1099 prec = PyBytes_FromStringAndSize(data, hashwidth);
1015 prec = PyBytes_FromStringAndSize(data, hashwidth);
1100 data += hashwidth;
1016 data += hashwidth;
1101 if (prec == NULL) {
1017 if (prec == NULL) {
1102 goto bail;
1018 goto bail;
1103 }
1019 }
1104
1020
1105 if (data + nsuccs * hashwidth > dataend) {
1021 if (data + nsuccs * hashwidth > dataend) {
1106 goto overflow;
1022 goto overflow;
1107 }
1023 }
1108 succs = readshas(data, nsuccs, hashwidth);
1024 succs = readshas(data, nsuccs, hashwidth);
1109 if (succs == NULL) {
1025 if (succs == NULL) {
1110 goto bail;
1026 goto bail;
1111 }
1027 }
1112 data += nsuccs * hashwidth;
1028 data += nsuccs * hashwidth;
1113
1029
1114 if (nparents == 1 || nparents == 2) {
1030 if (nparents == 1 || nparents == 2) {
1115 if (data + nparents * hashwidth > dataend) {
1031 if (data + nparents * hashwidth > dataend) {
1116 goto overflow;
1032 goto overflow;
1117 }
1033 }
1118 parents = readshas(data, nparents, hashwidth);
1034 parents = readshas(data, nparents, hashwidth);
1119 if (parents == NULL) {
1035 if (parents == NULL) {
1120 goto bail;
1036 goto bail;
1121 }
1037 }
1122 data += nparents * hashwidth;
1038 data += nparents * hashwidth;
1123 } else {
1039 } else {
1124 parents = Py_None;
1040 parents = Py_None;
1125 Py_INCREF(parents);
1041 Py_INCREF(parents);
1126 }
1042 }
1127
1043
1128 if (data + 2 * nmetadata > dataend) {
1044 if (data + 2 * nmetadata > dataend) {
1129 goto overflow;
1045 goto overflow;
1130 }
1046 }
1131 meta = data + (2 * nmetadata);
1047 meta = data + (2 * nmetadata);
1132 metadata = PyTuple_New(nmetadata);
1048 metadata = PyTuple_New(nmetadata);
1133 if (metadata == NULL) {
1049 if (metadata == NULL) {
1134 goto bail;
1050 goto bail;
1135 }
1051 }
1136 for (i = 0; i < nmetadata; i++) {
1052 for (i = 0; i < nmetadata; i++) {
1137 PyObject *tmp, *left = NULL, *right = NULL;
1053 PyObject *tmp, *left = NULL, *right = NULL;
1138 Py_ssize_t leftsize = (unsigned char)(*data++);
1054 Py_ssize_t leftsize = (unsigned char)(*data++);
1139 Py_ssize_t rightsize = (unsigned char)(*data++);
1055 Py_ssize_t rightsize = (unsigned char)(*data++);
1140 if (meta + leftsize + rightsize > dataend) {
1056 if (meta + leftsize + rightsize > dataend) {
1141 goto overflow;
1057 goto overflow;
1142 }
1058 }
1143 left = PyBytes_FromStringAndSize(meta, leftsize);
1059 left = PyBytes_FromStringAndSize(meta, leftsize);
1144 meta += leftsize;
1060 meta += leftsize;
1145 right = PyBytes_FromStringAndSize(meta, rightsize);
1061 right = PyBytes_FromStringAndSize(meta, rightsize);
1146 meta += rightsize;
1062 meta += rightsize;
1147 tmp = PyTuple_New(2);
1063 tmp = PyTuple_New(2);
1148 if (!left || !right || !tmp) {
1064 if (!left || !right || !tmp) {
1149 Py_XDECREF(left);
1065 Py_XDECREF(left);
1150 Py_XDECREF(right);
1066 Py_XDECREF(right);
1151 Py_XDECREF(tmp);
1067 Py_XDECREF(tmp);
1152 goto bail;
1068 goto bail;
1153 }
1069 }
1154 PyTuple_SET_ITEM(tmp, 0, left);
1070 PyTuple_SET_ITEM(tmp, 0, left);
1155 PyTuple_SET_ITEM(tmp, 1, right);
1071 PyTuple_SET_ITEM(tmp, 1, right);
1156 PyTuple_SET_ITEM(metadata, i, tmp);
1072 PyTuple_SET_ITEM(metadata, i, tmp);
1157 }
1073 }
1158 ret = Py_BuildValue("(OOHO(di)O)", prec, succs, flags, metadata, mtime,
1074 ret = Py_BuildValue("(OOHO(di)O)", prec, succs, flags, metadata, mtime,
1159 (int)tz * 60, parents);
1075 (int)tz * 60, parents);
1160 goto bail; /* return successfully */
1076 goto bail; /* return successfully */
1161
1077
1162 overflow:
1078 overflow:
1163 PyErr_SetString(PyExc_ValueError, "overflow in obsstore");
1079 PyErr_SetString(PyExc_ValueError, "overflow in obsstore");
1164 bail:
1080 bail:
1165 Py_XDECREF(prec);
1081 Py_XDECREF(prec);
1166 Py_XDECREF(succs);
1082 Py_XDECREF(succs);
1167 Py_XDECREF(metadata);
1083 Py_XDECREF(metadata);
1168 Py_XDECREF(parents);
1084 Py_XDECREF(parents);
1169 return ret;
1085 return ret;
1170 }
1086 }
1171
1087
1172 static PyObject *fm1readmarkers(PyObject *self, PyObject *args)
1088 static PyObject *fm1readmarkers(PyObject *self, PyObject *args)
1173 {
1089 {
1174 const char *data, *dataend;
1090 const char *data, *dataend;
1175 Py_ssize_t datalen, offset, stop;
1091 Py_ssize_t datalen, offset, stop;
1176 PyObject *markers = NULL;
1092 PyObject *markers = NULL;
1177
1093
1178 if (!PyArg_ParseTuple(args, PY23("s#nn", "y#nn"), &data, &datalen,
1094 if (!PyArg_ParseTuple(args, PY23("s#nn", "y#nn"), &data, &datalen,
1179 &offset, &stop)) {
1095 &offset, &stop)) {
1180 return NULL;
1096 return NULL;
1181 }
1097 }
1182 if (offset < 0) {
1098 if (offset < 0) {
1183 PyErr_SetString(PyExc_ValueError,
1099 PyErr_SetString(PyExc_ValueError,
1184 "invalid negative offset in fm1readmarkers");
1100 "invalid negative offset in fm1readmarkers");
1185 return NULL;
1101 return NULL;
1186 }
1102 }
1187 if (stop > datalen) {
1103 if (stop > datalen) {
1188 PyErr_SetString(
1104 PyErr_SetString(
1189 PyExc_ValueError,
1105 PyExc_ValueError,
1190 "stop longer than data length in fm1readmarkers");
1106 "stop longer than data length in fm1readmarkers");
1191 return NULL;
1107 return NULL;
1192 }
1108 }
1193 dataend = data + datalen;
1109 dataend = data + datalen;
1194 data += offset;
1110 data += offset;
1195 markers = PyList_New(0);
1111 markers = PyList_New(0);
1196 if (!markers) {
1112 if (!markers) {
1197 return NULL;
1113 return NULL;
1198 }
1114 }
1199 while (offset < stop) {
1115 while (offset < stop) {
1200 uint32_t msize;
1116 uint32_t msize;
1201 int error;
1117 int error;
1202 PyObject *record = fm1readmarker(data, dataend, &msize);
1118 PyObject *record = fm1readmarker(data, dataend, &msize);
1203 if (!record) {
1119 if (!record) {
1204 goto bail;
1120 goto bail;
1205 }
1121 }
1206 error = PyList_Append(markers, record);
1122 error = PyList_Append(markers, record);
1207 Py_DECREF(record);
1123 Py_DECREF(record);
1208 if (error) {
1124 if (error) {
1209 goto bail;
1125 goto bail;
1210 }
1126 }
1211 data += msize;
1127 data += msize;
1212 offset += msize;
1128 offset += msize;
1213 }
1129 }
1214 return markers;
1130 return markers;
1215 bail:
1131 bail:
1216 Py_DECREF(markers);
1132 Py_DECREF(markers);
1217 return NULL;
1133 return NULL;
1218 }
1134 }
1219
1135
1220 static char parsers_doc[] = "Efficient content parsing.";
1136 static char parsers_doc[] = "Efficient content parsing.";
1221
1137
1222 PyObject *encodedir(PyObject *self, PyObject *args);
1138 PyObject *encodedir(PyObject *self, PyObject *args);
1223 PyObject *pathencode(PyObject *self, PyObject *args);
1139 PyObject *pathencode(PyObject *self, PyObject *args);
1224 PyObject *lowerencode(PyObject *self, PyObject *args);
1140 PyObject *lowerencode(PyObject *self, PyObject *args);
1225 PyObject *parse_index2(PyObject *self, PyObject *args, PyObject *kwargs);
1141 PyObject *parse_index2(PyObject *self, PyObject *args, PyObject *kwargs);
1226
1142
1227 static PyMethodDef methods[] = {
1143 static PyMethodDef methods[] = {
1228 {"pack_dirstate", pack_dirstate, METH_VARARGS, "pack a dirstate\n"},
1144 {"pack_dirstate", pack_dirstate, METH_VARARGS, "pack a dirstate\n"},
1229 {"nonnormalotherparententries", nonnormalotherparententries, METH_VARARGS,
1230 "create a set containing non-normal and other parent entries of given "
1231 "dirstate\n"},
1232 {"parse_dirstate", parse_dirstate, METH_VARARGS, "parse a dirstate\n"},
1145 {"parse_dirstate", parse_dirstate, METH_VARARGS, "parse a dirstate\n"},
1233 {"parse_index2", (PyCFunction)parse_index2, METH_VARARGS | METH_KEYWORDS,
1146 {"parse_index2", (PyCFunction)parse_index2, METH_VARARGS | METH_KEYWORDS,
1234 "parse a revlog index\n"},
1147 "parse a revlog index\n"},
1235 {"isasciistr", isasciistr, METH_VARARGS, "check if a string is ASCII\n"},
1148 {"isasciistr", isasciistr, METH_VARARGS, "check if a string is ASCII\n"},
1236 {"asciilower", asciilower, METH_VARARGS, "lowercase an ASCII string\n"},
1149 {"asciilower", asciilower, METH_VARARGS, "lowercase an ASCII string\n"},
1237 {"asciiupper", asciiupper, METH_VARARGS, "uppercase an ASCII string\n"},
1150 {"asciiupper", asciiupper, METH_VARARGS, "uppercase an ASCII string\n"},
1238 {"dict_new_presized", dict_new_presized, METH_VARARGS,
1151 {"dict_new_presized", dict_new_presized, METH_VARARGS,
1239 "construct a dict with an expected size\n"},
1152 "construct a dict with an expected size\n"},
1240 {"make_file_foldmap", make_file_foldmap, METH_VARARGS,
1153 {"make_file_foldmap", make_file_foldmap, METH_VARARGS,
1241 "make file foldmap\n"},
1154 "make file foldmap\n"},
1242 {"jsonescapeu8fast", jsonescapeu8fast, METH_VARARGS,
1155 {"jsonescapeu8fast", jsonescapeu8fast, METH_VARARGS,
1243 "escape a UTF-8 byte string to JSON (fast path)\n"},
1156 "escape a UTF-8 byte string to JSON (fast path)\n"},
1244 {"encodedir", encodedir, METH_VARARGS, "encodedir a path\n"},
1157 {"encodedir", encodedir, METH_VARARGS, "encodedir a path\n"},
1245 {"pathencode", pathencode, METH_VARARGS, "fncache-encode a path\n"},
1158 {"pathencode", pathencode, METH_VARARGS, "fncache-encode a path\n"},
1246 {"lowerencode", lowerencode, METH_VARARGS, "lower-encode a path\n"},
1159 {"lowerencode", lowerencode, METH_VARARGS, "lower-encode a path\n"},
1247 {"fm1readmarkers", fm1readmarkers, METH_VARARGS,
1160 {"fm1readmarkers", fm1readmarkers, METH_VARARGS,
1248 "parse v1 obsolete markers\n"},
1161 "parse v1 obsolete markers\n"},
1249 {NULL, NULL}};
1162 {NULL, NULL}};
1250
1163
1251 void dirs_module_init(PyObject *mod);
1164 void dirs_module_init(PyObject *mod);
1252 void manifest_module_init(PyObject *mod);
1165 void manifest_module_init(PyObject *mod);
1253 void revlog_module_init(PyObject *mod);
1166 void revlog_module_init(PyObject *mod);
1254
1167
1255 static const int version = 20;
1168 static const int version = 20;
1256
1169
1257 static void module_init(PyObject *mod)
1170 static void module_init(PyObject *mod)
1258 {
1171 {
1259 PyModule_AddIntConstant(mod, "version", version);
1172 PyModule_AddIntConstant(mod, "version", version);
1260
1173
1261 /* This module constant has two purposes. First, it lets us unit test
1174 /* This module constant has two purposes. First, it lets us unit test
1262 * the ImportError raised without hard-coding any error text. This
1175 * the ImportError raised without hard-coding any error text. This
1263 * means we can change the text in the future without breaking tests,
1176 * means we can change the text in the future without breaking tests,
1264 * even across changesets without a recompile. Second, its presence
1177 * even across changesets without a recompile. Second, its presence
1265 * can be used to determine whether the version-checking logic is
1178 * can be used to determine whether the version-checking logic is
1266 * present, which also helps in testing across changesets without a
1179 * present, which also helps in testing across changesets without a
1267 * recompile. Note that this means the pure-Python version of parsers
1180 * recompile. Note that this means the pure-Python version of parsers
1268 * should not have this module constant. */
1181 * should not have this module constant. */
1269 PyModule_AddStringConstant(mod, "versionerrortext", versionerrortext);
1182 PyModule_AddStringConstant(mod, "versionerrortext", versionerrortext);
1270
1183
1271 dirs_module_init(mod);
1184 dirs_module_init(mod);
1272 manifest_module_init(mod);
1185 manifest_module_init(mod);
1273 revlog_module_init(mod);
1186 revlog_module_init(mod);
1274
1187
1275 if (PyType_Ready(&dirstateItemType) < 0) {
1188 if (PyType_Ready(&dirstateItemType) < 0) {
1276 return;
1189 return;
1277 }
1190 }
1278 Py_INCREF(&dirstateItemType);
1191 Py_INCREF(&dirstateItemType);
1279 PyModule_AddObject(mod, "DirstateItem", (PyObject *)&dirstateItemType);
1192 PyModule_AddObject(mod, "DirstateItem", (PyObject *)&dirstateItemType);
1280 }
1193 }
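As the comment in module_init explains, `versionerrortext` doubles as a feature probe. A hypothetical illustration of such a probe (only the attribute name comes from this file; the surrounding code is invented):

    # Illustration only: detect whether the loaded C module carries the
    # version-checking logic without hard-coding any error text.
    from mercurial import policy

    parsers = policy.importmod('parsers')

    has_version_check = getattr(parsers, 'versionerrortext', None) is not None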
1281
1194
1282 static int check_python_version(void)
1195 static int check_python_version(void)
1283 {
1196 {
1284 PyObject *sys = PyImport_ImportModule("sys"), *ver;
1197 PyObject *sys = PyImport_ImportModule("sys"), *ver;
1285 long hexversion;
1198 long hexversion;
1286 if (!sys) {
1199 if (!sys) {
1287 return -1;
1200 return -1;
1288 }
1201 }
1289 ver = PyObject_GetAttrString(sys, "hexversion");
1202 ver = PyObject_GetAttrString(sys, "hexversion");
1290 Py_DECREF(sys);
1203 Py_DECREF(sys);
1291 if (!ver) {
1204 if (!ver) {
1292 return -1;
1205 return -1;
1293 }
1206 }
1294 hexversion = PyInt_AsLong(ver);
1207 hexversion = PyInt_AsLong(ver);
1295 Py_DECREF(ver);
1208 Py_DECREF(ver);
1296 /* sys.hexversion is a 32-bit number by default, so the -1 case
1209 /* sys.hexversion is a 32-bit number by default, so the -1 case
1297 * should only occur in unusual circumstances (e.g. if sys.hexversion
1210 * should only occur in unusual circumstances (e.g. if sys.hexversion
1298 * is manually set to an invalid value). */
1211 * is manually set to an invalid value). */
1299 if ((hexversion == -1) || (hexversion >> 16 != PY_VERSION_HEX >> 16)) {
1212 if ((hexversion == -1) || (hexversion >> 16 != PY_VERSION_HEX >> 16)) {
1300 PyErr_Format(PyExc_ImportError,
1213 PyErr_Format(PyExc_ImportError,
1301 "%s: The Mercurial extension "
1214 "%s: The Mercurial extension "
1302 "modules were compiled with Python " PY_VERSION
1215 "modules were compiled with Python " PY_VERSION
1303 ", but "
1216 ", but "
1304 "Mercurial is currently using Python with "
1217 "Mercurial is currently using Python with "
1305 "sys.hexversion=%ld: "
1218 "sys.hexversion=%ld: "
1306 "Python %s\n at: %s",
1219 "Python %s\n at: %s",
1307 versionerrortext, hexversion, Py_GetVersion(),
1220 versionerrortext, hexversion, Py_GetVersion(),
1308 Py_GetProgramFullPath());
1221 Py_GetProgramFullPath());
1309 return -1;
1222 return -1;
1310 }
1223 }
1311 return 0;
1224 return 0;
1312 }
1225 }
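The guard above compares only the major/minor part of the two version numbers. A rough Python equivalent, for illustration only (PY_VERSION_HEX is a compile-time constant; the value below is an assumed stand-in):

    import sys

    COMPILED_HEXVERSION = 0x03080000  # assumed stand-in for PY_VERSION_HEX

    def interpreter_matches(hexversion=None):
        # sys.hexversion packs major, minor, micro and release level into one
        # integer; shifting right by 16 bits keeps only major and minor.
        if hexversion is None:
            hexversion = sys.hexversion
        return (hexversion >> 16) == (COMPILED_HEXVERSION >> 16)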
1313
1226
1314 #ifdef IS_PY3K
1227 #ifdef IS_PY3K
1315 static struct PyModuleDef parsers_module = {PyModuleDef_HEAD_INIT, "parsers",
1228 static struct PyModuleDef parsers_module = {PyModuleDef_HEAD_INIT, "parsers",
1316 parsers_doc, -1, methods};
1229 parsers_doc, -1, methods};
1317
1230
1318 PyMODINIT_FUNC PyInit_parsers(void)
1231 PyMODINIT_FUNC PyInit_parsers(void)
1319 {
1232 {
1320 PyObject *mod;
1233 PyObject *mod;
1321
1234
1322 if (check_python_version() == -1)
1235 if (check_python_version() == -1)
1323 return NULL;
1236 return NULL;
1324 mod = PyModule_Create(&parsers_module);
1237 mod = PyModule_Create(&parsers_module);
1325 module_init(mod);
1238 module_init(mod);
1326 return mod;
1239 return mod;
1327 }
1240 }
1328 #else
1241 #else
1329 PyMODINIT_FUNC initparsers(void)
1242 PyMODINIT_FUNC initparsers(void)
1330 {
1243 {
1331 PyObject *mod;
1244 PyObject *mod;
1332
1245
1333 if (check_python_version() == -1) {
1246 if (check_python_version() == -1) {
1334 return;
1247 return;
1335 }
1248 }
1336 mod = Py_InitModule3("parsers", methods, parsers_doc);
1249 mod = Py_InitModule3("parsers", methods, parsers_doc);
1337 module_init(mod);
1250 module_init(mod);
1338 }
1251 }
1339 #endif
1252 #endif
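Before moving on to dirstatemap.py, a short sketch of the call shape fm1readmarkers expects; the wrapper name is hypothetical, and the real consumer is Mercurial's obsolescence-marker code:

    from mercurial import policy

    parsers = policy.importmod('parsers')

    def read_markers(data, offset, stop):
        # data is the raw obsstore payload (bytes); offset/stop delimit the
        # byte range to parse.  Each record mirrors the
        # Py_BuildValue("(OOHO(di)O)", ...) call in fm1readmarker:
        #   (precursor, successors, flags, metadata, (mtime, tz_offset), parents)
        return parsers.fm1readmarkers(data, offset, stop)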
@@ -1,1006 +1,932 b''
1 # dirstatemap.py
1 # dirstatemap.py
2 #
2 #
3 # This software may be used and distributed according to the terms of the
3 # This software may be used and distributed according to the terms of the
4 # GNU General Public License version 2 or any later version.
4 # GNU General Public License version 2 or any later version.
5
5
6 from __future__ import absolute_import
6 from __future__ import absolute_import
7
7
8 import errno
8 import errno
9
9
10 from .i18n import _
10 from .i18n import _
11
11
12 from . import (
12 from . import (
13 error,
13 error,
14 pathutil,
14 pathutil,
15 policy,
15 policy,
16 pycompat,
16 pycompat,
17 txnutil,
17 txnutil,
18 util,
18 util,
19 )
19 )
20
20
21 from .dirstateutils import (
21 from .dirstateutils import (
22 docket as docketmod,
22 docket as docketmod,
23 )
23 )
24
24
25 parsers = policy.importmod('parsers')
25 parsers = policy.importmod('parsers')
26 rustmod = policy.importrust('dirstate')
26 rustmod = policy.importrust('dirstate')
27
27
28 propertycache = util.propertycache
28 propertycache = util.propertycache
29
29
30 if rustmod is None:
30 if rustmod is None:
31 DirstateItem = parsers.DirstateItem
31 DirstateItem = parsers.DirstateItem
32 else:
32 else:
33 DirstateItem = rustmod.DirstateItem
33 DirstateItem = rustmod.DirstateItem
34
34
35 rangemask = 0x7FFFFFFF
35 rangemask = 0x7FFFFFFF
36
36
37
37
38 class dirstatemap(object):
38 class dirstatemap(object):
39 """Map encapsulating the dirstate's contents.
39 """Map encapsulating the dirstate's contents.
40
40
41 The dirstate contains the following state:
41 The dirstate contains the following state:
42
42
43 - `identity` is the identity of the dirstate file, which can be used to
43 - `identity` is the identity of the dirstate file, which can be used to
44 detect when changes have occurred to the dirstate file.
44 detect when changes have occurred to the dirstate file.
45
45
46 - `parents` is a pair containing the parents of the working copy. The
46 - `parents` is a pair containing the parents of the working copy. The
47 parents are updated by calling `setparents`.
47 parents are updated by calling `setparents`.
48
48
49 - the state map maps filenames to tuples of (state, mode, size, mtime),
49 - the state map maps filenames to tuples of (state, mode, size, mtime),
50 where state is a single character representing 'normal', 'added',
50 where state is a single character representing 'normal', 'added',
51 'removed', or 'merged'. It is read by treating the dirstate as a
51 'removed', or 'merged'. It is read by treating the dirstate as a
52 dict. File state is updated by calling various methods (see each
52 dict. File state is updated by calling various methods (see each
53 method's documentation for details; a usage sketch follows this docstring):
53 method's documentation for details; a usage sketch follows this docstring):
54
54
55 - `reset_state`,
55 - `reset_state`,
56 - `set_tracked`
56 - `set_tracked`
57 - `set_untracked`
57 - `set_untracked`
58 - `set_clean`
58 - `set_clean`
59 - `set_possibly_dirty`
59 - `set_possibly_dirty`
60
60
61 - `copymap` maps destination filenames to their source filename.
61 - `copymap` maps destination filenames to their source filename.
62
62
63 The dirstate also provides the following views onto the state:
63 The dirstate also provides the following views onto the state:
64
64
65 - `nonnormalset` is a set of the filenames that have state other
66 than 'normal', or are normal but have an mtime of -1 ('normallookup').
67
68 - `otherparentset` is a set of the filenames that are marked as coming
69 from the second parent when the dirstate is currently being merged.
70
71 - `filefoldmap` is a dict mapping normalized filenames to the denormalized
65 - `filefoldmap` is a dict mapping normalized filenames to the denormalized
72 form that they appear as in the dirstate.
66 form that they appear as in the dirstate.
73
67
74 - `dirfoldmap` is a dict mapping normalized directory names to the
68 - `dirfoldmap` is a dict mapping normalized directory names to the
75 denormalized form that they appear as in the dirstate.
69 denormalized form that they appear as in the dirstate.
76 """
70 """
77
71
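A hypothetical usage sketch for the mutation API listed in the docstring above; the calling code is a stand-in, only the method names and signatures come from this class:

    def track_and_clean(dmap, filename, mode, size, mtime):
        # start tracking the file in the working copy ...
        dmap.set_tracked(filename)
        # ... and later record that its on-disk state matches the parent
        dmap.set_clean(filename, mode, size, mtime)

    def forget(dmap, filename):
        # stop tracking the file; returns False if it was not tracked at all
        return dmap.set_untracked(filename)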
78 def __init__(self, ui, opener, root, nodeconstants, use_dirstate_v2):
72 def __init__(self, ui, opener, root, nodeconstants, use_dirstate_v2):
79 self._ui = ui
73 self._ui = ui
80 self._opener = opener
74 self._opener = opener
81 self._root = root
75 self._root = root
82 self._filename = b'dirstate'
76 self._filename = b'dirstate'
83 self._nodelen = 20
77 self._nodelen = 20
84 self._nodeconstants = nodeconstants
78 self._nodeconstants = nodeconstants
85 assert (
79 assert (
86 not use_dirstate_v2
80 not use_dirstate_v2
87 ), "should have detected unsupported requirement"
81 ), "should have detected unsupported requirement"
88
82
89 self._parents = None
83 self._parents = None
90 self._dirtyparents = False
84 self._dirtyparents = False
91
85
92 # for consistent view between _pl() and _read() invocations
86 # for consistent view between _pl() and _read() invocations
93 self._pendingmode = None
87 self._pendingmode = None
94
88
95 @propertycache
89 @propertycache
96 def _map(self):
90 def _map(self):
97 self._map = {}
91 self._map = {}
98 self.read()
92 self.read()
99 return self._map
93 return self._map
100
94
101 @propertycache
95 @propertycache
102 def copymap(self):
96 def copymap(self):
103 self.copymap = {}
97 self.copymap = {}
104 self._map
98 self._map
105 return self.copymap
99 return self.copymap
106
100
107 def clear(self):
101 def clear(self):
108 self._map.clear()
102 self._map.clear()
109 self.copymap.clear()
103 self.copymap.clear()
110 self.setparents(self._nodeconstants.nullid, self._nodeconstants.nullid)
104 self.setparents(self._nodeconstants.nullid, self._nodeconstants.nullid)
111 util.clearcachedproperty(self, b"_dirs")
105 util.clearcachedproperty(self, b"_dirs")
112 util.clearcachedproperty(self, b"_alldirs")
106 util.clearcachedproperty(self, b"_alldirs")
113 util.clearcachedproperty(self, b"filefoldmap")
107 util.clearcachedproperty(self, b"filefoldmap")
114 util.clearcachedproperty(self, b"dirfoldmap")
108 util.clearcachedproperty(self, b"dirfoldmap")
115 util.clearcachedproperty(self, b"nonnormalset")
116 util.clearcachedproperty(self, b"otherparentset")
117
109
118 def items(self):
110 def items(self):
119 return pycompat.iteritems(self._map)
111 return pycompat.iteritems(self._map)
120
112
121 # forward for python2,3 compat
113 # forward for python2,3 compat
122 iteritems = items
114 iteritems = items
123
115
124 def debug_iter(self, all):
116 def debug_iter(self, all):
125 """
117 """
126 Return an iterator of (filename, state, mode, size, mtime) tuples
118 Return an iterator of (filename, state, mode, size, mtime) tuples
127
119
128 `all` is unused when Rust is not enabled
120 `all` is unused when Rust is not enabled
129 """
121 """
130 for (filename, item) in self.items():
122 for (filename, item) in self.items():
131 yield (filename, item.state, item.mode, item.size, item.mtime)
123 yield (filename, item.state, item.mode, item.size, item.mtime)
132
124
133 def __len__(self):
125 def __len__(self):
134 return len(self._map)
126 return len(self._map)
135
127
136 def __iter__(self):
128 def __iter__(self):
137 return iter(self._map)
129 return iter(self._map)
138
130
139 def get(self, key, default=None):
131 def get(self, key, default=None):
140 return self._map.get(key, default)
132 return self._map.get(key, default)
141
133
142 def __contains__(self, key):
134 def __contains__(self, key):
143 return key in self._map
135 return key in self._map
144
136
145 def __getitem__(self, key):
137 def __getitem__(self, key):
146 return self._map[key]
138 return self._map[key]
147
139
148 def keys(self):
140 def keys(self):
149 return self._map.keys()
141 return self._map.keys()
150
142
151 def preload(self):
143 def preload(self):
152 """Loads the underlying data, if it's not already loaded"""
144 """Loads the underlying data, if it's not already loaded"""
153 self._map
145 self._map
154
146
155 def _dirs_incr(self, filename, old_entry=None):
147 def _dirs_incr(self, filename, old_entry=None):
156 """increment the dirstate counter if applicable"""
148 """increment the dirstate counter if applicable"""
157 if (
149 if (
158 old_entry is None or old_entry.removed
150 old_entry is None or old_entry.removed
159 ) and "_dirs" in self.__dict__:
151 ) and "_dirs" in self.__dict__:
160 self._dirs.addpath(filename)
152 self._dirs.addpath(filename)
161 if old_entry is None and "_alldirs" in self.__dict__:
153 if old_entry is None and "_alldirs" in self.__dict__:
162 self._alldirs.addpath(filename)
154 self._alldirs.addpath(filename)
163
155
164 def _dirs_decr(self, filename, old_entry=None, remove_variant=False):
156 def _dirs_decr(self, filename, old_entry=None, remove_variant=False):
165 """decrement the dirstate counter if applicable"""
157 """decrement the dirstate counter if applicable"""
166 if old_entry is not None:
158 if old_entry is not None:
167 if "_dirs" in self.__dict__ and not old_entry.removed:
159 if "_dirs" in self.__dict__ and not old_entry.removed:
168 self._dirs.delpath(filename)
160 self._dirs.delpath(filename)
169 if "_alldirs" in self.__dict__ and not remove_variant:
161 if "_alldirs" in self.__dict__ and not remove_variant:
170 self._alldirs.delpath(filename)
162 self._alldirs.delpath(filename)
171 elif remove_variant and "_alldirs" in self.__dict__:
163 elif remove_variant and "_alldirs" in self.__dict__:
172 self._alldirs.addpath(filename)
164 self._alldirs.addpath(filename)
173 if "filefoldmap" in self.__dict__:
165 if "filefoldmap" in self.__dict__:
174 normed = util.normcase(filename)
166 normed = util.normcase(filename)
175 self.filefoldmap.pop(normed, None)
167 self.filefoldmap.pop(normed, None)
176
168
177 def set_possibly_dirty(self, filename):
169 def set_possibly_dirty(self, filename):
178 """record that the current state of the file on disk is unknown"""
170 """record that the current state of the file on disk is unknown"""
179 self[filename].set_possibly_dirty()
171 self[filename].set_possibly_dirty()
180
172
181 def set_clean(self, filename, mode, size, mtime):
173 def set_clean(self, filename, mode, size, mtime):
182 """mark a file as back to a clean state"""
174 """mark a file as back to a clean state"""
183 entry = self[filename]
175 entry = self[filename]
184 mtime = mtime & rangemask
176 mtime = mtime & rangemask
185 size = size & rangemask
177 size = size & rangemask
186 entry.set_clean(mode, size, mtime)
178 entry.set_clean(mode, size, mtime)
187 self.copymap.pop(filename, None)
179 self.copymap.pop(filename, None)
188 self.nonnormalset.discard(filename)
189
180
190 def reset_state(
181 def reset_state(
191 self,
182 self,
192 filename,
183 filename,
193 wc_tracked=False,
184 wc_tracked=False,
194 p1_tracked=False,
185 p1_tracked=False,
195 p2_tracked=False,
186 p2_tracked=False,
196 merged=False,
187 merged=False,
197 clean_p1=False,
188 clean_p1=False,
198 clean_p2=False,
189 clean_p2=False,
199 possibly_dirty=False,
190 possibly_dirty=False,
200 parentfiledata=None,
191 parentfiledata=None,
201 ):
192 ):
202 """Set an entry to a given state, disregarding all previous state
193 """Set an entry to a given state, disregarding all previous state
203
194
204 This is to be used by the part of the dirstate API dedicated to
195 This is to be used by the part of the dirstate API dedicated to
205 adjusting the dirstate after an update/merge.
196 adjusting the dirstate after an update/merge.
206
197
207 note: calling this might result in no entry existing at all if the
198 note: calling this might result in no entry existing at all if the
208 dirstate map does not see any point in having one for this file
199 dirstate map does not see any point in having one for this file
209 anymore.
200 anymore.
210 """
201 """
211 if merged and (clean_p1 or clean_p2):
202 if merged and (clean_p1 or clean_p2):
212 msg = b'`merged` argument incompatible with `clean_p1`/`clean_p2`'
203 msg = b'`merged` argument incompatible with `clean_p1`/`clean_p2`'
213 raise error.ProgrammingError(msg)
204 raise error.ProgrammingError(msg)
214 # copy information is now outdated
205 # copy information is now outdated
215 # (maybe new information should be passed directly to this function)
206 # (maybe new information should be passed directly to this function)
216 self.copymap.pop(filename, None)
207 self.copymap.pop(filename, None)
217
208
218 if not (p1_tracked or p2_tracked or wc_tracked):
209 if not (p1_tracked or p2_tracked or wc_tracked):
219 old_entry = self._map.pop(filename, None)
210 old_entry = self._map.pop(filename, None)
220 self._dirs_decr(filename, old_entry=old_entry)
211 self._dirs_decr(filename, old_entry=old_entry)
221 self.nonnormalset.discard(filename)
222 self.copymap.pop(filename, None)
212 self.copymap.pop(filename, None)
223 return
213 return
224 elif merged:
214 elif merged:
225 # XXX might be merged and removed ?
215 # XXX might be merged and removed ?
226 entry = self.get(filename)
216 entry = self.get(filename)
227 if entry is None or not entry.tracked:
217 if entry is None or not entry.tracked:
228 # XXX this mostly replicates dirstate.otherparent. We should get
218 # XXX this mostly replicates dirstate.otherparent. We should get
229 # the higher layer to pass us more reliable data where `merged`
219 # the higher layer to pass us more reliable data where `merged`
230 # actually means merged. Dropping this clause will show failure
220 # actually means merged. Dropping this clause will show failure
231 # in `test-graft.t`
221 # in `test-graft.t`
232 merged = False
222 merged = False
233 clean_p2 = True
223 clean_p2 = True
234 elif not (p1_tracked or p2_tracked) and wc_tracked:
224 elif not (p1_tracked or p2_tracked) and wc_tracked:
235 pass # file is added, nothing special to adjust
225 pass # file is added, nothing special to adjust
236 elif (p1_tracked or p2_tracked) and not wc_tracked:
226 elif (p1_tracked or p2_tracked) and not wc_tracked:
237 pass
227 pass
238 elif clean_p2 and wc_tracked:
228 elif clean_p2 and wc_tracked:
239 if p1_tracked or self.get(filename) is not None:
229 if p1_tracked or self.get(filename) is not None:
240 # XXX the `self.get` call is catching some case in
230 # XXX the `self.get` call is catching some case in
241 # `test-merge-remove.t` where the file is tracked in p1, the
231 # `test-merge-remove.t` where the file is tracked in p1, the
242 # p1_tracked argument is False.
232 # p1_tracked argument is False.
243 #
233 #
244 # In addition, this seems to be a case where the file is marked
234 # In addition, this seems to be a case where the file is marked
245 # as merged without actually being the result of a merge
235 # as merged without actually being the result of a merge
246 # action. So things are not ideal here.
236 # action. So things are not ideal here.
247 merged = True
237 merged = True
248 clean_p2 = False
238 clean_p2 = False
249 elif not p1_tracked and p2_tracked and wc_tracked:
239 elif not p1_tracked and p2_tracked and wc_tracked:
250 clean_p2 = True
240 clean_p2 = True
251 elif possibly_dirty:
241 elif possibly_dirty:
252 pass
242 pass
253 elif wc_tracked:
243 elif wc_tracked:
254 # this is a "normal" file
244 # this is a "normal" file
255 if parentfiledata is None:
245 if parentfiledata is None:
256 msg = b'failed to pass parentfiledata for a normal file: %s'
246 msg = b'failed to pass parentfiledata for a normal file: %s'
257 msg %= filename
247 msg %= filename
258 raise error.ProgrammingError(msg)
248 raise error.ProgrammingError(msg)
259 else:
249 else:
260 assert False, 'unreachable'
250 assert False, 'unreachable'
261
251
262 old_entry = self._map.get(filename)
252 old_entry = self._map.get(filename)
263 self._dirs_incr(filename, old_entry)
253 self._dirs_incr(filename, old_entry)
264 entry = DirstateItem(
254 entry = DirstateItem(
265 wc_tracked=wc_tracked,
255 wc_tracked=wc_tracked,
266 p1_tracked=p1_tracked,
256 p1_tracked=p1_tracked,
267 p2_tracked=p2_tracked,
257 p2_tracked=p2_tracked,
268 merged=merged,
258 merged=merged,
269 clean_p1=clean_p1,
259 clean_p1=clean_p1,
270 clean_p2=clean_p2,
260 clean_p2=clean_p2,
271 possibly_dirty=possibly_dirty,
261 possibly_dirty=possibly_dirty,
272 parentfiledata=parentfiledata,
262 parentfiledata=parentfiledata,
273 )
263 )
274 if entry.dm_nonnormal:
275 self.nonnormalset.add(filename)
276 else:
277 self.nonnormalset.discard(filename)
278 if entry.dm_otherparent:
279 self.otherparentset.add(filename)
280 else:
281 self.otherparentset.discard(filename)
282 self._map[filename] = entry
264 self._map[filename] = entry
283
265
284 def set_tracked(self, filename):
266 def set_tracked(self, filename):
285 new = False
267 new = False
286 entry = self.get(filename)
268 entry = self.get(filename)
287 if entry is None:
269 if entry is None:
288 self._dirs_incr(filename)
270 self._dirs_incr(filename)
289 entry = DirstateItem(
271 entry = DirstateItem(
290 p1_tracked=False,
272 p1_tracked=False,
291 p2_tracked=False,
273 p2_tracked=False,
292 wc_tracked=True,
274 wc_tracked=True,
293 merged=False,
275 merged=False,
294 clean_p1=False,
276 clean_p1=False,
295 clean_p2=False,
277 clean_p2=False,
296 possibly_dirty=False,
278 possibly_dirty=False,
297 parentfiledata=None,
279 parentfiledata=None,
298 )
280 )
299 self._map[filename] = entry
281 self._map[filename] = entry
300 if entry.dm_nonnormal:
301 self.nonnormalset.add(filename)
302 new = True
282 new = True
303 elif not entry.tracked:
283 elif not entry.tracked:
304 self._dirs_incr(filename, entry)
284 self._dirs_incr(filename, entry)
305 entry.set_tracked()
285 entry.set_tracked()
306 new = True
286 new = True
307 else:
287 else:
308 # XXX This is probably overkill for most cases, but we need this to
288 # XXX This is probably overkill for most cases, but we need this to
309 # fully replace the `normallookup` call with the `set_tracked` one.
289 # fully replace the `normallookup` call with the `set_tracked` one.
310 # Consider smoothing this in the future.
290 # Consider smoothing this in the future.
311 self.set_possibly_dirty(filename)
291 self.set_possibly_dirty(filename)
312 return new
292 return new
313
293
314 def set_untracked(self, f):
294 def set_untracked(self, f):
315 """Mark a file as no longer tracked in the dirstate map"""
295 """Mark a file as no longer tracked in the dirstate map"""
316 entry = self.get(f)
296 entry = self.get(f)
317 if entry is None:
297 if entry is None:
318 return False
298 return False
319 else:
299 else:
320 self._dirs_decr(f, old_entry=entry, remove_variant=not entry.added)
300 self._dirs_decr(f, old_entry=entry, remove_variant=not entry.added)
321 if not entry.merged:
301 if not entry.merged:
322 self.copymap.pop(f, None)
302 self.copymap.pop(f, None)
323 if entry.added:
303 if entry.added:
324 self.nonnormalset.discard(f)
325 self._map.pop(f, None)
304 self._map.pop(f, None)
326 else:
305 else:
327 self.nonnormalset.add(f)
328 if entry.from_p2:
329 self.otherparentset.add(f)
330 entry.set_untracked()
306 entry.set_untracked()
331 return True
307 return True
332
308
333 def nonnormalentries(self):
334 '''Compute the nonnormal dirstate entries from the dmap'''
335 try:
336 return parsers.nonnormalotherparententries(self._map)
337 except AttributeError:
338 nonnorm = set()
339 otherparent = set()
340 for fname, e in pycompat.iteritems(self._map):
341 if e.dm_nonnormal:
342 nonnorm.add(fname)
343 if e.from_p2:
344 otherparent.add(fname)
345 return nonnorm, otherparent
346
347 @propertycache
309 @propertycache
348 def filefoldmap(self):
310 def filefoldmap(self):
349 """Returns a dictionary mapping normalized case paths to their
311 """Returns a dictionary mapping normalized case paths to their
350 non-normalized versions.
312 non-normalized versions.
351 """
313 """
352 try:
314 try:
353 makefilefoldmap = parsers.make_file_foldmap
315 makefilefoldmap = parsers.make_file_foldmap
354 except AttributeError:
316 except AttributeError:
355 pass
317 pass
356 else:
318 else:
357 return makefilefoldmap(
319 return makefilefoldmap(
358 self._map, util.normcasespec, util.normcasefallback
320 self._map, util.normcasespec, util.normcasefallback
359 )
321 )
360
322
361 f = {}
323 f = {}
362 normcase = util.normcase
324 normcase = util.normcase
363 for name, s in pycompat.iteritems(self._map):
325 for name, s in pycompat.iteritems(self._map):
364 if not s.removed:
326 if not s.removed:
365 f[normcase(name)] = name
327 f[normcase(name)] = name
366 f[b'.'] = b'.' # prevents useless util.fspath() invocation
328 f[b'.'] = b'.' # prevents useless util.fspath() invocation
367 return f
329 return f
368
330
369 def hastrackeddir(self, d):
331 def hastrackeddir(self, d):
370 """
332 """
371 Returns True if the dirstate contains a tracked (not removed) file
333 Returns True if the dirstate contains a tracked (not removed) file
372 in this directory.
334 in this directory.
373 """
335 """
374 return d in self._dirs
336 return d in self._dirs
375
337
376 def hasdir(self, d):
338 def hasdir(self, d):
377 """
339 """
378 Returns True if the dirstate contains a file (tracked or removed)
340 Returns True if the dirstate contains a file (tracked or removed)
379 in this directory.
341 in this directory.
380 """
342 """
381 return d in self._alldirs
343 return d in self._alldirs
382
344
383 @propertycache
345 @propertycache
384 def _dirs(self):
346 def _dirs(self):
385 return pathutil.dirs(self._map, only_tracked=True)
347 return pathutil.dirs(self._map, only_tracked=True)
386
348
387 @propertycache
349 @propertycache
388 def _alldirs(self):
350 def _alldirs(self):
389 return pathutil.dirs(self._map)
351 return pathutil.dirs(self._map)
390
352
391 def _opendirstatefile(self):
353 def _opendirstatefile(self):
392 fp, mode = txnutil.trypending(self._root, self._opener, self._filename)
354 fp, mode = txnutil.trypending(self._root, self._opener, self._filename)
393 if self._pendingmode is not None and self._pendingmode != mode:
355 if self._pendingmode is not None and self._pendingmode != mode:
394 fp.close()
356 fp.close()
395 raise error.Abort(
357 raise error.Abort(
396 _(b'working directory state may be changed in parallel')
358 _(b'working directory state may be changed in parallel')
397 )
359 )
398 self._pendingmode = mode
360 self._pendingmode = mode
399 return fp
361 return fp
400
362
401 def parents(self):
363 def parents(self):
402 if not self._parents:
364 if not self._parents:
403 try:
365 try:
404 fp = self._opendirstatefile()
366 fp = self._opendirstatefile()
405 st = fp.read(2 * self._nodelen)
367 st = fp.read(2 * self._nodelen)
406 fp.close()
368 fp.close()
407 except IOError as err:
369 except IOError as err:
408 if err.errno != errno.ENOENT:
370 if err.errno != errno.ENOENT:
409 raise
371 raise
410 # File doesn't exist, so the current state is empty
372 # File doesn't exist, so the current state is empty
411 st = b''
373 st = b''
412
374
413 l = len(st)
375 l = len(st)
414 if l == self._nodelen * 2:
376 if l == self._nodelen * 2:
415 self._parents = (
377 self._parents = (
416 st[: self._nodelen],
378 st[: self._nodelen],
417 st[self._nodelen : 2 * self._nodelen],
379 st[self._nodelen : 2 * self._nodelen],
418 )
380 )
419 elif l == 0:
381 elif l == 0:
420 self._parents = (
382 self._parents = (
421 self._nodeconstants.nullid,
383 self._nodeconstants.nullid,
422 self._nodeconstants.nullid,
384 self._nodeconstants.nullid,
423 )
385 )
424 else:
386 else:
425 raise error.Abort(
387 raise error.Abort(
426 _(b'working directory state appears damaged!')
388 _(b'working directory state appears damaged!')
427 )
389 )
428
390
429 return self._parents
391 return self._parents
430
392
431 def setparents(self, p1, p2, fold_p2=False):
393 def setparents(self, p1, p2, fold_p2=False):
432 self._parents = (p1, p2)
394 self._parents = (p1, p2)
433 self._dirtyparents = True
395 self._dirtyparents = True
434 copies = {}
396 copies = {}
435 if fold_p2:
397 if fold_p2:
436 candidatefiles = self.non_normal_or_other_parent_paths()
398 for f, s in pycompat.iteritems(self._map):
437
438 for f in candidatefiles:
439 s = self.get(f)
440 if s is None:
441 continue
442
443 # Discard "merged" markers when moving away from a merge state
399 # Discard "merged" markers when moving away from a merge state
444 if s.merged or s.from_p2:
400 if s.merged or s.from_p2:
445 source = self.copymap.pop(f, None)
401 source = self.copymap.pop(f, None)
446 if source:
402 if source:
447 copies[f] = source
403 copies[f] = source
448 s.drop_merge_data()
404 s.drop_merge_data()
449 return copies
405 return copies
450
406
451 def read(self):
407 def read(self):
452 # ignore HG_PENDING because identity is used only for writing
408 # ignore HG_PENDING because identity is used only for writing
453 self.identity = util.filestat.frompath(
409 self.identity = util.filestat.frompath(
454 self._opener.join(self._filename)
410 self._opener.join(self._filename)
455 )
411 )
456
412
457 try:
413 try:
458 fp = self._opendirstatefile()
414 fp = self._opendirstatefile()
459 try:
415 try:
460 st = fp.read()
416 st = fp.read()
461 finally:
417 finally:
462 fp.close()
418 fp.close()
463 except IOError as err:
419 except IOError as err:
464 if err.errno != errno.ENOENT:
420 if err.errno != errno.ENOENT:
465 raise
421 raise
466 return
422 return
467 if not st:
423 if not st:
468 return
424 return
469
425
470 if util.safehasattr(parsers, b'dict_new_presized'):
426 if util.safehasattr(parsers, b'dict_new_presized'):
471 # Make an estimate of the number of files in the dirstate based on
427 # Make an estimate of the number of files in the dirstate based on
472 # its size. This trades wasting some memory for avoiding costly
428 # its size. This trades wasting some memory for avoiding costly
473 # resizes. Each entry has a prefix of 17 bytes followed by one or
429 # resizes. Each entry has a prefix of 17 bytes followed by one or
474 # two path names. Studies on various large-scale real-world repositories
430 # two path names. Studies on various large-scale real-world repositories
475 # found 54 bytes to be a reasonable upper limit for the average path name.
431 # found 54 bytes to be a reasonable upper limit for the average path name.
476 # Copy entries are ignored for the sake of this estimate.
432 # Copy entries are ignored for the sake of this estimate.
477 self._map = parsers.dict_new_presized(len(st) // 71)
433 self._map = parsers.dict_new_presized(len(st) // 71)
478
434
479 # Python's garbage collector triggers a GC each time a certain number
435 # Python's garbage collector triggers a GC each time a certain number
480 # of container objects (the number being defined by
436 # of container objects (the number being defined by
481 # gc.get_threshold()) are allocated. parse_dirstate creates a tuple
437 # gc.get_threshold()) are allocated. parse_dirstate creates a tuple
482 # for each file in the dirstate. The C version then immediately marks
438 # for each file in the dirstate. The C version then immediately marks
483 # them as not to be tracked by the collector. However, this has no
439 # them as not to be tracked by the collector. However, this has no
484 # effect on when GCs are triggered, only on what objects the GC looks
440 # effect on when GCs are triggered, only on what objects the GC looks
485 # into. This means that O(number of files) GCs are unavoidable.
441 # into. This means that O(number of files) GCs are unavoidable.
486 # Depending on when in the process's lifetime the dirstate is parsed,
442 # Depending on when in the process's lifetime the dirstate is parsed,
487 # this can get very expensive. As a workaround, disable GC while
443 # this can get very expensive. As a workaround, disable GC while
488 # parsing the dirstate.
444 # parsing the dirstate.
489 #
445 #
490 # (we cannot decorate the function directly since it is in a C module)
446 # (we cannot decorate the function directly since it is in a C module)
491 parse_dirstate = util.nogc(parsers.parse_dirstate)
447 parse_dirstate = util.nogc(parsers.parse_dirstate)
492 p = parse_dirstate(self._map, self.copymap, st)
448 p = parse_dirstate(self._map, self.copymap, st)
493 if not self._dirtyparents:
449 if not self._dirtyparents:
494 self.setparents(*p)
450 self.setparents(*p)
495
451
496 # Avoid excess attribute lookups by fast pathing certain checks
452 # Avoid excess attribute lookups by fast pathing certain checks
497 self.__contains__ = self._map.__contains__
453 self.__contains__ = self._map.__contains__
498 self.__getitem__ = self._map.__getitem__
454 self.__getitem__ = self._map.__getitem__
499 self.get = self._map.get
455 self.get = self._map.get
500
456
501 def write(self, _tr, st, now):
457 def write(self, _tr, st, now):
502 st.write(
458 st.write(
503 parsers.pack_dirstate(self._map, self.copymap, self.parents(), now)
459 parsers.pack_dirstate(self._map, self.copymap, self.parents(), now)
504 )
460 )
505 st.close()
461 st.close()
506 self._dirtyparents = False
462 self._dirtyparents = False
507 self.nonnormalset, self.otherparentset = self.nonnormalentries()
508
509 @propertycache
510 def nonnormalset(self):
511 nonnorm, otherparents = self.nonnormalentries()
512 self.otherparentset = otherparents
513 return nonnorm
514
515 @propertycache
516 def otherparentset(self):
517 nonnorm, otherparents = self.nonnormalentries()
518 self.nonnormalset = nonnorm
519 return otherparents
520
521 def non_normal_or_other_parent_paths(self):
522 return self.nonnormalset.union(self.otherparentset)
523
463
524 @propertycache
464 @propertycache
525 def identity(self):
465 def identity(self):
526 self._map
466 self._map
527 return self.identity
467 return self.identity
528
468
529 @propertycache
469 @propertycache
530 def dirfoldmap(self):
470 def dirfoldmap(self):
531 f = {}
471 f = {}
532 normcase = util.normcase
472 normcase = util.normcase
533 for name in self._dirs:
473 for name in self._dirs:
534 f[normcase(name)] = name
474 f[normcase(name)] = name
535 return f
475 return f
536
476
537
477
538 if rustmod is not None:
478 if rustmod is not None:
539
479
540 class dirstatemap(object):
480 class dirstatemap(object):
541 def __init__(self, ui, opener, root, nodeconstants, use_dirstate_v2):
481 def __init__(self, ui, opener, root, nodeconstants, use_dirstate_v2):
542 self._use_dirstate_v2 = use_dirstate_v2
482 self._use_dirstate_v2 = use_dirstate_v2
543 self._nodeconstants = nodeconstants
483 self._nodeconstants = nodeconstants
544 self._ui = ui
484 self._ui = ui
545 self._opener = opener
485 self._opener = opener
546 self._root = root
486 self._root = root
547 self._filename = b'dirstate'
487 self._filename = b'dirstate'
548 self._nodelen = 20 # Also update Rust code when changing this!
488 self._nodelen = 20 # Also update Rust code when changing this!
549 self._parents = None
489 self._parents = None
550 self._dirtyparents = False
490 self._dirtyparents = False
551 self._docket = None
491 self._docket = None
552
492
553 # for consistent view between _pl() and _read() invocations
493 # for consistent view between _pl() and _read() invocations
554 self._pendingmode = None
494 self._pendingmode = None
555
495
556 self._use_dirstate_tree = self._ui.configbool(
496 self._use_dirstate_tree = self._ui.configbool(
557 b"experimental",
497 b"experimental",
558 b"dirstate-tree.in-memory",
498 b"dirstate-tree.in-memory",
559 False,
499 False,
560 )
500 )
561
501
562 def addfile(
502 def addfile(
563 self,
503 self,
564 f,
504 f,
565 mode=0,
505 mode=0,
566 size=None,
506 size=None,
567 mtime=None,
507 mtime=None,
568 added=False,
508 added=False,
569 merged=False,
509 merged=False,
570 from_p2=False,
510 from_p2=False,
571 possibly_dirty=False,
511 possibly_dirty=False,
572 ):
512 ):
573 if added:
513 if added:
574 assert not possibly_dirty
514 assert not possibly_dirty
575 assert not from_p2
515 assert not from_p2
576 item = DirstateItem.new_added()
516 item = DirstateItem.new_added()
577 elif merged:
517 elif merged:
578 assert not possibly_dirty
518 assert not possibly_dirty
579 assert not from_p2
519 assert not from_p2
580 item = DirstateItem.new_merged()
520 item = DirstateItem.new_merged()
581 elif from_p2:
521 elif from_p2:
582 assert not possibly_dirty
522 assert not possibly_dirty
583 item = DirstateItem.new_from_p2()
523 item = DirstateItem.new_from_p2()
584 elif possibly_dirty:
524 elif possibly_dirty:
585 item = DirstateItem.new_possibly_dirty()
525 item = DirstateItem.new_possibly_dirty()
586 else:
526 else:
587 assert size is not None
527 assert size is not None
588 assert mtime is not None
528 assert mtime is not None
589 size = size & rangemask
529 size = size & rangemask
590 mtime = mtime & rangemask
530 mtime = mtime & rangemask
591 item = DirstateItem.new_normal(mode, size, mtime)
531 item = DirstateItem.new_normal(mode, size, mtime)
592 self._rustmap.addfile(f, item)
532 self._rustmap.addfile(f, item)
593 if added:
533 if added:
594 self.copymap.pop(f, None)
534 self.copymap.pop(f, None)
595
535
596 def reset_state(
536 def reset_state(
597 self,
537 self,
598 filename,
538 filename,
599 wc_tracked=False,
539 wc_tracked=False,
600 p1_tracked=False,
540 p1_tracked=False,
601 p2_tracked=False,
541 p2_tracked=False,
602 merged=False,
542 merged=False,
603 clean_p1=False,
543 clean_p1=False,
604 clean_p2=False,
544 clean_p2=False,
605 possibly_dirty=False,
545 possibly_dirty=False,
606 parentfiledata=None,
546 parentfiledata=None,
607 ):
547 ):
608 """Set an entry to a given state, disregarding all previous state
548 """Set an entry to a given state, disregarding all previous state
609
549
610 This is to be used by the part of the dirstate API dedicated to
550 This is to be used by the part of the dirstate API dedicated to
611 adjusting the dirstate after an update/merge.
551 adjusting the dirstate after an update/merge.
612
552
613 note: calling this might result in no entry existing at all if the
553 note: calling this might result in no entry existing at all if the
614 dirstate map does not see any point in having one for this file
554 dirstate map does not see any point in having one for this file
615 anymore.
555 anymore.
616 """
556 """
617 if merged and (clean_p1 or clean_p2):
557 if merged and (clean_p1 or clean_p2):
618 msg = (
558 msg = (
619 b'`merged` argument incompatible with `clean_p1`/`clean_p2`'
559 b'`merged` argument incompatible with `clean_p1`/`clean_p2`'
620 )
560 )
621 raise error.ProgrammingError(msg)
561 raise error.ProgrammingError(msg)
622 # copy information is now outdated
562 # copy information is now outdated
623 # (maybe new information should be passed directly to this function)
563 # (maybe new information should be passed directly to this function)
624 self.copymap.pop(filename, None)
564 self.copymap.pop(filename, None)
625
565
626 if not (p1_tracked or p2_tracked or wc_tracked):
566 if not (p1_tracked or p2_tracked or wc_tracked):
627 self._rustmap.drop_item_and_copy_source(filename)
567 self._rustmap.drop_item_and_copy_source(filename)
628 elif merged:
568 elif merged:
629 # XXX might be merged and removed ?
569 # XXX might be merged and removed ?
630 entry = self.get(filename)
570 entry = self.get(filename)
631 if entry is not None and entry.tracked:
571 if entry is not None and entry.tracked:
632 # XXX this mostly replicates dirstate.otherparent. We should get
572 # XXX this mostly replicates dirstate.otherparent. We should get
633 # the higher layer to pass us more reliable data where `merged`
573 # the higher layer to pass us more reliable data where `merged`
634 # actually means merged. Dropping the else clause will show
574 # actually means merged. Dropping the else clause will show
635 # failure in `test-graft.t`
575 # failure in `test-graft.t`
636 self.addfile(filename, merged=True)
576 self.addfile(filename, merged=True)
637 else:
577 else:
638 self.addfile(filename, from_p2=True)
578 self.addfile(filename, from_p2=True)
639 elif not (p1_tracked or p2_tracked) and wc_tracked:
579 elif not (p1_tracked or p2_tracked) and wc_tracked:
640 self.addfile(
580 self.addfile(
641 filename, added=True, possibly_dirty=possibly_dirty
581 filename, added=True, possibly_dirty=possibly_dirty
642 )
582 )
643 elif (p1_tracked or p2_tracked) and not wc_tracked:
583 elif (p1_tracked or p2_tracked) and not wc_tracked:
644 # XXX might be merged and removed ?
584 # XXX might be merged and removed ?
645 self[filename] = DirstateItem.from_v1_data(b'r', 0, 0, 0)
585 self[filename] = DirstateItem.from_v1_data(b'r', 0, 0, 0)
646 self.nonnormalset.add(filename)
647 elif clean_p2 and wc_tracked:
586 elif clean_p2 and wc_tracked:
648 if p1_tracked or self.get(filename) is not None:
587 if p1_tracked or self.get(filename) is not None:
649 # XXX the `self.get` call is catching some case in
588 # XXX the `self.get` call is catching some case in
650 # `test-merge-remove.t` where the file is tracked in p1, the
589 # `test-merge-remove.t` where the file is tracked in p1, the
651 # p1_tracked argument is False.
590 # p1_tracked argument is False.
652 #
591 #
653 # In addition, this seems to be a case where the file is marked
592 # In addition, this seems to be a case where the file is marked
654 # as merged without actually being the result of a merge
593 # as merged without actually being the result of a merge
654 # action. So things are not ideal here.
594 # action. So things are not ideal here.
656 self.addfile(filename, merged=True)
595 self.addfile(filename, merged=True)
657 else:
596 else:
658 self.addfile(filename, from_p2=True)
597 self.addfile(filename, from_p2=True)
659 elif not p1_tracked and p2_tracked and wc_tracked:
598 elif not p1_tracked and p2_tracked and wc_tracked:
660 self.addfile(
599 self.addfile(
661 filename, from_p2=True, possibly_dirty=possibly_dirty
600 filename, from_p2=True, possibly_dirty=possibly_dirty
662 )
601 )
663 elif possibly_dirty:
602 elif possibly_dirty:
664 self.addfile(filename, possibly_dirty=possibly_dirty)
603 self.addfile(filename, possibly_dirty=possibly_dirty)
665 elif wc_tracked:
604 elif wc_tracked:
666 # this is a "normal" file
605 # this is a "normal" file
667 if parentfiledata is None:
606 if parentfiledata is None:
668 msg = b'failed to pass parentfiledata for a normal file: %s'
607 msg = b'failed to pass parentfiledata for a normal file: %s'
669 msg %= filename
608 msg %= filename
670 raise error.ProgrammingError(msg)
609 raise error.ProgrammingError(msg)
671 mode, size, mtime = parentfiledata
610 mode, size, mtime = parentfiledata
672 self.addfile(filename, mode=mode, size=size, mtime=mtime)
611 self.addfile(filename, mode=mode, size=size, mtime=mtime)
673 self.nonnormalset.discard(filename)
674 else:
612 else:
675 assert False, 'unreachable'
613 assert False, 'unreachable'
676
614
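The branch ordering above is what decides the resulting record. A minimal standalone sketch of the same decision table (pure Python, returning a label instead of mutating a map; illustrative only, not a Mercurial API):

def classify(p1_tracked, p2_tracked, wc_tracked, merged, clean_p2, possibly_dirty):
    # Mirrors the elif chain of reset_state above, in the same order.
    if not (p1_tracked or p2_tracked or wc_tracked):
        return 'drop entry and copy source'
    elif merged:
        return 'merged (from p2 when no tracked entry already exists)'
    elif not (p1_tracked or p2_tracked) and wc_tracked:
        return 'added'
    elif (p1_tracked or p2_tracked) and not wc_tracked:
        return 'removed (v1 state r)'
    elif clean_p2 and wc_tracked:
        return 'from p2 (merged when also known to p1)'
    elif not p1_tracked and p2_tracked and wc_tracked:
        return 'from p2'
    elif possibly_dirty:
        return 'possibly dirty'
    elif wc_tracked:
        return 'normal (requires parentfiledata: mode, size, mtime)'
    else:
        raise AssertionError('unreachable')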
677 def set_tracked(self, filename):
615 def set_tracked(self, filename):
678 new = False
616 new = False
679 entry = self.get(filename)
617 entry = self.get(filename)
680 if entry is None:
618 if entry is None:
681 self.addfile(filename, added=True)
619 self.addfile(filename, added=True)
682 new = True
620 new = True
683 elif not entry.tracked:
621 elif not entry.tracked:
684 entry.set_tracked()
622 entry.set_tracked()
685 self._rustmap.set_dirstate_item(filename, entry)
623 self._rustmap.set_dirstate_item(filename, entry)
686 new = True
624 new = True
687 else:
625 else:
688 # XXX This is probably overkill for most cases, but we need this to
626 # XXX This is probably overkill for most cases, but we need this to
689 # fully replace the `normallookup` call with the `set_tracked` one.
627 # fully replace the `normallookup` call with the `set_tracked` one.
690 # Consider smoothing this in the future.
628 # Consider smoothing this in the future.
691 self.set_possibly_dirty(filename)
629 self.set_possibly_dirty(filename)
692 return new
630 return new
693
631
694 def set_untracked(self, f):
632 def set_untracked(self, f):
695 """Mark a file as no longer tracked in the dirstate map"""
633 """Mark a file as no longer tracked in the dirstate map"""
696 # passing in_merge only triggers more logic, so it is "fine" to pass it.
634 # passing in_merge only triggers more logic, so it is "fine" to pass it.
697 #
635 #
698 # the inner rust dirstate map code needs to be adjusted once the API
636 # the inner rust dirstate map code needs to be adjusted once the API
699 # for dirstate/dirstatemap/DirstateItem is a bit more settled
637 # for dirstate/dirstatemap/DirstateItem is a bit more settled
700 entry = self.get(f)
638 entry = self.get(f)
701 if entry is None:
639 if entry is None:
702 return False
640 return False
703 else:
641 else:
704 if entry.added:
642 if entry.added:
705 self._rustmap.drop_item_and_copy_source(f)
643 self._rustmap.drop_item_and_copy_source(f)
706 else:
644 else:
707 self._rustmap.removefile(f, in_merge=True)
645 self._rustmap.removefile(f, in_merge=True)
708 return True
646 return True
709
647
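The asymmetry between the two methods above is easy to miss: untracking an added file forgets it entirely, while untracking a file known to a parent records it as removed. A hedged restatement (illustrative only; `entry` stands in for a DirstateItem-like object):

def untrack_action(entry):
    # What set_untracked above does for a given entry.
    if entry is None:
        return 'nothing to do'
    if entry.added:
        return 'drop item and copy source'  # never in any parent: nothing to remember
    return 'mark as removed'                # a parent had it: keep that fact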
710 def removefile(self, *args, **kwargs):
648 def removefile(self, *args, **kwargs):
711 return self._rustmap.removefile(*args, **kwargs)
649 return self._rustmap.removefile(*args, **kwargs)
712
650
713 def nonnormalentries(self):
714 return self._rustmap.nonnormalentries()
715
716 def get(self, *args, **kwargs):
651 def get(self, *args, **kwargs):
717 return self._rustmap.get(*args, **kwargs)
652 return self._rustmap.get(*args, **kwargs)
718
653
719 @property
654 @property
720 def copymap(self):
655 def copymap(self):
721 return self._rustmap.copymap()
656 return self._rustmap.copymap()
722
657
723 def debug_iter(self, all):
658 def debug_iter(self, all):
724 """
659 """
725 Return an iterator of (filename, state, mode, size, mtime) tuples
660 Return an iterator of (filename, state, mode, size, mtime) tuples
726
661
727 `all`: also include dirstate tree nodes with `state == b' '` that
662 `all`: also include dirstate tree nodes with `state == b' '` that
728 don't have an associated `DirstateItem`.
663 don't have an associated `DirstateItem`.
729
664
730 """
665 """
731 return self._rustmap.debug_iter(all)
666 return self._rustmap.debug_iter(all)
732
667
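A possible usage sketch for the debug iterator (assuming `dmap` is an instance of this class; purely illustrative):

for filename, state, mode, size, mtime in dmap.debug_iter(False):
    # False: skip tree nodes that have no associated DirstateItem
    print(filename, state, mode, size, mtime)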
733 def preload(self):
668 def preload(self):
734 self._rustmap
669 self._rustmap
735
670
736 def clear(self):
671 def clear(self):
737 self._rustmap.clear()
672 self._rustmap.clear()
738 self.setparents(
673 self.setparents(
739 self._nodeconstants.nullid, self._nodeconstants.nullid
674 self._nodeconstants.nullid, self._nodeconstants.nullid
740 )
675 )
741 util.clearcachedproperty(self, b"_dirs")
676 util.clearcachedproperty(self, b"_dirs")
742 util.clearcachedproperty(self, b"_alldirs")
677 util.clearcachedproperty(self, b"_alldirs")
743 util.clearcachedproperty(self, b"dirfoldmap")
678 util.clearcachedproperty(self, b"dirfoldmap")
744
679
745 def items(self):
680 def items(self):
746 return self._rustmap.items()
681 return self._rustmap.items()
747
682
748 def keys(self):
683 def keys(self):
749 return iter(self._rustmap)
684 return iter(self._rustmap)
750
685
751 def __contains__(self, key):
686 def __contains__(self, key):
752 return key in self._rustmap
687 return key in self._rustmap
753
688
754 def __getitem__(self, item):
689 def __getitem__(self, item):
755 return self._rustmap[item]
690 return self._rustmap[item]
756
691
757 def __len__(self):
692 def __len__(self):
758 return len(self._rustmap)
693 return len(self._rustmap)
759
694
760 def __iter__(self):
695 def __iter__(self):
761 return iter(self._rustmap)
696 return iter(self._rustmap)
762
697
763 # forward for python2,3 compat
698 # forward for python2,3 compat
764 iteritems = items
699 iteritems = items
765
700
766 def _opendirstatefile(self):
701 def _opendirstatefile(self):
767 fp, mode = txnutil.trypending(
702 fp, mode = txnutil.trypending(
768 self._root, self._opener, self._filename
703 self._root, self._opener, self._filename
769 )
704 )
770 if self._pendingmode is not None and self._pendingmode != mode:
705 if self._pendingmode is not None and self._pendingmode != mode:
771 fp.close()
706 fp.close()
772 raise error.Abort(
707 raise error.Abort(
773 _(b'working directory state may be changed parallelly')
708 _(b'working directory state may be changed parallelly')
774 )
709 )
775 self._pendingmode = mode
710 self._pendingmode = mode
776 return fp
711 return fp
777
712
778 def _readdirstatefile(self, size=-1):
713 def _readdirstatefile(self, size=-1):
779 try:
714 try:
780 with self._opendirstatefile() as fp:
715 with self._opendirstatefile() as fp:
781 return fp.read(size)
716 return fp.read(size)
782 except IOError as err:
717 except IOError as err:
783 if err.errno != errno.ENOENT:
718 if err.errno != errno.ENOENT:
784 raise
719 raise
785 # File doesn't exist, so the current state is empty
720 # File doesn't exist, so the current state is empty
786 return b''
721 return b''
787
722
788 def setparents(self, p1, p2, fold_p2=False):
723 def setparents(self, p1, p2, fold_p2=False):
789 self._parents = (p1, p2)
724 self._parents = (p1, p2)
790 self._dirtyparents = True
725 self._dirtyparents = True
791 copies = {}
726 copies = {}
792 if fold_p2:
727 if fold_p2:
793 candidatefiles = self.non_normal_or_other_parent_paths()
728 # Collect into an intermediate list to avoid a `RuntimeError`
794
729 # exception due to mutation during iteration.
795 for f in candidatefiles:
730 # TODO: move this whole loop to Rust where `iter_mut`
796 s = self.get(f)
731 # enables in-place mutation of elements of a collection while
797 if s is None:
732 # iterating it, without mutating the collection itself.
798 continue
733 candidatefiles = [
799
734 (f, s)
735 for f, s in self._rustmap.items()
736 if s.merged or s.from_p2
737 ]
738 for f, s in candidatefiles:
800 # Discard "merged" markers when moving away from a merge state
739 # Discard "merged" markers when moving away from a merge state
801 if s.merged:
740 if s.merged:
802 source = self.copymap.get(f)
741 source = self.copymap.get(f)
803 if source:
742 if source:
804 copies[f] = source
743 copies[f] = source
805 self.reset_state(
744 self.reset_state(
806 f,
745 f,
807 wc_tracked=True,
746 wc_tracked=True,
808 p1_tracked=True,
747 p1_tracked=True,
809 possibly_dirty=True,
748 possibly_dirty=True,
810 )
749 )
811 # Also fix up otherparent markers
750 # Also fix up otherparent markers
812 elif s.from_p2:
751 elif s.from_p2:
813 source = self.copymap.get(f)
752 source = self.copymap.get(f)
814 if source:
753 if source:
815 copies[f] = source
754 copies[f] = source
816 self.reset_state(
755 self.reset_state(
817 f,
756 f,
818 p1_tracked=False,
757 p1_tracked=False,
819 wc_tracked=True,
758 wc_tracked=True,
820 )
759 )
821 return copies
760 return copies
822
761
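The intermediate list built above is needed because mutating a mapping while iterating it raises `RuntimeError` in Python, and the loop body calls `reset_state`, which writes back into the same map. A generic illustration of the snapshot-then-mutate pattern (plain dict, nothing Mercurial-specific):

d = {'a': 1, 'b': 2, 'c': 3}
# for k, v in d.items():          # RuntimeError: dictionary changed size during iteration
#     if v > 1:
#         d.pop(k)
for k in [k for k, v in d.items() if v > 1]:  # snapshot first, then mutate
    d.pop(k)
assert d == {'a': 1}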
823 def parents(self):
762 def parents(self):
824 if not self._parents:
763 if not self._parents:
825 if self._use_dirstate_v2:
764 if self._use_dirstate_v2:
826 self._parents = self.docket.parents
765 self._parents = self.docket.parents
827 else:
766 else:
828 read_len = self._nodelen * 2
767 read_len = self._nodelen * 2
829 st = self._readdirstatefile(read_len)
768 st = self._readdirstatefile(read_len)
830 l = len(st)
769 l = len(st)
831 if l == read_len:
770 if l == read_len:
832 self._parents = (
771 self._parents = (
833 st[: self._nodelen],
772 st[: self._nodelen],
834 st[self._nodelen : 2 * self._nodelen],
773 st[self._nodelen : 2 * self._nodelen],
835 )
774 )
836 elif l == 0:
775 elif l == 0:
837 self._parents = (
776 self._parents = (
838 self._nodeconstants.nullid,
777 self._nodeconstants.nullid,
839 self._nodeconstants.nullid,
778 self._nodeconstants.nullid,
840 )
779 )
841 else:
780 else:
842 raise error.Abort(
781 raise error.Abort(
843 _(b'working directory state appears damaged!')
782 _(b'working directory state appears damaged!')
844 )
783 )
845
784
846 return self._parents
785 return self._parents
847
786
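For dirstate-v1 the two parent nodes are simply the first `2 * nodelen` bytes of the file, which is exactly what the slicing above extracts. A standalone illustration (20-byte SHA-1 nodes assumed for the example; the trailing bytes stand in for the entries):

nodelen = 20
data = b'\x11' * nodelen + b'\x22' * nodelen + b'...entries...'
st = data[: 2 * nodelen]
p1, p2 = st[:nodelen], st[nodelen : 2 * nodelen]
assert p1 == b'\x11' * nodelen and p2 == b'\x22' * nodelen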
848 @property
787 @property
849 def docket(self):
788 def docket(self):
850 if not self._docket:
789 if not self._docket:
851 if not self._use_dirstate_v2:
790 if not self._use_dirstate_v2:
852 raise error.ProgrammingError(
791 raise error.ProgrammingError(
853 b'dirstate only has a docket in v2 format'
792 b'dirstate only has a docket in v2 format'
854 )
793 )
855 self._docket = docketmod.DirstateDocket.parse(
794 self._docket = docketmod.DirstateDocket.parse(
856 self._readdirstatefile(), self._nodeconstants
795 self._readdirstatefile(), self._nodeconstants
857 )
796 )
858 return self._docket
797 return self._docket
859
798
860 @propertycache
799 @propertycache
861 def _rustmap(self):
800 def _rustmap(self):
862 """
801 """
863 Fills the Dirstatemap when called.
802 Fills the Dirstatemap when called.
864 """
803 """
865 # ignore HG_PENDING because identity is used only for writing
804 # ignore HG_PENDING because identity is used only for writing
866 self.identity = util.filestat.frompath(
805 self.identity = util.filestat.frompath(
867 self._opener.join(self._filename)
806 self._opener.join(self._filename)
868 )
807 )
869
808
870 if self._use_dirstate_v2:
809 if self._use_dirstate_v2:
871 if self.docket.uuid:
810 if self.docket.uuid:
872 # TODO: use mmap when possible
811 # TODO: use mmap when possible
873 data = self._opener.read(self.docket.data_filename())
812 data = self._opener.read(self.docket.data_filename())
874 else:
813 else:
875 data = b''
814 data = b''
876 self._rustmap = rustmod.DirstateMap.new_v2(
815 self._rustmap = rustmod.DirstateMap.new_v2(
877 data, self.docket.data_size, self.docket.tree_metadata
816 data, self.docket.data_size, self.docket.tree_metadata
878 )
817 )
879 parents = self.docket.parents
818 parents = self.docket.parents
880 else:
819 else:
881 self._rustmap, parents = rustmod.DirstateMap.new_v1(
820 self._rustmap, parents = rustmod.DirstateMap.new_v1(
882 self._use_dirstate_tree, self._readdirstatefile()
821 self._use_dirstate_tree, self._readdirstatefile()
883 )
822 )
884
823
885 if parents and not self._dirtyparents:
824 if parents and not self._dirtyparents:
886 self.setparents(*parents)
825 self.setparents(*parents)
887
826
888 self.__contains__ = self._rustmap.__contains__
827 self.__contains__ = self._rustmap.__contains__
889 self.__getitem__ = self._rustmap.__getitem__
828 self.__getitem__ = self._rustmap.__getitem__
890 self.get = self._rustmap.get
829 self.get = self._rustmap.get
891 return self._rustmap
830 return self._rustmap
892
831
893 def write(self, tr, st, now):
832 def write(self, tr, st, now):
894 if not self._use_dirstate_v2:
833 if not self._use_dirstate_v2:
895 p1, p2 = self.parents()
834 p1, p2 = self.parents()
896 packed = self._rustmap.write_v1(p1, p2, now)
835 packed = self._rustmap.write_v1(p1, p2, now)
897 st.write(packed)
836 st.write(packed)
898 st.close()
837 st.close()
899 self._dirtyparents = False
838 self._dirtyparents = False
900 return
839 return
901
840
902 # We can only append to an existing data file if there is one
841 # We can only append to an existing data file if there is one
903 can_append = self.docket.uuid is not None
842 can_append = self.docket.uuid is not None
904 packed, meta, append = self._rustmap.write_v2(now, can_append)
843 packed, meta, append = self._rustmap.write_v2(now, can_append)
905 if append:
844 if append:
906 docket = self.docket
845 docket = self.docket
907 data_filename = docket.data_filename()
846 data_filename = docket.data_filename()
908 if tr:
847 if tr:
909 tr.add(data_filename, docket.data_size)
848 tr.add(data_filename, docket.data_size)
910 with self._opener(data_filename, b'r+b') as fp:
849 with self._opener(data_filename, b'r+b') as fp:
911 fp.seek(docket.data_size)
850 fp.seek(docket.data_size)
912 assert fp.tell() == docket.data_size
851 assert fp.tell() == docket.data_size
913 written = fp.write(packed)
852 written = fp.write(packed)
914 if written is not None: # py2 may return None
853 if written is not None: # py2 may return None
915 assert written == len(packed), (written, len(packed))
854 assert written == len(packed), (written, len(packed))
916 docket.data_size += len(packed)
855 docket.data_size += len(packed)
917 docket.parents = self.parents()
856 docket.parents = self.parents()
918 docket.tree_metadata = meta
857 docket.tree_metadata = meta
919 st.write(docket.serialize())
858 st.write(docket.serialize())
920 st.close()
859 st.close()
921 else:
860 else:
922 old_docket = self.docket
861 old_docket = self.docket
923 new_docket = docketmod.DirstateDocket.with_new_uuid(
862 new_docket = docketmod.DirstateDocket.with_new_uuid(
924 self.parents(), len(packed), meta
863 self.parents(), len(packed), meta
925 )
864 )
926 data_filename = new_docket.data_filename()
865 data_filename = new_docket.data_filename()
927 if tr:
866 if tr:
928 tr.add(data_filename, 0)
867 tr.add(data_filename, 0)
929 self._opener.write(data_filename, packed)
868 self._opener.write(data_filename, packed)
930 # Write the new docket after the new data file has been
869 # Write the new docket after the new data file has been
931 # written. Because `st` was opened with `atomictemp=True`,
870 # written. Because `st` was opened with `atomictemp=True`,
932 # the actual `.hg/dirstate` file is only affected on close.
871 # the actual `.hg/dirstate` file is only affected on close.
933 st.write(new_docket.serialize())
872 st.write(new_docket.serialize())
934 st.close()
873 st.close()
935 # Remove the old data file after the new docket pointing to
874 # Remove the old data file after the new docket pointing to
936 # the new data file was written.
875 # the new data file was written.
937 if old_docket.uuid:
876 if old_docket.uuid:
938 data_filename = old_docket.data_filename()
877 data_filename = old_docket.data_filename()
939 unlink = lambda _tr=None: self._opener.unlink(data_filename)
878 unlink = lambda _tr=None: self._opener.unlink(data_filename)
940 if tr:
879 if tr:
941 category = b"dirstate-v2-clean-" + old_docket.uuid
880 category = b"dirstate-v2-clean-" + old_docket.uuid
942 tr.addpostclose(category, unlink)
881 tr.addpostclose(category, unlink)
943 else:
882 else:
944 unlink()
883 unlink()
945 self._docket = new_docket
884 self._docket = new_docket
946 # Reload from the newly-written file
885 # Reload from the newly-written file
947 util.clearcachedproperty(self, b"_rustmap")
886 util.clearcachedproperty(self, b"_rustmap")
948 self._dirtyparents = False
887 self._dirtyparents = False
949
888
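A condensed restatement of the decision made above (descriptive only, no I/O; `has_data_file` corresponds to `docket.uuid is not None` and `rust_says_append` to the flag returned by `write_v2`):

def plan_v2_write(has_data_file, rust_says_append):
    if has_data_file and rust_says_append:
        return 'append packed bytes, bump docket.data_size, rewrite the docket'
    # Ordering matters in this branch: new data file first, then the new docket
    # (atomictemp, so .hg/dirstate only changes on close), and only then unlink
    # the old data file, at transaction close when a transaction is open.
    return 'write a new data file and docket, then remove the old data file'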
950 @propertycache
889 @propertycache
951 def filefoldmap(self):
890 def filefoldmap(self):
952 """Returns a dictionary mapping normalized case paths to their
891 """Returns a dictionary mapping normalized case paths to their
953 non-normalized versions.
892 non-normalized versions.
954 """
893 """
955 return self._rustmap.filefoldmapasdict()
894 return self._rustmap.filefoldmapasdict()
956
895
957 def hastrackeddir(self, d):
896 def hastrackeddir(self, d):
958 return self._rustmap.hastrackeddir(d)
897 return self._rustmap.hastrackeddir(d)
959
898
960 def hasdir(self, d):
899 def hasdir(self, d):
961 return self._rustmap.hasdir(d)
900 return self._rustmap.hasdir(d)
962
901
963 @propertycache
902 @propertycache
964 def identity(self):
903 def identity(self):
965 self._rustmap
904 self._rustmap
966 return self.identity
905 return self.identity
967
906
968 @property
969 def nonnormalset(self):
970 nonnorm = self._rustmap.non_normal_entries()
971 return nonnorm
972
973 @propertycache
974 def otherparentset(self):
975 otherparents = self._rustmap.other_parent_entries()
976 return otherparents
977
978 def non_normal_or_other_parent_paths(self):
979 return self._rustmap.non_normal_or_other_parent_paths()
980
981 @propertycache
907 @propertycache
982 def dirfoldmap(self):
908 def dirfoldmap(self):
983 f = {}
909 f = {}
984 normcase = util.normcase
910 normcase = util.normcase
985 for name in self._rustmap.tracked_dirs():
911 for name in self._rustmap.tracked_dirs():
986 f[normcase(name)] = name
912 f[normcase(name)] = name
987 return f
913 return f
988
914
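Both `filefoldmap` and `dirfoldmap` key entries by a case-normalized path so that case-insensitive filesystems can recover the on-disk spelling. A toy equivalent (using `bytes.lower()` where Mercurial uses `util.normcase`; the directory names are made up):

tracked_dirs = [b'Docs', b'src/Sub']
dirfoldmap = {name.lower(): name for name in tracked_dirs}
assert dirfoldmap[b'docs'] == b'Docs'
assert dirfoldmap[b'src/sub'] == b'src/Sub'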
989 def set_possibly_dirty(self, filename):
915 def set_possibly_dirty(self, filename):
990 """record that the current state of the file on disk is unknown"""
916 """record that the current state of the file on disk is unknown"""
991 entry = self[filename]
917 entry = self[filename]
992 entry.set_possibly_dirty()
918 entry.set_possibly_dirty()
993 self._rustmap.set_dirstate_item(filename, entry)
919 self._rustmap.set_dirstate_item(filename, entry)
994
920
995 def set_clean(self, filename, mode, size, mtime):
921 def set_clean(self, filename, mode, size, mtime):
996 """mark a file as back to a clean state"""
922 """mark a file as back to a clean state"""
997 entry = self[filename]
923 entry = self[filename]
998 mtime = mtime & rangemask
924 mtime = mtime & rangemask
999 size = size & rangemask
925 size = size & rangemask
1000 entry.set_clean(mode, size, mtime)
926 entry.set_clean(mode, size, mtime)
1001 self._rustmap.set_dirstate_item(filename, entry)
927 self._rustmap.set_dirstate_item(filename, entry)
1002 self._rustmap.copymap().pop(filename, None)
928 self._rustmap.copymap().pop(filename, None)
1003
929
1004 def __setitem__(self, key, value):
930 def __setitem__(self, key, value):
1005 assert isinstance(value, DirstateItem)
931 assert isinstance(value, DirstateItem)
1006 self._rustmap.set_dirstate_item(key, value)
932 self._rustmap.set_dirstate_item(key, value)
@@ -1,840 +1,824 b''
1 # parsers.py - Python implementation of parsers.c
1 # parsers.py - Python implementation of parsers.c
2 #
2 #
3 # Copyright 2009 Olivia Mackall <olivia@selenic.com> and others
3 # Copyright 2009 Olivia Mackall <olivia@selenic.com> and others
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import struct
10 import struct
11 import zlib
11 import zlib
12
12
13 from ..node import (
13 from ..node import (
14 nullrev,
14 nullrev,
15 sha1nodeconstants,
15 sha1nodeconstants,
16 )
16 )
17 from ..thirdparty import attr
17 from ..thirdparty import attr
18 from .. import (
18 from .. import (
19 error,
19 error,
20 pycompat,
20 pycompat,
21 revlogutils,
21 revlogutils,
22 util,
22 util,
23 )
23 )
24
24
25 from ..revlogutils import nodemap as nodemaputil
25 from ..revlogutils import nodemap as nodemaputil
26 from ..revlogutils import constants as revlog_constants
26 from ..revlogutils import constants as revlog_constants
27
27
28 stringio = pycompat.bytesio
28 stringio = pycompat.bytesio
29
29
30
30
31 _pack = struct.pack
31 _pack = struct.pack
32 _unpack = struct.unpack
32 _unpack = struct.unpack
33 _compress = zlib.compress
33 _compress = zlib.compress
34 _decompress = zlib.decompress
34 _decompress = zlib.decompress
35
35
36
36
37 # a special value used internally for `size` if the file comes from the other parent
37 # a special value used internally for `size` if the file comes from the other parent
38 FROM_P2 = -2
38 FROM_P2 = -2
39
39
40 # a special value used internally for `size` if the file is modified/merged/added
40 # a special value used internally for `size` if the file is modified/merged/added
41 NONNORMAL = -1
41 NONNORMAL = -1
42
42
43 # a special value used internally for `time` if the time is ambiguous
43 # a special value used internally for `time` if the time is ambiguous
44 AMBIGUOUS_TIME = -1
44 AMBIGUOUS_TIME = -1
45
45
46
46
47 @attr.s(slots=True, init=False)
47 @attr.s(slots=True, init=False)
48 class DirstateItem(object):
48 class DirstateItem(object):
49 """represent a dirstate entry
49 """represent a dirstate entry
50
50
51 It contains:
51 It contains:
52
52
53 - state (one of 'n', 'a', 'r', 'm')
53 - state (one of 'n', 'a', 'r', 'm')
54 - mode,
54 - mode,
55 - size,
55 - size,
56 - mtime,
56 - mtime,
57 """
57 """
58
58
59 _wc_tracked = attr.ib()
59 _wc_tracked = attr.ib()
60 _p1_tracked = attr.ib()
60 _p1_tracked = attr.ib()
61 _p2_tracked = attr.ib()
61 _p2_tracked = attr.ib()
62 # the three items above should probably be combined
62 # the three items above should probably be combined
63 #
63 #
64 # However it is unclear if they properly cover some of the most advanced
64 # However it is unclear if they properly cover some of the most advanced
65 # merge cases. So we should probably wait on this to be settled.
65 # merge cases. So we should probably wait on this to be settled.
66 _merged = attr.ib()
66 _merged = attr.ib()
67 _clean_p1 = attr.ib()
67 _clean_p1 = attr.ib()
68 _clean_p2 = attr.ib()
68 _clean_p2 = attr.ib()
69 _possibly_dirty = attr.ib()
69 _possibly_dirty = attr.ib()
70 _mode = attr.ib()
70 _mode = attr.ib()
71 _size = attr.ib()
71 _size = attr.ib()
72 _mtime = attr.ib()
72 _mtime = attr.ib()
73
73
74 def __init__(
74 def __init__(
75 self,
75 self,
76 wc_tracked=False,
76 wc_tracked=False,
77 p1_tracked=False,
77 p1_tracked=False,
78 p2_tracked=False,
78 p2_tracked=False,
79 merged=False,
79 merged=False,
80 clean_p1=False,
80 clean_p1=False,
81 clean_p2=False,
81 clean_p2=False,
82 possibly_dirty=False,
82 possibly_dirty=False,
83 parentfiledata=None,
83 parentfiledata=None,
84 ):
84 ):
85 if merged and (clean_p1 or clean_p2):
85 if merged and (clean_p1 or clean_p2):
86 msg = b'`merged` argument incompatible with `clean_p1`/`clean_p2`'
86 msg = b'`merged` argument incompatible with `clean_p1`/`clean_p2`'
87 raise error.ProgrammingError(msg)
87 raise error.ProgrammingError(msg)
88
88
89 self._wc_tracked = wc_tracked
89 self._wc_tracked = wc_tracked
90 self._p1_tracked = p1_tracked
90 self._p1_tracked = p1_tracked
91 self._p2_tracked = p2_tracked
91 self._p2_tracked = p2_tracked
92 self._merged = merged
92 self._merged = merged
93 self._clean_p1 = clean_p1
93 self._clean_p1 = clean_p1
94 self._clean_p2 = clean_p2
94 self._clean_p2 = clean_p2
95 self._possibly_dirty = possibly_dirty
95 self._possibly_dirty = possibly_dirty
96 if parentfiledata is None:
96 if parentfiledata is None:
97 self._mode = None
97 self._mode = None
98 self._size = None
98 self._size = None
99 self._mtime = None
99 self._mtime = None
100 else:
100 else:
101 self._mode = parentfiledata[0]
101 self._mode = parentfiledata[0]
102 self._size = parentfiledata[1]
102 self._size = parentfiledata[1]
103 self._mtime = parentfiledata[2]
103 self._mtime = parentfiledata[2]
104
104
105 @classmethod
105 @classmethod
106 def new_added(cls):
106 def new_added(cls):
107 """constructor to help legacy API to build a new "added" item
107 """constructor to help legacy API to build a new "added" item
108
108
109 Should eventually be removed
109 Should eventually be removed
110 """
110 """
111 instance = cls()
111 instance = cls()
112 instance._wc_tracked = True
112 instance._wc_tracked = True
113 instance._p1_tracked = False
113 instance._p1_tracked = False
114 instance._p2_tracked = False
114 instance._p2_tracked = False
115 return instance
115 return instance
116
116
117 @classmethod
117 @classmethod
118 def new_merged(cls):
118 def new_merged(cls):
119 """constructor to help legacy API to build a new "merged" item
119 """constructor to help legacy API to build a new "merged" item
120
120
121 Should eventually be removed
121 Should eventually be removed
122 """
122 """
123 instance = cls()
123 instance = cls()
124 instance._wc_tracked = True
124 instance._wc_tracked = True
125 instance._p1_tracked = True # might not be True because of rename ?
125 instance._p1_tracked = True # might not be True because of rename ?
126 instance._p2_tracked = True # might not be True because of rename ?
126 instance._p2_tracked = True # might not be True because of rename ?
127 instance._merged = True
127 instance._merged = True
128 return instance
128 return instance
129
129
130 @classmethod
130 @classmethod
131 def new_from_p2(cls):
131 def new_from_p2(cls):
132 """constructor to help legacy API to build a new "from_p2" item
132 """constructor to help legacy API to build a new "from_p2" item
133
133
134 Should eventually be removed
134 Should eventually be removed
135 """
135 """
136 instance = cls()
136 instance = cls()
137 instance._wc_tracked = True
137 instance._wc_tracked = True
138 instance._p1_tracked = False # might actually be True
138 instance._p1_tracked = False # might actually be True
139 instance._p2_tracked = True
139 instance._p2_tracked = True
140 instance._clean_p2 = True
140 instance._clean_p2 = True
141 return instance
141 return instance
142
142
143 @classmethod
143 @classmethod
144 def new_possibly_dirty(cls):
144 def new_possibly_dirty(cls):
145 """constructor to help legacy API to build a new "possibly_dirty" item
145 """constructor to help legacy API to build a new "possibly_dirty" item
146
146
147 Should eventually be removed
147 Should eventually be removed
148 """
148 """
149 instance = cls()
149 instance = cls()
150 instance._wc_tracked = True
150 instance._wc_tracked = True
151 instance._p1_tracked = True
151 instance._p1_tracked = True
152 instance._possibly_dirty = True
152 instance._possibly_dirty = True
153 return instance
153 return instance
154
154
155 @classmethod
155 @classmethod
156 def new_normal(cls, mode, size, mtime):
156 def new_normal(cls, mode, size, mtime):
157 """constructor to help legacy API to build a new "normal" item
157 """constructor to help legacy API to build a new "normal" item
158
158
159 Should eventually be removed
159 Should eventually be removed
160 """
160 """
161 assert size != FROM_P2
161 assert size != FROM_P2
162 assert size != NONNORMAL
162 assert size != NONNORMAL
163 instance = cls()
163 instance = cls()
164 instance._wc_tracked = True
164 instance._wc_tracked = True
165 instance._p1_tracked = True
165 instance._p1_tracked = True
166 instance._mode = mode
166 instance._mode = mode
167 instance._size = size
167 instance._size = size
168 instance._mtime = mtime
168 instance._mtime = mtime
169 return instance
169 return instance
170
170
171 @classmethod
171 @classmethod
172 def from_v1_data(cls, state, mode, size, mtime):
172 def from_v1_data(cls, state, mode, size, mtime):
173 """Build a new DirstateItem object from V1 data
173 """Build a new DirstateItem object from V1 data
174
174
175 Since the dirstate-v1 format is frozen, the signature of this function
175 Since the dirstate-v1 format is frozen, the signature of this function
176 is not expected to change, unlike the __init__ one.
176 is not expected to change, unlike the __init__ one.
177 """
177 """
178 if state == b'm':
178 if state == b'm':
179 return cls.new_merged()
179 return cls.new_merged()
180 elif state == b'a':
180 elif state == b'a':
181 return cls.new_added()
181 return cls.new_added()
182 elif state == b'r':
182 elif state == b'r':
183 instance = cls()
183 instance = cls()
184 instance._wc_tracked = False
184 instance._wc_tracked = False
185 if size == NONNORMAL:
185 if size == NONNORMAL:
186 instance._merged = True
186 instance._merged = True
187 instance._p1_tracked = (
187 instance._p1_tracked = (
188 True # might not be True because of rename ?
188 True # might not be True because of rename ?
189 )
189 )
190 instance._p2_tracked = (
190 instance._p2_tracked = (
191 True # might not be True because of rename ?
191 True # might not be True because of rename ?
192 )
192 )
193 elif size == FROM_P2:
193 elif size == FROM_P2:
194 instance._clean_p2 = True
194 instance._clean_p2 = True
195 instance._p1_tracked = (
195 instance._p1_tracked = (
196 False # We actually don't know (file history)
196 False # We actually don't know (file history)
197 )
197 )
198 instance._p2_tracked = True
198 instance._p2_tracked = True
199 else:
199 else:
200 instance._p1_tracked = True
200 instance._p1_tracked = True
201 return instance
201 return instance
202 elif state == b'n':
202 elif state == b'n':
203 if size == FROM_P2:
203 if size == FROM_P2:
204 return cls.new_from_p2()
204 return cls.new_from_p2()
205 elif size == NONNORMAL:
205 elif size == NONNORMAL:
206 return cls.new_possibly_dirty()
206 return cls.new_possibly_dirty()
207 elif mtime == AMBIGUOUS_TIME:
207 elif mtime == AMBIGUOUS_TIME:
208 instance = cls.new_normal(mode, size, 42)
208 instance = cls.new_normal(mode, size, 42)
209 instance._mtime = None
209 instance._mtime = None
210 instance._possibly_dirty = True
210 instance._possibly_dirty = True
211 return instance
211 return instance
212 else:
212 else:
213 return cls.new_normal(mode, size, mtime)
213 return cls.new_normal(mode, size, mtime)
214 else:
214 else:
215 raise RuntimeError(b'unknown state: %s' % state)
215 raise RuntimeError(b'unknown state: %s' % state)
216
216
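The decoding above hinges on two sentinel sizes and one sentinel mtime. A compact restatement of the mapping (plain Python mirroring `from_v1_data`; the constants are the ones defined near the top of this file):

FROM_P2, NONNORMAL, AMBIGUOUS_TIME = -2, -1, -1

def v1_kind(state, size, mtime):
    if state == b'm':
        return 'merged'
    if state == b'a':
        return 'added'
    if state == b'r':
        if size == NONNORMAL:
            return 'removed, was merged'
        if size == FROM_P2:
            return 'removed, was from p2'
        return 'removed'
    if state == b'n':
        if size == FROM_P2:
            return 'from p2'
        if size == NONNORMAL:
            return 'possibly dirty'
        if mtime == AMBIGUOUS_TIME:
            return 'normal, mtime ambiguous'
        return 'normal'
    raise ValueError('unknown state: %r' % state)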
217 def set_possibly_dirty(self):
217 def set_possibly_dirty(self):
218 """Mark a file as "possibly dirty"
218 """Mark a file as "possibly dirty"
219
219
220 This means the next status call will have to actually check its content
220 This means the next status call will have to actually check its content
221 to make sure it is correct.
221 to make sure it is correct.
222 """
222 """
223 self._possibly_dirty = True
223 self._possibly_dirty = True
224
224
225 def set_clean(self, mode, size, mtime):
225 def set_clean(self, mode, size, mtime):
226 """mark a file as "clean" cancelling potential "possibly dirty call"
226 """mark a file as "clean" cancelling potential "possibly dirty call"
227
227
228 Note: this function is a descendant of `dirstate.normal` and is
228 Note: this function is a descendant of `dirstate.normal` and is
229 currently expected to be called on "normal" entries only. There is no
229 currently expected to be called on "normal" entries only. There is no
230 reason for this not to change in the future as long as the code is
230 reason for this not to change in the future as long as the code is
231 updated to preserve the proper state of the non-normal files.
231 updated to preserve the proper state of the non-normal files.
232 """
232 """
233 self._wc_tracked = True
233 self._wc_tracked = True
234 self._p1_tracked = True
234 self._p1_tracked = True
235 self._p2_tracked = False # this might be wrong
235 self._p2_tracked = False # this might be wrong
236 self._merged = False
236 self._merged = False
237 self._clean_p2 = False
237 self._clean_p2 = False
238 self._possibly_dirty = False
238 self._possibly_dirty = False
239 self._mode = mode
239 self._mode = mode
240 self._size = size
240 self._size = size
241 self._mtime = mtime
241 self._mtime = mtime
242
242
243 def set_tracked(self):
243 def set_tracked(self):
244 """mark a file as tracked in the working copy
244 """mark a file as tracked in the working copy
245
245
246 This will ultimately be called by commands like `hg add`.
246 This will ultimately be called by commands like `hg add`.
247 """
247 """
248 self._wc_tracked = True
248 self._wc_tracked = True
249 # `set_tracked` is replacing various `normallookup` call. So we set
249 # `set_tracked` is replacing various `normallookup` call. So we set
250 # "possibly dirty" to stay on the safe side.
250 # "possibly dirty" to stay on the safe side.
251 #
251 #
252 # Consider dropping this in the future in favor of something less broad.
252 # Consider dropping this in the future in favor of something less broad.
253 self._possibly_dirty = True
253 self._possibly_dirty = True
254
254
255 def set_untracked(self):
255 def set_untracked(self):
256 """mark a file as untracked in the working copy
256 """mark a file as untracked in the working copy
257
257
258 This will ultimately be called by commands like `hg remove`.
258 This will ultimately be called by commands like `hg remove`.
259 """
259 """
260 # backup the previous state (useful for merge)
260 # backup the previous state (useful for merge)
261 self._wc_tracked = False
261 self._wc_tracked = False
262 self._mode = None
262 self._mode = None
263 self._size = None
263 self._size = None
264 self._mtime = None
264 self._mtime = None
265
265
266 def drop_merge_data(self):
266 def drop_merge_data(self):
267 """remove all "merge-only" from a DirstateItem
267 """remove all "merge-only" from a DirstateItem
268
268
269 This is to be called by the dirstatemap code when the second parent is dropped
269 This is to be called by the dirstatemap code when the second parent is dropped
270 """
270 """
271 if not (self.merged or self.from_p2):
271 if not (self.merged or self.from_p2):
272 return
272 return
273 self._p1_tracked = self.merged # why is this not already properly set ?
273 self._p1_tracked = self.merged # why is this not already properly set ?
274
274
275 self._merged = False
275 self._merged = False
276 self._clean_p1 = False
276 self._clean_p1 = False
277 self._clean_p2 = False
277 self._clean_p2 = False
278 self._p2_tracked = False
278 self._p2_tracked = False
279 self._possibly_dirty = True
279 self._possibly_dirty = True
280 self._mode = None
280 self._mode = None
281 self._size = None
281 self._size = None
282 self._mtime = None
282 self._mtime = None
283
283
284 @property
284 @property
285 def mode(self):
285 def mode(self):
286 return self.v1_mode()
286 return self.v1_mode()
287
287
288 @property
288 @property
289 def size(self):
289 def size(self):
290 return self.v1_size()
290 return self.v1_size()
291
291
292 @property
292 @property
293 def mtime(self):
293 def mtime(self):
294 return self.v1_mtime()
294 return self.v1_mtime()
295
295
296 @property
296 @property
297 def state(self):
297 def state(self):
298 """
298 """
299 States are:
299 States are:
300 n normal
300 n normal
301 m needs merging
301 m needs merging
302 r marked for removal
302 r marked for removal
303 a marked for addition
303 a marked for addition
304
304
305 XXX This "state" is a bit obscure and mostly a direct expression of the
305 XXX This "state" is a bit obscure and mostly a direct expression of the
306 dirstatev1 format. It would make sense to ultimately deprecate it in
306 dirstatev1 format. It would make sense to ultimately deprecate it in
307 favor of the more "semantic" attributes.
307 favor of the more "semantic" attributes.
308 """
308 """
309 return self.v1_state()
309 return self.v1_state()
310
310
311 @property
311 @property
312 def tracked(self):
312 def tracked(self):
313 """True is the file is tracked in the working copy"""
313 """True is the file is tracked in the working copy"""
314 return self._wc_tracked
314 return self._wc_tracked
315
315
316 @property
316 @property
317 def added(self):
317 def added(self):
318 """True if the file has been added"""
318 """True if the file has been added"""
319 return self._wc_tracked and not (self._p1_tracked or self._p2_tracked)
319 return self._wc_tracked and not (self._p1_tracked or self._p2_tracked)
320
320
321 @property
321 @property
322 def merged(self):
322 def merged(self):
323 """True if the file has been merged
323 """True if the file has been merged
324
324
325 Should only be set if a merge is in progress in the dirstate
325 Should only be set if a merge is in progress in the dirstate
326 """
326 """
327 return self._wc_tracked and self._merged
327 return self._wc_tracked and self._merged
328
328
329 @property
329 @property
330 def from_p2(self):
330 def from_p2(self):
331 """True if the file have been fetched from p2 during the current merge
331 """True if the file have been fetched from p2 during the current merge
332
332
333 This is only True is the file is currently tracked.
333 This is only True is the file is currently tracked.
334
334
335 Should only be set if a merge is in progress in the dirstate
335 Should only be set if a merge is in progress in the dirstate
336 """
336 """
337 if not self._wc_tracked:
337 if not self._wc_tracked:
338 return False
338 return False
339 return self._clean_p2 or (not self._p1_tracked and self._p2_tracked)
339 return self._clean_p2 or (not self._p1_tracked and self._p2_tracked)
340
340
341 @property
341 @property
342 def from_p2_removed(self):
342 def from_p2_removed(self):
343 """True if the file has been removed, but was "from_p2" initially
343 """True if the file has been removed, but was "from_p2" initially
344
344
345 This property seems like an abstraction leakage and should probably be
345 This property seems like an abstraction leakage and should probably be
346 dealt with in this class (or maybe the dirstatemap) directly.
346 dealt with in this class (or maybe the dirstatemap) directly.
347 """
347 """
348 return self.removed and self._clean_p2
348 return self.removed and self._clean_p2
349
349
350 @property
350 @property
351 def removed(self):
351 def removed(self):
352 """True if the file has been removed"""
352 """True if the file has been removed"""
353 return not self._wc_tracked and (self._p1_tracked or self._p2_tracked)
353 return not self._wc_tracked and (self._p1_tracked or self._p2_tracked)
354
354
355 @property
355 @property
356 def merged_removed(self):
356 def merged_removed(self):
357 """True if the file has been removed, but was "merged" initially
357 """True if the file has been removed, but was "merged" initially
358
358
359 This property seems like an abstraction leakage and should probably be
359 This property seems like an abstraction leakage and should probably be
360 dealt with in this class (or maybe the dirstatemap) directly.
360 dealt with in this class (or maybe the dirstatemap) directly.
361 """
361 """
362 return self.removed and self._merged
362 return self.removed and self._merged
363
363
364 @property
365 def dm_nonnormal(self):
366 """True is the entry is non-normal in the dirstatemap sense
367
368 There is no reason for any code, but the dirstatemap one to use this.
369 """
370 return self.v1_state() != b'n' or self.v1_mtime() == AMBIGUOUS_TIME
371
372 @property
373 def dm_otherparent(self):
374 """True is the entry is `otherparent` in the dirstatemap sense
375
376 There is no reason for any code, but the dirstatemap one to use this.
377 """
378 return self.v1_size() == FROM_P2
379
380 def v1_state(self):
364 def v1_state(self):
381 """return a "state" suitable for v1 serialization"""
365 """return a "state" suitable for v1 serialization"""
382 if not (self._p1_tracked or self._p2_tracked or self._wc_tracked):
366 if not (self._p1_tracked or self._p2_tracked or self._wc_tracked):
383 # the object has no state to record, this is -currently-
367 # the object has no state to record, this is -currently-
384 # unsupported
368 # unsupported
385 raise RuntimeError('untracked item')
369 raise RuntimeError('untracked item')
386 elif self.removed:
370 elif self.removed:
387 return b'r'
371 return b'r'
388 elif self.merged:
372 elif self.merged:
389 return b'm'
373 return b'm'
390 elif self.added:
374 elif self.added:
391 return b'a'
375 return b'a'
392 else:
376 else:
393 return b'n'
377 return b'n'
394
378
395 def v1_mode(self):
379 def v1_mode(self):
396 """return a "mode" suitable for v1 serialization"""
380 """return a "mode" suitable for v1 serialization"""
397 return self._mode if self._mode is not None else 0
381 return self._mode if self._mode is not None else 0
398
382
399 def v1_size(self):
383 def v1_size(self):
400 """return a "size" suitable for v1 serialization"""
384 """return a "size" suitable for v1 serialization"""
401 if not (self._p1_tracked or self._p2_tracked or self._wc_tracked):
385 if not (self._p1_tracked or self._p2_tracked or self._wc_tracked):
402 # the object has no state to record, this is -currently-
386 # the object has no state to record, this is -currently-
403 # unsupported
387 # unsupported
404 raise RuntimeError('untracked item')
388 raise RuntimeError('untracked item')
405 elif self.merged_removed:
389 elif self.merged_removed:
406 return NONNORMAL
390 return NONNORMAL
407 elif self.from_p2_removed:
391 elif self.from_p2_removed:
408 return FROM_P2
392 return FROM_P2
409 elif self.removed:
393 elif self.removed:
410 return 0
394 return 0
411 elif self.merged:
395 elif self.merged:
412 return FROM_P2
396 return FROM_P2
413 elif self.added:
397 elif self.added:
414 return NONNORMAL
398 return NONNORMAL
415 elif self.from_p2:
399 elif self.from_p2:
416 return FROM_P2
400 return FROM_P2
417 elif self._possibly_dirty:
401 elif self._possibly_dirty:
418 return self._size if self._size is not None else NONNORMAL
402 return self._size if self._size is not None else NONNORMAL
419 else:
403 else:
420 return self._size
404 return self._size
421
405
422 def v1_mtime(self):
406 def v1_mtime(self):
423 """return a "mtime" suitable for v1 serialization"""
407 """return a "mtime" suitable for v1 serialization"""
424 if not (self._p1_tracked or self._p2_tracked or self._wc_tracked):
408 if not (self._p1_tracked or self._p2_tracked or self._wc_tracked):
425 # the object has no state to record, this is -currently-
409 # the object has no state to record, this is -currently-
426 # unsupported
410 # unsupported
427 raise RuntimeError('untracked item')
411 raise RuntimeError('untracked item')
428 elif self.removed:
412 elif self.removed:
429 return 0
413 return 0
430 elif self._possibly_dirty:
414 elif self._possibly_dirty:
431 return AMBIGUOUS_TIME
415 return AMBIGUOUS_TIME
432 elif self.merged:
416 elif self.merged:
433 return AMBIGUOUS_TIME
417 return AMBIGUOUS_TIME
434 elif self.added:
418 elif self.added:
435 return AMBIGUOUS_TIME
419 return AMBIGUOUS_TIME
436 elif self.from_p2:
420 elif self.from_p2:
437 return AMBIGUOUS_TIME
421 return AMBIGUOUS_TIME
438 else:
422 else:
439 return self._mtime if self._mtime is not None else 0
423 return self._mtime if self._mtime is not None else 0
440
424
441 def need_delay(self, now):
425 def need_delay(self, now):
442 """True if the stored mtime would be ambiguous with the current time"""
426 """True if the stored mtime would be ambiguous with the current time"""
443 return self.v1_state() == b'n' and self.v1_mtime() == now
427 return self.v1_state() == b'n' and self.v1_mtime() == now
444
428
445
429
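`need_delay` exists because an mtime equal to the moment the dirstate is written cannot be trusted: a file changed again within that same second keeps the same mtime, and a later status run could wrongly consider it clean. A small illustration of the check (plain integers standing in for the v1 fields):

def need_delay(v1_state, v1_mtime, now):
    # A clean ('n') entry whose recorded mtime equals the write time is ambiguous.
    return v1_state == b'n' and v1_mtime == now

now = 1700000000
assert need_delay(b'n', now, now)          # just-written clean entry: ambiguous
assert not need_delay(b'n', now - 5, now)  # older mtime: safe to trust
assert not need_delay(b'r', now, now)      # non-normal entries never need the delay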
446 def gettype(q):
430 def gettype(q):
447 return int(q & 0xFFFF)
431 return int(q & 0xFFFF)
448
432
449
433
450 class BaseIndexObject(object):
434 class BaseIndexObject(object):
451 # Can I be passed to an algorithm implemented in Rust?
435 # Can I be passed to an algorithm implemented in Rust?
452 rust_ext_compat = 0
436 rust_ext_compat = 0
453 # Format of an index entry according to Python's `struct` language
437 # Format of an index entry according to Python's `struct` language
454 index_format = revlog_constants.INDEX_ENTRY_V1
438 index_format = revlog_constants.INDEX_ENTRY_V1
455 # Size of a C unsigned long long int, platform independent
439 # Size of a C unsigned long long int, platform independent
456 big_int_size = struct.calcsize(b'>Q')
440 big_int_size = struct.calcsize(b'>Q')
457 # Size of a C long int, platform independent
441 # Size of a C long int, platform independent
458 int_size = struct.calcsize(b'>i')
442 int_size = struct.calcsize(b'>i')
459 # An empty index entry, used as a default value to be overridden, or nullrev
443 # An empty index entry, used as a default value to be overridden, or nullrev
460 null_item = (
444 null_item = (
461 0,
445 0,
462 0,
446 0,
463 0,
447 0,
464 -1,
448 -1,
465 -1,
449 -1,
466 -1,
450 -1,
467 -1,
451 -1,
468 sha1nodeconstants.nullid,
452 sha1nodeconstants.nullid,
469 0,
453 0,
470 0,
454 0,
471 revlog_constants.COMP_MODE_INLINE,
455 revlog_constants.COMP_MODE_INLINE,
472 revlog_constants.COMP_MODE_INLINE,
456 revlog_constants.COMP_MODE_INLINE,
473 )
457 )
474
458
475 @util.propertycache
459 @util.propertycache
476 def entry_size(self):
460 def entry_size(self):
477 return self.index_format.size
461 return self.index_format.size
478
462
479 @property
463 @property
480 def nodemap(self):
464 def nodemap(self):
481 msg = b"index.nodemap is deprecated, use index.[has_node|rev|get_rev]"
465 msg = b"index.nodemap is deprecated, use index.[has_node|rev|get_rev]"
482 util.nouideprecwarn(msg, b'5.3', stacklevel=2)
466 util.nouideprecwarn(msg, b'5.3', stacklevel=2)
483 return self._nodemap
467 return self._nodemap
484
468
485 @util.propertycache
469 @util.propertycache
486 def _nodemap(self):
470 def _nodemap(self):
487 nodemap = nodemaputil.NodeMap({sha1nodeconstants.nullid: nullrev})
471 nodemap = nodemaputil.NodeMap({sha1nodeconstants.nullid: nullrev})
488 for r in range(0, len(self)):
472 for r in range(0, len(self)):
489 n = self[r][7]
473 n = self[r][7]
490 nodemap[n] = r
474 nodemap[n] = r
491 return nodemap
475 return nodemap
492
476
493 def has_node(self, node):
477 def has_node(self, node):
494 """return True if the node exist in the index"""
478 """return True if the node exist in the index"""
495 return node in self._nodemap
479 return node in self._nodemap
496
480
497 def rev(self, node):
481 def rev(self, node):
498 """return a revision for a node
482 """return a revision for a node
499
483
500 If the node is unknown, raise a RevlogError"""
484 If the node is unknown, raise a RevlogError"""
501 return self._nodemap[node]
485 return self._nodemap[node]
502
486
503 def get_rev(self, node):
487 def get_rev(self, node):
504 """return a revision for a node
488 """return a revision for a node
505
489
506 If the node is unknown, return None"""
490 If the node is unknown, return None"""
507 return self._nodemap.get(node)
491 return self._nodemap.get(node)
508
492
509 def _stripnodes(self, start):
493 def _stripnodes(self, start):
510 if '_nodemap' in vars(self):
494 if '_nodemap' in vars(self):
511 for r in range(start, len(self)):
495 for r in range(start, len(self)):
512 n = self[r][7]
496 n = self[r][7]
513 del self._nodemap[n]
497 del self._nodemap[n]
514
498
515 def clearcaches(self):
499 def clearcaches(self):
516 self.__dict__.pop('_nodemap', None)
500 self.__dict__.pop('_nodemap', None)
517
501
518 def __len__(self):
502 def __len__(self):
519 return self._lgt + len(self._extra)
503 return self._lgt + len(self._extra)
520
504
521 def append(self, tup):
505 def append(self, tup):
522 if '_nodemap' in vars(self):
506 if '_nodemap' in vars(self):
523 self._nodemap[tup[7]] = len(self)
507 self._nodemap[tup[7]] = len(self)
524 data = self._pack_entry(len(self), tup)
508 data = self._pack_entry(len(self), tup)
525 self._extra.append(data)
509 self._extra.append(data)
526
510
527 def _pack_entry(self, rev, entry):
511 def _pack_entry(self, rev, entry):
528 assert entry[8] == 0
512 assert entry[8] == 0
529 assert entry[9] == 0
513 assert entry[9] == 0
530 return self.index_format.pack(*entry[:8])
514 return self.index_format.pack(*entry[:8])
531
515
532 def _check_index(self, i):
516 def _check_index(self, i):
533 if not isinstance(i, int):
517 if not isinstance(i, int):
534 raise TypeError(b"expecting int indexes")
518 raise TypeError(b"expecting int indexes")
535 if i < 0 or i >= len(self):
519 if i < 0 or i >= len(self):
536 raise IndexError
520 raise IndexError
537
521
538 def __getitem__(self, i):
522 def __getitem__(self, i):
539 if i == -1:
523 if i == -1:
540 return self.null_item
524 return self.null_item
541 self._check_index(i)
525 self._check_index(i)
542 if i >= self._lgt:
526 if i >= self._lgt:
543 data = self._extra[i - self._lgt]
527 data = self._extra[i - self._lgt]
544 else:
528 else:
545 index = self._calculate_index(i)
529 index = self._calculate_index(i)
546 data = self._data[index : index + self.entry_size]
530 data = self._data[index : index + self.entry_size]
547 r = self._unpack_entry(i, data)
531 r = self._unpack_entry(i, data)
548 if self._lgt and i == 0:
532 if self._lgt and i == 0:
549 offset = revlogutils.offset_type(0, gettype(r[0]))
533 offset = revlogutils.offset_type(0, gettype(r[0]))
550 r = (offset,) + r[1:]
534 r = (offset,) + r[1:]
551 return r
535 return r
552
536
553 def _unpack_entry(self, rev, data):
537 def _unpack_entry(self, rev, data):
554 r = self.index_format.unpack(data)
538 r = self.index_format.unpack(data)
555 r = r + (
539 r = r + (
556 0,
540 0,
557 0,
541 0,
558 revlog_constants.COMP_MODE_INLINE,
542 revlog_constants.COMP_MODE_INLINE,
559 revlog_constants.COMP_MODE_INLINE,
543 revlog_constants.COMP_MODE_INLINE,
560 )
544 )
561 return r
545 return r
562
546
563 def pack_header(self, header):
547 def pack_header(self, header):
564 """pack header information as binary"""
548 """pack header information as binary"""
565 v_fmt = revlog_constants.INDEX_HEADER
549 v_fmt = revlog_constants.INDEX_HEADER
566 return v_fmt.pack(header)
550 return v_fmt.pack(header)
567
551
568 def entry_binary(self, rev):
552 def entry_binary(self, rev):
569 """return the raw binary string representing a revision"""
553 """return the raw binary string representing a revision"""
570 entry = self[rev]
554 entry = self[rev]
571 p = revlog_constants.INDEX_ENTRY_V1.pack(*entry[:8])
555 p = revlog_constants.INDEX_ENTRY_V1.pack(*entry[:8])
572 if rev == 0:
556 if rev == 0:
573 p = p[revlog_constants.INDEX_HEADER.size :]
557 p = p[revlog_constants.INDEX_HEADER.size :]
574 return p
558 return p
575
559
576
560
577 class IndexObject(BaseIndexObject):
561 class IndexObject(BaseIndexObject):
578 def __init__(self, data):
562 def __init__(self, data):
579 assert len(data) % self.entry_size == 0, (
563 assert len(data) % self.entry_size == 0, (
580 len(data),
564 len(data),
581 self.entry_size,
565 self.entry_size,
582 len(data) % self.entry_size,
566 len(data) % self.entry_size,
583 )
567 )
584 self._data = data
568 self._data = data
585 self._lgt = len(data) // self.entry_size
569 self._lgt = len(data) // self.entry_size
586 self._extra = []
570 self._extra = []
587
571
588 def _calculate_index(self, i):
572 def _calculate_index(self, i):
589 return i * self.entry_size
573 return i * self.entry_size
590
574
591 def __delitem__(self, i):
575 def __delitem__(self, i):
592 if not isinstance(i, slice) or not i.stop == -1 or i.step is not None:
576 if not isinstance(i, slice) or not i.stop == -1 or i.step is not None:
593 raise ValueError(b"deleting slices only supports a:-1 with step 1")
577 raise ValueError(b"deleting slices only supports a:-1 with step 1")
594 i = i.start
578 i = i.start
595 self._check_index(i)
579 self._check_index(i)
596 self._stripnodes(i)
580 self._stripnodes(i)
597 if i < self._lgt:
581 if i < self._lgt:
598 self._data = self._data[: i * self.entry_size]
582 self._data = self._data[: i * self.entry_size]
599 self._lgt = i
583 self._lgt = i
600 self._extra = []
584 self._extra = []
601 else:
585 else:
602 self._extra = self._extra[: i - self._lgt]
586 self._extra = self._extra[: i - self._lgt]
603
587
604
588
605 class PersistentNodeMapIndexObject(IndexObject):
589 class PersistentNodeMapIndexObject(IndexObject):
606 """a Debug oriented class to test persistent nodemap
590 """a Debug oriented class to test persistent nodemap
607
591
608 We need a simple python object to test API and higher level behavior. See
592 We need a simple python object to test API and higher level behavior. See
609 the Rust implementation for more serious usage. This should be used only
593 the Rust implementation for more serious usage. This should be used only
610 through the dedicated `devel.persistent-nodemap` config.
594 through the dedicated `devel.persistent-nodemap` config.
611 """
595 """
612
596
613 def nodemap_data_all(self):
597 def nodemap_data_all(self):
614 """Return bytes containing a full serialization of a nodemap
598 """Return bytes containing a full serialization of a nodemap
615
599
616 The nodemap should be valid for the full set of revisions in the
600 The nodemap should be valid for the full set of revisions in the
617 index."""
601 index."""
618 return nodemaputil.persistent_data(self)
602 return nodemaputil.persistent_data(self)
619
603
620 def nodemap_data_incremental(self):
604 def nodemap_data_incremental(self):
621 """Return bytes containing a incremental update to persistent nodemap
605 """Return bytes containing a incremental update to persistent nodemap
622
606
623 This containst the data for an append-only update of the data provided
607 This containst the data for an append-only update of the data provided
624 in the last call to `update_nodemap_data`.
608 in the last call to `update_nodemap_data`.
625 """
609 """
626 if self._nm_root is None:
610 if self._nm_root is None:
627 return None
611 return None
628 docket = self._nm_docket
612 docket = self._nm_docket
629 changed, data = nodemaputil.update_persistent_data(
613 changed, data = nodemaputil.update_persistent_data(
630 self, self._nm_root, self._nm_max_idx, self._nm_docket.tip_rev
614 self, self._nm_root, self._nm_max_idx, self._nm_docket.tip_rev
631 )
615 )
632
616
633 self._nm_root = self._nm_max_idx = self._nm_docket = None
617 self._nm_root = self._nm_max_idx = self._nm_docket = None
634 return docket, changed, data
618 return docket, changed, data
635
619
636 def update_nodemap_data(self, docket, nm_data):
620 def update_nodemap_data(self, docket, nm_data):
637 """provide full block of persisted binary data for a nodemap
621 """provide full block of persisted binary data for a nodemap
638
622
639 The data are expected to come from disk. See `nodemap_data_all` for a
623 The data are expected to come from disk. See `nodemap_data_all` for a
640 producer of such data."""
624 producer of such data."""
641 if nm_data is not None:
625 if nm_data is not None:
642 self._nm_root, self._nm_max_idx = nodemaputil.parse_data(nm_data)
626 self._nm_root, self._nm_max_idx = nodemaputil.parse_data(nm_data)
643 if self._nm_root:
627 if self._nm_root:
644 self._nm_docket = docket
628 self._nm_docket = docket
645 else:
629 else:
646 self._nm_root = self._nm_max_idx = self._nm_docket = None
630 self._nm_root = self._nm_max_idx = self._nm_docket = None
647
631
648
632
649 class InlinedIndexObject(BaseIndexObject):
633 class InlinedIndexObject(BaseIndexObject):
650 def __init__(self, data, inline=0):
634 def __init__(self, data, inline=0):
651 self._data = data
635 self._data = data
652 self._lgt = self._inline_scan(None)
636 self._lgt = self._inline_scan(None)
653 self._inline_scan(self._lgt)
637 self._inline_scan(self._lgt)
654 self._extra = []
638 self._extra = []
655
639
656 def _inline_scan(self, lgt):
640 def _inline_scan(self, lgt):
657 off = 0
641 off = 0
658 if lgt is not None:
642 if lgt is not None:
659 self._offsets = [0] * lgt
643 self._offsets = [0] * lgt
660 count = 0
644 count = 0
661 while off <= len(self._data) - self.entry_size:
645 while off <= len(self._data) - self.entry_size:
662 start = off + self.big_int_size
646 start = off + self.big_int_size
663 (s,) = struct.unpack(
647 (s,) = struct.unpack(
664 b'>i',
648 b'>i',
665 self._data[start : start + self.int_size],
649 self._data[start : start + self.int_size],
666 )
650 )
667 if lgt is not None:
651 if lgt is not None:
668 self._offsets[count] = off
652 self._offsets[count] = off
669 count += 1
653 count += 1
670 off += self.entry_size + s
654 off += self.entry_size + s
671 if off != len(self._data):
655 if off != len(self._data):
672 raise ValueError(b"corrupted data")
656 raise ValueError(b"corrupted data")
673 return count
657 return count
674
658
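In an inline revlog each index entry is immediately followed by its compressed chunk, so the scan above advances by `entry_size + compressed_length`, reading the 4-byte big-endian length stored `big_int_size` bytes into every entry. A self-contained sketch of the same walk over a fabricated buffer (sizes chosen to match a 64-byte v1 index entry; the data itself is made up):

import struct

ENTRY_SIZE, BIG_INT_SIZE, INT_SIZE = 64, 8, 4

def scan_offsets(data):
    offsets, off = [], 0
    while off <= len(data) - ENTRY_SIZE:
        start = off + BIG_INT_SIZE
        (clen,) = struct.unpack(b'>i', data[start : start + INT_SIZE])
        offsets.append(off)
        off += ENTRY_SIZE + clen
    if off != len(data):
        raise ValueError('corrupted data')
    return offsets

def fake_entry(clen):
    # 8 filler bytes, the 4-byte compressed length, then padding up to 64 bytes
    return (b'\x00' * BIG_INT_SIZE + struct.pack(b'>i', clen)
            + b'\x00' * (ENTRY_SIZE - BIG_INT_SIZE - INT_SIZE))

data = fake_entry(10) + b'x' * 10 + fake_entry(0)
assert scan_offsets(data) == [0, 74]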
675 def __delitem__(self, i):
659 def __delitem__(self, i):
676 if not isinstance(i, slice) or not i.stop == -1 or i.step is not None:
660 if not isinstance(i, slice) or not i.stop == -1 or i.step is not None:
677 raise ValueError(b"deleting slices only supports a:-1 with step 1")
661 raise ValueError(b"deleting slices only supports a:-1 with step 1")
678 i = i.start
662 i = i.start
679 self._check_index(i)
663 self._check_index(i)
680 self._stripnodes(i)
664 self._stripnodes(i)
681 if i < self._lgt:
665 if i < self._lgt:
682 self._offsets = self._offsets[:i]
666 self._offsets = self._offsets[:i]
683 self._lgt = i
667 self._lgt = i
684 self._extra = []
668 self._extra = []
685 else:
669 else:
686 self._extra = self._extra[: i - self._lgt]
670 self._extra = self._extra[: i - self._lgt]
687
671
688 def _calculate_index(self, i):
672 def _calculate_index(self, i):
689 return self._offsets[i]
673 return self._offsets[i]
690
674
691
675
692 def parse_index2(data, inline, revlogv2=False):
676 def parse_index2(data, inline, revlogv2=False):
693 if not inline:
677 if not inline:
694 cls = IndexObject2 if revlogv2 else IndexObject
678 cls = IndexObject2 if revlogv2 else IndexObject
695 return cls(data), None
679 return cls(data), None
696 cls = InlinedIndexObject
680 cls = InlinedIndexObject
697 return cls(data, inline), (0, data)
681 return cls(data, inline), (0, data)
698
682
699
683
700 def parse_index_cl_v2(data):
684 def parse_index_cl_v2(data):
701 return IndexChangelogV2(data), None
685 return IndexChangelogV2(data), None
702
686
703
687
704 class IndexObject2(IndexObject):
688 class IndexObject2(IndexObject):
705 index_format = revlog_constants.INDEX_ENTRY_V2
689 index_format = revlog_constants.INDEX_ENTRY_V2
706
690
707 def replace_sidedata_info(
691 def replace_sidedata_info(
708 self,
692 self,
709 rev,
693 rev,
710 sidedata_offset,
694 sidedata_offset,
711 sidedata_length,
695 sidedata_length,
712 offset_flags,
696 offset_flags,
713 compression_mode,
697 compression_mode,
714 ):
698 ):
715 """
699 """
716 Replace an existing index entry's sidedata offset and length with new
700 Replace an existing index entry's sidedata offset and length with new
717 ones.
701 ones.
718 This cannot be used outside of the context of sidedata rewriting,
702 This cannot be used outside of the context of sidedata rewriting,
719 inside the transaction that creates the revision `rev`.
703 inside the transaction that creates the revision `rev`.
720 """
704 """
721 if rev < 0:
705 if rev < 0:
722 raise KeyError
706 raise KeyError
723 self._check_index(rev)
707 self._check_index(rev)
724 if rev < self._lgt:
708 if rev < self._lgt:
725 msg = b"cannot rewrite entries outside of this transaction"
709 msg = b"cannot rewrite entries outside of this transaction"
726 raise KeyError(msg)
710 raise KeyError(msg)
727 else:
711 else:
728 entry = list(self[rev])
712 entry = list(self[rev])
729 entry[0] = offset_flags
713 entry[0] = offset_flags
730 entry[8] = sidedata_offset
714 entry[8] = sidedata_offset
731 entry[9] = sidedata_length
715 entry[9] = sidedata_length
732 entry[11] = compression_mode
716 entry[11] = compression_mode
733 entry = tuple(entry)
717 entry = tuple(entry)
734 new = self._pack_entry(rev, entry)
718 new = self._pack_entry(rev, entry)
735 self._extra[rev - self._lgt] = new
719 self._extra[rev - self._lgt] = new
736
720
737 def _unpack_entry(self, rev, data):
721 def _unpack_entry(self, rev, data):
738 data = self.index_format.unpack(data)
722 data = self.index_format.unpack(data)
739 entry = data[:10]
723 entry = data[:10]
740 data_comp = data[10] & 3
724 data_comp = data[10] & 3
741 sidedata_comp = (data[10] & (3 << 2)) >> 2
725 sidedata_comp = (data[10] & (3 << 2)) >> 2
742 return entry + (data_comp, sidedata_comp)
726 return entry + (data_comp, sidedata_comp)
743
727
744 def _pack_entry(self, rev, entry):
728 def _pack_entry(self, rev, entry):
745 data = entry[:10]
729 data = entry[:10]
746 data_comp = entry[10] & 3
730 data_comp = entry[10] & 3
747 sidedata_comp = (entry[11] & 3) << 2
731 sidedata_comp = (entry[11] & 3) << 2
748 data += (data_comp | sidedata_comp,)
732 data += (data_comp | sidedata_comp,)
749
733
750 return self.index_format.pack(*data)
734 return self.index_format.pack(*data)
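Note: the two compression modes above share one byte of the packed entry, with the data compression mode in the low two bits and the sidedata compression mode in the next two. A minimal stand-alone sketch of that packing (the helper names are illustrative, not part of the Mercurial API):

// Sketch: pack/unpack the two 2-bit compression modes into a single byte,
// mirroring what _pack_entry/_unpack_entry do above.
fn pack_modes(data_comp: u8, sidedata_comp: u8) -> u8 {
    (data_comp & 3) | ((sidedata_comp & 3) << 2)
}

fn unpack_modes(packed: u8) -> (u8, u8) {
    (packed & 3, (packed >> 2) & 3)
}

fn main() {
    let packed = pack_modes(1, 2);
    assert_eq!(packed, 0b1001);
    assert_eq!(unpack_modes(packed), (1, 2));
}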
751
735
752 def entry_binary(self, rev):
736 def entry_binary(self, rev):
753 """return the raw binary string representing a revision"""
737 """return the raw binary string representing a revision"""
754 entry = self[rev]
738 entry = self[rev]
755 return self._pack_entry(rev, entry)
739 return self._pack_entry(rev, entry)
756
740
757 def pack_header(self, header):
741 def pack_header(self, header):
758 """pack header information as binary"""
742 """pack header information as binary"""
759 msg = 'version header should go in the docket, not the index: %d'
743 msg = 'version header should go in the docket, not the index: %d'
760 msg %= header
744 msg %= header
761 raise error.ProgrammingError(msg)
745 raise error.ProgrammingError(msg)
762
746
763
747
764 class IndexChangelogV2(IndexObject2):
748 class IndexChangelogV2(IndexObject2):
765 index_format = revlog_constants.INDEX_ENTRY_CL_V2
749 index_format = revlog_constants.INDEX_ENTRY_CL_V2
766
750
767 def _unpack_entry(self, rev, data, r=True):
751 def _unpack_entry(self, rev, data, r=True):
768 items = self.index_format.unpack(data)
752 items = self.index_format.unpack(data)
769 entry = items[:3] + (rev, rev) + items[3:8]
753 entry = items[:3] + (rev, rev) + items[3:8]
770 data_comp = items[8] & 3
754 data_comp = items[8] & 3
771 sidedata_comp = (items[8] >> 2) & 3
755 sidedata_comp = (items[8] >> 2) & 3
772 return entry + (data_comp, sidedata_comp)
756 return entry + (data_comp, sidedata_comp)
773
757
774 def _pack_entry(self, rev, entry):
758 def _pack_entry(self, rev, entry):
775 assert entry[3] == rev, entry[3]
759 assert entry[3] == rev, entry[3]
776 assert entry[4] == rev, entry[4]
760 assert entry[4] == rev, entry[4]
777 data = entry[:3] + entry[5:10]
761 data = entry[:3] + entry[5:10]
778 data_comp = entry[10] & 3
762 data_comp = entry[10] & 3
779 sidedata_comp = (entry[11] & 3) << 2
763 sidedata_comp = (entry[11] & 3) << 2
780 data += (data_comp | sidedata_comp,)
764 data += (data_comp | sidedata_comp,)
781 return self.index_format.pack(*data)
765 return self.index_format.pack(*data)
782
766
783
767
784 def parse_index_devel_nodemap(data, inline):
768 def parse_index_devel_nodemap(data, inline):
785 """like parse_index2, but alway return a PersistentNodeMapIndexObject"""
769 """like parse_index2, but alway return a PersistentNodeMapIndexObject"""
786 return PersistentNodeMapIndexObject(data), None
770 return PersistentNodeMapIndexObject(data), None
787
771
788
772
789 def parse_dirstate(dmap, copymap, st):
773 def parse_dirstate(dmap, copymap, st):
790 parents = [st[:20], st[20:40]]
774 parents = [st[:20], st[20:40]]
791 # dereference fields so they will be local in loop
775 # dereference fields so they will be local in loop
792 format = b">cllll"
776 format = b">cllll"
793 e_size = struct.calcsize(format)
777 e_size = struct.calcsize(format)
794 pos1 = 40
778 pos1 = 40
795 l = len(st)
779 l = len(st)
796
780
797 # the inner loop
781 # the inner loop
798 while pos1 < l:
782 while pos1 < l:
799 pos2 = pos1 + e_size
783 pos2 = pos1 + e_size
800 e = _unpack(b">cllll", st[pos1:pos2]) # a literal here is faster
784 e = _unpack(b">cllll", st[pos1:pos2]) # a literal here is faster
801 pos1 = pos2 + e[4]
785 pos1 = pos2 + e[4]
802 f = st[pos2:pos1]
786 f = st[pos2:pos1]
803 if b'\0' in f:
787 if b'\0' in f:
804 f, c = f.split(b'\0')
788 f, c = f.split(b'\0')
805 copymap[f] = c
789 copymap[f] = c
806 dmap[f] = DirstateItem.from_v1_data(*e[:4])
790 dmap[f] = DirstateItem.from_v1_data(*e[:4])
807 return parents
791 return parents
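For reference, each record consumed by the loop above is a fixed header (the b">cllll" format: state byte, mode, size, mtime, filename length) followed by the filename, which may carry a "\0copysource" suffix. A stand-alone sketch of that layout, under the assumption of big-endian 32-bit fields as in dirstate v1 (all names here are illustrative):

// Sketch: decode one dirstate-v1 record from a byte slice.
fn parse_record(data: &[u8]) -> Option<(u8, i32, i32, i32, &[u8], usize)> {
    const HEADER: usize = 1 + 4 * 4; // b">cllll" without the name
    if data.len() < HEADER {
        return None;
    }
    let be = |b: &[u8]| i32::from_be_bytes([b[0], b[1], b[2], b[3]]);
    let (state, mode, size, mtime) =
        (data[0], be(&data[1..5]), be(&data[5..9]), be(&data[9..13]));
    let name_len = be(&data[13..17]) as usize;
    let name = data.get(HEADER..HEADER + name_len)?;
    Some((state, mode, size, mtime, name, HEADER + name_len))
}

fn main() {
    let mut record = vec![b'n'];
    for field in [0o644i32, 12, 1_700_000_000, 3] {
        record.extend_from_slice(&field.to_be_bytes());
    }
    record.extend_from_slice(b"foo");
    let (state, mode, size, _mtime, name, used) = parse_record(&record).unwrap();
    assert_eq!((state, mode, size, name, used), (b'n', 0o644, 12, &b"foo"[..], 20));
}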
808
792
809
793
810 def pack_dirstate(dmap, copymap, pl, now):
794 def pack_dirstate(dmap, copymap, pl, now):
811 now = int(now)
795 now = int(now)
812 cs = stringio()
796 cs = stringio()
813 write = cs.write
797 write = cs.write
814 write(b"".join(pl))
798 write(b"".join(pl))
815 for f, e in pycompat.iteritems(dmap):
799 for f, e in pycompat.iteritems(dmap):
816 if e.need_delay(now):
800 if e.need_delay(now):
817 # The file was last modified "simultaneously" with the current
801 # The file was last modified "simultaneously" with the current
818 # write to dirstate (i.e. within the same second for file-
802 # write to dirstate (i.e. within the same second for file-
819 # systems with a granularity of 1 sec). This commonly happens
803 # systems with a granularity of 1 sec). This commonly happens
820 # for at least a couple of files on 'update'.
804 # for at least a couple of files on 'update'.
821 # The user could change the file without changing its size
805 # The user could change the file without changing its size
822 # within the same second. Invalidate the file's mtime in
806 # within the same second. Invalidate the file's mtime in
823 # dirstate, forcing future 'status' calls to compare the
807 # dirstate, forcing future 'status' calls to compare the
824 # contents of the file if the size is the same. This prevents
808 # contents of the file if the size is the same. This prevents
825 # mistakenly treating such files as clean.
809 # mistakenly treating such files as clean.
826 e.set_possibly_dirty()
810 e.set_possibly_dirty()
827
811
828 if f in copymap:
812 if f in copymap:
829 f = b"%s\0%s" % (f, copymap[f])
813 f = b"%s\0%s" % (f, copymap[f])
830 e = _pack(
814 e = _pack(
831 b">cllll",
815 b">cllll",
832 e.v1_state(),
816 e.v1_state(),
833 e.v1_mode(),
817 e.v1_mode(),
834 e.v1_size(),
818 e.v1_size(),
835 e.v1_mtime(),
819 e.v1_mtime(),
836 len(f),
820 len(f),
837 )
821 )
838 write(e)
822 write(e)
839 write(f)
823 write(f)
840 return cs.getvalue()
824 return cs.getvalue()
@@ -1,414 +1,263 b''
1 // dirstate_map.rs
1 // dirstate_map.rs
2 //
2 //
3 // Copyright 2019 Raphaël Gomès <rgomes@octobus.net>
3 // Copyright 2019 Raphaël Gomès <rgomes@octobus.net>
4 //
4 //
5 // This software may be used and distributed according to the terms of the
5 // This software may be used and distributed according to the terms of the
6 // GNU General Public License version 2 or any later version.
6 // GNU General Public License version 2 or any later version.
7
7
8 use crate::dirstate::parsers::Timestamp;
8 use crate::dirstate::parsers::Timestamp;
9 use crate::errors::HgError;
9 use crate::{
10 use crate::{
10 dirstate::EntryState,
11 dirstate::EntryState,
11 dirstate::SIZE_FROM_OTHER_PARENT,
12 dirstate::SIZE_FROM_OTHER_PARENT,
12 dirstate::SIZE_NON_NORMAL,
13 dirstate::SIZE_NON_NORMAL,
13 pack_dirstate, parse_dirstate,
14 pack_dirstate, parse_dirstate,
14 utils::hg_path::{HgPath, HgPathBuf},
15 utils::hg_path::{HgPath, HgPathBuf},
15 CopyMap, DirsMultiset, DirstateEntry, DirstateError, DirstateParents,
16 CopyMap, DirsMultiset, DirstateEntry, DirstateError, DirstateParents,
16 StateMap,
17 StateMap,
17 };
18 };
18 use micro_timer::timed;
19 use micro_timer::timed;
19 use std::collections::HashSet;
20 use std::iter::FromIterator;
20 use std::iter::FromIterator;
21 use std::ops::Deref;
21 use std::ops::Deref;
22
22
23 #[derive(Default)]
23 #[derive(Default)]
24 pub struct DirstateMap {
24 pub struct DirstateMap {
25 state_map: StateMap,
25 state_map: StateMap,
26 pub copy_map: CopyMap,
26 pub copy_map: CopyMap,
27 pub dirs: Option<DirsMultiset>,
27 pub dirs: Option<DirsMultiset>,
28 pub all_dirs: Option<DirsMultiset>,
28 pub all_dirs: Option<DirsMultiset>,
29 non_normal_set: Option<HashSet<HgPathBuf>>,
30 other_parent_set: Option<HashSet<HgPathBuf>>,
31 }
29 }
32
30
33 /// Should only really be used in python interface code, for clarity
31 /// Should only really be used in python interface code, for clarity
34 impl Deref for DirstateMap {
32 impl Deref for DirstateMap {
35 type Target = StateMap;
33 type Target = StateMap;
36
34
37 fn deref(&self) -> &Self::Target {
35 fn deref(&self) -> &Self::Target {
38 &self.state_map
36 &self.state_map
39 }
37 }
40 }
38 }
41
39
42 impl FromIterator<(HgPathBuf, DirstateEntry)> for DirstateMap {
40 impl FromIterator<(HgPathBuf, DirstateEntry)> for DirstateMap {
43 fn from_iter<I: IntoIterator<Item = (HgPathBuf, DirstateEntry)>>(
41 fn from_iter<I: IntoIterator<Item = (HgPathBuf, DirstateEntry)>>(
44 iter: I,
42 iter: I,
45 ) -> Self {
43 ) -> Self {
46 Self {
44 Self {
47 state_map: iter.into_iter().collect(),
45 state_map: iter.into_iter().collect(),
48 ..Self::default()
46 ..Self::default()
49 }
47 }
50 }
48 }
51 }
49 }
52
50
53 impl DirstateMap {
51 impl DirstateMap {
54 pub fn new() -> Self {
52 pub fn new() -> Self {
55 Self::default()
53 Self::default()
56 }
54 }
57
55
58 pub fn clear(&mut self) {
56 pub fn clear(&mut self) {
59 self.state_map = StateMap::default();
57 self.state_map = StateMap::default();
60 self.copy_map.clear();
58 self.copy_map.clear();
61 self.non_normal_set = None;
62 self.other_parent_set = None;
63 }
59 }
64
60
65 pub fn set_entry(&mut self, filename: &HgPath, entry: DirstateEntry) {
61 pub fn set_entry(&mut self, filename: &HgPath, entry: DirstateEntry) {
66 self.state_map.insert(filename.to_owned(), entry);
62 self.state_map.insert(filename.to_owned(), entry);
67 }
63 }
68
64
69 /// Add a tracked file to the dirstate
65 /// Add a tracked file to the dirstate
70 pub fn add_file(
66 pub fn add_file(
71 &mut self,
67 &mut self,
72 filename: &HgPath,
68 filename: &HgPath,
73 entry: DirstateEntry,
69 entry: DirstateEntry,
74 ) -> Result<(), DirstateError> {
70 ) -> Result<(), DirstateError> {
75 let old_state = self.get(filename).map(|e| e.state());
71 let old_state = self.get(filename).map(|e| e.state());
76 if old_state.is_none() || old_state == Some(EntryState::Removed) {
72 if old_state.is_none() || old_state == Some(EntryState::Removed) {
77 if let Some(ref mut dirs) = self.dirs {
73 if let Some(ref mut dirs) = self.dirs {
78 dirs.add_path(filename)?;
74 dirs.add_path(filename)?;
79 }
75 }
80 }
76 }
81 if old_state.is_none() {
77 if old_state.is_none() {
82 if let Some(ref mut all_dirs) = self.all_dirs {
78 if let Some(ref mut all_dirs) = self.all_dirs {
83 all_dirs.add_path(filename)?;
79 all_dirs.add_path(filename)?;
84 }
80 }
85 }
81 }
86 self.state_map.insert(filename.to_owned(), entry.to_owned());
82 self.state_map.insert(filename.to_owned(), entry.to_owned());
87
88 if entry.is_non_normal() {
89 self.get_non_normal_other_parent_entries()
90 .0
91 .insert(filename.to_owned());
92 }
93
94 if entry.is_from_other_parent() {
95 self.get_non_normal_other_parent_entries()
96 .1
97 .insert(filename.to_owned());
98 }
99 Ok(())
83 Ok(())
100 }
84 }
101
85
102 /// Mark a file as removed in the dirstate.
86 /// Mark a file as removed in the dirstate.
103 ///
87 ///
104 /// The `size` parameter is used to store sentinel values that indicate
88 /// The `size` parameter is used to store sentinel values that indicate
105 /// the file's previous state. In the future, we should refactor this
89 /// the file's previous state. In the future, we should refactor this
106 /// to be more explicit about what that state is.
90 /// to be more explicit about what that state is.
107 pub fn remove_file(
91 pub fn remove_file(
108 &mut self,
92 &mut self,
109 filename: &HgPath,
93 filename: &HgPath,
110 in_merge: bool,
94 in_merge: bool,
111 ) -> Result<(), DirstateError> {
95 ) -> Result<(), DirstateError> {
112 let old_entry_opt = self.get(filename);
96 let old_entry_opt = self.get(filename);
113 let old_state = old_entry_opt.map(|e| e.state());
97 let old_state = old_entry_opt.map(|e| e.state());
114 let mut size = 0;
98 let mut size = 0;
115 if in_merge {
99 if in_merge {
116 // XXX we should not be able to have 'm' state and 'FROM_P2' if not
100 // XXX we should not be able to have 'm' state and 'FROM_P2' if not
117 // during a merge. So I (marmoute) am not sure we need the
101 // during a merge. So I (marmoute) am not sure we need the
118 // conditional at all. Double-checking this with an assert
102 // conditional at all. Double-checking this with an assert
119 // would be nice.
103 // would be nice.
120 if let Some(old_entry) = old_entry_opt {
104 if let Some(old_entry) = old_entry_opt {
121 // backup the previous state
105 // backup the previous state
122 if old_entry.state() == EntryState::Merged {
106 if old_entry.state() == EntryState::Merged {
123 size = SIZE_NON_NORMAL;
107 size = SIZE_NON_NORMAL;
124 } else if old_entry.state() == EntryState::Normal
108 } else if old_entry.state() == EntryState::Normal
125 && old_entry.size() == SIZE_FROM_OTHER_PARENT
109 && old_entry.size() == SIZE_FROM_OTHER_PARENT
126 {
110 {
127 // other parent
111 // other parent
128 size = SIZE_FROM_OTHER_PARENT;
112 size = SIZE_FROM_OTHER_PARENT;
129 self.get_non_normal_other_parent_entries()
130 .1
131 .insert(filename.to_owned());
132 }
113 }
133 }
114 }
134 }
115 }
135 if old_state.is_some() && old_state != Some(EntryState::Removed) {
116 if old_state.is_some() && old_state != Some(EntryState::Removed) {
136 if let Some(ref mut dirs) = self.dirs {
117 if let Some(ref mut dirs) = self.dirs {
137 dirs.delete_path(filename)?;
118 dirs.delete_path(filename)?;
138 }
119 }
139 }
120 }
140 if old_state.is_none() {
121 if old_state.is_none() {
141 if let Some(ref mut all_dirs) = self.all_dirs {
122 if let Some(ref mut all_dirs) = self.all_dirs {
142 all_dirs.add_path(filename)?;
123 all_dirs.add_path(filename)?;
143 }
124 }
144 }
125 }
145 if size == 0 {
126 if size == 0 {
146 self.copy_map.remove(filename);
127 self.copy_map.remove(filename);
147 }
128 }
148
129
149 self.state_map
130 self.state_map
150 .insert(filename.to_owned(), DirstateEntry::new_removed(size));
131 .insert(filename.to_owned(), DirstateEntry::new_removed(size));
151 self.get_non_normal_other_parent_entries()
152 .0
153 .insert(filename.to_owned());
154 Ok(())
132 Ok(())
155 }
133 }
156
134
157 /// Remove a file from the dirstate.
135 /// Remove a file from the dirstate.
158 /// Returns `true` if the file was previously recorded.
136 /// Returns `true` if the file was previously recorded.
159 pub fn drop_entry_and_copy_source(
137 pub fn drop_entry_and_copy_source(
160 &mut self,
138 &mut self,
161 filename: &HgPath,
139 filename: &HgPath,
162 ) -> Result<(), DirstateError> {
140 ) -> Result<(), DirstateError> {
163 let old_state = self.get(filename).map(|e| e.state());
141 let old_state = self.get(filename).map(|e| e.state());
164 let exists = self.state_map.remove(filename).is_some();
142 let exists = self.state_map.remove(filename).is_some();
165
143
166 if exists {
144 if exists {
167 if old_state != Some(EntryState::Removed) {
145 if old_state != Some(EntryState::Removed) {
168 if let Some(ref mut dirs) = self.dirs {
146 if let Some(ref mut dirs) = self.dirs {
169 dirs.delete_path(filename)?;
147 dirs.delete_path(filename)?;
170 }
148 }
171 }
149 }
172 if let Some(ref mut all_dirs) = self.all_dirs {
150 if let Some(ref mut all_dirs) = self.all_dirs {
173 all_dirs.delete_path(filename)?;
151 all_dirs.delete_path(filename)?;
174 }
152 }
175 }
153 }
176 self.get_non_normal_other_parent_entries()
177 .0
178 .remove(filename);
179
180 self.copy_map.remove(filename);
154 self.copy_map.remove(filename);
181
155
182 Ok(())
156 Ok(())
183 }
157 }
184
158
185 pub fn non_normal_entries_remove(
186 &mut self,
187 key: impl AsRef<HgPath>,
188 ) -> bool {
189 self.get_non_normal_other_parent_entries()
190 .0
191 .remove(key.as_ref())
192 }
193
194 pub fn non_normal_entries_add(&mut self, key: impl AsRef<HgPath>) {
195 self.get_non_normal_other_parent_entries()
196 .0
197 .insert(key.as_ref().into());
198 }
199
200 pub fn non_normal_entries_union(
201 &mut self,
202 other: HashSet<HgPathBuf>,
203 ) -> Vec<HgPathBuf> {
204 self.get_non_normal_other_parent_entries()
205 .0
206 .union(&other)
207 .map(ToOwned::to_owned)
208 .collect()
209 }
210
211 pub fn get_non_normal_other_parent_entries(
212 &mut self,
213 ) -> (&mut HashSet<HgPathBuf>, &mut HashSet<HgPathBuf>) {
214 self.set_non_normal_other_parent_entries(false);
215 (
216 self.non_normal_set.as_mut().unwrap(),
217 self.other_parent_set.as_mut().unwrap(),
218 )
219 }
220
221 /// Useful to get immutable references to those sets in contexts where
222 /// you only have an immutable reference to the `DirstateMap`, like when
223 /// sharing references with Python.
224 ///
225 /// TODO, get rid of this along with the other "setter/getter" stuff when
226 /// a nice typestate plan is defined.
227 ///
228 /// # Panics
229 ///
230 /// Will panic if either set is `None`.
231 pub fn get_non_normal_other_parent_entries_panic(
232 &self,
233 ) -> (&HashSet<HgPathBuf>, &HashSet<HgPathBuf>) {
234 (
235 self.non_normal_set.as_ref().unwrap(),
236 self.other_parent_set.as_ref().unwrap(),
237 )
238 }
239
240 pub fn set_non_normal_other_parent_entries(&mut self, force: bool) {
241 if !force
242 && self.non_normal_set.is_some()
243 && self.other_parent_set.is_some()
244 {
245 return;
246 }
247 let mut non_normal = HashSet::new();
248 let mut other_parent = HashSet::new();
249
250 for (filename, entry) in self.state_map.iter() {
251 if entry.is_non_normal() {
252 non_normal.insert(filename.to_owned());
253 }
254 if entry.is_from_other_parent() {
255 other_parent.insert(filename.to_owned());
256 }
257 }
258 self.non_normal_set = Some(non_normal);
259 self.other_parent_set = Some(other_parent);
260 }
261
262 /// Both of these setters and their uses appear to be the simplest way to
159 /// Both of these setters and their uses appear to be the simplest way to
263 /// emulate a Python lazy property, but it is ugly and unidiomatic.
160 /// emulate a Python lazy property, but it is ugly and unidiomatic.
264 /// TODO One day, rewriting this struct using the typestate might be a
161 /// TODO One day, rewriting this struct using the typestate might be a
265 /// good idea.
162 /// good idea.
266 pub fn set_all_dirs(&mut self) -> Result<(), DirstateError> {
163 pub fn set_all_dirs(&mut self) -> Result<(), DirstateError> {
267 if self.all_dirs.is_none() {
164 if self.all_dirs.is_none() {
268 self.all_dirs = Some(DirsMultiset::from_dirstate(
165 self.all_dirs = Some(DirsMultiset::from_dirstate(
269 self.state_map.iter().map(|(k, v)| Ok((k, *v))),
166 self.state_map.iter().map(|(k, v)| Ok((k, *v))),
270 false,
167 false,
271 )?);
168 )?);
272 }
169 }
273 Ok(())
170 Ok(())
274 }
171 }
275
172
276 pub fn set_dirs(&mut self) -> Result<(), DirstateError> {
173 pub fn set_dirs(&mut self) -> Result<(), DirstateError> {
277 if self.dirs.is_none() {
174 if self.dirs.is_none() {
278 self.dirs = Some(DirsMultiset::from_dirstate(
175 self.dirs = Some(DirsMultiset::from_dirstate(
279 self.state_map.iter().map(|(k, v)| Ok((k, *v))),
176 self.state_map.iter().map(|(k, v)| Ok((k, *v))),
280 true,
177 true,
281 )?);
178 )?);
282 }
179 }
283 Ok(())
180 Ok(())
284 }
181 }
285
182
286 pub fn has_tracked_dir(
183 pub fn has_tracked_dir(
287 &mut self,
184 &mut self,
288 directory: &HgPath,
185 directory: &HgPath,
289 ) -> Result<bool, DirstateError> {
186 ) -> Result<bool, DirstateError> {
290 self.set_dirs()?;
187 self.set_dirs()?;
291 Ok(self.dirs.as_ref().unwrap().contains(directory))
188 Ok(self.dirs.as_ref().unwrap().contains(directory))
292 }
189 }
293
190
294 pub fn has_dir(
191 pub fn has_dir(
295 &mut self,
192 &mut self,
296 directory: &HgPath,
193 directory: &HgPath,
297 ) -> Result<bool, DirstateError> {
194 ) -> Result<bool, DirstateError> {
298 self.set_all_dirs()?;
195 self.set_all_dirs()?;
299 Ok(self.all_dirs.as_ref().unwrap().contains(directory))
196 Ok(self.all_dirs.as_ref().unwrap().contains(directory))
300 }
197 }
301
198
302 #[timed]
199 #[timed]
303 pub fn read(
200 pub fn read(
304 &mut self,
201 &mut self,
305 file_contents: &[u8],
202 file_contents: &[u8],
306 ) -> Result<Option<DirstateParents>, DirstateError> {
203 ) -> Result<Option<DirstateParents>, DirstateError> {
307 if file_contents.is_empty() {
204 if file_contents.is_empty() {
308 return Ok(None);
205 return Ok(None);
309 }
206 }
310
207
311 let (parents, entries, copies) = parse_dirstate(file_contents)?;
208 let (parents, entries, copies) = parse_dirstate(file_contents)?;
312 self.state_map.extend(
209 self.state_map.extend(
313 entries
210 entries
314 .into_iter()
211 .into_iter()
315 .map(|(path, entry)| (path.to_owned(), entry)),
212 .map(|(path, entry)| (path.to_owned(), entry)),
316 );
213 );
317 self.copy_map.extend(
214 self.copy_map.extend(
318 copies
215 copies
319 .into_iter()
216 .into_iter()
320 .map(|(path, copy)| (path.to_owned(), copy.to_owned())),
217 .map(|(path, copy)| (path.to_owned(), copy.to_owned())),
321 );
218 );
322 Ok(Some(parents.clone()))
219 Ok(Some(parents.clone()))
323 }
220 }
324
221
325 pub fn pack(
222 pub fn pack(
326 &mut self,
223 &mut self,
327 parents: DirstateParents,
224 parents: DirstateParents,
328 now: Timestamp,
225 now: Timestamp,
329 ) -> Result<Vec<u8>, DirstateError> {
226 ) -> Result<Vec<u8>, HgError> {
330 let packed =
227 pack_dirstate(&mut self.state_map, &self.copy_map, parents, now)
331 pack_dirstate(&mut self.state_map, &self.copy_map, parents, now)?;
332
333 self.set_non_normal_other_parent_entries(true);
334 Ok(packed)
335 }
228 }
336 }
229 }
337
230
338 #[cfg(test)]
231 #[cfg(test)]
339 mod tests {
232 mod tests {
340 use super::*;
233 use super::*;
341
234
342 #[test]
235 #[test]
343 fn test_dirs_multiset() {
236 fn test_dirs_multiset() {
344 let mut map = DirstateMap::new();
237 let mut map = DirstateMap::new();
345 assert!(map.dirs.is_none());
238 assert!(map.dirs.is_none());
346 assert!(map.all_dirs.is_none());
239 assert!(map.all_dirs.is_none());
347
240
348 assert_eq!(map.has_dir(HgPath::new(b"nope")).unwrap(), false);
241 assert_eq!(map.has_dir(HgPath::new(b"nope")).unwrap(), false);
349 assert!(map.all_dirs.is_some());
242 assert!(map.all_dirs.is_some());
350 assert!(map.dirs.is_none());
243 assert!(map.dirs.is_none());
351
244
352 assert_eq!(map.has_tracked_dir(HgPath::new(b"nope")).unwrap(), false);
245 assert_eq!(map.has_tracked_dir(HgPath::new(b"nope")).unwrap(), false);
353 assert!(map.dirs.is_some());
246 assert!(map.dirs.is_some());
354 }
247 }
355
248
356 #[test]
249 #[test]
357 fn test_add_file() {
250 fn test_add_file() {
358 let mut map = DirstateMap::new();
251 let mut map = DirstateMap::new();
359
252
360 assert_eq!(0, map.len());
253 assert_eq!(0, map.len());
361
254
362 map.add_file(
255 map.add_file(
363 HgPath::new(b"meh"),
256 HgPath::new(b"meh"),
364 DirstateEntry::from_v1_data(EntryState::Normal, 1337, 1337, 1337),
257 DirstateEntry::from_v1_data(EntryState::Normal, 1337, 1337, 1337),
365 )
258 )
366 .unwrap();
259 .unwrap();
367
260
368 assert_eq!(1, map.len());
261 assert_eq!(1, map.len());
369 assert_eq!(0, map.get_non_normal_other_parent_entries().0.len());
370 assert_eq!(0, map.get_non_normal_other_parent_entries().1.len());
371 }
372
373 #[test]
374 fn test_non_normal_other_parent_entries() {
375 let mut map: DirstateMap = [
376 (b"f1", (EntryState::Removed, 1337, 1337, 1337)),
377 (b"f2", (EntryState::Normal, 1337, 1337, -1)),
378 (b"f3", (EntryState::Normal, 1337, 1337, 1337)),
379 (b"f4", (EntryState::Normal, 1337, -2, 1337)),
380 (b"f5", (EntryState::Added, 1337, 1337, 1337)),
381 (b"f6", (EntryState::Added, 1337, 1337, -1)),
382 (b"f7", (EntryState::Merged, 1337, 1337, -1)),
383 (b"f8", (EntryState::Merged, 1337, 1337, 1337)),
384 (b"f9", (EntryState::Merged, 1337, -2, 1337)),
385 (b"fa", (EntryState::Added, 1337, -2, 1337)),
386 (b"fb", (EntryState::Removed, 1337, -2, 1337)),
387 ]
388 .iter()
389 .map(|(fname, (state, mode, size, mtime))| {
390 (
391 HgPathBuf::from_bytes(fname.as_ref()),
392 DirstateEntry::from_v1_data(*state, *mode, *size, *mtime),
393 )
394 })
395 .collect();
396
397 let mut non_normal = [
398 b"f1", b"f2", b"f4", b"f5", b"f6", b"f7", b"f8", b"f9", b"fa",
399 b"fb",
400 ]
401 .iter()
402 .map(|x| HgPathBuf::from_bytes(x.as_ref()))
403 .collect();
404
405 let mut other_parent = HashSet::new();
406 other_parent.insert(HgPathBuf::from_bytes(b"f4"));
407 let entries = map.get_non_normal_other_parent_entries();
408
409 assert_eq!(
410 (&mut non_normal, &mut other_parent),
411 (entries.0, entries.1)
412 );
413 }
262 }
414 }
263 }
@@ -1,392 +1,388 b''
1 use crate::errors::HgError;
1 use crate::errors::HgError;
2 use bitflags::bitflags;
2 use bitflags::bitflags;
3 use std::convert::TryFrom;
3 use std::convert::TryFrom;
4
4
5 #[derive(Copy, Clone, Debug, Eq, PartialEq)]
5 #[derive(Copy, Clone, Debug, Eq, PartialEq)]
6 pub enum EntryState {
6 pub enum EntryState {
7 Normal,
7 Normal,
8 Added,
8 Added,
9 Removed,
9 Removed,
10 Merged,
10 Merged,
11 }
11 }
12
12
13 /// The C implementation uses all signed types. This will be an issue
13 /// The C implementation uses all signed types. This will be an issue
14 /// either when 4GB+ source files are commonplace or in 2038, whichever
14 /// either when 4GB+ source files are commonplace or in 2038, whichever
15 /// comes first.
15 /// comes first.
16 #[derive(Debug, PartialEq, Copy, Clone)]
16 #[derive(Debug, PartialEq, Copy, Clone)]
17 pub struct DirstateEntry {
17 pub struct DirstateEntry {
18 flags: Flags,
18 flags: Flags,
19 mode: i32,
19 mode: i32,
20 size: i32,
20 size: i32,
21 mtime: i32,
21 mtime: i32,
22 }
22 }
23
23
24 bitflags! {
24 bitflags! {
25 pub struct Flags: u8 {
25 pub struct Flags: u8 {
26 const WDIR_TRACKED = 1 << 0;
26 const WDIR_TRACKED = 1 << 0;
27 const P1_TRACKED = 1 << 1;
27 const P1_TRACKED = 1 << 1;
28 const P2_TRACKED = 1 << 2;
28 const P2_TRACKED = 1 << 2;
29 const POSSIBLY_DIRTY = 1 << 3;
29 const POSSIBLY_DIRTY = 1 << 3;
30 const MERGED = 1 << 4;
30 const MERGED = 1 << 4;
31 const CLEAN_P1 = 1 << 5;
31 const CLEAN_P1 = 1 << 5;
32 const CLEAN_P2 = 1 << 6;
32 const CLEAN_P2 = 1 << 6;
33 const ENTRYLESS_TREE_NODE = 1 << 7;
33 const ENTRYLESS_TREE_NODE = 1 << 7;
34 }
34 }
35 }
35 }
36
36
37 pub const V1_RANGEMASK: i32 = 0x7FFFFFFF;
37 pub const V1_RANGEMASK: i32 = 0x7FFFFFFF;
38
38
39 pub const MTIME_UNSET: i32 = -1;
39 pub const MTIME_UNSET: i32 = -1;
40
40
41 /// A `DirstateEntry` with a size of `-2` means that it was merged from the
41 /// A `DirstateEntry` with a size of `-2` means that it was merged from the
42 /// other parent. This allows revert to pick the right status back during a
42 /// other parent. This allows revert to pick the right status back during a
43 /// merge.
43 /// merge.
44 pub const SIZE_FROM_OTHER_PARENT: i32 = -2;
44 pub const SIZE_FROM_OTHER_PARENT: i32 = -2;
45 /// A special value used for internal representation of special case in
45 /// A special value used for internal representation of special case in
46 /// dirstate v1 format.
46 /// dirstate v1 format.
47 pub const SIZE_NON_NORMAL: i32 = -1;
47 pub const SIZE_NON_NORMAL: i32 = -1;
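To make the two sentinel sizes concrete, here is a minimal stand-alone sketch of how a dirstate-v1 (state, size) pair is interpreted, following the from_v1_data logic below; the describe() helper and its strings are illustrative only:

// Sketch: classify a (state byte, size) pair using the sentinel sizes.
const FROM_OTHER_PARENT: i32 = -2;
const NON_NORMAL: i32 = -1;

fn describe(state: u8, size: i32) -> &'static str {
    match (state, size) {
        (b'n', FROM_OTHER_PARENT) => "normal, content taken from p2",
        (b'n', NON_NORMAL) => "normal, but possibly dirty",
        (b'n', _) => "clean normal file",
        (b'a', _) => "added",
        (b'r', NON_NORMAL) => "removed, was in merged state",
        (b'r', FROM_OTHER_PARENT) => "removed, was from p2",
        (b'r', _) => "removed",
        (b'm', _) => "merged",
        _ => "unknown state byte",
    }
}

fn main() {
    assert_eq!(describe(b'n', -2), "normal, content taken from p2");
    assert_eq!(describe(b'r', -1), "removed, was in merged state");
}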
48
48
49 impl DirstateEntry {
49 impl DirstateEntry {
50 pub fn new(
50 pub fn new(
51 flags: Flags,
51 flags: Flags,
52 mode_size_mtime: Option<(i32, i32, i32)>,
52 mode_size_mtime: Option<(i32, i32, i32)>,
53 ) -> Self {
53 ) -> Self {
54 let (mode, size, mtime) =
54 let (mode, size, mtime) =
55 mode_size_mtime.unwrap_or((0, SIZE_NON_NORMAL, MTIME_UNSET));
55 mode_size_mtime.unwrap_or((0, SIZE_NON_NORMAL, MTIME_UNSET));
56 Self {
56 Self {
57 flags,
57 flags,
58 mode,
58 mode,
59 size,
59 size,
60 mtime,
60 mtime,
61 }
61 }
62 }
62 }
63
63
64 pub fn from_v1_data(
64 pub fn from_v1_data(
65 state: EntryState,
65 state: EntryState,
66 mode: i32,
66 mode: i32,
67 size: i32,
67 size: i32,
68 mtime: i32,
68 mtime: i32,
69 ) -> Self {
69 ) -> Self {
70 match state {
70 match state {
71 EntryState::Normal => {
71 EntryState::Normal => {
72 if size == SIZE_FROM_OTHER_PARENT {
72 if size == SIZE_FROM_OTHER_PARENT {
73 Self::new_from_p2()
73 Self::new_from_p2()
74 } else if size == SIZE_NON_NORMAL {
74 } else if size == SIZE_NON_NORMAL {
75 Self::new_possibly_dirty()
75 Self::new_possibly_dirty()
76 } else if mtime == MTIME_UNSET {
76 } else if mtime == MTIME_UNSET {
77 Self {
77 Self {
78 flags: Flags::WDIR_TRACKED
78 flags: Flags::WDIR_TRACKED
79 | Flags::P1_TRACKED
79 | Flags::P1_TRACKED
80 | Flags::POSSIBLY_DIRTY,
80 | Flags::POSSIBLY_DIRTY,
81 mode,
81 mode,
82 size,
82 size,
83 mtime: 0,
83 mtime: 0,
84 }
84 }
85 } else {
85 } else {
86 Self::new_normal(mode, size, mtime)
86 Self::new_normal(mode, size, mtime)
87 }
87 }
88 }
88 }
89 EntryState::Added => Self::new_added(),
89 EntryState::Added => Self::new_added(),
90 EntryState::Removed => Self {
90 EntryState::Removed => Self {
91 flags: if size == SIZE_NON_NORMAL {
91 flags: if size == SIZE_NON_NORMAL {
92 Flags::P1_TRACKED // might not be true because of rename ?
92 Flags::P1_TRACKED // might not be true because of rename ?
93 | Flags::P2_TRACKED // might not be true because of rename ?
93 | Flags::P2_TRACKED // might not be true because of rename ?
94 | Flags::MERGED
94 | Flags::MERGED
95 } else if size == SIZE_FROM_OTHER_PARENT {
95 } else if size == SIZE_FROM_OTHER_PARENT {
96 // We don’t know if P1_TRACKED should be set (file history)
96 // We don’t know if P1_TRACKED should be set (file history)
97 Flags::P2_TRACKED | Flags::CLEAN_P2
97 Flags::P2_TRACKED | Flags::CLEAN_P2
98 } else {
98 } else {
99 Flags::P1_TRACKED
99 Flags::P1_TRACKED
100 },
100 },
101 mode: 0,
101 mode: 0,
102 size: 0,
102 size: 0,
103 mtime: 0,
103 mtime: 0,
104 },
104 },
105 EntryState::Merged => Self::new_merged(),
105 EntryState::Merged => Self::new_merged(),
106 }
106 }
107 }
107 }
108
108
109 pub fn new_from_p2() -> Self {
109 pub fn new_from_p2() -> Self {
110 Self {
110 Self {
111 // might be missing P1_TRACKED
111 // might be missing P1_TRACKED
112 flags: Flags::WDIR_TRACKED | Flags::P2_TRACKED | Flags::CLEAN_P2,
112 flags: Flags::WDIR_TRACKED | Flags::P2_TRACKED | Flags::CLEAN_P2,
113 mode: 0,
113 mode: 0,
114 size: SIZE_FROM_OTHER_PARENT,
114 size: SIZE_FROM_OTHER_PARENT,
115 mtime: MTIME_UNSET,
115 mtime: MTIME_UNSET,
116 }
116 }
117 }
117 }
118
118
119 pub fn new_possibly_dirty() -> Self {
119 pub fn new_possibly_dirty() -> Self {
120 Self {
120 Self {
121 flags: Flags::WDIR_TRACKED
121 flags: Flags::WDIR_TRACKED
122 | Flags::P1_TRACKED
122 | Flags::P1_TRACKED
123 | Flags::POSSIBLY_DIRTY,
123 | Flags::POSSIBLY_DIRTY,
124 mode: 0,
124 mode: 0,
125 size: SIZE_NON_NORMAL,
125 size: SIZE_NON_NORMAL,
126 mtime: MTIME_UNSET,
126 mtime: MTIME_UNSET,
127 }
127 }
128 }
128 }
129
129
130 pub fn new_added() -> Self {
130 pub fn new_added() -> Self {
131 Self {
131 Self {
132 flags: Flags::WDIR_TRACKED,
132 flags: Flags::WDIR_TRACKED,
133 mode: 0,
133 mode: 0,
134 size: SIZE_NON_NORMAL,
134 size: SIZE_NON_NORMAL,
135 mtime: MTIME_UNSET,
135 mtime: MTIME_UNSET,
136 }
136 }
137 }
137 }
138
138
139 pub fn new_merged() -> Self {
139 pub fn new_merged() -> Self {
140 Self {
140 Self {
141 flags: Flags::WDIR_TRACKED
141 flags: Flags::WDIR_TRACKED
142 | Flags::P1_TRACKED // might not be true because of rename ?
142 | Flags::P1_TRACKED // might not be true because of rename ?
143 | Flags::P2_TRACKED // might not be true because of rename ?
143 | Flags::P2_TRACKED // might not be true because of rename ?
144 | Flags::MERGED,
144 | Flags::MERGED,
145 mode: 0,
145 mode: 0,
146 size: SIZE_NON_NORMAL,
146 size: SIZE_NON_NORMAL,
147 mtime: MTIME_UNSET,
147 mtime: MTIME_UNSET,
148 }
148 }
149 }
149 }
150
150
151 pub fn new_normal(mode: i32, size: i32, mtime: i32) -> Self {
151 pub fn new_normal(mode: i32, size: i32, mtime: i32) -> Self {
152 Self {
152 Self {
153 flags: Flags::WDIR_TRACKED | Flags::P1_TRACKED,
153 flags: Flags::WDIR_TRACKED | Flags::P1_TRACKED,
154 mode,
154 mode,
155 size,
155 size,
156 mtime,
156 mtime,
157 }
157 }
158 }
158 }
159
159
160 /// Creates a new entry in "removed" state.
160 /// Creates a new entry in "removed" state.
161 ///
161 ///
162 /// `size` is expected to be zero, `SIZE_NON_NORMAL`, or
162 /// `size` is expected to be zero, `SIZE_NON_NORMAL`, or
163 /// `SIZE_FROM_OTHER_PARENT`
163 /// `SIZE_FROM_OTHER_PARENT`
164 pub fn new_removed(size: i32) -> Self {
164 pub fn new_removed(size: i32) -> Self {
165 Self::from_v1_data(EntryState::Removed, 0, size, 0)
165 Self::from_v1_data(EntryState::Removed, 0, size, 0)
166 }
166 }
167
167
168 pub fn tracked(&self) -> bool {
168 pub fn tracked(&self) -> bool {
169 self.flags.contains(Flags::WDIR_TRACKED)
169 self.flags.contains(Flags::WDIR_TRACKED)
170 }
170 }
171
171
172 fn tracked_in_any_parent(&self) -> bool {
172 fn tracked_in_any_parent(&self) -> bool {
173 self.flags.intersects(Flags::P1_TRACKED | Flags::P2_TRACKED)
173 self.flags.intersects(Flags::P1_TRACKED | Flags::P2_TRACKED)
174 }
174 }
175
175
176 pub fn removed(&self) -> bool {
176 pub fn removed(&self) -> bool {
177 self.tracked_in_any_parent()
177 self.tracked_in_any_parent()
178 && !self.flags.contains(Flags::WDIR_TRACKED)
178 && !self.flags.contains(Flags::WDIR_TRACKED)
179 }
179 }
180
180
181 pub fn merged_removed(&self) -> bool {
181 pub fn merged_removed(&self) -> bool {
182 self.removed() && self.flags.contains(Flags::MERGED)
182 self.removed() && self.flags.contains(Flags::MERGED)
183 }
183 }
184
184
185 pub fn from_p2_removed(&self) -> bool {
185 pub fn from_p2_removed(&self) -> bool {
186 self.removed() && self.flags.contains(Flags::CLEAN_P2)
186 self.removed() && self.flags.contains(Flags::CLEAN_P2)
187 }
187 }
188
188
189 pub fn merged(&self) -> bool {
189 pub fn merged(&self) -> bool {
190 self.flags.contains(Flags::WDIR_TRACKED | Flags::MERGED)
190 self.flags.contains(Flags::WDIR_TRACKED | Flags::MERGED)
191 }
191 }
192
192
193 pub fn added(&self) -> bool {
193 pub fn added(&self) -> bool {
194 self.flags.contains(Flags::WDIR_TRACKED)
194 self.flags.contains(Flags::WDIR_TRACKED)
195 && !self.tracked_in_any_parent()
195 && !self.tracked_in_any_parent()
196 }
196 }
197
197
198 pub fn from_p2(&self) -> bool {
198 pub fn from_p2(&self) -> bool {
199 self.flags.contains(Flags::WDIR_TRACKED | Flags::CLEAN_P2)
199 self.flags.contains(Flags::WDIR_TRACKED | Flags::CLEAN_P2)
200 }
200 }
201
201
202 pub fn state(&self) -> EntryState {
202 pub fn state(&self) -> EntryState {
203 if self.removed() {
203 if self.removed() {
204 EntryState::Removed
204 EntryState::Removed
205 } else if self.merged() {
205 } else if self.merged() {
206 EntryState::Merged
206 EntryState::Merged
207 } else if self.added() {
207 } else if self.added() {
208 EntryState::Added
208 EntryState::Added
209 } else {
209 } else {
210 EntryState::Normal
210 EntryState::Normal
211 }
211 }
212 }
212 }
213
213
214 pub fn mode(&self) -> i32 {
214 pub fn mode(&self) -> i32 {
215 self.mode
215 self.mode
216 }
216 }
217
217
218 pub fn size(&self) -> i32 {
218 pub fn size(&self) -> i32 {
219 if self.merged_removed() {
219 if self.merged_removed() {
220 SIZE_NON_NORMAL
220 SIZE_NON_NORMAL
221 } else if self.from_p2_removed() {
221 } else if self.from_p2_removed() {
222 SIZE_FROM_OTHER_PARENT
222 SIZE_FROM_OTHER_PARENT
223 } else if self.removed() {
223 } else if self.removed() {
224 0
224 0
225 } else if self.merged() {
225 } else if self.merged() {
226 SIZE_FROM_OTHER_PARENT
226 SIZE_FROM_OTHER_PARENT
227 } else if self.added() {
227 } else if self.added() {
228 SIZE_NON_NORMAL
228 SIZE_NON_NORMAL
229 } else if self.from_p2() {
229 } else if self.from_p2() {
230 SIZE_FROM_OTHER_PARENT
230 SIZE_FROM_OTHER_PARENT
231 } else if self.flags.contains(Flags::POSSIBLY_DIRTY) {
231 } else if self.flags.contains(Flags::POSSIBLY_DIRTY) {
232 self.size // TODO: SIZE_NON_NORMAL ?
232 self.size // TODO: SIZE_NON_NORMAL ?
233 } else {
233 } else {
234 self.size
234 self.size
235 }
235 }
236 }
236 }
237
237
238 pub fn mtime(&self) -> i32 {
238 pub fn mtime(&self) -> i32 {
239 if self.removed() {
239 if self.removed() {
240 0
240 0
241 } else if self.flags.contains(Flags::POSSIBLY_DIRTY) {
241 } else if self.flags.contains(Flags::POSSIBLY_DIRTY) {
242 MTIME_UNSET
242 MTIME_UNSET
243 } else if self.merged() {
243 } else if self.merged() {
244 MTIME_UNSET
244 MTIME_UNSET
245 } else if self.added() {
245 } else if self.added() {
246 MTIME_UNSET
246 MTIME_UNSET
247 } else if self.from_p2() {
247 } else if self.from_p2() {
248 MTIME_UNSET
248 MTIME_UNSET
249 } else {
249 } else {
250 self.mtime
250 self.mtime
251 }
251 }
252 }
252 }
253
253
254 pub fn set_possibly_dirty(&mut self) {
254 pub fn set_possibly_dirty(&mut self) {
255 self.flags.insert(Flags::POSSIBLY_DIRTY)
255 self.flags.insert(Flags::POSSIBLY_DIRTY)
256 }
256 }
257
257
258 pub fn set_clean(&mut self, mode: i32, size: i32, mtime: i32) {
258 pub fn set_clean(&mut self, mode: i32, size: i32, mtime: i32) {
259 self.flags.insert(Flags::WDIR_TRACKED | Flags::P1_TRACKED);
259 self.flags.insert(Flags::WDIR_TRACKED | Flags::P1_TRACKED);
260 self.flags.remove(
260 self.flags.remove(
261 Flags::P2_TRACKED // This might be wrong
261 Flags::P2_TRACKED // This might be wrong
262 | Flags::MERGED
262 | Flags::MERGED
263 | Flags::CLEAN_P2
263 | Flags::CLEAN_P2
264 | Flags::POSSIBLY_DIRTY,
264 | Flags::POSSIBLY_DIRTY,
265 );
265 );
266 self.mode = mode;
266 self.mode = mode;
267 self.size = size;
267 self.size = size;
268 self.mtime = mtime;
268 self.mtime = mtime;
269 }
269 }
270
270
271 pub fn set_tracked(&mut self) {
271 pub fn set_tracked(&mut self) {
272 self.flags
272 self.flags
273 .insert(Flags::WDIR_TRACKED | Flags::POSSIBLY_DIRTY);
273 .insert(Flags::WDIR_TRACKED | Flags::POSSIBLY_DIRTY);
274 // size = None on the Python side turns into size = NON_NORMAL when
274 // size = None on the Python side turns into size = NON_NORMAL when
275 // accessed. So the next line is currently required, but some future
275 // accessed. So the next line is currently required, but some future
276 // clean up would be welcome.
276 // clean up would be welcome.
277 self.size = SIZE_NON_NORMAL;
277 self.size = SIZE_NON_NORMAL;
278 }
278 }
279
279
280 pub fn set_untracked(&mut self) {
280 pub fn set_untracked(&mut self) {
281 self.flags.remove(Flags::WDIR_TRACKED);
281 self.flags.remove(Flags::WDIR_TRACKED);
282 self.mode = 0;
282 self.mode = 0;
283 self.size = 0;
283 self.size = 0;
284 self.mtime = 0;
284 self.mtime = 0;
285 }
285 }
286
286
287 /// Returns `(state, mode, size, mtime)` for the purpose of serialization
287 /// Returns `(state, mode, size, mtime)` for the purpose of serialization
288 /// in the dirstate-v1 format.
288 /// in the dirstate-v1 format.
289 ///
289 ///
290 /// This includes marker values such as `mtime == -1`. In the future we may
290 /// This includes marker values such as `mtime == -1`. In the future we may
291 /// want to not represent these cases that way in memory, but serialization
291 /// want to not represent these cases that way in memory, but serialization
292 /// will need to keep the same format.
292 /// will need to keep the same format.
293 pub fn v1_data(&self) -> (u8, i32, i32, i32) {
293 pub fn v1_data(&self) -> (u8, i32, i32, i32) {
294 (self.state().into(), self.mode(), self.size(), self.mtime())
294 (self.state().into(), self.mode(), self.size(), self.mtime())
295 }
295 }
296
296
297 pub fn is_non_normal(&self) -> bool {
297 pub(crate) fn is_from_other_parent(&self) -> bool {
298 self.state() != EntryState::Normal || self.mtime() == MTIME_UNSET
299 }
300
301 pub fn is_from_other_parent(&self) -> bool {
302 self.state() == EntryState::Normal
298 self.state() == EntryState::Normal
303 && self.size() == SIZE_FROM_OTHER_PARENT
299 && self.size() == SIZE_FROM_OTHER_PARENT
304 }
300 }
305
301
306 // TODO: other platforms
302 // TODO: other platforms
307 #[cfg(unix)]
303 #[cfg(unix)]
308 pub fn mode_changed(
304 pub fn mode_changed(
309 &self,
305 &self,
310 filesystem_metadata: &std::fs::Metadata,
306 filesystem_metadata: &std::fs::Metadata,
311 ) -> bool {
307 ) -> bool {
312 use std::os::unix::fs::MetadataExt;
308 use std::os::unix::fs::MetadataExt;
313 const EXEC_BIT_MASK: u32 = 0o100;
309 const EXEC_BIT_MASK: u32 = 0o100;
314 let dirstate_exec_bit = (self.mode() as u32) & EXEC_BIT_MASK;
310 let dirstate_exec_bit = (self.mode() as u32) & EXEC_BIT_MASK;
315 let fs_exec_bit = filesystem_metadata.mode() & EXEC_BIT_MASK;
311 let fs_exec_bit = filesystem_metadata.mode() & EXEC_BIT_MASK;
316 dirstate_exec_bit != fs_exec_bit
312 dirstate_exec_bit != fs_exec_bit
317 }
313 }
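A stand-alone sketch of the comparison mode_changed performs: only the user executable bit (0o100) of the recorded mode is compared against the filesystem mode. The function name and literals below are illustrative:

// Sketch: compare just the user exec bit of two Unix modes.
fn exec_bit_changed(dirstate_mode: u32, fs_mode: u32) -> bool {
    const EXEC_BIT_MASK: u32 = 0o100;
    (dirstate_mode & EXEC_BIT_MASK) != (fs_mode & EXEC_BIT_MASK)
}

fn main() {
    assert!(exec_bit_changed(0o644, 0o755)); // rw-r--r-- vs rwxr-xr-x
    assert!(!exec_bit_changed(0o755, 0o100755)); // file-type bits are ignored
}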
318
314
319 /// Returns a `(state, mode, size, mtime)` tuple as for
315 /// Returns a `(state, mode, size, mtime)` tuple as for
320 /// `DirstateMapMethods::debug_iter`.
316 /// `DirstateMapMethods::debug_iter`.
321 pub fn debug_tuple(&self) -> (u8, i32, i32, i32) {
317 pub fn debug_tuple(&self) -> (u8, i32, i32, i32) {
322 let state = if self.flags.contains(Flags::ENTRYLESS_TREE_NODE) {
318 let state = if self.flags.contains(Flags::ENTRYLESS_TREE_NODE) {
323 b' '
319 b' '
324 } else {
320 } else {
325 self.state().into()
321 self.state().into()
326 };
322 };
327 (state, self.mode(), self.size(), self.mtime())
323 (state, self.mode(), self.size(), self.mtime())
328 }
324 }
329
325
330 pub fn mtime_is_ambiguous(&self, now: i32) -> bool {
326 pub fn mtime_is_ambiguous(&self, now: i32) -> bool {
331 self.state() == EntryState::Normal && self.mtime() == now
327 self.state() == EntryState::Normal && self.mtime() == now
332 }
328 }
333
329
334 pub fn clear_ambiguous_mtime(&mut self, now: i32) -> bool {
330 pub fn clear_ambiguous_mtime(&mut self, now: i32) -> bool {
335 let ambiguous = self.mtime_is_ambiguous(now);
331 let ambiguous = self.mtime_is_ambiguous(now);
336 if ambiguous {
332 if ambiguous {
337 // The file was last modified "simultaneously" with the current
333 // The file was last modified "simultaneously" with the current
338 // write to dirstate (i.e. within the same second for file-
334 // write to dirstate (i.e. within the same second for file-
339 // systems with a granularity of 1 sec). This commonly happens
335 // systems with a granularity of 1 sec). This commonly happens
340 // for at least a couple of files on 'update'.
336 // for at least a couple of files on 'update'.
341 // The user could change the file without changing its size
337 // The user could change the file without changing its size
342 // within the same second. Invalidate the file's mtime in
338 // within the same second. Invalidate the file's mtime in
343 // dirstate, forcing future 'status' calls to compare the
339 // dirstate, forcing future 'status' calls to compare the
344 // contents of the file if the size is the same. This prevents
340 // contents of the file if the size is the same. This prevents
345 // mistakenly treating such files as clean.
341 // mistakenly treating such files as clean.
346 self.clear_mtime()
342 self.clear_mtime()
347 }
343 }
348 ambiguous
344 ambiguous
349 }
345 }
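The ambiguity rule above boils down to: an mtime equal to the dirstate-write timestamp cannot be trusted, because the file can still change within that same second, so the stored mtime is reset and a later status run compares contents instead. A minimal sketch with illustrative names:

// Sketch: invalidate an mtime that collides with the dirstate write time.
const MTIME_UNSET: i32 = -1;

fn stored_mtime(mtime: i32, now: i32) -> i32 {
    if mtime == now {
        MTIME_UNSET
    } else {
        mtime
    }
}

fn main() {
    assert_eq!(stored_mtime(1_700_000_000, 1_700_000_000), MTIME_UNSET);
    assert_eq!(stored_mtime(1_699_999_999, 1_700_000_000), 1_699_999_999);
}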
350
346
351 pub fn clear_mtime(&mut self) {
347 pub fn clear_mtime(&mut self) {
352 self.mtime = -1;
348 self.mtime = -1;
353 }
349 }
354 }
350 }
355
351
356 impl EntryState {
352 impl EntryState {
357 pub fn is_tracked(self) -> bool {
353 pub fn is_tracked(self) -> bool {
358 use EntryState::*;
354 use EntryState::*;
359 match self {
355 match self {
360 Normal | Added | Merged => true,
356 Normal | Added | Merged => true,
361 Removed => false,
357 Removed => false,
362 }
358 }
363 }
359 }
364 }
360 }
365
361
366 impl TryFrom<u8> for EntryState {
362 impl TryFrom<u8> for EntryState {
367 type Error = HgError;
363 type Error = HgError;
368
364
369 fn try_from(value: u8) -> Result<Self, Self::Error> {
365 fn try_from(value: u8) -> Result<Self, Self::Error> {
370 match value {
366 match value {
371 b'n' => Ok(EntryState::Normal),
367 b'n' => Ok(EntryState::Normal),
372 b'a' => Ok(EntryState::Added),
368 b'a' => Ok(EntryState::Added),
373 b'r' => Ok(EntryState::Removed),
369 b'r' => Ok(EntryState::Removed),
374 b'm' => Ok(EntryState::Merged),
370 b'm' => Ok(EntryState::Merged),
375 _ => Err(HgError::CorruptedRepository(format!(
371 _ => Err(HgError::CorruptedRepository(format!(
376 "Incorrect dirstate entry state {}",
372 "Incorrect dirstate entry state {}",
377 value
373 value
378 ))),
374 ))),
379 }
375 }
380 }
376 }
381 }
377 }
382
378
383 impl Into<u8> for EntryState {
379 impl Into<u8> for EntryState {
384 fn into(self) -> u8 {
380 fn into(self) -> u8 {
385 match self {
381 match self {
386 EntryState::Normal => b'n',
382 EntryState::Normal => b'n',
387 EntryState::Added => b'a',
383 EntryState::Added => b'a',
388 EntryState::Removed => b'r',
384 EntryState::Removed => b'r',
389 EntryState::Merged => b'm',
385 EntryState::Merged => b'm',
390 }
386 }
391 }
387 }
392 }
388 }
@@ -1,1252 +1,1168 b''
1 use bytes_cast::BytesCast;
1 use bytes_cast::BytesCast;
2 use micro_timer::timed;
2 use micro_timer::timed;
3 use std::borrow::Cow;
3 use std::borrow::Cow;
4 use std::convert::TryInto;
4 use std::convert::TryInto;
5 use std::path::PathBuf;
5 use std::path::PathBuf;
6
6
7 use super::on_disk;
7 use super::on_disk;
8 use super::on_disk::DirstateV2ParseError;
8 use super::on_disk::DirstateV2ParseError;
9 use super::path_with_basename::WithBasename;
9 use super::path_with_basename::WithBasename;
10 use crate::dirstate::parsers::pack_entry;
10 use crate::dirstate::parsers::pack_entry;
11 use crate::dirstate::parsers::packed_entry_size;
11 use crate::dirstate::parsers::packed_entry_size;
12 use crate::dirstate::parsers::parse_dirstate_entries;
12 use crate::dirstate::parsers::parse_dirstate_entries;
13 use crate::dirstate::parsers::Timestamp;
13 use crate::dirstate::parsers::Timestamp;
14 use crate::dirstate::SIZE_FROM_OTHER_PARENT;
14 use crate::dirstate::SIZE_FROM_OTHER_PARENT;
15 use crate::dirstate::SIZE_NON_NORMAL;
15 use crate::dirstate::SIZE_NON_NORMAL;
16 use crate::matchers::Matcher;
16 use crate::matchers::Matcher;
17 use crate::utils::hg_path::{HgPath, HgPathBuf};
17 use crate::utils::hg_path::{HgPath, HgPathBuf};
18 use crate::CopyMapIter;
18 use crate::CopyMapIter;
19 use crate::DirstateEntry;
19 use crate::DirstateEntry;
20 use crate::DirstateError;
20 use crate::DirstateError;
21 use crate::DirstateParents;
21 use crate::DirstateParents;
22 use crate::DirstateStatus;
22 use crate::DirstateStatus;
23 use crate::EntryState;
23 use crate::EntryState;
24 use crate::FastHashMap;
24 use crate::FastHashMap;
25 use crate::PatternFileWarning;
25 use crate::PatternFileWarning;
26 use crate::StateMapIter;
26 use crate::StateMapIter;
27 use crate::StatusError;
27 use crate::StatusError;
28 use crate::StatusOptions;
28 use crate::StatusOptions;
29
29
30 /// Append to an existing data file if the amount of unreachable data (not used
30 /// Append to an existing data file if the amount of unreachable data (not used
31 /// anymore) is less than this fraction of the total amount of existing data.
31 /// anymore) is less than this fraction of the total amount of existing data.
32 const ACCEPTABLE_UNREACHABLE_BYTES_RATIO: f32 = 0.5;
32 const ACCEPTABLE_UNREACHABLE_BYTES_RATIO: f32 = 0.5;
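The policy this constant encodes can be sketched as a small decision helper: keep appending to the existing data file while unreachable bytes stay below half of its size, otherwise rewrite it from scratch. should_append() below is an illustrative helper, not the hg-core API:

// Sketch: append-vs-rewrite decision based on the unreachable-bytes ratio.
const ACCEPTABLE_UNREACHABLE_BYTES_RATIO: f32 = 0.5;

fn should_append(unreachable_bytes: u32, total_bytes: u32) -> bool {
    total_bytes > 0
        && (unreachable_bytes as f32)
            < ACCEPTABLE_UNREACHABLE_BYTES_RATIO * total_bytes as f32
}

fn main() {
    assert!(should_append(100, 1_000)); // 10% dead data: keep appending
    assert!(!should_append(600, 1_000)); // 60% dead data: rewrite the file
}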
33
33
34 pub struct DirstateMap<'on_disk> {
34 pub struct DirstateMap<'on_disk> {
35 /// Contents of the `.hg/dirstate` file
35 /// Contents of the `.hg/dirstate` file
36 pub(super) on_disk: &'on_disk [u8],
36 pub(super) on_disk: &'on_disk [u8],
37
37
38 pub(super) root: ChildNodes<'on_disk>,
38 pub(super) root: ChildNodes<'on_disk>,
39
39
40 /// Number of nodes anywhere in the tree that have `.entry.is_some()`.
40 /// Number of nodes anywhere in the tree that have `.entry.is_some()`.
41 pub(super) nodes_with_entry_count: u32,
41 pub(super) nodes_with_entry_count: u32,
42
42
43 /// Number of nodes anywhere in the tree that have
43 /// Number of nodes anywhere in the tree that have
44 /// `.copy_source.is_some()`.
44 /// `.copy_source.is_some()`.
45 pub(super) nodes_with_copy_source_count: u32,
45 pub(super) nodes_with_copy_source_count: u32,
46
46
47 /// See on_disk::Header
47 /// See on_disk::Header
48 pub(super) ignore_patterns_hash: on_disk::IgnorePatternsHash,
48 pub(super) ignore_patterns_hash: on_disk::IgnorePatternsHash,
49
49
50 /// How many bytes of `on_disk` are not used anymore
50 /// How many bytes of `on_disk` are not used anymore
51 pub(super) unreachable_bytes: u32,
51 pub(super) unreachable_bytes: u32,
52 }
52 }
53
53
54 /// Using a plain `HgPathBuf` of the full path from the repository root as a
54 /// Using a plain `HgPathBuf` of the full path from the repository root as a
55 /// map key would also work: all paths in a given map have the same parent
55 /// map key would also work: all paths in a given map have the same parent
56 /// path, so comparing full paths gives the same result as comparing base
56 /// path, so comparing full paths gives the same result as comparing base
57 /// names. However `HashMap` would waste time always re-hashing the same
57 /// names. However `HashMap` would waste time always re-hashing the same
58 /// string prefix.
58 /// string prefix.
59 pub(super) type NodeKey<'on_disk> = WithBasename<Cow<'on_disk, HgPath>>;
59 pub(super) type NodeKey<'on_disk> = WithBasename<Cow<'on_disk, HgPath>>;
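The idea described above, in a stand-alone sketch: children of one directory all share the same parent prefix, so keying the per-directory map by base name avoids hashing and comparing that prefix over and over. base_name() here is illustrative, not the WithBasename type:

// Sketch: key a per-directory map by the path's final component only.
use std::collections::HashMap;

fn base_name(path: &str) -> &str {
    path.rsplit('/').next().unwrap_or(path)
}

fn main() {
    let mut children: HashMap<&str, &str> = HashMap::new();
    for full_path in ["dir/a.rs", "dir/b.rs"] {
        children.insert(base_name(full_path), full_path);
    }
    assert_eq!(children["a.rs"], "dir/a.rs");
    assert_eq!(children.len(), 2);
}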
60
60
61 /// Similar to `&'tree Cow<'on_disk, HgPath>`, but can also be returned
61 /// Similar to `&'tree Cow<'on_disk, HgPath>`, but can also be returned
62 /// for on-disk nodes that don’t actually have a `Cow` to borrow.
62 /// for on-disk nodes that don’t actually have a `Cow` to borrow.
63 pub(super) enum BorrowedPath<'tree, 'on_disk> {
63 pub(super) enum BorrowedPath<'tree, 'on_disk> {
64 InMemory(&'tree HgPathBuf),
64 InMemory(&'tree HgPathBuf),
65 OnDisk(&'on_disk HgPath),
65 OnDisk(&'on_disk HgPath),
66 }
66 }
67
67
68 pub(super) enum ChildNodes<'on_disk> {
68 pub(super) enum ChildNodes<'on_disk> {
69 InMemory(FastHashMap<NodeKey<'on_disk>, Node<'on_disk>>),
69 InMemory(FastHashMap<NodeKey<'on_disk>, Node<'on_disk>>),
70 OnDisk(&'on_disk [on_disk::Node]),
70 OnDisk(&'on_disk [on_disk::Node]),
71 }
71 }
72
72
73 pub(super) enum ChildNodesRef<'tree, 'on_disk> {
73 pub(super) enum ChildNodesRef<'tree, 'on_disk> {
74 InMemory(&'tree FastHashMap<NodeKey<'on_disk>, Node<'on_disk>>),
74 InMemory(&'tree FastHashMap<NodeKey<'on_disk>, Node<'on_disk>>),
75 OnDisk(&'on_disk [on_disk::Node]),
75 OnDisk(&'on_disk [on_disk::Node]),
76 }
76 }
77
77
78 pub(super) enum NodeRef<'tree, 'on_disk> {
78 pub(super) enum NodeRef<'tree, 'on_disk> {
79 InMemory(&'tree NodeKey<'on_disk>, &'tree Node<'on_disk>),
79 InMemory(&'tree NodeKey<'on_disk>, &'tree Node<'on_disk>),
80 OnDisk(&'on_disk on_disk::Node),
80 OnDisk(&'on_disk on_disk::Node),
81 }
81 }
82
82
83 impl<'tree, 'on_disk> BorrowedPath<'tree, 'on_disk> {
83 impl<'tree, 'on_disk> BorrowedPath<'tree, 'on_disk> {
84 pub fn detach_from_tree(&self) -> Cow<'on_disk, HgPath> {
84 pub fn detach_from_tree(&self) -> Cow<'on_disk, HgPath> {
85 match *self {
85 match *self {
86 BorrowedPath::InMemory(in_memory) => Cow::Owned(in_memory.clone()),
86 BorrowedPath::InMemory(in_memory) => Cow::Owned(in_memory.clone()),
87 BorrowedPath::OnDisk(on_disk) => Cow::Borrowed(on_disk),
87 BorrowedPath::OnDisk(on_disk) => Cow::Borrowed(on_disk),
88 }
88 }
89 }
89 }
90 }
90 }
91
91
92 impl<'tree, 'on_disk> std::ops::Deref for BorrowedPath<'tree, 'on_disk> {
92 impl<'tree, 'on_disk> std::ops::Deref for BorrowedPath<'tree, 'on_disk> {
93 type Target = HgPath;
93 type Target = HgPath;
94
94
95 fn deref(&self) -> &HgPath {
95 fn deref(&self) -> &HgPath {
96 match *self {
96 match *self {
97 BorrowedPath::InMemory(in_memory) => in_memory,
97 BorrowedPath::InMemory(in_memory) => in_memory,
98 BorrowedPath::OnDisk(on_disk) => on_disk,
98 BorrowedPath::OnDisk(on_disk) => on_disk,
99 }
99 }
100 }
100 }
101 }
101 }
102
102
103 impl Default for ChildNodes<'_> {
103 impl Default for ChildNodes<'_> {
104 fn default() -> Self {
104 fn default() -> Self {
105 ChildNodes::InMemory(Default::default())
105 ChildNodes::InMemory(Default::default())
106 }
106 }
107 }
107 }
108
108
109 impl<'on_disk> ChildNodes<'on_disk> {
109 impl<'on_disk> ChildNodes<'on_disk> {
110 pub(super) fn as_ref<'tree>(
110 pub(super) fn as_ref<'tree>(
111 &'tree self,
111 &'tree self,
112 ) -> ChildNodesRef<'tree, 'on_disk> {
112 ) -> ChildNodesRef<'tree, 'on_disk> {
113 match self {
113 match self {
114 ChildNodes::InMemory(nodes) => ChildNodesRef::InMemory(nodes),
114 ChildNodes::InMemory(nodes) => ChildNodesRef::InMemory(nodes),
115 ChildNodes::OnDisk(nodes) => ChildNodesRef::OnDisk(nodes),
115 ChildNodes::OnDisk(nodes) => ChildNodesRef::OnDisk(nodes),
116 }
116 }
117 }
117 }
118
118
119 pub(super) fn is_empty(&self) -> bool {
119 pub(super) fn is_empty(&self) -> bool {
120 match self {
120 match self {
121 ChildNodes::InMemory(nodes) => nodes.is_empty(),
121 ChildNodes::InMemory(nodes) => nodes.is_empty(),
122 ChildNodes::OnDisk(nodes) => nodes.is_empty(),
122 ChildNodes::OnDisk(nodes) => nodes.is_empty(),
123 }
123 }
124 }
124 }
125
125
126 fn make_mut(
126 fn make_mut(
127 &mut self,
127 &mut self,
128 on_disk: &'on_disk [u8],
128 on_disk: &'on_disk [u8],
129 unreachable_bytes: &mut u32,
129 unreachable_bytes: &mut u32,
130 ) -> Result<
130 ) -> Result<
131 &mut FastHashMap<NodeKey<'on_disk>, Node<'on_disk>>,
131 &mut FastHashMap<NodeKey<'on_disk>, Node<'on_disk>>,
132 DirstateV2ParseError,
132 DirstateV2ParseError,
133 > {
133 > {
134 match self {
134 match self {
135 ChildNodes::InMemory(nodes) => Ok(nodes),
135 ChildNodes::InMemory(nodes) => Ok(nodes),
136 ChildNodes::OnDisk(nodes) => {
136 ChildNodes::OnDisk(nodes) => {
137 *unreachable_bytes +=
137 *unreachable_bytes +=
138 std::mem::size_of_val::<[on_disk::Node]>(nodes) as u32;
138 std::mem::size_of_val::<[on_disk::Node]>(nodes) as u32;
139 let nodes = nodes
139 let nodes = nodes
140 .iter()
140 .iter()
141 .map(|node| {
141 .map(|node| {
142 Ok((
142 Ok((
143 node.path(on_disk)?,
143 node.path(on_disk)?,
144 node.to_in_memory_node(on_disk)?,
144 node.to_in_memory_node(on_disk)?,
145 ))
145 ))
146 })
146 })
147 .collect::<Result<_, _>>()?;
147 .collect::<Result<_, _>>()?;
148 *self = ChildNodes::InMemory(nodes);
148 *self = ChildNodes::InMemory(nodes);
149 match self {
149 match self {
150 ChildNodes::InMemory(nodes) => Ok(nodes),
150 ChildNodes::InMemory(nodes) => Ok(nodes),
151 ChildNodes::OnDisk(_) => unreachable!(),
151 ChildNodes::OnDisk(_) => unreachable!(),
152 }
152 }
153 }
153 }
154 }
154 }
155 }
155 }
156 }
156 }
157
157
158 impl<'tree, 'on_disk> ChildNodesRef<'tree, 'on_disk> {
158 impl<'tree, 'on_disk> ChildNodesRef<'tree, 'on_disk> {
159 pub(super) fn get(
159 pub(super) fn get(
160 &self,
160 &self,
161 base_name: &HgPath,
161 base_name: &HgPath,
162 on_disk: &'on_disk [u8],
162 on_disk: &'on_disk [u8],
163 ) -> Result<Option<NodeRef<'tree, 'on_disk>>, DirstateV2ParseError> {
163 ) -> Result<Option<NodeRef<'tree, 'on_disk>>, DirstateV2ParseError> {
164 match self {
164 match self {
165 ChildNodesRef::InMemory(nodes) => Ok(nodes
165 ChildNodesRef::InMemory(nodes) => Ok(nodes
166 .get_key_value(base_name)
166 .get_key_value(base_name)
167 .map(|(k, v)| NodeRef::InMemory(k, v))),
167 .map(|(k, v)| NodeRef::InMemory(k, v))),
168 ChildNodesRef::OnDisk(nodes) => {
168 ChildNodesRef::OnDisk(nodes) => {
169 let mut parse_result = Ok(());
169 let mut parse_result = Ok(());
170 let search_result = nodes.binary_search_by(|node| {
170 let search_result = nodes.binary_search_by(|node| {
171 match node.base_name(on_disk) {
171 match node.base_name(on_disk) {
172 Ok(node_base_name) => node_base_name.cmp(base_name),
172 Ok(node_base_name) => node_base_name.cmp(base_name),
173 Err(e) => {
173 Err(e) => {
174 parse_result = Err(e);
174 parse_result = Err(e);
175 // Dummy comparison result, `search_result` won’t
175 // Dummy comparison result, `search_result` won’t
176 // be used since `parse_result` is an error
176 // be used since `parse_result` is an error
177 std::cmp::Ordering::Equal
177 std::cmp::Ordering::Equal
178 }
178 }
179 }
179 }
180 });
180 });
181 parse_result.map(|()| {
181 parse_result.map(|()| {
182 search_result.ok().map(|i| NodeRef::OnDisk(&nodes[i]))
182 search_result.ok().map(|i| NodeRef::OnDisk(&nodes[i]))
183 })
183 })
184 }
184 }
185 }
185 }
186 }
186 }
187
187
188 /// Iterate in undefined order
188 /// Iterate in undefined order
189 pub(super) fn iter(
189 pub(super) fn iter(
190 &self,
190 &self,
191 ) -> impl Iterator<Item = NodeRef<'tree, 'on_disk>> {
191 ) -> impl Iterator<Item = NodeRef<'tree, 'on_disk>> {
192 match self {
192 match self {
193 ChildNodesRef::InMemory(nodes) => itertools::Either::Left(
193 ChildNodesRef::InMemory(nodes) => itertools::Either::Left(
194 nodes.iter().map(|(k, v)| NodeRef::InMemory(k, v)),
194 nodes.iter().map(|(k, v)| NodeRef::InMemory(k, v)),
195 ),
195 ),
196 ChildNodesRef::OnDisk(nodes) => {
196 ChildNodesRef::OnDisk(nodes) => {
197 itertools::Either::Right(nodes.iter().map(NodeRef::OnDisk))
197 itertools::Either::Right(nodes.iter().map(NodeRef::OnDisk))
198 }
198 }
199 }
199 }
200 }
200 }
201
201
202 /// Iterate in parallel in undefined order
202 /// Iterate in parallel in undefined order
203 pub(super) fn par_iter(
203 pub(super) fn par_iter(
204 &self,
204 &self,
205 ) -> impl rayon::iter::ParallelIterator<Item = NodeRef<'tree, 'on_disk>>
205 ) -> impl rayon::iter::ParallelIterator<Item = NodeRef<'tree, 'on_disk>>
206 {
206 {
207 use rayon::prelude::*;
207 use rayon::prelude::*;
208 match self {
208 match self {
209 ChildNodesRef::InMemory(nodes) => rayon::iter::Either::Left(
209 ChildNodesRef::InMemory(nodes) => rayon::iter::Either::Left(
210 nodes.par_iter().map(|(k, v)| NodeRef::InMemory(k, v)),
210 nodes.par_iter().map(|(k, v)| NodeRef::InMemory(k, v)),
211 ),
211 ),
212 ChildNodesRef::OnDisk(nodes) => rayon::iter::Either::Right(
212 ChildNodesRef::OnDisk(nodes) => rayon::iter::Either::Right(
213 nodes.par_iter().map(NodeRef::OnDisk),
213 nodes.par_iter().map(NodeRef::OnDisk),
214 ),
214 ),
215 }
215 }
216 }
216 }
217
217
218 pub(super) fn sorted(&self) -> Vec<NodeRef<'tree, 'on_disk>> {
218 pub(super) fn sorted(&self) -> Vec<NodeRef<'tree, 'on_disk>> {
219 match self {
219 match self {
220 ChildNodesRef::InMemory(nodes) => {
220 ChildNodesRef::InMemory(nodes) => {
221 let mut vec: Vec<_> = nodes
221 let mut vec: Vec<_> = nodes
222 .iter()
222 .iter()
223 .map(|(k, v)| NodeRef::InMemory(k, v))
223 .map(|(k, v)| NodeRef::InMemory(k, v))
224 .collect();
224 .collect();
225 fn sort_key<'a>(node: &'a NodeRef) -> &'a HgPath {
225 fn sort_key<'a>(node: &'a NodeRef) -> &'a HgPath {
226 match node {
226 match node {
227 NodeRef::InMemory(path, _node) => path.base_name(),
227 NodeRef::InMemory(path, _node) => path.base_name(),
228 NodeRef::OnDisk(_) => unreachable!(),
228 NodeRef::OnDisk(_) => unreachable!(),
229 }
229 }
230 }
230 }
231 // `sort_unstable_by_key` doesn’t allow keys borrowing from the
231 // `sort_unstable_by_key` doesn’t allow keys borrowing from the
232 // value: https://github.com/rust-lang/rust/issues/34162
232 // value: https://github.com/rust-lang/rust/issues/34162
233 vec.sort_unstable_by(|a, b| sort_key(a).cmp(sort_key(b)));
233 vec.sort_unstable_by(|a, b| sort_key(a).cmp(sort_key(b)));
234 vec
234 vec
235 }
235 }
236 ChildNodesRef::OnDisk(nodes) => {
236 ChildNodesRef::OnDisk(nodes) => {
237 // Nodes on disk are already sorted
237 // Nodes on disk are already sorted
238 nodes.iter().map(NodeRef::OnDisk).collect()
238 nodes.iter().map(NodeRef::OnDisk).collect()
239 }
239 }
240 }
240 }
241 }
241 }
242 }
242 }
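An aside on the comparator used in `sorted` above: because the sort key borrows from the element, `sort_unstable_by_key` cannot accept it (rust-lang/rust#34162), so an explicit comparator is used instead. A minimal standalone sketch of the same pattern, with made-up data:

    let mut items = vec![(String::from("beta"), 2), (String::from("alpha"), 1)];
    // items.sort_unstable_by_key(|(name, _)| name.as_str()); // rejected: key borrows from the element
    items.sort_unstable_by(|a, b| a.0.as_str().cmp(b.0.as_str())); // compiles and sorts by name
    assert_eq!(items[0].0, "alpha");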
243
243
244 impl<'tree, 'on_disk> NodeRef<'tree, 'on_disk> {
244 impl<'tree, 'on_disk> NodeRef<'tree, 'on_disk> {
245 pub(super) fn full_path(
245 pub(super) fn full_path(
246 &self,
246 &self,
247 on_disk: &'on_disk [u8],
247 on_disk: &'on_disk [u8],
248 ) -> Result<&'tree HgPath, DirstateV2ParseError> {
248 ) -> Result<&'tree HgPath, DirstateV2ParseError> {
249 match self {
249 match self {
250 NodeRef::InMemory(path, _node) => Ok(path.full_path()),
250 NodeRef::InMemory(path, _node) => Ok(path.full_path()),
251 NodeRef::OnDisk(node) => node.full_path(on_disk),
251 NodeRef::OnDisk(node) => node.full_path(on_disk),
252 }
252 }
253 }
253 }
254
254
255 /// Returns a `BorrowedPath`, which can be turned into a `Cow<'on_disk,
255 /// Returns a `BorrowedPath`, which can be turned into a `Cow<'on_disk,
256 /// HgPath>` detached from `'tree`
256 /// HgPath>` detached from `'tree`
257 pub(super) fn full_path_borrowed(
257 pub(super) fn full_path_borrowed(
258 &self,
258 &self,
259 on_disk: &'on_disk [u8],
259 on_disk: &'on_disk [u8],
260 ) -> Result<BorrowedPath<'tree, 'on_disk>, DirstateV2ParseError> {
260 ) -> Result<BorrowedPath<'tree, 'on_disk>, DirstateV2ParseError> {
261 match self {
261 match self {
262 NodeRef::InMemory(path, _node) => match path.full_path() {
262 NodeRef::InMemory(path, _node) => match path.full_path() {
263 Cow::Borrowed(on_disk) => Ok(BorrowedPath::OnDisk(on_disk)),
263 Cow::Borrowed(on_disk) => Ok(BorrowedPath::OnDisk(on_disk)),
264 Cow::Owned(in_memory) => Ok(BorrowedPath::InMemory(in_memory)),
264 Cow::Owned(in_memory) => Ok(BorrowedPath::InMemory(in_memory)),
265 },
265 },
266 NodeRef::OnDisk(node) => {
266 NodeRef::OnDisk(node) => {
267 Ok(BorrowedPath::OnDisk(node.full_path(on_disk)?))
267 Ok(BorrowedPath::OnDisk(node.full_path(on_disk)?))
268 }
268 }
269 }
269 }
270 }
270 }
271
271
272 pub(super) fn base_name(
272 pub(super) fn base_name(
273 &self,
273 &self,
274 on_disk: &'on_disk [u8],
274 on_disk: &'on_disk [u8],
275 ) -> Result<&'tree HgPath, DirstateV2ParseError> {
275 ) -> Result<&'tree HgPath, DirstateV2ParseError> {
276 match self {
276 match self {
277 NodeRef::InMemory(path, _node) => Ok(path.base_name()),
277 NodeRef::InMemory(path, _node) => Ok(path.base_name()),
278 NodeRef::OnDisk(node) => node.base_name(on_disk),
278 NodeRef::OnDisk(node) => node.base_name(on_disk),
279 }
279 }
280 }
280 }
281
281
282 pub(super) fn children(
282 pub(super) fn children(
283 &self,
283 &self,
284 on_disk: &'on_disk [u8],
284 on_disk: &'on_disk [u8],
285 ) -> Result<ChildNodesRef<'tree, 'on_disk>, DirstateV2ParseError> {
285 ) -> Result<ChildNodesRef<'tree, 'on_disk>, DirstateV2ParseError> {
286 match self {
286 match self {
287 NodeRef::InMemory(_path, node) => Ok(node.children.as_ref()),
287 NodeRef::InMemory(_path, node) => Ok(node.children.as_ref()),
288 NodeRef::OnDisk(node) => {
288 NodeRef::OnDisk(node) => {
289 Ok(ChildNodesRef::OnDisk(node.children(on_disk)?))
289 Ok(ChildNodesRef::OnDisk(node.children(on_disk)?))
290 }
290 }
291 }
291 }
292 }
292 }
293
293
294 pub(super) fn has_copy_source(&self) -> bool {
294 pub(super) fn has_copy_source(&self) -> bool {
295 match self {
295 match self {
296 NodeRef::InMemory(_path, node) => node.copy_source.is_some(),
296 NodeRef::InMemory(_path, node) => node.copy_source.is_some(),
297 NodeRef::OnDisk(node) => node.has_copy_source(),
297 NodeRef::OnDisk(node) => node.has_copy_source(),
298 }
298 }
299 }
299 }
300
300
301 pub(super) fn copy_source(
301 pub(super) fn copy_source(
302 &self,
302 &self,
303 on_disk: &'on_disk [u8],
303 on_disk: &'on_disk [u8],
304 ) -> Result<Option<&'tree HgPath>, DirstateV2ParseError> {
304 ) -> Result<Option<&'tree HgPath>, DirstateV2ParseError> {
305 match self {
305 match self {
306 NodeRef::InMemory(_path, node) => {
306 NodeRef::InMemory(_path, node) => {
307 Ok(node.copy_source.as_ref().map(|s| &**s))
307 Ok(node.copy_source.as_ref().map(|s| &**s))
308 }
308 }
309 NodeRef::OnDisk(node) => node.copy_source(on_disk),
309 NodeRef::OnDisk(node) => node.copy_source(on_disk),
310 }
310 }
311 }
311 }
312
312
313 pub(super) fn entry(
313 pub(super) fn entry(
314 &self,
314 &self,
315 ) -> Result<Option<DirstateEntry>, DirstateV2ParseError> {
315 ) -> Result<Option<DirstateEntry>, DirstateV2ParseError> {
316 match self {
316 match self {
317 NodeRef::InMemory(_path, node) => {
317 NodeRef::InMemory(_path, node) => {
318 Ok(node.data.as_entry().copied())
318 Ok(node.data.as_entry().copied())
319 }
319 }
320 NodeRef::OnDisk(node) => node.entry(),
320 NodeRef::OnDisk(node) => node.entry(),
321 }
321 }
322 }
322 }
323
323
324 pub(super) fn state(
324 pub(super) fn state(
325 &self,
325 &self,
326 ) -> Result<Option<EntryState>, DirstateV2ParseError> {
326 ) -> Result<Option<EntryState>, DirstateV2ParseError> {
327 match self {
327 match self {
328 NodeRef::InMemory(_path, node) => {
328 NodeRef::InMemory(_path, node) => {
329 Ok(node.data.as_entry().map(|entry| entry.state()))
329 Ok(node.data.as_entry().map(|entry| entry.state()))
330 }
330 }
331 NodeRef::OnDisk(node) => node.state(),
331 NodeRef::OnDisk(node) => node.state(),
332 }
332 }
333 }
333 }
334
334
335 pub(super) fn cached_directory_mtime(
335 pub(super) fn cached_directory_mtime(
336 &self,
336 &self,
337 ) -> Option<&'tree on_disk::Timestamp> {
337 ) -> Option<&'tree on_disk::Timestamp> {
338 match self {
338 match self {
339 NodeRef::InMemory(_path, node) => match &node.data {
339 NodeRef::InMemory(_path, node) => match &node.data {
340 NodeData::CachedDirectory { mtime } => Some(mtime),
340 NodeData::CachedDirectory { mtime } => Some(mtime),
341 _ => None,
341 _ => None,
342 },
342 },
343 NodeRef::OnDisk(node) => node.cached_directory_mtime(),
343 NodeRef::OnDisk(node) => node.cached_directory_mtime(),
344 }
344 }
345 }
345 }
346
346
347 pub(super) fn descendants_with_entry_count(&self) -> u32 {
347 pub(super) fn descendants_with_entry_count(&self) -> u32 {
348 match self {
348 match self {
349 NodeRef::InMemory(_path, node) => {
349 NodeRef::InMemory(_path, node) => {
350 node.descendants_with_entry_count
350 node.descendants_with_entry_count
351 }
351 }
352 NodeRef::OnDisk(node) => node.descendants_with_entry_count.get(),
352 NodeRef::OnDisk(node) => node.descendants_with_entry_count.get(),
353 }
353 }
354 }
354 }
355
355
356 pub(super) fn tracked_descendants_count(&self) -> u32 {
356 pub(super) fn tracked_descendants_count(&self) -> u32 {
357 match self {
357 match self {
358 NodeRef::InMemory(_path, node) => node.tracked_descendants_count,
358 NodeRef::InMemory(_path, node) => node.tracked_descendants_count,
359 NodeRef::OnDisk(node) => node.tracked_descendants_count.get(),
359 NodeRef::OnDisk(node) => node.tracked_descendants_count.get(),
360 }
360 }
361 }
361 }
362 }
362 }
363
363
364 /// Represents a file or a directory
364 /// Represents a file or a directory
365 #[derive(Default)]
365 #[derive(Default)]
366 pub(super) struct Node<'on_disk> {
366 pub(super) struct Node<'on_disk> {
367 pub(super) data: NodeData,
367 pub(super) data: NodeData,
368
368
369 pub(super) copy_source: Option<Cow<'on_disk, HgPath>>,
369 pub(super) copy_source: Option<Cow<'on_disk, HgPath>>,
370
370
371 pub(super) children: ChildNodes<'on_disk>,
371 pub(super) children: ChildNodes<'on_disk>,
372
372
373 /// How many (non-inclusive) descendants of this node have an entry.
373 /// How many (non-inclusive) descendants of this node have an entry.
374 pub(super) descendants_with_entry_count: u32,
374 pub(super) descendants_with_entry_count: u32,
375
375
376 /// How many (non-inclusive) descendants of this node have an entry whose
376 /// How many (non-inclusive) descendants of this node have an entry whose
377 /// state is "tracked".
377 /// state is "tracked".
378 pub(super) tracked_descendants_count: u32,
378 pub(super) tracked_descendants_count: u32,
379 }
379 }
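To make the two counters above concrete, consider a hypothetical tree (not taken from this change) with dirstate entries at `dir/a` (tracked) and `dir/b` (removed), and no entry on `dir` itself:

    // dir.descendants_with_entry_count == 2  // both children have an entry
    // dir.tracked_descendants_count    == 1  // only "dir/a" is in a tracked state
    // "non-inclusive" means a node's own entry, if any, is not counted.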
380
380
381 pub(super) enum NodeData {
381 pub(super) enum NodeData {
382 Entry(DirstateEntry),
382 Entry(DirstateEntry),
383 CachedDirectory { mtime: on_disk::Timestamp },
383 CachedDirectory { mtime: on_disk::Timestamp },
384 None,
384 None,
385 }
385 }
386
386
387 impl Default for NodeData {
387 impl Default for NodeData {
388 fn default() -> Self {
388 fn default() -> Self {
389 NodeData::None
389 NodeData::None
390 }
390 }
391 }
391 }
392
392
393 impl NodeData {
393 impl NodeData {
394 fn has_entry(&self) -> bool {
394 fn has_entry(&self) -> bool {
395 match self {
395 match self {
396 NodeData::Entry(_) => true,
396 NodeData::Entry(_) => true,
397 _ => false,
397 _ => false,
398 }
398 }
399 }
399 }
400
400
401 fn as_entry(&self) -> Option<&DirstateEntry> {
401 fn as_entry(&self) -> Option<&DirstateEntry> {
402 match self {
402 match self {
403 NodeData::Entry(entry) => Some(entry),
403 NodeData::Entry(entry) => Some(entry),
404 _ => None,
404 _ => None,
405 }
405 }
406 }
406 }
407 }
407 }
408
408
409 impl<'on_disk> DirstateMap<'on_disk> {
409 impl<'on_disk> DirstateMap<'on_disk> {
410 pub(super) fn empty(on_disk: &'on_disk [u8]) -> Self {
410 pub(super) fn empty(on_disk: &'on_disk [u8]) -> Self {
411 Self {
411 Self {
412 on_disk,
412 on_disk,
413 root: ChildNodes::default(),
413 root: ChildNodes::default(),
414 nodes_with_entry_count: 0,
414 nodes_with_entry_count: 0,
415 nodes_with_copy_source_count: 0,
415 nodes_with_copy_source_count: 0,
416 ignore_patterns_hash: [0; on_disk::IGNORE_PATTERNS_HASH_LEN],
416 ignore_patterns_hash: [0; on_disk::IGNORE_PATTERNS_HASH_LEN],
417 unreachable_bytes: 0,
417 unreachable_bytes: 0,
418 }
418 }
419 }
419 }
420
420
421 #[timed]
421 #[timed]
422 pub fn new_v2(
422 pub fn new_v2(
423 on_disk: &'on_disk [u8],
423 on_disk: &'on_disk [u8],
424 data_size: usize,
424 data_size: usize,
425 metadata: &[u8],
425 metadata: &[u8],
426 ) -> Result<Self, DirstateError> {
426 ) -> Result<Self, DirstateError> {
427 if let Some(data) = on_disk.get(..data_size) {
427 if let Some(data) = on_disk.get(..data_size) {
428 Ok(on_disk::read(data, metadata)?)
428 Ok(on_disk::read(data, metadata)?)
429 } else {
429 } else {
430 Err(DirstateV2ParseError.into())
430 Err(DirstateV2ParseError.into())
431 }
431 }
432 }
432 }
433
433
434 #[timed]
434 #[timed]
435 pub fn new_v1(
435 pub fn new_v1(
436 on_disk: &'on_disk [u8],
436 on_disk: &'on_disk [u8],
437 ) -> Result<(Self, Option<DirstateParents>), DirstateError> {
437 ) -> Result<(Self, Option<DirstateParents>), DirstateError> {
438 let mut map = Self::empty(on_disk);
438 let mut map = Self::empty(on_disk);
439 if map.on_disk.is_empty() {
439 if map.on_disk.is_empty() {
440 return Ok((map, None));
440 return Ok((map, None));
441 }
441 }
442
442
443 let parents = parse_dirstate_entries(
443 let parents = parse_dirstate_entries(
444 map.on_disk,
444 map.on_disk,
445 |path, entry, copy_source| {
445 |path, entry, copy_source| {
446 let tracked = entry.state().is_tracked();
446 let tracked = entry.state().is_tracked();
447 let node = Self::get_or_insert_node(
447 let node = Self::get_or_insert_node(
448 map.on_disk,
448 map.on_disk,
449 &mut map.unreachable_bytes,
449 &mut map.unreachable_bytes,
450 &mut map.root,
450 &mut map.root,
451 path,
451 path,
452 WithBasename::to_cow_borrowed,
452 WithBasename::to_cow_borrowed,
453 |ancestor| {
453 |ancestor| {
454 if tracked {
454 if tracked {
455 ancestor.tracked_descendants_count += 1
455 ancestor.tracked_descendants_count += 1
456 }
456 }
457 ancestor.descendants_with_entry_count += 1
457 ancestor.descendants_with_entry_count += 1
458 },
458 },
459 )?;
459 )?;
460 assert!(
460 assert!(
461 !node.data.has_entry(),
461 !node.data.has_entry(),
462 "duplicate dirstate entry in read"
462 "duplicate dirstate entry in read"
463 );
463 );
464 assert!(
464 assert!(
465 node.copy_source.is_none(),
465 node.copy_source.is_none(),
466 "duplicate dirstate entry in read"
466 "duplicate dirstate entry in read"
467 );
467 );
468 node.data = NodeData::Entry(*entry);
468 node.data = NodeData::Entry(*entry);
469 node.copy_source = copy_source.map(Cow::Borrowed);
469 node.copy_source = copy_source.map(Cow::Borrowed);
470 map.nodes_with_entry_count += 1;
470 map.nodes_with_entry_count += 1;
471 if copy_source.is_some() {
471 if copy_source.is_some() {
472 map.nodes_with_copy_source_count += 1
472 map.nodes_with_copy_source_count += 1
473 }
473 }
474 Ok(())
474 Ok(())
475 },
475 },
476 )?;
476 )?;
477 let parents = Some(parents.clone());
477 let parents = Some(parents.clone());
478
478
479 Ok((map, parents))
479 Ok((map, parents))
480 }
480 }
481
481
482 /// Assuming dirstate-v2 format, returns whether the next write should
482 /// Assuming dirstate-v2 format, returns whether the next write should
483 /// append to the existing data file that contains `self.on_disk` (true),
483 /// append to the existing data file that contains `self.on_disk` (true),
484 /// or create a new data file from scratch (false).
484 /// or create a new data file from scratch (false).
485 pub(super) fn write_should_append(&self) -> bool {
485 pub(super) fn write_should_append(&self) -> bool {
486 let ratio = self.unreachable_bytes as f32 / self.on_disk.len() as f32;
486 let ratio = self.unreachable_bytes as f32 / self.on_disk.len() as f32;
487 ratio < ACCEPTABLE_UNREACHABLE_BYTES_RATIO
487 ratio < ACCEPTABLE_UNREACHABLE_BYTES_RATIO
488 }
488 }
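For illustration only; the actual `ACCEPTABLE_UNREACHABLE_BYTES_RATIO` constant is defined elsewhere in this module, and a threshold of 0.5 is assumed here:

    // 300 unreachable bytes in a 1000-byte data file: 0.3 < 0.5, so append;
    // 600 unreachable bytes in the same file: 0.6 >= 0.5, so rewrite from scratch.
    let (unreachable_bytes, on_disk_len) = (300u32, 1000usize);
    let ratio = unreachable_bytes as f32 / on_disk_len as f32;
    assert!(ratio < 0.5);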
489
489
490 fn get_node<'tree>(
490 fn get_node<'tree>(
491 &'tree self,
491 &'tree self,
492 path: &HgPath,
492 path: &HgPath,
493 ) -> Result<Option<NodeRef<'tree, 'on_disk>>, DirstateV2ParseError> {
493 ) -> Result<Option<NodeRef<'tree, 'on_disk>>, DirstateV2ParseError> {
494 let mut children = self.root.as_ref();
494 let mut children = self.root.as_ref();
495 let mut components = path.components();
495 let mut components = path.components();
496 let mut component =
496 let mut component =
497 components.next().expect("expected at least one component");
497 components.next().expect("expected at least one component");
498 loop {
498 loop {
499 if let Some(child) = children.get(component, self.on_disk)? {
499 if let Some(child) = children.get(component, self.on_disk)? {
500 if let Some(next_component) = components.next() {
500 if let Some(next_component) = components.next() {
501 component = next_component;
501 component = next_component;
502 children = child.children(self.on_disk)?;
502 children = child.children(self.on_disk)?;
503 } else {
503 } else {
504 return Ok(Some(child));
504 return Ok(Some(child));
505 }
505 }
506 } else {
506 } else {
507 return Ok(None);
507 return Ok(None);
508 }
508 }
509 }
509 }
510 }
510 }
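A hypothetical walk-through of the loop above (the path is invented): looking up `dir/subdir/file.rs` costs one child lookup per path component and returns early as soon as a component is missing.

    // children = root              -> get("dir")      -> node for "dir"
    // children = dir's children    -> get("subdir")   -> node for "subdir"
    // children = subdir's children -> get("file.rs")  -> Ok(Some(node))
    // If "subdir" had no entry in its parent's child map, the loop would return Ok(None) there.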
511
511
512 /// Returns a mutable reference to the node at `path` if it exists
512 /// Returns a mutable reference to the node at `path` if it exists
513 ///
513 ///
514 /// This takes `root` instead of `&mut self` so that callers can mutate
514 /// This takes `root` instead of `&mut self` so that callers can mutate
515 /// other fields while the returned borrow is still valid
515 /// other fields while the returned borrow is still valid
516 fn get_node_mut<'tree>(
516 fn get_node_mut<'tree>(
517 on_disk: &'on_disk [u8],
517 on_disk: &'on_disk [u8],
518 unreachable_bytes: &mut u32,
518 unreachable_bytes: &mut u32,
519 root: &'tree mut ChildNodes<'on_disk>,
519 root: &'tree mut ChildNodes<'on_disk>,
520 path: &HgPath,
520 path: &HgPath,
521 ) -> Result<Option<&'tree mut Node<'on_disk>>, DirstateV2ParseError> {
521 ) -> Result<Option<&'tree mut Node<'on_disk>>, DirstateV2ParseError> {
522 let mut children = root;
522 let mut children = root;
523 let mut components = path.components();
523 let mut components = path.components();
524 let mut component =
524 let mut component =
525 components.next().expect("expected at least one component");
525 components.next().expect("expected at least one component");
526 loop {
526 loop {
527 if let Some(child) = children
527 if let Some(child) = children
528 .make_mut(on_disk, unreachable_bytes)?
528 .make_mut(on_disk, unreachable_bytes)?
529 .get_mut(component)
529 .get_mut(component)
530 {
530 {
531 if let Some(next_component) = components.next() {
531 if let Some(next_component) = components.next() {
532 component = next_component;
532 component = next_component;
533 children = &mut child.children;
533 children = &mut child.children;
534 } else {
534 } else {
535 return Ok(Some(child));
535 return Ok(Some(child));
536 }
536 }
537 } else {
537 } else {
538 return Ok(None);
538 return Ok(None);
539 }
539 }
540 }
540 }
541 }
541 }
542
542
543 pub(super) fn get_or_insert<'tree, 'path>(
543 pub(super) fn get_or_insert<'tree, 'path>(
544 &'tree mut self,
544 &'tree mut self,
545 path: &HgPath,
545 path: &HgPath,
546 ) -> Result<&'tree mut Node<'on_disk>, DirstateV2ParseError> {
546 ) -> Result<&'tree mut Node<'on_disk>, DirstateV2ParseError> {
547 Self::get_or_insert_node(
547 Self::get_or_insert_node(
548 self.on_disk,
548 self.on_disk,
549 &mut self.unreachable_bytes,
549 &mut self.unreachable_bytes,
550 &mut self.root,
550 &mut self.root,
551 path,
551 path,
552 WithBasename::to_cow_owned,
552 WithBasename::to_cow_owned,
553 |_| {},
553 |_| {},
554 )
554 )
555 }
555 }
556
556
557 fn get_or_insert_node<'tree, 'path>(
557 fn get_or_insert_node<'tree, 'path>(
558 on_disk: &'on_disk [u8],
558 on_disk: &'on_disk [u8],
559 unreachable_bytes: &mut u32,
559 unreachable_bytes: &mut u32,
560 root: &'tree mut ChildNodes<'on_disk>,
560 root: &'tree mut ChildNodes<'on_disk>,
561 path: &'path HgPath,
561 path: &'path HgPath,
562 to_cow: impl Fn(
562 to_cow: impl Fn(
563 WithBasename<&'path HgPath>,
563 WithBasename<&'path HgPath>,
564 ) -> WithBasename<Cow<'on_disk, HgPath>>,
564 ) -> WithBasename<Cow<'on_disk, HgPath>>,
565 mut each_ancestor: impl FnMut(&mut Node),
565 mut each_ancestor: impl FnMut(&mut Node),
566 ) -> Result<&'tree mut Node<'on_disk>, DirstateV2ParseError> {
566 ) -> Result<&'tree mut Node<'on_disk>, DirstateV2ParseError> {
567 let mut child_nodes = root;
567 let mut child_nodes = root;
568 let mut inclusive_ancestor_paths =
568 let mut inclusive_ancestor_paths =
569 WithBasename::inclusive_ancestors_of(path);
569 WithBasename::inclusive_ancestors_of(path);
570 let mut ancestor_path = inclusive_ancestor_paths
570 let mut ancestor_path = inclusive_ancestor_paths
571 .next()
571 .next()
572 .expect("expected at least one inclusive ancestor");
572 .expect("expected at least one inclusive ancestor");
573 loop {
573 loop {
574 // TODO: can we avoid allocating an owned key in cases where the
574 // TODO: can we avoid allocating an owned key in cases where the
575 // map already contains that key, without introducing double
575 // map already contains that key, without introducing double
576 // lookup?
576 // lookup?
577 let child_node = child_nodes
577 let child_node = child_nodes
578 .make_mut(on_disk, unreachable_bytes)?
578 .make_mut(on_disk, unreachable_bytes)?
579 .entry(to_cow(ancestor_path))
579 .entry(to_cow(ancestor_path))
580 .or_default();
580 .or_default();
581 if let Some(next) = inclusive_ancestor_paths.next() {
581 if let Some(next) = inclusive_ancestor_paths.next() {
582 each_ancestor(child_node);
582 each_ancestor(child_node);
583 ancestor_path = next;
583 ancestor_path = next;
584 child_nodes = &mut child_node.children;
584 child_nodes = &mut child_node.children;
585 } else {
585 } else {
586 return Ok(child_node);
586 return Ok(child_node);
587 }
587 }
588 }
588 }
589 }
589 }
590
590
591 fn add_or_remove_file(
591 fn add_or_remove_file(
592 &mut self,
592 &mut self,
593 path: &HgPath,
593 path: &HgPath,
594 old_state: Option<EntryState>,
594 old_state: Option<EntryState>,
595 new_entry: DirstateEntry,
595 new_entry: DirstateEntry,
596 ) -> Result<(), DirstateV2ParseError> {
596 ) -> Result<(), DirstateV2ParseError> {
597 let had_entry = old_state.is_some();
597 let had_entry = old_state.is_some();
598 let was_tracked = old_state.map_or(false, |s| s.is_tracked());
598 let was_tracked = old_state.map_or(false, |s| s.is_tracked());
599 let tracked_count_increment =
599 let tracked_count_increment =
600 match (was_tracked, new_entry.state().is_tracked()) {
600 match (was_tracked, new_entry.state().is_tracked()) {
601 (false, true) => 1,
601 (false, true) => 1,
602 (true, false) => -1,
602 (true, false) => -1,
603 _ => 0,
603 _ => 0,
604 };
604 };
605
605
606 let node = Self::get_or_insert_node(
606 let node = Self::get_or_insert_node(
607 self.on_disk,
607 self.on_disk,
608 &mut self.unreachable_bytes,
608 &mut self.unreachable_bytes,
609 &mut self.root,
609 &mut self.root,
610 path,
610 path,
611 WithBasename::to_cow_owned,
611 WithBasename::to_cow_owned,
612 |ancestor| {
612 |ancestor| {
613 if !had_entry {
613 if !had_entry {
614 ancestor.descendants_with_entry_count += 1;
614 ancestor.descendants_with_entry_count += 1;
615 }
615 }
616
616
617 // We can’t use `+= increment` because the counter is unsigned,
617 // We can’t use `+= increment` because the counter is unsigned,
618 // and we want debug builds to detect accidental underflow
618 // and we want debug builds to detect accidental underflow
619 // through zero
619 // through zero
620 match tracked_count_increment {
620 match tracked_count_increment {
621 1 => ancestor.tracked_descendants_count += 1,
621 1 => ancestor.tracked_descendants_count += 1,
622 -1 => ancestor.tracked_descendants_count -= 1,
622 -1 => ancestor.tracked_descendants_count -= 1,
623 _ => {}
623 _ => {}
624 }
624 }
625 },
625 },
626 )?;
626 )?;
627 if !had_entry {
627 if !had_entry {
628 self.nodes_with_entry_count += 1
628 self.nodes_with_entry_count += 1
629 }
629 }
630 node.data = NodeData::Entry(new_entry);
630 node.data = NodeData::Entry(new_entry);
631 Ok(())
631 Ok(())
632 }
632 }
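A small standalone illustration of the underflow concern described in the closure above (values are invented):

    let tracked_descendants_count: u32 = 0;
    let increment: i32 = -1;
    // Applying `+= increment` would require a cast and would wrap silently:
    assert_eq!(tracked_descendants_count.wrapping_add(increment as u32), u32::MAX);
    // whereas the explicit `-= 1` used above panics in debug builds, exposing the bug.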
633
633
634 fn iter_nodes<'tree>(
634 fn iter_nodes<'tree>(
635 &'tree self,
635 &'tree self,
636 ) -> impl Iterator<
636 ) -> impl Iterator<
637 Item = Result<NodeRef<'tree, 'on_disk>, DirstateV2ParseError>,
637 Item = Result<NodeRef<'tree, 'on_disk>, DirstateV2ParseError>,
638 > + 'tree {
638 > + 'tree {
639 // Depth first tree traversal.
639 // Depth first tree traversal.
640 //
640 //
641 // If we could afford internal iteration and recursion,
641 // If we could afford internal iteration and recursion,
642 // this would look like:
642 // this would look like:
643 //
643 //
644 // ```
644 // ```
645 // fn traverse_children(
645 // fn traverse_children(
646 // children: &ChildNodes,
646 // children: &ChildNodes,
647 // each: &mut impl FnMut(&Node),
647 // each: &mut impl FnMut(&Node),
648 // ) {
648 // ) {
649 // for child in children.values() {
649 // for child in children.values() {
650 // traverse_children(&child.children, each);
650 // traverse_children(&child.children, each);
651 // each(child);
651 // each(child);
652 // }
652 // }
653 // }
653 // }
654 // ```
654 // ```
655 //
655 //
656 // However we want an external iterator and therefore can’t use the
656 // However we want an external iterator and therefore can’t use the
657 // call stack. Use an explicit stack instead:
657 // call stack. Use an explicit stack instead:
658 let mut stack = Vec::new();
658 let mut stack = Vec::new();
659 let mut iter = self.root.as_ref().iter();
659 let mut iter = self.root.as_ref().iter();
660 std::iter::from_fn(move || {
660 std::iter::from_fn(move || {
661 while let Some(child_node) = iter.next() {
661 while let Some(child_node) = iter.next() {
662 let children = match child_node.children(self.on_disk) {
662 let children = match child_node.children(self.on_disk) {
663 Ok(children) => children,
663 Ok(children) => children,
664 Err(error) => return Some(Err(error)),
664 Err(error) => return Some(Err(error)),
665 };
665 };
666 // Pseudo-recursion
666 // Pseudo-recursion
667 let new_iter = children.iter();
667 let new_iter = children.iter();
668 let old_iter = std::mem::replace(&mut iter, new_iter);
668 let old_iter = std::mem::replace(&mut iter, new_iter);
669 stack.push((child_node, old_iter));
669 stack.push((child_node, old_iter));
670 }
670 }
671 // Found the end of a `children.iter()` iterator.
671 // Found the end of a `children.iter()` iterator.
672 if let Some((child_node, next_iter)) = stack.pop() {
672 if let Some((child_node, next_iter)) = stack.pop() {
673 // "Return" from pseudo-recursion by restoring state from the
673 // "Return" from pseudo-recursion by restoring state from the
674 // explicit stack
674 // explicit stack
675 iter = next_iter;
675 iter = next_iter;
676
676
677 Some(Ok(child_node))
677 Some(Ok(child_node))
678 } else {
678 } else {
679 // Reached the bottom of the stack, we’re done
679 // Reached the bottom of the stack, we’re done
680 None
680 None
681 }
681 }
682 })
682 })
683 }
683 }
684
684
685 fn clear_known_ambiguous_mtimes(
685 fn clear_known_ambiguous_mtimes(
686 &mut self,
686 &mut self,
687 paths: &[impl AsRef<HgPath>],
687 paths: &[impl AsRef<HgPath>],
688 ) -> Result<(), DirstateV2ParseError> {
688 ) -> Result<(), DirstateV2ParseError> {
689 for path in paths {
689 for path in paths {
690 if let Some(node) = Self::get_node_mut(
690 if let Some(node) = Self::get_node_mut(
691 self.on_disk,
691 self.on_disk,
692 &mut self.unreachable_bytes,
692 &mut self.unreachable_bytes,
693 &mut self.root,
693 &mut self.root,
694 path.as_ref(),
694 path.as_ref(),
695 )? {
695 )? {
696 if let NodeData::Entry(entry) = &mut node.data {
696 if let NodeData::Entry(entry) = &mut node.data {
697 entry.clear_mtime();
697 entry.clear_mtime();
698 }
698 }
699 }
699 }
700 }
700 }
701 Ok(())
701 Ok(())
702 }
702 }
703
703
704 /// Return a fallible iterator of full paths of nodes that have an
705 /// `entry` for which the given `predicate` returns true.
706 ///
707 /// Fallibility means that each iterator item is a `Result`, which may
708 /// indicate a parse error of the on-disk dirstate-v2 format. Such errors
709 /// should only happen if Mercurial is buggy or a repository is corrupted.
710 fn filter_full_paths<'tree>(
711 &'tree self,
712 predicate: impl Fn(&DirstateEntry) -> bool + 'tree,
713 ) -> impl Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + 'tree
714 {
715 filter_map_results(self.iter_nodes(), move |node| {
716 if let Some(entry) = node.entry()? {
717 if predicate(&entry) {
718 return Ok(Some(node.full_path(self.on_disk)?));
719 }
720 }
721 Ok(None)
722 })
723 }
724
725 fn count_dropped_path(unreachable_bytes: &mut u32, path: &Cow<HgPath>) {
704 fn count_dropped_path(unreachable_bytes: &mut u32, path: &Cow<HgPath>) {
726 if let Cow::Borrowed(path) = path {
705 if let Cow::Borrowed(path) = path {
727 *unreachable_bytes += path.len() as u32
706 *unreachable_bytes += path.len() as u32
728 }
707 }
729 }
708 }
730 }
709 }
731
710
732 /// Like `Iterator::filter_map`, but over a fallible iterator of `Result`s.
711 /// Like `Iterator::filter_map`, but over a fallible iterator of `Result`s.
733 ///
712 ///
734 /// The callback is only called for incoming `Ok` values. Errors are passed
713 /// The callback is only called for incoming `Ok` values. Errors are passed
735 /// through as-is. In order to let it use the `?` operator the callback is
714 /// through as-is. In order to let it use the `?` operator the callback is
736 /// expected to return a `Result` of `Option`, instead of an `Option` of
715 /// expected to return a `Result` of `Option`, instead of an `Option` of
737 /// `Result`.
716 /// `Result`.
738 fn filter_map_results<'a, I, F, A, B, E>(
717 fn filter_map_results<'a, I, F, A, B, E>(
739 iter: I,
718 iter: I,
740 f: F,
719 f: F,
741 ) -> impl Iterator<Item = Result<B, E>> + 'a
720 ) -> impl Iterator<Item = Result<B, E>> + 'a
742 where
721 where
743 I: Iterator<Item = Result<A, E>> + 'a,
722 I: Iterator<Item = Result<A, E>> + 'a,
744 F: Fn(A) -> Result<Option<B>, E> + 'a,
723 F: Fn(A) -> Result<Option<B>, E> + 'a,
745 {
724 {
746 iter.filter_map(move |result| match result {
725 iter.filter_map(move |result| match result {
747 Ok(node) => f(node).transpose(),
726 Ok(node) => f(node).transpose(),
748 Err(e) => Some(Err(e)),
727 Err(e) => Some(Err(e)),
749 })
728 })
750 }
729 }
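A minimal usage sketch of `filter_map_results` with invented values, showing that `Ok(None)` items are dropped while errors pass through unchanged:

    let input = vec![Ok(1), Ok(2), Err("parse error"), Ok(4)];
    let doubled_evens: Vec<_> = filter_map_results(input.into_iter(), |n| {
        Ok(if n % 2 == 0 { Some(n * 2) } else { None })
    })
    .collect();
    assert_eq!(doubled_evens, vec![Ok(4), Err("parse error"), Ok(8)]);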
751
730
752 impl<'on_disk> super::dispatch::DirstateMapMethods for DirstateMap<'on_disk> {
731 impl<'on_disk> super::dispatch::DirstateMapMethods for DirstateMap<'on_disk> {
753 fn clear(&mut self) {
732 fn clear(&mut self) {
754 self.root = Default::default();
733 self.root = Default::default();
755 self.nodes_with_entry_count = 0;
734 self.nodes_with_entry_count = 0;
756 self.nodes_with_copy_source_count = 0;
735 self.nodes_with_copy_source_count = 0;
757 }
736 }
758
737
759 fn set_entry(
738 fn set_entry(
760 &mut self,
739 &mut self,
761 filename: &HgPath,
740 filename: &HgPath,
762 entry: DirstateEntry,
741 entry: DirstateEntry,
763 ) -> Result<(), DirstateV2ParseError> {
742 ) -> Result<(), DirstateV2ParseError> {
764 self.get_or_insert(&filename)?.data = NodeData::Entry(entry);
743 self.get_or_insert(&filename)?.data = NodeData::Entry(entry);
765 Ok(())
744 Ok(())
766 }
745 }
767
746
768 fn add_file(
747 fn add_file(
769 &mut self,
748 &mut self,
770 filename: &HgPath,
749 filename: &HgPath,
771 entry: DirstateEntry,
750 entry: DirstateEntry,
772 ) -> Result<(), DirstateError> {
751 ) -> Result<(), DirstateError> {
773 let old_state = self.get(filename)?.map(|e| e.state());
752 let old_state = self.get(filename)?.map(|e| e.state());
774 Ok(self.add_or_remove_file(filename, old_state, entry)?)
753 Ok(self.add_or_remove_file(filename, old_state, entry)?)
775 }
754 }
776
755
777 fn remove_file(
756 fn remove_file(
778 &mut self,
757 &mut self,
779 filename: &HgPath,
758 filename: &HgPath,
780 in_merge: bool,
759 in_merge: bool,
781 ) -> Result<(), DirstateError> {
760 ) -> Result<(), DirstateError> {
782 let old_entry_opt = self.get(filename)?;
761 let old_entry_opt = self.get(filename)?;
783 let old_state = old_entry_opt.map(|e| e.state());
762 let old_state = old_entry_opt.map(|e| e.state());
784 let mut size = 0;
763 let mut size = 0;
785 if in_merge {
764 if in_merge {
786 // XXX we should not be able to have 'm' state and 'FROM_P2' if not
765 // XXX we should not be able to have 'm' state and 'FROM_P2' if not
787 // during a merge. So I (marmoute) am not sure we need the
766 // during a merge. So I (marmoute) am not sure we need the
788 // conditional at all. Double-checking this with an assert
767 // conditional at all. Double-checking this with an assert
789 // would be nice.
768 // would be nice.
790 if let Some(old_entry) = old_entry_opt {
769 if let Some(old_entry) = old_entry_opt {
791 // backup the previous state
770 // backup the previous state
792 if old_entry.state() == EntryState::Merged {
771 if old_entry.state() == EntryState::Merged {
793 size = SIZE_NON_NORMAL;
772 size = SIZE_NON_NORMAL;
794 } else if old_entry.state() == EntryState::Normal
773 } else if old_entry.state() == EntryState::Normal
795 && old_entry.size() == SIZE_FROM_OTHER_PARENT
774 && old_entry.size() == SIZE_FROM_OTHER_PARENT
796 {
775 {
797 // other parent
776 // other parent
798 size = SIZE_FROM_OTHER_PARENT;
777 size = SIZE_FROM_OTHER_PARENT;
799 }
778 }
800 }
779 }
801 }
780 }
802 if size == 0 {
781 if size == 0 {
803 self.copy_map_remove(filename)?;
782 self.copy_map_remove(filename)?;
804 }
783 }
805 let entry = DirstateEntry::new_removed(size);
784 let entry = DirstateEntry::new_removed(size);
806 Ok(self.add_or_remove_file(filename, old_state, entry)?)
785 Ok(self.add_or_remove_file(filename, old_state, entry)?)
807 }
786 }
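Restating the branch above with the sentinel names from the surrounding code: when removing a file during a merge, the entry's `size` field preserves where the file came from.

    // previous state 'm' (merged)                         -> size = SIZE_NON_NORMAL
    // previous state 'n' with size SIZE_FROM_OTHER_PARENT -> size = SIZE_FROM_OTHER_PARENT
    // anything else                                       -> size = 0, and the copy
    //                                                        source for `filename` is dropped.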
808
787
809 fn drop_entry_and_copy_source(
788 fn drop_entry_and_copy_source(
810 &mut self,
789 &mut self,
811 filename: &HgPath,
790 filename: &HgPath,
812 ) -> Result<(), DirstateError> {
791 ) -> Result<(), DirstateError> {
813 let was_tracked = self
792 let was_tracked = self
814 .get(filename)?
793 .get(filename)?
815 .map_or(false, |e| e.state().is_tracked());
794 .map_or(false, |e| e.state().is_tracked());
816 struct Dropped {
795 struct Dropped {
817 was_tracked: bool,
796 was_tracked: bool,
818 had_entry: bool,
797 had_entry: bool,
819 had_copy_source: bool,
798 had_copy_source: bool,
820 }
799 }
821
800
822 /// If this returns `Ok(Some((dropped, removed)))`, then
801 /// If this returns `Ok(Some((dropped, removed)))`, then
823 ///
802 ///
824 /// * `dropped` is about the leaf node that was at `filename`
803 /// * `dropped` is about the leaf node that was at `filename`
825 /// * `removed` is whether this particular level of recursion just
804 /// * `removed` is whether this particular level of recursion just
826 /// removed a node in `nodes`.
805 /// removed a node in `nodes`.
827 fn recur<'on_disk>(
806 fn recur<'on_disk>(
828 on_disk: &'on_disk [u8],
807 on_disk: &'on_disk [u8],
829 unreachable_bytes: &mut u32,
808 unreachable_bytes: &mut u32,
830 nodes: &mut ChildNodes<'on_disk>,
809 nodes: &mut ChildNodes<'on_disk>,
831 path: &HgPath,
810 path: &HgPath,
832 ) -> Result<Option<(Dropped, bool)>, DirstateV2ParseError> {
811 ) -> Result<Option<(Dropped, bool)>, DirstateV2ParseError> {
833 let (first_path_component, rest_of_path) =
812 let (first_path_component, rest_of_path) =
834 path.split_first_component();
813 path.split_first_component();
835 let nodes = nodes.make_mut(on_disk, unreachable_bytes)?;
814 let nodes = nodes.make_mut(on_disk, unreachable_bytes)?;
836 let node = if let Some(node) = nodes.get_mut(first_path_component)
815 let node = if let Some(node) = nodes.get_mut(first_path_component)
837 {
816 {
838 node
817 node
839 } else {
818 } else {
840 return Ok(None);
819 return Ok(None);
841 };
820 };
842 let dropped;
821 let dropped;
843 if let Some(rest) = rest_of_path {
822 if let Some(rest) = rest_of_path {
844 if let Some((d, removed)) = recur(
823 if let Some((d, removed)) = recur(
845 on_disk,
824 on_disk,
846 unreachable_bytes,
825 unreachable_bytes,
847 &mut node.children,
826 &mut node.children,
848 rest,
827 rest,
849 )? {
828 )? {
850 dropped = d;
829 dropped = d;
851 if dropped.had_entry {
830 if dropped.had_entry {
852 node.descendants_with_entry_count -= 1;
831 node.descendants_with_entry_count -= 1;
853 }
832 }
854 if dropped.was_tracked {
833 if dropped.was_tracked {
855 node.tracked_descendants_count -= 1;
834 node.tracked_descendants_count -= 1;
856 }
835 }
857
836
858 // Directory caches must be invalidated when removing a
837 // Directory caches must be invalidated when removing a
859 // child node
838 // child node
860 if removed {
839 if removed {
861 if let NodeData::CachedDirectory { .. } = &node.data {
840 if let NodeData::CachedDirectory { .. } = &node.data {
862 node.data = NodeData::None
841 node.data = NodeData::None
863 }
842 }
864 }
843 }
865 } else {
844 } else {
866 return Ok(None);
845 return Ok(None);
867 }
846 }
868 } else {
847 } else {
869 let had_entry = node.data.has_entry();
848 let had_entry = node.data.has_entry();
870 if had_entry {
849 if had_entry {
871 node.data = NodeData::None
850 node.data = NodeData::None
872 }
851 }
873 if let Some(source) = &node.copy_source {
852 if let Some(source) = &node.copy_source {
874 DirstateMap::count_dropped_path(unreachable_bytes, source);
853 DirstateMap::count_dropped_path(unreachable_bytes, source);
875 node.copy_source = None
854 node.copy_source = None
876 }
855 }
877 dropped = Dropped {
856 dropped = Dropped {
878 was_tracked: node
857 was_tracked: node
879 .data
858 .data
880 .as_entry()
859 .as_entry()
881 .map_or(false, |entry| entry.state().is_tracked()),
860 .map_or(false, |entry| entry.state().is_tracked()),
882 had_entry,
861 had_entry,
883 had_copy_source: node.copy_source.take().is_some(),
862 had_copy_source: node.copy_source.take().is_some(),
884 };
863 };
885 }
864 }
886 // After recursion, for both leaf (rest_of_path is None) nodes and
865 // After recursion, for both leaf (rest_of_path is None) nodes and
887 // parent nodes, remove a node if it just became empty.
866 // parent nodes, remove a node if it just became empty.
888 let remove = !node.data.has_entry()
867 let remove = !node.data.has_entry()
889 && node.copy_source.is_none()
868 && node.copy_source.is_none()
890 && node.children.is_empty();
869 && node.children.is_empty();
891 if remove {
870 if remove {
892 let (key, _) =
871 let (key, _) =
893 nodes.remove_entry(first_path_component).unwrap();
872 nodes.remove_entry(first_path_component).unwrap();
894 DirstateMap::count_dropped_path(
873 DirstateMap::count_dropped_path(
895 unreachable_bytes,
874 unreachable_bytes,
896 key.full_path(),
875 key.full_path(),
897 )
876 )
898 }
877 }
899 Ok(Some((dropped, remove)))
878 Ok(Some((dropped, remove)))
900 }
879 }
901
880
902 if let Some((dropped, _removed)) = recur(
881 if let Some((dropped, _removed)) = recur(
903 self.on_disk,
882 self.on_disk,
904 &mut self.unreachable_bytes,
883 &mut self.unreachable_bytes,
905 &mut self.root,
884 &mut self.root,
906 filename,
885 filename,
907 )? {
886 )? {
908 if dropped.had_entry {
887 if dropped.had_entry {
909 self.nodes_with_entry_count -= 1
888 self.nodes_with_entry_count -= 1
910 }
889 }
911 if dropped.had_copy_source {
890 if dropped.had_copy_source {
912 self.nodes_with_copy_source_count -= 1
891 self.nodes_with_copy_source_count -= 1
913 }
892 }
914 } else {
893 } else {
915 debug_assert!(!was_tracked);
894 debug_assert!(!was_tracked);
916 }
895 }
917 Ok(())
896 Ok(())
918 }
897 }
919
898
920 fn non_normal_entries_contains(
921 &mut self,
922 key: &HgPath,
923 ) -> Result<bool, DirstateV2ParseError> {
924 Ok(if let Some(node) = self.get_node(key)? {
925 node.entry()?.map_or(false, |entry| entry.is_non_normal())
926 } else {
927 false
928 })
929 }
930
931 fn non_normal_entries_remove(&mut self, key: &HgPath) -> bool {
932 // Do nothing, this `DirstateMap` does not have a separate "non normal
933 // entries" set that need to be kept up to date.
934 if let Ok(Some(v)) = self.get(key) {
935 return v.is_non_normal();
936 }
937 false
938 }
939
940 fn non_normal_entries_add(&mut self, _key: &HgPath) {
941 // Do nothing, this `DirstateMap` does not have a separate "non normal
942 // entries" set that need to be kept up to date
943 }
944
945 fn non_normal_or_other_parent_paths(
946 &mut self,
947 ) -> Box<dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + '_>
948 {
949 Box::new(self.filter_full_paths(|entry| {
950 entry.is_non_normal() || entry.is_from_other_parent()
951 }))
952 }
953
954 fn set_non_normal_other_parent_entries(&mut self, _force: bool) {
955 // Do nothing, this `DirstateMap` does not have a separate "non normal
956 // entries" and "from other parent" sets that need to be recomputed
957 }
958
959 fn iter_non_normal_paths(
960 &mut self,
961 ) -> Box<
962 dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + Send + '_,
963 > {
964 self.iter_non_normal_paths_panic()
965 }
966
967 fn iter_non_normal_paths_panic(
968 &self,
969 ) -> Box<
970 dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + Send + '_,
971 > {
972 Box::new(self.filter_full_paths(|entry| entry.is_non_normal()))
973 }
974
975 fn iter_other_parent_paths(
976 &mut self,
977 ) -> Box<
978 dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + Send + '_,
979 > {
980 Box::new(self.filter_full_paths(|entry| entry.is_from_other_parent()))
981 }
982
983 fn has_tracked_dir(
899 fn has_tracked_dir(
984 &mut self,
900 &mut self,
985 directory: &HgPath,
901 directory: &HgPath,
986 ) -> Result<bool, DirstateError> {
902 ) -> Result<bool, DirstateError> {
987 if let Some(node) = self.get_node(directory)? {
903 if let Some(node) = self.get_node(directory)? {
988 // A node without a `DirstateEntry` was created to hold child
904 // A node without a `DirstateEntry` was created to hold child
989 // nodes, and is therefore a directory.
905 // nodes, and is therefore a directory.
990 let state = node.state()?;
906 let state = node.state()?;
991 Ok(state.is_none() && node.tracked_descendants_count() > 0)
907 Ok(state.is_none() && node.tracked_descendants_count() > 0)
992 } else {
908 } else {
993 Ok(false)
909 Ok(false)
994 }
910 }
995 }
911 }
996
912
997 fn has_dir(&mut self, directory: &HgPath) -> Result<bool, DirstateError> {
913 fn has_dir(&mut self, directory: &HgPath) -> Result<bool, DirstateError> {
998 if let Some(node) = self.get_node(directory)? {
914 if let Some(node) = self.get_node(directory)? {
999 // A node without a `DirstateEntry` was created to hold child
915 // A node without a `DirstateEntry` was created to hold child
1000 // nodes, and is therefore a directory.
916 // nodes, and is therefore a directory.
1001 let state = node.state()?;
917 let state = node.state()?;
1002 Ok(state.is_none() && node.descendants_with_entry_count() > 0)
918 Ok(state.is_none() && node.descendants_with_entry_count() > 0)
1003 } else {
919 } else {
1004 Ok(false)
920 Ok(false)
1005 }
921 }
1006 }
922 }
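A hypothetical example of the two directory predicates above (paths invented): with entries for `dir/a` (tracked) and `dir/b` (removed), and no entry on `dir` itself:

    // has_dir("dir")         == true   // descendants_with_entry_count == 2
    // has_tracked_dir("dir") == true   // tracked_descendants_count == 1, from "dir/a"
    // has_dir("dir/a")       == false  // that node has its own entry, so it is a file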
1007
923
1008 #[timed]
924 #[timed]
1009 fn pack_v1(
925 fn pack_v1(
1010 &mut self,
926 &mut self,
1011 parents: DirstateParents,
927 parents: DirstateParents,
1012 now: Timestamp,
928 now: Timestamp,
1013 ) -> Result<Vec<u8>, DirstateError> {
929 ) -> Result<Vec<u8>, DirstateError> {
1014 let now: i32 = now.0.try_into().expect("time overflow");
930 let now: i32 = now.0.try_into().expect("time overflow");
1015 let mut ambiguous_mtimes = Vec::new();
931 let mut ambiguous_mtimes = Vec::new();
1016 // Optimization (to be measured?): pre-compute size to avoid `Vec`
932 // Optimization (to be measured?): pre-compute size to avoid `Vec`
1017 // reallocations
933 // reallocations
1018 let mut size = parents.as_bytes().len();
934 let mut size = parents.as_bytes().len();
1019 for node in self.iter_nodes() {
935 for node in self.iter_nodes() {
1020 let node = node?;
936 let node = node?;
1021 if let Some(entry) = node.entry()? {
937 if let Some(entry) = node.entry()? {
1022 size += packed_entry_size(
938 size += packed_entry_size(
1023 node.full_path(self.on_disk)?,
939 node.full_path(self.on_disk)?,
1024 node.copy_source(self.on_disk)?,
940 node.copy_source(self.on_disk)?,
1025 );
941 );
1026 if entry.mtime_is_ambiguous(now) {
942 if entry.mtime_is_ambiguous(now) {
1027 ambiguous_mtimes.push(
943 ambiguous_mtimes.push(
1028 node.full_path_borrowed(self.on_disk)?
944 node.full_path_borrowed(self.on_disk)?
1029 .detach_from_tree(),
945 .detach_from_tree(),
1030 )
946 )
1031 }
947 }
1032 }
948 }
1033 }
949 }
1034 self.clear_known_ambiguous_mtimes(&ambiguous_mtimes)?;
950 self.clear_known_ambiguous_mtimes(&ambiguous_mtimes)?;
1035
951
1036 let mut packed = Vec::with_capacity(size);
952 let mut packed = Vec::with_capacity(size);
1037 packed.extend(parents.as_bytes());
953 packed.extend(parents.as_bytes());
1038
954
1039 for node in self.iter_nodes() {
955 for node in self.iter_nodes() {
1040 let node = node?;
956 let node = node?;
1041 if let Some(entry) = node.entry()? {
957 if let Some(entry) = node.entry()? {
1042 pack_entry(
958 pack_entry(
1043 node.full_path(self.on_disk)?,
959 node.full_path(self.on_disk)?,
1044 &entry,
960 &entry,
1045 node.copy_source(self.on_disk)?,
961 node.copy_source(self.on_disk)?,
1046 &mut packed,
962 &mut packed,
1047 );
963 );
1048 }
964 }
1049 }
965 }
1050 Ok(packed)
966 Ok(packed)
1051 }
967 }
1052
968
1053 /// Returns new data and metadata together with whether that data should be
969 /// Returns new data and metadata together with whether that data should be
1054 /// appended to the existing data file whose content is at
970 /// appended to the existing data file whose content is at
1055 /// `self.on_disk` (true), instead of written to a new data file
971 /// `self.on_disk` (true), instead of written to a new data file
1056 /// (false).
972 /// (false).
1057 #[timed]
973 #[timed]
1058 fn pack_v2(
974 fn pack_v2(
1059 &mut self,
975 &mut self,
1060 now: Timestamp,
976 now: Timestamp,
1061 can_append: bool,
977 can_append: bool,
1062 ) -> Result<(Vec<u8>, Vec<u8>, bool), DirstateError> {
978 ) -> Result<(Vec<u8>, Vec<u8>, bool), DirstateError> {
1063 // TODO: how do we want to handle this in 2038?
979 // TODO: how do we want to handle this in 2038?
1064 let now: i32 = now.0.try_into().expect("time overflow");
980 let now: i32 = now.0.try_into().expect("time overflow");
1065 let mut paths = Vec::new();
981 let mut paths = Vec::new();
1066 for node in self.iter_nodes() {
982 for node in self.iter_nodes() {
1067 let node = node?;
983 let node = node?;
1068 if let Some(entry) = node.entry()? {
984 if let Some(entry) = node.entry()? {
1069 if entry.mtime_is_ambiguous(now) {
985 if entry.mtime_is_ambiguous(now) {
1070 paths.push(
986 paths.push(
1071 node.full_path_borrowed(self.on_disk)?
987 node.full_path_borrowed(self.on_disk)?
1072 .detach_from_tree(),
988 .detach_from_tree(),
1073 )
989 )
1074 }
990 }
1075 }
991 }
1076 }
992 }
1077 // Borrow of `self` ends here since we collect cloned paths
993 // Borrow of `self` ends here since we collect cloned paths
1078
994
1079 self.clear_known_ambiguous_mtimes(&paths)?;
995 self.clear_known_ambiguous_mtimes(&paths)?;
1080
996
1081 on_disk::write(self, can_append)
997 on_disk::write(self, can_append)
1082 }
998 }
1083
999
1084 fn status<'a>(
1000 fn status<'a>(
1085 &'a mut self,
1001 &'a mut self,
1086 matcher: &'a (dyn Matcher + Sync),
1002 matcher: &'a (dyn Matcher + Sync),
1087 root_dir: PathBuf,
1003 root_dir: PathBuf,
1088 ignore_files: Vec<PathBuf>,
1004 ignore_files: Vec<PathBuf>,
1089 options: StatusOptions,
1005 options: StatusOptions,
1090 ) -> Result<(DirstateStatus<'a>, Vec<PatternFileWarning>), StatusError>
1006 ) -> Result<(DirstateStatus<'a>, Vec<PatternFileWarning>), StatusError>
1091 {
1007 {
1092 super::status::status(self, matcher, root_dir, ignore_files, options)
1008 super::status::status(self, matcher, root_dir, ignore_files, options)
1093 }
1009 }
1094
1010
1095 fn copy_map_len(&self) -> usize {
1011 fn copy_map_len(&self) -> usize {
1096 self.nodes_with_copy_source_count as usize
1012 self.nodes_with_copy_source_count as usize
1097 }
1013 }
1098
1014
1099 fn copy_map_iter(&self) -> CopyMapIter<'_> {
1015 fn copy_map_iter(&self) -> CopyMapIter<'_> {
1100 Box::new(filter_map_results(self.iter_nodes(), move |node| {
1016 Box::new(filter_map_results(self.iter_nodes(), move |node| {
1101 Ok(if let Some(source) = node.copy_source(self.on_disk)? {
1017 Ok(if let Some(source) = node.copy_source(self.on_disk)? {
1102 Some((node.full_path(self.on_disk)?, source))
1018 Some((node.full_path(self.on_disk)?, source))
1103 } else {
1019 } else {
1104 None
1020 None
1105 })
1021 })
1106 }))
1022 }))
1107 }
1023 }
1108
1024
1109 fn copy_map_contains_key(
1025 fn copy_map_contains_key(
1110 &self,
1026 &self,
1111 key: &HgPath,
1027 key: &HgPath,
1112 ) -> Result<bool, DirstateV2ParseError> {
1028 ) -> Result<bool, DirstateV2ParseError> {
1113 Ok(if let Some(node) = self.get_node(key)? {
1029 Ok(if let Some(node) = self.get_node(key)? {
1114 node.has_copy_source()
1030 node.has_copy_source()
1115 } else {
1031 } else {
1116 false
1032 false
1117 })
1033 })
1118 }
1034 }
1119
1035
1120 fn copy_map_get(
1036 fn copy_map_get(
1121 &self,
1037 &self,
1122 key: &HgPath,
1038 key: &HgPath,
1123 ) -> Result<Option<&HgPath>, DirstateV2ParseError> {
1039 ) -> Result<Option<&HgPath>, DirstateV2ParseError> {
1124 if let Some(node) = self.get_node(key)? {
1040 if let Some(node) = self.get_node(key)? {
1125 if let Some(source) = node.copy_source(self.on_disk)? {
1041 if let Some(source) = node.copy_source(self.on_disk)? {
1126 return Ok(Some(source));
1042 return Ok(Some(source));
1127 }
1043 }
1128 }
1044 }
1129 Ok(None)
1045 Ok(None)
1130 }
1046 }
1131
1047
1132 fn copy_map_remove(
1048 fn copy_map_remove(
1133 &mut self,
1049 &mut self,
1134 key: &HgPath,
1050 key: &HgPath,
1135 ) -> Result<Option<HgPathBuf>, DirstateV2ParseError> {
1051 ) -> Result<Option<HgPathBuf>, DirstateV2ParseError> {
1136 let count = &mut self.nodes_with_copy_source_count;
1052 let count = &mut self.nodes_with_copy_source_count;
1137 let unreachable_bytes = &mut self.unreachable_bytes;
1053 let unreachable_bytes = &mut self.unreachable_bytes;
1138 Ok(Self::get_node_mut(
1054 Ok(Self::get_node_mut(
1139 self.on_disk,
1055 self.on_disk,
1140 unreachable_bytes,
1056 unreachable_bytes,
1141 &mut self.root,
1057 &mut self.root,
1142 key,
1058 key,
1143 )?
1059 )?
1144 .and_then(|node| {
1060 .and_then(|node| {
1145 if let Some(source) = &node.copy_source {
1061 if let Some(source) = &node.copy_source {
1146 *count -= 1;
1062 *count -= 1;
1147 Self::count_dropped_path(unreachable_bytes, source);
1063 Self::count_dropped_path(unreachable_bytes, source);
1148 }
1064 }
1149 node.copy_source.take().map(Cow::into_owned)
1065 node.copy_source.take().map(Cow::into_owned)
1150 }))
1066 }))
1151 }
1067 }
1152
1068
1153 fn copy_map_insert(
1069 fn copy_map_insert(
1154 &mut self,
1070 &mut self,
1155 key: HgPathBuf,
1071 key: HgPathBuf,
1156 value: HgPathBuf,
1072 value: HgPathBuf,
1157 ) -> Result<Option<HgPathBuf>, DirstateV2ParseError> {
1073 ) -> Result<Option<HgPathBuf>, DirstateV2ParseError> {
1158 let node = Self::get_or_insert_node(
1074 let node = Self::get_or_insert_node(
1159 self.on_disk,
1075 self.on_disk,
1160 &mut self.unreachable_bytes,
1076 &mut self.unreachable_bytes,
1161 &mut self.root,
1077 &mut self.root,
1162 &key,
1078 &key,
1163 WithBasename::to_cow_owned,
1079 WithBasename::to_cow_owned,
1164 |_ancestor| {},
1080 |_ancestor| {},
1165 )?;
1081 )?;
1166 if node.copy_source.is_none() {
1082 if node.copy_source.is_none() {
1167 self.nodes_with_copy_source_count += 1
1083 self.nodes_with_copy_source_count += 1
1168 }
1084 }
1169 Ok(node.copy_source.replace(value.into()).map(Cow::into_owned))
1085 Ok(node.copy_source.replace(value.into()).map(Cow::into_owned))
1170 }
1086 }
1171
1087
1172 fn len(&self) -> usize {
1088 fn len(&self) -> usize {
1173 self.nodes_with_entry_count as usize
1089 self.nodes_with_entry_count as usize
1174 }
1090 }
1175
1091
1176 fn contains_key(
1092 fn contains_key(
1177 &self,
1093 &self,
1178 key: &HgPath,
1094 key: &HgPath,
1179 ) -> Result<bool, DirstateV2ParseError> {
1095 ) -> Result<bool, DirstateV2ParseError> {
1180 Ok(self.get(key)?.is_some())
1096 Ok(self.get(key)?.is_some())
1181 }
1097 }
1182
1098
1183 fn get(
1099 fn get(
1184 &self,
1100 &self,
1185 key: &HgPath,
1101 key: &HgPath,
1186 ) -> Result<Option<DirstateEntry>, DirstateV2ParseError> {
1102 ) -> Result<Option<DirstateEntry>, DirstateV2ParseError> {
1187 Ok(if let Some(node) = self.get_node(key)? {
1103 Ok(if let Some(node) = self.get_node(key)? {
1188 node.entry()?
1104 node.entry()?
1189 } else {
1105 } else {
1190 None
1106 None
1191 })
1107 })
1192 }
1108 }
1193
1109
1194 fn iter(&self) -> StateMapIter<'_> {
1110 fn iter(&self) -> StateMapIter<'_> {
1195 Box::new(filter_map_results(self.iter_nodes(), move |node| {
1111 Box::new(filter_map_results(self.iter_nodes(), move |node| {
1196 Ok(if let Some(entry) = node.entry()? {
1112 Ok(if let Some(entry) = node.entry()? {
1197 Some((node.full_path(self.on_disk)?, entry))
1113 Some((node.full_path(self.on_disk)?, entry))
1198 } else {
1114 } else {
1199 None
1115 None
1200 })
1116 })
1201 }))
1117 }))
1202 }
1118 }
1203
1119
1204 fn iter_tracked_dirs(
1120 fn iter_tracked_dirs(
1205 &mut self,
1121 &mut self,
1206 ) -> Result<
1122 ) -> Result<
1207 Box<
1123 Box<
1208 dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>>
1124 dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>>
1209 + Send
1125 + Send
1210 + '_,
1126 + '_,
1211 >,
1127 >,
1212 DirstateError,
1128 DirstateError,
1213 > {
1129 > {
1214 let on_disk = self.on_disk;
1130 let on_disk = self.on_disk;
1215 Ok(Box::new(filter_map_results(
1131 Ok(Box::new(filter_map_results(
1216 self.iter_nodes(),
1132 self.iter_nodes(),
1217 move |node| {
1133 move |node| {
1218 Ok(if node.tracked_descendants_count() > 0 {
1134 Ok(if node.tracked_descendants_count() > 0 {
1219 Some(node.full_path(on_disk)?)
1135 Some(node.full_path(on_disk)?)
1220 } else {
1136 } else {
1221 None
1137 None
1222 })
1138 })
1223 },
1139 },
1224 )))
1140 )))
1225 }
1141 }
1226
1142
1227 fn debug_iter(
1143 fn debug_iter(
1228 &self,
1144 &self,
1229 all: bool,
1145 all: bool,
1230 ) -> Box<
1146 ) -> Box<
1231 dyn Iterator<
1147 dyn Iterator<
1232 Item = Result<
1148 Item = Result<
1233 (&HgPath, (u8, i32, i32, i32)),
1149 (&HgPath, (u8, i32, i32, i32)),
1234 DirstateV2ParseError,
1150 DirstateV2ParseError,
1235 >,
1151 >,
1236 > + Send
1152 > + Send
1237 + '_,
1153 + '_,
1238 > {
1154 > {
1239 Box::new(filter_map_results(self.iter_nodes(), move |node| {
1155 Box::new(filter_map_results(self.iter_nodes(), move |node| {
1240 let debug_tuple = if let Some(entry) = node.entry()? {
1156 let debug_tuple = if let Some(entry) = node.entry()? {
1241 entry.debug_tuple()
1157 entry.debug_tuple()
1242 } else if !all {
1158 } else if !all {
1243 return Ok(None);
1159 return Ok(None);
1244 } else if let Some(mtime) = node.cached_directory_mtime() {
1160 } else if let Some(mtime) = node.cached_directory_mtime() {
1245 (b' ', 0, -1, mtime.seconds() as i32)
1161 (b' ', 0, -1, mtime.seconds() as i32)
1246 } else {
1162 } else {
1247 (b' ', 0, -1, -1)
1163 (b' ', 0, -1, -1)
1248 };
1164 };
1249 Ok(Some((node.full_path(self.on_disk)?, debug_tuple)))
1165 Ok(Some((node.full_path(self.on_disk)?, debug_tuple)))
1250 }))
1166 }))
1251 }
1167 }
1252 }
1168 }
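The `iter`, `iter_tracked_dirs`, and `debug_iter` methods above all funnel node iteration through a `filter_map_results` adapter. As a rough sketch of what such an adapter looks like (an assumption about its shape, not the actual hg-core definition), it maps an iterator of `Result`s through a fallible filter-map closure while letting `DirstateV2ParseError`s flow through untouched:

    // Sketch only: assumed shape of a `filter_map_results`-style adapter.
    fn filter_map_results<T, U, E>(
        iter: impl Iterator<Item = Result<T, E>>,
        mut f: impl FnMut(T) -> Result<Option<U>, E>,
    ) -> impl Iterator<Item = Result<U, E>> {
        iter.filter_map(move |item| match item {
            // `transpose` turns `Ok(None)` into "skip this node".
            Ok(value) => f(value).transpose(),
            // Errors are forwarded as-is to the caller.
            Err(e) => Some(Err(e)),
        })
    }

With that shape, a closure returning `Ok(None)` simply skips a node, which is how entry-less directory nodes drop out of `iter` while `debug_iter(all=true)` still reports them with an ASCII-space state.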
@@ -1,537 +1,401 b''
1 use std::path::PathBuf;
1 use std::path::PathBuf;
2
2
3 use crate::dirstate::parsers::Timestamp;
3 use crate::dirstate::parsers::Timestamp;
4 use crate::dirstate_tree::on_disk::DirstateV2ParseError;
4 use crate::dirstate_tree::on_disk::DirstateV2ParseError;
5 use crate::matchers::Matcher;
5 use crate::matchers::Matcher;
6 use crate::utils::hg_path::{HgPath, HgPathBuf};
6 use crate::utils::hg_path::{HgPath, HgPathBuf};
7 use crate::CopyMapIter;
7 use crate::CopyMapIter;
8 use crate::DirstateEntry;
8 use crate::DirstateEntry;
9 use crate::DirstateError;
9 use crate::DirstateError;
10 use crate::DirstateMap;
10 use crate::DirstateMap;
11 use crate::DirstateParents;
11 use crate::DirstateParents;
12 use crate::DirstateStatus;
12 use crate::DirstateStatus;
13 use crate::PatternFileWarning;
13 use crate::PatternFileWarning;
14 use crate::StateMapIter;
14 use crate::StateMapIter;
15 use crate::StatusError;
15 use crate::StatusError;
16 use crate::StatusOptions;
16 use crate::StatusOptions;
17
17
18 /// `rust/hg-cpython/src/dirstate/dirstate_map.rs` implements in Rust a
18 /// `rust/hg-cpython/src/dirstate/dirstate_map.rs` implements in Rust a
19 /// `DirstateMap` Python class that wraps `Box<dyn DirstateMapMethods + Send>`,
19 /// `DirstateMap` Python class that wraps `Box<dyn DirstateMapMethods + Send>`,
20 /// a trait object of this trait. Except for constructors, this trait defines
20 /// a trait object of this trait. Except for constructors, this trait defines
21 /// all APIs that the class needs to interact with its inner dirstate map.
21 /// all APIs that the class needs to interact with its inner dirstate map.
22 ///
22 ///
23 /// A trait object is used to support two different concrete types:
23 /// A trait object is used to support two different concrete types:
24 ///
24 ///
25 /// * `rust/hg-core/src/dirstate/dirstate_map.rs` defines the "flat dirstate
25 /// * `rust/hg-core/src/dirstate/dirstate_map.rs` defines the "flat dirstate
26 /// map" which is based on a few large `HgPath`-keyed `HashMap` and `HashSet`
26 /// map" which is based on a few large `HgPath`-keyed `HashMap` and `HashSet`
27 /// fields.
27 /// fields.
28 /// * `rust/hg-core/src/dirstate_tree/dirstate_map.rs` defines the "tree
28 /// * `rust/hg-core/src/dirstate_tree/dirstate_map.rs` defines the "tree
29 ///   dirstate map" based on a tree data structure with nodes for directories
29 ///   dirstate map" based on a tree data structure with nodes for directories
30 /// containing child nodes for their files and sub-directories. This tree
30 /// containing child nodes for their files and sub-directories. This tree
31 /// enables a more efficient algorithm for `hg status`, but its details are
31 /// enables a more efficient algorithm for `hg status`, but its details are
32 /// abstracted in this trait.
32 /// abstracted in this trait.
33 ///
33 ///
34 /// The dirstate map associates paths of files in the working directory to
34 /// The dirstate map associates paths of files in the working directory to
35 /// various information about the state of those files.
35 /// various information about the state of those files.
36 pub trait DirstateMapMethods {
36 pub trait DirstateMapMethods {
37 /// Remove information about all files in this map
37 /// Remove information about all files in this map
38 fn clear(&mut self);
38 fn clear(&mut self);
39
39
40 /// Add the given filename to the map if it is not already there, and
40 /// Add the given filename to the map if it is not already there, and
41 /// associate the given entry with it.
41 /// associate the given entry with it.
42 fn set_entry(
42 fn set_entry(
43 &mut self,
43 &mut self,
44 filename: &HgPath,
44 filename: &HgPath,
45 entry: DirstateEntry,
45 entry: DirstateEntry,
46 ) -> Result<(), DirstateV2ParseError>;
46 ) -> Result<(), DirstateV2ParseError>;
47
47
48 /// Add or change the information associated to a given file.
48 /// Add or change the information associated to a given file.
49 fn add_file(
49 fn add_file(
50 &mut self,
50 &mut self,
51 filename: &HgPath,
51 filename: &HgPath,
52 entry: DirstateEntry,
52 entry: DirstateEntry,
53 ) -> Result<(), DirstateError>;
53 ) -> Result<(), DirstateError>;
54
54
55 /// Mark a file as "removed" (as in `hg rm`).
55 /// Mark a file as "removed" (as in `hg rm`).
56 fn remove_file(
56 fn remove_file(
57 &mut self,
57 &mut self,
58 filename: &HgPath,
58 filename: &HgPath,
59 in_merge: bool,
59 in_merge: bool,
60 ) -> Result<(), DirstateError>;
60 ) -> Result<(), DirstateError>;
61
61
62 /// Drop information about this file from the map if any.
62 /// Drop information about this file from the map if any.
63 ///
63 ///
64 /// `get` will now return `None` for this filename.
64 /// `get` will now return `None` for this filename.
65 fn drop_entry_and_copy_source(
65 fn drop_entry_and_copy_source(
66 &mut self,
66 &mut self,
67 filename: &HgPath,
67 filename: &HgPath,
68 ) -> Result<(), DirstateError>;
68 ) -> Result<(), DirstateError>;
69
69
70 /// Return whether the map has a "non-normal" entry for the given
71 /// filename. That is, any entry with a `state` other than
72 /// `EntryState::Normal` or with an ambiguous `mtime`.
73 fn non_normal_entries_contains(
74 &mut self,
75 key: &HgPath,
76 ) -> Result<bool, DirstateV2ParseError>;
77
78 /// Mark the given path as a "normal" file. This is only relevant in the flat
79 /// dirstate map where there is a separate `HashSet` that needs to be kept
80 /// up to date.
81 /// Returns whether the key was present in the set.
82 fn non_normal_entries_remove(&mut self, key: &HgPath) -> bool;
83
84 /// Mark the given path as a "non-normal" file.
85 /// This is only relevant in the flat dirstate map where there is a
86 /// separate `HashSet` that needs to be kept up to date.
87 fn non_normal_entries_add(&mut self, key: &HgPath);
88
89 /// Return an iterator of paths whose respective entry are either
90 /// "non-normal" (see `non_normal_entries_contains`) or "from other
91 /// parent".
92 ///
93 /// If that information is cached, create the cache as needed.
94 ///
95 /// "From other parent" is defined as `state == Normal && size == -2`.
96 ///
97 /// Because parse errors can happen during iteration, the iterated items
98 /// are `Result`s.
99 fn non_normal_or_other_parent_paths(
100 &mut self,
101 ) -> Box<dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + '_>;
102
103 /// Create the cache for `non_normal_or_other_parent_paths` if needed.
104 ///
105 /// If `force` is true, the cache is re-created even if it already exists.
106 fn set_non_normal_other_parent_entries(&mut self, force: bool);
107
108 /// Return an iterator of paths whose respective entry are "non-normal"
109 /// (see `non_normal_entries_contains`).
110 ///
111 /// If that information is cached, create the cache as needed.
112 ///
113 /// Because parse errors can happen during iteration, the iterated items
114 /// are `Result`s.
115 fn iter_non_normal_paths(
116 &mut self,
117 ) -> Box<
118 dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + Send + '_,
119 >;
120
121 /// Same as `iter_non_normal_paths`, but takes `&self` instead of `&mut
122 /// self`.
123 ///
124 /// Panics if a cache is necessary but does not exist yet.
125 fn iter_non_normal_paths_panic(
126 &self,
127 ) -> Box<
128 dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + Send + '_,
129 >;
130
131 /// Return an iterator of paths whose respective entry are "from other
132 /// parent".
133 ///
134 /// If that information is cached, create the cache as needed.
135 ///
136 /// "From other parent" is defined as `state == Normal && size == -2`.
137 ///
138 /// Because parse errors can happen during iteration, the iterated items
139 /// are `Result`s.
140 fn iter_other_parent_paths(
141 &mut self,
142 ) -> Box<
143 dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + Send + '_,
144 >;
145
146 /// Returns whether the sub-tree rooted at the given directory contains any
70 /// Returns whether the sub-tree rooted at the given directory contains any
147 /// tracked file.
71 /// tracked file.
148 ///
72 ///
149 /// A file is tracked if it has a `state` other than `EntryState::Removed`.
73 /// A file is tracked if it has a `state` other than `EntryState::Removed`.
150 fn has_tracked_dir(
74 fn has_tracked_dir(
151 &mut self,
75 &mut self,
152 directory: &HgPath,
76 directory: &HgPath,
153 ) -> Result<bool, DirstateError>;
77 ) -> Result<bool, DirstateError>;
154
78
155 /// Returns whether the sub-tree rooted at the given directory contains any
79 /// Returns whether the sub-tree rooted at the given directory contains any
156 /// file with a dirstate entry.
80 /// file with a dirstate entry.
157 fn has_dir(&mut self, directory: &HgPath) -> Result<bool, DirstateError>;
81 fn has_dir(&mut self, directory: &HgPath) -> Result<bool, DirstateError>;
158
82
159 /// Clear mtimes equal to `now` in entries with `state ==
83 /// Clear mtimes equal to `now` in entries with `state ==
160 /// EntryState::Normal`, and serialize bytes to write the `.hg/dirstate`
84 /// EntryState::Normal`, and serialize bytes to write the `.hg/dirstate`
161 /// file to disk in dirstate-v1 format.
85 /// file to disk in dirstate-v1 format.
162 fn pack_v1(
86 fn pack_v1(
163 &mut self,
87 &mut self,
164 parents: DirstateParents,
88 parents: DirstateParents,
165 now: Timestamp,
89 now: Timestamp,
166 ) -> Result<Vec<u8>, DirstateError>;
90 ) -> Result<Vec<u8>, DirstateError>;
167
91
168 /// Clear mtimes equal to `now` in entries with `state ==
92 /// Clear mtimes equal to `now` in entries with `state ==
169 /// EntryState::Normal`, and serialize bytes to write a dirstate data file
93 /// EntryState::Normal`, and serialize bytes to write a dirstate data file
170 /// to disk in dirstate-v2 format.
94 /// to disk in dirstate-v2 format.
171 ///
95 ///
172 /// Returns new data and metadata together with whether that data should be
96 /// Returns new data and metadata together with whether that data should be
173 /// appended to the existing data file whose content is at
97 /// appended to the existing data file whose content is at
174 /// `self.on_disk` (true), instead of written to a new data file
98 /// `self.on_disk` (true), instead of written to a new data file
175 /// (false).
99 /// (false).
176 ///
100 ///
177 /// Note: this is only supported by the tree dirstate map.
101 /// Note: this is only supported by the tree dirstate map.
178 fn pack_v2(
102 fn pack_v2(
179 &mut self,
103 &mut self,
180 now: Timestamp,
104 now: Timestamp,
181 can_append: bool,
105 can_append: bool,
182 ) -> Result<(Vec<u8>, Vec<u8>, bool), DirstateError>;
106 ) -> Result<(Vec<u8>, Vec<u8>, bool), DirstateError>;
183
107
184 /// Run the status algorithm.
108 /// Run the status algorithm.
185 ///
109 ///
186 /// This is not semantically a method of the dirstate map, but a different
110 /// This is not semantically a method of the dirstate map, but a different
187 /// algorithm is used for the flat vs. tree dirstate map, so having it in
111 /// algorithm is used for the flat vs. tree dirstate map, so having it in
188 /// this trait enables the same dynamic dispatch as with other methods.
112 /// this trait enables the same dynamic dispatch as with other methods.
189 fn status<'a>(
113 fn status<'a>(
190 &'a mut self,
114 &'a mut self,
191 matcher: &'a (dyn Matcher + Sync),
115 matcher: &'a (dyn Matcher + Sync),
192 root_dir: PathBuf,
116 root_dir: PathBuf,
193 ignore_files: Vec<PathBuf>,
117 ignore_files: Vec<PathBuf>,
194 options: StatusOptions,
118 options: StatusOptions,
195 ) -> Result<(DirstateStatus<'a>, Vec<PatternFileWarning>), StatusError>;
119 ) -> Result<(DirstateStatus<'a>, Vec<PatternFileWarning>), StatusError>;
196
120
197 /// Returns how many files in the dirstate map have a recorded copy source.
121 /// Returns how many files in the dirstate map have a recorded copy source.
198 fn copy_map_len(&self) -> usize;
122 fn copy_map_len(&self) -> usize;
199
123
200 /// Returns an iterator of `(path, copy_source)` for all files that have a
124 /// Returns an iterator of `(path, copy_source)` for all files that have a
201 /// copy source.
125 /// copy source.
202 fn copy_map_iter(&self) -> CopyMapIter<'_>;
126 fn copy_map_iter(&self) -> CopyMapIter<'_>;
203
127
204 /// Returns whether the given file has a copy source.
128 /// Returns whether the given file has a copy source.
205 fn copy_map_contains_key(
129 fn copy_map_contains_key(
206 &self,
130 &self,
207 key: &HgPath,
131 key: &HgPath,
208 ) -> Result<bool, DirstateV2ParseError>;
132 ) -> Result<bool, DirstateV2ParseError>;
209
133
210 /// Returns the copy source for the given file.
134 /// Returns the copy source for the given file.
211 fn copy_map_get(
135 fn copy_map_get(
212 &self,
136 &self,
213 key: &HgPath,
137 key: &HgPath,
214 ) -> Result<Option<&HgPath>, DirstateV2ParseError>;
138 ) -> Result<Option<&HgPath>, DirstateV2ParseError>;
215
139
216 /// Removes the recorded copy source if any for the given file, and returns
140 /// Removes the recorded copy source if any for the given file, and returns
217 /// it.
141 /// it.
218 fn copy_map_remove(
142 fn copy_map_remove(
219 &mut self,
143 &mut self,
220 key: &HgPath,
144 key: &HgPath,
221 ) -> Result<Option<HgPathBuf>, DirstateV2ParseError>;
145 ) -> Result<Option<HgPathBuf>, DirstateV2ParseError>;
222
146
223 /// Set the given `value` copy source for the given `key` file.
147 /// Set the given `value` copy source for the given `key` file.
224 fn copy_map_insert(
148 fn copy_map_insert(
225 &mut self,
149 &mut self,
226 key: HgPathBuf,
150 key: HgPathBuf,
227 value: HgPathBuf,
151 value: HgPathBuf,
228 ) -> Result<Option<HgPathBuf>, DirstateV2ParseError>;
152 ) -> Result<Option<HgPathBuf>, DirstateV2ParseError>;
229
153
230 /// Returns the number of files that have an entry.
154 /// Returns the number of files that have an entry.
231 fn len(&self) -> usize;
155 fn len(&self) -> usize;
232
156
233 /// Returns whether the given file has an entry.
157 /// Returns whether the given file has an entry.
234 fn contains_key(&self, key: &HgPath)
158 fn contains_key(&self, key: &HgPath)
235 -> Result<bool, DirstateV2ParseError>;
159 -> Result<bool, DirstateV2ParseError>;
236
160
237 /// Returns the entry, if any, for the given file.
161 /// Returns the entry, if any, for the given file.
238 fn get(
162 fn get(
239 &self,
163 &self,
240 key: &HgPath,
164 key: &HgPath,
241 ) -> Result<Option<DirstateEntry>, DirstateV2ParseError>;
165 ) -> Result<Option<DirstateEntry>, DirstateV2ParseError>;
242
166
243 /// Returns a `(path, entry)` iterator of files that have an entry.
167 /// Returns a `(path, entry)` iterator of files that have an entry.
244 ///
168 ///
245 /// Because parse errors can happen during iteration, the iterated items
169 /// Because parse errors can happen during iteration, the iterated items
246 /// are `Result`s.
170 /// are `Result`s.
247 fn iter(&self) -> StateMapIter<'_>;
171 fn iter(&self) -> StateMapIter<'_>;
248
172
249 /// Returns an iterator of tracked directories.
173 /// Returns an iterator of tracked directories.
250 ///
174 ///
251 /// This is the paths for which `has_tracked_dir` would return true.
175 /// This is the paths for which `has_tracked_dir` would return true.
252 /// Or, in other words, the union of ancestor paths of all paths that have
176 /// Or, in other words, the union of ancestor paths of all paths that have
253 /// an associated entry in a "tracked" state in this dirstate map.
177 /// an associated entry in a "tracked" state in this dirstate map.
254 ///
178 ///
255 /// Because parse errors can happen during iteration, the iterated items
179 /// Because parse errors can happen during iteration, the iterated items
256 /// are `Result`s.
180 /// are `Result`s.
257 fn iter_tracked_dirs(
181 fn iter_tracked_dirs(
258 &mut self,
182 &mut self,
259 ) -> Result<
183 ) -> Result<
260 Box<
184 Box<
261 dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>>
185 dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>>
262 + Send
186 + Send
263 + '_,
187 + '_,
264 >,
188 >,
265 DirstateError,
189 DirstateError,
266 >;
190 >;
267
191
268 /// Return an iterator of `(path, (state, mode, size, mtime))` for every
192 /// Return an iterator of `(path, (state, mode, size, mtime))` for every
269 /// node stored in this dirstate map, for the purpose of the `hg
193 /// node stored in this dirstate map, for the purpose of the `hg
270 /// debugdirstate` command.
194 /// debugdirstate` command.
271 ///
195 ///
272 /// If `all` is true, include nodes that don’t have an entry.
196 /// If `all` is true, include nodes that don’t have an entry.
273 /// For such nodes `state` is the ASCII space.
197 /// For such nodes `state` is the ASCII space.
274 /// An `mtime` may still be present. It is used to optimize `status`.
198 /// An `mtime` may still be present. It is used to optimize `status`.
275 ///
199 ///
276 /// Because parse errors can happen during iteration, the iterated items
200 /// Because parse errors can happen during iteration, the iterated items
277 /// are `Result`s.
201 /// are `Result`s.
278 fn debug_iter(
202 fn debug_iter(
279 &self,
203 &self,
280 all: bool,
204 all: bool,
281 ) -> Box<
205 ) -> Box<
282 dyn Iterator<
206 dyn Iterator<
283 Item = Result<
207 Item = Result<
284 (&HgPath, (u8, i32, i32, i32)),
208 (&HgPath, (u8, i32, i32, i32)),
285 DirstateV2ParseError,
209 DirstateV2ParseError,
286 >,
210 >,
287 > + Send
211 > + Send
288 + '_,
212 + '_,
289 >;
213 >;
290 }
214 }
291
215
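To make the trait-object indirection described above concrete, here is a hypothetical caller (not part of this patch); it assumes only the trait and the `HgPath`/`DirstateV2ParseError` types already imported in this file, and it cannot tell the flat and tree maps apart:

    // Hypothetical consumer of the trait object; works for either concrete map.
    fn describe(
        map: &dyn DirstateMapMethods,
        path: &HgPath,
    ) -> Result<String, DirstateV2ParseError> {
        let has_entry = map.contains_key(path)?;
        let copied_from = map.copy_map_get(path)?;
        Ok(format!(
            "entry: {}, copy source: {:?}",
            has_entry,
            copied_from.map(HgPath::as_bytes)
        ))
    }
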
292 impl DirstateMapMethods for DirstateMap {
216 impl DirstateMapMethods for DirstateMap {
293 fn clear(&mut self) {
217 fn clear(&mut self) {
294 self.clear()
218 self.clear()
295 }
219 }
296
220
297 /// Used to set a value directly.
221 /// Used to set a value directly.
298 ///
222 ///
299 /// XXX This is temporary during a refactor of the V1 dirstate and will disappear
223 /// XXX This is temporary during a refactor of the V1 dirstate and will disappear
300 /// shortly.
224 /// shortly.
301 fn set_entry(
225 fn set_entry(
302 &mut self,
226 &mut self,
303 filename: &HgPath,
227 filename: &HgPath,
304 entry: DirstateEntry,
228 entry: DirstateEntry,
305 ) -> Result<(), DirstateV2ParseError> {
229 ) -> Result<(), DirstateV2ParseError> {
306 self.set_entry(&filename, entry);
230 self.set_entry(&filename, entry);
307 Ok(())
231 Ok(())
308 }
232 }
309
233
310 fn add_file(
234 fn add_file(
311 &mut self,
235 &mut self,
312 filename: &HgPath,
236 filename: &HgPath,
313 entry: DirstateEntry,
237 entry: DirstateEntry,
314 ) -> Result<(), DirstateError> {
238 ) -> Result<(), DirstateError> {
315 self.add_file(filename, entry)
239 self.add_file(filename, entry)
316 }
240 }
317
241
318 fn remove_file(
242 fn remove_file(
319 &mut self,
243 &mut self,
320 filename: &HgPath,
244 filename: &HgPath,
321 in_merge: bool,
245 in_merge: bool,
322 ) -> Result<(), DirstateError> {
246 ) -> Result<(), DirstateError> {
323 self.remove_file(filename, in_merge)
247 self.remove_file(filename, in_merge)
324 }
248 }
325
249
326 fn drop_entry_and_copy_source(
250 fn drop_entry_and_copy_source(
327 &mut self,
251 &mut self,
328 filename: &HgPath,
252 filename: &HgPath,
329 ) -> Result<(), DirstateError> {
253 ) -> Result<(), DirstateError> {
330 self.drop_entry_and_copy_source(filename)
254 self.drop_entry_and_copy_source(filename)
331 }
255 }
332
256
333 fn non_normal_entries_contains(
334 &mut self,
335 key: &HgPath,
336 ) -> Result<bool, DirstateV2ParseError> {
337 let (non_normal, _other_parent) =
338 self.get_non_normal_other_parent_entries();
339 Ok(non_normal.contains(key))
340 }
341
342 fn non_normal_entries_remove(&mut self, key: &HgPath) -> bool {
343 self.non_normal_entries_remove(key)
344 }
345
346 fn non_normal_entries_add(&mut self, key: &HgPath) {
347 self.non_normal_entries_add(key)
348 }
349
350 fn non_normal_or_other_parent_paths(
351 &mut self,
352 ) -> Box<dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + '_>
353 {
354 let (non_normal, other_parent) =
355 self.get_non_normal_other_parent_entries();
356 Box::new(non_normal.union(other_parent).map(|p| Ok(&**p)))
357 }
358
359 fn set_non_normal_other_parent_entries(&mut self, force: bool) {
360 self.set_non_normal_other_parent_entries(force)
361 }
362
363 fn iter_non_normal_paths(
364 &mut self,
365 ) -> Box<
366 dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + Send + '_,
367 > {
368 let (non_normal, _other_parent) =
369 self.get_non_normal_other_parent_entries();
370 Box::new(non_normal.iter().map(|p| Ok(&**p)))
371 }
372
373 fn iter_non_normal_paths_panic(
374 &self,
375 ) -> Box<
376 dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + Send + '_,
377 > {
378 let (non_normal, _other_parent) =
379 self.get_non_normal_other_parent_entries_panic();
380 Box::new(non_normal.iter().map(|p| Ok(&**p)))
381 }
382
383 fn iter_other_parent_paths(
384 &mut self,
385 ) -> Box<
386 dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + Send + '_,
387 > {
388 let (_non_normal, other_parent) =
389 self.get_non_normal_other_parent_entries();
390 Box::new(other_parent.iter().map(|p| Ok(&**p)))
391 }
392
393 fn has_tracked_dir(
257 fn has_tracked_dir(
394 &mut self,
258 &mut self,
395 directory: &HgPath,
259 directory: &HgPath,
396 ) -> Result<bool, DirstateError> {
260 ) -> Result<bool, DirstateError> {
397 self.has_tracked_dir(directory)
261 self.has_tracked_dir(directory)
398 }
262 }
399
263
400 fn has_dir(&mut self, directory: &HgPath) -> Result<bool, DirstateError> {
264 fn has_dir(&mut self, directory: &HgPath) -> Result<bool, DirstateError> {
401 self.has_dir(directory)
265 self.has_dir(directory)
402 }
266 }
403
267
404 fn pack_v1(
268 fn pack_v1(
405 &mut self,
269 &mut self,
406 parents: DirstateParents,
270 parents: DirstateParents,
407 now: Timestamp,
271 now: Timestamp,
408 ) -> Result<Vec<u8>, DirstateError> {
272 ) -> Result<Vec<u8>, DirstateError> {
409 self.pack(parents, now)
273 Ok(self.pack(parents, now)?)
410 }
274 }
411
275
412 fn pack_v2(
276 fn pack_v2(
413 &mut self,
277 &mut self,
414 _now: Timestamp,
278 _now: Timestamp,
415 _can_append: bool,
279 _can_append: bool,
416 ) -> Result<(Vec<u8>, Vec<u8>, bool), DirstateError> {
280 ) -> Result<(Vec<u8>, Vec<u8>, bool), DirstateError> {
417 panic!(
281 panic!(
418 "should have used dirstate_tree::DirstateMap to use the v2 format"
282 "should have used dirstate_tree::DirstateMap to use the v2 format"
419 )
283 )
420 }
284 }
421
285
422 fn status<'a>(
286 fn status<'a>(
423 &'a mut self,
287 &'a mut self,
424 matcher: &'a (dyn Matcher + Sync),
288 matcher: &'a (dyn Matcher + Sync),
425 root_dir: PathBuf,
289 root_dir: PathBuf,
426 ignore_files: Vec<PathBuf>,
290 ignore_files: Vec<PathBuf>,
427 options: StatusOptions,
291 options: StatusOptions,
428 ) -> Result<(DirstateStatus<'a>, Vec<PatternFileWarning>), StatusError>
292 ) -> Result<(DirstateStatus<'a>, Vec<PatternFileWarning>), StatusError>
429 {
293 {
430 crate::status(self, matcher, root_dir, ignore_files, options)
294 crate::status(self, matcher, root_dir, ignore_files, options)
431 }
295 }
432
296
433 fn copy_map_len(&self) -> usize {
297 fn copy_map_len(&self) -> usize {
434 self.copy_map.len()
298 self.copy_map.len()
435 }
299 }
436
300
437 fn copy_map_iter(&self) -> CopyMapIter<'_> {
301 fn copy_map_iter(&self) -> CopyMapIter<'_> {
438 Box::new(
302 Box::new(
439 self.copy_map
303 self.copy_map
440 .iter()
304 .iter()
441 .map(|(key, value)| Ok((&**key, &**value))),
305 .map(|(key, value)| Ok((&**key, &**value))),
442 )
306 )
443 }
307 }
444
308
445 fn copy_map_contains_key(
309 fn copy_map_contains_key(
446 &self,
310 &self,
447 key: &HgPath,
311 key: &HgPath,
448 ) -> Result<bool, DirstateV2ParseError> {
312 ) -> Result<bool, DirstateV2ParseError> {
449 Ok(self.copy_map.contains_key(key))
313 Ok(self.copy_map.contains_key(key))
450 }
314 }
451
315
452 fn copy_map_get(
316 fn copy_map_get(
453 &self,
317 &self,
454 key: &HgPath,
318 key: &HgPath,
455 ) -> Result<Option<&HgPath>, DirstateV2ParseError> {
319 ) -> Result<Option<&HgPath>, DirstateV2ParseError> {
456 Ok(self.copy_map.get(key).map(|p| &**p))
320 Ok(self.copy_map.get(key).map(|p| &**p))
457 }
321 }
458
322
459 fn copy_map_remove(
323 fn copy_map_remove(
460 &mut self,
324 &mut self,
461 key: &HgPath,
325 key: &HgPath,
462 ) -> Result<Option<HgPathBuf>, DirstateV2ParseError> {
326 ) -> Result<Option<HgPathBuf>, DirstateV2ParseError> {
463 Ok(self.copy_map.remove(key))
327 Ok(self.copy_map.remove(key))
464 }
328 }
465
329
466 fn copy_map_insert(
330 fn copy_map_insert(
467 &mut self,
331 &mut self,
468 key: HgPathBuf,
332 key: HgPathBuf,
469 value: HgPathBuf,
333 value: HgPathBuf,
470 ) -> Result<Option<HgPathBuf>, DirstateV2ParseError> {
334 ) -> Result<Option<HgPathBuf>, DirstateV2ParseError> {
471 Ok(self.copy_map.insert(key, value))
335 Ok(self.copy_map.insert(key, value))
472 }
336 }
473
337
474 fn len(&self) -> usize {
338 fn len(&self) -> usize {
475 (&**self).len()
339 (&**self).len()
476 }
340 }
477
341
478 fn contains_key(
342 fn contains_key(
479 &self,
343 &self,
480 key: &HgPath,
344 key: &HgPath,
481 ) -> Result<bool, DirstateV2ParseError> {
345 ) -> Result<bool, DirstateV2ParseError> {
482 Ok((&**self).contains_key(key))
346 Ok((&**self).contains_key(key))
483 }
347 }
484
348
485 fn get(
349 fn get(
486 &self,
350 &self,
487 key: &HgPath,
351 key: &HgPath,
488 ) -> Result<Option<DirstateEntry>, DirstateV2ParseError> {
352 ) -> Result<Option<DirstateEntry>, DirstateV2ParseError> {
489 Ok((&**self).get(key).cloned())
353 Ok((&**self).get(key).cloned())
490 }
354 }
491
355
492 fn iter(&self) -> StateMapIter<'_> {
356 fn iter(&self) -> StateMapIter<'_> {
493 Box::new((&**self).iter().map(|(key, value)| Ok((&**key, *value))))
357 Box::new((&**self).iter().map(|(key, value)| Ok((&**key, *value))))
494 }
358 }
495
359
496 fn iter_tracked_dirs(
360 fn iter_tracked_dirs(
497 &mut self,
361 &mut self,
498 ) -> Result<
362 ) -> Result<
499 Box<
363 Box<
500 dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>>
364 dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>>
501 + Send
365 + Send
502 + '_,
366 + '_,
503 >,
367 >,
504 DirstateError,
368 DirstateError,
505 > {
369 > {
506 self.set_all_dirs()?;
370 self.set_all_dirs()?;
507 Ok(Box::new(
371 Ok(Box::new(
508 self.all_dirs
372 self.all_dirs
509 .as_ref()
373 .as_ref()
510 .unwrap()
374 .unwrap()
511 .iter()
375 .iter()
512 .map(|path| Ok(&**path)),
376 .map(|path| Ok(&**path)),
513 ))
377 ))
514 }
378 }
515
379
516 fn debug_iter(
380 fn debug_iter(
517 &self,
381 &self,
518 all: bool,
382 all: bool,
519 ) -> Box<
383 ) -> Box<
520 dyn Iterator<
384 dyn Iterator<
521 Item = Result<
385 Item = Result<
522 (&HgPath, (u8, i32, i32, i32)),
386 (&HgPath, (u8, i32, i32, i32)),
523 DirstateV2ParseError,
387 DirstateV2ParseError,
524 >,
388 >,
525 > + Send
389 > + Send
526 + '_,
390 + '_,
527 > {
391 > {
528 // Not used for the flat (not tree-based) DirstateMap
392 // Not used for the flat (not tree-based) DirstateMap
529 let _ = all;
393 let _ = all;
530
394
531 Box::new(
395 Box::new(
532 (&**self)
396 (&**self)
533 .iter()
397 .iter()
534 .map(|(path, entry)| Ok((&**path, entry.debug_tuple()))),
398 .map(|(path, entry)| Ok((&**path, entry.debug_tuple()))),
535 )
399 )
536 }
400 }
537 }
401 }
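Both implementations of the copy-map methods are now in place (the flat map backs them with a plain `HashMap`, the tree map with per-node `copy_source` fields), so a hypothetical helper written against the trait alone (not part of this patch) behaves identically with either:

    // Hypothetical helper: move a recorded copy source from one key to another,
    // using only the trait's copy-map methods implemented above.
    fn move_copy_source(
        map: &mut dyn DirstateMapMethods,
        old_key: &HgPath,
        new_key: HgPathBuf,
    ) -> Result<(), DirstateV2ParseError> {
        if let Some(source) = map.copy_map_remove(old_key)? {
            // Any previous value recorded for `new_key` is discarded here.
            map.copy_map_insert(new_key, source)?;
        }
        Ok(())
    }
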
@@ -1,229 +1,179 b''
1 use crate::dirstate::parsers::Timestamp;
1 use crate::dirstate::parsers::Timestamp;
2 use crate::dirstate_tree::dispatch::DirstateMapMethods;
2 use crate::dirstate_tree::dispatch::DirstateMapMethods;
3 use crate::dirstate_tree::on_disk::DirstateV2ParseError;
3 use crate::dirstate_tree::on_disk::DirstateV2ParseError;
4 use crate::dirstate_tree::owning::OwningDirstateMap;
4 use crate::dirstate_tree::owning::OwningDirstateMap;
5 use crate::matchers::Matcher;
5 use crate::matchers::Matcher;
6 use crate::utils::hg_path::{HgPath, HgPathBuf};
6 use crate::utils::hg_path::{HgPath, HgPathBuf};
7 use crate::CopyMapIter;
7 use crate::CopyMapIter;
8 use crate::DirstateEntry;
8 use crate::DirstateEntry;
9 use crate::DirstateError;
9 use crate::DirstateError;
10 use crate::DirstateParents;
10 use crate::DirstateParents;
11 use crate::DirstateStatus;
11 use crate::DirstateStatus;
12 use crate::PatternFileWarning;
12 use crate::PatternFileWarning;
13 use crate::StateMapIter;
13 use crate::StateMapIter;
14 use crate::StatusError;
14 use crate::StatusError;
15 use crate::StatusOptions;
15 use crate::StatusOptions;
16 use std::path::PathBuf;
16 use std::path::PathBuf;
17
17
18 impl DirstateMapMethods for OwningDirstateMap {
18 impl DirstateMapMethods for OwningDirstateMap {
19 fn clear(&mut self) {
19 fn clear(&mut self) {
20 self.get_mut().clear()
20 self.get_mut().clear()
21 }
21 }
22
22
23 fn set_entry(
23 fn set_entry(
24 &mut self,
24 &mut self,
25 filename: &HgPath,
25 filename: &HgPath,
26 entry: DirstateEntry,
26 entry: DirstateEntry,
27 ) -> Result<(), DirstateV2ParseError> {
27 ) -> Result<(), DirstateV2ParseError> {
28 self.get_mut().set_entry(filename, entry)
28 self.get_mut().set_entry(filename, entry)
29 }
29 }
30
30
31 fn add_file(
31 fn add_file(
32 &mut self,
32 &mut self,
33 filename: &HgPath,
33 filename: &HgPath,
34 entry: DirstateEntry,
34 entry: DirstateEntry,
35 ) -> Result<(), DirstateError> {
35 ) -> Result<(), DirstateError> {
36 self.get_mut().add_file(filename, entry)
36 self.get_mut().add_file(filename, entry)
37 }
37 }
38
38
39 fn remove_file(
39 fn remove_file(
40 &mut self,
40 &mut self,
41 filename: &HgPath,
41 filename: &HgPath,
42 in_merge: bool,
42 in_merge: bool,
43 ) -> Result<(), DirstateError> {
43 ) -> Result<(), DirstateError> {
44 self.get_mut().remove_file(filename, in_merge)
44 self.get_mut().remove_file(filename, in_merge)
45 }
45 }
46
46
47 fn drop_entry_and_copy_source(
47 fn drop_entry_and_copy_source(
48 &mut self,
48 &mut self,
49 filename: &HgPath,
49 filename: &HgPath,
50 ) -> Result<(), DirstateError> {
50 ) -> Result<(), DirstateError> {
51 self.get_mut().drop_entry_and_copy_source(filename)
51 self.get_mut().drop_entry_and_copy_source(filename)
52 }
52 }
53
53
54 fn non_normal_entries_contains(
55 &mut self,
56 key: &HgPath,
57 ) -> Result<bool, DirstateV2ParseError> {
58 self.get_mut().non_normal_entries_contains(key)
59 }
60
61 fn non_normal_entries_remove(&mut self, key: &HgPath) -> bool {
62 self.get_mut().non_normal_entries_remove(key)
63 }
64
65 fn non_normal_entries_add(&mut self, key: &HgPath) {
66 self.get_mut().non_normal_entries_add(key)
67 }
68
69 fn non_normal_or_other_parent_paths(
70 &mut self,
71 ) -> Box<dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + '_>
72 {
73 self.get_mut().non_normal_or_other_parent_paths()
74 }
75
76 fn set_non_normal_other_parent_entries(&mut self, force: bool) {
77 self.get_mut().set_non_normal_other_parent_entries(force)
78 }
79
80 fn iter_non_normal_paths(
81 &mut self,
82 ) -> Box<
83 dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + Send + '_,
84 > {
85 self.get_mut().iter_non_normal_paths()
86 }
87
88 fn iter_non_normal_paths_panic(
89 &self,
90 ) -> Box<
91 dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + Send + '_,
92 > {
93 self.get().iter_non_normal_paths_panic()
94 }
95
96 fn iter_other_parent_paths(
97 &mut self,
98 ) -> Box<
99 dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + Send + '_,
100 > {
101 self.get_mut().iter_other_parent_paths()
102 }
103
104 fn has_tracked_dir(
54 fn has_tracked_dir(
105 &mut self,
55 &mut self,
106 directory: &HgPath,
56 directory: &HgPath,
107 ) -> Result<bool, DirstateError> {
57 ) -> Result<bool, DirstateError> {
108 self.get_mut().has_tracked_dir(directory)
58 self.get_mut().has_tracked_dir(directory)
109 }
59 }
110
60
111 fn has_dir(&mut self, directory: &HgPath) -> Result<bool, DirstateError> {
61 fn has_dir(&mut self, directory: &HgPath) -> Result<bool, DirstateError> {
112 self.get_mut().has_dir(directory)
62 self.get_mut().has_dir(directory)
113 }
63 }
114
64
115 fn pack_v1(
65 fn pack_v1(
116 &mut self,
66 &mut self,
117 parents: DirstateParents,
67 parents: DirstateParents,
118 now: Timestamp,
68 now: Timestamp,
119 ) -> Result<Vec<u8>, DirstateError> {
69 ) -> Result<Vec<u8>, DirstateError> {
120 self.get_mut().pack_v1(parents, now)
70 self.get_mut().pack_v1(parents, now)
121 }
71 }
122
72
123 fn pack_v2(
73 fn pack_v2(
124 &mut self,
74 &mut self,
125 now: Timestamp,
75 now: Timestamp,
126 can_append: bool,
76 can_append: bool,
127 ) -> Result<(Vec<u8>, Vec<u8>, bool), DirstateError> {
77 ) -> Result<(Vec<u8>, Vec<u8>, bool), DirstateError> {
128 self.get_mut().pack_v2(now, can_append)
78 self.get_mut().pack_v2(now, can_append)
129 }
79 }
130
80
131 fn status<'a>(
81 fn status<'a>(
132 &'a mut self,
82 &'a mut self,
133 matcher: &'a (dyn Matcher + Sync),
83 matcher: &'a (dyn Matcher + Sync),
134 root_dir: PathBuf,
84 root_dir: PathBuf,
135 ignore_files: Vec<PathBuf>,
85 ignore_files: Vec<PathBuf>,
136 options: StatusOptions,
86 options: StatusOptions,
137 ) -> Result<(DirstateStatus<'a>, Vec<PatternFileWarning>), StatusError>
87 ) -> Result<(DirstateStatus<'a>, Vec<PatternFileWarning>), StatusError>
138 {
88 {
139 self.get_mut()
89 self.get_mut()
140 .status(matcher, root_dir, ignore_files, options)
90 .status(matcher, root_dir, ignore_files, options)
141 }
91 }
142
92
143 fn copy_map_len(&self) -> usize {
93 fn copy_map_len(&self) -> usize {
144 self.get().copy_map_len()
94 self.get().copy_map_len()
145 }
95 }
146
96
147 fn copy_map_iter(&self) -> CopyMapIter<'_> {
97 fn copy_map_iter(&self) -> CopyMapIter<'_> {
148 self.get().copy_map_iter()
98 self.get().copy_map_iter()
149 }
99 }
150
100
151 fn copy_map_contains_key(
101 fn copy_map_contains_key(
152 &self,
102 &self,
153 key: &HgPath,
103 key: &HgPath,
154 ) -> Result<bool, DirstateV2ParseError> {
104 ) -> Result<bool, DirstateV2ParseError> {
155 self.get().copy_map_contains_key(key)
105 self.get().copy_map_contains_key(key)
156 }
106 }
157
107
158 fn copy_map_get(
108 fn copy_map_get(
159 &self,
109 &self,
160 key: &HgPath,
110 key: &HgPath,
161 ) -> Result<Option<&HgPath>, DirstateV2ParseError> {
111 ) -> Result<Option<&HgPath>, DirstateV2ParseError> {
162 self.get().copy_map_get(key)
112 self.get().copy_map_get(key)
163 }
113 }
164
114
165 fn copy_map_remove(
115 fn copy_map_remove(
166 &mut self,
116 &mut self,
167 key: &HgPath,
117 key: &HgPath,
168 ) -> Result<Option<HgPathBuf>, DirstateV2ParseError> {
118 ) -> Result<Option<HgPathBuf>, DirstateV2ParseError> {
169 self.get_mut().copy_map_remove(key)
119 self.get_mut().copy_map_remove(key)
170 }
120 }
171
121
172 fn copy_map_insert(
122 fn copy_map_insert(
173 &mut self,
123 &mut self,
174 key: HgPathBuf,
124 key: HgPathBuf,
175 value: HgPathBuf,
125 value: HgPathBuf,
176 ) -> Result<Option<HgPathBuf>, DirstateV2ParseError> {
126 ) -> Result<Option<HgPathBuf>, DirstateV2ParseError> {
177 self.get_mut().copy_map_insert(key, value)
127 self.get_mut().copy_map_insert(key, value)
178 }
128 }
179
129
180 fn len(&self) -> usize {
130 fn len(&self) -> usize {
181 self.get().len()
131 self.get().len()
182 }
132 }
183
133
184 fn contains_key(
134 fn contains_key(
185 &self,
135 &self,
186 key: &HgPath,
136 key: &HgPath,
187 ) -> Result<bool, DirstateV2ParseError> {
137 ) -> Result<bool, DirstateV2ParseError> {
188 self.get().contains_key(key)
138 self.get().contains_key(key)
189 }
139 }
190
140
191 fn get(
141 fn get(
192 &self,
142 &self,
193 key: &HgPath,
143 key: &HgPath,
194 ) -> Result<Option<DirstateEntry>, DirstateV2ParseError> {
144 ) -> Result<Option<DirstateEntry>, DirstateV2ParseError> {
195 self.get().get(key)
145 self.get().get(key)
196 }
146 }
197
147
198 fn iter(&self) -> StateMapIter<'_> {
148 fn iter(&self) -> StateMapIter<'_> {
199 self.get().iter()
149 self.get().iter()
200 }
150 }
201
151
202 fn iter_tracked_dirs(
152 fn iter_tracked_dirs(
203 &mut self,
153 &mut self,
204 ) -> Result<
154 ) -> Result<
205 Box<
155 Box<
206 dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>>
156 dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>>
207 + Send
157 + Send
208 + '_,
158 + '_,
209 >,
159 >,
210 DirstateError,
160 DirstateError,
211 > {
161 > {
212 self.get_mut().iter_tracked_dirs()
162 self.get_mut().iter_tracked_dirs()
213 }
163 }
214
164
215 fn debug_iter(
165 fn debug_iter(
216 &self,
166 &self,
217 all: bool,
167 all: bool,
218 ) -> Box<
168 ) -> Box<
219 dyn Iterator<
169 dyn Iterator<
220 Item = Result<
170 Item = Result<
221 (&HgPath, (u8, i32, i32, i32)),
171 (&HgPath, (u8, i32, i32, i32)),
222 DirstateV2ParseError,
172 DirstateV2ParseError,
223 >,
173 >,
224 > + Send
174 > + Send
225 + '_,
175 + '_,
226 > {
176 > {
227 self.get().debug_iter(all)
177 self.get().debug_iter(all)
228 }
178 }
229 }
179 }
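Every method above is a one-line forward through `get()` or `get_mut()`: `OwningDirstateMap` bundles the tree map with the on-disk buffer it borrows from and re-exposes the same trait surface. A minimal hypothetical sketch of what that buys a caller (not code from this patch; it assumes `OwningDirstateMap: Send + 'static`, as its use behind `Box<dyn DirstateMapMethods + Send>` elsewhere in this change suggests):

    // Hypothetical: since OwningDirstateMap implements the trait, it can be
    // erased to the same trait object type as the flat map.
    fn into_trait_object(
        map: OwningDirstateMap,
    ) -> Box<dyn DirstateMapMethods + Send> {
        Box::new(map)
    }
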
@@ -1,72 +1,71 b''
1 // dirstate.rs
1 // dirstate.rs
2 //
2 //
3 // Copyright 2019 Raphaël Gomès <rgomes@octobus.net>
3 // Copyright 2019 Raphaël Gomès <rgomes@octobus.net>
4 //
4 //
5 // This software may be used and distributed according to the terms of the
5 // This software may be used and distributed according to the terms of the
6 // GNU General Public License version 2 or any later version.
6 // GNU General Public License version 2 or any later version.
7
7
8 //! Bindings for the `hg::dirstate` module provided by the
8 //! Bindings for the `hg::dirstate` module provided by the
9 //! `hg-core` package.
9 //! `hg-core` package.
10 //!
10 //!
11 //! From Python, this will be seen as `mercurial.rustext.dirstate`
11 //! From Python, this will be seen as `mercurial.rustext.dirstate`
12 mod copymap;
12 mod copymap;
13 mod dirs_multiset;
13 mod dirs_multiset;
14 mod dirstate_map;
14 mod dirstate_map;
15 mod item;
15 mod item;
16 mod non_normal_entries;
17 mod status;
16 mod status;
18 use self::item::DirstateItem;
17 use self::item::DirstateItem;
19 use crate::{
18 use crate::{
20 dirstate::{
19 dirstate::{
21 dirs_multiset::Dirs, dirstate_map::DirstateMap, status::status_wrapper,
20 dirs_multiset::Dirs, dirstate_map::DirstateMap, status::status_wrapper,
22 },
21 },
23 exceptions,
22 exceptions,
24 };
23 };
25 use cpython::{PyBytes, PyDict, PyList, PyModule, PyObject, PyResult, Python};
24 use cpython::{PyBytes, PyDict, PyList, PyModule, PyObject, PyResult, Python};
26 use hg::dirstate_tree::on_disk::V2_FORMAT_MARKER;
25 use hg::dirstate_tree::on_disk::V2_FORMAT_MARKER;
27
26
28 /// Create the module, with `__package__` given from parent
27 /// Create the module, with `__package__` given from parent
29 pub fn init_module(py: Python, package: &str) -> PyResult<PyModule> {
28 pub fn init_module(py: Python, package: &str) -> PyResult<PyModule> {
30 let dotted_name = &format!("{}.dirstate", package);
29 let dotted_name = &format!("{}.dirstate", package);
31 let m = PyModule::new(py, dotted_name)?;
30 let m = PyModule::new(py, dotted_name)?;
32
31
33 env_logger::init();
32 env_logger::init();
34
33
35 m.add(py, "__package__", package)?;
34 m.add(py, "__package__", package)?;
36 m.add(py, "__doc__", "Dirstate - Rust implementation")?;
35 m.add(py, "__doc__", "Dirstate - Rust implementation")?;
37
36
38 m.add(
37 m.add(
39 py,
38 py,
40 "FallbackError",
39 "FallbackError",
41 py.get_type::<exceptions::FallbackError>(),
40 py.get_type::<exceptions::FallbackError>(),
42 )?;
41 )?;
43 m.add_class::<Dirs>(py)?;
42 m.add_class::<Dirs>(py)?;
44 m.add_class::<DirstateMap>(py)?;
43 m.add_class::<DirstateMap>(py)?;
45 m.add_class::<DirstateItem>(py)?;
44 m.add_class::<DirstateItem>(py)?;
46 m.add(py, "V2_FORMAT_MARKER", PyBytes::new(py, V2_FORMAT_MARKER))?;
45 m.add(py, "V2_FORMAT_MARKER", PyBytes::new(py, V2_FORMAT_MARKER))?;
47 m.add(
46 m.add(
48 py,
47 py,
49 "status",
48 "status",
50 py_fn!(
49 py_fn!(
51 py,
50 py,
52 status_wrapper(
51 status_wrapper(
53 dmap: DirstateMap,
52 dmap: DirstateMap,
54 root_dir: PyObject,
53 root_dir: PyObject,
55 matcher: PyObject,
54 matcher: PyObject,
56 ignorefiles: PyList,
55 ignorefiles: PyList,
57 check_exec: bool,
56 check_exec: bool,
58 last_normal_time: i64,
57 last_normal_time: i64,
59 list_clean: bool,
58 list_clean: bool,
60 list_ignored: bool,
59 list_ignored: bool,
61 list_unknown: bool,
60 list_unknown: bool,
62 collect_traversed_dirs: bool
61 collect_traversed_dirs: bool
63 )
62 )
64 ),
63 ),
65 )?;
64 )?;
66
65
67 let sys = PyModule::import(py, "sys")?;
66 let sys = PyModule::import(py, "sys")?;
68 let sys_modules: PyDict = sys.get(py, "modules")?.extract(py)?;
67 let sys_modules: PyDict = sys.get(py, "modules")?.extract(py)?;
69 sys_modules.set_item(py, dotted_name, &m)?;
68 sys_modules.set_item(py, dotted_name, &m)?;
70
69
71 Ok(m)
70 Ok(m)
72 }
71 }
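For context, a hypothetical sketch (names and wiring are illustrative assumptions, not taken from this patch) of how a parent `rustext` module initializer would be expected to attach this submodule:

    // Illustrative only: hook the dirstate submodule into a parent module.
    fn attach_dirstate(py: Python, parent: &PyModule) -> PyResult<()> {
        let m = init_module(py, "mercurial.rustext")?;
        parent.add(py, "dirstate", m)?;
        Ok(())
    }
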
@@ -1,612 +1,514 b''
1 // dirstate_map.rs
1 // dirstate_map.rs
2 //
2 //
3 // Copyright 2019 Raphaël Gomès <rgomes@octobus.net>
3 // Copyright 2019 Raphaël Gomès <rgomes@octobus.net>
4 //
4 //
5 // This software may be used and distributed according to the terms of the
5 // This software may be used and distributed according to the terms of the
6 // GNU General Public License version 2 or any later version.
6 // GNU General Public License version 2 or any later version.
7
7
8 //! Bindings for the `hg::dirstate::dirstate_map` file provided by the
8 //! Bindings for the `hg::dirstate::dirstate_map` file provided by the
9 //! `hg-core` package.
9 //! `hg-core` package.
10
10
11 use std::cell::{RefCell, RefMut};
11 use std::cell::{RefCell, RefMut};
12 use std::convert::TryInto;
12 use std::convert::TryInto;
13
13
14 use cpython::{
14 use cpython::{
15 exc, PyBool, PyBytes, PyClone, PyDict, PyErr, PyList, PyNone, PyObject,
15 exc, PyBool, PyBytes, PyClone, PyDict, PyErr, PyList, PyNone, PyObject,
16 PyResult, PySet, PyString, Python, PythonObject, ToPyObject,
16 PyResult, Python, PythonObject, ToPyObject, UnsafePyLeaked,
17 UnsafePyLeaked,
18 };
17 };
19
18
20 use crate::{
19 use crate::{
21 dirstate::copymap::{CopyMap, CopyMapItemsIterator, CopyMapKeysIterator},
20 dirstate::copymap::{CopyMap, CopyMapItemsIterator, CopyMapKeysIterator},
22 dirstate::item::DirstateItem,
21 dirstate::item::DirstateItem,
23 dirstate::non_normal_entries::{
24 NonNormalEntries, NonNormalEntriesIterator,
25 },
26 pybytes_deref::PyBytesDeref,
22 pybytes_deref::PyBytesDeref,
27 };
23 };
28 use hg::{
24 use hg::{
29 dirstate::parsers::Timestamp,
25 dirstate::parsers::Timestamp,
30 dirstate_tree::dirstate_map::DirstateMap as TreeDirstateMap,
26 dirstate_tree::dirstate_map::DirstateMap as TreeDirstateMap,
31 dirstate_tree::dispatch::DirstateMapMethods,
27 dirstate_tree::dispatch::DirstateMapMethods,
32 dirstate_tree::on_disk::DirstateV2ParseError,
28 dirstate_tree::on_disk::DirstateV2ParseError,
33 dirstate_tree::owning::OwningDirstateMap,
29 dirstate_tree::owning::OwningDirstateMap,
34 revlog::Node,
30 revlog::Node,
35 utils::files::normalize_case,
31 utils::files::normalize_case,
36 utils::hg_path::{HgPath, HgPathBuf},
32 utils::hg_path::{HgPath, HgPathBuf},
37 DirstateEntry, DirstateError, DirstateMap as RustDirstateMap,
33 DirstateEntry, DirstateError, DirstateMap as RustDirstateMap,
38 DirstateParents, EntryState, StateMapIter,
34 DirstateParents, EntryState, StateMapIter,
39 };
35 };
40
36
41 // TODO
37 // TODO
42 // This object needs to share references to multiple members of its Rust
38 // This object needs to share references to multiple members of its Rust
43 // inner struct, namely `copy_map`, `dirs` and `all_dirs`.
39 // inner struct, namely `copy_map`, `dirs` and `all_dirs`.
44 // Right now `CopyMap` is done, but it needs to have an explicit reference
40 // Right now `CopyMap` is done, but it needs to have an explicit reference
45 // to `RustDirstateMap` which itself needs to have an encapsulation for
41 // to `RustDirstateMap` which itself needs to have an encapsulation for
46 // every method in `CopyMap` (copymapcopy, etc.).
42 // every method in `CopyMap` (copymapcopy, etc.).
47 // This is ugly and hard to maintain.
43 // This is ugly and hard to maintain.
48 // The same logic applies to `dirs` and `all_dirs`, however the `Dirs`
44 // The same logic applies to `dirs` and `all_dirs`, however the `Dirs`
49 // `py_class!` is already implemented and does not mention
45 // `py_class!` is already implemented and does not mention
50 // `RustDirstateMap`, rightfully so.
46 // `RustDirstateMap`, rightfully so.
51 // All attributes also have to have a separate refcount data attribute for
47 // All attributes also have to have a separate refcount data attribute for
52 // leaks, with all methods that go along for reference sharing.
48 // leaks, with all methods that go along for reference sharing.
53 py_class!(pub class DirstateMap |py| {
49 py_class!(pub class DirstateMap |py| {
54 @shared data inner: Box<dyn DirstateMapMethods + Send>;
50 @shared data inner: Box<dyn DirstateMapMethods + Send>;
55
51
56 /// Returns a `(dirstate_map, parents)` tuple
52 /// Returns a `(dirstate_map, parents)` tuple
57 @staticmethod
53 @staticmethod
58 def new_v1(
54 def new_v1(
59 use_dirstate_tree: bool,
55 use_dirstate_tree: bool,
60 on_disk: PyBytes,
56 on_disk: PyBytes,
61 ) -> PyResult<PyObject> {
57 ) -> PyResult<PyObject> {
62 let (inner, parents) = if use_dirstate_tree {
58 let (inner, parents) = if use_dirstate_tree {
63 let on_disk = PyBytesDeref::new(py, on_disk);
59 let on_disk = PyBytesDeref::new(py, on_disk);
64 let mut map = OwningDirstateMap::new_empty(on_disk);
60 let mut map = OwningDirstateMap::new_empty(on_disk);
65 let (on_disk, map_placeholder) = map.get_mut_pair();
61 let (on_disk, map_placeholder) = map.get_mut_pair();
66
62
67 let (actual_map, parents) = TreeDirstateMap::new_v1(on_disk)
63 let (actual_map, parents) = TreeDirstateMap::new_v1(on_disk)
68 .map_err(|e| dirstate_error(py, e))?;
64 .map_err(|e| dirstate_error(py, e))?;
69 *map_placeholder = actual_map;
65 *map_placeholder = actual_map;
70 (Box::new(map) as _, parents)
66 (Box::new(map) as _, parents)
71 } else {
67 } else {
72 let bytes = on_disk.data(py);
68 let bytes = on_disk.data(py);
73 let mut map = RustDirstateMap::default();
69 let mut map = RustDirstateMap::default();
74 let parents = map.read(bytes).map_err(|e| dirstate_error(py, e))?;
70 let parents = map.read(bytes).map_err(|e| dirstate_error(py, e))?;
75 (Box::new(map) as _, parents)
71 (Box::new(map) as _, parents)
76 };
72 };
77 let map = Self::create_instance(py, inner)?;
73 let map = Self::create_instance(py, inner)?;
78 let parents = parents.map(|p| {
74 let parents = parents.map(|p| {
79 let p1 = PyBytes::new(py, p.p1.as_bytes());
75 let p1 = PyBytes::new(py, p.p1.as_bytes());
80 let p2 = PyBytes::new(py, p.p2.as_bytes());
76 let p2 = PyBytes::new(py, p.p2.as_bytes());
81 (p1, p2)
77 (p1, p2)
82 });
78 });
83 Ok((map, parents).to_py_object(py).into_object())
79 Ok((map, parents).to_py_object(py).into_object())
84 }
80 }
85
81
86 /// Returns a DirstateMap
82 /// Returns a DirstateMap
87 @staticmethod
83 @staticmethod
88 def new_v2(
84 def new_v2(
89 on_disk: PyBytes,
85 on_disk: PyBytes,
90 data_size: usize,
86 data_size: usize,
91 tree_metadata: PyBytes,
87 tree_metadata: PyBytes,
92 ) -> PyResult<PyObject> {
88 ) -> PyResult<PyObject> {
93 let dirstate_error = |e: DirstateError| {
89 let dirstate_error = |e: DirstateError| {
94 PyErr::new::<exc::OSError, _>(py, format!("Dirstate error: {:?}", e))
90 PyErr::new::<exc::OSError, _>(py, format!("Dirstate error: {:?}", e))
95 };
91 };
96 let on_disk = PyBytesDeref::new(py, on_disk);
92 let on_disk = PyBytesDeref::new(py, on_disk);
97 let mut map = OwningDirstateMap::new_empty(on_disk);
93 let mut map = OwningDirstateMap::new_empty(on_disk);
98 let (on_disk, map_placeholder) = map.get_mut_pair();
94 let (on_disk, map_placeholder) = map.get_mut_pair();
99 *map_placeholder = TreeDirstateMap::new_v2(
95 *map_placeholder = TreeDirstateMap::new_v2(
100 on_disk, data_size, tree_metadata.data(py),
96 on_disk, data_size, tree_metadata.data(py),
101 ).map_err(dirstate_error)?;
97 ).map_err(dirstate_error)?;
102 let map = Self::create_instance(py, Box::new(map))?;
98 let map = Self::create_instance(py, Box::new(map))?;
103 Ok(map.into_object())
99 Ok(map.into_object())
104 }
100 }
105
101
106 def clear(&self) -> PyResult<PyObject> {
102 def clear(&self) -> PyResult<PyObject> {
107 self.inner(py).borrow_mut().clear();
103 self.inner(py).borrow_mut().clear();
108 Ok(py.None())
104 Ok(py.None())
109 }
105 }
110
106
111 def get(
107 def get(
112 &self,
108 &self,
113 key: PyObject,
109 key: PyObject,
114 default: Option<PyObject> = None
110 default: Option<PyObject> = None
115 ) -> PyResult<Option<PyObject>> {
111 ) -> PyResult<Option<PyObject>> {
116 let key = key.extract::<PyBytes>(py)?;
112 let key = key.extract::<PyBytes>(py)?;
117 match self
113 match self
118 .inner(py)
114 .inner(py)
119 .borrow()
115 .borrow()
120 .get(HgPath::new(key.data(py)))
116 .get(HgPath::new(key.data(py)))
121 .map_err(|e| v2_error(py, e))?
117 .map_err(|e| v2_error(py, e))?
122 {
118 {
123 Some(entry) => {
119 Some(entry) => {
124 Ok(Some(DirstateItem::new_as_pyobject(py, entry)?))
120 Ok(Some(DirstateItem::new_as_pyobject(py, entry)?))
125 },
121 },
126 None => Ok(default)
122 None => Ok(default)
127 }
123 }
128 }
124 }
129
125
130 def set_dirstate_item(
126 def set_dirstate_item(
131 &self,
127 &self,
132 path: PyObject,
128 path: PyObject,
133 item: DirstateItem
129 item: DirstateItem
134 ) -> PyResult<PyObject> {
130 ) -> PyResult<PyObject> {
135 let f = path.extract::<PyBytes>(py)?;
131 let f = path.extract::<PyBytes>(py)?;
136 let filename = HgPath::new(f.data(py));
132 let filename = HgPath::new(f.data(py));
137 self.inner(py)
133 self.inner(py)
138 .borrow_mut()
134 .borrow_mut()
139 .set_entry(filename, item.get_entry(py))
135 .set_entry(filename, item.get_entry(py))
140 .map_err(|e| v2_error(py, e))?;
136 .map_err(|e| v2_error(py, e))?;
141 Ok(py.None())
137 Ok(py.None())
142 }
138 }
143
139
144 def addfile(
140 def addfile(
145 &self,
141 &self,
146 f: PyBytes,
142 f: PyBytes,
147 item: DirstateItem,
143 item: DirstateItem,
148 ) -> PyResult<PyNone> {
144 ) -> PyResult<PyNone> {
149 let filename = HgPath::new(f.data(py));
145 let filename = HgPath::new(f.data(py));
150 let entry = item.get_entry(py);
146 let entry = item.get_entry(py);
151 self.inner(py)
147 self.inner(py)
152 .borrow_mut()
148 .borrow_mut()
153 .add_file(filename, entry)
149 .add_file(filename, entry)
154 .map_err(|e |dirstate_error(py, e))?;
150 .map_err(|e |dirstate_error(py, e))?;
155 Ok(PyNone)
151 Ok(PyNone)
156 }
152 }
157
153
158 def removefile(
154 def removefile(
159 &self,
155 &self,
160 f: PyObject,
156 f: PyObject,
161 in_merge: PyObject
157 in_merge: PyObject
162 ) -> PyResult<PyObject> {
158 ) -> PyResult<PyObject> {
163 self.inner(py).borrow_mut()
159 self.inner(py).borrow_mut()
164 .remove_file(
160 .remove_file(
165 HgPath::new(f.extract::<PyBytes>(py)?.data(py)),
161 HgPath::new(f.extract::<PyBytes>(py)?.data(py)),
166 in_merge.extract::<PyBool>(py)?.is_true(),
162 in_merge.extract::<PyBool>(py)?.is_true(),
167 )
163 )
168 .or_else(|_| {
164 .or_else(|_| {
169 Err(PyErr::new::<exc::OSError, _>(
165 Err(PyErr::new::<exc::OSError, _>(
170 py,
166 py,
171 "Dirstate error".to_string(),
167 "Dirstate error".to_string(),
172 ))
168 ))
173 })?;
169 })?;
174 Ok(py.None())
170 Ok(py.None())
175 }
171 }
176
172
177 def drop_item_and_copy_source(
173 def drop_item_and_copy_source(
178 &self,
174 &self,
179 f: PyBytes,
175 f: PyBytes,
180 ) -> PyResult<PyNone> {
176 ) -> PyResult<PyNone> {
181 self.inner(py)
177 self.inner(py)
182 .borrow_mut()
178 .borrow_mut()
183 .drop_entry_and_copy_source(HgPath::new(f.data(py)))
179 .drop_entry_and_copy_source(HgPath::new(f.data(py)))
184 .map_err(|e |dirstate_error(py, e))?;
180 .map_err(|e |dirstate_error(py, e))?;
185 Ok(PyNone)
181 Ok(PyNone)
186 }
182 }
187
183
188 def other_parent_entries(&self) -> PyResult<PyObject> {
189 let mut inner_shared = self.inner(py).borrow_mut();
190 let set = PySet::empty(py)?;
191 for path in inner_shared.iter_other_parent_paths() {
192 let path = path.map_err(|e| v2_error(py, e))?;
193 set.add(py, PyBytes::new(py, path.as_bytes()))?;
194 }
195 Ok(set.into_object())
196 }
197
198 def non_normal_entries(&self) -> PyResult<NonNormalEntries> {
199 NonNormalEntries::from_inner(py, self.clone_ref(py))
200 }
201
202 def non_normal_entries_contains(&self, key: PyObject) -> PyResult<bool> {
203 let key = key.extract::<PyBytes>(py)?;
204 self.inner(py)
205 .borrow_mut()
206 .non_normal_entries_contains(HgPath::new(key.data(py)))
207 .map_err(|e| v2_error(py, e))
208 }
209
210 def non_normal_entries_display(&self) -> PyResult<PyString> {
211 let mut inner = self.inner(py).borrow_mut();
212 let paths = inner
213 .iter_non_normal_paths()
214 .collect::<Result<Vec<_>, _>>()
215 .map_err(|e| v2_error(py, e))?;
216 let formatted = format!("NonNormalEntries: {}", hg::utils::join_display(paths, ", "));
217 Ok(PyString::new(py, &formatted))
218 }
219
220 def non_normal_entries_remove(&self, key: PyObject) -> PyResult<PyObject> {
221 let key = key.extract::<PyBytes>(py)?;
222 let key = key.data(py);
223 let was_present = self
224 .inner(py)
225 .borrow_mut()
226 .non_normal_entries_remove(HgPath::new(key));
227 if !was_present {
228 let msg = String::from_utf8_lossy(key);
229 Err(PyErr::new::<exc::KeyError, _>(py, msg))
230 } else {
231 Ok(py.None())
232 }
233 }
234
235 def non_normal_entries_discard(&self, key: PyObject) -> PyResult<PyObject>
236 {
237 let key = key.extract::<PyBytes>(py)?;
238 self
239 .inner(py)
240 .borrow_mut()
241 .non_normal_entries_remove(HgPath::new(key.data(py)));
242 Ok(py.None())
243 }
244
245 def non_normal_entries_add(&self, key: PyObject) -> PyResult<PyObject> {
246 let key = key.extract::<PyBytes>(py)?;
247 self
248 .inner(py)
249 .borrow_mut()
250 .non_normal_entries_add(HgPath::new(key.data(py)));
251 Ok(py.None())
252 }
253
254 def non_normal_or_other_parent_paths(&self) -> PyResult<PyList> {
255 let mut inner = self.inner(py).borrow_mut();
256
257 let ret = PyList::new(py, &[]);
258 for filename in inner.non_normal_or_other_parent_paths() {
259 let filename = filename.map_err(|e| v2_error(py, e))?;
260 let as_pystring = PyBytes::new(py, filename.as_bytes());
261 ret.append(py, as_pystring.into_object());
262 }
263 Ok(ret)
264 }
265
266 def non_normal_entries_iter(&self) -> PyResult<NonNormalEntriesIterator> {
267 // Make sure the sets are defined before we no longer have a mutable
268 // reference to the dmap.
269 self.inner(py)
270 .borrow_mut()
271 .set_non_normal_other_parent_entries(false);
272
273 let leaked_ref = self.inner(py).leak_immutable();
274
275 NonNormalEntriesIterator::from_inner(py, unsafe {
276 leaked_ref.map(py, |o| {
277 o.iter_non_normal_paths_panic()
278 })
279 })
280 }
281
282 def hastrackeddir(&self, d: PyObject) -> PyResult<PyBool> {
184 def hastrackeddir(&self, d: PyObject) -> PyResult<PyBool> {
283 let d = d.extract::<PyBytes>(py)?;
185 let d = d.extract::<PyBytes>(py)?;
284 Ok(self.inner(py).borrow_mut()
186 Ok(self.inner(py).borrow_mut()
285 .has_tracked_dir(HgPath::new(d.data(py)))
187 .has_tracked_dir(HgPath::new(d.data(py)))
286 .map_err(|e| {
188 .map_err(|e| {
287 PyErr::new::<exc::ValueError, _>(py, e.to_string())
189 PyErr::new::<exc::ValueError, _>(py, e.to_string())
288 })?
190 })?
289 .to_py_object(py))
191 .to_py_object(py))
290 }
192 }
291
193
292 def hasdir(&self, d: PyObject) -> PyResult<PyBool> {
194 def hasdir(&self, d: PyObject) -> PyResult<PyBool> {
293 let d = d.extract::<PyBytes>(py)?;
195 let d = d.extract::<PyBytes>(py)?;
294 Ok(self.inner(py).borrow_mut()
196 Ok(self.inner(py).borrow_mut()
295 .has_dir(HgPath::new(d.data(py)))
197 .has_dir(HgPath::new(d.data(py)))
296 .map_err(|e| {
198 .map_err(|e| {
297 PyErr::new::<exc::ValueError, _>(py, e.to_string())
199 PyErr::new::<exc::ValueError, _>(py, e.to_string())
298 })?
200 })?
299 .to_py_object(py))
201 .to_py_object(py))
300 }
202 }
301
203
302 def write_v1(
204 def write_v1(
303 &self,
205 &self,
304 p1: PyObject,
206 p1: PyObject,
305 p2: PyObject,
207 p2: PyObject,
306 now: PyObject
208 now: PyObject
307 ) -> PyResult<PyBytes> {
209 ) -> PyResult<PyBytes> {
308 let now = Timestamp(now.extract(py)?);
210 let now = Timestamp(now.extract(py)?);
309
211
310 let mut inner = self.inner(py).borrow_mut();
212 let mut inner = self.inner(py).borrow_mut();
311 let parents = DirstateParents {
213 let parents = DirstateParents {
312 p1: extract_node_id(py, &p1)?,
214 p1: extract_node_id(py, &p1)?,
313 p2: extract_node_id(py, &p2)?,
215 p2: extract_node_id(py, &p2)?,
314 };
216 };
315 let result = inner.pack_v1(parents, now);
217 let result = inner.pack_v1(parents, now);
316 match result {
218 match result {
317 Ok(packed) => Ok(PyBytes::new(py, &packed)),
219 Ok(packed) => Ok(PyBytes::new(py, &packed)),
318 Err(_) => Err(PyErr::new::<exc::OSError, _>(
220 Err(_) => Err(PyErr::new::<exc::OSError, _>(
319 py,
221 py,
320 "Dirstate error".to_string(),
222 "Dirstate error".to_string(),
321 )),
223 )),
322 }
224 }
323 }
225 }
324
226
325 /// Returns new data together with whether that data should be appended to
227 /// Returns new data together with whether that data should be appended to
326 /// the existing data file whose content is at `self.on_disk` (True),
228 /// the existing data file whose content is at `self.on_disk` (True),
327 /// instead of written to a new data file (False).
229 /// instead of written to a new data file (False).
328 def write_v2(
230 def write_v2(
329 &self,
231 &self,
330 now: PyObject,
232 now: PyObject,
331 can_append: bool,
233 can_append: bool,
332 ) -> PyResult<PyObject> {
234 ) -> PyResult<PyObject> {
333 let now = Timestamp(now.extract(py)?);
235 let now = Timestamp(now.extract(py)?);
334
236
335 let mut inner = self.inner(py).borrow_mut();
237 let mut inner = self.inner(py).borrow_mut();
336 let result = inner.pack_v2(now, can_append);
238 let result = inner.pack_v2(now, can_append);
337 match result {
239 match result {
338 Ok((packed, tree_metadata, append)) => {
240 Ok((packed, tree_metadata, append)) => {
339 let packed = PyBytes::new(py, &packed);
241 let packed = PyBytes::new(py, &packed);
340 let tree_metadata = PyBytes::new(py, &tree_metadata);
242 let tree_metadata = PyBytes::new(py, &tree_metadata);
341 let tuple = (packed, tree_metadata, append);
243 let tuple = (packed, tree_metadata, append);
342 Ok(tuple.to_py_object(py).into_object())
244 Ok(tuple.to_py_object(py).into_object())
343 },
245 },
344 Err(_) => Err(PyErr::new::<exc::OSError, _>(
246 Err(_) => Err(PyErr::new::<exc::OSError, _>(
345 py,
247 py,
346 "Dirstate error".to_string(),
248 "Dirstate error".to_string(),
347 )),
249 )),
348 }
250 }
349 }
251 }
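
As the docstring notes, write_v2 hands the caller the packed bytes, the tree metadata, and an `append` flag saying whether the bytes belong at the end of the existing data file or in a fresh one. A minimal sketch of consuming that contract in plain Rust, with hypothetical file names and none of the real docket/transaction handling:

    use std::fs::OpenOptions;
    use std::io::Write;

    /// Illustrative only: mirrors the (packed, tree_metadata, append) contract
    /// returned by pack_v2, with made-up paths instead of the real file layout.
    fn store_v2_result(
        packed: &[u8],
        _tree_metadata: &[u8],
        append: bool,
    ) -> std::io::Result<()> {
        if append {
            // True: the new bytes go after the existing content of the data file.
            let mut f = OpenOptions::new().append(true).open("dirstate-v2.data")?;
            f.write_all(packed)?;
        } else {
            // False: start a brand new data file instead.
            let mut f = std::fs::File::create("dirstate-v2.data.new")?;
            f.write_all(packed)?;
        }
        Ok(())
    }

    fn main() -> std::io::Result<()> {
        // Pretend pack_v2 returned these values.
        store_v2_result(b"serialized nodes", b"metadata", false)
    }

The tree metadata is returned separately because it is not stored in the data file itself; the Python callers persist it on their own (in the dirstate-v2 docket), which this sketch leaves out.
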
350
252
351 def filefoldmapasdict(&self) -> PyResult<PyDict> {
253 def filefoldmapasdict(&self) -> PyResult<PyDict> {
352 let dict = PyDict::new(py);
254 let dict = PyDict::new(py);
353 for item in self.inner(py).borrow_mut().iter() {
255 for item in self.inner(py).borrow_mut().iter() {
354 let (path, entry) = item.map_err(|e| v2_error(py, e))?;
256 let (path, entry) = item.map_err(|e| v2_error(py, e))?;
355 if entry.state() != EntryState::Removed {
257 if entry.state() != EntryState::Removed {
356 let key = normalize_case(path);
258 let key = normalize_case(path);
357 let value = path;
259 let value = path;
358 dict.set_item(
260 dict.set_item(
359 py,
261 py,
360 PyBytes::new(py, key.as_bytes()).into_object(),
262 PyBytes::new(py, key.as_bytes()).into_object(),
361 PyBytes::new(py, value.as_bytes()).into_object(),
263 PyBytes::new(py, value.as_bytes()).into_object(),
362 )?;
264 )?;
363 }
265 }
364 }
266 }
365 Ok(dict)
267 Ok(dict)
366 }
268 }
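
filefoldmapasdict builds a map from the case-normalized spelling of every non-Removed path back to its stored spelling. The same idea in standalone Rust, with a plain HashMap and an ASCII lowercase fold standing in for normalize_case:

    use std::collections::HashMap;

    /// Illustrative only: build a fold map the way filefoldmapasdict does,
    /// keyed by a lowercased copy of each path, skipping removed entries.
    fn fold_map<'a>(paths: &[(&'a str, bool)]) -> HashMap<String, &'a str> {
        let mut map = HashMap::new();
        for &(path, removed) in paths {
            if !removed {
                // Simplified stand-in for normalize_case().
                map.insert(path.to_ascii_lowercase(), path);
            }
        }
        map
    }

    fn main() {
        let entries = [("Foo/Bar.txt", false), ("baz.txt", true)];
        let map = fold_map(&entries);
        assert_eq!(map.get("foo/bar.txt"), Some(&"Foo/Bar.txt"));
        assert!(!map.contains_key("baz.txt")); // removed entries are skipped
        println!("{:?}", map);
    }
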
367
269
368 def __len__(&self) -> PyResult<usize> {
270 def __len__(&self) -> PyResult<usize> {
369 Ok(self.inner(py).borrow().len())
271 Ok(self.inner(py).borrow().len())
370 }
272 }
371
273
372 def __contains__(&self, key: PyObject) -> PyResult<bool> {
274 def __contains__(&self, key: PyObject) -> PyResult<bool> {
373 let key = key.extract::<PyBytes>(py)?;
275 let key = key.extract::<PyBytes>(py)?;
374 self.inner(py)
276 self.inner(py)
375 .borrow()
277 .borrow()
376 .contains_key(HgPath::new(key.data(py)))
278 .contains_key(HgPath::new(key.data(py)))
377 .map_err(|e| v2_error(py, e))
279 .map_err(|e| v2_error(py, e))
378 }
280 }
379
281
380 def __getitem__(&self, key: PyObject) -> PyResult<PyObject> {
282 def __getitem__(&self, key: PyObject) -> PyResult<PyObject> {
381 let key = key.extract::<PyBytes>(py)?;
283 let key = key.extract::<PyBytes>(py)?;
382 let key = HgPath::new(key.data(py));
284 let key = HgPath::new(key.data(py));
383 match self
285 match self
384 .inner(py)
286 .inner(py)
385 .borrow()
287 .borrow()
386 .get(key)
288 .get(key)
387 .map_err(|e| v2_error(py, e))?
289 .map_err(|e| v2_error(py, e))?
388 {
290 {
389 Some(entry) => {
291 Some(entry) => {
390 Ok(DirstateItem::new_as_pyobject(py, entry)?)
292 Ok(DirstateItem::new_as_pyobject(py, entry)?)
391 },
293 },
392 None => Err(PyErr::new::<exc::KeyError, _>(
294 None => Err(PyErr::new::<exc::KeyError, _>(
393 py,
295 py,
394 String::from_utf8_lossy(key.as_bytes()),
296 String::from_utf8_lossy(key.as_bytes()),
395 )),
297 )),
396 }
298 }
397 }
299 }
398
300
399 def keys(&self) -> PyResult<DirstateMapKeysIterator> {
301 def keys(&self) -> PyResult<DirstateMapKeysIterator> {
400 let leaked_ref = self.inner(py).leak_immutable();
302 let leaked_ref = self.inner(py).leak_immutable();
401 DirstateMapKeysIterator::from_inner(
303 DirstateMapKeysIterator::from_inner(
402 py,
304 py,
403 unsafe { leaked_ref.map(py, |o| o.iter()) },
305 unsafe { leaked_ref.map(py, |o| o.iter()) },
404 )
306 )
405 }
307 }
406
308
407 def items(&self) -> PyResult<DirstateMapItemsIterator> {
309 def items(&self) -> PyResult<DirstateMapItemsIterator> {
408 let leaked_ref = self.inner(py).leak_immutable();
310 let leaked_ref = self.inner(py).leak_immutable();
409 DirstateMapItemsIterator::from_inner(
311 DirstateMapItemsIterator::from_inner(
410 py,
312 py,
411 unsafe { leaked_ref.map(py, |o| o.iter()) },
313 unsafe { leaked_ref.map(py, |o| o.iter()) },
412 )
314 )
413 }
315 }
414
316
415 def __iter__(&self) -> PyResult<DirstateMapKeysIterator> {
317 def __iter__(&self) -> PyResult<DirstateMapKeysIterator> {
416 let leaked_ref = self.inner(py).leak_immutable();
318 let leaked_ref = self.inner(py).leak_immutable();
417 DirstateMapKeysIterator::from_inner(
319 DirstateMapKeysIterator::from_inner(
418 py,
320 py,
419 unsafe { leaked_ref.map(py, |o| o.iter()) },
321 unsafe { leaked_ref.map(py, |o| o.iter()) },
420 )
322 )
421 }
323 }
422
324
423 // TODO all copymap* methods, see docstring above
325 // TODO all copymap* methods, see docstring above
424 def copymapcopy(&self) -> PyResult<PyDict> {
326 def copymapcopy(&self) -> PyResult<PyDict> {
425 let dict = PyDict::new(py);
327 let dict = PyDict::new(py);
426 for item in self.inner(py).borrow().copy_map_iter() {
328 for item in self.inner(py).borrow().copy_map_iter() {
427 let (key, value) = item.map_err(|e| v2_error(py, e))?;
329 let (key, value) = item.map_err(|e| v2_error(py, e))?;
428 dict.set_item(
330 dict.set_item(
429 py,
331 py,
430 PyBytes::new(py, key.as_bytes()),
332 PyBytes::new(py, key.as_bytes()),
431 PyBytes::new(py, value.as_bytes()),
333 PyBytes::new(py, value.as_bytes()),
432 )?;
334 )?;
433 }
335 }
434 Ok(dict)
336 Ok(dict)
435 }
337 }
436
338
437 def copymapgetitem(&self, key: PyObject) -> PyResult<PyBytes> {
339 def copymapgetitem(&self, key: PyObject) -> PyResult<PyBytes> {
438 let key = key.extract::<PyBytes>(py)?;
340 let key = key.extract::<PyBytes>(py)?;
439 match self
341 match self
440 .inner(py)
342 .inner(py)
441 .borrow()
343 .borrow()
442 .copy_map_get(HgPath::new(key.data(py)))
344 .copy_map_get(HgPath::new(key.data(py)))
443 .map_err(|e| v2_error(py, e))?
345 .map_err(|e| v2_error(py, e))?
444 {
346 {
445 Some(copy) => Ok(PyBytes::new(py, copy.as_bytes())),
347 Some(copy) => Ok(PyBytes::new(py, copy.as_bytes())),
446 None => Err(PyErr::new::<exc::KeyError, _>(
348 None => Err(PyErr::new::<exc::KeyError, _>(
447 py,
349 py,
448 String::from_utf8_lossy(key.data(py)),
350 String::from_utf8_lossy(key.data(py)),
449 )),
351 )),
450 }
352 }
451 }
353 }
452 def copymap(&self) -> PyResult<CopyMap> {
354 def copymap(&self) -> PyResult<CopyMap> {
453 CopyMap::from_inner(py, self.clone_ref(py))
355 CopyMap::from_inner(py, self.clone_ref(py))
454 }
356 }
455
357
456 def copymaplen(&self) -> PyResult<usize> {
358 def copymaplen(&self) -> PyResult<usize> {
457 Ok(self.inner(py).borrow().copy_map_len())
359 Ok(self.inner(py).borrow().copy_map_len())
458 }
360 }
459 def copymapcontains(&self, key: PyObject) -> PyResult<bool> {
361 def copymapcontains(&self, key: PyObject) -> PyResult<bool> {
460 let key = key.extract::<PyBytes>(py)?;
362 let key = key.extract::<PyBytes>(py)?;
461 self.inner(py)
363 self.inner(py)
462 .borrow()
364 .borrow()
463 .copy_map_contains_key(HgPath::new(key.data(py)))
365 .copy_map_contains_key(HgPath::new(key.data(py)))
464 .map_err(|e| v2_error(py, e))
366 .map_err(|e| v2_error(py, e))
465 }
367 }
466 def copymapget(
368 def copymapget(
467 &self,
369 &self,
468 key: PyObject,
370 key: PyObject,
469 default: Option<PyObject>
371 default: Option<PyObject>
470 ) -> PyResult<Option<PyObject>> {
372 ) -> PyResult<Option<PyObject>> {
471 let key = key.extract::<PyBytes>(py)?;
373 let key = key.extract::<PyBytes>(py)?;
472 match self
374 match self
473 .inner(py)
375 .inner(py)
474 .borrow()
376 .borrow()
475 .copy_map_get(HgPath::new(key.data(py)))
377 .copy_map_get(HgPath::new(key.data(py)))
476 .map_err(|e| v2_error(py, e))?
378 .map_err(|e| v2_error(py, e))?
477 {
379 {
478 Some(copy) => Ok(Some(
380 Some(copy) => Ok(Some(
479 PyBytes::new(py, copy.as_bytes()).into_object(),
381 PyBytes::new(py, copy.as_bytes()).into_object(),
480 )),
382 )),
481 None => Ok(default),
383 None => Ok(default),
482 }
384 }
483 }
385 }
484 def copymapsetitem(
386 def copymapsetitem(
485 &self,
387 &self,
486 key: PyObject,
388 key: PyObject,
487 value: PyObject
389 value: PyObject
488 ) -> PyResult<PyObject> {
390 ) -> PyResult<PyObject> {
489 let key = key.extract::<PyBytes>(py)?;
391 let key = key.extract::<PyBytes>(py)?;
490 let value = value.extract::<PyBytes>(py)?;
392 let value = value.extract::<PyBytes>(py)?;
491 self.inner(py)
393 self.inner(py)
492 .borrow_mut()
394 .borrow_mut()
493 .copy_map_insert(
395 .copy_map_insert(
494 HgPathBuf::from_bytes(key.data(py)),
396 HgPathBuf::from_bytes(key.data(py)),
495 HgPathBuf::from_bytes(value.data(py)),
397 HgPathBuf::from_bytes(value.data(py)),
496 )
398 )
497 .map_err(|e| v2_error(py, e))?;
399 .map_err(|e| v2_error(py, e))?;
498 Ok(py.None())
400 Ok(py.None())
499 }
401 }
500 def copymappop(
402 def copymappop(
501 &self,
403 &self,
502 key: PyObject,
404 key: PyObject,
503 default: Option<PyObject>
405 default: Option<PyObject>
504 ) -> PyResult<Option<PyObject>> {
406 ) -> PyResult<Option<PyObject>> {
505 let key = key.extract::<PyBytes>(py)?;
407 let key = key.extract::<PyBytes>(py)?;
506 match self
408 match self
507 .inner(py)
409 .inner(py)
508 .borrow_mut()
410 .borrow_mut()
509 .copy_map_remove(HgPath::new(key.data(py)))
411 .copy_map_remove(HgPath::new(key.data(py)))
510 .map_err(|e| v2_error(py, e))?
412 .map_err(|e| v2_error(py, e))?
511 {
413 {
512 Some(_) => Ok(None),
414 Some(_) => Ok(None),
513 None => Ok(default),
415 None => Ok(default),
514 }
416 }
515 }
417 }
516
418
517 def copymapiter(&self) -> PyResult<CopyMapKeysIterator> {
419 def copymapiter(&self) -> PyResult<CopyMapKeysIterator> {
518 let leaked_ref = self.inner(py).leak_immutable();
420 let leaked_ref = self.inner(py).leak_immutable();
519 CopyMapKeysIterator::from_inner(
421 CopyMapKeysIterator::from_inner(
520 py,
422 py,
521 unsafe { leaked_ref.map(py, |o| o.copy_map_iter()) },
423 unsafe { leaked_ref.map(py, |o| o.copy_map_iter()) },
522 )
424 )
523 }
425 }
524
426
525 def copymapitemsiter(&self) -> PyResult<CopyMapItemsIterator> {
427 def copymapitemsiter(&self) -> PyResult<CopyMapItemsIterator> {
526 let leaked_ref = self.inner(py).leak_immutable();
428 let leaked_ref = self.inner(py).leak_immutable();
527 CopyMapItemsIterator::from_inner(
429 CopyMapItemsIterator::from_inner(
528 py,
430 py,
529 unsafe { leaked_ref.map(py, |o| o.copy_map_iter()) },
431 unsafe { leaked_ref.map(py, |o| o.copy_map_iter()) },
530 )
432 )
531 }
433 }
532
434
533 def tracked_dirs(&self) -> PyResult<PyList> {
435 def tracked_dirs(&self) -> PyResult<PyList> {
534 let dirs = PyList::new(py, &[]);
436 let dirs = PyList::new(py, &[]);
535 for path in self.inner(py).borrow_mut().iter_tracked_dirs()
437 for path in self.inner(py).borrow_mut().iter_tracked_dirs()
536 .map_err(|e| dirstate_error(py, e))?
438 .map_err(|e| dirstate_error(py, e))?
537 {
439 {
538 let path = path.map_err(|e| v2_error(py, e))?;
440 let path = path.map_err(|e| v2_error(py, e))?;
539 let path = PyBytes::new(py, path.as_bytes());
441 let path = PyBytes::new(py, path.as_bytes());
540 dirs.append(py, path.into_object())
442 dirs.append(py, path.into_object())
541 }
443 }
542 Ok(dirs)
444 Ok(dirs)
543 }
445 }
544
446
545 def debug_iter(&self, all: bool) -> PyResult<PyList> {
447 def debug_iter(&self, all: bool) -> PyResult<PyList> {
546 let dirs = PyList::new(py, &[]);
448 let dirs = PyList::new(py, &[]);
547 for item in self.inner(py).borrow().debug_iter(all) {
449 for item in self.inner(py).borrow().debug_iter(all) {
548 let (path, (state, mode, size, mtime)) =
450 let (path, (state, mode, size, mtime)) =
549 item.map_err(|e| v2_error(py, e))?;
451 item.map_err(|e| v2_error(py, e))?;
550 let path = PyBytes::new(py, path.as_bytes());
452 let path = PyBytes::new(py, path.as_bytes());
551 let item = (path, state, mode, size, mtime);
453 let item = (path, state, mode, size, mtime);
552 dirs.append(py, item.to_py_object(py).into_object())
454 dirs.append(py, item.to_py_object(py).into_object())
553 }
455 }
554 Ok(dirs)
456 Ok(dirs)
555 }
457 }
556 });
458 });
557
459
558 impl DirstateMap {
460 impl DirstateMap {
559 pub fn get_inner_mut<'a>(
461 pub fn get_inner_mut<'a>(
560 &'a self,
462 &'a self,
561 py: Python<'a>,
463 py: Python<'a>,
562 ) -> RefMut<'a, Box<dyn DirstateMapMethods + Send>> {
464 ) -> RefMut<'a, Box<dyn DirstateMapMethods + Send>> {
563 self.inner(py).borrow_mut()
465 self.inner(py).borrow_mut()
564 }
466 }
565 fn translate_key(
467 fn translate_key(
566 py: Python,
468 py: Python,
567 res: Result<(&HgPath, DirstateEntry), DirstateV2ParseError>,
469 res: Result<(&HgPath, DirstateEntry), DirstateV2ParseError>,
568 ) -> PyResult<Option<PyBytes>> {
470 ) -> PyResult<Option<PyBytes>> {
569 let (f, _entry) = res.map_err(|e| v2_error(py, e))?;
471 let (f, _entry) = res.map_err(|e| v2_error(py, e))?;
570 Ok(Some(PyBytes::new(py, f.as_bytes())))
472 Ok(Some(PyBytes::new(py, f.as_bytes())))
571 }
473 }
572 fn translate_key_value(
474 fn translate_key_value(
573 py: Python,
475 py: Python,
574 res: Result<(&HgPath, DirstateEntry), DirstateV2ParseError>,
476 res: Result<(&HgPath, DirstateEntry), DirstateV2ParseError>,
575 ) -> PyResult<Option<(PyBytes, PyObject)>> {
477 ) -> PyResult<Option<(PyBytes, PyObject)>> {
576 let (f, entry) = res.map_err(|e| v2_error(py, e))?;
478 let (f, entry) = res.map_err(|e| v2_error(py, e))?;
577 Ok(Some((
479 Ok(Some((
578 PyBytes::new(py, f.as_bytes()),
480 PyBytes::new(py, f.as_bytes()),
579 DirstateItem::new_as_pyobject(py, entry)?,
481 DirstateItem::new_as_pyobject(py, entry)?,
580 )))
482 )))
581 }
483 }
582 }
484 }
583
485
584 py_shared_iterator!(
486 py_shared_iterator!(
585 DirstateMapKeysIterator,
487 DirstateMapKeysIterator,
586 UnsafePyLeaked<StateMapIter<'static>>,
488 UnsafePyLeaked<StateMapIter<'static>>,
587 DirstateMap::translate_key,
489 DirstateMap::translate_key,
588 Option<PyBytes>
490 Option<PyBytes>
589 );
491 );
590
492
591 py_shared_iterator!(
493 py_shared_iterator!(
592 DirstateMapItemsIterator,
494 DirstateMapItemsIterator,
593 UnsafePyLeaked<StateMapIter<'static>>,
495 UnsafePyLeaked<StateMapIter<'static>>,
594 DirstateMap::translate_key_value,
496 DirstateMap::translate_key_value,
595 Option<(PyBytes, PyObject)>
497 Option<(PyBytes, PyObject)>
596 );
498 );
597
499
598 fn extract_node_id(py: Python, obj: &PyObject) -> PyResult<Node> {
500 fn extract_node_id(py: Python, obj: &PyObject) -> PyResult<Node> {
599 let bytes = obj.extract::<PyBytes>(py)?;
501 let bytes = obj.extract::<PyBytes>(py)?;
600 match bytes.data(py).try_into() {
502 match bytes.data(py).try_into() {
601 Ok(s) => Ok(s),
503 Ok(s) => Ok(s),
602 Err(e) => Err(PyErr::new::<exc::ValueError, _>(py, e.to_string())),
504 Err(e) => Err(PyErr::new::<exc::ValueError, _>(py, e.to_string())),
603 }
505 }
604 }
506 }
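
extract_node_id leans on TryInto to turn the borrowed PyBytes slice into the fixed-width Node type, so a wrong-length value fails cleanly instead of panicking. The underlying slice-to-array conversion, sketched with a hypothetical 20-byte node type:

    use std::convert::TryInto;

    // Hypothetical stand-in for hg's Node: a fixed-size array of 20 bytes.
    type NodeBytes = [u8; 20];

    fn node_from_slice(data: &[u8]) -> Result<NodeBytes, String> {
        // try_into() only succeeds when the slice has exactly 20 bytes,
        // which is the same length check extract_node_id depends on.
        data.try_into().map_err(|e| format!("invalid node length: {}", e))
    }

    fn main() {
        assert!(node_from_slice(&[0u8; 20]).is_ok());
        assert!(node_from_slice(&[0u8; 5]).is_err());
    }
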
605
507
606 pub(super) fn v2_error(py: Python<'_>, _: DirstateV2ParseError) -> PyErr {
508 pub(super) fn v2_error(py: Python<'_>, _: DirstateV2ParseError) -> PyErr {
607 PyErr::new::<exc::ValueError, _>(py, "corrupted dirstate-v2")
509 PyErr::new::<exc::ValueError, _>(py, "corrupted dirstate-v2")
608 }
510 }
609
511
610 fn dirstate_error(py: Python<'_>, e: DirstateError) -> PyErr {
512 fn dirstate_error(py: Python<'_>, e: DirstateError) -> PyErr {
611 PyErr::new::<exc::OSError, _>(py, format!("Dirstate error: {:?}", e))
513 PyErr::new::<exc::OSError, _>(py, format!("Dirstate error: {:?}", e))
612 }
514 }
@@ -1,223 +1,213 b''
1 use cpython::exc;
1 use cpython::exc;
2 use cpython::PyBytes;
2 use cpython::PyBytes;
3 use cpython::PyErr;
3 use cpython::PyErr;
4 use cpython::PyNone;
4 use cpython::PyNone;
5 use cpython::PyObject;
5 use cpython::PyObject;
6 use cpython::PyResult;
6 use cpython::PyResult;
7 use cpython::Python;
7 use cpython::Python;
8 use cpython::PythonObject;
8 use cpython::PythonObject;
9 use hg::dirstate::entry::Flags;
9 use hg::dirstate::entry::Flags;
10 use hg::dirstate::DirstateEntry;
10 use hg::dirstate::DirstateEntry;
11 use hg::dirstate::EntryState;
11 use hg::dirstate::EntryState;
12 use std::cell::Cell;
12 use std::cell::Cell;
13 use std::convert::TryFrom;
13 use std::convert::TryFrom;
14
14
15 py_class!(pub class DirstateItem |py| {
15 py_class!(pub class DirstateItem |py| {
16 data entry: Cell<DirstateEntry>;
16 data entry: Cell<DirstateEntry>;
17
17
18 def __new__(
18 def __new__(
19 _cls,
19 _cls,
20 wc_tracked: bool = false,
20 wc_tracked: bool = false,
21 p1_tracked: bool = false,
21 p1_tracked: bool = false,
22 p2_tracked: bool = false,
22 p2_tracked: bool = false,
23 merged: bool = false,
23 merged: bool = false,
24 clean_p1: bool = false,
24 clean_p1: bool = false,
25 clean_p2: bool = false,
25 clean_p2: bool = false,
26 possibly_dirty: bool = false,
26 possibly_dirty: bool = false,
27 parentfiledata: Option<(i32, i32, i32)> = None,
27 parentfiledata: Option<(i32, i32, i32)> = None,
28
28
29 ) -> PyResult<DirstateItem> {
29 ) -> PyResult<DirstateItem> {
30 let mut flags = Flags::empty();
30 let mut flags = Flags::empty();
31 flags.set(Flags::WDIR_TRACKED, wc_tracked);
31 flags.set(Flags::WDIR_TRACKED, wc_tracked);
32 flags.set(Flags::P1_TRACKED, p1_tracked);
32 flags.set(Flags::P1_TRACKED, p1_tracked);
33 flags.set(Flags::P2_TRACKED, p2_tracked);
33 flags.set(Flags::P2_TRACKED, p2_tracked);
34 flags.set(Flags::MERGED, merged);
34 flags.set(Flags::MERGED, merged);
35 flags.set(Flags::CLEAN_P1, clean_p1);
35 flags.set(Flags::CLEAN_P1, clean_p1);
36 flags.set(Flags::CLEAN_P2, clean_p2);
36 flags.set(Flags::CLEAN_P2, clean_p2);
37 flags.set(Flags::POSSIBLY_DIRTY, possibly_dirty);
37 flags.set(Flags::POSSIBLY_DIRTY, possibly_dirty);
38 let entry = DirstateEntry::new(flags, parentfiledata);
38 let entry = DirstateEntry::new(flags, parentfiledata);
39 DirstateItem::create_instance(py, Cell::new(entry))
39 DirstateItem::create_instance(py, Cell::new(entry))
40 }
40 }
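
__new__ folds its keyword arguments into a single Flags value through repeated flags.set(...) calls before building the DirstateEntry. Assuming Flags is a bitflags-style type (the names below are illustrative, not the real hg::dirstate::entry::Flags), the composition pattern looks like this on its own:

    // Illustrative only; assumes the `bitflags` crate (bitflags = "1").
    use bitflags::bitflags;

    bitflags! {
        struct DemoFlags: u8 {
            const WDIR_TRACKED = 1 << 0;
            const P1_TRACKED   = 1 << 1;
            const P2_TRACKED   = 1 << 2;
        }
    }

    fn main() {
        let (wc_tracked, p1_tracked, p2_tracked) = (true, false, true);

        // set() turns each bit on or off according to the boolean,
        // which is how __new__ translates its keyword arguments.
        let mut flags = DemoFlags::empty();
        flags.set(DemoFlags::WDIR_TRACKED, wc_tracked);
        flags.set(DemoFlags::P1_TRACKED, p1_tracked);
        flags.set(DemoFlags::P2_TRACKED, p2_tracked);

        assert!(flags.contains(DemoFlags::WDIR_TRACKED));
        assert!(!flags.contains(DemoFlags::P1_TRACKED));
    }
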
41
41
42 @property
42 @property
43 def state(&self) -> PyResult<PyBytes> {
43 def state(&self) -> PyResult<PyBytes> {
44 let state_byte: u8 = self.entry(py).get().state().into();
44 let state_byte: u8 = self.entry(py).get().state().into();
45 Ok(PyBytes::new(py, &[state_byte]))
45 Ok(PyBytes::new(py, &[state_byte]))
46 }
46 }
47
47
48 @property
48 @property
49 def mode(&self) -> PyResult<i32> {
49 def mode(&self) -> PyResult<i32> {
50 Ok(self.entry(py).get().mode())
50 Ok(self.entry(py).get().mode())
51 }
51 }
52
52
53 @property
53 @property
54 def size(&self) -> PyResult<i32> {
54 def size(&self) -> PyResult<i32> {
55 Ok(self.entry(py).get().size())
55 Ok(self.entry(py).get().size())
56 }
56 }
57
57
58 @property
58 @property
59 def mtime(&self) -> PyResult<i32> {
59 def mtime(&self) -> PyResult<i32> {
60 Ok(self.entry(py).get().mtime())
60 Ok(self.entry(py).get().mtime())
61 }
61 }
62
62
63 @property
63 @property
64 def tracked(&self) -> PyResult<bool> {
64 def tracked(&self) -> PyResult<bool> {
65 Ok(self.entry(py).get().tracked())
65 Ok(self.entry(py).get().tracked())
66 }
66 }
67
67
68 @property
68 @property
69 def added(&self) -> PyResult<bool> {
69 def added(&self) -> PyResult<bool> {
70 Ok(self.entry(py).get().added())
70 Ok(self.entry(py).get().added())
71 }
71 }
72
72
73 @property
73 @property
74 def merged(&self) -> PyResult<bool> {
74 def merged(&self) -> PyResult<bool> {
75 Ok(self.entry(py).get().merged())
75 Ok(self.entry(py).get().merged())
76 }
76 }
77
77
78 @property
78 @property
79 def removed(&self) -> PyResult<bool> {
79 def removed(&self) -> PyResult<bool> {
80 Ok(self.entry(py).get().removed())
80 Ok(self.entry(py).get().removed())
81 }
81 }
82
82
83 @property
83 @property
84 def from_p2(&self) -> PyResult<bool> {
84 def from_p2(&self) -> PyResult<bool> {
85 Ok(self.entry(py).get().from_p2())
85 Ok(self.entry(py).get().from_p2())
86 }
86 }
87
87
88 @property
88 @property
89 def merged_removed(&self) -> PyResult<bool> {
89 def merged_removed(&self) -> PyResult<bool> {
90 Ok(self.entry(py).get().merged_removed())
90 Ok(self.entry(py).get().merged_removed())
91 }
91 }
92
92
93 @property
93 @property
94 def from_p2_removed(&self) -> PyResult<bool> {
94 def from_p2_removed(&self) -> PyResult<bool> {
95 Ok(self.entry(py).get().from_p2_removed())
95 Ok(self.entry(py).get().from_p2_removed())
96 }
96 }
97
97
98 @property
99 def dm_nonnormal(&self) -> PyResult<bool> {
100 Ok(self.entry(py).get().is_non_normal())
101 }
102
103 @property
104 def dm_otherparent(&self) -> PyResult<bool> {
105 Ok(self.entry(py).get().is_from_other_parent())
106 }
107
108 def v1_state(&self) -> PyResult<PyBytes> {
98 def v1_state(&self) -> PyResult<PyBytes> {
109 let (state, _mode, _size, _mtime) = self.entry(py).get().v1_data();
99 let (state, _mode, _size, _mtime) = self.entry(py).get().v1_data();
110 let state_byte: u8 = state.into();
100 let state_byte: u8 = state.into();
111 Ok(PyBytes::new(py, &[state_byte]))
101 Ok(PyBytes::new(py, &[state_byte]))
112 }
102 }
113
103
114 def v1_mode(&self) -> PyResult<i32> {
104 def v1_mode(&self) -> PyResult<i32> {
115 let (_state, mode, _size, _mtime) = self.entry(py).get().v1_data();
105 let (_state, mode, _size, _mtime) = self.entry(py).get().v1_data();
116 Ok(mode)
106 Ok(mode)
117 }
107 }
118
108
119 def v1_size(&self) -> PyResult<i32> {
109 def v1_size(&self) -> PyResult<i32> {
120 let (_state, _mode, size, _mtime) = self.entry(py).get().v1_data();
110 let (_state, _mode, size, _mtime) = self.entry(py).get().v1_data();
121 Ok(size)
111 Ok(size)
122 }
112 }
123
113
124 def v1_mtime(&self) -> PyResult<i32> {
114 def v1_mtime(&self) -> PyResult<i32> {
125 let (_state, _mode, _size, mtime) = self.entry(py).get().v1_data();
115 let (_state, _mode, _size, mtime) = self.entry(py).get().v1_data();
126 Ok(mtime)
116 Ok(mtime)
127 }
117 }
128
118
129 def need_delay(&self, now: i32) -> PyResult<bool> {
119 def need_delay(&self, now: i32) -> PyResult<bool> {
130 Ok(self.entry(py).get().mtime_is_ambiguous(now))
120 Ok(self.entry(py).get().mtime_is_ambiguous(now))
131 }
121 }
132
122
133 @classmethod
123 @classmethod
134 def from_v1_data(
124 def from_v1_data(
135 _cls,
125 _cls,
136 state: PyBytes,
126 state: PyBytes,
137 mode: i32,
127 mode: i32,
138 size: i32,
128 size: i32,
139 mtime: i32,
129 mtime: i32,
140 ) -> PyResult<Self> {
130 ) -> PyResult<Self> {
141 let state = <[u8; 1]>::try_from(state.data(py))
131 let state = <[u8; 1]>::try_from(state.data(py))
142 .ok()
132 .ok()
143 .and_then(|state| EntryState::try_from(state[0]).ok())
133 .and_then(|state| EntryState::try_from(state[0]).ok())
144 .ok_or_else(|| PyErr::new::<exc::ValueError, _>(py, "invalid state"))?;
134 .ok_or_else(|| PyErr::new::<exc::ValueError, _>(py, "invalid state"))?;
145 let entry = DirstateEntry::from_v1_data(state, mode, size, mtime);
135 let entry = DirstateEntry::from_v1_data(state, mode, size, mtime);
146 DirstateItem::create_instance(py, Cell::new(entry))
136 DirstateItem::create_instance(py, Cell::new(entry))
147 }
137 }
148
138
149 @classmethod
139 @classmethod
150 def new_added(_cls) -> PyResult<Self> {
140 def new_added(_cls) -> PyResult<Self> {
151 let entry = DirstateEntry::new_added();
141 let entry = DirstateEntry::new_added();
152 DirstateItem::create_instance(py, Cell::new(entry))
142 DirstateItem::create_instance(py, Cell::new(entry))
153 }
143 }
154
144
155 @classmethod
145 @classmethod
156 def new_merged(_cls) -> PyResult<Self> {
146 def new_merged(_cls) -> PyResult<Self> {
157 let entry = DirstateEntry::new_merged();
147 let entry = DirstateEntry::new_merged();
158 DirstateItem::create_instance(py, Cell::new(entry))
148 DirstateItem::create_instance(py, Cell::new(entry))
159 }
149 }
160
150
161 @classmethod
151 @classmethod
162 def new_from_p2(_cls) -> PyResult<Self> {
152 def new_from_p2(_cls) -> PyResult<Self> {
163 let entry = DirstateEntry::new_from_p2();
153 let entry = DirstateEntry::new_from_p2();
164 DirstateItem::create_instance(py, Cell::new(entry))
154 DirstateItem::create_instance(py, Cell::new(entry))
165 }
155 }
166
156
167 @classmethod
157 @classmethod
168 def new_possibly_dirty(_cls) -> PyResult<Self> {
158 def new_possibly_dirty(_cls) -> PyResult<Self> {
169 let entry = DirstateEntry::new_possibly_dirty();
159 let entry = DirstateEntry::new_possibly_dirty();
170 DirstateItem::create_instance(py, Cell::new(entry))
160 DirstateItem::create_instance(py, Cell::new(entry))
171 }
161 }
172
162
173 @classmethod
163 @classmethod
174 def new_normal(_cls, mode: i32, size: i32, mtime: i32) -> PyResult<Self> {
164 def new_normal(_cls, mode: i32, size: i32, mtime: i32) -> PyResult<Self> {
175 let entry = DirstateEntry::new_normal(mode, size, mtime);
165 let entry = DirstateEntry::new_normal(mode, size, mtime);
176 DirstateItem::create_instance(py, Cell::new(entry))
166 DirstateItem::create_instance(py, Cell::new(entry))
177 }
167 }
178
168
179 def set_clean(
169 def set_clean(
180 &self,
170 &self,
181 mode: i32,
171 mode: i32,
182 size: i32,
172 size: i32,
183 mtime: i32,
173 mtime: i32,
184 ) -> PyResult<PyNone> {
174 ) -> PyResult<PyNone> {
185 self.update(py, |entry| entry.set_clean(mode, size, mtime));
175 self.update(py, |entry| entry.set_clean(mode, size, mtime));
186 Ok(PyNone)
176 Ok(PyNone)
187 }
177 }
188
178
189 def set_possibly_dirty(&self) -> PyResult<PyNone> {
179 def set_possibly_dirty(&self) -> PyResult<PyNone> {
190 self.update(py, |entry| entry.set_possibly_dirty());
180 self.update(py, |entry| entry.set_possibly_dirty());
191 Ok(PyNone)
181 Ok(PyNone)
192 }
182 }
193
183
194 def set_tracked(&self) -> PyResult<PyNone> {
184 def set_tracked(&self) -> PyResult<PyNone> {
195 self.update(py, |entry| entry.set_tracked());
185 self.update(py, |entry| entry.set_tracked());
196 Ok(PyNone)
186 Ok(PyNone)
197 }
187 }
198
188
199 def set_untracked(&self) -> PyResult<PyNone> {
189 def set_untracked(&self) -> PyResult<PyNone> {
200 self.update(py, |entry| entry.set_untracked());
190 self.update(py, |entry| entry.set_untracked());
201 Ok(PyNone)
191 Ok(PyNone)
202 }
192 }
203 });
193 });
204
194
205 impl DirstateItem {
195 impl DirstateItem {
206 pub fn new_as_pyobject(
196 pub fn new_as_pyobject(
207 py: Python<'_>,
197 py: Python<'_>,
208 entry: DirstateEntry,
198 entry: DirstateEntry,
209 ) -> PyResult<PyObject> {
199 ) -> PyResult<PyObject> {
210 Ok(DirstateItem::create_instance(py, Cell::new(entry))?.into_object())
200 Ok(DirstateItem::create_instance(py, Cell::new(entry))?.into_object())
211 }
201 }
212
202
213 pub fn get_entry(&self, py: Python<'_>) -> DirstateEntry {
203 pub fn get_entry(&self, py: Python<'_>) -> DirstateEntry {
214 self.entry(py).get()
204 self.entry(py).get()
215 }
205 }
216
206
217 // TODO: Use https://doc.rust-lang.org/std/cell/struct.Cell.html#method.update instead when it’s stable
207 // TODO: Use https://doc.rust-lang.org/std/cell/struct.Cell.html#method.update instead when it’s stable
218 pub fn update(&self, py: Python<'_>, f: impl FnOnce(&mut DirstateEntry)) {
208 pub fn update(&self, py: Python<'_>, f: impl FnOnce(&mut DirstateEntry)) {
219 let mut entry = self.entry(py).get();
209 let mut entry = self.entry(py).get();
220 f(&mut entry);
210 f(&mut entry);
221 self.entry(py).set(entry)
211 self.entry(py).set(entry)
222 }
212 }
223 }
213 }
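
The update helper above works around the not-yet-stable Cell::update mentioned in the TODO with an explicit get / mutate / set round-trip, which is sound because Cell::get requires the stored entry type to be Copy. A standalone sketch of the same pattern with a stand-in Copy struct:

    use std::cell::Cell;

    // Any Copy type works with Cell::get(); a small struct stands in for DirstateEntry.
    #[derive(Copy, Clone, Debug, PartialEq)]
    struct Entry {
        mode: i32,
        size: i32,
    }

    // Same shape as DirstateItem::update: read the value out, let the caller
    // mutate a local copy, then write it back into the Cell.
    fn update(cell: &Cell<Entry>, f: impl FnOnce(&mut Entry)) {
        let mut entry = cell.get();
        f(&mut entry);
        cell.set(entry);
    }

    fn main() {
        let cell = Cell::new(Entry { mode: 0o644, size: 0 });
        update(&cell, |e| e.size = 42);
        assert_eq!(cell.get(), Entry { mode: 0o644, size: 42 });
    }
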
NO CONTENT: three files were removed by this changeset (their contents are not shown).