dirstate-item: allow mtime to be None in "parentdata"...
marmoute
r49201:3d6eb119 default
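The diff below changes how `parentfiledata` is parsed in `dirstate_item_new` and `dirstate_item_set_clean`: instead of requiring a `(mode, size, (mtime_s, mtime_ns))` tuple via the `"ii(ii)"` format, the third element is now read as a generic object (`"iiO"`) and may be None, in which case the item is stored without a meaningful mtime and the next status run has to re-check the file content. A minimal sketch of the resulting calling convention follows; the Python-side class name and import path are assumptions, only the tuple shapes and keyword names come from the C code in this diff:

    # minimal sketch, assuming the extension exposes the type as DirstateItem
    from mercurial.cext.parsers import DirstateItem

    # previously parentfiledata had to carry a concrete mtime pair
    item = DirstateItem(wc_tracked=True, p1_tracked=True,
                        parentfiledata=(0o644, 12, (1634000000, 0)))

    # with this change the mtime slot may be None; the has_mtime flag is then
    # left unset, so the entry is treated as "possibly dirty" until re-checked
    item = DirstateItem(wc_tracked=True, p1_tracked=True,
                        parentfiledata=(0o644, 12, None))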
@@ -1,1337 +1,1355 b''
1 /*
1 /*
2 parsers.c - efficient content parsing
2 parsers.c - efficient content parsing
3
3
4 Copyright 2008 Olivia Mackall <olivia@selenic.com> and others
4 Copyright 2008 Olivia Mackall <olivia@selenic.com> and others
5
5
6 This software may be used and distributed according to the terms of
6 This software may be used and distributed according to the terms of
7 the GNU General Public License, incorporated herein by reference.
7 the GNU General Public License, incorporated herein by reference.
8 */
8 */
9
9
10 #define PY_SSIZE_T_CLEAN
10 #define PY_SSIZE_T_CLEAN
11 #include <Python.h>
11 #include <Python.h>
12 #include <ctype.h>
12 #include <ctype.h>
13 #include <stddef.h>
13 #include <stddef.h>
14 #include <string.h>
14 #include <string.h>
15
15
16 #include "bitmanipulation.h"
16 #include "bitmanipulation.h"
17 #include "charencode.h"
17 #include "charencode.h"
18 #include "util.h"
18 #include "util.h"
19
19
20 #ifdef IS_PY3K
20 #ifdef IS_PY3K
21 /* The mapping of Python types is meant to be temporary to get Python
21 /* The mapping of Python types is meant to be temporary to get Python
22 * 3 to compile. We should remove this once Python 3 support is fully
22 * 3 to compile. We should remove this once Python 3 support is fully
23 * supported and proper types are used in the extensions themselves. */
23 * supported and proper types are used in the extensions themselves. */
24 #define PyInt_Check PyLong_Check
24 #define PyInt_Check PyLong_Check
25 #define PyInt_FromLong PyLong_FromLong
25 #define PyInt_FromLong PyLong_FromLong
26 #define PyInt_FromSsize_t PyLong_FromSsize_t
26 #define PyInt_FromSsize_t PyLong_FromSsize_t
27 #define PyInt_AsLong PyLong_AsLong
27 #define PyInt_AsLong PyLong_AsLong
28 #else
28 #else
29 /* Windows on Python 2.7 doesn't define S_IFLNK. Python 3+ defines via
29 /* Windows on Python 2.7 doesn't define S_IFLNK. Python 3+ defines via
30 * pyport.h. */
30 * pyport.h. */
31 #ifndef S_IFLNK
31 #ifndef S_IFLNK
32 #define S_IFLNK 0120000
32 #define S_IFLNK 0120000
33 #endif
33 #endif
34 #endif
34 #endif
35
35
36 static const char *const versionerrortext = "Python minor version mismatch";
36 static const char *const versionerrortext = "Python minor version mismatch";
37
37
38 static const int dirstate_v1_from_p2 = -2;
38 static const int dirstate_v1_from_p2 = -2;
39 static const int dirstate_v1_nonnormal = -1;
39 static const int dirstate_v1_nonnormal = -1;
40 static const int ambiguous_time = -1;
40 static const int ambiguous_time = -1;
41
41
42 static PyObject *dict_new_presized(PyObject *self, PyObject *args)
42 static PyObject *dict_new_presized(PyObject *self, PyObject *args)
43 {
43 {
44 Py_ssize_t expected_size;
44 Py_ssize_t expected_size;
45
45
46 if (!PyArg_ParseTuple(args, "n:make_presized_dict", &expected_size)) {
46 if (!PyArg_ParseTuple(args, "n:make_presized_dict", &expected_size)) {
47 return NULL;
47 return NULL;
48 }
48 }
49
49
50 return _dict_new_presized(expected_size);
50 return _dict_new_presized(expected_size);
51 }
51 }
52
52
53 static PyObject *dirstate_item_new(PyTypeObject *subtype, PyObject *args,
53 static PyObject *dirstate_item_new(PyTypeObject *subtype, PyObject *args,
54 PyObject *kwds)
54 PyObject *kwds)
55 {
55 {
56 /* We do all the initialization here and not a tp_init function because
56 /* We do all the initialization here and not a tp_init function because
57 * dirstate_item is immutable. */
57 * dirstate_item is immutable. */
58 dirstateItemObject *t;
58 dirstateItemObject *t;
59 int wc_tracked;
59 int wc_tracked;
60 int p1_tracked;
60 int p1_tracked;
61 int p2_info;
61 int p2_info;
62 int has_meaningful_data;
62 int has_meaningful_data;
63 int has_meaningful_mtime;
63 int has_meaningful_mtime;
64 int mode;
64 int mode;
65 int size;
65 int size;
66 int mtime_s;
66 int mtime_s;
67 int mtime_ns;
67 int mtime_ns;
68 PyObject *parentfiledata;
68 PyObject *parentfiledata;
69 PyObject *mtime;
69 PyObject *fallback_exec;
70 PyObject *fallback_exec;
70 PyObject *fallback_symlink;
71 PyObject *fallback_symlink;
71 static char *keywords_name[] = {
72 static char *keywords_name[] = {
72 "wc_tracked", "p1_tracked", "p2_info",
73 "wc_tracked", "p1_tracked", "p2_info",
73 "has_meaningful_data", "has_meaningful_mtime", "parentfiledata",
74 "has_meaningful_data", "has_meaningful_mtime", "parentfiledata",
74 "fallback_exec", "fallback_symlink", NULL,
75 "fallback_exec", "fallback_symlink", NULL,
75 };
76 };
76 wc_tracked = 0;
77 wc_tracked = 0;
77 p1_tracked = 0;
78 p1_tracked = 0;
78 p2_info = 0;
79 p2_info = 0;
79 has_meaningful_mtime = 1;
80 has_meaningful_mtime = 1;
80 has_meaningful_data = 1;
81 has_meaningful_data = 1;
81 parentfiledata = Py_None;
82 parentfiledata = Py_None;
82 fallback_exec = Py_None;
83 fallback_exec = Py_None;
83 fallback_symlink = Py_None;
84 fallback_symlink = Py_None;
84 if (!PyArg_ParseTupleAndKeywords(args, kwds, "|iiiiiOOO", keywords_name,
85 if (!PyArg_ParseTupleAndKeywords(args, kwds, "|iiiiiOOO", keywords_name,
85 &wc_tracked, &p1_tracked, &p2_info,
86 &wc_tracked, &p1_tracked, &p2_info,
86 &has_meaningful_data,
87 &has_meaningful_data,
87 &has_meaningful_mtime, &parentfiledata,
88 &has_meaningful_mtime, &parentfiledata,
88 &fallback_exec, &fallback_symlink)) {
89 &fallback_exec, &fallback_symlink)) {
89 return NULL;
90 return NULL;
90 }
91 }
91 t = (dirstateItemObject *)subtype->tp_alloc(subtype, 1);
92 t = (dirstateItemObject *)subtype->tp_alloc(subtype, 1);
92 if (!t) {
93 if (!t) {
93 return NULL;
94 return NULL;
94 }
95 }
95
96
96 t->flags = 0;
97 t->flags = 0;
97 if (wc_tracked) {
98 if (wc_tracked) {
98 t->flags |= dirstate_flag_wc_tracked;
99 t->flags |= dirstate_flag_wc_tracked;
99 }
100 }
100 if (p1_tracked) {
101 if (p1_tracked) {
101 t->flags |= dirstate_flag_p1_tracked;
102 t->flags |= dirstate_flag_p1_tracked;
102 }
103 }
103 if (p2_info) {
104 if (p2_info) {
104 t->flags |= dirstate_flag_p2_info;
105 t->flags |= dirstate_flag_p2_info;
105 }
106 }
106
107
107 if (fallback_exec != Py_None) {
108 if (fallback_exec != Py_None) {
108 t->flags |= dirstate_flag_has_fallback_exec;
109 t->flags |= dirstate_flag_has_fallback_exec;
109 if (PyObject_IsTrue(fallback_exec)) {
110 if (PyObject_IsTrue(fallback_exec)) {
110 t->flags |= dirstate_flag_fallback_exec;
111 t->flags |= dirstate_flag_fallback_exec;
111 }
112 }
112 }
113 }
113 if (fallback_symlink != Py_None) {
114 if (fallback_symlink != Py_None) {
114 t->flags |= dirstate_flag_has_fallback_symlink;
115 t->flags |= dirstate_flag_has_fallback_symlink;
115 if (PyObject_IsTrue(fallback_symlink)) {
116 if (PyObject_IsTrue(fallback_symlink)) {
116 t->flags |= dirstate_flag_fallback_symlink;
117 t->flags |= dirstate_flag_fallback_symlink;
117 }
118 }
118 }
119 }
119
120
120 if (parentfiledata != Py_None) {
121 if (parentfiledata != Py_None) {
121 if (!PyArg_ParseTuple(parentfiledata, "ii(ii)", &mode, &size,
122 if (!PyArg_ParseTuple(parentfiledata, "iiO", &mode, &size,
122 &mtime_s, &mtime_ns)) {
123 &mtime)) {
123 return NULL;
124 return NULL;
124 }
125 }
126 if (mtime != Py_None) {
127 if (!PyArg_ParseTuple(mtime, "ii", &mtime_s,
128 &mtime_ns)) {
129 return NULL;
130 }
131 } else {
132 has_meaningful_mtime = 0;
133 }
125 } else {
134 } else {
126 has_meaningful_data = 0;
135 has_meaningful_data = 0;
127 has_meaningful_mtime = 0;
136 has_meaningful_mtime = 0;
128 }
137 }
129 if (has_meaningful_data) {
138 if (has_meaningful_data) {
130 t->flags |= dirstate_flag_has_meaningful_data;
139 t->flags |= dirstate_flag_has_meaningful_data;
131 t->mode = mode;
140 t->mode = mode;
132 t->size = size;
141 t->size = size;
133 } else {
142 } else {
134 t->mode = 0;
143 t->mode = 0;
135 t->size = 0;
144 t->size = 0;
136 }
145 }
137 if (has_meaningful_mtime) {
146 if (has_meaningful_mtime) {
138 t->flags |= dirstate_flag_has_mtime;
147 t->flags |= dirstate_flag_has_mtime;
139 t->mtime_s = mtime_s;
148 t->mtime_s = mtime_s;
140 t->mtime_ns = mtime_ns;
149 t->mtime_ns = mtime_ns;
141 } else {
150 } else {
142 t->mtime_s = 0;
151 t->mtime_s = 0;
143 t->mtime_ns = 0;
152 t->mtime_ns = 0;
144 }
153 }
145 return (PyObject *)t;
154 return (PyObject *)t;
146 }
155 }
147
156
148 static void dirstate_item_dealloc(PyObject *o)
157 static void dirstate_item_dealloc(PyObject *o)
149 {
158 {
150 PyObject_Del(o);
159 PyObject_Del(o);
151 }
160 }
152
161
153 static inline bool dirstate_item_c_tracked(dirstateItemObject *self)
162 static inline bool dirstate_item_c_tracked(dirstateItemObject *self)
154 {
163 {
155 return (self->flags & dirstate_flag_wc_tracked);
164 return (self->flags & dirstate_flag_wc_tracked);
156 }
165 }
157
166
158 static inline bool dirstate_item_c_any_tracked(dirstateItemObject *self)
167 static inline bool dirstate_item_c_any_tracked(dirstateItemObject *self)
159 {
168 {
160 const int mask = dirstate_flag_wc_tracked | dirstate_flag_p1_tracked |
169 const int mask = dirstate_flag_wc_tracked | dirstate_flag_p1_tracked |
161 dirstate_flag_p2_info;
170 dirstate_flag_p2_info;
162 return (self->flags & mask);
171 return (self->flags & mask);
163 }
172 }
164
173
165 static inline bool dirstate_item_c_added(dirstateItemObject *self)
174 static inline bool dirstate_item_c_added(dirstateItemObject *self)
166 {
175 {
167 const int mask = (dirstate_flag_wc_tracked | dirstate_flag_p1_tracked |
176 const int mask = (dirstate_flag_wc_tracked | dirstate_flag_p1_tracked |
168 dirstate_flag_p2_info);
177 dirstate_flag_p2_info);
169 const int target = dirstate_flag_wc_tracked;
178 const int target = dirstate_flag_wc_tracked;
170 return (self->flags & mask) == target;
179 return (self->flags & mask) == target;
171 }
180 }
172
181
173 static inline bool dirstate_item_c_removed(dirstateItemObject *self)
182 static inline bool dirstate_item_c_removed(dirstateItemObject *self)
174 {
183 {
175 if (self->flags & dirstate_flag_wc_tracked) {
184 if (self->flags & dirstate_flag_wc_tracked) {
176 return false;
185 return false;
177 }
186 }
178 return (self->flags &
187 return (self->flags &
179 (dirstate_flag_p1_tracked | dirstate_flag_p2_info));
188 (dirstate_flag_p1_tracked | dirstate_flag_p2_info));
180 }
189 }
181
190
182 static inline bool dirstate_item_c_merged(dirstateItemObject *self)
191 static inline bool dirstate_item_c_merged(dirstateItemObject *self)
183 {
192 {
184 return ((self->flags & dirstate_flag_wc_tracked) &&
193 return ((self->flags & dirstate_flag_wc_tracked) &&
185 (self->flags & dirstate_flag_p1_tracked) &&
194 (self->flags & dirstate_flag_p1_tracked) &&
186 (self->flags & dirstate_flag_p2_info));
195 (self->flags & dirstate_flag_p2_info));
187 }
196 }
188
197
189 static inline bool dirstate_item_c_from_p2(dirstateItemObject *self)
198 static inline bool dirstate_item_c_from_p2(dirstateItemObject *self)
190 {
199 {
191 return ((self->flags & dirstate_flag_wc_tracked) &&
200 return ((self->flags & dirstate_flag_wc_tracked) &&
192 !(self->flags & dirstate_flag_p1_tracked) &&
201 !(self->flags & dirstate_flag_p1_tracked) &&
193 (self->flags & dirstate_flag_p2_info));
202 (self->flags & dirstate_flag_p2_info));
194 }
203 }
195
204
196 static inline char dirstate_item_c_v1_state(dirstateItemObject *self)
205 static inline char dirstate_item_c_v1_state(dirstateItemObject *self)
197 {
206 {
198 if (dirstate_item_c_removed(self)) {
207 if (dirstate_item_c_removed(self)) {
199 return 'r';
208 return 'r';
200 } else if (dirstate_item_c_merged(self)) {
209 } else if (dirstate_item_c_merged(self)) {
201 return 'm';
210 return 'm';
202 } else if (dirstate_item_c_added(self)) {
211 } else if (dirstate_item_c_added(self)) {
203 return 'a';
212 return 'a';
204 } else {
213 } else {
205 return 'n';
214 return 'n';
206 }
215 }
207 }
216 }
208
217
209 static inline bool dirstate_item_c_has_fallback_exec(dirstateItemObject *self)
218 static inline bool dirstate_item_c_has_fallback_exec(dirstateItemObject *self)
210 {
219 {
211 return (bool)self->flags & dirstate_flag_has_fallback_exec;
220 return (bool)self->flags & dirstate_flag_has_fallback_exec;
212 }
221 }
213
222
214 static inline bool
223 static inline bool
215 dirstate_item_c_has_fallback_symlink(dirstateItemObject *self)
224 dirstate_item_c_has_fallback_symlink(dirstateItemObject *self)
216 {
225 {
217 return (bool)self->flags & dirstate_flag_has_fallback_symlink;
226 return (bool)self->flags & dirstate_flag_has_fallback_symlink;
218 }
227 }
219
228
220 static inline int dirstate_item_c_v1_mode(dirstateItemObject *self)
229 static inline int dirstate_item_c_v1_mode(dirstateItemObject *self)
221 {
230 {
222 if (self->flags & dirstate_flag_has_meaningful_data) {
231 if (self->flags & dirstate_flag_has_meaningful_data) {
223 return self->mode;
232 return self->mode;
224 } else {
233 } else {
225 return 0;
234 return 0;
226 }
235 }
227 }
236 }
228
237
229 static inline int dirstate_item_c_v1_size(dirstateItemObject *self)
238 static inline int dirstate_item_c_v1_size(dirstateItemObject *self)
230 {
239 {
231 if (!(self->flags & dirstate_flag_wc_tracked) &&
240 if (!(self->flags & dirstate_flag_wc_tracked) &&
232 (self->flags & dirstate_flag_p2_info)) {
241 (self->flags & dirstate_flag_p2_info)) {
233 if (self->flags & dirstate_flag_p1_tracked) {
242 if (self->flags & dirstate_flag_p1_tracked) {
234 return dirstate_v1_nonnormal;
243 return dirstate_v1_nonnormal;
235 } else {
244 } else {
236 return dirstate_v1_from_p2;
245 return dirstate_v1_from_p2;
237 }
246 }
238 } else if (dirstate_item_c_removed(self)) {
247 } else if (dirstate_item_c_removed(self)) {
239 return 0;
248 return 0;
240 } else if (self->flags & dirstate_flag_p2_info) {
249 } else if (self->flags & dirstate_flag_p2_info) {
241 return dirstate_v1_from_p2;
250 return dirstate_v1_from_p2;
242 } else if (dirstate_item_c_added(self)) {
251 } else if (dirstate_item_c_added(self)) {
243 return dirstate_v1_nonnormal;
252 return dirstate_v1_nonnormal;
244 } else if (self->flags & dirstate_flag_has_meaningful_data) {
253 } else if (self->flags & dirstate_flag_has_meaningful_data) {
245 return self->size;
254 return self->size;
246 } else {
255 } else {
247 return dirstate_v1_nonnormal;
256 return dirstate_v1_nonnormal;
248 }
257 }
249 }
258 }
250
259
251 static inline int dirstate_item_c_v1_mtime(dirstateItemObject *self)
260 static inline int dirstate_item_c_v1_mtime(dirstateItemObject *self)
252 {
261 {
253 if (dirstate_item_c_removed(self)) {
262 if (dirstate_item_c_removed(self)) {
254 return 0;
263 return 0;
255 } else if (!(self->flags & dirstate_flag_has_mtime) ||
264 } else if (!(self->flags & dirstate_flag_has_mtime) ||
256 !(self->flags & dirstate_flag_p1_tracked) ||
265 !(self->flags & dirstate_flag_p1_tracked) ||
257 !(self->flags & dirstate_flag_wc_tracked) ||
266 !(self->flags & dirstate_flag_wc_tracked) ||
258 (self->flags & dirstate_flag_p2_info)) {
267 (self->flags & dirstate_flag_p2_info)) {
259 return ambiguous_time;
268 return ambiguous_time;
260 } else {
269 } else {
261 return self->mtime_s;
270 return self->mtime_s;
262 }
271 }
263 }
272 }
264
273
265 static PyObject *dirstate_item_v2_data(dirstateItemObject *self)
274 static PyObject *dirstate_item_v2_data(dirstateItemObject *self)
266 {
275 {
267 int flags = self->flags;
276 int flags = self->flags;
268 int mode = dirstate_item_c_v1_mode(self);
277 int mode = dirstate_item_c_v1_mode(self);
269 #ifdef S_IXUSR
278 #ifdef S_IXUSR
270 /* This is for platforms with an exec bit */
279 /* This is for platforms with an exec bit */
271 if ((mode & S_IXUSR) != 0) {
280 if ((mode & S_IXUSR) != 0) {
272 flags |= dirstate_flag_mode_exec_perm;
281 flags |= dirstate_flag_mode_exec_perm;
273 } else {
282 } else {
274 flags &= ~dirstate_flag_mode_exec_perm;
283 flags &= ~dirstate_flag_mode_exec_perm;
275 }
284 }
276 #else
285 #else
277 flags &= ~dirstate_flag_mode_exec_perm;
286 flags &= ~dirstate_flag_mode_exec_perm;
278 #endif
287 #endif
279 #ifdef S_ISLNK
288 #ifdef S_ISLNK
280 /* This is for platforms with support for symlinks */
289 /* This is for platforms with support for symlinks */
281 if (S_ISLNK(mode)) {
290 if (S_ISLNK(mode)) {
282 flags |= dirstate_flag_mode_is_symlink;
291 flags |= dirstate_flag_mode_is_symlink;
283 } else {
292 } else {
284 flags &= ~dirstate_flag_mode_is_symlink;
293 flags &= ~dirstate_flag_mode_is_symlink;
285 }
294 }
286 #else
295 #else
287 flags &= ~dirstate_flag_mode_is_symlink;
296 flags &= ~dirstate_flag_mode_is_symlink;
288 #endif
297 #endif
289 return Py_BuildValue("iiii", flags, self->size, self->mtime_s,
298 return Py_BuildValue("iiii", flags, self->size, self->mtime_s,
290 self->mtime_ns);
299 self->mtime_ns);
291 };
300 };
292
301
293 static PyObject *dirstate_item_v1_state(dirstateItemObject *self)
302 static PyObject *dirstate_item_v1_state(dirstateItemObject *self)
294 {
303 {
295 char state = dirstate_item_c_v1_state(self);
304 char state = dirstate_item_c_v1_state(self);
296 return PyBytes_FromStringAndSize(&state, 1);
305 return PyBytes_FromStringAndSize(&state, 1);
297 };
306 };
298
307
299 static PyObject *dirstate_item_v1_mode(dirstateItemObject *self)
308 static PyObject *dirstate_item_v1_mode(dirstateItemObject *self)
300 {
309 {
301 return PyInt_FromLong(dirstate_item_c_v1_mode(self));
310 return PyInt_FromLong(dirstate_item_c_v1_mode(self));
302 };
311 };
303
312
304 static PyObject *dirstate_item_v1_size(dirstateItemObject *self)
313 static PyObject *dirstate_item_v1_size(dirstateItemObject *self)
305 {
314 {
306 return PyInt_FromLong(dirstate_item_c_v1_size(self));
315 return PyInt_FromLong(dirstate_item_c_v1_size(self));
307 };
316 };
308
317
309 static PyObject *dirstate_item_v1_mtime(dirstateItemObject *self)
318 static PyObject *dirstate_item_v1_mtime(dirstateItemObject *self)
310 {
319 {
311 return PyInt_FromLong(dirstate_item_c_v1_mtime(self));
320 return PyInt_FromLong(dirstate_item_c_v1_mtime(self));
312 };
321 };
313
322
314 static PyObject *dirstate_item_need_delay(dirstateItemObject *self,
323 static PyObject *dirstate_item_need_delay(dirstateItemObject *self,
315 PyObject *now)
324 PyObject *now)
316 {
325 {
317 int now_s;
326 int now_s;
318 int now_ns;
327 int now_ns;
319 if (!PyArg_ParseTuple(now, "ii", &now_s, &now_ns)) {
328 if (!PyArg_ParseTuple(now, "ii", &now_s, &now_ns)) {
320 return NULL;
329 return NULL;
321 }
330 }
322 if (dirstate_item_c_v1_state(self) == 'n' && self->mtime_s == now_s) {
331 if (dirstate_item_c_v1_state(self) == 'n' && self->mtime_s == now_s) {
323 Py_RETURN_TRUE;
332 Py_RETURN_TRUE;
324 } else {
333 } else {
325 Py_RETURN_FALSE;
334 Py_RETURN_FALSE;
326 }
335 }
327 };
336 };
328
337
329 static PyObject *dirstate_item_mtime_likely_equal_to(dirstateItemObject *self,
338 static PyObject *dirstate_item_mtime_likely_equal_to(dirstateItemObject *self,
330 PyObject *other)
339 PyObject *other)
331 {
340 {
332 int other_s;
341 int other_s;
333 int other_ns;
342 int other_ns;
334 if (!PyArg_ParseTuple(other, "ii", &other_s, &other_ns)) {
343 if (!PyArg_ParseTuple(other, "ii", &other_s, &other_ns)) {
335 return NULL;
344 return NULL;
336 }
345 }
337 if ((self->flags & dirstate_flag_has_mtime) &&
346 if ((self->flags & dirstate_flag_has_mtime) &&
338 self->mtime_s == other_s &&
347 self->mtime_s == other_s &&
339 (self->mtime_ns == other_ns || self->mtime_ns == 0 ||
348 (self->mtime_ns == other_ns || self->mtime_ns == 0 ||
340 other_ns == 0)) {
349 other_ns == 0)) {
341 Py_RETURN_TRUE;
350 Py_RETURN_TRUE;
342 } else {
351 } else {
343 Py_RETURN_FALSE;
352 Py_RETURN_FALSE;
344 }
353 }
345 };
354 };
346
355
347 /* This will never change since it's bound to V1
356 /* This will never change since it's bound to V1
348 */
357 */
349 static inline dirstateItemObject *
358 static inline dirstateItemObject *
350 dirstate_item_from_v1_data(char state, int mode, int size, int mtime)
359 dirstate_item_from_v1_data(char state, int mode, int size, int mtime)
351 {
360 {
352 dirstateItemObject *t =
361 dirstateItemObject *t =
353 PyObject_New(dirstateItemObject, &dirstateItemType);
362 PyObject_New(dirstateItemObject, &dirstateItemType);
354 if (!t) {
363 if (!t) {
355 return NULL;
364 return NULL;
356 }
365 }
357 t->flags = 0;
366 t->flags = 0;
358 t->mode = 0;
367 t->mode = 0;
359 t->size = 0;
368 t->size = 0;
360 t->mtime_s = 0;
369 t->mtime_s = 0;
361 t->mtime_ns = 0;
370 t->mtime_ns = 0;
362
371
363 if (state == 'm') {
372 if (state == 'm') {
364 t->flags = (dirstate_flag_wc_tracked |
373 t->flags = (dirstate_flag_wc_tracked |
365 dirstate_flag_p1_tracked | dirstate_flag_p2_info);
374 dirstate_flag_p1_tracked | dirstate_flag_p2_info);
366 } else if (state == 'a') {
375 } else if (state == 'a') {
367 t->flags = dirstate_flag_wc_tracked;
376 t->flags = dirstate_flag_wc_tracked;
368 } else if (state == 'r') {
377 } else if (state == 'r') {
369 if (size == dirstate_v1_nonnormal) {
378 if (size == dirstate_v1_nonnormal) {
370 t->flags =
379 t->flags =
371 dirstate_flag_p1_tracked | dirstate_flag_p2_info;
380 dirstate_flag_p1_tracked | dirstate_flag_p2_info;
372 } else if (size == dirstate_v1_from_p2) {
381 } else if (size == dirstate_v1_from_p2) {
373 t->flags = dirstate_flag_p2_info;
382 t->flags = dirstate_flag_p2_info;
374 } else {
383 } else {
375 t->flags = dirstate_flag_p1_tracked;
384 t->flags = dirstate_flag_p1_tracked;
376 }
385 }
377 } else if (state == 'n') {
386 } else if (state == 'n') {
378 if (size == dirstate_v1_from_p2) {
387 if (size == dirstate_v1_from_p2) {
379 t->flags =
388 t->flags =
380 dirstate_flag_wc_tracked | dirstate_flag_p2_info;
389 dirstate_flag_wc_tracked | dirstate_flag_p2_info;
381 } else if (size == dirstate_v1_nonnormal) {
390 } else if (size == dirstate_v1_nonnormal) {
382 t->flags =
391 t->flags =
383 dirstate_flag_wc_tracked | dirstate_flag_p1_tracked;
392 dirstate_flag_wc_tracked | dirstate_flag_p1_tracked;
384 } else if (mtime == ambiguous_time) {
393 } else if (mtime == ambiguous_time) {
385 t->flags = (dirstate_flag_wc_tracked |
394 t->flags = (dirstate_flag_wc_tracked |
386 dirstate_flag_p1_tracked |
395 dirstate_flag_p1_tracked |
387 dirstate_flag_has_meaningful_data);
396 dirstate_flag_has_meaningful_data);
388 t->mode = mode;
397 t->mode = mode;
389 t->size = size;
398 t->size = size;
390 } else {
399 } else {
391 t->flags = (dirstate_flag_wc_tracked |
400 t->flags = (dirstate_flag_wc_tracked |
392 dirstate_flag_p1_tracked |
401 dirstate_flag_p1_tracked |
393 dirstate_flag_has_meaningful_data |
402 dirstate_flag_has_meaningful_data |
394 dirstate_flag_has_mtime);
403 dirstate_flag_has_mtime);
395 t->mode = mode;
404 t->mode = mode;
396 t->size = size;
405 t->size = size;
397 t->mtime_s = mtime;
406 t->mtime_s = mtime;
398 }
407 }
399 } else {
408 } else {
400 PyErr_Format(PyExc_RuntimeError,
409 PyErr_Format(PyExc_RuntimeError,
401 "unknown state: `%c` (%d, %d, %d)", state, mode,
410 "unknown state: `%c` (%d, %d, %d)", state, mode,
402 size, mtime, NULL);
411 size, mtime, NULL);
403 Py_DECREF(t);
412 Py_DECREF(t);
404 return NULL;
413 return NULL;
405 }
414 }
406
415
407 return t;
416 return t;
408 }
417 }
409
418
410 /* This will never change since it's bound to V1, unlike `dirstate_item_new` */
419 /* This will never change since it's bound to V1, unlike `dirstate_item_new` */
411 static PyObject *dirstate_item_from_v1_meth(PyTypeObject *subtype,
420 static PyObject *dirstate_item_from_v1_meth(PyTypeObject *subtype,
412 PyObject *args)
421 PyObject *args)
413 {
422 {
414 /* We do all the initialization here and not a tp_init function because
423 /* We do all the initialization here and not a tp_init function because
415 * dirstate_item is immutable. */
424 * dirstate_item is immutable. */
416 char state;
425 char state;
417 int size, mode, mtime;
426 int size, mode, mtime;
418 if (!PyArg_ParseTuple(args, "ciii", &state, &mode, &size, &mtime)) {
427 if (!PyArg_ParseTuple(args, "ciii", &state, &mode, &size, &mtime)) {
419 return NULL;
428 return NULL;
420 }
429 }
421 return (PyObject *)dirstate_item_from_v1_data(state, mode, size, mtime);
430 return (PyObject *)dirstate_item_from_v1_data(state, mode, size, mtime);
422 };
431 };
423
432
424 static PyObject *dirstate_item_from_v2_meth(PyTypeObject *subtype,
433 static PyObject *dirstate_item_from_v2_meth(PyTypeObject *subtype,
425 PyObject *args)
434 PyObject *args)
426 {
435 {
427 dirstateItemObject *t =
436 dirstateItemObject *t =
428 PyObject_New(dirstateItemObject, &dirstateItemType);
437 PyObject_New(dirstateItemObject, &dirstateItemType);
429 if (!t) {
438 if (!t) {
430 return NULL;
439 return NULL;
431 }
440 }
432 if (!PyArg_ParseTuple(args, "iiii", &t->flags, &t->size, &t->mtime_s,
441 if (!PyArg_ParseTuple(args, "iiii", &t->flags, &t->size, &t->mtime_s,
433 &t->mtime_ns)) {
442 &t->mtime_ns)) {
434 return NULL;
443 return NULL;
435 }
444 }
436 if (t->flags & dirstate_flag_expected_state_is_modified) {
445 if (t->flags & dirstate_flag_expected_state_is_modified) {
437 t->flags &= ~(dirstate_flag_expected_state_is_modified |
446 t->flags &= ~(dirstate_flag_expected_state_is_modified |
438 dirstate_flag_has_meaningful_data |
447 dirstate_flag_has_meaningful_data |
439 dirstate_flag_has_mtime);
448 dirstate_flag_has_mtime);
440 }
449 }
441 if (t->flags & dirstate_flag_mtime_second_ambiguous) {
450 if (t->flags & dirstate_flag_mtime_second_ambiguous) {
442 /* The current code is not able to do the more subtle comparison
451 /* The current code is not able to do the more subtle comparison
443 * that the MTIME_SECOND_AMBIGUOUS requires. So we ignore the
452 * that the MTIME_SECOND_AMBIGUOUS requires. So we ignore the
444 * mtime */
453 * mtime */
445 t->flags &= ~(dirstate_flag_mtime_second_ambiguous |
454 t->flags &= ~(dirstate_flag_mtime_second_ambiguous |
446 dirstate_flag_has_meaningful_data |
455 dirstate_flag_has_meaningful_data |
447 dirstate_flag_has_mtime);
456 dirstate_flag_has_mtime);
448 }
457 }
449 t->mode = 0;
458 t->mode = 0;
450 if (t->flags & dirstate_flag_has_meaningful_data) {
459 if (t->flags & dirstate_flag_has_meaningful_data) {
451 if (t->flags & dirstate_flag_mode_exec_perm) {
460 if (t->flags & dirstate_flag_mode_exec_perm) {
452 t->mode = 0755;
461 t->mode = 0755;
453 } else {
462 } else {
454 t->mode = 0644;
463 t->mode = 0644;
455 }
464 }
456 if (t->flags & dirstate_flag_mode_is_symlink) {
465 if (t->flags & dirstate_flag_mode_is_symlink) {
457 t->mode |= S_IFLNK;
466 t->mode |= S_IFLNK;
458 } else {
467 } else {
459 t->mode |= S_IFREG;
468 t->mode |= S_IFREG;
460 }
469 }
461 }
470 }
462 return (PyObject *)t;
471 return (PyObject *)t;
463 };
472 };
464
473
465 /* This means the next status call will have to actually check its content
474 /* This means the next status call will have to actually check its content
466 to make sure it is correct. */
475 to make sure it is correct. */
467 static PyObject *dirstate_item_set_possibly_dirty(dirstateItemObject *self)
476 static PyObject *dirstate_item_set_possibly_dirty(dirstateItemObject *self)
468 {
477 {
469 self->flags &= ~dirstate_flag_has_mtime;
478 self->flags &= ~dirstate_flag_has_mtime;
470 Py_RETURN_NONE;
479 Py_RETURN_NONE;
471 }
480 }
472
481
473 /* See docstring of the python implementation for details */
482 /* See docstring of the python implementation for details */
474 static PyObject *dirstate_item_set_clean(dirstateItemObject *self,
483 static PyObject *dirstate_item_set_clean(dirstateItemObject *self,
475 PyObject *args)
484 PyObject *args)
476 {
485 {
477 int size, mode, mtime_s, mtime_ns;
486 int size, mode, mtime_s, mtime_ns;
478 if (!PyArg_ParseTuple(args, "ii(ii)", &mode, &size, &mtime_s,
487 PyObject *mtime;
479 &mtime_ns)) {
488 mtime_s = 0;
489 mtime_ns = 0;
490 if (!PyArg_ParseTuple(args, "iiO", &mode, &size, &mtime)) {
480 return NULL;
491 return NULL;
481 }
492 }
493 if (mtime != Py_None) {
494 if (!PyArg_ParseTuple(mtime, "ii", &mtime_s, &mtime_ns)) {
495 return NULL;
496 }
497 } else {
498 self->flags &= ~dirstate_flag_has_mtime;
499 }
482 self->flags = dirstate_flag_wc_tracked | dirstate_flag_p1_tracked |
500 self->flags = dirstate_flag_wc_tracked | dirstate_flag_p1_tracked |
483 dirstate_flag_has_meaningful_data |
501 dirstate_flag_has_meaningful_data |
484 dirstate_flag_has_mtime;
502 dirstate_flag_has_mtime;
485 self->mode = mode;
503 self->mode = mode;
486 self->size = size;
504 self->size = size;
487 self->mtime_s = mtime_s;
505 self->mtime_s = mtime_s;
488 self->mtime_ns = mtime_ns;
506 self->mtime_ns = mtime_ns;
489 Py_RETURN_NONE;
507 Py_RETURN_NONE;
490 }
508 }
491
509
492 static PyObject *dirstate_item_set_tracked(dirstateItemObject *self)
510 static PyObject *dirstate_item_set_tracked(dirstateItemObject *self)
493 {
511 {
494 self->flags |= dirstate_flag_wc_tracked;
512 self->flags |= dirstate_flag_wc_tracked;
495 self->flags &= ~dirstate_flag_has_mtime;
513 self->flags &= ~dirstate_flag_has_mtime;
496 Py_RETURN_NONE;
514 Py_RETURN_NONE;
497 }
515 }
498
516
499 static PyObject *dirstate_item_set_untracked(dirstateItemObject *self)
517 static PyObject *dirstate_item_set_untracked(dirstateItemObject *self)
500 {
518 {
501 self->flags &= ~dirstate_flag_wc_tracked;
519 self->flags &= ~dirstate_flag_wc_tracked;
502 self->mode = 0;
520 self->mode = 0;
503 self->size = 0;
521 self->size = 0;
504 self->mtime_s = 0;
522 self->mtime_s = 0;
505 self->mtime_ns = 0;
523 self->mtime_ns = 0;
506 Py_RETURN_NONE;
524 Py_RETURN_NONE;
507 }
525 }
508
526
509 static PyObject *dirstate_item_drop_merge_data(dirstateItemObject *self)
527 static PyObject *dirstate_item_drop_merge_data(dirstateItemObject *self)
510 {
528 {
511 if (self->flags & dirstate_flag_p2_info) {
529 if (self->flags & dirstate_flag_p2_info) {
512 self->flags &= ~(dirstate_flag_p2_info |
530 self->flags &= ~(dirstate_flag_p2_info |
513 dirstate_flag_has_meaningful_data |
531 dirstate_flag_has_meaningful_data |
514 dirstate_flag_has_mtime);
532 dirstate_flag_has_mtime);
515 self->mode = 0;
533 self->mode = 0;
516 self->size = 0;
534 self->size = 0;
517 self->mtime_s = 0;
535 self->mtime_s = 0;
518 self->mtime_ns = 0;
536 self->mtime_ns = 0;
519 }
537 }
520 Py_RETURN_NONE;
538 Py_RETURN_NONE;
521 }
539 }
522 static PyMethodDef dirstate_item_methods[] = {
540 static PyMethodDef dirstate_item_methods[] = {
523 {"v2_data", (PyCFunction)dirstate_item_v2_data, METH_NOARGS,
541 {"v2_data", (PyCFunction)dirstate_item_v2_data, METH_NOARGS,
524 "return data suitable for v2 serialization"},
542 "return data suitable for v2 serialization"},
525 {"v1_state", (PyCFunction)dirstate_item_v1_state, METH_NOARGS,
543 {"v1_state", (PyCFunction)dirstate_item_v1_state, METH_NOARGS,
526 "return a \"state\" suitable for v1 serialization"},
544 "return a \"state\" suitable for v1 serialization"},
527 {"v1_mode", (PyCFunction)dirstate_item_v1_mode, METH_NOARGS,
545 {"v1_mode", (PyCFunction)dirstate_item_v1_mode, METH_NOARGS,
528 "return a \"mode\" suitable for v1 serialization"},
546 "return a \"mode\" suitable for v1 serialization"},
529 {"v1_size", (PyCFunction)dirstate_item_v1_size, METH_NOARGS,
547 {"v1_size", (PyCFunction)dirstate_item_v1_size, METH_NOARGS,
530 "return a \"size\" suitable for v1 serialization"},
548 "return a \"size\" suitable for v1 serialization"},
531 {"v1_mtime", (PyCFunction)dirstate_item_v1_mtime, METH_NOARGS,
549 {"v1_mtime", (PyCFunction)dirstate_item_v1_mtime, METH_NOARGS,
532 "return a \"mtime\" suitable for v1 serialization"},
550 "return a \"mtime\" suitable for v1 serialization"},
533 {"need_delay", (PyCFunction)dirstate_item_need_delay, METH_O,
551 {"need_delay", (PyCFunction)dirstate_item_need_delay, METH_O,
534 "True if the stored mtime would be ambiguous with the current time"},
552 "True if the stored mtime would be ambiguous with the current time"},
535 {"mtime_likely_equal_to", (PyCFunction)dirstate_item_mtime_likely_equal_to,
553 {"mtime_likely_equal_to", (PyCFunction)dirstate_item_mtime_likely_equal_to,
536 METH_O, "True if the stored mtime is likely equal to the given mtime"},
554 METH_O, "True if the stored mtime is likely equal to the given mtime"},
537 {"from_v1_data", (PyCFunction)dirstate_item_from_v1_meth,
555 {"from_v1_data", (PyCFunction)dirstate_item_from_v1_meth,
538 METH_VARARGS | METH_CLASS, "build a new DirstateItem object from V1 data"},
556 METH_VARARGS | METH_CLASS, "build a new DirstateItem object from V1 data"},
539 {"from_v2_data", (PyCFunction)dirstate_item_from_v2_meth,
557 {"from_v2_data", (PyCFunction)dirstate_item_from_v2_meth,
540 METH_VARARGS | METH_CLASS, "build a new DirstateItem object from V2 data"},
558 METH_VARARGS | METH_CLASS, "build a new DirstateItem object from V2 data"},
541 {"set_possibly_dirty", (PyCFunction)dirstate_item_set_possibly_dirty,
559 {"set_possibly_dirty", (PyCFunction)dirstate_item_set_possibly_dirty,
542 METH_NOARGS, "mark a file as \"possibly dirty\""},
560 METH_NOARGS, "mark a file as \"possibly dirty\""},
543 {"set_clean", (PyCFunction)dirstate_item_set_clean, METH_VARARGS,
561 {"set_clean", (PyCFunction)dirstate_item_set_clean, METH_VARARGS,
544 "mark a file as \"clean\""},
562 "mark a file as \"clean\""},
545 {"set_tracked", (PyCFunction)dirstate_item_set_tracked, METH_NOARGS,
563 {"set_tracked", (PyCFunction)dirstate_item_set_tracked, METH_NOARGS,
546 "mark a file as \"tracked\""},
564 "mark a file as \"tracked\""},
547 {"set_untracked", (PyCFunction)dirstate_item_set_untracked, METH_NOARGS,
565 {"set_untracked", (PyCFunction)dirstate_item_set_untracked, METH_NOARGS,
548 "mark a file as \"untracked\""},
566 "mark a file as \"untracked\""},
549 {"drop_merge_data", (PyCFunction)dirstate_item_drop_merge_data, METH_NOARGS,
567 {"drop_merge_data", (PyCFunction)dirstate_item_drop_merge_data, METH_NOARGS,
550 "remove all \"merge-only\" from a DirstateItem"},
568 "remove all \"merge-only\" from a DirstateItem"},
551 {NULL} /* Sentinel */
569 {NULL} /* Sentinel */
552 };
570 };
553
571
554 static PyObject *dirstate_item_get_mode(dirstateItemObject *self)
572 static PyObject *dirstate_item_get_mode(dirstateItemObject *self)
555 {
573 {
556 return PyInt_FromLong(dirstate_item_c_v1_mode(self));
574 return PyInt_FromLong(dirstate_item_c_v1_mode(self));
557 };
575 };
558
576
559 static PyObject *dirstate_item_get_size(dirstateItemObject *self)
577 static PyObject *dirstate_item_get_size(dirstateItemObject *self)
560 {
578 {
561 return PyInt_FromLong(dirstate_item_c_v1_size(self));
579 return PyInt_FromLong(dirstate_item_c_v1_size(self));
562 };
580 };
563
581
564 static PyObject *dirstate_item_get_mtime(dirstateItemObject *self)
582 static PyObject *dirstate_item_get_mtime(dirstateItemObject *self)
565 {
583 {
566 return PyInt_FromLong(dirstate_item_c_v1_mtime(self));
584 return PyInt_FromLong(dirstate_item_c_v1_mtime(self));
567 };
585 };
568
586
569 static PyObject *dirstate_item_get_state(dirstateItemObject *self)
587 static PyObject *dirstate_item_get_state(dirstateItemObject *self)
570 {
588 {
571 char state = dirstate_item_c_v1_state(self);
589 char state = dirstate_item_c_v1_state(self);
572 return PyBytes_FromStringAndSize(&state, 1);
590 return PyBytes_FromStringAndSize(&state, 1);
573 };
591 };
574
592
575 static PyObject *dirstate_item_get_has_fallback_exec(dirstateItemObject *self)
593 static PyObject *dirstate_item_get_has_fallback_exec(dirstateItemObject *self)
576 {
594 {
577 if (dirstate_item_c_has_fallback_exec(self)) {
595 if (dirstate_item_c_has_fallback_exec(self)) {
578 Py_RETURN_TRUE;
596 Py_RETURN_TRUE;
579 } else {
597 } else {
580 Py_RETURN_FALSE;
598 Py_RETURN_FALSE;
581 }
599 }
582 };
600 };
583
601
584 static PyObject *dirstate_item_get_fallback_exec(dirstateItemObject *self)
602 static PyObject *dirstate_item_get_fallback_exec(dirstateItemObject *self)
585 {
603 {
586 if (dirstate_item_c_has_fallback_exec(self)) {
604 if (dirstate_item_c_has_fallback_exec(self)) {
587 if (self->flags & dirstate_flag_fallback_exec) {
605 if (self->flags & dirstate_flag_fallback_exec) {
588 Py_RETURN_TRUE;
606 Py_RETURN_TRUE;
589 } else {
607 } else {
590 Py_RETURN_FALSE;
608 Py_RETURN_FALSE;
591 }
609 }
592 } else {
610 } else {
593 Py_RETURN_NONE;
611 Py_RETURN_NONE;
594 }
612 }
595 };
613 };
596
614
597 static int dirstate_item_set_fallback_exec(dirstateItemObject *self,
615 static int dirstate_item_set_fallback_exec(dirstateItemObject *self,
598 PyObject *value)
616 PyObject *value)
599 {
617 {
600 if ((value == Py_None) || (value == NULL)) {
618 if ((value == Py_None) || (value == NULL)) {
601 self->flags &= ~dirstate_flag_has_fallback_exec;
619 self->flags &= ~dirstate_flag_has_fallback_exec;
602 } else {
620 } else {
603 self->flags |= dirstate_flag_has_fallback_exec;
621 self->flags |= dirstate_flag_has_fallback_exec;
604 if (PyObject_IsTrue(value)) {
622 if (PyObject_IsTrue(value)) {
605 self->flags |= dirstate_flag_fallback_exec;
623 self->flags |= dirstate_flag_fallback_exec;
606 } else {
624 } else {
607 self->flags &= ~dirstate_flag_fallback_exec;
625 self->flags &= ~dirstate_flag_fallback_exec;
608 }
626 }
609 }
627 }
610 return 0;
628 return 0;
611 };
629 };
612
630
613 static PyObject *
631 static PyObject *
614 dirstate_item_get_has_fallback_symlink(dirstateItemObject *self)
632 dirstate_item_get_has_fallback_symlink(dirstateItemObject *self)
615 {
633 {
616 if (dirstate_item_c_has_fallback_symlink(self)) {
634 if (dirstate_item_c_has_fallback_symlink(self)) {
617 Py_RETURN_TRUE;
635 Py_RETURN_TRUE;
618 } else {
636 } else {
619 Py_RETURN_FALSE;
637 Py_RETURN_FALSE;
620 }
638 }
621 };
639 };
622
640
623 static PyObject *dirstate_item_get_fallback_symlink(dirstateItemObject *self)
641 static PyObject *dirstate_item_get_fallback_symlink(dirstateItemObject *self)
624 {
642 {
625 if (dirstate_item_c_has_fallback_symlink(self)) {
643 if (dirstate_item_c_has_fallback_symlink(self)) {
626 if (self->flags & dirstate_flag_fallback_symlink) {
644 if (self->flags & dirstate_flag_fallback_symlink) {
627 Py_RETURN_TRUE;
645 Py_RETURN_TRUE;
628 } else {
646 } else {
629 Py_RETURN_FALSE;
647 Py_RETURN_FALSE;
630 }
648 }
631 } else {
649 } else {
632 Py_RETURN_NONE;
650 Py_RETURN_NONE;
633 }
651 }
634 };
652 };
635
653
636 static int dirstate_item_set_fallback_symlink(dirstateItemObject *self,
654 static int dirstate_item_set_fallback_symlink(dirstateItemObject *self,
637 PyObject *value)
655 PyObject *value)
638 {
656 {
639 if ((value == Py_None) || (value == NULL)) {
657 if ((value == Py_None) || (value == NULL)) {
640 self->flags &= ~dirstate_flag_has_fallback_symlink;
658 self->flags &= ~dirstate_flag_has_fallback_symlink;
641 } else {
659 } else {
642 self->flags |= dirstate_flag_has_fallback_symlink;
660 self->flags |= dirstate_flag_has_fallback_symlink;
643 if (PyObject_IsTrue(value)) {
661 if (PyObject_IsTrue(value)) {
644 self->flags |= dirstate_flag_fallback_symlink;
662 self->flags |= dirstate_flag_fallback_symlink;
645 } else {
663 } else {
646 self->flags &= ~dirstate_flag_fallback_symlink;
664 self->flags &= ~dirstate_flag_fallback_symlink;
647 }
665 }
648 }
666 }
649 return 0;
667 return 0;
650 };
668 };
651
669
652 static PyObject *dirstate_item_get_tracked(dirstateItemObject *self)
670 static PyObject *dirstate_item_get_tracked(dirstateItemObject *self)
653 {
671 {
654 if (dirstate_item_c_tracked(self)) {
672 if (dirstate_item_c_tracked(self)) {
655 Py_RETURN_TRUE;
673 Py_RETURN_TRUE;
656 } else {
674 } else {
657 Py_RETURN_FALSE;
675 Py_RETURN_FALSE;
658 }
676 }
659 };
677 };
660 static PyObject *dirstate_item_get_p1_tracked(dirstateItemObject *self)
678 static PyObject *dirstate_item_get_p1_tracked(dirstateItemObject *self)
661 {
679 {
662 if (self->flags & dirstate_flag_p1_tracked) {
680 if (self->flags & dirstate_flag_p1_tracked) {
663 Py_RETURN_TRUE;
681 Py_RETURN_TRUE;
664 } else {
682 } else {
665 Py_RETURN_FALSE;
683 Py_RETURN_FALSE;
666 }
684 }
667 };
685 };
668
686
669 static PyObject *dirstate_item_get_added(dirstateItemObject *self)
687 static PyObject *dirstate_item_get_added(dirstateItemObject *self)
670 {
688 {
671 if (dirstate_item_c_added(self)) {
689 if (dirstate_item_c_added(self)) {
672 Py_RETURN_TRUE;
690 Py_RETURN_TRUE;
673 } else {
691 } else {
674 Py_RETURN_FALSE;
692 Py_RETURN_FALSE;
675 }
693 }
676 };
694 };
677
695
678 static PyObject *dirstate_item_get_p2_info(dirstateItemObject *self)
696 static PyObject *dirstate_item_get_p2_info(dirstateItemObject *self)
679 {
697 {
680 if (self->flags & dirstate_flag_wc_tracked &&
698 if (self->flags & dirstate_flag_wc_tracked &&
681 self->flags & dirstate_flag_p2_info) {
699 self->flags & dirstate_flag_p2_info) {
682 Py_RETURN_TRUE;
700 Py_RETURN_TRUE;
683 } else {
701 } else {
684 Py_RETURN_FALSE;
702 Py_RETURN_FALSE;
685 }
703 }
686 };
704 };
687
705
688 static PyObject *dirstate_item_get_merged(dirstateItemObject *self)
706 static PyObject *dirstate_item_get_merged(dirstateItemObject *self)
689 {
707 {
690 if (dirstate_item_c_merged(self)) {
708 if (dirstate_item_c_merged(self)) {
691 Py_RETURN_TRUE;
709 Py_RETURN_TRUE;
692 } else {
710 } else {
693 Py_RETURN_FALSE;
711 Py_RETURN_FALSE;
694 }
712 }
695 };
713 };
696
714
697 static PyObject *dirstate_item_get_from_p2(dirstateItemObject *self)
715 static PyObject *dirstate_item_get_from_p2(dirstateItemObject *self)
698 {
716 {
699 if (dirstate_item_c_from_p2(self)) {
717 if (dirstate_item_c_from_p2(self)) {
700 Py_RETURN_TRUE;
718 Py_RETURN_TRUE;
701 } else {
719 } else {
702 Py_RETURN_FALSE;
720 Py_RETURN_FALSE;
703 }
721 }
704 };
722 };
705
723
706 static PyObject *dirstate_item_get_maybe_clean(dirstateItemObject *self)
724 static PyObject *dirstate_item_get_maybe_clean(dirstateItemObject *self)
707 {
725 {
708 if (!(self->flags & dirstate_flag_wc_tracked)) {
726 if (!(self->flags & dirstate_flag_wc_tracked)) {
709 Py_RETURN_FALSE;
727 Py_RETURN_FALSE;
710 } else if (!(self->flags & dirstate_flag_p1_tracked)) {
728 } else if (!(self->flags & dirstate_flag_p1_tracked)) {
711 Py_RETURN_FALSE;
729 Py_RETURN_FALSE;
712 } else if (self->flags & dirstate_flag_p2_info) {
730 } else if (self->flags & dirstate_flag_p2_info) {
713 Py_RETURN_FALSE;
731 Py_RETURN_FALSE;
714 } else {
732 } else {
715 Py_RETURN_TRUE;
733 Py_RETURN_TRUE;
716 }
734 }
717 };
735 };
718
736
719 static PyObject *dirstate_item_get_any_tracked(dirstateItemObject *self)
737 static PyObject *dirstate_item_get_any_tracked(dirstateItemObject *self)
720 {
738 {
721 if (dirstate_item_c_any_tracked(self)) {
739 if (dirstate_item_c_any_tracked(self)) {
722 Py_RETURN_TRUE;
740 Py_RETURN_TRUE;
723 } else {
741 } else {
724 Py_RETURN_FALSE;
742 Py_RETURN_FALSE;
725 }
743 }
726 };
744 };
727
745
728 static PyObject *dirstate_item_get_removed(dirstateItemObject *self)
746 static PyObject *dirstate_item_get_removed(dirstateItemObject *self)
729 {
747 {
730 if (dirstate_item_c_removed(self)) {
748 if (dirstate_item_c_removed(self)) {
731 Py_RETURN_TRUE;
749 Py_RETURN_TRUE;
732 } else {
750 } else {
733 Py_RETURN_FALSE;
751 Py_RETURN_FALSE;
734 }
752 }
735 };
753 };
736
754
737 static PyGetSetDef dirstate_item_getset[] = {
755 static PyGetSetDef dirstate_item_getset[] = {
738 {"mode", (getter)dirstate_item_get_mode, NULL, "mode", NULL},
756 {"mode", (getter)dirstate_item_get_mode, NULL, "mode", NULL},
739 {"size", (getter)dirstate_item_get_size, NULL, "size", NULL},
757 {"size", (getter)dirstate_item_get_size, NULL, "size", NULL},
740 {"mtime", (getter)dirstate_item_get_mtime, NULL, "mtime", NULL},
758 {"mtime", (getter)dirstate_item_get_mtime, NULL, "mtime", NULL},
741 {"state", (getter)dirstate_item_get_state, NULL, "state", NULL},
759 {"state", (getter)dirstate_item_get_state, NULL, "state", NULL},
742 {"has_fallback_exec", (getter)dirstate_item_get_has_fallback_exec, NULL,
760 {"has_fallback_exec", (getter)dirstate_item_get_has_fallback_exec, NULL,
743 "has_fallback_exec", NULL},
761 "has_fallback_exec", NULL},
744 {"fallback_exec", (getter)dirstate_item_get_fallback_exec,
762 {"fallback_exec", (getter)dirstate_item_get_fallback_exec,
745 (setter)dirstate_item_set_fallback_exec, "fallback_exec", NULL},
763 (setter)dirstate_item_set_fallback_exec, "fallback_exec", NULL},
746 {"has_fallback_symlink", (getter)dirstate_item_get_has_fallback_symlink,
764 {"has_fallback_symlink", (getter)dirstate_item_get_has_fallback_symlink,
747 NULL, "has_fallback_symlink", NULL},
765 NULL, "has_fallback_symlink", NULL},
748 {"fallback_symlink", (getter)dirstate_item_get_fallback_symlink,
766 {"fallback_symlink", (getter)dirstate_item_get_fallback_symlink,
749 (setter)dirstate_item_set_fallback_symlink, "fallback_symlink", NULL},
767 (setter)dirstate_item_set_fallback_symlink, "fallback_symlink", NULL},
750 {"tracked", (getter)dirstate_item_get_tracked, NULL, "tracked", NULL},
768 {"tracked", (getter)dirstate_item_get_tracked, NULL, "tracked", NULL},
751 {"p1_tracked", (getter)dirstate_item_get_p1_tracked, NULL, "p1_tracked",
769 {"p1_tracked", (getter)dirstate_item_get_p1_tracked, NULL, "p1_tracked",
752 NULL},
770 NULL},
753 {"added", (getter)dirstate_item_get_added, NULL, "added", NULL},
771 {"added", (getter)dirstate_item_get_added, NULL, "added", NULL},
754 {"p2_info", (getter)dirstate_item_get_p2_info, NULL, "p2_info", NULL},
772 {"p2_info", (getter)dirstate_item_get_p2_info, NULL, "p2_info", NULL},
755 {"merged", (getter)dirstate_item_get_merged, NULL, "merged", NULL},
773 {"merged", (getter)dirstate_item_get_merged, NULL, "merged", NULL},
756 {"from_p2", (getter)dirstate_item_get_from_p2, NULL, "from_p2", NULL},
774 {"from_p2", (getter)dirstate_item_get_from_p2, NULL, "from_p2", NULL},
757 {"maybe_clean", (getter)dirstate_item_get_maybe_clean, NULL, "maybe_clean",
775 {"maybe_clean", (getter)dirstate_item_get_maybe_clean, NULL, "maybe_clean",
758 NULL},
776 NULL},
759 {"any_tracked", (getter)dirstate_item_get_any_tracked, NULL, "any_tracked",
777 {"any_tracked", (getter)dirstate_item_get_any_tracked, NULL, "any_tracked",
760 NULL},
778 NULL},
761 {"removed", (getter)dirstate_item_get_removed, NULL, "removed", NULL},
779 {"removed", (getter)dirstate_item_get_removed, NULL, "removed", NULL},
762 {NULL} /* Sentinel */
780 {NULL} /* Sentinel */
763 };
781 };
764
782
765 PyTypeObject dirstateItemType = {
783 PyTypeObject dirstateItemType = {
766 PyVarObject_HEAD_INIT(NULL, 0) /* header */
784 PyVarObject_HEAD_INIT(NULL, 0) /* header */
767 "dirstate_tuple", /* tp_name */
785 "dirstate_tuple", /* tp_name */
768 sizeof(dirstateItemObject), /* tp_basicsize */
786 sizeof(dirstateItemObject), /* tp_basicsize */
769 0, /* tp_itemsize */
787 0, /* tp_itemsize */
770 (destructor)dirstate_item_dealloc, /* tp_dealloc */
788 (destructor)dirstate_item_dealloc, /* tp_dealloc */
771 0, /* tp_print */
789 0, /* tp_print */
772 0, /* tp_getattr */
790 0, /* tp_getattr */
773 0, /* tp_setattr */
791 0, /* tp_setattr */
774 0, /* tp_compare */
792 0, /* tp_compare */
775 0, /* tp_repr */
793 0, /* tp_repr */
776 0, /* tp_as_number */
794 0, /* tp_as_number */
777 0, /* tp_as_sequence */
795 0, /* tp_as_sequence */
778 0, /* tp_as_mapping */
796 0, /* tp_as_mapping */
779 0, /* tp_hash */
797 0, /* tp_hash */
780 0, /* tp_call */
798 0, /* tp_call */
781 0, /* tp_str */
799 0, /* tp_str */
782 0, /* tp_getattro */
800 0, /* tp_getattro */
783 0, /* tp_setattro */
801 0, /* tp_setattro */
784 0, /* tp_as_buffer */
802 0, /* tp_as_buffer */
785 Py_TPFLAGS_DEFAULT, /* tp_flags */
803 Py_TPFLAGS_DEFAULT, /* tp_flags */
786 "dirstate tuple", /* tp_doc */
804 "dirstate tuple", /* tp_doc */
787 0, /* tp_traverse */
805 0, /* tp_traverse */
788 0, /* tp_clear */
806 0, /* tp_clear */
789 0, /* tp_richcompare */
807 0, /* tp_richcompare */
790 0, /* tp_weaklistoffset */
808 0, /* tp_weaklistoffset */
791 0, /* tp_iter */
809 0, /* tp_iter */
792 0, /* tp_iternext */
810 0, /* tp_iternext */
793 dirstate_item_methods, /* tp_methods */
811 dirstate_item_methods, /* tp_methods */
794 0, /* tp_members */
812 0, /* tp_members */
795 dirstate_item_getset, /* tp_getset */
813 dirstate_item_getset, /* tp_getset */
796 0, /* tp_base */
814 0, /* tp_base */
797 0, /* tp_dict */
815 0, /* tp_dict */
798 0, /* tp_descr_get */
816 0, /* tp_descr_get */
799 0, /* tp_descr_set */
817 0, /* tp_descr_set */
800 0, /* tp_dictoffset */
818 0, /* tp_dictoffset */
801 0, /* tp_init */
819 0, /* tp_init */
802 0, /* tp_alloc */
820 0, /* tp_alloc */
803 dirstate_item_new, /* tp_new */
821 dirstate_item_new, /* tp_new */
804 };
822 };
805
823
806 static PyObject *parse_dirstate(PyObject *self, PyObject *args)
824 static PyObject *parse_dirstate(PyObject *self, PyObject *args)
807 {
825 {
808 PyObject *dmap, *cmap, *parents = NULL, *ret = NULL;
826 PyObject *dmap, *cmap, *parents = NULL, *ret = NULL;
809 PyObject *fname = NULL, *cname = NULL, *entry = NULL;
827 PyObject *fname = NULL, *cname = NULL, *entry = NULL;
810 char state, *cur, *str, *cpos;
828 char state, *cur, *str, *cpos;
811 int mode, size, mtime;
829 int mode, size, mtime;
812 unsigned int flen, pos = 40;
830 unsigned int flen, pos = 40;
813 Py_ssize_t len = 40;
831 Py_ssize_t len = 40;
814 Py_ssize_t readlen;
832 Py_ssize_t readlen;
815
833
816 if (!PyArg_ParseTuple(
834 if (!PyArg_ParseTuple(
817 args, PY23("O!O!s#:parse_dirstate", "O!O!y#:parse_dirstate"),
835 args, PY23("O!O!s#:parse_dirstate", "O!O!y#:parse_dirstate"),
818 &PyDict_Type, &dmap, &PyDict_Type, &cmap, &str, &readlen)) {
836 &PyDict_Type, &dmap, &PyDict_Type, &cmap, &str, &readlen)) {
819 goto quit;
837 goto quit;
820 }
838 }
821
839
822 len = readlen;
840 len = readlen;
823
841
824 /* read parents */
842 /* read parents */
825 if (len < 40) {
843 if (len < 40) {
826 PyErr_SetString(PyExc_ValueError,
844 PyErr_SetString(PyExc_ValueError,
827 "too little data for parents");
845 "too little data for parents");
828 goto quit;
846 goto quit;
829 }
847 }
830
848
831 parents = Py_BuildValue(PY23("s#s#", "y#y#"), str, (Py_ssize_t)20,
849 parents = Py_BuildValue(PY23("s#s#", "y#y#"), str, (Py_ssize_t)20,
832 str + 20, (Py_ssize_t)20);
850 str + 20, (Py_ssize_t)20);
833 if (!parents) {
851 if (!parents) {
834 goto quit;
852 goto quit;
835 }
853 }
836
854
837 /* read filenames */
855 /* read filenames */
838 while (pos >= 40 && pos < len) {
856 while (pos >= 40 && pos < len) {
839 if (pos + 17 > len) {
857 if (pos + 17 > len) {
840 PyErr_SetString(PyExc_ValueError,
858 PyErr_SetString(PyExc_ValueError,
841 "overflow in dirstate");
859 "overflow in dirstate");
842 goto quit;
860 goto quit;
843 }
861 }
844 cur = str + pos;
862 cur = str + pos;
845 /* unpack header */
863 /* unpack header */
846 state = *cur;
864 state = *cur;
847 mode = getbe32(cur + 1);
865 mode = getbe32(cur + 1);
848 size = getbe32(cur + 5);
866 size = getbe32(cur + 5);
849 mtime = getbe32(cur + 9);
867 mtime = getbe32(cur + 9);
850 flen = getbe32(cur + 13);
868 flen = getbe32(cur + 13);
851 pos += 17;
869 pos += 17;
852 cur += 17;
870 cur += 17;
853 if (flen > len - pos) {
871 if (flen > len - pos) {
854 PyErr_SetString(PyExc_ValueError,
872 PyErr_SetString(PyExc_ValueError,
855 "overflow in dirstate");
873 "overflow in dirstate");
856 goto quit;
874 goto quit;
857 }
875 }
858
876
859 entry = (PyObject *)dirstate_item_from_v1_data(state, mode,
877 entry = (PyObject *)dirstate_item_from_v1_data(state, mode,
860 size, mtime);
878 size, mtime);
861 if (!entry)
879 if (!entry)
862 goto quit;
880 goto quit;
863 cpos = memchr(cur, 0, flen);
881 cpos = memchr(cur, 0, flen);
864 if (cpos) {
882 if (cpos) {
865 fname = PyBytes_FromStringAndSize(cur, cpos - cur);
883 fname = PyBytes_FromStringAndSize(cur, cpos - cur);
866 cname = PyBytes_FromStringAndSize(
884 cname = PyBytes_FromStringAndSize(
867 cpos + 1, flen - (cpos - cur) - 1);
885 cpos + 1, flen - (cpos - cur) - 1);
868 if (!fname || !cname ||
886 if (!fname || !cname ||
869 PyDict_SetItem(cmap, fname, cname) == -1 ||
887 PyDict_SetItem(cmap, fname, cname) == -1 ||
870 PyDict_SetItem(dmap, fname, entry) == -1) {
888 PyDict_SetItem(dmap, fname, entry) == -1) {
871 goto quit;
889 goto quit;
872 }
890 }
873 Py_DECREF(cname);
891 Py_DECREF(cname);
874 } else {
892 } else {
875 fname = PyBytes_FromStringAndSize(cur, flen);
893 fname = PyBytes_FromStringAndSize(cur, flen);
876 if (!fname ||
894 if (!fname ||
877 PyDict_SetItem(dmap, fname, entry) == -1) {
895 PyDict_SetItem(dmap, fname, entry) == -1) {
878 goto quit;
896 goto quit;
879 }
897 }
880 }
898 }
881 Py_DECREF(fname);
899 Py_DECREF(fname);
882 Py_DECREF(entry);
900 Py_DECREF(entry);
883 fname = cname = entry = NULL;
901 fname = cname = entry = NULL;
884 pos += flen;
902 pos += flen;
885 }
903 }
886
904
887 ret = parents;
905 ret = parents;
888 Py_INCREF(ret);
906 Py_INCREF(ret);
889 quit:
907 quit:
890 Py_XDECREF(fname);
908 Py_XDECREF(fname);
891 Py_XDECREF(cname);
909 Py_XDECREF(cname);
892 Py_XDECREF(entry);
910 Py_XDECREF(entry);
893 Py_XDECREF(parents);
911 Py_XDECREF(parents);
894 return ret;
912 return ret;
895 }
913 }
896
914
897 /*
915 /*
898 * Efficiently pack a dirstate object into its on-disk format.
916 * Efficiently pack a dirstate object into its on-disk format.
899 */
917 */
900 static PyObject *pack_dirstate(PyObject *self, PyObject *args)
918 static PyObject *pack_dirstate(PyObject *self, PyObject *args)
901 {
919 {
902 PyObject *packobj = NULL;
920 PyObject *packobj = NULL;
903 PyObject *map, *copymap, *pl, *mtime_unset = NULL;
921 PyObject *map, *copymap, *pl, *mtime_unset = NULL;
904 Py_ssize_t nbytes, pos, l;
922 Py_ssize_t nbytes, pos, l;
905 PyObject *k, *v = NULL, *pn;
923 PyObject *k, *v = NULL, *pn;
906 char *p, *s;
924 char *p, *s;
907 int now_s;
925 int now_s;
908 int now_ns;
926 int now_ns;
909
927
910 if (!PyArg_ParseTuple(args, "O!O!O!(ii):pack_dirstate", &PyDict_Type,
928 if (!PyArg_ParseTuple(args, "O!O!O!(ii):pack_dirstate", &PyDict_Type,
911 &map, &PyDict_Type, &copymap, &PyTuple_Type, &pl,
929 &map, &PyDict_Type, &copymap, &PyTuple_Type, &pl,
912 &now_s, &now_ns)) {
930 &now_s, &now_ns)) {
913 return NULL;
931 return NULL;
914 }
932 }
915
933
916 if (PyTuple_Size(pl) != 2) {
934 if (PyTuple_Size(pl) != 2) {
917 PyErr_SetString(PyExc_TypeError, "expected 2-element tuple");
935 PyErr_SetString(PyExc_TypeError, "expected 2-element tuple");
918 return NULL;
936 return NULL;
919 }
937 }
920
938
921 /* Figure out how much we need to allocate. */
939 /* Figure out how much we need to allocate. */
922 for (nbytes = 40, pos = 0; PyDict_Next(map, &pos, &k, &v);) {
940 for (nbytes = 40, pos = 0; PyDict_Next(map, &pos, &k, &v);) {
923 PyObject *c;
941 PyObject *c;
924 if (!PyBytes_Check(k)) {
942 if (!PyBytes_Check(k)) {
925 PyErr_SetString(PyExc_TypeError, "expected string key");
943 PyErr_SetString(PyExc_TypeError, "expected string key");
926 goto bail;
944 goto bail;
927 }
945 }
928 nbytes += PyBytes_GET_SIZE(k) + 17;
946 nbytes += PyBytes_GET_SIZE(k) + 17;
929 c = PyDict_GetItem(copymap, k);
947 c = PyDict_GetItem(copymap, k);
930 if (c) {
948 if (c) {
931 if (!PyBytes_Check(c)) {
949 if (!PyBytes_Check(c)) {
932 PyErr_SetString(PyExc_TypeError,
950 PyErr_SetString(PyExc_TypeError,
933 "expected string key");
951 "expected string key");
934 goto bail;
952 goto bail;
935 }
953 }
936 nbytes += PyBytes_GET_SIZE(c) + 1;
954 nbytes += PyBytes_GET_SIZE(c) + 1;
937 }
955 }
938 }
956 }
939
957
940 packobj = PyBytes_FromStringAndSize(NULL, nbytes);
958 packobj = PyBytes_FromStringAndSize(NULL, nbytes);
941 if (packobj == NULL) {
959 if (packobj == NULL) {
942 goto bail;
960 goto bail;
943 }
961 }
944
962
945 p = PyBytes_AS_STRING(packobj);
963 p = PyBytes_AS_STRING(packobj);
946
964
947 pn = PyTuple_GET_ITEM(pl, 0);
965 pn = PyTuple_GET_ITEM(pl, 0);
948 if (PyBytes_AsStringAndSize(pn, &s, &l) == -1 || l != 20) {
966 if (PyBytes_AsStringAndSize(pn, &s, &l) == -1 || l != 20) {
949 PyErr_SetString(PyExc_TypeError, "expected a 20-byte hash");
967 PyErr_SetString(PyExc_TypeError, "expected a 20-byte hash");
950 goto bail;
968 goto bail;
951 }
969 }
952 memcpy(p, s, l);
970 memcpy(p, s, l);
953 p += 20;
971 p += 20;
954 pn = PyTuple_GET_ITEM(pl, 1);
972 pn = PyTuple_GET_ITEM(pl, 1);
955 if (PyBytes_AsStringAndSize(pn, &s, &l) == -1 || l != 20) {
973 if (PyBytes_AsStringAndSize(pn, &s, &l) == -1 || l != 20) {
956 PyErr_SetString(PyExc_TypeError, "expected a 20-byte hash");
974 PyErr_SetString(PyExc_TypeError, "expected a 20-byte hash");
957 goto bail;
975 goto bail;
958 }
976 }
959 memcpy(p, s, l);
977 memcpy(p, s, l);
960 p += 20;
978 p += 20;
961
979
962 for (pos = 0; PyDict_Next(map, &pos, &k, &v);) {
980 for (pos = 0; PyDict_Next(map, &pos, &k, &v);) {
963 dirstateItemObject *tuple;
981 dirstateItemObject *tuple;
964 char state;
982 char state;
965 int mode, size, mtime;
983 int mode, size, mtime;
966 Py_ssize_t len, l;
984 Py_ssize_t len, l;
967 PyObject *o;
985 PyObject *o;
968 char *t;
986 char *t;
969
987
970 if (!dirstate_tuple_check(v)) {
988 if (!dirstate_tuple_check(v)) {
971 PyErr_SetString(PyExc_TypeError,
989 PyErr_SetString(PyExc_TypeError,
972 "expected a dirstate tuple");
990 "expected a dirstate tuple");
973 goto bail;
991 goto bail;
974 }
992 }
975 tuple = (dirstateItemObject *)v;
993 tuple = (dirstateItemObject *)v;
976
994
977 state = dirstate_item_c_v1_state(tuple);
995 state = dirstate_item_c_v1_state(tuple);
978 mode = dirstate_item_c_v1_mode(tuple);
996 mode = dirstate_item_c_v1_mode(tuple);
979 size = dirstate_item_c_v1_size(tuple);
997 size = dirstate_item_c_v1_size(tuple);
980 mtime = dirstate_item_c_v1_mtime(tuple);
998 mtime = dirstate_item_c_v1_mtime(tuple);
981 if (state == 'n' && tuple->mtime_s == now_s) {
999 if (state == 'n' && tuple->mtime_s == now_s) {
982 /* See pure/parsers.py:pack_dirstate for why we do
1000 /* See pure/parsers.py:pack_dirstate for why we do
983 * this. */
1001 * this. */
984 mtime = -1;
1002 mtime = -1;
985 mtime_unset = (PyObject *)dirstate_item_from_v1_data(
1003 mtime_unset = (PyObject *)dirstate_item_from_v1_data(
986 state, mode, size, mtime);
1004 state, mode, size, mtime);
987 if (!mtime_unset) {
1005 if (!mtime_unset) {
988 goto bail;
1006 goto bail;
989 }
1007 }
990 if (PyDict_SetItem(map, k, mtime_unset) == -1) {
1008 if (PyDict_SetItem(map, k, mtime_unset) == -1) {
991 goto bail;
1009 goto bail;
992 }
1010 }
993 Py_DECREF(mtime_unset);
1011 Py_DECREF(mtime_unset);
994 mtime_unset = NULL;
1012 mtime_unset = NULL;
995 }
1013 }
996 *p++ = state;
1014 *p++ = state;
997 putbe32((uint32_t)mode, p);
1015 putbe32((uint32_t)mode, p);
998 putbe32((uint32_t)size, p + 4);
1016 putbe32((uint32_t)size, p + 4);
999 putbe32((uint32_t)mtime, p + 8);
1017 putbe32((uint32_t)mtime, p + 8);
1000 t = p + 12;
1018 t = p + 12;
1001 p += 16;
1019 p += 16;
1002 len = PyBytes_GET_SIZE(k);
1020 len = PyBytes_GET_SIZE(k);
1003 memcpy(p, PyBytes_AS_STRING(k), len);
1021 memcpy(p, PyBytes_AS_STRING(k), len);
1004 p += len;
1022 p += len;
1005 o = PyDict_GetItem(copymap, k);
1023 o = PyDict_GetItem(copymap, k);
1006 if (o) {
1024 if (o) {
1007 *p++ = '\0';
1025 *p++ = '\0';
1008 l = PyBytes_GET_SIZE(o);
1026 l = PyBytes_GET_SIZE(o);
1009 memcpy(p, PyBytes_AS_STRING(o), l);
1027 memcpy(p, PyBytes_AS_STRING(o), l);
1010 p += l;
1028 p += l;
1011 len += l + 1;
1029 len += l + 1;
1012 }
1030 }
1013 putbe32((uint32_t)len, t);
1031 putbe32((uint32_t)len, t);
1014 }
1032 }
1015
1033
1016 pos = p - PyBytes_AS_STRING(packobj);
1034 pos = p - PyBytes_AS_STRING(packobj);
1017 if (pos != nbytes) {
1035 if (pos != nbytes) {
1018 PyErr_Format(PyExc_SystemError, "bad dirstate size: %ld != %ld",
1036 PyErr_Format(PyExc_SystemError, "bad dirstate size: %ld != %ld",
1019 (long)pos, (long)nbytes);
1037 (long)pos, (long)nbytes);
1020 goto bail;
1038 goto bail;
1021 }
1039 }
1022
1040
1023 return packobj;
1041 return packobj;
1024 bail:
1042 bail:
1025 Py_XDECREF(mtime_unset);
1043 Py_XDECREF(mtime_unset);
1026 Py_XDECREF(packobj);
1044 Py_XDECREF(packobj);
1027 Py_XDECREF(v);
1045 Py_XDECREF(v);
1028 return NULL;
1046 return NULL;
1029 }
1047 }
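For reference, the packing loop above writes one variable-length record per file after the two 20-byte parent hashes. The following is a rough pure-Python sketch of that per-entry layout; the helper name and the use of struct are illustrative only, not part of this module.

    import struct

    # Illustrative helper, not part of parsers.c:
    def pack_v1_entry(state, mode, size, mtime, filename, copysource=None):
        # 1-byte state + three big-endian 32-bit fields + 32-bit length = the
        # 17 fixed bytes counted per entry above, followed by the filename
        # (with '\0' + copy source appended when a copy is recorded).
        name = filename if copysource is None else filename + b"\x00" + copysource
        fixed = struct.pack(">c3i", state, mode, size, mtime)
        return fixed + struct.pack(">I", len(name)) + name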
1030
1048
1031 #define BUMPED_FIX 1
1049 #define BUMPED_FIX 1
1032 #define USING_SHA_256 2
1050 #define USING_SHA_256 2
1033 #define FM1_HEADER_SIZE (4 + 8 + 2 + 2 + 1 + 1 + 1)
1051 #define FM1_HEADER_SIZE (4 + 8 + 2 + 2 + 1 + 1 + 1)
1034
1052
1035 static PyObject *readshas(const char *source, unsigned char num,
1053 static PyObject *readshas(const char *source, unsigned char num,
1036 Py_ssize_t hashwidth)
1054 Py_ssize_t hashwidth)
1037 {
1055 {
1038 int i;
1056 int i;
1039 PyObject *list = PyTuple_New(num);
1057 PyObject *list = PyTuple_New(num);
1040 if (list == NULL) {
1058 if (list == NULL) {
1041 return NULL;
1059 return NULL;
1042 }
1060 }
1043 for (i = 0; i < num; i++) {
1061 for (i = 0; i < num; i++) {
1044 PyObject *hash = PyBytes_FromStringAndSize(source, hashwidth);
1062 PyObject *hash = PyBytes_FromStringAndSize(source, hashwidth);
1045 if (hash == NULL) {
1063 if (hash == NULL) {
1046 Py_DECREF(list);
1064 Py_DECREF(list);
1047 return NULL;
1065 return NULL;
1048 }
1066 }
1049 PyTuple_SET_ITEM(list, i, hash);
1067 PyTuple_SET_ITEM(list, i, hash);
1050 source += hashwidth;
1068 source += hashwidth;
1051 }
1069 }
1052 return list;
1070 return list;
1053 }
1071 }
1054
1072
1055 static PyObject *fm1readmarker(const char *databegin, const char *dataend,
1073 static PyObject *fm1readmarker(const char *databegin, const char *dataend,
1056 uint32_t *msize)
1074 uint32_t *msize)
1057 {
1075 {
1058 const char *data = databegin;
1076 const char *data = databegin;
1059 const char *meta;
1077 const char *meta;
1060
1078
1061 double mtime;
1079 double mtime;
1062 int16_t tz;
1080 int16_t tz;
1063 uint16_t flags;
1081 uint16_t flags;
1064 unsigned char nsuccs, nparents, nmetadata;
1082 unsigned char nsuccs, nparents, nmetadata;
1065 Py_ssize_t hashwidth = 20;
1083 Py_ssize_t hashwidth = 20;
1066
1084
1067 PyObject *prec = NULL, *parents = NULL, *succs = NULL;
1085 PyObject *prec = NULL, *parents = NULL, *succs = NULL;
1068 PyObject *metadata = NULL, *ret = NULL;
1086 PyObject *metadata = NULL, *ret = NULL;
1069 int i;
1087 int i;
1070
1088
1071 if (data + FM1_HEADER_SIZE > dataend) {
1089 if (data + FM1_HEADER_SIZE > dataend) {
1072 goto overflow;
1090 goto overflow;
1073 }
1091 }
1074
1092
1075 *msize = getbe32(data);
1093 *msize = getbe32(data);
1076 data += 4;
1094 data += 4;
1077 mtime = getbefloat64(data);
1095 mtime = getbefloat64(data);
1078 data += 8;
1096 data += 8;
1079 tz = getbeint16(data);
1097 tz = getbeint16(data);
1080 data += 2;
1098 data += 2;
1081 flags = getbeuint16(data);
1099 flags = getbeuint16(data);
1082 data += 2;
1100 data += 2;
1083
1101
1084 if (flags & USING_SHA_256) {
1102 if (flags & USING_SHA_256) {
1085 hashwidth = 32;
1103 hashwidth = 32;
1086 }
1104 }
1087
1105
1088 nsuccs = (unsigned char)(*data++);
1106 nsuccs = (unsigned char)(*data++);
1089 nparents = (unsigned char)(*data++);
1107 nparents = (unsigned char)(*data++);
1090 nmetadata = (unsigned char)(*data++);
1108 nmetadata = (unsigned char)(*data++);
1091
1109
1092 if (databegin + *msize > dataend) {
1110 if (databegin + *msize > dataend) {
1093 goto overflow;
1111 goto overflow;
1094 }
1112 }
1095 dataend = databegin + *msize; /* narrow down to marker size */
1113 dataend = databegin + *msize; /* narrow down to marker size */
1096
1114
1097 if (data + hashwidth > dataend) {
1115 if (data + hashwidth > dataend) {
1098 goto overflow;
1116 goto overflow;
1099 }
1117 }
1100 prec = PyBytes_FromStringAndSize(data, hashwidth);
1118 prec = PyBytes_FromStringAndSize(data, hashwidth);
1101 data += hashwidth;
1119 data += hashwidth;
1102 if (prec == NULL) {
1120 if (prec == NULL) {
1103 goto bail;
1121 goto bail;
1104 }
1122 }
1105
1123
1106 if (data + nsuccs * hashwidth > dataend) {
1124 if (data + nsuccs * hashwidth > dataend) {
1107 goto overflow;
1125 goto overflow;
1108 }
1126 }
1109 succs = readshas(data, nsuccs, hashwidth);
1127 succs = readshas(data, nsuccs, hashwidth);
1110 if (succs == NULL) {
1128 if (succs == NULL) {
1111 goto bail;
1129 goto bail;
1112 }
1130 }
1113 data += nsuccs * hashwidth;
1131 data += nsuccs * hashwidth;
1114
1132
1115 if (nparents == 1 || nparents == 2) {
1133 if (nparents == 1 || nparents == 2) {
1116 if (data + nparents * hashwidth > dataend) {
1134 if (data + nparents * hashwidth > dataend) {
1117 goto overflow;
1135 goto overflow;
1118 }
1136 }
1119 parents = readshas(data, nparents, hashwidth);
1137 parents = readshas(data, nparents, hashwidth);
1120 if (parents == NULL) {
1138 if (parents == NULL) {
1121 goto bail;
1139 goto bail;
1122 }
1140 }
1123 data += nparents * hashwidth;
1141 data += nparents * hashwidth;
1124 } else {
1142 } else {
1125 parents = Py_None;
1143 parents = Py_None;
1126 Py_INCREF(parents);
1144 Py_INCREF(parents);
1127 }
1145 }
1128
1146
1129 if (data + 2 * nmetadata > dataend) {
1147 if (data + 2 * nmetadata > dataend) {
1130 goto overflow;
1148 goto overflow;
1131 }
1149 }
1132 meta = data + (2 * nmetadata);
1150 meta = data + (2 * nmetadata);
1133 metadata = PyTuple_New(nmetadata);
1151 metadata = PyTuple_New(nmetadata);
1134 if (metadata == NULL) {
1152 if (metadata == NULL) {
1135 goto bail;
1153 goto bail;
1136 }
1154 }
1137 for (i = 0; i < nmetadata; i++) {
1155 for (i = 0; i < nmetadata; i++) {
1138 PyObject *tmp, *left = NULL, *right = NULL;
1156 PyObject *tmp, *left = NULL, *right = NULL;
1139 Py_ssize_t leftsize = (unsigned char)(*data++);
1157 Py_ssize_t leftsize = (unsigned char)(*data++);
1140 Py_ssize_t rightsize = (unsigned char)(*data++);
1158 Py_ssize_t rightsize = (unsigned char)(*data++);
1141 if (meta + leftsize + rightsize > dataend) {
1159 if (meta + leftsize + rightsize > dataend) {
1142 goto overflow;
1160 goto overflow;
1143 }
1161 }
1144 left = PyBytes_FromStringAndSize(meta, leftsize);
1162 left = PyBytes_FromStringAndSize(meta, leftsize);
1145 meta += leftsize;
1163 meta += leftsize;
1146 right = PyBytes_FromStringAndSize(meta, rightsize);
1164 right = PyBytes_FromStringAndSize(meta, rightsize);
1147 meta += rightsize;
1165 meta += rightsize;
1148 tmp = PyTuple_New(2);
1166 tmp = PyTuple_New(2);
1149 if (!left || !right || !tmp) {
1167 if (!left || !right || !tmp) {
1150 Py_XDECREF(left);
1168 Py_XDECREF(left);
1151 Py_XDECREF(right);
1169 Py_XDECREF(right);
1152 Py_XDECREF(tmp);
1170 Py_XDECREF(tmp);
1153 goto bail;
1171 goto bail;
1154 }
1172 }
1155 PyTuple_SET_ITEM(tmp, 0, left);
1173 PyTuple_SET_ITEM(tmp, 0, left);
1156 PyTuple_SET_ITEM(tmp, 1, right);
1174 PyTuple_SET_ITEM(tmp, 1, right);
1157 PyTuple_SET_ITEM(metadata, i, tmp);
1175 PyTuple_SET_ITEM(metadata, i, tmp);
1158 }
1176 }
1159 ret = Py_BuildValue("(OOHO(di)O)", prec, succs, flags, metadata, mtime,
1177 ret = Py_BuildValue("(OOHO(di)O)", prec, succs, flags, metadata, mtime,
1160 (int)tz * 60, parents);
1178 (int)tz * 60, parents);
1161 goto bail; /* return successfully */
1179 goto bail; /* return successfully */
1162
1180
1163 overflow:
1181 overflow:
1164 PyErr_SetString(PyExc_ValueError, "overflow in obsstore");
1182 PyErr_SetString(PyExc_ValueError, "overflow in obsstore");
1165 bail:
1183 bail:
1166 Py_XDECREF(prec);
1184 Py_XDECREF(prec);
1167 Py_XDECREF(succs);
1185 Py_XDECREF(succs);
1168 Py_XDECREF(metadata);
1186 Py_XDECREF(metadata);
1169 Py_XDECREF(parents);
1187 Py_XDECREF(parents);
1170 return ret;
1188 return ret;
1171 }
1189 }
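The fixed-width prefix consumed above is FM1_HEADER_SIZE (4 + 8 + 2 + 2 + 1 + 1 + 1 = 19) bytes. A small pure-Python sketch of the same decode, with illustrative names only:

    import struct

    # Mirrors the reads done by fm1readmarker() above (illustrative names):
    # uint32 size, float64 mtime, int16 tz, uint16 flags, then three byte counts.
    FM1_FIXED = struct.Struct(">IdhHBBB")  # 19 bytes total

    def peek_fm1_header(data, offset=0):
        msize, mtime, tz, flags, nsuccs, nparents, nmeta = FM1_FIXED.unpack_from(data, offset)
        hashwidth = 32 if flags & 2 else 20  # USING_SHA_256 flag selects 32-byte hashes
        return msize, mtime, tz, flags, nsuccs, nparents, nmeta, hashwidth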
1172
1190
1173 static PyObject *fm1readmarkers(PyObject *self, PyObject *args)
1191 static PyObject *fm1readmarkers(PyObject *self, PyObject *args)
1174 {
1192 {
1175 const char *data, *dataend;
1193 const char *data, *dataend;
1176 Py_ssize_t datalen, offset, stop;
1194 Py_ssize_t datalen, offset, stop;
1177 PyObject *markers = NULL;
1195 PyObject *markers = NULL;
1178
1196
1179 if (!PyArg_ParseTuple(args, PY23("s#nn", "y#nn"), &data, &datalen,
1197 if (!PyArg_ParseTuple(args, PY23("s#nn", "y#nn"), &data, &datalen,
1180 &offset, &stop)) {
1198 &offset, &stop)) {
1181 return NULL;
1199 return NULL;
1182 }
1200 }
1183 if (offset < 0) {
1201 if (offset < 0) {
1184 PyErr_SetString(PyExc_ValueError,
1202 PyErr_SetString(PyExc_ValueError,
1185 "invalid negative offset in fm1readmarkers");
1203 "invalid negative offset in fm1readmarkers");
1186 return NULL;
1204 return NULL;
1187 }
1205 }
1188 if (stop > datalen) {
1206 if (stop > datalen) {
1189 PyErr_SetString(
1207 PyErr_SetString(
1190 PyExc_ValueError,
1208 PyExc_ValueError,
1191 "stop longer than data length in fm1readmarkers");
1209 "stop longer than data length in fm1readmarkers");
1192 return NULL;
1210 return NULL;
1193 }
1211 }
1194 dataend = data + datalen;
1212 dataend = data + datalen;
1195 data += offset;
1213 data += offset;
1196 markers = PyList_New(0);
1214 markers = PyList_New(0);
1197 if (!markers) {
1215 if (!markers) {
1198 return NULL;
1216 return NULL;
1199 }
1217 }
1200 while (offset < stop) {
1218 while (offset < stop) {
1201 uint32_t msize;
1219 uint32_t msize;
1202 int error;
1220 int error;
1203 PyObject *record = fm1readmarker(data, dataend, &msize);
1221 PyObject *record = fm1readmarker(data, dataend, &msize);
1204 if (!record) {
1222 if (!record) {
1205 goto bail;
1223 goto bail;
1206 }
1224 }
1207 error = PyList_Append(markers, record);
1225 error = PyList_Append(markers, record);
1208 Py_DECREF(record);
1226 Py_DECREF(record);
1209 if (error) {
1227 if (error) {
1210 goto bail;
1228 goto bail;
1211 }
1229 }
1212 data += msize;
1230 data += msize;
1213 offset += msize;
1231 offset += msize;
1214 }
1232 }
1215 return markers;
1233 return markers;
1216 bail:
1234 bail:
1217 Py_DECREF(markers);
1235 Py_DECREF(markers);
1218 return NULL;
1236 return NULL;
1219 }
1237 }
1220
1238
1221 static char parsers_doc[] = "Efficient content parsing.";
1239 static char parsers_doc[] = "Efficient content parsing.";
1222
1240
1223 PyObject *encodedir(PyObject *self, PyObject *args);
1241 PyObject *encodedir(PyObject *self, PyObject *args);
1224 PyObject *pathencode(PyObject *self, PyObject *args);
1242 PyObject *pathencode(PyObject *self, PyObject *args);
1225 PyObject *lowerencode(PyObject *self, PyObject *args);
1243 PyObject *lowerencode(PyObject *self, PyObject *args);
1226 PyObject *parse_index2(PyObject *self, PyObject *args, PyObject *kwargs);
1244 PyObject *parse_index2(PyObject *self, PyObject *args, PyObject *kwargs);
1227
1245
1228 static PyMethodDef methods[] = {
1246 static PyMethodDef methods[] = {
1229 {"pack_dirstate", pack_dirstate, METH_VARARGS, "pack a dirstate\n"},
1247 {"pack_dirstate", pack_dirstate, METH_VARARGS, "pack a dirstate\n"},
1230 {"parse_dirstate", parse_dirstate, METH_VARARGS, "parse a dirstate\n"},
1248 {"parse_dirstate", parse_dirstate, METH_VARARGS, "parse a dirstate\n"},
1231 {"parse_index2", (PyCFunction)parse_index2, METH_VARARGS | METH_KEYWORDS,
1249 {"parse_index2", (PyCFunction)parse_index2, METH_VARARGS | METH_KEYWORDS,
1232 "parse a revlog index\n"},
1250 "parse a revlog index\n"},
1233 {"isasciistr", isasciistr, METH_VARARGS, "check if an ASCII string\n"},
1251 {"isasciistr", isasciistr, METH_VARARGS, "check if an ASCII string\n"},
1234 {"asciilower", asciilower, METH_VARARGS, "lowercase an ASCII string\n"},
1252 {"asciilower", asciilower, METH_VARARGS, "lowercase an ASCII string\n"},
1235 {"asciiupper", asciiupper, METH_VARARGS, "uppercase an ASCII string\n"},
1253 {"asciiupper", asciiupper, METH_VARARGS, "uppercase an ASCII string\n"},
1236 {"dict_new_presized", dict_new_presized, METH_VARARGS,
1254 {"dict_new_presized", dict_new_presized, METH_VARARGS,
1237 "construct a dict with an expected size\n"},
1255 "construct a dict with an expected size\n"},
1238 {"make_file_foldmap", make_file_foldmap, METH_VARARGS,
1256 {"make_file_foldmap", make_file_foldmap, METH_VARARGS,
1239 "make file foldmap\n"},
1257 "make file foldmap\n"},
1240 {"jsonescapeu8fast", jsonescapeu8fast, METH_VARARGS,
1258 {"jsonescapeu8fast", jsonescapeu8fast, METH_VARARGS,
1241 "escape a UTF-8 byte string to JSON (fast path)\n"},
1259 "escape a UTF-8 byte string to JSON (fast path)\n"},
1242 {"encodedir", encodedir, METH_VARARGS, "encodedir a path\n"},
1260 {"encodedir", encodedir, METH_VARARGS, "encodedir a path\n"},
1243 {"pathencode", pathencode, METH_VARARGS, "fncache-encode a path\n"},
1261 {"pathencode", pathencode, METH_VARARGS, "fncache-encode a path\n"},
1244 {"lowerencode", lowerencode, METH_VARARGS, "lower-encode a path\n"},
1262 {"lowerencode", lowerencode, METH_VARARGS, "lower-encode a path\n"},
1245 {"fm1readmarkers", fm1readmarkers, METH_VARARGS,
1263 {"fm1readmarkers", fm1readmarkers, METH_VARARGS,
1246 "parse v1 obsolete markers\n"},
1264 "parse v1 obsolete markers\n"},
1247 {NULL, NULL}};
1265 {NULL, NULL}};
1248
1266
1249 void dirs_module_init(PyObject *mod);
1267 void dirs_module_init(PyObject *mod);
1250 void manifest_module_init(PyObject *mod);
1268 void manifest_module_init(PyObject *mod);
1251 void revlog_module_init(PyObject *mod);
1269 void revlog_module_init(PyObject *mod);
1252
1270
1253 static const int version = 20;
1271 static const int version = 20;
1254
1272
1255 static void module_init(PyObject *mod)
1273 static void module_init(PyObject *mod)
1256 {
1274 {
1257 PyModule_AddIntConstant(mod, "version", version);
1275 PyModule_AddIntConstant(mod, "version", version);
1258
1276
1259 /* This module constant has two purposes. First, it lets us unit test
1277 /* This module constant has two purposes. First, it lets us unit test
1260 * the ImportError raised without hard-coding any error text. This
1278 * the ImportError raised without hard-coding any error text. This
1261 * means we can change the text in the future without breaking tests,
1279 * means we can change the text in the future without breaking tests,
1262 * even across changesets without a recompile. Second, its presence
1280 * even across changesets without a recompile. Second, its presence
1263 * can be used to determine whether the version-checking logic is
1281 * can be used to determine whether the version-checking logic is
1264 * present, which also helps in testing across changesets without a
1282 * present, which also helps in testing across changesets without a
1265 * recompile. Note that this means the pure-Python version of parsers
1283 * recompile. Note that this means the pure-Python version of parsers
1266 * should not have this module constant. */
1284 * should not have this module constant. */
1267 PyModule_AddStringConstant(mod, "versionerrortext", versionerrortext);
1285 PyModule_AddStringConstant(mod, "versionerrortext", versionerrortext);
1268
1286
1269 dirs_module_init(mod);
1287 dirs_module_init(mod);
1270 manifest_module_init(mod);
1288 manifest_module_init(mod);
1271 revlog_module_init(mod);
1289 revlog_module_init(mod);
1272
1290
1273 if (PyType_Ready(&dirstateItemType) < 0) {
1291 if (PyType_Ready(&dirstateItemType) < 0) {
1274 return;
1292 return;
1275 }
1293 }
1276 Py_INCREF(&dirstateItemType);
1294 Py_INCREF(&dirstateItemType);
1277 PyModule_AddObject(mod, "DirstateItem", (PyObject *)&dirstateItemType);
1295 PyModule_AddObject(mod, "DirstateItem", (PyObject *)&dirstateItemType);
1278 }
1296 }
1279
1297
1280 static int check_python_version(void)
1298 static int check_python_version(void)
1281 {
1299 {
1282 PyObject *sys = PyImport_ImportModule("sys"), *ver;
1300 PyObject *sys = PyImport_ImportModule("sys"), *ver;
1283 long hexversion;
1301 long hexversion;
1284 if (!sys) {
1302 if (!sys) {
1285 return -1;
1303 return -1;
1286 }
1304 }
1287 ver = PyObject_GetAttrString(sys, "hexversion");
1305 ver = PyObject_GetAttrString(sys, "hexversion");
1288 Py_DECREF(sys);
1306 Py_DECREF(sys);
1289 if (!ver) {
1307 if (!ver) {
1290 return -1;
1308 return -1;
1291 }
1309 }
1292 hexversion = PyInt_AsLong(ver);
1310 hexversion = PyInt_AsLong(ver);
1293 Py_DECREF(ver);
1311 Py_DECREF(ver);
1294 /* sys.hexversion is a 32-bit number by default, so the -1 case
1312 /* sys.hexversion is a 32-bit number by default, so the -1 case
1295 * should only occur in unusual circumstances (e.g. if sys.hexversion
1313 * should only occur in unusual circumstances (e.g. if sys.hexversion
1296 * is manually set to an invalid value). */
1314 * is manually set to an invalid value). */
1297 if ((hexversion == -1) || (hexversion >> 16 != PY_VERSION_HEX >> 16)) {
1315 if ((hexversion == -1) || (hexversion >> 16 != PY_VERSION_HEX >> 16)) {
1298 PyErr_Format(PyExc_ImportError,
1316 PyErr_Format(PyExc_ImportError,
1299 "%s: The Mercurial extension "
1317 "%s: The Mercurial extension "
1300 "modules were compiled with Python " PY_VERSION
1318 "modules were compiled with Python " PY_VERSION
1301 ", but "
1319 ", but "
1302 "Mercurial is currently using Python with "
1320 "Mercurial is currently using Python with "
1303 "sys.hexversion=%ld: "
1321 "sys.hexversion=%ld: "
1304 "Python %s\n at: %s",
1322 "Python %s\n at: %s",
1305 versionerrortext, hexversion, Py_GetVersion(),
1323 versionerrortext, hexversion, Py_GetVersion(),
1306 Py_GetProgramFullPath());
1324 Py_GetProgramFullPath());
1307 return -1;
1325 return -1;
1308 }
1326 }
1309 return 0;
1327 return 0;
1310 }
1328 }
1311
1329
1312 #ifdef IS_PY3K
1330 #ifdef IS_PY3K
1313 static struct PyModuleDef parsers_module = {PyModuleDef_HEAD_INIT, "parsers",
1331 static struct PyModuleDef parsers_module = {PyModuleDef_HEAD_INIT, "parsers",
1314 parsers_doc, -1, methods};
1332 parsers_doc, -1, methods};
1315
1333
1316 PyMODINIT_FUNC PyInit_parsers(void)
1334 PyMODINIT_FUNC PyInit_parsers(void)
1317 {
1335 {
1318 PyObject *mod;
1336 PyObject *mod;
1319
1337
1320 if (check_python_version() == -1)
1338 if (check_python_version() == -1)
1321 return NULL;
1339 return NULL;
1322 mod = PyModule_Create(&parsers_module);
1340 mod = PyModule_Create(&parsers_module);
1323 module_init(mod);
1341 module_init(mod);
1324 return mod;
1342 return mod;
1325 }
1343 }
1326 #else
1344 #else
1327 PyMODINIT_FUNC initparsers(void)
1345 PyMODINIT_FUNC initparsers(void)
1328 {
1346 {
1329 PyObject *mod;
1347 PyObject *mod;
1330
1348
1331 if (check_python_version() == -1) {
1349 if (check_python_version() == -1) {
1332 return;
1350 return;
1333 }
1351 }
1334 mod = Py_InitModule3("parsers", methods, parsers_doc);
1352 mod = Py_InitModule3("parsers", methods, parsers_doc);
1335 module_init(mod);
1353 module_init(mod);
1336 }
1354 }
1337 #endif
1355 #endif
@@ -1,1533 +1,1534 b''
1 # dirstate.py - working directory tracking for mercurial
1 # dirstate.py - working directory tracking for mercurial
2 #
2 #
3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import collections
10 import collections
11 import contextlib
11 import contextlib
12 import errno
12 import errno
13 import os
13 import os
14 import stat
14 import stat
15
15
16 from .i18n import _
16 from .i18n import _
17 from .pycompat import delattr
17 from .pycompat import delattr
18
18
19 from hgdemandimport import tracing
19 from hgdemandimport import tracing
20
20
21 from . import (
21 from . import (
22 dirstatemap,
22 dirstatemap,
23 encoding,
23 encoding,
24 error,
24 error,
25 match as matchmod,
25 match as matchmod,
26 pathutil,
26 pathutil,
27 policy,
27 policy,
28 pycompat,
28 pycompat,
29 scmutil,
29 scmutil,
30 sparse,
30 sparse,
31 util,
31 util,
32 )
32 )
33
33
34 from .dirstateutils import (
34 from .dirstateutils import (
35 timestamp,
35 timestamp,
36 )
36 )
37
37
38 from .interfaces import (
38 from .interfaces import (
39 dirstate as intdirstate,
39 dirstate as intdirstate,
40 util as interfaceutil,
40 util as interfaceutil,
41 )
41 )
42
42
43 parsers = policy.importmod('parsers')
43 parsers = policy.importmod('parsers')
44 rustmod = policy.importrust('dirstate')
44 rustmod = policy.importrust('dirstate')
45
45
46 HAS_FAST_DIRSTATE_V2 = rustmod is not None
46 HAS_FAST_DIRSTATE_V2 = rustmod is not None
47
47
48 propertycache = util.propertycache
48 propertycache = util.propertycache
49 filecache = scmutil.filecache
49 filecache = scmutil.filecache
50 _rangemask = dirstatemap.rangemask
50 _rangemask = dirstatemap.rangemask
51
51
52 DirstateItem = dirstatemap.DirstateItem
52 DirstateItem = dirstatemap.DirstateItem
53
53
54
54
55 class repocache(filecache):
55 class repocache(filecache):
56 """filecache for files in .hg/"""
56 """filecache for files in .hg/"""
57
57
58 def join(self, obj, fname):
58 def join(self, obj, fname):
59 return obj._opener.join(fname)
59 return obj._opener.join(fname)
60
60
61
61
62 class rootcache(filecache):
62 class rootcache(filecache):
63 """filecache for files in the repository root"""
63 """filecache for files in the repository root"""
64
64
65 def join(self, obj, fname):
65 def join(self, obj, fname):
66 return obj._join(fname)
66 return obj._join(fname)
67
67
68
68
69 def _getfsnow(vfs):
69 def _getfsnow(vfs):
70 '''Get "now" timestamp on filesystem'''
70 '''Get "now" timestamp on filesystem'''
71 tmpfd, tmpname = vfs.mkstemp()
71 tmpfd, tmpname = vfs.mkstemp()
72 try:
72 try:
73 return timestamp.mtime_of(os.fstat(tmpfd))
73 return timestamp.mtime_of(os.fstat(tmpfd))
74 finally:
74 finally:
75 os.close(tmpfd)
75 os.close(tmpfd)
76 vfs.unlink(tmpname)
76 vfs.unlink(tmpname)
77
77
78
78
79 def requires_parents_change(func):
79 def requires_parents_change(func):
80 def wrap(self, *args, **kwargs):
80 def wrap(self, *args, **kwargs):
81 if not self.pendingparentchange():
81 if not self.pendingparentchange():
82 msg = 'calling `%s` outside of a parentchange context'
82 msg = 'calling `%s` outside of a parentchange context'
83 msg %= func.__name__
83 msg %= func.__name__
84 raise error.ProgrammingError(msg)
84 raise error.ProgrammingError(msg)
85 return func(self, *args, **kwargs)
85 return func(self, *args, **kwargs)
86
86
87 return wrap
87 return wrap
88
88
89
89
90 def requires_no_parents_change(func):
90 def requires_no_parents_change(func):
91 def wrap(self, *args, **kwargs):
91 def wrap(self, *args, **kwargs):
92 if self.pendingparentchange():
92 if self.pendingparentchange():
93 msg = 'calling `%s` inside of a parentchange context'
93 msg = 'calling `%s` inside of a parentchange context'
94 msg %= func.__name__
94 msg %= func.__name__
95 raise error.ProgrammingError(msg)
95 raise error.ProgrammingError(msg)
96 return func(self, *args, **kwargs)
96 return func(self, *args, **kwargs)
97
97
98 return wrap
98 return wrap
99
99
100
100
101 @interfaceutil.implementer(intdirstate.idirstate)
101 @interfaceutil.implementer(intdirstate.idirstate)
102 class dirstate(object):
102 class dirstate(object):
103 def __init__(
103 def __init__(
104 self,
104 self,
105 opener,
105 opener,
106 ui,
106 ui,
107 root,
107 root,
108 validate,
108 validate,
109 sparsematchfn,
109 sparsematchfn,
110 nodeconstants,
110 nodeconstants,
111 use_dirstate_v2,
111 use_dirstate_v2,
112 ):
112 ):
113 """Create a new dirstate object.
113 """Create a new dirstate object.
114
114
115 opener is an open()-like callable that can be used to open the
115 opener is an open()-like callable that can be used to open the
116 dirstate file; root is the root of the directory tracked by
116 dirstate file; root is the root of the directory tracked by
117 the dirstate.
117 the dirstate.
118 """
118 """
119 self._use_dirstate_v2 = use_dirstate_v2
119 self._use_dirstate_v2 = use_dirstate_v2
120 self._nodeconstants = nodeconstants
120 self._nodeconstants = nodeconstants
121 self._opener = opener
121 self._opener = opener
122 self._validate = validate
122 self._validate = validate
123 self._root = root
123 self._root = root
124 self._sparsematchfn = sparsematchfn
124 self._sparsematchfn = sparsematchfn
125 # ntpath.join(root, '') of Python 2.7.9 does not add sep if root is
125 # ntpath.join(root, '') of Python 2.7.9 does not add sep if root is
126 # UNC path pointing to root share (issue4557)
126 # UNC path pointing to root share (issue4557)
127 self._rootdir = pathutil.normasprefix(root)
127 self._rootdir = pathutil.normasprefix(root)
128 self._dirty = False
128 self._dirty = False
129 self._lastnormaltime = timestamp.zero()
129 self._lastnormaltime = timestamp.zero()
130 self._ui = ui
130 self._ui = ui
131 self._filecache = {}
131 self._filecache = {}
132 self._parentwriters = 0
132 self._parentwriters = 0
133 self._filename = b'dirstate'
133 self._filename = b'dirstate'
134 self._pendingfilename = b'%s.pending' % self._filename
134 self._pendingfilename = b'%s.pending' % self._filename
135 self._plchangecallbacks = {}
135 self._plchangecallbacks = {}
136 self._origpl = None
136 self._origpl = None
137 self._mapcls = dirstatemap.dirstatemap
137 self._mapcls = dirstatemap.dirstatemap
138 # Access and cache cwd early, so we don't access it for the first time
138 # Access and cache cwd early, so we don't access it for the first time
139 # after a working-copy update caused it to not exist (accessing it then
139 # after a working-copy update caused it to not exist (accessing it then
140 # raises an exception).
140 # raises an exception).
141 self._cwd
141 self._cwd
142
142
143 def prefetch_parents(self):
143 def prefetch_parents(self):
144 """make sure the parents are loaded
144 """make sure the parents are loaded
145
145
146 Used to avoid a race condition.
146 Used to avoid a race condition.
147 """
147 """
148 self._pl
148 self._pl
149
149
150 @contextlib.contextmanager
150 @contextlib.contextmanager
151 def parentchange(self):
151 def parentchange(self):
152 """Context manager for handling dirstate parents.
152 """Context manager for handling dirstate parents.
153
153
154 If an exception occurs in the scope of the context manager,
154 If an exception occurs in the scope of the context manager,
155 the incoherent dirstate won't be written when wlock is
155 the incoherent dirstate won't be written when wlock is
156 released.
156 released.
157 """
157 """
158 self._parentwriters += 1
158 self._parentwriters += 1
159 yield
159 yield
160 # Typically we want the "undo" step of a context manager in a
160 # Typically we want the "undo" step of a context manager in a
161 # finally block so it happens even when an exception
161 # finally block so it happens even when an exception
162 # occurs. In this case, however, we only want to decrement
162 # occurs. In this case, however, we only want to decrement
163 # parentwriters if the code in the with statement exits
163 # parentwriters if the code in the with statement exits
164 # normally, so we don't have a try/finally here on purpose.
164 # normally, so we don't have a try/finally here on purpose.
165 self._parentwriters -= 1
165 self._parentwriters -= 1
166
166
167 def pendingparentchange(self):
167 def pendingparentchange(self):
168 """Returns true if the dirstate is in the middle of a set of changes
168 """Returns true if the dirstate is in the middle of a set of changes
169 that modify the dirstate parent.
169 that modify the dirstate parent.
170 """
170 """
171 return self._parentwriters > 0
171 return self._parentwriters > 0
172
172
173 @propertycache
173 @propertycache
174 def _map(self):
174 def _map(self):
175 """Return the dirstate contents (see documentation for dirstatemap)."""
175 """Return the dirstate contents (see documentation for dirstatemap)."""
176 self._map = self._mapcls(
176 self._map = self._mapcls(
177 self._ui,
177 self._ui,
178 self._opener,
178 self._opener,
179 self._root,
179 self._root,
180 self._nodeconstants,
180 self._nodeconstants,
181 self._use_dirstate_v2,
181 self._use_dirstate_v2,
182 )
182 )
183 return self._map
183 return self._map
184
184
185 @property
185 @property
186 def _sparsematcher(self):
186 def _sparsematcher(self):
187 """The matcher for the sparse checkout.
187 """The matcher for the sparse checkout.
188
188
189 The working directory may not include every file from a manifest. The
189 The working directory may not include every file from a manifest. The
190 matcher obtained by this property will match a path if it is to be
190 matcher obtained by this property will match a path if it is to be
191 included in the working directory.
191 included in the working directory.
192 """
192 """
193 # TODO there is potential to cache this property. For now, the matcher
193 # TODO there is potential to cache this property. For now, the matcher
194 # is resolved on every access. (But the called function does use a
194 # is resolved on every access. (But the called function does use a
195 # cache to keep the lookup fast.)
195 # cache to keep the lookup fast.)
196 return self._sparsematchfn()
196 return self._sparsematchfn()
197
197
198 @repocache(b'branch')
198 @repocache(b'branch')
199 def _branch(self):
199 def _branch(self):
200 try:
200 try:
201 return self._opener.read(b"branch").strip() or b"default"
201 return self._opener.read(b"branch").strip() or b"default"
202 except IOError as inst:
202 except IOError as inst:
203 if inst.errno != errno.ENOENT:
203 if inst.errno != errno.ENOENT:
204 raise
204 raise
205 return b"default"
205 return b"default"
206
206
207 @property
207 @property
208 def _pl(self):
208 def _pl(self):
209 return self._map.parents()
209 return self._map.parents()
210
210
211 def hasdir(self, d):
211 def hasdir(self, d):
212 return self._map.hastrackeddir(d)
212 return self._map.hastrackeddir(d)
213
213
214 @rootcache(b'.hgignore')
214 @rootcache(b'.hgignore')
215 def _ignore(self):
215 def _ignore(self):
216 files = self._ignorefiles()
216 files = self._ignorefiles()
217 if not files:
217 if not files:
218 return matchmod.never()
218 return matchmod.never()
219
219
220 pats = [b'include:%s' % f for f in files]
220 pats = [b'include:%s' % f for f in files]
221 return matchmod.match(self._root, b'', [], pats, warn=self._ui.warn)
221 return matchmod.match(self._root, b'', [], pats, warn=self._ui.warn)
222
222
223 @propertycache
223 @propertycache
224 def _slash(self):
224 def _slash(self):
225 return self._ui.configbool(b'ui', b'slash') and pycompat.ossep != b'/'
225 return self._ui.configbool(b'ui', b'slash') and pycompat.ossep != b'/'
226
226
227 @propertycache
227 @propertycache
228 def _checklink(self):
228 def _checklink(self):
229 return util.checklink(self._root)
229 return util.checklink(self._root)
230
230
231 @propertycache
231 @propertycache
232 def _checkexec(self):
232 def _checkexec(self):
233 return bool(util.checkexec(self._root))
233 return bool(util.checkexec(self._root))
234
234
235 @propertycache
235 @propertycache
236 def _checkcase(self):
236 def _checkcase(self):
237 return not util.fscasesensitive(self._join(b'.hg'))
237 return not util.fscasesensitive(self._join(b'.hg'))
238
238
239 def _join(self, f):
239 def _join(self, f):
240 # much faster than os.path.join()
240 # much faster than os.path.join()
241 # it's safe because f is always a relative path
241 # it's safe because f is always a relative path
242 return self._rootdir + f
242 return self._rootdir + f
243
243
244 def flagfunc(self, buildfallback):
244 def flagfunc(self, buildfallback):
245 """build a callable that returns flags associated with a filename
245 """build a callable that returns flags associated with a filename
246
246
247 The information is extracted from three possible layers:
247 The information is extracted from three possible layers:
248 1. the file system if it supports the information
248 1. the file system if it supports the information
249 2. the "fallback" information stored in the dirstate if any
249 2. the "fallback" information stored in the dirstate if any
250 3. a more expensive mechanism inferring the flags from the parents.
250 3. a more expensive mechanism inferring the flags from the parents.
251 """
251 """
252
252
253 # small hack to cache the result of buildfallback()
253 # small hack to cache the result of buildfallback()
254 fallback_func = []
254 fallback_func = []
255
255
256 def get_flags(x):
256 def get_flags(x):
257 entry = None
257 entry = None
258 fallback_value = None
258 fallback_value = None
259 try:
259 try:
260 st = os.lstat(self._join(x))
260 st = os.lstat(self._join(x))
261 except OSError:
261 except OSError:
262 return b''
262 return b''
263
263
264 if self._checklink:
264 if self._checklink:
265 if util.statislink(st):
265 if util.statislink(st):
266 return b'l'
266 return b'l'
267 else:
267 else:
268 entry = self.get_entry(x)
268 entry = self.get_entry(x)
269 if entry.has_fallback_symlink:
269 if entry.has_fallback_symlink:
270 if entry.fallback_symlink:
270 if entry.fallback_symlink:
271 return b'l'
271 return b'l'
272 else:
272 else:
273 if not fallback_func:
273 if not fallback_func:
274 fallback_func.append(buildfallback())
274 fallback_func.append(buildfallback())
275 fallback_value = fallback_func[0](x)
275 fallback_value = fallback_func[0](x)
276 if b'l' in fallback_value:
276 if b'l' in fallback_value:
277 return b'l'
277 return b'l'
278
278
279 if self._checkexec:
279 if self._checkexec:
280 if util.statisexec(st):
280 if util.statisexec(st):
281 return b'x'
281 return b'x'
282 else:
282 else:
283 if entry is None:
283 if entry is None:
284 entry = self.get_entry(x)
284 entry = self.get_entry(x)
285 if entry.has_fallback_exec:
285 if entry.has_fallback_exec:
286 if entry.fallback_exec:
286 if entry.fallback_exec:
287 return b'x'
287 return b'x'
288 else:
288 else:
289 if fallback_value is None:
289 if fallback_value is None:
290 if not fallback_func:
290 if not fallback_func:
291 fallback_func.append(buildfallback())
291 fallback_func.append(buildfallback())
292 fallback_value = fallback_func[0](x)
292 fallback_value = fallback_func[0](x)
293 if b'x' in fallback_value:
293 if b'x' in fallback_value:
294 return b'x'
294 return b'x'
295 return b''
295 return b''
296
296
297 return get_flags
297 return get_flags
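As the docstring above describes, the callable returned by flagfunc() yields b'l' for symlinks, b'x' for executables, and b'' otherwise. A hypothetical caller (function and argument names below are placeholders, not part of this module) might look like:

    def classify(dirstate, buildfallback, path):
        # Sketch only: ask the dirstate for a flag function, then query one path.
        get_flags = dirstate.flagfunc(buildfallback)
        flags = get_flags(path)
        if flags == b'l':
            return 'symlink'
        if flags == b'x':
            return 'executable'
        return 'regular'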
298
298
299 @propertycache
299 @propertycache
300 def _cwd(self):
300 def _cwd(self):
301 # internal config: ui.forcecwd
301 # internal config: ui.forcecwd
302 forcecwd = self._ui.config(b'ui', b'forcecwd')
302 forcecwd = self._ui.config(b'ui', b'forcecwd')
303 if forcecwd:
303 if forcecwd:
304 return forcecwd
304 return forcecwd
305 return encoding.getcwd()
305 return encoding.getcwd()
306
306
307 def getcwd(self):
307 def getcwd(self):
308 """Return the path from which a canonical path is calculated.
308 """Return the path from which a canonical path is calculated.
309
309
310 This path should be used to resolve file patterns or to convert
310 This path should be used to resolve file patterns or to convert
311 canonical paths back to file paths for display. It shouldn't be
311 canonical paths back to file paths for display. It shouldn't be
312 used to get real file paths. Use vfs functions instead.
312 used to get real file paths. Use vfs functions instead.
313 """
313 """
314 cwd = self._cwd
314 cwd = self._cwd
315 if cwd == self._root:
315 if cwd == self._root:
316 return b''
316 return b''
317 # self._root ends with a path separator if self._root is '/' or 'C:\'
317 # self._root ends with a path separator if self._root is '/' or 'C:\'
318 rootsep = self._root
318 rootsep = self._root
319 if not util.endswithsep(rootsep):
319 if not util.endswithsep(rootsep):
320 rootsep += pycompat.ossep
320 rootsep += pycompat.ossep
321 if cwd.startswith(rootsep):
321 if cwd.startswith(rootsep):
322 return cwd[len(rootsep) :]
322 return cwd[len(rootsep) :]
323 else:
323 else:
324 # we're outside the repo. return an absolute path.
324 # we're outside the repo. return an absolute path.
325 return cwd
325 return cwd
326
326
327 def pathto(self, f, cwd=None):
327 def pathto(self, f, cwd=None):
328 if cwd is None:
328 if cwd is None:
329 cwd = self.getcwd()
329 cwd = self.getcwd()
330 path = util.pathto(self._root, cwd, f)
330 path = util.pathto(self._root, cwd, f)
331 if self._slash:
331 if self._slash:
332 return util.pconvert(path)
332 return util.pconvert(path)
333 return path
333 return path
334
334
335 def __getitem__(self, key):
335 def __getitem__(self, key):
336 """Return the current state of key (a filename) in the dirstate.
336 """Return the current state of key (a filename) in the dirstate.
337
337
338 States are:
338 States are:
339 n normal
339 n normal
340 m needs merging
340 m needs merging
341 r marked for removal
341 r marked for removal
342 a marked for addition
342 a marked for addition
343 ? not tracked
343 ? not tracked
344
344
345 XXX The "state" is a bit obscure to be in the "public" API. we should
345 XXX The "state" is a bit obscure to be in the "public" API. we should
346 consider migrating all user of this to going through the dirstate entry
346 consider migrating all user of this to going through the dirstate entry
347 instead.
347 instead.
348 """
348 """
349 msg = b"don't use dirstate[file], use dirstate.get_entry(file)"
349 msg = b"don't use dirstate[file], use dirstate.get_entry(file)"
350 util.nouideprecwarn(msg, b'6.1', stacklevel=2)
350 util.nouideprecwarn(msg, b'6.1', stacklevel=2)
351 entry = self._map.get(key)
351 entry = self._map.get(key)
352 if entry is not None:
352 if entry is not None:
353 return entry.state
353 return entry.state
354 return b'?'
354 return b'?'
355
355
356 def get_entry(self, path):
356 def get_entry(self, path):
357 """return a DirstateItem for the associated path"""
357 """return a DirstateItem for the associated path"""
358 entry = self._map.get(path)
358 entry = self._map.get(path)
359 if entry is None:
359 if entry is None:
360 return DirstateItem()
360 return DirstateItem()
361 return entry
361 return entry
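Since dirstate[key] above is deprecated in favour of get_entry(), new code is expected to inspect the returned DirstateItem directly. A minimal sketch, with the path as a placeholder:

    def is_tracked(dirstate, path=b'some/file'):
        # Sketch only: get_entry() always returns a DirstateItem, even for
        # unknown paths, so no KeyError handling is needed.
        entry = dirstate.get_entry(path)
        return entry.tracked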
362
362
363 def __contains__(self, key):
363 def __contains__(self, key):
364 return key in self._map
364 return key in self._map
365
365
366 def __iter__(self):
366 def __iter__(self):
367 return iter(sorted(self._map))
367 return iter(sorted(self._map))
368
368
369 def items(self):
369 def items(self):
370 return pycompat.iteritems(self._map)
370 return pycompat.iteritems(self._map)
371
371
372 iteritems = items
372 iteritems = items
373
373
374 def parents(self):
374 def parents(self):
375 return [self._validate(p) for p in self._pl]
375 return [self._validate(p) for p in self._pl]
376
376
377 def p1(self):
377 def p1(self):
378 return self._validate(self._pl[0])
378 return self._validate(self._pl[0])
379
379
380 def p2(self):
380 def p2(self):
381 return self._validate(self._pl[1])
381 return self._validate(self._pl[1])
382
382
383 @property
383 @property
384 def in_merge(self):
384 def in_merge(self):
385 """True if a merge is in progress"""
385 """True if a merge is in progress"""
386 return self._pl[1] != self._nodeconstants.nullid
386 return self._pl[1] != self._nodeconstants.nullid
387
387
388 def branch(self):
388 def branch(self):
389 return encoding.tolocal(self._branch)
389 return encoding.tolocal(self._branch)
390
390
391 def setparents(self, p1, p2=None):
391 def setparents(self, p1, p2=None):
392 """Set dirstate parents to p1 and p2.
392 """Set dirstate parents to p1 and p2.
393
393
394 When moving from two parents to one, "merged" entries are
394 When moving from two parents to one, "merged" entries are
395 adjusted to normal and previous copy records discarded and
395 adjusted to normal and previous copy records discarded and
396 returned by the call.
396 returned by the call.
397
397
398 See localrepo.setparents()
398 See localrepo.setparents()
399 """
399 """
400 if p2 is None:
400 if p2 is None:
401 p2 = self._nodeconstants.nullid
401 p2 = self._nodeconstants.nullid
402 if self._parentwriters == 0:
402 if self._parentwriters == 0:
403 raise ValueError(
403 raise ValueError(
404 b"cannot set dirstate parent outside of "
404 b"cannot set dirstate parent outside of "
405 b"dirstate.parentchange context manager"
405 b"dirstate.parentchange context manager"
406 )
406 )
407
407
408 self._dirty = True
408 self._dirty = True
409 oldp2 = self._pl[1]
409 oldp2 = self._pl[1]
410 if self._origpl is None:
410 if self._origpl is None:
411 self._origpl = self._pl
411 self._origpl = self._pl
412 nullid = self._nodeconstants.nullid
412 nullid = self._nodeconstants.nullid
413 # True if we need to fold p2 related state back to a linear case
413 # True if we need to fold p2 related state back to a linear case
414 fold_p2 = oldp2 != nullid and p2 == nullid
414 fold_p2 = oldp2 != nullid and p2 == nullid
415 return self._map.setparents(p1, p2, fold_p2=fold_p2)
415 return self._map.setparents(p1, p2, fold_p2=fold_p2)
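Because setparents() raises a ValueError outside of a parentchange scope (see the check above), callers are expected to wrap it in the context manager. A minimal sketch, with `new_p1` as a placeholder node:

    def move_to_single_parent(dirstate, new_p1):
        # Sketch only: p2 defaults to nullid, which folds any merge state;
        # discarded copy records are returned by the call.
        with dirstate.parentchange():
            copies = dirstate.setparents(new_p1)
        return copies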
416
416
417 def setbranch(self, branch):
417 def setbranch(self, branch):
418 self.__class__._branch.set(self, encoding.fromlocal(branch))
418 self.__class__._branch.set(self, encoding.fromlocal(branch))
419 f = self._opener(b'branch', b'w', atomictemp=True, checkambig=True)
419 f = self._opener(b'branch', b'w', atomictemp=True, checkambig=True)
420 try:
420 try:
421 f.write(self._branch + b'\n')
421 f.write(self._branch + b'\n')
422 f.close()
422 f.close()
423
423
424 # make sure filecache has the correct stat info for _branch after
424 # make sure filecache has the correct stat info for _branch after
425 # replacing the underlying file
425 # replacing the underlying file
426 ce = self._filecache[b'_branch']
426 ce = self._filecache[b'_branch']
427 if ce:
427 if ce:
428 ce.refresh()
428 ce.refresh()
429 except: # re-raises
429 except: # re-raises
430 f.discard()
430 f.discard()
431 raise
431 raise
432
432
433 def invalidate(self):
433 def invalidate(self):
434 """Causes the next access to reread the dirstate.
434 """Causes the next access to reread the dirstate.
435
435
436 This is different from localrepo.invalidatedirstate() because it always
436 This is different from localrepo.invalidatedirstate() because it always
437 rereads the dirstate. Use localrepo.invalidatedirstate() if you want to
437 rereads the dirstate. Use localrepo.invalidatedirstate() if you want to
438 check whether the dirstate has changed before rereading it."""
438 check whether the dirstate has changed before rereading it."""
439
439
440 for a in ("_map", "_branch", "_ignore"):
440 for a in ("_map", "_branch", "_ignore"):
441 if a in self.__dict__:
441 if a in self.__dict__:
442 delattr(self, a)
442 delattr(self, a)
443 self._lastnormaltime = timestamp.zero()
443 self._lastnormaltime = timestamp.zero()
444 self._dirty = False
444 self._dirty = False
445 self._parentwriters = 0
445 self._parentwriters = 0
446 self._origpl = None
446 self._origpl = None
447
447
448 def copy(self, source, dest):
448 def copy(self, source, dest):
449 """Mark dest as a copy of source. Unmark dest if source is None."""
449 """Mark dest as a copy of source. Unmark dest if source is None."""
450 if source == dest:
450 if source == dest:
451 return
451 return
452 self._dirty = True
452 self._dirty = True
453 if source is not None:
453 if source is not None:
454 self._map.copymap[dest] = source
454 self._map.copymap[dest] = source
455 else:
455 else:
456 self._map.copymap.pop(dest, None)
456 self._map.copymap.pop(dest, None)
457
457
458 def copied(self, file):
458 def copied(self, file):
459 return self._map.copymap.get(file, None)
459 return self._map.copymap.get(file, None)
460
460
461 def copies(self):
461 def copies(self):
462 return self._map.copymap
462 return self._map.copymap
463
463
464 @requires_no_parents_change
464 @requires_no_parents_change
465 def set_tracked(self, filename):
465 def set_tracked(self, filename):
466 """a "public" method for generic code to mark a file as tracked
466 """a "public" method for generic code to mark a file as tracked
467
467
468 This function is to be called outside of the "update/merge" case. For
468 This function is to be called outside of the "update/merge" case. For
469 example by a command like `hg add X`.
469 example by a command like `hg add X`.
470
470
471 return True if the file was previously untracked, False otherwise.
471 return True if the file was previously untracked, False otherwise.
472 """
472 """
473 self._dirty = True
473 self._dirty = True
474 entry = self._map.get(filename)
474 entry = self._map.get(filename)
475 if entry is None or not entry.tracked:
475 if entry is None or not entry.tracked:
476 self._check_new_tracked_filename(filename)
476 self._check_new_tracked_filename(filename)
477 return self._map.set_tracked(filename)
477 return self._map.set_tracked(filename)
478
478
479 @requires_no_parents_change
479 @requires_no_parents_change
480 def set_untracked(self, filename):
480 def set_untracked(self, filename):
481 """a "public" method for generic code to mark a file as untracked
481 """a "public" method for generic code to mark a file as untracked
482
482
483 This function is to be called outside of the "update/merge" case. For
483 This function is to be called outside of the "update/merge" case. For
484 example by a command like `hg remove X`.
484 example by a command like `hg remove X`.
485
485
486 return True if the file was previously tracked, False otherwise.
486 return True if the file was previously tracked, False otherwise.
487 """
487 """
488 ret = self._map.set_untracked(filename)
488 ret = self._map.set_untracked(filename)
489 if ret:
489 if ret:
490 self._dirty = True
490 self._dirty = True
491 return ret
491 return ret
492
492
493 @requires_no_parents_change
493 @requires_no_parents_change
494 def set_clean(self, filename, parentfiledata=None):
494 def set_clean(self, filename, parentfiledata=None):
495 """record that the current state of the file on disk is known to be clean"""
495 """record that the current state of the file on disk is known to be clean"""
496 self._dirty = True
496 self._dirty = True
497 if parentfiledata:
497 if parentfiledata:
498 (mode, size, mtime) = parentfiledata
498 (mode, size, mtime) = parentfiledata
499 else:
499 else:
500 (mode, size, mtime) = self._get_filedata(filename)
500 (mode, size, mtime) = self._get_filedata(filename)
501 if not self._map[filename].tracked:
501 if not self._map[filename].tracked:
502 self._check_new_tracked_filename(filename)
502 self._check_new_tracked_filename(filename)
503 self._map.set_clean(filename, mode, size, mtime)
503 self._map.set_clean(filename, mode, size, mtime)
504 if mtime > self._lastnormaltime:
504 if mtime > self._lastnormaltime:
505 # Remember the most recent modification timeslot for status(),
505 # Remember the most recent modification timeslot for status(),
506 # to make sure we won't miss future size-preserving file content
506 # to make sure we won't miss future size-preserving file content
507 # modifications that happen within the same timeslot.
507 # modifications that happen within the same timeslot.
508 self._lastnormaltime = mtime
508 self._lastnormaltime = mtime
509
509
510 @requires_no_parents_change
510 @requires_no_parents_change
511 def set_possibly_dirty(self, filename):
511 def set_possibly_dirty(self, filename):
512 """record that the current state of the file on disk is unknown"""
512 """record that the current state of the file on disk is unknown"""
513 self._dirty = True
513 self._dirty = True
514 self._map.set_possibly_dirty(filename)
514 self._map.set_possibly_dirty(filename)
515
515
516 @requires_parents_change
516 @requires_parents_change
517 def update_file_p1(
517 def update_file_p1(
518 self,
518 self,
519 filename,
519 filename,
520 p1_tracked,
520 p1_tracked,
521 ):
521 ):
522 """Set a file as tracked in the parent (or not)
522 """Set a file as tracked in the parent (or not)
523
523
524 This is to be called when adjusting the dirstate to a new parent after a history
524 This is to be called when adjusting the dirstate to a new parent after a history
525 rewriting operation.
525 rewriting operation.
526
526
527 It should not be called during a merge (p2 != nullid) and only within
527 It should not be called during a merge (p2 != nullid) and only within
528 a `with dirstate.parentchange():` context.
528 a `with dirstate.parentchange():` context.
529 """
529 """
530 if self.in_merge:
530 if self.in_merge:
531 msg = b'update_file_p1 should not be called when merging'
531 msg = b'update_file_p1 should not be called when merging'
532 raise error.ProgrammingError(msg)
532 raise error.ProgrammingError(msg)
533 entry = self._map.get(filename)
533 entry = self._map.get(filename)
534 if entry is None:
534 if entry is None:
535 wc_tracked = False
535 wc_tracked = False
536 else:
536 else:
537 wc_tracked = entry.tracked
537 wc_tracked = entry.tracked
538 if not (p1_tracked or wc_tracked):
538 if not (p1_tracked or wc_tracked):
539 # the file is no longer relevant to anyone
539 # the file is no longer relevant to anyone
540 if self._map.get(filename) is not None:
540 if self._map.get(filename) is not None:
541 self._map.reset_state(filename)
541 self._map.reset_state(filename)
542 self._dirty = True
542 self._dirty = True
543 elif (not p1_tracked) and wc_tracked:
543 elif (not p1_tracked) and wc_tracked:
544 if entry is not None and entry.added:
544 if entry is not None and entry.added:
545 return # avoid dropping copy information (maybe?)
545 return # avoid dropping copy information (maybe?)
546
546
547 parentfiledata = None
547 parentfiledata = None
548 if wc_tracked and p1_tracked:
548 if wc_tracked and p1_tracked:
549 parentfiledata = self._get_filedata(filename)
549 parentfiledata = self._get_filedata(filename)
550
550
551 self._map.reset_state(
551 self._map.reset_state(
552 filename,
552 filename,
553 wc_tracked,
553 wc_tracked,
554 p1_tracked,
554 p1_tracked,
555 # the underlying reference might have changed, we will have to
555 # the underlying reference might have changed, we will have to
556 # check it.
556 # check it.
557 has_meaningful_mtime=False,
557 has_meaningful_mtime=False,
558 parentfiledata=parentfiledata,
558 parentfiledata=parentfiledata,
559 )
559 )
560 if (
560 if (
561 parentfiledata is not None
561 parentfiledata is not None
562 and parentfiledata[2] > self._lastnormaltime
562 and parentfiledata[2] > self._lastnormaltime
563 ):
563 ):
564 # Remember the most recent modification timeslot for status(),
564 # Remember the most recent modification timeslot for status(),
565 # to make sure we won't miss future size-preserving file content
565 # to make sure we won't miss future size-preserving file content
566 # modifications that happen within the same timeslot.
566 # modifications that happen within the same timeslot.
567 self._lastnormaltime = parentfiledata[2]
567 self._lastnormaltime = parentfiledata[2]
568
568
569 @requires_parents_change
569 @requires_parents_change
570 def update_file(
570 def update_file(
571 self,
571 self,
572 filename,
572 filename,
573 wc_tracked,
573 wc_tracked,
574 p1_tracked,
574 p1_tracked,
575 p2_info=False,
575 p2_info=False,
576 possibly_dirty=False,
576 possibly_dirty=False,
577 parentfiledata=None,
577 parentfiledata=None,
578 ):
578 ):
579 """update the information about a file in the dirstate
579 """update the information about a file in the dirstate
580
580
581 This is to be called when the direstates parent changes to keep track
581 This is to be called when the dirstate's parent changes to keep track
581 This is to be called when the dirstate's parent changes to keep track
582 of the file's situation with regard to the working copy and its parent.
582 of the file's situation with regard to the working copy and its parent.
583
584 This function must be called within a `dirstate.parentchange` context.
584 This function must be called within a `dirstate.parentchange` context.
585
585
586 note: the API is at an early stage and we might need to adjust it
586 note: the API is at an early stage and we might need to adjust it
587 depending on what information ends up being relevant and useful to
587 depending on what information ends up being relevant and useful to
588 other processing.
588 other processing.
589 """
589 """
590
590
591 # note: I do not think we need to double check name clash here since we
591 # note: I do not think we need to double check name clash here since we
592 # are in a update/merge case that should already have taken care of
592 # are in a update/merge case that should already have taken care of
593 # this. The test agrees
593 # this. The test agrees
594
594
595 self._dirty = True
595 self._dirty = True
596
596
597 need_parent_file_data = (
597 need_parent_file_data = (
598 not possibly_dirty and not p2_info and wc_tracked and p1_tracked
598 not possibly_dirty and not p2_info and wc_tracked and p1_tracked
599 )
599 )
600
600
601 if need_parent_file_data and parentfiledata is None:
601 if need_parent_file_data and parentfiledata is None:
602 parentfiledata = self._get_filedata(filename)
602 parentfiledata = self._get_filedata(filename)
603
603
604 self._map.reset_state(
604 self._map.reset_state(
605 filename,
605 filename,
606 wc_tracked,
606 wc_tracked,
607 p1_tracked,
607 p1_tracked,
608 p2_info=p2_info,
608 p2_info=p2_info,
609 has_meaningful_mtime=not possibly_dirty,
609 has_meaningful_mtime=not possibly_dirty,
610 parentfiledata=parentfiledata,
610 parentfiledata=parentfiledata,
611 )
611 )
612 if (
612 if (
613 parentfiledata is not None
613 parentfiledata is not None
614 and parentfiledata[2] is not None
614 and parentfiledata[2] > self._lastnormaltime
615 and parentfiledata[2] > self._lastnormaltime
615 ):
616 ):
616 # Remember the most recent modification timeslot for status(),
617 # Remember the most recent modification timeslot for status(),
617 # to make sure we won't miss future size-preserving file content
618 # to make sure we won't miss future size-preserving file content
618 # modifications that happen within the same timeslot.
619 # modifications that happen within the same timeslot.
619 self._lastnormaltime = parentfiledata[2]
620 self._lastnormaltime = parentfiledata[2]
620
621
    def _check_new_tracked_filename(self, filename):
        scmutil.checkfilename(filename)
        if self._map.hastrackeddir(filename):
            msg = _(b'directory %r already in dirstate')
            msg %= pycompat.bytestr(filename)
            raise error.Abort(msg)
        # shadows
        for d in pathutil.finddirs(filename):
            if self._map.hastrackeddir(d):
                break
            entry = self._map.get(d)
            if entry is not None and not entry.removed:
                msg = _(b'file %r in dirstate clashes with %r')
                msg %= (pycompat.bytestr(d), pycompat.bytestr(filename))
                raise error.Abort(msg)

    def _get_filedata(self, filename):
        """return the (mode, size, mtime) tuple to record for filename"""
        s = os.lstat(self._join(filename))
        mode = s.st_mode
        size = s.st_size
        mtime = timestamp.mtime_of(s)
        return (mode, size, mtime)

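A small illustration of the parentfiledata tuple built here; the path is hypothetical, and the None mtime shows the case this changeset now tolerates:

import os

st = os.lstat('foo.txt')                          # hypothetical file
parentfiledata = (st.st_mode, st.st_size, None)   # (mode, size, mtime); mtime unknown here
mode, size, mtime = parentfiledata
if mtime is not None:
    pass  # only then is it compared against _lastnormaltime in update_file()
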
    def _discoverpath(self, path, normed, ignoremissing, exists, storemap):
        if exists is None:
            exists = os.path.lexists(os.path.join(self._root, path))
        if not exists:
            # Maybe a path component exists
            if not ignoremissing and b'/' in path:
                d, f = path.rsplit(b'/', 1)
                d = self._normalize(d, False, ignoremissing, None)
                folded = d + b"/" + f
            else:
                # No path components, preserve original case
                folded = path
        else:
            # recursively normalize leading directory components
            # against dirstate
            if b'/' in normed:
                d, f = normed.rsplit(b'/', 1)
                d = self._normalize(d, False, ignoremissing, True)
                r = self._root + b"/" + d
                folded = d + b"/" + util.fspath(f, r)
            else:
                folded = util.fspath(normed, self._root)
            storemap[normed] = folded

        return folded

    def _normalizefile(self, path, isknown, ignoremissing=False, exists=None):
        normed = util.normcase(path)
        folded = self._map.filefoldmap.get(normed, None)
        if folded is None:
            if isknown:
                folded = path
            else:
                folded = self._discoverpath(
                    path, normed, ignoremissing, exists, self._map.filefoldmap
                )
        return folded

    def _normalize(self, path, isknown, ignoremissing=False, exists=None):
        normed = util.normcase(path)
        folded = self._map.filefoldmap.get(normed, None)
        if folded is None:
            folded = self._map.dirfoldmap.get(normed, None)
        if folded is None:
            if isknown:
                folded = path
            else:
                # store discovered result in dirfoldmap so that future
                # normalizefile calls don't start matching directories
                folded = self._discoverpath(
                    path, normed, ignoremissing, exists, self._map.dirfoldmap
                )
        return folded

    def normalize(self, path, isknown=False, ignoremissing=False):
        """
        normalize the case of a pathname when on a casefolding filesystem

        isknown specifies whether the filename came from walking the
        disk, to avoid extra filesystem access.

        If ignoremissing is True, missing paths are returned
        unchanged. Otherwise, we try harder to normalize possibly
        existing path components.

        The normalized case is determined based on the following precedence:

        - version of name already stored in the dirstate
        - version of name stored on disk
        - version provided via command arguments
        """

        if self._checkcase:
            return self._normalize(path, isknown, ignoremissing)
        return path

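An illustrative call, assuming a case-insensitive filesystem and a file already tracked as b'Foo.TXT' (both assumptions, not taken from the source):

name = repo.dirstate.normalize(b'foo.txt')  # -> b'Foo.TXT', the spelling the dirstate already stores
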
    def clear(self):
        self._map.clear()
        self._lastnormaltime = timestamp.zero()
        self._dirty = True

    def rebuild(self, parent, allfiles, changedfiles=None):
        if changedfiles is None:
            # Rebuild entire dirstate
            to_lookup = allfiles
            to_drop = []
            lastnormaltime = self._lastnormaltime
            self.clear()
            self._lastnormaltime = lastnormaltime
        elif len(changedfiles) < 10:
            # Avoid turning allfiles into a set, which can be expensive if it's
            # large.
            to_lookup = []
            to_drop = []
            for f in changedfiles:
                if f in allfiles:
                    to_lookup.append(f)
                else:
                    to_drop.append(f)
        else:
            changedfilesset = set(changedfiles)
            to_lookup = changedfilesset & set(allfiles)
            to_drop = changedfilesset - to_lookup

        if self._origpl is None:
            self._origpl = self._pl
        self._map.setparents(parent, self._nodeconstants.nullid)

        for f in to_lookup:

            if self.in_merge:
                self.set_tracked(f)
            else:
                self._map.reset_state(
                    f,
                    wc_tracked=True,
                    p1_tracked=True,
                )
        for f in to_drop:
            self._map.reset_state(f)

        self._dirty = True

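A worked example of the bookkeeping rebuild() performs for a small changedfiles list (the file names are made up):

allfiles = {b'a', b'b', b'c'}
changedfiles = [b'b', b'gone']
to_lookup = [f for f in changedfiles if f in allfiles]    # [b'b']    -> reset as tracked in the dirstate
to_drop = [f for f in changedfiles if f not in allfiles]  # [b'gone'] -> dropped from the dirstate
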
    def identity(self):
        """Return identity of dirstate itself to detect changes in storage

        If the identity of the previous dirstate is equal to this one, writing
        out changes based on the former dirstate can preserve consistency.
        """
        return self._map.identity

    def write(self, tr):
        if not self._dirty:
            return

        filename = self._filename
        if tr:
            # 'dirstate.write()' is not only for writing in-memory
            # changes out, but also for dropping ambiguous timestamps.
            # Delayed writing re-raises the "ambiguous timestamp" issue.
            # See also the wiki page below for detail:
            # https://www.mercurial-scm.org/wiki/DirstateTransactionPlan

            # record when mtimes start to be ambiguous
            now = _getfsnow(self._opener)

            # delay writing in-memory changes out
            tr.addfilegenerator(
                b'dirstate',
                (self._filename,),
                lambda f: self._writedirstate(tr, f, now=now),
                location=b'plain',
            )
            return

        st = self._opener(filename, b"w", atomictemp=True, checkambig=True)
        self._writedirstate(tr, st)

    def addparentchangecallback(self, category, callback):
        """add a callback to be called when the wd parents are changed

        Callback will be called with the following arguments:
            dirstate, (oldp1, oldp2), (newp1, newp2)

        Category is a unique identifier to allow overwriting an old callback
        with a newer callback.
        """
        self._plchangecallbacks[category] = callback

    def _writedirstate(self, tr, st, now=None):
        # notify callbacks about parents change
        if self._origpl is not None and self._origpl != self._pl:
            for c, callback in sorted(
                pycompat.iteritems(self._plchangecallbacks)
            ):
                callback(self, self._origpl, self._pl)
            self._origpl = None

        if now is None:
            # use the modification time of the newly created temporary file as the
            # filesystem's notion of 'now'
            now = timestamp.mtime_of(util.fstat(st))

        # a large enough 'delaywrite' prevents 'pack_dirstate' from dropping
        # the timestamp of each entry in the dirstate, because of 'now > mtime'
        delaywrite = self._ui.configint(b'debug', b'dirstate.delaywrite')
        if delaywrite > 0:
            # do we have any files to delay for?
            for f, e in pycompat.iteritems(self._map):
                if e.need_delay(now):
                    import time  # to avoid useless import

                    # rather than sleep n seconds, sleep until the next
                    # multiple of n seconds
                    clock = time.time()
                    start = int(clock) - (int(clock) % delaywrite)
                    end = start + delaywrite
                    time.sleep(end - clock)
                    # trust our estimate that the end is near now
                    now = timestamp.timestamp((end, 0))
                    break

        self._map.write(tr, st, now)
        self._lastnormaltime = timestamp.zero()
        self._dirty = False

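A worked example of the delaywrite rounding above, with illustrative numbers:

delaywrite = 2   # debug.dirstate.delaywrite, in seconds
clock = 12.3     # time.time() at the moment an entry needs delaying
start = int(clock) - (int(clock) % delaywrite)  # 12
end = start + delaywrite                        # 14
sleep_for = end - clock                         # 1.7s, i.e. until the next multiple of 2 seconds
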
    def _dirignore(self, f):
        if self._ignore(f):
            return True
        for p in pathutil.finddirs(f):
            if self._ignore(p):
                return True
        return False

    def _ignorefiles(self):
        files = []
        if os.path.exists(self._join(b'.hgignore')):
            files.append(self._join(b'.hgignore'))
        for name, path in self._ui.configitems(b"ui"):
            if name == b'ignore' or name.startswith(b'ignore.'):
                # we need to use os.path.join here rather than self._join
                # because path is arbitrary and user-specified
                files.append(os.path.join(self._rootdir, util.expandpath(path)))
        return files

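For reference, a sketch of the configuration this reads; the paths below are examples, not defaults:

# Ignore sources come from .hgignore plus any ui.ignore / ui.ignore.* config keys,
# e.g. an hgrc containing:
#   [ui]
#   ignore = ~/.hgignore-global
#   ignore.work = /etc/mercurial/ignore-work
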
    def _ignorefileandline(self, f):
        files = collections.deque(self._ignorefiles())
        visited = set()
        while files:
            i = files.popleft()
            patterns = matchmod.readpatternfile(
                i, self._ui.warn, sourceinfo=True
            )
            for pattern, lineno, line in patterns:
                kind, p = matchmod._patsplit(pattern, b'glob')
                if kind == b"subinclude":
                    if p not in visited:
                        files.append(p)
                    continue
                m = matchmod.match(
                    self._root, b'', [], [pattern], warn=self._ui.warn
                )
                if m(f):
                    return (i, lineno, line)
            visited.add(i)
        return (None, -1, b"")

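An illustrative query (the path is hypothetical); this is the kind of lookup `hg debugignore` performs:

ignore_file, lineno, line = repo.dirstate._ignorefileandline(b'build/output.o')
if ignore_file is None:
    pass  # not ignored by any pattern
else:
    pass  # ignored by pattern `line` at `ignore_file`:`lineno`
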
    def _walkexplicit(self, match, subrepos):
        """Get stat data about the files explicitly specified by match.

        Return a triple (results, dirsfound, dirsnotfound).
        - results is a mapping from filename to stat result. It also contains
          listings mapping subrepos and .hg to None.
        - dirsfound is a list of files found to be directories.
        - dirsnotfound is a list of files that the dirstate thinks are
          directories and that were not found."""

        def badtype(mode):
            kind = _(b'unknown')
            if stat.S_ISCHR(mode):
                kind = _(b'character device')
            elif stat.S_ISBLK(mode):
                kind = _(b'block device')
            elif stat.S_ISFIFO(mode):
                kind = _(b'fifo')
            elif stat.S_ISSOCK(mode):
                kind = _(b'socket')
            elif stat.S_ISDIR(mode):
                kind = _(b'directory')
            return _(b'unsupported file type (type is %s)') % kind

        badfn = match.bad
        dmap = self._map
        lstat = os.lstat
        getkind = stat.S_IFMT
        dirkind = stat.S_IFDIR
        regkind = stat.S_IFREG
        lnkkind = stat.S_IFLNK
        join = self._join
        dirsfound = []
        foundadd = dirsfound.append
        dirsnotfound = []
        notfoundadd = dirsnotfound.append

        if not match.isexact() and self._checkcase:
            normalize = self._normalize
        else:
            normalize = None

        files = sorted(match.files())
        subrepos.sort()
        i, j = 0, 0
        while i < len(files) and j < len(subrepos):
            subpath = subrepos[j] + b"/"
            if files[i] < subpath:
                i += 1
                continue
            while i < len(files) and files[i].startswith(subpath):
                del files[i]
            j += 1

        if not files or b'' in files:
            files = [b'']
            # constructing the foldmap is expensive, so don't do it for the
            # common case where files is ['']
            normalize = None
        results = dict.fromkeys(subrepos)
        results[b'.hg'] = None

        for ff in files:
            if normalize:
                nf = normalize(ff, False, True)
            else:
                nf = ff
            if nf in results:
                continue

            try:
                st = lstat(join(nf))
                kind = getkind(st.st_mode)
                if kind == dirkind:
                    if nf in dmap:
                        # file replaced by dir on disk but still in dirstate
                        results[nf] = None
                    foundadd((nf, ff))
                elif kind == regkind or kind == lnkkind:
                    results[nf] = st
                else:
                    badfn(ff, badtype(kind))
                    if nf in dmap:
                        results[nf] = None
            except OSError as inst:  # nf not found on disk - it is dirstate only
                if nf in dmap:  # does it exactly match a missing file?
                    results[nf] = None
                else:  # does it match a missing directory?
                    if self._map.hasdir(nf):
                        notfoundadd(nf)
                    else:
                        badfn(ff, encoding.strtolocal(inst.strerror))

        # match.files() may contain explicitly-specified paths that shouldn't
        # be taken; drop them from the list of files found. dirsfound/notfound
        # aren't filtered here because they will be tested later.
        if match.anypats():
            for f in list(results):
                if f == b'.hg' or f in subrepos:
                    # keep sentinel to disable further out-of-repo walks
                    continue
                if not match(f):
                    del results[f]

        # Case insensitive filesystems cannot rely on lstat() failing to detect
        # a case-only rename. Prune the stat object for any file that does not
        # match the case in the filesystem, if there are multiple files that
        # normalize to the same path.
        if match.isexact() and self._checkcase:
            normed = {}

            for f, st in pycompat.iteritems(results):
                if st is None:
                    continue

                nc = util.normcase(f)
                paths = normed.get(nc)

                if paths is None:
                    paths = set()
                    normed[nc] = paths

                paths.add(f)

            for norm, paths in pycompat.iteritems(normed):
                if len(paths) > 1:
                    for path in paths:
                        folded = self._discoverpath(
                            path, norm, True, None, self._map.dirfoldmap
                        )
                        if path != folded:
                            results[path] = None

        return results, dirsfound, dirsnotfound

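A sketch of the triple this returns, with invented paths:

# results       -> {b'README': <lstat result>, b'.hg': None, b'sub/repo': None}
# dirsfound     -> [(b'src', b'src')]    # explicit arguments that turned out to be directories
# dirsnotfound  -> [b'olddir']           # dirstate thinks it is a directory, nothing on disk
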
    def walk(self, match, subrepos, unknown, ignored, full=True):
        """
        Walk recursively through the directory tree, finding all files
        matched by match.

        If full is False, maybe skip some known-clean files.

        Return a dict mapping filename to stat-like object (either
        mercurial.osutil.stat instance or return value of os.stat()).

        """
        # full is a flag that extensions that hook into walk can use -- this
        # implementation doesn't use it at all. This satisfies the contract
        # because we only guarantee a "maybe".

        if ignored:
            ignore = util.never
            dirignore = util.never
        elif unknown:
            ignore = self._ignore
            dirignore = self._dirignore
        else:
            # if not unknown and not ignored, drop dir recursion and step 2
            ignore = util.always
            dirignore = util.always

        matchfn = match.matchfn
        matchalways = match.always()
        matchtdir = match.traversedir
        dmap = self._map
        listdir = util.listdir
        lstat = os.lstat
        dirkind = stat.S_IFDIR
        regkind = stat.S_IFREG
        lnkkind = stat.S_IFLNK
        join = self._join

        exact = skipstep3 = False
        if match.isexact():  # match.exact
            exact = True
            dirignore = util.always  # skip step 2
        elif match.prefix():  # match.match, no patterns
            skipstep3 = True

        if not exact and self._checkcase:
            normalize = self._normalize
            normalizefile = self._normalizefile
            skipstep3 = False
        else:
            normalize = self._normalize
            normalizefile = None

        # step 1: find all explicit files
        results, work, dirsnotfound = self._walkexplicit(match, subrepos)
        if matchtdir:
            for d in work:
                matchtdir(d[0])
            for d in dirsnotfound:
                matchtdir(d)

        skipstep3 = skipstep3 and not (work or dirsnotfound)
        work = [d for d in work if not dirignore(d[0])]

        # step 2: visit subdirectories
        def traverse(work, alreadynormed):
            wadd = work.append
            while work:
                tracing.counter('dirstate.walk work', len(work))
                nd = work.pop()
                visitentries = match.visitchildrenset(nd)
                if not visitentries:
                    continue
                if visitentries == b'this' or visitentries == b'all':
                    visitentries = None
                skip = None
                if nd != b'':
                    skip = b'.hg'
                try:
                    with tracing.log('dirstate.walk.traverse listdir %s', nd):
                        entries = listdir(join(nd), stat=True, skip=skip)
                except OSError as inst:
                    if inst.errno in (errno.EACCES, errno.ENOENT):
                        match.bad(
                            self.pathto(nd), encoding.strtolocal(inst.strerror)
                        )
                        continue
                    raise
                for f, kind, st in entries:
                    # Some matchers may return files in the visitentries set,
                    # instead of 'this', if the matcher explicitly mentions them
                    # and is not an exactmatcher. This is acceptable; we do not
                    # make any hard assumptions about file-or-directory below
                    # based on the presence of `f` in visitentries. If
                    # visitchildrenset returned a set, we can always skip the
                    # entries *not* in the set it provided regardless of whether
                    # they're actually a file or a directory.
                    if visitentries and f not in visitentries:
                        continue
                    if normalizefile:
                        # even though f might be a directory, we're only
                        # interested in comparing it to files currently in the
                        # dmap -- therefore normalizefile is enough
                        nf = normalizefile(
                            nd and (nd + b"/" + f) or f, True, True
                        )
                    else:
                        nf = nd and (nd + b"/" + f) or f
                    if nf not in results:
                        if kind == dirkind:
                            if not ignore(nf):
                                if matchtdir:
                                    matchtdir(nf)
                                wadd(nf)
                            if nf in dmap and (matchalways or matchfn(nf)):
                                results[nf] = None
                        elif kind == regkind or kind == lnkkind:
                            if nf in dmap:
                                if matchalways or matchfn(nf):
                                    results[nf] = st
                            elif (matchalways or matchfn(nf)) and not ignore(
                                nf
                            ):
                                # unknown file -- normalize if necessary
                                if not alreadynormed:
                                    nf = normalize(nf, False, True)
                                results[nf] = st
                        elif nf in dmap and (matchalways or matchfn(nf)):
                            results[nf] = None

        for nd, d in work:
            # alreadynormed means that processwork doesn't have to do any
            # expensive directory normalization
            alreadynormed = not normalize or nd == d
            traverse([d], alreadynormed)

        for s in subrepos:
            del results[s]
        del results[b'.hg']

        # step 3: visit remaining files from dmap
        if not skipstep3 and not exact:
            # If a dmap file is not in results yet, it was either
            # a) not matching matchfn, b) ignored, c) missing, or d) under a
            # symlink directory.
            if not results and matchalways:
                visit = [f for f in dmap]
            else:
                visit = [f for f in dmap if f not in results and matchfn(f)]
            visit.sort()

            if unknown:
                # unknown == True means we walked all dirs under the roots
                # that weren't ignored, and everything that matched was stat'ed
                # and is already in results.
                # The rest must thus be ignored or under a symlink.
                audit_path = pathutil.pathauditor(self._root, cached=True)

                for nf in iter(visit):
                    # If a stat for the same file was already added with a
                    # different case, don't add one for this, since that would
                    # make it appear as if the file exists under both names
                    # on disk.
                    if (
                        normalizefile
                        and normalizefile(nf, True, True) in results
                    ):
                        results[nf] = None
                    # Report ignored items in the dmap as long as they are not
                    # under a symlink directory.
                    elif audit_path.check(nf):
                        try:
                            results[nf] = lstat(join(nf))
                            # file was just ignored, no links, and exists
                        except OSError:
                            # file doesn't exist
                            results[nf] = None
                    else:
                        # It's either missing or under a symlink directory
                        # which we in this case report as missing
                        results[nf] = None
            else:
                # We may not have walked the full directory tree above,
                # so stat and check everything we missed.
                iv = iter(visit)
                for st in util.statfiles([join(i) for i in visit]):
                    results[next(iv)] = st
        return results

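A minimal sketch of a full walk, the way status() drives it further below; the repo object is assumed:

m = matchmod.always()
results = repo.dirstate.walk(m, subrepos=[], unknown=True, ignored=False)
# maps each filename to a stat-like object, or None where only the dirstate knows about it
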
    def _rust_status(self, matcher, list_clean, list_ignored, list_unknown):
        # Force Rayon (Rust parallelism library) to respect the number of
        # workers. This is a temporary workaround until Rust code knows
        # how to read the config file.
        numcpus = self._ui.configint(b"worker", b"numcpus")
        if numcpus is not None:
            encoding.environ.setdefault(b'RAYON_NUM_THREADS', b'%d' % numcpus)

        workers_enabled = self._ui.configbool(b"worker", b"enabled", True)
        if not workers_enabled:
            encoding.environ[b"RAYON_NUM_THREADS"] = b"1"

        (
            lookup,
            modified,
            added,
            removed,
            deleted,
            clean,
            ignored,
            unknown,
            warnings,
            bad,
            traversed,
            dirty,
        ) = rustmod.status(
            self._map._map,
            matcher,
            self._rootdir,
            self._ignorefiles(),
            self._checkexec,
            self._lastnormaltime,
            bool(list_clean),
            bool(list_ignored),
            bool(list_unknown),
            bool(matcher.traversedir),
        )

        self._dirty |= dirty

        if matcher.traversedir:
            for dir in traversed:
                matcher.traversedir(dir)

        if self._ui.warn:
            for item in warnings:
                if isinstance(item, tuple):
                    file_path, syntax = item
                    msg = _(b"%s: ignoring invalid syntax '%s'\n") % (
                        file_path,
                        syntax,
                    )
                    self._ui.warn(msg)
                else:
                    msg = _(b"skipping unreadable pattern file '%s': %s\n")
                    self._ui.warn(
                        msg
                        % (
                            pathutil.canonpath(
                                self._rootdir, self._rootdir, item
                            ),
                            b"No such file or directory",
                        )
                    )

        for (fn, message) in bad:
            matcher.bad(fn, encoding.strtolocal(message))

        status = scmutil.status(
            modified=modified,
            added=added,
            removed=removed,
            deleted=deleted,
            unknown=unknown,
            ignored=ignored,
            clean=clean,
        )
        return (lookup, status)

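A sketch of the configuration knobs honoured above, with example values:

# From an hgrc such as:
#   [worker]
#   numcpus = 4       # exported as RAYON_NUM_THREADS for the Rust status path
#   enabled = False   # forces RAYON_NUM_THREADS=1
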
    def status(self, match, subrepos, ignored, clean, unknown):
        """Determine the status of the working copy relative to the
        dirstate and return a pair of (unsure, status), where status is of type
        scmutil.status and:

          unsure:
            files that might have been modified since the dirstate was
            written, but need to be read to be sure (size is the same
            but mtime differs)
          status.modified:
            files that have definitely been modified since the dirstate
            was written (different size or mode)
          status.clean:
            files that have definitely not been modified since the
            dirstate was written
        """
        listignored, listclean, listunknown = ignored, clean, unknown
        lookup, modified, added, unknown, ignored = [], [], [], [], []
        removed, deleted, clean = [], [], []

        dmap = self._map
        dmap.preload()

        use_rust = True

        allowed_matchers = (
            matchmod.alwaysmatcher,
            matchmod.exactmatcher,
            matchmod.includematcher,
        )

        if rustmod is None:
            use_rust = False
        elif self._checkcase:
            # Case-insensitive filesystems are not handled yet
            use_rust = False
        elif subrepos:
            use_rust = False
        elif sparse.enabled:
            use_rust = False
        elif not isinstance(match, allowed_matchers):
            # Some matchers have yet to be implemented
            use_rust = False

        if use_rust:
            try:
                return self._rust_status(
                    match, listclean, listignored, listunknown
                )
            except rustmod.FallbackError:
                pass

        def noop(f):
            pass

        dcontains = dmap.__contains__
        dget = dmap.__getitem__
        ladd = lookup.append  # aka "unsure"
        madd = modified.append
        aadd = added.append
        uadd = unknown.append if listunknown else noop
        iadd = ignored.append if listignored else noop
        radd = removed.append
        dadd = deleted.append
        cadd = clean.append if listclean else noop
        mexact = match.exact
        dirignore = self._dirignore
        checkexec = self._checkexec
        checklink = self._checklink
        copymap = self._map.copymap
        lastnormaltime = self._lastnormaltime

        # We need to do full walks when either
        # - we're listing all clean files, or
        # - match.traversedir does something, because match.traversedir should
        #   be called for every dir in the working dir
        full = listclean or match.traversedir is not None
        for fn, st in pycompat.iteritems(
            self.walk(match, subrepos, listunknown, listignored, full=full)
        ):
            if not dcontains(fn):
                if (listignored or mexact(fn)) and dirignore(fn):
                    if listignored:
                        iadd(fn)
                else:
                    uadd(fn)
                continue

            t = dget(fn)
            mode = t.mode
            size = t.size

            if not st and t.tracked:
                dadd(fn)
            elif t.p2_info:
                madd(fn)
            elif t.added:
                aadd(fn)
            elif t.removed:
                radd(fn)
            elif t.tracked:
                if not checklink and t.has_fallback_symlink:
                    # If the file system does not support symlink, the mode
                    # might not be correctly stored in the dirstate, so do not
                    # trust it.
                    ladd(fn)
                elif not checkexec and t.has_fallback_exec:
                    # If the file system does not support exec bits, the mode
                    # might not be correctly stored in the dirstate, so do not
                    # trust it.
                    ladd(fn)
                elif (
                    size >= 0
                    and (
                        (size != st.st_size and size != st.st_size & _rangemask)
                        or ((mode ^ st.st_mode) & 0o100 and checkexec)
                    )
                    or fn in copymap
                ):
                    if stat.S_ISLNK(st.st_mode) and size != st.st_size:
                        # issue6456: Size returned may be longer due to
                        # encryption on EXT-4 fscrypt, undecided.
                        ladd(fn)
                    else:
                        madd(fn)
                elif not t.mtime_likely_equal_to(timestamp.mtime_of(st)):
                    ladd(fn)
                elif timestamp.mtime_of(st) == lastnormaltime:
                    # fn may have just been marked as normal and it may have
                    # changed in the same second without changing its size.
                    # This can happen if we quickly do multiple commits.
                    # Force lookup, so we don't miss such a racy file change.
                    ladd(fn)
                elif listclean:
                    cadd(fn)
        status = scmutil.status(
            modified, added, removed, deleted, unknown, ignored, clean
        )
        return (lookup, status)

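A sketch of consuming the (unsure, status) pair; the repo object and the matcher choice are assumptions:

unsure, st = repo.dirstate.status(
    matchmod.always(), subrepos=[], ignored=False, clean=False, unknown=True
)
for f in unsure:
    pass  # same size as recorded but ambiguous mtime: the caller re-reads the file to decide
print(len(st.modified), len(st.added), len(st.removed))
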
    def matches(self, match):
        """
        return files in the dirstate (in whatever state) filtered by match
        """
        dmap = self._map
        if rustmod is not None:
            dmap = self._map._map

        if match.always():
            return dmap.keys()
        files = match.files()
        if match.isexact():
            # fast path -- filter the other way around, since typically files is
            # much smaller than dmap
            return [f for f in files if f in dmap]
        if match.prefix() and all(fn in dmap for fn in files):
            # fast path -- all the values are known to be files, so just return
            # that
            return list(files)
        return [f for f in dmap if match(f)]

    def _actualfilename(self, tr):
        if tr:
            return self._pendingfilename
        else:
            return self._filename

    def savebackup(self, tr, backupname):
        '''Save current dirstate into backup file'''
        filename = self._actualfilename(tr)
        assert backupname != filename

        # use '_writedirstate' instead of 'write' to make sure changes are
        # written out, because the latter skips writing while a transaction
        # is running. The output file will be used to create a backup of the
        # dirstate at this point.
        if self._dirty or not self._opener.exists(filename):
            self._writedirstate(
                tr,
                self._opener(filename, b"w", atomictemp=True, checkambig=True),
            )

        if tr:
            # ensure that subsequent tr.writepending returns True for
            # changes written out above, even if dirstate is never
            # changed after this
            tr.addfilegenerator(
                b'dirstate',
                (self._filename,),
                lambda f: self._writedirstate(tr, f),
                location=b'plain',
            )

            # ensure that pending file written above is unlinked at
            # failure, even if tr.writepending isn't invoked until the
            # end of this transaction
            tr.registertmp(filename, location=b'plain')

        self._opener.tryunlink(backupname)
        # hardlink backup is okay because _writedirstate is always called
        # with an "atomictemp=True" file.
        util.copyfile(
            self._opener.join(filename),
            self._opener.join(backupname),
            hardlink=True,
        )

    def restorebackup(self, tr, backupname):
        '''Restore dirstate by backup file'''
        # this "invalidate()" prevents "wlock.release()" from writing
        # changes of dirstate out after restoring from backup file
        self.invalidate()
        filename = self._actualfilename(tr)
        o = self._opener
        if util.samefile(o.join(backupname), o.join(filename)):
            o.unlink(backupname)
        else:
            o.rename(backupname, filename, checkambig=True)

    def clearbackup(self, tr, backupname):
        '''Clear backup file'''
        self._opener.unlink(backupname)

    def verify(self, m1, m2):
        """check the dirstate content against the parent manifest and yield errors"""
        missing_from_p1 = b"%s in state %s, but not in manifest1\n"
        unexpected_in_p1 = b"%s in state %s, but also in manifest1\n"
        missing_from_ps = b"%s in state %s, but not in either manifest\n"
        missing_from_ds = b"%s in manifest1, but listed as state %s\n"
        for f, entry in self.items():
            state = entry.state
            if state in b"nr" and f not in m1:
                yield (missing_from_p1, f, state)
            if state in b"a" and f in m1:
                yield (unexpected_in_p1, f, state)
            if state in b"m" and f not in m1 and f not in m2:
                yield (missing_from_ps, f, state)
        for f in m1:
            state = self.get_entry(f).state
            if state not in b"nrm":
                yield (missing_from_ds, f, state)
@@ -1,935 +1,937 b''
# parsers.py - Python implementation of parsers.c
#
# Copyright 2009 Olivia Mackall <olivia@selenic.com> and others
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import stat
import struct
import zlib

from ..node import (
    nullrev,
    sha1nodeconstants,
)
from ..thirdparty import attr
from .. import (
    error,
    pycompat,
    revlogutils,
    util,
)

from ..revlogutils import nodemap as nodemaputil
from ..revlogutils import constants as revlog_constants

stringio = pycompat.bytesio


_pack = struct.pack
_unpack = struct.unpack
_compress = zlib.compress
_decompress = zlib.decompress


38 # a special value used internally for `size` if the file comes from the other parent
38 # a special value used internally for `size` if the file comes from the other parent
39 FROM_P2 = -2
39 FROM_P2 = -2
40
40
41 # a special value used internally for `size` if the file is modified/merged/added
41 # a special value used internally for `size` if the file is modified/merged/added
42 NONNORMAL = -1
42 NONNORMAL = -1
43
43
44 # a special value used internally for `time` if the time is ambiguous
44 # a special value used internally for `time` if the time is ambiguous
45 AMBIGUOUS_TIME = -1
45 AMBIGUOUS_TIME = -1
46
46
47 # Bits of the `flags` byte inside a node in the file format
47 # Bits of the `flags` byte inside a node in the file format
48 DIRSTATE_V2_WDIR_TRACKED = 1 << 0
48 DIRSTATE_V2_WDIR_TRACKED = 1 << 0
49 DIRSTATE_V2_P1_TRACKED = 1 << 1
49 DIRSTATE_V2_P1_TRACKED = 1 << 1
50 DIRSTATE_V2_P2_INFO = 1 << 2
50 DIRSTATE_V2_P2_INFO = 1 << 2
51 DIRSTATE_V2_MODE_EXEC_PERM = 1 << 3
51 DIRSTATE_V2_MODE_EXEC_PERM = 1 << 3
52 DIRSTATE_V2_MODE_IS_SYMLINK = 1 << 4
52 DIRSTATE_V2_MODE_IS_SYMLINK = 1 << 4
53 DIRSTATE_V2_HAS_FALLBACK_EXEC = 1 << 5
53 DIRSTATE_V2_HAS_FALLBACK_EXEC = 1 << 5
54 DIRSTATE_V2_FALLBACK_EXEC = 1 << 6
54 DIRSTATE_V2_FALLBACK_EXEC = 1 << 6
55 DIRSTATE_V2_HAS_FALLBACK_SYMLINK = 1 << 7
55 DIRSTATE_V2_HAS_FALLBACK_SYMLINK = 1 << 7
56 DIRSTATE_V2_FALLBACK_SYMLINK = 1 << 8
56 DIRSTATE_V2_FALLBACK_SYMLINK = 1 << 8
57 DIRSTATE_V2_EXPECTED_STATE_IS_MODIFIED = 1 << 9
57 DIRSTATE_V2_EXPECTED_STATE_IS_MODIFIED = 1 << 9
58 DIRSTATE_V2_HAS_MODE_AND_SIZE = 1 << 10
58 DIRSTATE_V2_HAS_MODE_AND_SIZE = 1 << 10
59 DIRSTATE_V2_HAS_MTIME = 1 << 11
59 DIRSTATE_V2_HAS_MTIME = 1 << 11
60 DIRSTATE_V2_MTIME_SECOND_AMBIGUOUS = 1 << 12
60 DIRSTATE_V2_MTIME_SECOND_AMBIGUOUS = 1 << 12
61 DIRSTATE_V2_DIRECTORY = 1 << 13
61 DIRSTATE_V2_DIRECTORY = 1 << 13
62 DIRSTATE_V2_ALL_UNKNOWN_RECORDED = 1 << 14
62 DIRSTATE_V2_ALL_UNKNOWN_RECORDED = 1 << 14
63 DIRSTATE_V2_ALL_IGNORED_RECORDED = 1 << 15
63 DIRSTATE_V2_ALL_IGNORED_RECORDED = 1 << 15
64
64
65
65
66 @attr.s(slots=True, init=False)
66 @attr.s(slots=True, init=False)
67 class DirstateItem(object):
67 class DirstateItem(object):
68 """represent a dirstate entry
68 """represent a dirstate entry
69
69
70 It holds multiple attributes
70 It holds multiple attributes
71
71
72 # about file tracking
72 # about file tracking
73 - wc_tracked: is the file tracked by the working copy
73 - wc_tracked: is the file tracked by the working copy
74 - p1_tracked: is the file tracked in the working copy's first parent
74 - p1_tracked: is the file tracked in the working copy's first parent
75 - p2_info: the file has been involved in some merge operation. Either
75 - p2_info: the file has been involved in some merge operation. Either
76 because it was actually merged, or because the p2 version was
76 because it was actually merged, or because the p2 version was
77 ahead, or because some rename moved it there. In any of these cases
77 ahead, or because some rename moved it there. In any of these cases
78 `hg status` will want it displayed as modified.
78 `hg status` will want it displayed as modified.
79
79
80 # about the file state expected from p1 manifest:
80 # about the file state expected from p1 manifest:
81 - mode: the file mode in p1
81 - mode: the file mode in p1
82 - size: the file size in p1
82 - size: the file size in p1
83
83
84 These values can be set to None, which means we don't have a meaningful value
84 These values can be set to None, which means we don't have a meaningful value
85 to compare with. Either because we don't really care about them as their
85 to compare with. Either because we don't really care about them as their
86 `status` is known without having to look at the disk or because we don't
86 `status` is known without having to look at the disk or because we don't
87 know these right now and a full comparison will be needed to find out if
87 know these right now and a full comparison will be needed to find out if
88 the file is clean.
88 the file is clean.
89
89
90 # about the file state on disk last time we saw it:
90 # about the file state on disk last time we saw it:
91 - mtime: the last known clean mtime for the file.
91 - mtime: the last known clean mtime for the file.
92
92
93 This value can be set to None if no cacheable state exists. Either because we
93 This value can be set to None if no cacheable state exists. Either because we
94 do not care (see previous section) or because we could not cache something
94 do not care (see previous section) or because we could not cache something
95 yet.
95 yet.
96 """
96 """
97
97
98 _wc_tracked = attr.ib()
98 _wc_tracked = attr.ib()
99 _p1_tracked = attr.ib()
99 _p1_tracked = attr.ib()
100 _p2_info = attr.ib()
100 _p2_info = attr.ib()
101 _mode = attr.ib()
101 _mode = attr.ib()
102 _size = attr.ib()
102 _size = attr.ib()
103 _mtime_s = attr.ib()
103 _mtime_s = attr.ib()
104 _mtime_ns = attr.ib()
104 _mtime_ns = attr.ib()
105 _fallback_exec = attr.ib()
105 _fallback_exec = attr.ib()
106 _fallback_symlink = attr.ib()
106 _fallback_symlink = attr.ib()
107
107
108 def __init__(
108 def __init__(
109 self,
109 self,
110 wc_tracked=False,
110 wc_tracked=False,
111 p1_tracked=False,
111 p1_tracked=False,
112 p2_info=False,
112 p2_info=False,
113 has_meaningful_data=True,
113 has_meaningful_data=True,
114 has_meaningful_mtime=True,
114 has_meaningful_mtime=True,
115 parentfiledata=None,
115 parentfiledata=None,
116 fallback_exec=None,
116 fallback_exec=None,
117 fallback_symlink=None,
117 fallback_symlink=None,
118 ):
118 ):
119 self._wc_tracked = wc_tracked
119 self._wc_tracked = wc_tracked
120 self._p1_tracked = p1_tracked
120 self._p1_tracked = p1_tracked
121 self._p2_info = p2_info
121 self._p2_info = p2_info
122
122
123 self._fallback_exec = fallback_exec
123 self._fallback_exec = fallback_exec
124 self._fallback_symlink = fallback_symlink
124 self._fallback_symlink = fallback_symlink
125
125
126 self._mode = None
126 self._mode = None
127 self._size = None
127 self._size = None
128 self._mtime_s = None
128 self._mtime_s = None
129 self._mtime_ns = None
129 self._mtime_ns = None
130 if parentfiledata is None:
130 if parentfiledata is None:
131 has_meaningful_mtime = False
131 has_meaningful_mtime = False
132 has_meaningful_data = False
132 has_meaningful_data = False
133 elif parentfiledata[2] is None:
134 has_meaningful_mtime = False
133 if has_meaningful_data:
135 if has_meaningful_data:
134 self._mode = parentfiledata[0]
136 self._mode = parentfiledata[0]
135 self._size = parentfiledata[1]
137 self._size = parentfiledata[1]
136 if has_meaningful_mtime:
138 if has_meaningful_mtime:
137 self._mtime_s, self._mtime_ns = parentfiledata[2]
139 self._mtime_s, self._mtime_ns = parentfiledata[2]
138
140
139 @classmethod
141 @classmethod
140 def from_v2_data(cls, flags, size, mtime_s, mtime_ns):
142 def from_v2_data(cls, flags, size, mtime_s, mtime_ns):
141 """Build a new DirstateItem object from V2 data"""
143 """Build a new DirstateItem object from V2 data"""
142 has_mode_size = bool(flags & DIRSTATE_V2_HAS_MODE_AND_SIZE)
144 has_mode_size = bool(flags & DIRSTATE_V2_HAS_MODE_AND_SIZE)
143 has_meaningful_mtime = bool(flags & DIRSTATE_V2_HAS_MTIME)
145 has_meaningful_mtime = bool(flags & DIRSTATE_V2_HAS_MTIME)
144 if flags & DIRSTATE_V2_MTIME_SECOND_AMBIGUOUS:
146 if flags & DIRSTATE_V2_MTIME_SECOND_AMBIGUOUS:
145 # The current code is not able to do the more subtle comparison that the
147 # The current code is not able to do the more subtle comparison that the
146 # MTIME_SECOND_AMBIGUOUS requires. So we ignore the mtime
148 # MTIME_SECOND_AMBIGUOUS requires. So we ignore the mtime
147 has_meaningful_mtime = False
149 has_meaningful_mtime = False
148 mode = None
150 mode = None
149
151
150 if flags & DIRSTATE_V2_EXPECTED_STATE_IS_MODIFIED:
152 if flags & DIRSTATE_V2_EXPECTED_STATE_IS_MODIFIED:
151 # we do not have support for this flag in the code yet,
153 # we do not have support for this flag in the code yet,
152 # force a lookup for this file.
154 # force a lookup for this file.
153 has_mode_size = False
155 has_mode_size = False
154 has_meaningful_mtime = False
156 has_meaningful_mtime = False
155
157
156 fallback_exec = None
158 fallback_exec = None
157 if flags & DIRSTATE_V2_HAS_FALLBACK_EXEC:
159 if flags & DIRSTATE_V2_HAS_FALLBACK_EXEC:
158 fallback_exec = flags & DIRSTATE_V2_FALLBACK_EXEC
160 fallback_exec = flags & DIRSTATE_V2_FALLBACK_EXEC
159
161
160 fallback_symlink = None
162 fallback_symlink = None
161 if flags & DIRSTATE_V2_HAS_FALLBACK_SYMLINK:
163 if flags & DIRSTATE_V2_HAS_FALLBACK_SYMLINK:
162 fallback_symlink = flags & DIRSTATE_V2_FALLBACK_SYMLINK
164 fallback_symlink = flags & DIRSTATE_V2_FALLBACK_SYMLINK
163
165
164 if has_mode_size:
166 if has_mode_size:
165 assert stat.S_IXUSR == 0o100
167 assert stat.S_IXUSR == 0o100
166 if flags & DIRSTATE_V2_MODE_EXEC_PERM:
168 if flags & DIRSTATE_V2_MODE_EXEC_PERM:
167 mode = 0o755
169 mode = 0o755
168 else:
170 else:
169 mode = 0o644
171 mode = 0o644
170 if flags & DIRSTATE_V2_MODE_IS_SYMLINK:
172 if flags & DIRSTATE_V2_MODE_IS_SYMLINK:
171 mode |= stat.S_IFLNK
173 mode |= stat.S_IFLNK
172 else:
174 else:
173 mode |= stat.S_IFREG
175 mode |= stat.S_IFREG
174 return cls(
176 return cls(
175 wc_tracked=bool(flags & DIRSTATE_V2_WDIR_TRACKED),
177 wc_tracked=bool(flags & DIRSTATE_V2_WDIR_TRACKED),
176 p1_tracked=bool(flags & DIRSTATE_V2_P1_TRACKED),
178 p1_tracked=bool(flags & DIRSTATE_V2_P1_TRACKED),
177 p2_info=bool(flags & DIRSTATE_V2_P2_INFO),
179 p2_info=bool(flags & DIRSTATE_V2_P2_INFO),
178 has_meaningful_data=has_mode_size,
180 has_meaningful_data=has_mode_size,
179 has_meaningful_mtime=has_meaningful_mtime,
181 has_meaningful_mtime=has_meaningful_mtime,
180 parentfiledata=(mode, size, (mtime_s, mtime_ns)),
182 parentfiledata=(mode, size, (mtime_s, mtime_ns)),
181 fallback_exec=fallback_exec,
183 fallback_exec=fallback_exec,
182 fallback_symlink=fallback_symlink,
184 fallback_symlink=fallback_symlink,
183 )
185 )
184
186
185 @classmethod
187 @classmethod
186 def from_v1_data(cls, state, mode, size, mtime):
188 def from_v1_data(cls, state, mode, size, mtime):
187 """Build a new DirstateItem object from V1 data
189 """Build a new DirstateItem object from V1 data
188
190
189 Since the dirstate-v1 format is frozen, the signature of this function
191 Since the dirstate-v1 format is frozen, the signature of this function
190 is not expected to change, unlike the __init__ one.
192 is not expected to change, unlike the __init__ one.
191 """
193 """
192 if state == b'm':
194 if state == b'm':
193 return cls(wc_tracked=True, p1_tracked=True, p2_info=True)
195 return cls(wc_tracked=True, p1_tracked=True, p2_info=True)
194 elif state == b'a':
196 elif state == b'a':
195 return cls(wc_tracked=True)
197 return cls(wc_tracked=True)
196 elif state == b'r':
198 elif state == b'r':
197 if size == NONNORMAL:
199 if size == NONNORMAL:
198 p1_tracked = True
200 p1_tracked = True
199 p2_info = True
201 p2_info = True
200 elif size == FROM_P2:
202 elif size == FROM_P2:
201 p1_tracked = False
203 p1_tracked = False
202 p2_info = True
204 p2_info = True
203 else:
205 else:
204 p1_tracked = True
206 p1_tracked = True
205 p2_info = False
207 p2_info = False
206 return cls(p1_tracked=p1_tracked, p2_info=p2_info)
208 return cls(p1_tracked=p1_tracked, p2_info=p2_info)
207 elif state == b'n':
209 elif state == b'n':
208 if size == FROM_P2:
210 if size == FROM_P2:
209 return cls(wc_tracked=True, p2_info=True)
211 return cls(wc_tracked=True, p2_info=True)
210 elif size == NONNORMAL:
212 elif size == NONNORMAL:
211 return cls(wc_tracked=True, p1_tracked=True)
213 return cls(wc_tracked=True, p1_tracked=True)
212 elif mtime == AMBIGUOUS_TIME:
214 elif mtime == AMBIGUOUS_TIME:
213 return cls(
215 return cls(
214 wc_tracked=True,
216 wc_tracked=True,
215 p1_tracked=True,
217 p1_tracked=True,
216 has_meaningful_mtime=False,
218 has_meaningful_mtime=False,
217 parentfiledata=(mode, size, (42, 0)),
219 parentfiledata=(mode, size, (42, 0)),
218 )
220 )
219 else:
221 else:
220 return cls(
222 return cls(
221 wc_tracked=True,
223 wc_tracked=True,
222 p1_tracked=True,
224 p1_tracked=True,
223 parentfiledata=(mode, size, (mtime, 0)),
225 parentfiledata=(mode, size, (mtime, 0)),
224 )
226 )
225 else:
227 else:
226 raise RuntimeError(b'unknown state: %s' % state)
228 raise RuntimeError(b'unknown state: %s' % state)
227
229
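To make the v1 decoding above concrete, here is a small illustrative sketch (the mode/size/mtime values are invented; the assertions only restate what the branches above already encode):

# a clean file recorded with full stat data
clean = DirstateItem.from_v1_data(b'n', 0o100644, 12, 1000000000)
assert clean.tracked and clean.p1_tracked and not clean.p2_info
# size == NONNORMAL means "needs a content comparison on the next status"
lookup = DirstateItem.from_v1_data(b'n', 0, NONNORMAL, 0)
assert lookup.v1_mtime() == AMBIGUOUS_TIME
# 'a' entries are tracked in the working copy only
added = DirstateItem.from_v1_data(b'a', 0, 0, 0)
assert added.added and added.v1_size() == NONNORMAL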
228 def set_possibly_dirty(self):
230 def set_possibly_dirty(self):
229 """Mark a file as "possibly dirty"
231 """Mark a file as "possibly dirty"
230
232
231 This means the next status call will have to actually check its content
233 This means the next status call will have to actually check its content
232 to make sure it is correct.
234 to make sure it is correct.
233 """
235 """
234 self._mtime_s = None
236 self._mtime_s = None
235 self._mtime_ns = None
237 self._mtime_ns = None
236
238
237 def set_clean(self, mode, size, mtime):
239 def set_clean(self, mode, size, mtime):
238 """mark a file as "clean", cancelling a potential "possibly dirty" call
240 """mark a file as "clean", cancelling a potential "possibly dirty" call
239
241
240 Note: this function is a descendant of `dirstate.normal` and is
242 Note: this function is a descendant of `dirstate.normal` and is
241 currently expected to be called on "normal" entries only. There is no
243 currently expected to be called on "normal" entries only. There is no
242 reason for this not to change in the future as long as the code is
244 reason for this not to change in the future as long as the code is
243 updated to preserve the proper state of the non-normal files.
245 updated to preserve the proper state of the non-normal files.
244 """
246 """
245 self._wc_tracked = True
247 self._wc_tracked = True
246 self._p1_tracked = True
248 self._p1_tracked = True
247 self._mode = mode
249 self._mode = mode
248 self._size = size
250 self._size = size
249 self._mtime_s, self._mtime_ns = mtime
251 self._mtime_s, self._mtime_ns = mtime
250
252
251 def set_tracked(self):
253 def set_tracked(self):
252 """mark a file as tracked in the working copy
254 """mark a file as tracked in the working copy
253
255
254 This will ultimately be called by commands like `hg add`.
256 This will ultimately be called by commands like `hg add`.
255 """
257 """
256 self._wc_tracked = True
258 self._wc_tracked = True
257 # `set_tracked` is replacing various `normallookup` calls. So we mark
259 # `set_tracked` is replacing various `normallookup` calls. So we mark
258 # the files as needing lookup
260 # the files as needing lookup
259 #
261 #
260 # Consider dropping this in the future in favor of something less broad.
262 # Consider dropping this in the future in favor of something less broad.
261 self._mtime_s = None
263 self._mtime_s = None
262 self._mtime_ns = None
264 self._mtime_ns = None
263
265
264 def set_untracked(self):
266 def set_untracked(self):
265 """mark a file as untracked in the working copy
267 """mark a file as untracked in the working copy
266
268
267 This will ultimately be called by commands like `hg remove`.
269 This will ultimately be called by commands like `hg remove`.
268 """
270 """
269 self._wc_tracked = False
271 self._wc_tracked = False
270 self._mode = None
272 self._mode = None
271 self._size = None
273 self._size = None
272 self._mtime_s = None
274 self._mtime_s = None
273 self._mtime_ns = None
275 self._mtime_ns = None
274
276
275 def drop_merge_data(self):
277 def drop_merge_data(self):
276 """remove all "merge-only" information from a DirstateItem
278 """remove all "merge-only" information from a DirstateItem
277
279
278 This is to be called by the dirstatemap code when the second parent is dropped
280 This is to be called by the dirstatemap code when the second parent is dropped
279 """
281 """
280 if self._p2_info:
282 if self._p2_info:
281 self._p2_info = False
283 self._p2_info = False
282 self._mode = None
284 self._mode = None
283 self._size = None
285 self._size = None
284 self._mtime_s = None
286 self._mtime_s = None
285 self._mtime_ns = None
287 self._mtime_ns = None
286
288
287 @property
289 @property
288 def mode(self):
290 def mode(self):
289 return self.v1_mode()
291 return self.v1_mode()
290
292
291 @property
293 @property
292 def size(self):
294 def size(self):
293 return self.v1_size()
295 return self.v1_size()
294
296
295 @property
297 @property
296 def mtime(self):
298 def mtime(self):
297 return self.v1_mtime()
299 return self.v1_mtime()
298
300
299 def mtime_likely_equal_to(self, other_mtime):
301 def mtime_likely_equal_to(self, other_mtime):
300 self_sec = self._mtime_s
302 self_sec = self._mtime_s
301 if self_sec is None:
303 if self_sec is None:
302 return False
304 return False
303 self_ns = self._mtime_ns
305 self_ns = self._mtime_ns
304 other_sec, other_ns = other_mtime
306 other_sec, other_ns = other_mtime
305 return self_sec == other_sec and (
307 return self_sec == other_sec and (
306 self_ns == other_ns or self_ns == 0 or other_ns == 0
308 self_ns == other_ns or self_ns == 0 or other_ns == 0
307 )
309 )
308
310
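A hedged illustration of the comparison rule above: seconds must match exactly, while a nanosecond field of 0 on either side is treated as "sub-second precision unknown" rather than as a mismatch (the numbers are arbitrary):

item = DirstateItem(
    wc_tracked=True,
    p1_tracked=True,
    parentfiledata=(0o100644, 7, (1500, 250)),
)
assert item.mtime_likely_equal_to((1500, 250))      # exact match
assert item.mtime_likely_equal_to((1500, 0))        # other side truncated to seconds
assert not item.mtime_likely_equal_to((1501, 250))  # seconds differ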
309 @property
311 @property
310 def state(self):
312 def state(self):
311 """
313 """
312 States are:
314 States are:
313 n normal
315 n normal
314 m needs merging
316 m needs merging
315 r marked for removal
317 r marked for removal
316 a marked for addition
318 a marked for addition
317
319
318 XXX This "state" is a bit obscure and mostly a direct expression of the
320 XXX This "state" is a bit obscure and mostly a direct expression of the
319 dirstatev1 format. It would make sense to ultimately deprecate it in
321 dirstatev1 format. It would make sense to ultimately deprecate it in
320 favor of the more "semantic" attributes.
322 favor of the more "semantic" attributes.
321 """
323 """
322 if not self.any_tracked:
324 if not self.any_tracked:
323 return b'?'
325 return b'?'
324 return self.v1_state()
326 return self.v1_state()
325
327
326 @property
328 @property
327 def has_fallback_exec(self):
329 def has_fallback_exec(self):
328 """True if "fallback" information is available for the "exec" bit
330 """True if "fallback" information is available for the "exec" bit
329
331
330 Fallback information can be stored in the dirstate to keep track of
332 Fallback information can be stored in the dirstate to keep track of
331 filesystem attributes tracked by Mercurial when the underlying file
333 filesystem attributes tracked by Mercurial when the underlying file
332 system or operating system does not support that property (e.g.
334 system or operating system does not support that property (e.g.
333 Windows).
335 Windows).
334
336
335 Not all versions of the dirstate on-disk storage support preserving this
337 Not all versions of the dirstate on-disk storage support preserving this
336 information.
338 information.
337 """
339 """
338 return self._fallback_exec is not None
340 return self._fallback_exec is not None
339
341
340 @property
342 @property
341 def fallback_exec(self):
343 def fallback_exec(self):
342 """ "fallback" information for the executable bit
344 """ "fallback" information for the executable bit
343
345
344 True if the file should be considered executable when we cannot get
346 True if the file should be considered executable when we cannot get
345 this information from the file system. False if it should be
347 this information from the file system. False if it should be
346 considered non-executable.
348 considered non-executable.
347
349
348 See has_fallback_exec for details."""
350 See has_fallback_exec for details."""
349 return self._fallback_exec
351 return self._fallback_exec
350
352
351 @fallback_exec.setter
353 @fallback_exec.setter
352 def set_fallback_exec(self, value):
354 def set_fallback_exec(self, value):
353 """control "fallback" executable bit
355 """control "fallback" executable bit
354
356
355 Set to:
357 Set to:
356 - True if the file should be considered executable,
358 - True if the file should be considered executable,
357 - False if the file should be considered non-executable,
359 - False if the file should be considered non-executable,
358 - None if we do not have valid fallback data.
360 - None if we do not have valid fallback data.
359
361
360 See has_fallback_exec for details."""
362 See has_fallback_exec for details."""
361 if value is None:
363 if value is None:
362 self._fallback_exec = None
364 self._fallback_exec = None
363 else:
365 else:
364 self._fallback_exec = bool(value)
366 self._fallback_exec = bool(value)
365
367
366 @property
368 @property
367 def has_fallback_symlink(self):
369 def has_fallback_symlink(self):
368 """True if "fallback" information is available for symlink status
370 """True if "fallback" information is available for symlink status
369
371
370 Fallback information can be stored in the dirstate to keep track of
372 Fallback information can be stored in the dirstate to keep track of
371 filesystem attributes tracked by Mercurial when the underlying file
373 filesystem attributes tracked by Mercurial when the underlying file
372 system or operating system does not support that property (e.g.
374 system or operating system does not support that property (e.g.
373 Windows).
375 Windows).
374
376
375 Not all versions of the dirstate on-disk storage support preserving this
377 Not all versions of the dirstate on-disk storage support preserving this
376 information."""
378 information."""
377 return self._fallback_symlink is not None
379 return self._fallback_symlink is not None
378
380
379 @property
381 @property
380 def fallback_symlink(self):
382 def fallback_symlink(self):
381 """ "fallback" information for symlink status
383 """ "fallback" information for symlink status
382
384
383 True if the file should be considered a symlink when we cannot get
385 True if the file should be considered a symlink when we cannot get
384 this information from the file system. False if it should be
386 this information from the file system. False if it should be
385 considered not a symlink.
387 considered not a symlink.
386
388
387 See has_fallback_symlink for details."""
389 See has_fallback_symlink for details."""
388 return self._fallback_symlink
390 return self._fallback_symlink
389
391
390 @fallback_symlink.setter
392 @fallback_symlink.setter
391 def set_fallback_symlink(self, value):
393 def set_fallback_symlink(self, value):
392 """control "fallback" symlink status
394 """control "fallback" symlink status
393
395
394 Set to:
396 Set to:
395 - True if the file should be considered a symlink,
397 - True if the file should be considered a symlink,
396 - False if the file should be considered not a symlink,
398 - False if the file should be considered not a symlink,
397 - None if we do not have valid fallback data.
399 - None if we do not have valid fallback data.
398
400
399 See has_fallback_symlink for details."""
401 See has_fallback_symlink for details."""
400 if value is None:
402 if value is None:
401 self._fallback_symlink = None
403 self._fallback_symlink = None
402 else:
404 else:
403 self._fallback_symlink = bool(value)
405 self._fallback_symlink = bool(value)
404
406
405 @property
407 @property
406 def tracked(self):
408 def tracked(self):
407 """True if the file is tracked in the working copy"""
409 """True if the file is tracked in the working copy"""
408 return self._wc_tracked
410 return self._wc_tracked
409
411
410 @property
412 @property
411 def any_tracked(self):
413 def any_tracked(self):
412 """True if the file is tracked anywhere (wc or parents)"""
414 """True if the file is tracked anywhere (wc or parents)"""
413 return self._wc_tracked or self._p1_tracked or self._p2_info
415 return self._wc_tracked or self._p1_tracked or self._p2_info
414
416
415 @property
417 @property
416 def added(self):
418 def added(self):
417 """True if the file has been added"""
419 """True if the file has been added"""
418 return self._wc_tracked and not (self._p1_tracked or self._p2_info)
420 return self._wc_tracked and not (self._p1_tracked or self._p2_info)
419
421
420 @property
422 @property
421 def maybe_clean(self):
423 def maybe_clean(self):
422 """True if the file has a chance to be in the "clean" state"""
424 """True if the file has a chance to be in the "clean" state"""
423 if not self._wc_tracked:
425 if not self._wc_tracked:
424 return False
426 return False
425 elif not self._p1_tracked:
427 elif not self._p1_tracked:
426 return False
428 return False
427 elif self._p2_info:
429 elif self._p2_info:
428 return False
430 return False
429 return True
431 return True
430
432
431 @property
433 @property
432 def p1_tracked(self):
434 def p1_tracked(self):
433 """True if the file is tracked in the first parent manifest"""
435 """True if the file is tracked in the first parent manifest"""
434 return self._p1_tracked
436 return self._p1_tracked
435
437
436 @property
438 @property
437 def p2_info(self):
439 def p2_info(self):
438 """True if the file needed to merge or apply any input from p2
440 """True if the file needed to merge or apply any input from p2
439
441
440 See the class documentation for details.
442 See the class documentation for details.
441 """
443 """
442 return self._wc_tracked and self._p2_info
444 return self._wc_tracked and self._p2_info
443
445
444 @property
446 @property
445 def removed(self):
447 def removed(self):
446 """True if the file has been removed"""
448 """True if the file has been removed"""
447 return not self._wc_tracked and (self._p1_tracked or self._p2_info)
449 return not self._wc_tracked and (self._p1_tracked or self._p2_info)
448
450
449 def v2_data(self):
451 def v2_data(self):
450 """Returns (flags, mode, size, mtime) for v2 serialization"""
452 """Returns (flags, mode, size, mtime) for v2 serialization"""
451 flags = 0
453 flags = 0
452 if self._wc_tracked:
454 if self._wc_tracked:
453 flags |= DIRSTATE_V2_WDIR_TRACKED
455 flags |= DIRSTATE_V2_WDIR_TRACKED
454 if self._p1_tracked:
456 if self._p1_tracked:
455 flags |= DIRSTATE_V2_P1_TRACKED
457 flags |= DIRSTATE_V2_P1_TRACKED
456 if self._p2_info:
458 if self._p2_info:
457 flags |= DIRSTATE_V2_P2_INFO
459 flags |= DIRSTATE_V2_P2_INFO
458 if self._mode is not None and self._size is not None:
460 if self._mode is not None and self._size is not None:
459 flags |= DIRSTATE_V2_HAS_MODE_AND_SIZE
461 flags |= DIRSTATE_V2_HAS_MODE_AND_SIZE
460 if self.mode & stat.S_IXUSR:
462 if self.mode & stat.S_IXUSR:
461 flags |= DIRSTATE_V2_MODE_EXEC_PERM
463 flags |= DIRSTATE_V2_MODE_EXEC_PERM
462 if stat.S_ISLNK(self.mode):
464 if stat.S_ISLNK(self.mode):
463 flags |= DIRSTATE_V2_MODE_IS_SYMLINK
465 flags |= DIRSTATE_V2_MODE_IS_SYMLINK
464 if self._mtime_s is not None:
466 if self._mtime_s is not None:
465 flags |= DIRSTATE_V2_HAS_MTIME
467 flags |= DIRSTATE_V2_HAS_MTIME
466
468
467 if self._fallback_exec is not None:
469 if self._fallback_exec is not None:
468 flags |= DIRSTATE_V2_HAS_FALLBACK_EXEC
470 flags |= DIRSTATE_V2_HAS_FALLBACK_EXEC
469 if self._fallback_exec:
471 if self._fallback_exec:
470 flags |= DIRSTATE_V2_FALLBACK_EXEC
472 flags |= DIRSTATE_V2_FALLBACK_EXEC
471
473
472 if self._fallback_symlink is not None:
474 if self._fallback_symlink is not None:
473 flags |= DIRSTATE_V2_HAS_FALLBACK_SYMLINK
475 flags |= DIRSTATE_V2_HAS_FALLBACK_SYMLINK
474 if self._fallback_symlink:
476 if self._fallback_symlink:
475 flags |= DIRSTATE_V2_FALLBACK_SYMLINK
477 flags |= DIRSTATE_V2_FALLBACK_SYMLINK
476
478
477 # Note: we do not need to do anything regarding
479 # Note: we do not need to do anything regarding
478 # DIRSTATE_V2_ALL_UNKNOWN_RECORDED and DIRSTATE_V2_ALL_IGNORED_RECORDED
480 # DIRSTATE_V2_ALL_UNKNOWN_RECORDED and DIRSTATE_V2_ALL_IGNORED_RECORDED
479 # since we never set _DIRSTATE_V2_HAS_DIRECTORY_MTIME
481 # since we never set _DIRSTATE_V2_HAS_DIRECTORY_MTIME
480 return (flags, self._size or 0, self._mtime_s or 0, self._mtime_ns or 0)
482 return (flags, self._size or 0, self._mtime_s or 0, self._mtime_ns or 0)
481
483
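As an illustrative round trip (not part of the changeset) between `v2_data()` and `from_v2_data()` above, with invented mode/size/mtime values:

item = DirstateItem(
    wc_tracked=True,
    p1_tracked=True,
    parentfiledata=(0o100755, 5, (10, 0)),
)
flags, size, mtime_s, mtime_ns = item.v2_data()
copy = DirstateItem.from_v2_data(flags, size, mtime_s, mtime_ns)
# the exec bit and the cached stat data survive the round trip
assert copy.v1_state() == b'n'
assert copy.mode == 0o100755 and copy.size == 5
assert copy.mtime_likely_equal_to((10, 0))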
482 def v1_state(self):
484 def v1_state(self):
483 """return a "state" suitable for v1 serialization"""
485 """return a "state" suitable for v1 serialization"""
484 if not self.any_tracked:
486 if not self.any_tracked:
485 # the object has no state to record, this is -currently-
487 # the object has no state to record, this is -currently-
486 # unsupported
488 # unsupported
487 raise RuntimeError('untracked item')
489 raise RuntimeError('untracked item')
488 elif self.removed:
490 elif self.removed:
489 return b'r'
491 return b'r'
490 elif self._p1_tracked and self._p2_info:
492 elif self._p1_tracked and self._p2_info:
491 return b'm'
493 return b'm'
492 elif self.added:
494 elif self.added:
493 return b'a'
495 return b'a'
494 else:
496 else:
495 return b'n'
497 return b'n'
496
498
497 def v1_mode(self):
499 def v1_mode(self):
498 """return a "mode" suitable for v1 serialization"""
500 """return a "mode" suitable for v1 serialization"""
499 return self._mode if self._mode is not None else 0
501 return self._mode if self._mode is not None else 0
500
502
501 def v1_size(self):
503 def v1_size(self):
502 """return a "size" suitable for v1 serialization"""
504 """return a "size" suitable for v1 serialization"""
503 if not self.any_tracked:
505 if not self.any_tracked:
504 # the object has no state to record, this is -currently-
506 # the object has no state to record, this is -currently-
505 # unsupported
507 # unsupported
506 raise RuntimeError('untracked item')
508 raise RuntimeError('untracked item')
507 elif self.removed and self._p1_tracked and self._p2_info:
509 elif self.removed and self._p1_tracked and self._p2_info:
508 return NONNORMAL
510 return NONNORMAL
509 elif self._p2_info:
511 elif self._p2_info:
510 return FROM_P2
512 return FROM_P2
511 elif self.removed:
513 elif self.removed:
512 return 0
514 return 0
513 elif self.added:
515 elif self.added:
514 return NONNORMAL
516 return NONNORMAL
515 elif self._size is None:
517 elif self._size is None:
516 return NONNORMAL
518 return NONNORMAL
517 else:
519 else:
518 return self._size
520 return self._size
519
521
520 def v1_mtime(self):
522 def v1_mtime(self):
521 """return a "mtime" suitable for v1 serialization"""
523 """return a "mtime" suitable for v1 serialization"""
522 if not self.any_tracked:
524 if not self.any_tracked:
523 # the object has no state to record, this is -currently-
525 # the object has no state to record, this is -currently-
524 # unsupported
526 # unsupported
525 raise RuntimeError('untracked item')
527 raise RuntimeError('untracked item')
526 elif self.removed:
528 elif self.removed:
527 return 0
529 return 0
528 elif self._mtime_s is None:
530 elif self._mtime_s is None:
529 return AMBIGUOUS_TIME
531 return AMBIGUOUS_TIME
530 elif self._p2_info:
532 elif self._p2_info:
531 return AMBIGUOUS_TIME
533 return AMBIGUOUS_TIME
532 elif not self._p1_tracked:
534 elif not self._p1_tracked:
533 return AMBIGUOUS_TIME
535 return AMBIGUOUS_TIME
534 else:
536 else:
535 return self._mtime_s
537 return self._mtime_s
536
538
537 def need_delay(self, now):
539 def need_delay(self, now):
538 """True if the stored mtime would be ambiguous with the current time"""
540 """True if the stored mtime would be ambiguous with the current time"""
539 return self.v1_state() == b'n' and self._mtime_s == now[0]
541 return self.v1_state() == b'n' and self._mtime_s == now[0]
540
542
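A small sketch of the ambiguity check above: an entry whose cached mtime second equals the second at which the dirstate is being written cannot be trusted as clean, while a strictly older entry can (the values are arbitrary):

now = (1500, 0)  # (second, nanosecond) of the current dirstate write
fresh = DirstateItem(
    wc_tracked=True, p1_tracked=True, parentfiledata=(0o100644, 3, (1500, 123))
)
older = DirstateItem(
    wc_tracked=True, p1_tracked=True, parentfiledata=(0o100644, 3, (1400, 0))
)
assert fresh.need_delay(now)      # same second as `now`: ambiguous
assert not older.need_delay(now)  # strictly older: safe to cache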
541
543
542 def gettype(q):
544 def gettype(q):
543 return int(q & 0xFFFF)
545 return int(q & 0xFFFF)
544
546
545
547
546 class BaseIndexObject(object):
548 class BaseIndexObject(object):
547 # Can I be passed to an algorithm implemented in Rust?
549 # Can I be passed to an algorithm implemented in Rust?
548 rust_ext_compat = 0
550 rust_ext_compat = 0
549 # Format of an index entry according to Python's `struct` language
551 # Format of an index entry according to Python's `struct` language
550 index_format = revlog_constants.INDEX_ENTRY_V1
552 index_format = revlog_constants.INDEX_ENTRY_V1
551 # Size of a C unsigned long long int, platform independent
553 # Size of a C unsigned long long int, platform independent
552 big_int_size = struct.calcsize(b'>Q')
554 big_int_size = struct.calcsize(b'>Q')
553 # Size of a C long int, platform independent
555 # Size of a C long int, platform independent
554 int_size = struct.calcsize(b'>i')
556 int_size = struct.calcsize(b'>i')
555 # An empty index entry, used as a default value to be overridden, or nullrev
557 # An empty index entry, used as a default value to be overridden, or nullrev
556 null_item = (
558 null_item = (
557 0,
559 0,
558 0,
560 0,
559 0,
561 0,
560 -1,
562 -1,
561 -1,
563 -1,
562 -1,
564 -1,
563 -1,
565 -1,
564 sha1nodeconstants.nullid,
566 sha1nodeconstants.nullid,
565 0,
567 0,
566 0,
568 0,
567 revlog_constants.COMP_MODE_INLINE,
569 revlog_constants.COMP_MODE_INLINE,
568 revlog_constants.COMP_MODE_INLINE,
570 revlog_constants.COMP_MODE_INLINE,
569 )
571 )
570
572
571 @util.propertycache
573 @util.propertycache
572 def entry_size(self):
574 def entry_size(self):
573 return self.index_format.size
575 return self.index_format.size
574
576
575 @property
577 @property
576 def nodemap(self):
578 def nodemap(self):
577 msg = b"index.nodemap is deprecated, use index.[has_node|rev|get_rev]"
579 msg = b"index.nodemap is deprecated, use index.[has_node|rev|get_rev]"
578 util.nouideprecwarn(msg, b'5.3', stacklevel=2)
580 util.nouideprecwarn(msg, b'5.3', stacklevel=2)
579 return self._nodemap
581 return self._nodemap
580
582
581 @util.propertycache
583 @util.propertycache
582 def _nodemap(self):
584 def _nodemap(self):
583 nodemap = nodemaputil.NodeMap({sha1nodeconstants.nullid: nullrev})
585 nodemap = nodemaputil.NodeMap({sha1nodeconstants.nullid: nullrev})
584 for r in range(0, len(self)):
586 for r in range(0, len(self)):
585 n = self[r][7]
587 n = self[r][7]
586 nodemap[n] = r
588 nodemap[n] = r
587 return nodemap
589 return nodemap
588
590
589 def has_node(self, node):
591 def has_node(self, node):
590 """return True if the node exists in the index"""
592 """return True if the node exists in the index"""
591 return node in self._nodemap
593 return node in self._nodemap
592
594
593 def rev(self, node):
595 def rev(self, node):
594 """return a revision for a node
596 """return a revision for a node
595
597
596 If the node is unknown, raise a RevlogError"""
598 If the node is unknown, raise a RevlogError"""
597 return self._nodemap[node]
599 return self._nodemap[node]
598
600
599 def get_rev(self, node):
601 def get_rev(self, node):
600 """return a revision for a node
602 """return a revision for a node
601
603
602 If the node is unknown, return None"""
604 If the node is unknown, return None"""
603 return self._nodemap.get(node)
605 return self._nodemap.get(node)
604
606
605 def _stripnodes(self, start):
607 def _stripnodes(self, start):
606 if '_nodemap' in vars(self):
608 if '_nodemap' in vars(self):
607 for r in range(start, len(self)):
609 for r in range(start, len(self)):
608 n = self[r][7]
610 n = self[r][7]
609 del self._nodemap[n]
611 del self._nodemap[n]
610
612
611 def clearcaches(self):
613 def clearcaches(self):
612 self.__dict__.pop('_nodemap', None)
614 self.__dict__.pop('_nodemap', None)
613
615
614 def __len__(self):
616 def __len__(self):
615 return self._lgt + len(self._extra)
617 return self._lgt + len(self._extra)
616
618
617 def append(self, tup):
619 def append(self, tup):
618 if '_nodemap' in vars(self):
620 if '_nodemap' in vars(self):
619 self._nodemap[tup[7]] = len(self)
621 self._nodemap[tup[7]] = len(self)
620 data = self._pack_entry(len(self), tup)
622 data = self._pack_entry(len(self), tup)
621 self._extra.append(data)
623 self._extra.append(data)
622
624
623 def _pack_entry(self, rev, entry):
625 def _pack_entry(self, rev, entry):
624 assert entry[8] == 0
626 assert entry[8] == 0
625 assert entry[9] == 0
627 assert entry[9] == 0
626 return self.index_format.pack(*entry[:8])
628 return self.index_format.pack(*entry[:8])
627
629
628 def _check_index(self, i):
630 def _check_index(self, i):
629 if not isinstance(i, int):
631 if not isinstance(i, int):
630 raise TypeError(b"expecting int indexes")
632 raise TypeError(b"expecting int indexes")
631 if i < 0 or i >= len(self):
633 if i < 0 or i >= len(self):
632 raise IndexError
634 raise IndexError
633
635
634 def __getitem__(self, i):
636 def __getitem__(self, i):
635 if i == -1:
637 if i == -1:
636 return self.null_item
638 return self.null_item
637 self._check_index(i)
639 self._check_index(i)
638 if i >= self._lgt:
640 if i >= self._lgt:
639 data = self._extra[i - self._lgt]
641 data = self._extra[i - self._lgt]
640 else:
642 else:
641 index = self._calculate_index(i)
643 index = self._calculate_index(i)
642 data = self._data[index : index + self.entry_size]
644 data = self._data[index : index + self.entry_size]
643 r = self._unpack_entry(i, data)
645 r = self._unpack_entry(i, data)
644 if self._lgt and i == 0:
646 if self._lgt and i == 0:
645 offset = revlogutils.offset_type(0, gettype(r[0]))
647 offset = revlogutils.offset_type(0, gettype(r[0]))
646 r = (offset,) + r[1:]
648 r = (offset,) + r[1:]
647 return r
649 return r
648
650
649 def _unpack_entry(self, rev, data):
651 def _unpack_entry(self, rev, data):
650 r = self.index_format.unpack(data)
652 r = self.index_format.unpack(data)
651 r = r + (
653 r = r + (
652 0,
654 0,
653 0,
655 0,
654 revlog_constants.COMP_MODE_INLINE,
656 revlog_constants.COMP_MODE_INLINE,
655 revlog_constants.COMP_MODE_INLINE,
657 revlog_constants.COMP_MODE_INLINE,
656 )
658 )
657 return r
659 return r
658
660
659 def pack_header(self, header):
661 def pack_header(self, header):
660 """pack header information as binary"""
662 """pack header information as binary"""
661 v_fmt = revlog_constants.INDEX_HEADER
663 v_fmt = revlog_constants.INDEX_HEADER
662 return v_fmt.pack(header)
664 return v_fmt.pack(header)
663
665
664 def entry_binary(self, rev):
666 def entry_binary(self, rev):
665 """return the raw binary string representing a revision"""
667 """return the raw binary string representing a revision"""
666 entry = self[rev]
668 entry = self[rev]
667 p = revlog_constants.INDEX_ENTRY_V1.pack(*entry[:8])
669 p = revlog_constants.INDEX_ENTRY_V1.pack(*entry[:8])
668 if rev == 0:
670 if rev == 0:
669 p = p[revlog_constants.INDEX_HEADER.size :]
671 p = p[revlog_constants.INDEX_HEADER.size :]
670 return p
672 return p
671
673
672
674
673 class IndexObject(BaseIndexObject):
675 class IndexObject(BaseIndexObject):
674 def __init__(self, data):
676 def __init__(self, data):
675 assert len(data) % self.entry_size == 0, (
677 assert len(data) % self.entry_size == 0, (
676 len(data),
678 len(data),
677 self.entry_size,
679 self.entry_size,
678 len(data) % self.entry_size,
680 len(data) % self.entry_size,
679 )
681 )
680 self._data = data
682 self._data = data
681 self._lgt = len(data) // self.entry_size
683 self._lgt = len(data) // self.entry_size
682 self._extra = []
684 self._extra = []
683
685
684 def _calculate_index(self, i):
686 def _calculate_index(self, i):
685 return i * self.entry_size
687 return i * self.entry_size
686
688
687 def __delitem__(self, i):
689 def __delitem__(self, i):
688 if not isinstance(i, slice) or not i.stop == -1 or i.step is not None:
690 if not isinstance(i, slice) or not i.stop == -1 or i.step is not None:
689 raise ValueError(b"deleting slices only supports a:-1 with step 1")
691 raise ValueError(b"deleting slices only supports a:-1 with step 1")
690 i = i.start
692 i = i.start
691 self._check_index(i)
693 self._check_index(i)
692 self._stripnodes(i)
694 self._stripnodes(i)
693 if i < self._lgt:
695 if i < self._lgt:
694 self._data = self._data[: i * self.entry_size]
696 self._data = self._data[: i * self.entry_size]
695 self._lgt = i
697 self._lgt = i
696 self._extra = []
698 self._extra = []
697 else:
699 else:
698 self._extra = self._extra[: i - self._lgt]
700 self._extra = self._extra[: i - self._lgt]
699
701
700
702
701 class PersistentNodeMapIndexObject(IndexObject):
703 class PersistentNodeMapIndexObject(IndexObject):
702 """a debug-oriented class to test the persistent nodemap
704 """a debug-oriented class to test the persistent nodemap
703
705
704 We need a simple python object to test the API and higher-level behavior. See
706 We need a simple python object to test the API and higher-level behavior. See
705 the Rust implementation for more serious usage. This should be used only
707 the Rust implementation for more serious usage. This should be used only
706 through the dedicated `devel.persistent-nodemap` config.
708 through the dedicated `devel.persistent-nodemap` config.
707 """
709 """
708
710
709 def nodemap_data_all(self):
711 def nodemap_data_all(self):
710 """Return bytes containing a full serialization of a nodemap
712 """Return bytes containing a full serialization of a nodemap
711
713
712 The nodemap should be valid for the full set of revisions in the
714 The nodemap should be valid for the full set of revisions in the
713 index."""
715 index."""
714 return nodemaputil.persistent_data(self)
716 return nodemaputil.persistent_data(self)
715
717
716 def nodemap_data_incremental(self):
718 def nodemap_data_incremental(self):
717 """Return bytes containing an incremental update to the persistent nodemap
719 """Return bytes containing an incremental update to the persistent nodemap
718
720
719 This contains the data for an append-only update of the data provided
721 This contains the data for an append-only update of the data provided
720 in the last call to `update_nodemap_data`.
722 in the last call to `update_nodemap_data`.
721 """
723 """
722 if self._nm_root is None:
724 if self._nm_root is None:
723 return None
725 return None
724 docket = self._nm_docket
726 docket = self._nm_docket
725 changed, data = nodemaputil.update_persistent_data(
727 changed, data = nodemaputil.update_persistent_data(
726 self, self._nm_root, self._nm_max_idx, self._nm_docket.tip_rev
728 self, self._nm_root, self._nm_max_idx, self._nm_docket.tip_rev
727 )
729 )
728
730
729 self._nm_root = self._nm_max_idx = self._nm_docket = None
731 self._nm_root = self._nm_max_idx = self._nm_docket = None
730 return docket, changed, data
732 return docket, changed, data
731
733
732 def update_nodemap_data(self, docket, nm_data):
734 def update_nodemap_data(self, docket, nm_data):
733 """provide full block of persisted binary data for a nodemap
735 """provide full block of persisted binary data for a nodemap
734
736
735 The data are expected to come from disk. See `nodemap_data_all` for a
737 The data are expected to come from disk. See `nodemap_data_all` for a
736 producer of such data."""
738 producer of such data."""
737 if nm_data is not None:
739 if nm_data is not None:
738 self._nm_root, self._nm_max_idx = nodemaputil.parse_data(nm_data)
740 self._nm_root, self._nm_max_idx = nodemaputil.parse_data(nm_data)
739 if self._nm_root:
741 if self._nm_root:
740 self._nm_docket = docket
742 self._nm_docket = docket
741 else:
743 else:
742 self._nm_root = self._nm_max_idx = self._nm_docket = None
744 self._nm_root = self._nm_max_idx = self._nm_docket = None
743
745
744
746
745 class InlinedIndexObject(BaseIndexObject):
747 class InlinedIndexObject(BaseIndexObject):
746 def __init__(self, data, inline=0):
748 def __init__(self, data, inline=0):
747 self._data = data
749 self._data = data
748 self._lgt = self._inline_scan(None)
750 self._lgt = self._inline_scan(None)
749 self._inline_scan(self._lgt)
751 self._inline_scan(self._lgt)
750 self._extra = []
752 self._extra = []
751
753
752 def _inline_scan(self, lgt):
754 def _inline_scan(self, lgt):
753 off = 0
755 off = 0
754 if lgt is not None:
756 if lgt is not None:
755 self._offsets = [0] * lgt
757 self._offsets = [0] * lgt
756 count = 0
758 count = 0
757 while off <= len(self._data) - self.entry_size:
759 while off <= len(self._data) - self.entry_size:
758 start = off + self.big_int_size
760 start = off + self.big_int_size
759 (s,) = struct.unpack(
761 (s,) = struct.unpack(
760 b'>i',
762 b'>i',
761 self._data[start : start + self.int_size],
763 self._data[start : start + self.int_size],
762 )
764 )
763 if lgt is not None:
765 if lgt is not None:
764 self._offsets[count] = off
766 self._offsets[count] = off
765 count += 1
767 count += 1
766 off += self.entry_size + s
768 off += self.entry_size + s
767 if off != len(self._data):
769 if off != len(self._data):
768 raise ValueError(b"corrupted data")
770 raise ValueError(b"corrupted data")
769 return count
771 return count
770
772
771 def __delitem__(self, i):
773 def __delitem__(self, i):
772 if not isinstance(i, slice) or not i.stop == -1 or i.step is not None:
774 if not isinstance(i, slice) or not i.stop == -1 or i.step is not None:
773 raise ValueError(b"deleting slices only supports a:-1 with step 1")
775 raise ValueError(b"deleting slices only supports a:-1 with step 1")
774 i = i.start
776 i = i.start
775 self._check_index(i)
777 self._check_index(i)
776 self._stripnodes(i)
778 self._stripnodes(i)
777 if i < self._lgt:
779 if i < self._lgt:
778 self._offsets = self._offsets[:i]
780 self._offsets = self._offsets[:i]
779 self._lgt = i
781 self._lgt = i
780 self._extra = []
782 self._extra = []
781 else:
783 else:
782 self._extra = self._extra[: i - self._lgt]
784 self._extra = self._extra[: i - self._lgt]
783
785
784 def _calculate_index(self, i):
786 def _calculate_index(self, i):
785 return self._offsets[i]
787 return self._offsets[i]
786
788
787
789
788 def parse_index2(data, inline, revlogv2=False):
790 def parse_index2(data, inline, revlogv2=False):
789 if not inline:
791 if not inline:
790 cls = IndexObject2 if revlogv2 else IndexObject
792 cls = IndexObject2 if revlogv2 else IndexObject
791 return cls(data), None
793 return cls(data), None
792 cls = InlinedIndexObject
794 cls = InlinedIndexObject
793 return cls(data, inline), (0, data)
795 return cls(data, inline), (0, data)
794
796
795
797
796 def parse_index_cl_v2(data):
798 def parse_index_cl_v2(data):
797 return IndexChangelogV2(data), None
799 return IndexChangelogV2(data), None
798
800
799
801
800 class IndexObject2(IndexObject):
802 class IndexObject2(IndexObject):
801 index_format = revlog_constants.INDEX_ENTRY_V2
803 index_format = revlog_constants.INDEX_ENTRY_V2
802
804
803 def replace_sidedata_info(
805 def replace_sidedata_info(
804 self,
806 self,
805 rev,
807 rev,
806 sidedata_offset,
808 sidedata_offset,
807 sidedata_length,
809 sidedata_length,
808 offset_flags,
810 offset_flags,
809 compression_mode,
811 compression_mode,
810 ):
812 ):
811 """
813 """
812 Replace an existing index entry's sidedata offset and length with new
814 Replace an existing index entry's sidedata offset and length with new
813 ones.
815 ones.
814 This cannot be used outside of the context of sidedata rewriting,
816 This cannot be used outside of the context of sidedata rewriting,
815 inside the transaction that creates the revision `rev`.
817 inside the transaction that creates the revision `rev`.
816 """
818 """
817 if rev < 0:
819 if rev < 0:
818 raise KeyError
820 raise KeyError
819 self._check_index(rev)
821 self._check_index(rev)
820 if rev < self._lgt:
822 if rev < self._lgt:
821 msg = b"cannot rewrite entries outside of this transaction"
823 msg = b"cannot rewrite entries outside of this transaction"
822 raise KeyError(msg)
824 raise KeyError(msg)
823 else:
825 else:
824 entry = list(self[rev])
826 entry = list(self[rev])
825 entry[0] = offset_flags
827 entry[0] = offset_flags
826 entry[8] = sidedata_offset
828 entry[8] = sidedata_offset
827 entry[9] = sidedata_length
829 entry[9] = sidedata_length
828 entry[11] = compression_mode
830 entry[11] = compression_mode
829 entry = tuple(entry)
831 entry = tuple(entry)
830 new = self._pack_entry(rev, entry)
832 new = self._pack_entry(rev, entry)
831 self._extra[rev - self._lgt] = new
833 self._extra[rev - self._lgt] = new
832
834
833 def _unpack_entry(self, rev, data):
835 def _unpack_entry(self, rev, data):
834 data = self.index_format.unpack(data)
836 data = self.index_format.unpack(data)
835 entry = data[:10]
837 entry = data[:10]
836 data_comp = data[10] & 3
838 data_comp = data[10] & 3
837 sidedata_comp = (data[10] & (3 << 2)) >> 2
839 sidedata_comp = (data[10] & (3 << 2)) >> 2
838 return entry + (data_comp, sidedata_comp)
840 return entry + (data_comp, sidedata_comp)
839
841
840 def _pack_entry(self, rev, entry):
842 def _pack_entry(self, rev, entry):
841 data = entry[:10]
843 data = entry[:10]
842 data_comp = entry[10] & 3
844 data_comp = entry[10] & 3
843 sidedata_comp = (entry[11] & 3) << 2
845 sidedata_comp = (entry[11] & 3) << 2
844 data += (data_comp | sidedata_comp,)
846 data += (data_comp | sidedata_comp,)
845
847
846 return self.index_format.pack(*data)
848 return self.index_format.pack(*data)
847
849
848 def entry_binary(self, rev):
850 def entry_binary(self, rev):
849 """return the raw binary string representing a revision"""
851 """return the raw binary string representing a revision"""
850 entry = self[rev]
852 entry = self[rev]
851 return self._pack_entry(rev, entry)
853 return self._pack_entry(rev, entry)
852
854
853 def pack_header(self, header):
855 def pack_header(self, header):
854 """pack header information as binary"""
856 """pack header information as binary"""
855 msg = 'version header should go in the docket, not the index: %d'
857 msg = 'version header should go in the docket, not the index: %d'
856 msg %= header
858 msg %= header
857 raise error.ProgrammingError(msg)
859 raise error.ProgrammingError(msg)
858
860
859
861
860 class IndexChangelogV2(IndexObject2):
862 class IndexChangelogV2(IndexObject2):
861 index_format = revlog_constants.INDEX_ENTRY_CL_V2
863 index_format = revlog_constants.INDEX_ENTRY_CL_V2
862
864
863 def _unpack_entry(self, rev, data, r=True):
865 def _unpack_entry(self, rev, data, r=True):
864 items = self.index_format.unpack(data)
866 items = self.index_format.unpack(data)
865 entry = items[:3] + (rev, rev) + items[3:8]
867 entry = items[:3] + (rev, rev) + items[3:8]
866 data_comp = items[8] & 3
868 data_comp = items[8] & 3
867 sidedata_comp = (items[8] >> 2) & 3
869 sidedata_comp = (items[8] >> 2) & 3
868 return entry + (data_comp, sidedata_comp)
870 return entry + (data_comp, sidedata_comp)
869
871
870 def _pack_entry(self, rev, entry):
872 def _pack_entry(self, rev, entry):
871 assert entry[3] == rev, entry[3]
873 assert entry[3] == rev, entry[3]
872 assert entry[4] == rev, entry[4]
874 assert entry[4] == rev, entry[4]
873 data = entry[:3] + entry[5:10]
875 data = entry[:3] + entry[5:10]
874 data_comp = entry[10] & 3
876 data_comp = entry[10] & 3
875 sidedata_comp = (entry[11] & 3) << 2
877 sidedata_comp = (entry[11] & 3) << 2
876 data += (data_comp | sidedata_comp,)
878 data += (data_comp | sidedata_comp,)
877 return self.index_format.pack(*data)
879 return self.index_format.pack(*data)
878
880
879
881
880 def parse_index_devel_nodemap(data, inline):
882 def parse_index_devel_nodemap(data, inline):
881 """like parse_index2, but always returns a PersistentNodeMapIndexObject"""
883 """like parse_index2, but always returns a PersistentNodeMapIndexObject"""
882 return PersistentNodeMapIndexObject(data), None
884 return PersistentNodeMapIndexObject(data), None
883
885
884
886
885 def parse_dirstate(dmap, copymap, st):
887 def parse_dirstate(dmap, copymap, st):
886 parents = [st[:20], st[20:40]]
888 parents = [st[:20], st[20:40]]
887 # dereference fields so they will be local in loop
889 # dereference fields so they will be local in loop
888 format = b">cllll"
890 format = b">cllll"
889 e_size = struct.calcsize(format)
891 e_size = struct.calcsize(format)
890 pos1 = 40
892 pos1 = 40
891 l = len(st)
893 l = len(st)
892
894
893 # the inner loop
895 # the inner loop
894 while pos1 < l:
896 while pos1 < l:
895 pos2 = pos1 + e_size
897 pos2 = pos1 + e_size
896 e = _unpack(b">cllll", st[pos1:pos2]) # a literal here is faster
898 e = _unpack(b">cllll", st[pos1:pos2]) # a literal here is faster
897 pos1 = pos2 + e[4]
899 pos1 = pos2 + e[4]
898 f = st[pos2:pos1]
900 f = st[pos2:pos1]
899 if b'\0' in f:
901 if b'\0' in f:
900 f, c = f.split(b'\0')
902 f, c = f.split(b'\0')
901 copymap[f] = c
903 copymap[f] = c
902 dmap[f] = DirstateItem.from_v1_data(*e[:4])
904 dmap[f] = DirstateItem.from_v1_data(*e[:4])
903 return parents
905 return parents
904
906
905
907
906 def pack_dirstate(dmap, copymap, pl, now):
908 def pack_dirstate(dmap, copymap, pl, now):
907 cs = stringio()
909 cs = stringio()
908 write = cs.write
910 write = cs.write
909 write(b"".join(pl))
911 write(b"".join(pl))
910 for f, e in pycompat.iteritems(dmap):
912 for f, e in pycompat.iteritems(dmap):
911 if e.need_delay(now):
913 if e.need_delay(now):
912 # The file was last modified "simultaneously" with the current
914 # The file was last modified "simultaneously" with the current
913 # write to dirstate (i.e. within the same second for file-
915 # write to dirstate (i.e. within the same second for file-
914 # systems with a granularity of 1 sec). This commonly happens
916 # systems with a granularity of 1 sec). This commonly happens
915 # for at least a couple of files on 'update'.
917 # for at least a couple of files on 'update'.
916 # The user could change the file without changing its size
918 # The user could change the file without changing its size
917 # within the same second. Invalidate the file's mtime in
919 # within the same second. Invalidate the file's mtime in
918 # dirstate, forcing future 'status' calls to compare the
920 # dirstate, forcing future 'status' calls to compare the
919 # contents of the file if the size is the same. This prevents
921 # contents of the file if the size is the same. This prevents
920 # mistakenly treating such files as clean.
922 # mistakenly treating such files as clean.
921 e.set_possibly_dirty()
923 e.set_possibly_dirty()
922
924
923 if f in copymap:
925 if f in copymap:
924 f = b"%s\0%s" % (f, copymap[f])
926 f = b"%s\0%s" % (f, copymap[f])
925 e = _pack(
927 e = _pack(
926 b">cllll",
928 b">cllll",
927 e.v1_state(),
929 e.v1_state(),
928 e.v1_mode(),
930 e.v1_mode(),
929 e.v1_size(),
931 e.v1_size(),
930 e.v1_mtime(),
932 e.v1_mtime(),
931 len(f),
933 len(f),
932 )
934 )
933 write(e)
935 write(e)
934 write(f)
936 write(f)
935 return cs.getvalue()
937 return cs.getvalue()
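To tie the two ends of this module together, an illustrative round trip through the pure-Python `pack_dirstate()`/`parse_dirstate()` pair above. The parent node ids, file name and stat values are made up, and `now` is chosen far from the entry's mtime so `need_delay()` does not invalidate it; this is a sketch, assuming the names are used from within this module:

dmap = {b'a.txt': DirstateItem.from_v1_data(b'n', 0o100644, 6, 1000)}
copymap = {}
parents = (b'\x11' * 20, b'\x22' * 20)
raw = pack_dirstate(dmap, copymap, parents, (2000, 0))

new_dmap, new_copymap = {}, {}
p1, p2 = parse_dirstate(new_dmap, new_copymap, raw)
assert (p1, p2) == parents
assert new_dmap[b'a.txt'].v1_mtime() == 1000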
@@ -1,286 +1,288 b''
1 use cpython::exc;
1 use cpython::exc;
2 use cpython::ObjectProtocol;
2 use cpython::ObjectProtocol;
3 use cpython::PyBytes;
3 use cpython::PyBytes;
4 use cpython::PyErr;
4 use cpython::PyErr;
5 use cpython::PyNone;
5 use cpython::PyNone;
6 use cpython::PyObject;
6 use cpython::PyObject;
7 use cpython::PyResult;
7 use cpython::PyResult;
8 use cpython::Python;
8 use cpython::Python;
9 use cpython::PythonObject;
9 use cpython::PythonObject;
10 use hg::dirstate::DirstateEntry;
10 use hg::dirstate::DirstateEntry;
11 use hg::dirstate::EntryState;
11 use hg::dirstate::EntryState;
12 use hg::dirstate::TruncatedTimestamp;
12 use hg::dirstate::TruncatedTimestamp;
13 use std::cell::Cell;
13 use std::cell::Cell;
14 use std::convert::TryFrom;
14 use std::convert::TryFrom;
15
15
16 py_class!(pub class DirstateItem |py| {
16 py_class!(pub class DirstateItem |py| {
17 data entry: Cell<DirstateEntry>;
17 data entry: Cell<DirstateEntry>;
18
18
19 def __new__(
19 def __new__(
20 _cls,
20 _cls,
21 wc_tracked: bool = false,
21 wc_tracked: bool = false,
22 p1_tracked: bool = false,
22 p1_tracked: bool = false,
23 p2_info: bool = false,
23 p2_info: bool = false,
24 has_meaningful_data: bool = true,
24 has_meaningful_data: bool = true,
25 has_meaningful_mtime: bool = true,
25 has_meaningful_mtime: bool = true,
26 parentfiledata: Option<(u32, u32, (u32, u32))> = None,
26 parentfiledata: Option<(u32, u32, Option<(u32, u32)>)> = None,
27 fallback_exec: Option<bool> = None,
27 fallback_exec: Option<bool> = None,
28 fallback_symlink: Option<bool> = None,
28 fallback_symlink: Option<bool> = None,
29
29
30 ) -> PyResult<DirstateItem> {
30 ) -> PyResult<DirstateItem> {
31 let mut mode_size_opt = None;
31 let mut mode_size_opt = None;
32 let mut mtime_opt = None;
32 let mut mtime_opt = None;
33 if let Some((mode, size, mtime)) = parentfiledata {
33 if let Some((mode, size, mtime)) = parentfiledata {
34 if has_meaningful_data {
34 if has_meaningful_data {
35 mode_size_opt = Some((mode, size))
35 mode_size_opt = Some((mode, size))
36 }
36 }
37 if has_meaningful_mtime {
37 if has_meaningful_mtime {
38 mtime_opt = Some(timestamp(py, mtime)?)
38 if let Some(m) = mtime {
39 mtime_opt = Some(timestamp(py, m)?);
40 }
39 }
41 }
40 }
42 }
41 let entry = DirstateEntry::from_v2_data(
43 let entry = DirstateEntry::from_v2_data(
42 wc_tracked,
44 wc_tracked,
43 p1_tracked,
45 p1_tracked,
44 p2_info,
46 p2_info,
45 mode_size_opt,
47 mode_size_opt,
46 mtime_opt,
48 mtime_opt,
47 fallback_exec,
49 fallback_exec,
48 fallback_symlink,
50 fallback_symlink,
49 );
51 );
50 DirstateItem::create_instance(py, Cell::new(entry))
52 DirstateItem::create_instance(py, Cell::new(entry))
51 }
53 }
52
54
53 @property
55 @property
54 def state(&self) -> PyResult<PyBytes> {
56 def state(&self) -> PyResult<PyBytes> {
55 let state_byte: u8 = self.entry(py).get().state().into();
57 let state_byte: u8 = self.entry(py).get().state().into();
56 Ok(PyBytes::new(py, &[state_byte]))
58 Ok(PyBytes::new(py, &[state_byte]))
57 }
59 }
58
60
59 @property
61 @property
60 def mode(&self) -> PyResult<i32> {
62 def mode(&self) -> PyResult<i32> {
61 Ok(self.entry(py).get().mode())
63 Ok(self.entry(py).get().mode())
62 }
64 }
63
65
64 @property
66 @property
65 def size(&self) -> PyResult<i32> {
67 def size(&self) -> PyResult<i32> {
66 Ok(self.entry(py).get().size())
68 Ok(self.entry(py).get().size())
67 }
69 }
68
70
69 @property
71 @property
70 def mtime(&self) -> PyResult<i32> {
72 def mtime(&self) -> PyResult<i32> {
71 Ok(self.entry(py).get().mtime())
73 Ok(self.entry(py).get().mtime())
72 }
74 }
73
75
74 @property
76 @property
75 def has_fallback_exec(&self) -> PyResult<bool> {
77 def has_fallback_exec(&self) -> PyResult<bool> {
76 match self.entry(py).get().get_fallback_exec() {
78 match self.entry(py).get().get_fallback_exec() {
77 Some(_) => Ok(true),
79 Some(_) => Ok(true),
78 None => Ok(false),
80 None => Ok(false),
79 }
81 }
80 }
82 }
81
83
82 @property
84 @property
83 def fallback_exec(&self) -> PyResult<Option<bool>> {
85 def fallback_exec(&self) -> PyResult<Option<bool>> {
84 match self.entry(py).get().get_fallback_exec() {
86 match self.entry(py).get().get_fallback_exec() {
85 Some(exec) => Ok(Some(exec)),
87 Some(exec) => Ok(Some(exec)),
86 None => Ok(None),
88 None => Ok(None),
87 }
89 }
88 }
90 }
89
91
90 @fallback_exec.setter
92 @fallback_exec.setter
91 def set_fallback_exec(&self, value: Option<PyObject>) -> PyResult<()> {
93 def set_fallback_exec(&self, value: Option<PyObject>) -> PyResult<()> {
92 match value {
94 match value {
93 None => {self.entry(py).get().set_fallback_exec(None);},
95 None => {self.entry(py).get().set_fallback_exec(None);},
94 Some(value) => {
96 Some(value) => {
95 if value.is_none(py) {
97 if value.is_none(py) {
96 self.entry(py).get().set_fallback_exec(None);
98 self.entry(py).get().set_fallback_exec(None);
97 } else {
99 } else {
98 self.entry(py).get().set_fallback_exec(
100 self.entry(py).get().set_fallback_exec(
99 Some(value.is_true(py)?)
101 Some(value.is_true(py)?)
100 );
102 );
101 }},
103 }},
102 }
104 }
103 Ok(())
105 Ok(())
104 }
106 }
105
107
106 @property
108 @property
107 def has_fallback_symlink(&self) -> PyResult<bool> {
109 def has_fallback_symlink(&self) -> PyResult<bool> {
108 match self.entry(py).get().get_fallback_symlink() {
110 match self.entry(py).get().get_fallback_symlink() {
109 Some(_) => Ok(true),
111 Some(_) => Ok(true),
110 None => Ok(false),
112 None => Ok(false),
111 }
113 }
112 }
114 }
113
115
114 @property
116 @property
115 def fallback_symlink(&self) -> PyResult<Option<bool>> {
117 def fallback_symlink(&self) -> PyResult<Option<bool>> {
116 match self.entry(py).get().get_fallback_symlink() {
118 match self.entry(py).get().get_fallback_symlink() {
117 Some(symlink) => Ok(Some(symlink)),
119 Some(symlink) => Ok(Some(symlink)),
118 None => Ok(None),
120 None => Ok(None),
119 }
121 }
120 }
122 }
121
123
122 @fallback_symlink.setter
124 @fallback_symlink.setter
123 def set_fallback_symlink(&self, value: Option<PyObject>) -> PyResult<()> {
125 def set_fallback_symlink(&self, value: Option<PyObject>) -> PyResult<()> {
124 match value {
126 match value {
125 None => {self.entry(py).get().set_fallback_symlink(None);},
127 None => {self.entry(py).get().set_fallback_symlink(None);},
126 Some(value) => {
128 Some(value) => {
127 if value.is_none(py) {
129 if value.is_none(py) {
128 self.entry(py).get().set_fallback_symlink(None);
130 self.entry(py).get().set_fallback_symlink(None);
129 } else {
131 } else {
130 self.entry(py).get().set_fallback_symlink(
132 self.entry(py).get().set_fallback_symlink(
131 Some(value.is_true(py)?)
133 Some(value.is_true(py)?)
132 );
134 );
133 }},
135 }},
134 }
136 }
135 Ok(())
137 Ok(())
136 }
138 }
137
139
138 @property
140 @property
139 def tracked(&self) -> PyResult<bool> {
141 def tracked(&self) -> PyResult<bool> {
140 Ok(self.entry(py).get().tracked())
142 Ok(self.entry(py).get().tracked())
141 }
143 }
142
144
143 @property
145 @property
144 def p1_tracked(&self) -> PyResult<bool> {
146 def p1_tracked(&self) -> PyResult<bool> {
145 Ok(self.entry(py).get().p1_tracked())
147 Ok(self.entry(py).get().p1_tracked())
146 }
148 }
147
149
148 @property
150 @property
149 def added(&self) -> PyResult<bool> {
151 def added(&self) -> PyResult<bool> {
150 Ok(self.entry(py).get().added())
152 Ok(self.entry(py).get().added())
151 }
153 }
152
154
153
155
154 @property
156 @property
155 def p2_info(&self) -> PyResult<bool> {
157 def p2_info(&self) -> PyResult<bool> {
156 Ok(self.entry(py).get().p2_info())
158 Ok(self.entry(py).get().p2_info())
157 }
159 }
158
160
159 @property
161 @property
160 def removed(&self) -> PyResult<bool> {
162 def removed(&self) -> PyResult<bool> {
161 Ok(self.entry(py).get().removed())
163 Ok(self.entry(py).get().removed())
162 }
164 }
163
165
164 @property
166 @property
165 def maybe_clean(&self) -> PyResult<bool> {
167 def maybe_clean(&self) -> PyResult<bool> {
166 Ok(self.entry(py).get().maybe_clean())
168 Ok(self.entry(py).get().maybe_clean())
167 }
169 }
168
170
169 @property
171 @property
170 def any_tracked(&self) -> PyResult<bool> {
172 def any_tracked(&self) -> PyResult<bool> {
171 Ok(self.entry(py).get().any_tracked())
173 Ok(self.entry(py).get().any_tracked())
172 }
174 }
173
175
174 def v1_state(&self) -> PyResult<PyBytes> {
176 def v1_state(&self) -> PyResult<PyBytes> {
175 let (state, _mode, _size, _mtime) = self.entry(py).get().v1_data();
177 let (state, _mode, _size, _mtime) = self.entry(py).get().v1_data();
176 let state_byte: u8 = state.into();
178 let state_byte: u8 = state.into();
177 Ok(PyBytes::new(py, &[state_byte]))
179 Ok(PyBytes::new(py, &[state_byte]))
178 }
180 }
179
181
180 def v1_mode(&self) -> PyResult<i32> {
182 def v1_mode(&self) -> PyResult<i32> {
181 let (_state, mode, _size, _mtime) = self.entry(py).get().v1_data();
183 let (_state, mode, _size, _mtime) = self.entry(py).get().v1_data();
182 Ok(mode)
184 Ok(mode)
183 }
185 }
184
186
185 def v1_size(&self) -> PyResult<i32> {
187 def v1_size(&self) -> PyResult<i32> {
186 let (_state, _mode, size, _mtime) = self.entry(py).get().v1_data();
188 let (_state, _mode, size, _mtime) = self.entry(py).get().v1_data();
187 Ok(size)
189 Ok(size)
188 }
190 }
189
191
190 def v1_mtime(&self) -> PyResult<i32> {
192 def v1_mtime(&self) -> PyResult<i32> {
191 let (_state, _mode, _size, mtime) = self.entry(py).get().v1_data();
193 let (_state, _mode, _size, mtime) = self.entry(py).get().v1_data();
192 Ok(mtime)
194 Ok(mtime)
193 }
195 }
194
196
195 def need_delay(&self, now: (u32, u32)) -> PyResult<bool> {
197 def need_delay(&self, now: (u32, u32)) -> PyResult<bool> {
196 let now = timestamp(py, now)?;
198 let now = timestamp(py, now)?;
197 Ok(self.entry(py).get().need_delay(now))
199 Ok(self.entry(py).get().need_delay(now))
198 }
200 }
199
201
200 def mtime_likely_equal_to(&self, other: (u32, u32)) -> PyResult<bool> {
202 def mtime_likely_equal_to(&self, other: (u32, u32)) -> PyResult<bool> {
201 if let Some(mtime) = self.entry(py).get().truncated_mtime() {
203 if let Some(mtime) = self.entry(py).get().truncated_mtime() {
202 Ok(mtime.likely_equal(timestamp(py, other)?))
204 Ok(mtime.likely_equal(timestamp(py, other)?))
203 } else {
205 } else {
204 Ok(false)
206 Ok(false)
205 }
207 }
206 }
208 }
207
209
208 @classmethod
210 @classmethod
209 def from_v1_data(
211 def from_v1_data(
210 _cls,
212 _cls,
211 state: PyBytes,
213 state: PyBytes,
212 mode: i32,
214 mode: i32,
213 size: i32,
215 size: i32,
214 mtime: i32,
216 mtime: i32,
215 ) -> PyResult<Self> {
217 ) -> PyResult<Self> {
216 let state = <[u8; 1]>::try_from(state.data(py))
218 let state = <[u8; 1]>::try_from(state.data(py))
217 .ok()
219 .ok()
218 .and_then(|state| EntryState::try_from(state[0]).ok())
220 .and_then(|state| EntryState::try_from(state[0]).ok())
219 .ok_or_else(|| PyErr::new::<exc::ValueError, _>(py, "invalid state"))?;
221 .ok_or_else(|| PyErr::new::<exc::ValueError, _>(py, "invalid state"))?;
220 let entry = DirstateEntry::from_v1_data(state, mode, size, mtime);
222 let entry = DirstateEntry::from_v1_data(state, mode, size, mtime);
221 DirstateItem::create_instance(py, Cell::new(entry))
223 DirstateItem::create_instance(py, Cell::new(entry))
222 }
224 }
223
225
224 def drop_merge_data(&self) -> PyResult<PyNone> {
226 def drop_merge_data(&self) -> PyResult<PyNone> {
225 self.update(py, |entry| entry.drop_merge_data());
227 self.update(py, |entry| entry.drop_merge_data());
226 Ok(PyNone)
228 Ok(PyNone)
227 }
229 }
228
230
229 def set_clean(
231 def set_clean(
230 &self,
232 &self,
231 mode: u32,
233 mode: u32,
232 size: u32,
234 size: u32,
233 mtime: (u32, u32),
235 mtime: (u32, u32),
234 ) -> PyResult<PyNone> {
236 ) -> PyResult<PyNone> {
235 let mtime = timestamp(py, mtime)?;
237 let mtime = timestamp(py, mtime)?;
236 self.update(py, |entry| entry.set_clean(mode, size, mtime));
238 self.update(py, |entry| entry.set_clean(mode, size, mtime));
237 Ok(PyNone)
239 Ok(PyNone)
238 }
240 }
239
241
240 def set_possibly_dirty(&self) -> PyResult<PyNone> {
242 def set_possibly_dirty(&self) -> PyResult<PyNone> {
241 self.update(py, |entry| entry.set_possibly_dirty());
243 self.update(py, |entry| entry.set_possibly_dirty());
242 Ok(PyNone)
244 Ok(PyNone)
243 }
245 }
244
246
245 def set_tracked(&self) -> PyResult<PyNone> {
247 def set_tracked(&self) -> PyResult<PyNone> {
246 self.update(py, |entry| entry.set_tracked());
248 self.update(py, |entry| entry.set_tracked());
247 Ok(PyNone)
249 Ok(PyNone)
248 }
250 }
249
251
250 def set_untracked(&self) -> PyResult<PyNone> {
252 def set_untracked(&self) -> PyResult<PyNone> {
251 self.update(py, |entry| entry.set_untracked());
253 self.update(py, |entry| entry.set_untracked());
252 Ok(PyNone)
254 Ok(PyNone)
253 }
255 }
254 });
256 });
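The signature change in __new__ is the point of this revision: the third element of parentfiledata is now Option<(u32, u32)>, so a caller can record mode and size while leaving the mtime unknown, instead of dropping the whole tuple or passing has_meaningful_mtime=False. A hedged sketch of how the Python side might exercise this; the import path is an assumption for illustration, and only behaviour visible in the methods above is relied on.

```python
# Illustrative use of the widened signature; the module path is an assumption,
# substitute whatever exposes the compiled DirstateItem class.
from mercurial.rustext.dirstate import DirstateItem  # assumed import path

# mode and size from the parent, mtime known as (truncated seconds, nanoseconds)
item = DirstateItem(
    wc_tracked=True,
    p1_tracked=True,
    parentfiledata=(0o100644, 1234, (1_500_000_000, 0)),
)

# mode and size from the parent, mtime deliberately unknown: with this change
# the caller can pass None directly instead of a dummy timestamp pair.
item_no_mtime = DirstateItem(
    wc_tracked=True,
    p1_tracked=True,
    parentfiledata=(0o100644, 1234, None),
)

# With no stored mtime, mtime_likely_equal_to falls through to False
# (see the `else` branch of that method above).
assert not item_no_mtime.mtime_likely_equal_to((1_500_000_000, 0))
```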
255
257
256 impl DirstateItem {
258 impl DirstateItem {
257 pub fn new_as_pyobject(
259 pub fn new_as_pyobject(
258 py: Python<'_>,
260 py: Python<'_>,
259 entry: DirstateEntry,
261 entry: DirstateEntry,
260 ) -> PyResult<PyObject> {
262 ) -> PyResult<PyObject> {
261 Ok(DirstateItem::create_instance(py, Cell::new(entry))?.into_object())
263 Ok(DirstateItem::create_instance(py, Cell::new(entry))?.into_object())
262 }
264 }
263
265
264 pub fn get_entry(&self, py: Python<'_>) -> DirstateEntry {
266 pub fn get_entry(&self, py: Python<'_>) -> DirstateEntry {
265 self.entry(py).get()
267 self.entry(py).get()
266 }
268 }
267
269
268 // TODO: Use https://doc.rust-lang.org/std/cell/struct.Cell.html#method.update instead when it’s stable
270 // TODO: Use https://doc.rust-lang.org/std/cell/struct.Cell.html#method.update instead when it’s stable
269 pub fn update(&self, py: Python<'_>, f: impl FnOnce(&mut DirstateEntry)) {
271 pub fn update(&self, py: Python<'_>, f: impl FnOnce(&mut DirstateEntry)) {
270 let mut entry = self.entry(py).get();
272 let mut entry = self.entry(py).get();
271 f(&mut entry);
273 f(&mut entry);
272 self.entry(py).set(entry)
274 self.entry(py).set(entry)
273 }
275 }
274 }
276 }
275
277
276 pub(crate) fn timestamp(
278 pub(crate) fn timestamp(
277 py: Python<'_>,
279 py: Python<'_>,
278 (s, ns): (u32, u32),
280 (s, ns): (u32, u32),
279 ) -> PyResult<TruncatedTimestamp> {
281 ) -> PyResult<TruncatedTimestamp> {
280 TruncatedTimestamp::from_already_truncated(s, ns).map_err(|_| {
282 TruncatedTimestamp::from_already_truncated(s, ns).map_err(|_| {
281 PyErr::new::<exc::ValueError, _>(
283 PyErr::new::<exc::ValueError, _>(
282 py,
284 py,
283 "expected mtime truncated to 31 bits",
285 "expected mtime truncated to 31 bits",
284 )
286 )
285 })
287 })
286 }
288 }
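timestamp() is the single conversion point from the (seconds, nanoseconds) pairs Python passes in to a TruncatedTimestamp, and it rejects seconds that do not fit in 31 bits. A sketch of producing such a pair from os.stat, under the assumption that masking the seconds to 31 bits mirrors what the Rust side expects; the helper below is illustrative, not part of Mercurial.

```python
# Illustrative helper (not a Mercurial API): build the (seconds, nanoseconds)
# pair accepted by need_delay, set_clean and mtime_likely_equal_to above.
import os

RANGE_MASK_31BIT = 0x7FFFFFFF  # TruncatedTimestamp keeps only 31 bits of seconds

def truncated_mtime(path):
    st = os.stat(path)
    seconds = st.st_mtime_ns // 1_000_000_000
    nanoseconds = st.st_mtime_ns % 1_000_000_000
    return (seconds & RANGE_MASK_31BIT, nanoseconds)

# e.g. item.mtime_likely_equal_to(truncated_mtime("foo.txt"))
```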