##// END OF EJS Templates
dirstate: remove need_delay logic...
Raphaël Gomès -
r49215:2b5d1618 default draft
parent child Browse files
Show More
@@ -1,1355 +1,1320 b''
1 /*
1 /*
2 parsers.c - efficient content parsing
2 parsers.c - efficient content parsing
3
3
4 Copyright 2008 Olivia Mackall <olivia@selenic.com> and others
4 Copyright 2008 Olivia Mackall <olivia@selenic.com> and others
5
5
6 This software may be used and distributed according to the terms of
6 This software may be used and distributed according to the terms of
7 the GNU General Public License, incorporated herein by reference.
7 the GNU General Public License, incorporated herein by reference.
8 */
8 */
9
9
10 #define PY_SSIZE_T_CLEAN
10 #define PY_SSIZE_T_CLEAN
11 #include <Python.h>
11 #include <Python.h>
12 #include <ctype.h>
12 #include <ctype.h>
13 #include <stddef.h>
13 #include <stddef.h>
14 #include <string.h>
14 #include <string.h>
15
15
16 #include "bitmanipulation.h"
16 #include "bitmanipulation.h"
17 #include "charencode.h"
17 #include "charencode.h"
18 #include "util.h"
18 #include "util.h"
19
19
20 #ifdef IS_PY3K
20 #ifdef IS_PY3K
21 /* The mapping of Python types is meant to be temporary to get Python
21 /* The mapping of Python types is meant to be temporary to get Python
22 * 3 to compile. We should remove this once Python 3 support is fully
22 * 3 to compile. We should remove this once Python 3 support is fully
23 * supported and proper types are used in the extensions themselves. */
23 * supported and proper types are used in the extensions themselves. */
24 #define PyInt_Check PyLong_Check
24 #define PyInt_Check PyLong_Check
25 #define PyInt_FromLong PyLong_FromLong
25 #define PyInt_FromLong PyLong_FromLong
26 #define PyInt_FromSsize_t PyLong_FromSsize_t
26 #define PyInt_FromSsize_t PyLong_FromSsize_t
27 #define PyInt_AsLong PyLong_AsLong
27 #define PyInt_AsLong PyLong_AsLong
28 #else
28 #else
29 /* Windows on Python 2.7 doesn't define S_IFLNK. Python 3+ defines via
29 /* Windows on Python 2.7 doesn't define S_IFLNK. Python 3+ defines via
30 * pyport.h. */
30 * pyport.h. */
31 #ifndef S_IFLNK
31 #ifndef S_IFLNK
32 #define S_IFLNK 0120000
32 #define S_IFLNK 0120000
33 #endif
33 #endif
34 #endif
34 #endif
35
35
/* Error message used when this compiled extension does not match the
 * Python minor version importing it. */
static const char *const versionerrortext = "Python minor version mismatch";

/* Sentinel `size` values used by the dirstate v1 on-disk format. */
static const int dirstate_v1_from_p2 = -2;
static const int dirstate_v1_nonnormal = -1;
/* Sentinel `mtime` meaning "cannot be trusted, file must be rechecked". */
static const int ambiguous_time = -1;
41
41
42 static PyObject *dict_new_presized(PyObject *self, PyObject *args)
42 static PyObject *dict_new_presized(PyObject *self, PyObject *args)
43 {
43 {
44 Py_ssize_t expected_size;
44 Py_ssize_t expected_size;
45
45
46 if (!PyArg_ParseTuple(args, "n:make_presized_dict", &expected_size)) {
46 if (!PyArg_ParseTuple(args, "n:make_presized_dict", &expected_size)) {
47 return NULL;
47 return NULL;
48 }
48 }
49
49
50 return _dict_new_presized(expected_size);
50 return _dict_new_presized(expected_size);
51 }
51 }
52
52
/* tp_new for DirstateItem.
 *
 * Accepts keyword arguments describing the item's tracking state:
 *   wc_tracked / p1_tracked / p2_info   — boolean-ish ints, default 0
 *   has_meaningful_data / has_meaningful_mtime — default 1, but forced
 *       to 0 below when `parentfiledata` does not supply the data
 *   parentfiledata — None or a (mode, size, mtime) tuple, where mtime
 *       is itself None or a (seconds, nanoseconds) tuple
 *   fallback_exec / fallback_symlink — None or boolean-ish
 *
 * Returns a new reference, or NULL with an exception set. */
static PyObject *dirstate_item_new(PyTypeObject *subtype, PyObject *args,
                                   PyObject *kwds)
{
        /* We do all the initialization here and not a tp_init function because
         * dirstate_item is immutable. */
        dirstateItemObject *t;
        int wc_tracked;
        int p1_tracked;
        int p2_info;
        int has_meaningful_data;
        int has_meaningful_mtime;
        int mode;
        int size;
        int mtime_s;
        int mtime_ns;
        PyObject *parentfiledata;
        PyObject *mtime;
        PyObject *fallback_exec;
        PyObject *fallback_symlink;
        static char *keywords_name[] = {
            "wc_tracked", "p1_tracked", "p2_info",
            "has_meaningful_data", "has_meaningful_mtime", "parentfiledata",
            "fallback_exec", "fallback_symlink", NULL,
        };
        wc_tracked = 0;
        p1_tracked = 0;
        p2_info = 0;
        has_meaningful_mtime = 1;
        has_meaningful_data = 1;
        parentfiledata = Py_None;
        fallback_exec = Py_None;
        fallback_symlink = Py_None;
        if (!PyArg_ParseTupleAndKeywords(args, kwds, "|iiiiiOOO", keywords_name,
                                         &wc_tracked, &p1_tracked, &p2_info,
                                         &has_meaningful_data,
                                         &has_meaningful_mtime, &parentfiledata,
                                         &fallback_exec, &fallback_symlink)) {
                return NULL;
        }
        t = (dirstateItemObject *)subtype->tp_alloc(subtype, 1);
        if (!t) {
                return NULL;
        }

        /* Translate the keyword arguments into the packed `flags` field. */
        t->flags = 0;
        if (wc_tracked) {
                t->flags |= dirstate_flag_wc_tracked;
        }
        if (p1_tracked) {
                t->flags |= dirstate_flag_p1_tracked;
        }
        if (p2_info) {
                t->flags |= dirstate_flag_p2_info;
        }

        /* A fallback value of None means "no fallback recorded"; otherwise
         * both the "has" bit and the value bit are stored. */
        if (fallback_exec != Py_None) {
                t->flags |= dirstate_flag_has_fallback_exec;
                if (PyObject_IsTrue(fallback_exec)) {
                        t->flags |= dirstate_flag_fallback_exec;
                }
        }
        if (fallback_symlink != Py_None) {
                t->flags |= dirstate_flag_has_fallback_symlink;
                if (PyObject_IsTrue(fallback_symlink)) {
                        t->flags |= dirstate_flag_fallback_symlink;
                }
        }

        if (parentfiledata != Py_None) {
                if (!PyArg_ParseTuple(parentfiledata, "iiO", &mode, &size,
                                      &mtime)) {
                        return NULL;
                }
                if (mtime != Py_None) {
                        if (!PyArg_ParseTuple(mtime, "ii", &mtime_s,
                                              &mtime_ns)) {
                                return NULL;
                        }
                } else {
                        /* mtime_s/mtime_ns stay unset; the flag below
                         * guarantees they are never read. */
                        has_meaningful_mtime = 0;
                }
        } else {
                /* Without parentfiledata there is no mode/size/mtime to
                 * record, regardless of what the caller claimed. */
                has_meaningful_data = 0;
                has_meaningful_mtime = 0;
        }
        if (has_meaningful_data) {
                t->flags |= dirstate_flag_has_meaningful_data;
                t->mode = mode;
                t->size = size;
        } else {
                t->mode = 0;
                t->size = 0;
        }
        if (has_meaningful_mtime) {
                t->flags |= dirstate_flag_has_mtime;
                t->mtime_s = mtime_s;
                t->mtime_ns = mtime_ns;
        } else {
                t->mtime_s = 0;
                t->mtime_ns = 0;
        }
        return (PyObject *)t;
}
156
156
/* tp_dealloc: a DirstateItem owns no Python references, so releasing
 * the object's own memory is all that is required. */
static void dirstate_item_dealloc(PyObject *o)
{
        PyObject_Del(o);
}
161
161
162 static inline bool dirstate_item_c_tracked(dirstateItemObject *self)
162 static inline bool dirstate_item_c_tracked(dirstateItemObject *self)
163 {
163 {
164 return (self->flags & dirstate_flag_wc_tracked);
164 return (self->flags & dirstate_flag_wc_tracked);
165 }
165 }
166
166
167 static inline bool dirstate_item_c_any_tracked(dirstateItemObject *self)
167 static inline bool dirstate_item_c_any_tracked(dirstateItemObject *self)
168 {
168 {
169 const int mask = dirstate_flag_wc_tracked | dirstate_flag_p1_tracked |
169 const int mask = dirstate_flag_wc_tracked | dirstate_flag_p1_tracked |
170 dirstate_flag_p2_info;
170 dirstate_flag_p2_info;
171 return (self->flags & mask);
171 return (self->flags & mask);
172 }
172 }
173
173
174 static inline bool dirstate_item_c_added(dirstateItemObject *self)
174 static inline bool dirstate_item_c_added(dirstateItemObject *self)
175 {
175 {
176 const int mask = (dirstate_flag_wc_tracked | dirstate_flag_p1_tracked |
176 const int mask = (dirstate_flag_wc_tracked | dirstate_flag_p1_tracked |
177 dirstate_flag_p2_info);
177 dirstate_flag_p2_info);
178 const int target = dirstate_flag_wc_tracked;
178 const int target = dirstate_flag_wc_tracked;
179 return (self->flags & mask) == target;
179 return (self->flags & mask) == target;
180 }
180 }
181
181
182 static inline bool dirstate_item_c_removed(dirstateItemObject *self)
182 static inline bool dirstate_item_c_removed(dirstateItemObject *self)
183 {
183 {
184 if (self->flags & dirstate_flag_wc_tracked) {
184 if (self->flags & dirstate_flag_wc_tracked) {
185 return false;
185 return false;
186 }
186 }
187 return (self->flags &
187 return (self->flags &
188 (dirstate_flag_p1_tracked | dirstate_flag_p2_info));
188 (dirstate_flag_p1_tracked | dirstate_flag_p2_info));
189 }
189 }
190
190
191 static inline bool dirstate_item_c_merged(dirstateItemObject *self)
191 static inline bool dirstate_item_c_merged(dirstateItemObject *self)
192 {
192 {
193 return ((self->flags & dirstate_flag_wc_tracked) &&
193 return ((self->flags & dirstate_flag_wc_tracked) &&
194 (self->flags & dirstate_flag_p1_tracked) &&
194 (self->flags & dirstate_flag_p1_tracked) &&
195 (self->flags & dirstate_flag_p2_info));
195 (self->flags & dirstate_flag_p2_info));
196 }
196 }
197
197
198 static inline bool dirstate_item_c_from_p2(dirstateItemObject *self)
198 static inline bool dirstate_item_c_from_p2(dirstateItemObject *self)
199 {
199 {
200 return ((self->flags & dirstate_flag_wc_tracked) &&
200 return ((self->flags & dirstate_flag_wc_tracked) &&
201 !(self->flags & dirstate_flag_p1_tracked) &&
201 !(self->flags & dirstate_flag_p1_tracked) &&
202 (self->flags & dirstate_flag_p2_info));
202 (self->flags & dirstate_flag_p2_info));
203 }
203 }
204
204
205 static inline char dirstate_item_c_v1_state(dirstateItemObject *self)
205 static inline char dirstate_item_c_v1_state(dirstateItemObject *self)
206 {
206 {
207 if (dirstate_item_c_removed(self)) {
207 if (dirstate_item_c_removed(self)) {
208 return 'r';
208 return 'r';
209 } else if (dirstate_item_c_merged(self)) {
209 } else if (dirstate_item_c_merged(self)) {
210 return 'm';
210 return 'm';
211 } else if (dirstate_item_c_added(self)) {
211 } else if (dirstate_item_c_added(self)) {
212 return 'a';
212 return 'a';
213 } else {
213 } else {
214 return 'n';
214 return 'n';
215 }
215 }
216 }
216 }
217
217
218 static inline bool dirstate_item_c_has_fallback_exec(dirstateItemObject *self)
218 static inline bool dirstate_item_c_has_fallback_exec(dirstateItemObject *self)
219 {
219 {
220 return (bool)self->flags & dirstate_flag_has_fallback_exec;
220 return (bool)self->flags & dirstate_flag_has_fallback_exec;
221 }
221 }
222
222
223 static inline bool
223 static inline bool
224 dirstate_item_c_has_fallback_symlink(dirstateItemObject *self)
224 dirstate_item_c_has_fallback_symlink(dirstateItemObject *self)
225 {
225 {
226 return (bool)self->flags & dirstate_flag_has_fallback_symlink;
226 return (bool)self->flags & dirstate_flag_has_fallback_symlink;
227 }
227 }
228
228
229 static inline int dirstate_item_c_v1_mode(dirstateItemObject *self)
229 static inline int dirstate_item_c_v1_mode(dirstateItemObject *self)
230 {
230 {
231 if (self->flags & dirstate_flag_has_meaningful_data) {
231 if (self->flags & dirstate_flag_has_meaningful_data) {
232 return self->mode;
232 return self->mode;
233 } else {
233 } else {
234 return 0;
234 return 0;
235 }
235 }
236 }
236 }
237
237
/* v1 `size` field. The v1 format overloads `size` with sentinel values
 * to encode merge information; the branch order below defines that
 * encoding and must not be reordered. */
static inline int dirstate_item_c_v1_size(dirstateItemObject *self)
{
        if (!(self->flags & dirstate_flag_wc_tracked) &&
            (self->flags & dirstate_flag_p2_info)) {
                /* untracked in wc, but p2 info: a "removed" entry that
                 * still remembers merge provenance */
                if (self->flags & dirstate_flag_p1_tracked) {
                        return dirstate_v1_nonnormal;
                } else {
                        return dirstate_v1_from_p2;
                }
        } else if (dirstate_item_c_removed(self)) {
                return 0;
        } else if (self->flags & dirstate_flag_p2_info) {
                return dirstate_v1_from_p2;
        } else if (dirstate_item_c_added(self)) {
                return dirstate_v1_nonnormal;
        } else if (self->flags & dirstate_flag_has_meaningful_data) {
                return self->size;
        } else {
                /* no recorded size: force a content check */
                return dirstate_v1_nonnormal;
        }
}
259
259
260 static inline int dirstate_item_c_v1_mtime(dirstateItemObject *self)
260 static inline int dirstate_item_c_v1_mtime(dirstateItemObject *self)
261 {
261 {
262 if (dirstate_item_c_removed(self)) {
262 if (dirstate_item_c_removed(self)) {
263 return 0;
263 return 0;
264 } else if (!(self->flags & dirstate_flag_has_mtime) ||
264 } else if (!(self->flags & dirstate_flag_has_mtime) ||
265 !(self->flags & dirstate_flag_p1_tracked) ||
265 !(self->flags & dirstate_flag_p1_tracked) ||
266 !(self->flags & dirstate_flag_wc_tracked) ||
266 !(self->flags & dirstate_flag_wc_tracked) ||
267 (self->flags & dirstate_flag_p2_info)) {
267 (self->flags & dirstate_flag_p2_info)) {
268 return ambiguous_time;
268 return ambiguous_time;
269 } else {
269 } else {
270 return self->mtime_s;
270 return self->mtime_s;
271 }
271 }
272 }
272 }
273
273
/* Return the (flags, size, mtime_s, mtime_ns) tuple used for dirstate
 * v2 serialization, deriving the exec/symlink mode bits from the v1
 * mode on platforms that support them. */
static PyObject *dirstate_item_v2_data(dirstateItemObject *self)
{
        int flags = self->flags;
        int mode = dirstate_item_c_v1_mode(self);
#ifdef S_IXUSR
        /* This is for platforms with an exec bit */
        if ((mode & S_IXUSR) != 0) {
                flags |= dirstate_flag_mode_exec_perm;
        } else {
                flags &= ~dirstate_flag_mode_exec_perm;
        }
#else
        /* no exec bit on this platform: never serialize it */
        flags &= ~dirstate_flag_mode_exec_perm;
#endif
#ifdef S_ISLNK
        /* This is for platforms with support for symlinks */
        if (S_ISLNK(mode)) {
                flags |= dirstate_flag_mode_is_symlink;
        } else {
                flags &= ~dirstate_flag_mode_is_symlink;
        }
#else
        /* no symlink support on this platform: never serialize it */
        flags &= ~dirstate_flag_mode_is_symlink;
#endif
        return Py_BuildValue("iiii", flags, self->size, self->mtime_s,
                             self->mtime_ns);
};
301
301
302 static PyObject *dirstate_item_v1_state(dirstateItemObject *self)
302 static PyObject *dirstate_item_v1_state(dirstateItemObject *self)
303 {
303 {
304 char state = dirstate_item_c_v1_state(self);
304 char state = dirstate_item_c_v1_state(self);
305 return PyBytes_FromStringAndSize(&state, 1);
305 return PyBytes_FromStringAndSize(&state, 1);
306 };
306 };
307
307
308 static PyObject *dirstate_item_v1_mode(dirstateItemObject *self)
308 static PyObject *dirstate_item_v1_mode(dirstateItemObject *self)
309 {
309 {
310 return PyInt_FromLong(dirstate_item_c_v1_mode(self));
310 return PyInt_FromLong(dirstate_item_c_v1_mode(self));
311 };
311 };
312
312
313 static PyObject *dirstate_item_v1_size(dirstateItemObject *self)
313 static PyObject *dirstate_item_v1_size(dirstateItemObject *self)
314 {
314 {
315 return PyInt_FromLong(dirstate_item_c_v1_size(self));
315 return PyInt_FromLong(dirstate_item_c_v1_size(self));
316 };
316 };
317
317
318 static PyObject *dirstate_item_v1_mtime(dirstateItemObject *self)
318 static PyObject *dirstate_item_v1_mtime(dirstateItemObject *self)
319 {
319 {
320 return PyInt_FromLong(dirstate_item_c_v1_mtime(self));
320 return PyInt_FromLong(dirstate_item_c_v1_mtime(self));
321 };
321 };
322
322
/* True when the stored mtime is ambiguous with `now` (a (seconds,
 * nanoseconds) tuple): a 'n' entry whose mtime seconds equal the
 * current seconds could still be modified within the same second.
 * Only whole seconds are compared; now_ns is parsed but unused.
 * NOTE(review): the changeset header says the need_delay logic is being
 * removed — confirm whether this function and its method-table entry
 * should be deleted rather than kept. */
static PyObject *dirstate_item_need_delay(dirstateItemObject *self,
                                          PyObject *now)
{
        int now_s;
        int now_ns;
        if (!PyArg_ParseTuple(now, "ii", &now_s, &now_ns)) {
                return NULL;
        }
        if (dirstate_item_c_v1_state(self) == 'n' && self->mtime_s == now_s) {
                Py_RETURN_TRUE;
        } else {
                Py_RETURN_FALSE;
        }
};
337
338 static PyObject *dirstate_item_mtime_likely_equal_to(dirstateItemObject *self,
323 static PyObject *dirstate_item_mtime_likely_equal_to(dirstateItemObject *self,
339 PyObject *other)
324 PyObject *other)
340 {
325 {
341 int other_s;
326 int other_s;
342 int other_ns;
327 int other_ns;
343 if (!PyArg_ParseTuple(other, "ii", &other_s, &other_ns)) {
328 if (!PyArg_ParseTuple(other, "ii", &other_s, &other_ns)) {
344 return NULL;
329 return NULL;
345 }
330 }
346 if ((self->flags & dirstate_flag_has_mtime) &&
331 if ((self->flags & dirstate_flag_has_mtime) &&
347 self->mtime_s == other_s &&
332 self->mtime_s == other_s &&
348 (self->mtime_ns == other_ns || self->mtime_ns == 0 ||
333 (self->mtime_ns == other_ns || self->mtime_ns == 0 ||
349 other_ns == 0)) {
334 other_ns == 0)) {
350 Py_RETURN_TRUE;
335 Py_RETURN_TRUE;
351 } else {
336 } else {
352 Py_RETURN_FALSE;
337 Py_RETURN_FALSE;
353 }
338 }
354 };
339 };
355
340
356 /* This will never change since it's bound to V1
341 /* This will never change since it's bound to V1
357 */
342 */
358 static inline dirstateItemObject *
343 static inline dirstateItemObject *
359 dirstate_item_from_v1_data(char state, int mode, int size, int mtime)
344 dirstate_item_from_v1_data(char state, int mode, int size, int mtime)
360 {
345 {
361 dirstateItemObject *t =
346 dirstateItemObject *t =
362 PyObject_New(dirstateItemObject, &dirstateItemType);
347 PyObject_New(dirstateItemObject, &dirstateItemType);
363 if (!t) {
348 if (!t) {
364 return NULL;
349 return NULL;
365 }
350 }
366 t->flags = 0;
351 t->flags = 0;
367 t->mode = 0;
352 t->mode = 0;
368 t->size = 0;
353 t->size = 0;
369 t->mtime_s = 0;
354 t->mtime_s = 0;
370 t->mtime_ns = 0;
355 t->mtime_ns = 0;
371
356
372 if (state == 'm') {
357 if (state == 'm') {
373 t->flags = (dirstate_flag_wc_tracked |
358 t->flags = (dirstate_flag_wc_tracked |
374 dirstate_flag_p1_tracked | dirstate_flag_p2_info);
359 dirstate_flag_p1_tracked | dirstate_flag_p2_info);
375 } else if (state == 'a') {
360 } else if (state == 'a') {
376 t->flags = dirstate_flag_wc_tracked;
361 t->flags = dirstate_flag_wc_tracked;
377 } else if (state == 'r') {
362 } else if (state == 'r') {
378 if (size == dirstate_v1_nonnormal) {
363 if (size == dirstate_v1_nonnormal) {
379 t->flags =
364 t->flags =
380 dirstate_flag_p1_tracked | dirstate_flag_p2_info;
365 dirstate_flag_p1_tracked | dirstate_flag_p2_info;
381 } else if (size == dirstate_v1_from_p2) {
366 } else if (size == dirstate_v1_from_p2) {
382 t->flags = dirstate_flag_p2_info;
367 t->flags = dirstate_flag_p2_info;
383 } else {
368 } else {
384 t->flags = dirstate_flag_p1_tracked;
369 t->flags = dirstate_flag_p1_tracked;
385 }
370 }
386 } else if (state == 'n') {
371 } else if (state == 'n') {
387 if (size == dirstate_v1_from_p2) {
372 if (size == dirstate_v1_from_p2) {
388 t->flags =
373 t->flags =
389 dirstate_flag_wc_tracked | dirstate_flag_p2_info;
374 dirstate_flag_wc_tracked | dirstate_flag_p2_info;
390 } else if (size == dirstate_v1_nonnormal) {
375 } else if (size == dirstate_v1_nonnormal) {
391 t->flags =
376 t->flags =
392 dirstate_flag_wc_tracked | dirstate_flag_p1_tracked;
377 dirstate_flag_wc_tracked | dirstate_flag_p1_tracked;
393 } else if (mtime == ambiguous_time) {
378 } else if (mtime == ambiguous_time) {
394 t->flags = (dirstate_flag_wc_tracked |
379 t->flags = (dirstate_flag_wc_tracked |
395 dirstate_flag_p1_tracked |
380 dirstate_flag_p1_tracked |
396 dirstate_flag_has_meaningful_data);
381 dirstate_flag_has_meaningful_data);
397 t->mode = mode;
382 t->mode = mode;
398 t->size = size;
383 t->size = size;
399 } else {
384 } else {
400 t->flags = (dirstate_flag_wc_tracked |
385 t->flags = (dirstate_flag_wc_tracked |
401 dirstate_flag_p1_tracked |
386 dirstate_flag_p1_tracked |
402 dirstate_flag_has_meaningful_data |
387 dirstate_flag_has_meaningful_data |
403 dirstate_flag_has_mtime);
388 dirstate_flag_has_mtime);
404 t->mode = mode;
389 t->mode = mode;
405 t->size = size;
390 t->size = size;
406 t->mtime_s = mtime;
391 t->mtime_s = mtime;
407 }
392 }
408 } else {
393 } else {
409 PyErr_Format(PyExc_RuntimeError,
394 PyErr_Format(PyExc_RuntimeError,
410 "unknown state: `%c` (%d, %d, %d)", state, mode,
395 "unknown state: `%c` (%d, %d, %d)", state, mode,
411 size, mtime, NULL);
396 size, mtime, NULL);
412 Py_DECREF(t);
397 Py_DECREF(t);
413 return NULL;
398 return NULL;
414 }
399 }
415
400
416 return t;
401 return t;
417 }
402 }
418
403
419 /* This will never change since it's bound to V1, unlike `dirstate_item_new` */
404 /* This will never change since it's bound to V1, unlike `dirstate_item_new` */
420 static PyObject *dirstate_item_from_v1_meth(PyTypeObject *subtype,
405 static PyObject *dirstate_item_from_v1_meth(PyTypeObject *subtype,
421 PyObject *args)
406 PyObject *args)
422 {
407 {
423 /* We do all the initialization here and not a tp_init function because
408 /* We do all the initialization here and not a tp_init function because
424 * dirstate_item is immutable. */
409 * dirstate_item is immutable. */
425 char state;
410 char state;
426 int size, mode, mtime;
411 int size, mode, mtime;
427 if (!PyArg_ParseTuple(args, "ciii", &state, &mode, &size, &mtime)) {
412 if (!PyArg_ParseTuple(args, "ciii", &state, &mode, &size, &mtime)) {
428 return NULL;
413 return NULL;
429 }
414 }
430 return (PyObject *)dirstate_item_from_v1_data(state, mode, size, mtime);
415 return (PyObject *)dirstate_item_from_v1_data(state, mode, size, mtime);
431 };
416 };
432
417
433 static PyObject *dirstate_item_from_v2_meth(PyTypeObject *subtype,
418 static PyObject *dirstate_item_from_v2_meth(PyTypeObject *subtype,
434 PyObject *args)
419 PyObject *args)
435 {
420 {
436 dirstateItemObject *t =
421 dirstateItemObject *t =
437 PyObject_New(dirstateItemObject, &dirstateItemType);
422 PyObject_New(dirstateItemObject, &dirstateItemType);
438 if (!t) {
423 if (!t) {
439 return NULL;
424 return NULL;
440 }
425 }
441 if (!PyArg_ParseTuple(args, "iiii", &t->flags, &t->size, &t->mtime_s,
426 if (!PyArg_ParseTuple(args, "iiii", &t->flags, &t->size, &t->mtime_s,
442 &t->mtime_ns)) {
427 &t->mtime_ns)) {
443 return NULL;
428 return NULL;
444 }
429 }
445 if (t->flags & dirstate_flag_expected_state_is_modified) {
430 if (t->flags & dirstate_flag_expected_state_is_modified) {
446 t->flags &= ~(dirstate_flag_expected_state_is_modified |
431 t->flags &= ~(dirstate_flag_expected_state_is_modified |
447 dirstate_flag_has_meaningful_data |
432 dirstate_flag_has_meaningful_data |
448 dirstate_flag_has_mtime);
433 dirstate_flag_has_mtime);
449 }
434 }
450 if (t->flags & dirstate_flag_mtime_second_ambiguous) {
435 if (t->flags & dirstate_flag_mtime_second_ambiguous) {
451 /* The current code is not able to do the more subtle comparison
436 /* The current code is not able to do the more subtle comparison
452 * that the MTIME_SECOND_AMBIGUOUS requires. So we ignore the
437 * that the MTIME_SECOND_AMBIGUOUS requires. So we ignore the
453 * mtime */
438 * mtime */
454 t->flags &= ~(dirstate_flag_mtime_second_ambiguous |
439 t->flags &= ~(dirstate_flag_mtime_second_ambiguous |
455 dirstate_flag_has_meaningful_data |
440 dirstate_flag_has_meaningful_data |
456 dirstate_flag_has_mtime);
441 dirstate_flag_has_mtime);
457 }
442 }
458 t->mode = 0;
443 t->mode = 0;
459 if (t->flags & dirstate_flag_has_meaningful_data) {
444 if (t->flags & dirstate_flag_has_meaningful_data) {
460 if (t->flags & dirstate_flag_mode_exec_perm) {
445 if (t->flags & dirstate_flag_mode_exec_perm) {
461 t->mode = 0755;
446 t->mode = 0755;
462 } else {
447 } else {
463 t->mode = 0644;
448 t->mode = 0644;
464 }
449 }
465 if (t->flags & dirstate_flag_mode_is_symlink) {
450 if (t->flags & dirstate_flag_mode_is_symlink) {
466 t->mode |= S_IFLNK;
451 t->mode |= S_IFLNK;
467 } else {
452 } else {
468 t->mode |= S_IFREG;
453 t->mode |= S_IFREG;
469 }
454 }
470 }
455 }
471 return (PyObject *)t;
456 return (PyObject *)t;
472 };
457 };
473
458
/* This means the next status call will have to actually check its content
   to make sure it is correct. */
static PyObject *dirstate_item_set_possibly_dirty(dirstateItemObject *self)
{
        /* Dropping the mtime is enough: without a cached mtime, status
         * cannot shortcut the comparison and must inspect the file. */
        self->flags &= ~dirstate_flag_has_mtime;
        Py_RETURN_NONE;
}
481
466
482 /* See docstring of the python implementation for details */
467 /* See docstring of the python implementation for details */
483 static PyObject *dirstate_item_set_clean(dirstateItemObject *self,
468 static PyObject *dirstate_item_set_clean(dirstateItemObject *self,
484 PyObject *args)
469 PyObject *args)
485 {
470 {
486 int size, mode, mtime_s, mtime_ns;
471 int size, mode, mtime_s, mtime_ns;
487 PyObject *mtime;
472 PyObject *mtime;
488 mtime_s = 0;
473 mtime_s = 0;
489 mtime_ns = 0;
474 mtime_ns = 0;
490 if (!PyArg_ParseTuple(args, "iiO", &mode, &size, &mtime)) {
475 if (!PyArg_ParseTuple(args, "iiO", &mode, &size, &mtime)) {
491 return NULL;
476 return NULL;
492 }
477 }
493 if (mtime != Py_None) {
478 if (mtime != Py_None) {
494 if (!PyArg_ParseTuple(mtime, "ii", &mtime_s, &mtime_ns)) {
479 if (!PyArg_ParseTuple(mtime, "ii", &mtime_s, &mtime_ns)) {
495 return NULL;
480 return NULL;
496 }
481 }
497 } else {
482 } else {
498 self->flags &= ~dirstate_flag_has_mtime;
483 self->flags &= ~dirstate_flag_has_mtime;
499 }
484 }
500 self->flags = dirstate_flag_wc_tracked | dirstate_flag_p1_tracked |
485 self->flags = dirstate_flag_wc_tracked | dirstate_flag_p1_tracked |
501 dirstate_flag_has_meaningful_data |
486 dirstate_flag_has_meaningful_data |
502 dirstate_flag_has_mtime;
487 dirstate_flag_has_mtime;
503 self->mode = mode;
488 self->mode = mode;
504 self->size = size;
489 self->size = size;
505 self->mtime_s = mtime_s;
490 self->mtime_s = mtime_s;
506 self->mtime_ns = mtime_ns;
491 self->mtime_ns = mtime_ns;
507 Py_RETURN_NONE;
492 Py_RETURN_NONE;
508 }
493 }
509
494
/* Start tracking the file in the working copy. The cached mtime is
 * dropped so the next status call re-verifies the file's content. */
static PyObject *dirstate_item_set_tracked(dirstateItemObject *self)
{
        self->flags |= dirstate_flag_wc_tracked;
        self->flags &= ~dirstate_flag_has_mtime;
        Py_RETURN_NONE;
}
516
501
/* Stop tracking the file in the working copy and zero the cached
 * stat data (mode/size/mtime), which only made sense while tracked.
 * Parent-related flag bits are intentionally preserved. */
static PyObject *dirstate_item_set_untracked(dirstateItemObject *self)
{
        self->flags &= ~dirstate_flag_wc_tracked;
        self->mode = 0;
        self->size = 0;
        self->mtime_s = 0;
        self->mtime_ns = 0;
        Py_RETURN_NONE;
}
526
511
/* Discard all merge-related (p2) information from the entry. The cached
 * mode/size/mtime are dropped along with the flags because they may
 * describe the p2 version of the file. No-op when p2_info is unset. */
static PyObject *dirstate_item_drop_merge_data(dirstateItemObject *self)
{
        if (self->flags & dirstate_flag_p2_info) {
                self->flags &= ~(dirstate_flag_p2_info |
                                 dirstate_flag_has_meaningful_data |
                                 dirstate_flag_has_mtime);
                self->mode = 0;
                self->size = 0;
                self->mtime_s = 0;
                self->mtime_ns = 0;
        }
        Py_RETURN_NONE;
}
/* Method table for the DirstateItem type. */
static PyMethodDef dirstate_item_methods[] = {
    {"v2_data", (PyCFunction)dirstate_item_v2_data, METH_NOARGS,
     "return data suitable for v2 serialization"},
    {"v1_state", (PyCFunction)dirstate_item_v1_state, METH_NOARGS,
     "return a \"state\" suitable for v1 serialization"},
    {"v1_mode", (PyCFunction)dirstate_item_v1_mode, METH_NOARGS,
     "return a \"mode\" suitable for v1 serialization"},
    {"v1_size", (PyCFunction)dirstate_item_v1_size, METH_NOARGS,
     "return a \"size\" suitable for v1 serialization"},
    {"v1_mtime", (PyCFunction)dirstate_item_v1_mtime, METH_NOARGS,
     "return a \"mtime\" suitable for v1 serialization"},
    {"need_delay", (PyCFunction)dirstate_item_need_delay, METH_O,
     "True if the stored mtime would be ambiguous with the current time"},
    {"mtime_likely_equal_to", (PyCFunction)dirstate_item_mtime_likely_equal_to,
     METH_O, "True if the stored mtime is likely equal to the given mtime"},
    {"from_v1_data", (PyCFunction)dirstate_item_from_v1_meth,
     METH_VARARGS | METH_CLASS, "build a new DirstateItem object from V1 data"},
    {"from_v2_data", (PyCFunction)dirstate_item_from_v2_meth,
     METH_VARARGS | METH_CLASS, "build a new DirstateItem object from V2 data"},
    {"set_possibly_dirty", (PyCFunction)dirstate_item_set_possibly_dirty,
     METH_NOARGS, "mark a file as \"possibly dirty\""},
    {"set_clean", (PyCFunction)dirstate_item_set_clean, METH_VARARGS,
     "mark a file as \"clean\""},
    {"set_tracked", (PyCFunction)dirstate_item_set_tracked, METH_NOARGS,
     "mark a file as \"tracked\""},
    {"set_untracked", (PyCFunction)dirstate_item_set_untracked, METH_NOARGS,
     "mark a file as \"untracked\""},
    {"drop_merge_data", (PyCFunction)dirstate_item_drop_merge_data, METH_NOARGS,
     "remove all \"merge-only\" from a DirstateItem"},
    {NULL} /* Sentinel */
};
571
554
572 static PyObject *dirstate_item_get_mode(dirstateItemObject *self)
555 static PyObject *dirstate_item_get_mode(dirstateItemObject *self)
573 {
556 {
574 return PyInt_FromLong(dirstate_item_c_v1_mode(self));
557 return PyInt_FromLong(dirstate_item_c_v1_mode(self));
575 };
558 };
576
559
577 static PyObject *dirstate_item_get_size(dirstateItemObject *self)
560 static PyObject *dirstate_item_get_size(dirstateItemObject *self)
578 {
561 {
579 return PyInt_FromLong(dirstate_item_c_v1_size(self));
562 return PyInt_FromLong(dirstate_item_c_v1_size(self));
580 };
563 };
581
564
582 static PyObject *dirstate_item_get_mtime(dirstateItemObject *self)
565 static PyObject *dirstate_item_get_mtime(dirstateItemObject *self)
583 {
566 {
584 return PyInt_FromLong(dirstate_item_c_v1_mtime(self));
567 return PyInt_FromLong(dirstate_item_c_v1_mtime(self));
585 };
568 };
586
569
587 static PyObject *dirstate_item_get_state(dirstateItemObject *self)
570 static PyObject *dirstate_item_get_state(dirstateItemObject *self)
588 {
571 {
589 char state = dirstate_item_c_v1_state(self);
572 char state = dirstate_item_c_v1_state(self);
590 return PyBytes_FromStringAndSize(&state, 1);
573 return PyBytes_FromStringAndSize(&state, 1);
591 };
574 };
592
575
593 static PyObject *dirstate_item_get_has_fallback_exec(dirstateItemObject *self)
576 static PyObject *dirstate_item_get_has_fallback_exec(dirstateItemObject *self)
594 {
577 {
595 if (dirstate_item_c_has_fallback_exec(self)) {
578 if (dirstate_item_c_has_fallback_exec(self)) {
596 Py_RETURN_TRUE;
579 Py_RETURN_TRUE;
597 } else {
580 } else {
598 Py_RETURN_FALSE;
581 Py_RETURN_FALSE;
599 }
582 }
600 };
583 };
601
584
602 static PyObject *dirstate_item_get_fallback_exec(dirstateItemObject *self)
585 static PyObject *dirstate_item_get_fallback_exec(dirstateItemObject *self)
603 {
586 {
604 if (dirstate_item_c_has_fallback_exec(self)) {
587 if (dirstate_item_c_has_fallback_exec(self)) {
605 if (self->flags & dirstate_flag_fallback_exec) {
588 if (self->flags & dirstate_flag_fallback_exec) {
606 Py_RETURN_TRUE;
589 Py_RETURN_TRUE;
607 } else {
590 } else {
608 Py_RETURN_FALSE;
591 Py_RETURN_FALSE;
609 }
592 }
610 } else {
593 } else {
611 Py_RETURN_NONE;
594 Py_RETURN_NONE;
612 }
595 }
613 };
596 };
614
597
615 static int dirstate_item_set_fallback_exec(dirstateItemObject *self,
598 static int dirstate_item_set_fallback_exec(dirstateItemObject *self,
616 PyObject *value)
599 PyObject *value)
617 {
600 {
618 if ((value == Py_None) || (value == NULL)) {
601 if ((value == Py_None) || (value == NULL)) {
619 self->flags &= ~dirstate_flag_has_fallback_exec;
602 self->flags &= ~dirstate_flag_has_fallback_exec;
620 } else {
603 } else {
621 self->flags |= dirstate_flag_has_fallback_exec;
604 self->flags |= dirstate_flag_has_fallback_exec;
622 if (PyObject_IsTrue(value)) {
605 if (PyObject_IsTrue(value)) {
623 self->flags |= dirstate_flag_fallback_exec;
606 self->flags |= dirstate_flag_fallback_exec;
624 } else {
607 } else {
625 self->flags &= ~dirstate_flag_fallback_exec;
608 self->flags &= ~dirstate_flag_fallback_exec;
626 }
609 }
627 }
610 }
628 return 0;
611 return 0;
629 };
612 };
630
613
631 static PyObject *
614 static PyObject *
632 dirstate_item_get_has_fallback_symlink(dirstateItemObject *self)
615 dirstate_item_get_has_fallback_symlink(dirstateItemObject *self)
633 {
616 {
634 if (dirstate_item_c_has_fallback_symlink(self)) {
617 if (dirstate_item_c_has_fallback_symlink(self)) {
635 Py_RETURN_TRUE;
618 Py_RETURN_TRUE;
636 } else {
619 } else {
637 Py_RETURN_FALSE;
620 Py_RETURN_FALSE;
638 }
621 }
639 };
622 };
640
623
641 static PyObject *dirstate_item_get_fallback_symlink(dirstateItemObject *self)
624 static PyObject *dirstate_item_get_fallback_symlink(dirstateItemObject *self)
642 {
625 {
643 if (dirstate_item_c_has_fallback_symlink(self)) {
626 if (dirstate_item_c_has_fallback_symlink(self)) {
644 if (self->flags & dirstate_flag_fallback_symlink) {
627 if (self->flags & dirstate_flag_fallback_symlink) {
645 Py_RETURN_TRUE;
628 Py_RETURN_TRUE;
646 } else {
629 } else {
647 Py_RETURN_FALSE;
630 Py_RETURN_FALSE;
648 }
631 }
649 } else {
632 } else {
650 Py_RETURN_NONE;
633 Py_RETURN_NONE;
651 }
634 }
652 };
635 };
653
636
654 static int dirstate_item_set_fallback_symlink(dirstateItemObject *self,
637 static int dirstate_item_set_fallback_symlink(dirstateItemObject *self,
655 PyObject *value)
638 PyObject *value)
656 {
639 {
657 if ((value == Py_None) || (value == NULL)) {
640 if ((value == Py_None) || (value == NULL)) {
658 self->flags &= ~dirstate_flag_has_fallback_symlink;
641 self->flags &= ~dirstate_flag_has_fallback_symlink;
659 } else {
642 } else {
660 self->flags |= dirstate_flag_has_fallback_symlink;
643 self->flags |= dirstate_flag_has_fallback_symlink;
661 if (PyObject_IsTrue(value)) {
644 if (PyObject_IsTrue(value)) {
662 self->flags |= dirstate_flag_fallback_symlink;
645 self->flags |= dirstate_flag_fallback_symlink;
663 } else {
646 } else {
664 self->flags &= ~dirstate_flag_fallback_symlink;
647 self->flags &= ~dirstate_flag_fallback_symlink;
665 }
648 }
666 }
649 }
667 return 0;
650 return 0;
668 };
651 };
669
652
670 static PyObject *dirstate_item_get_tracked(dirstateItemObject *self)
653 static PyObject *dirstate_item_get_tracked(dirstateItemObject *self)
671 {
654 {
672 if (dirstate_item_c_tracked(self)) {
655 if (dirstate_item_c_tracked(self)) {
673 Py_RETURN_TRUE;
656 Py_RETURN_TRUE;
674 } else {
657 } else {
675 Py_RETURN_FALSE;
658 Py_RETURN_FALSE;
676 }
659 }
677 };
660 };
678 static PyObject *dirstate_item_get_p1_tracked(dirstateItemObject *self)
661 static PyObject *dirstate_item_get_p1_tracked(dirstateItemObject *self)
679 {
662 {
680 if (self->flags & dirstate_flag_p1_tracked) {
663 if (self->flags & dirstate_flag_p1_tracked) {
681 Py_RETURN_TRUE;
664 Py_RETURN_TRUE;
682 } else {
665 } else {
683 Py_RETURN_FALSE;
666 Py_RETURN_FALSE;
684 }
667 }
685 };
668 };
686
669
687 static PyObject *dirstate_item_get_added(dirstateItemObject *self)
670 static PyObject *dirstate_item_get_added(dirstateItemObject *self)
688 {
671 {
689 if (dirstate_item_c_added(self)) {
672 if (dirstate_item_c_added(self)) {
690 Py_RETURN_TRUE;
673 Py_RETURN_TRUE;
691 } else {
674 } else {
692 Py_RETURN_FALSE;
675 Py_RETURN_FALSE;
693 }
676 }
694 };
677 };
695
678
696 static PyObject *dirstate_item_get_p2_info(dirstateItemObject *self)
679 static PyObject *dirstate_item_get_p2_info(dirstateItemObject *self)
697 {
680 {
698 if (self->flags & dirstate_flag_wc_tracked &&
681 if (self->flags & dirstate_flag_wc_tracked &&
699 self->flags & dirstate_flag_p2_info) {
682 self->flags & dirstate_flag_p2_info) {
700 Py_RETURN_TRUE;
683 Py_RETURN_TRUE;
701 } else {
684 } else {
702 Py_RETURN_FALSE;
685 Py_RETURN_FALSE;
703 }
686 }
704 };
687 };
705
688
706 static PyObject *dirstate_item_get_merged(dirstateItemObject *self)
689 static PyObject *dirstate_item_get_merged(dirstateItemObject *self)
707 {
690 {
708 if (dirstate_item_c_merged(self)) {
691 if (dirstate_item_c_merged(self)) {
709 Py_RETURN_TRUE;
692 Py_RETURN_TRUE;
710 } else {
693 } else {
711 Py_RETURN_FALSE;
694 Py_RETURN_FALSE;
712 }
695 }
713 };
696 };
714
697
715 static PyObject *dirstate_item_get_from_p2(dirstateItemObject *self)
698 static PyObject *dirstate_item_get_from_p2(dirstateItemObject *self)
716 {
699 {
717 if (dirstate_item_c_from_p2(self)) {
700 if (dirstate_item_c_from_p2(self)) {
718 Py_RETURN_TRUE;
701 Py_RETURN_TRUE;
719 } else {
702 } else {
720 Py_RETURN_FALSE;
703 Py_RETURN_FALSE;
721 }
704 }
722 };
705 };
723
706
724 static PyObject *dirstate_item_get_maybe_clean(dirstateItemObject *self)
707 static PyObject *dirstate_item_get_maybe_clean(dirstateItemObject *self)
725 {
708 {
726 if (!(self->flags & dirstate_flag_wc_tracked)) {
709 if (!(self->flags & dirstate_flag_wc_tracked)) {
727 Py_RETURN_FALSE;
710 Py_RETURN_FALSE;
728 } else if (!(self->flags & dirstate_flag_p1_tracked)) {
711 } else if (!(self->flags & dirstate_flag_p1_tracked)) {
729 Py_RETURN_FALSE;
712 Py_RETURN_FALSE;
730 } else if (self->flags & dirstate_flag_p2_info) {
713 } else if (self->flags & dirstate_flag_p2_info) {
731 Py_RETURN_FALSE;
714 Py_RETURN_FALSE;
732 } else {
715 } else {
733 Py_RETURN_TRUE;
716 Py_RETURN_TRUE;
734 }
717 }
735 };
718 };
736
719
737 static PyObject *dirstate_item_get_any_tracked(dirstateItemObject *self)
720 static PyObject *dirstate_item_get_any_tracked(dirstateItemObject *self)
738 {
721 {
739 if (dirstate_item_c_any_tracked(self)) {
722 if (dirstate_item_c_any_tracked(self)) {
740 Py_RETURN_TRUE;
723 Py_RETURN_TRUE;
741 } else {
724 } else {
742 Py_RETURN_FALSE;
725 Py_RETURN_FALSE;
743 }
726 }
744 };
727 };
745
728
746 static PyObject *dirstate_item_get_removed(dirstateItemObject *self)
729 static PyObject *dirstate_item_get_removed(dirstateItemObject *self)
747 {
730 {
748 if (dirstate_item_c_removed(self)) {
731 if (dirstate_item_c_removed(self)) {
749 Py_RETURN_TRUE;
732 Py_RETURN_TRUE;
750 } else {
733 } else {
751 Py_RETURN_FALSE;
734 Py_RETURN_FALSE;
752 }
735 }
753 };
736 };
754
737
755 static PyGetSetDef dirstate_item_getset[] = {
738 static PyGetSetDef dirstate_item_getset[] = {
756 {"mode", (getter)dirstate_item_get_mode, NULL, "mode", NULL},
739 {"mode", (getter)dirstate_item_get_mode, NULL, "mode", NULL},
757 {"size", (getter)dirstate_item_get_size, NULL, "size", NULL},
740 {"size", (getter)dirstate_item_get_size, NULL, "size", NULL},
758 {"mtime", (getter)dirstate_item_get_mtime, NULL, "mtime", NULL},
741 {"mtime", (getter)dirstate_item_get_mtime, NULL, "mtime", NULL},
759 {"state", (getter)dirstate_item_get_state, NULL, "state", NULL},
742 {"state", (getter)dirstate_item_get_state, NULL, "state", NULL},
760 {"has_fallback_exec", (getter)dirstate_item_get_has_fallback_exec, NULL,
743 {"has_fallback_exec", (getter)dirstate_item_get_has_fallback_exec, NULL,
761 "has_fallback_exec", NULL},
744 "has_fallback_exec", NULL},
762 {"fallback_exec", (getter)dirstate_item_get_fallback_exec,
745 {"fallback_exec", (getter)dirstate_item_get_fallback_exec,
763 (setter)dirstate_item_set_fallback_exec, "fallback_exec", NULL},
746 (setter)dirstate_item_set_fallback_exec, "fallback_exec", NULL},
764 {"has_fallback_symlink", (getter)dirstate_item_get_has_fallback_symlink,
747 {"has_fallback_symlink", (getter)dirstate_item_get_has_fallback_symlink,
765 NULL, "has_fallback_symlink", NULL},
748 NULL, "has_fallback_symlink", NULL},
766 {"fallback_symlink", (getter)dirstate_item_get_fallback_symlink,
749 {"fallback_symlink", (getter)dirstate_item_get_fallback_symlink,
767 (setter)dirstate_item_set_fallback_symlink, "fallback_symlink", NULL},
750 (setter)dirstate_item_set_fallback_symlink, "fallback_symlink", NULL},
768 {"tracked", (getter)dirstate_item_get_tracked, NULL, "tracked", NULL},
751 {"tracked", (getter)dirstate_item_get_tracked, NULL, "tracked", NULL},
769 {"p1_tracked", (getter)dirstate_item_get_p1_tracked, NULL, "p1_tracked",
752 {"p1_tracked", (getter)dirstate_item_get_p1_tracked, NULL, "p1_tracked",
770 NULL},
753 NULL},
771 {"added", (getter)dirstate_item_get_added, NULL, "added", NULL},
754 {"added", (getter)dirstate_item_get_added, NULL, "added", NULL},
772 {"p2_info", (getter)dirstate_item_get_p2_info, NULL, "p2_info", NULL},
755 {"p2_info", (getter)dirstate_item_get_p2_info, NULL, "p2_info", NULL},
773 {"merged", (getter)dirstate_item_get_merged, NULL, "merged", NULL},
756 {"merged", (getter)dirstate_item_get_merged, NULL, "merged", NULL},
774 {"from_p2", (getter)dirstate_item_get_from_p2, NULL, "from_p2", NULL},
757 {"from_p2", (getter)dirstate_item_get_from_p2, NULL, "from_p2", NULL},
775 {"maybe_clean", (getter)dirstate_item_get_maybe_clean, NULL, "maybe_clean",
758 {"maybe_clean", (getter)dirstate_item_get_maybe_clean, NULL, "maybe_clean",
776 NULL},
759 NULL},
777 {"any_tracked", (getter)dirstate_item_get_any_tracked, NULL, "any_tracked",
760 {"any_tracked", (getter)dirstate_item_get_any_tracked, NULL, "any_tracked",
778 NULL},
761 NULL},
779 {"removed", (getter)dirstate_item_get_removed, NULL, "removed", NULL},
762 {"removed", (getter)dirstate_item_get_removed, NULL, "removed", NULL},
780 {NULL} /* Sentinel */
763 {NULL} /* Sentinel */
781 };
764 };
782
765
783 PyTypeObject dirstateItemType = {
766 PyTypeObject dirstateItemType = {
784 PyVarObject_HEAD_INIT(NULL, 0) /* header */
767 PyVarObject_HEAD_INIT(NULL, 0) /* header */
785 "dirstate_tuple", /* tp_name */
768 "dirstate_tuple", /* tp_name */
786 sizeof(dirstateItemObject), /* tp_basicsize */
769 sizeof(dirstateItemObject), /* tp_basicsize */
787 0, /* tp_itemsize */
770 0, /* tp_itemsize */
788 (destructor)dirstate_item_dealloc, /* tp_dealloc */
771 (destructor)dirstate_item_dealloc, /* tp_dealloc */
789 0, /* tp_print */
772 0, /* tp_print */
790 0, /* tp_getattr */
773 0, /* tp_getattr */
791 0, /* tp_setattr */
774 0, /* tp_setattr */
792 0, /* tp_compare */
775 0, /* tp_compare */
793 0, /* tp_repr */
776 0, /* tp_repr */
794 0, /* tp_as_number */
777 0, /* tp_as_number */
795 0, /* tp_as_sequence */
778 0, /* tp_as_sequence */
796 0, /* tp_as_mapping */
779 0, /* tp_as_mapping */
797 0, /* tp_hash */
780 0, /* tp_hash */
798 0, /* tp_call */
781 0, /* tp_call */
799 0, /* tp_str */
782 0, /* tp_str */
800 0, /* tp_getattro */
783 0, /* tp_getattro */
801 0, /* tp_setattro */
784 0, /* tp_setattro */
802 0, /* tp_as_buffer */
785 0, /* tp_as_buffer */
803 Py_TPFLAGS_DEFAULT, /* tp_flags */
786 Py_TPFLAGS_DEFAULT, /* tp_flags */
804 "dirstate tuple", /* tp_doc */
787 "dirstate tuple", /* tp_doc */
805 0, /* tp_traverse */
788 0, /* tp_traverse */
806 0, /* tp_clear */
789 0, /* tp_clear */
807 0, /* tp_richcompare */
790 0, /* tp_richcompare */
808 0, /* tp_weaklistoffset */
791 0, /* tp_weaklistoffset */
809 0, /* tp_iter */
792 0, /* tp_iter */
810 0, /* tp_iternext */
793 0, /* tp_iternext */
811 dirstate_item_methods, /* tp_methods */
794 dirstate_item_methods, /* tp_methods */
812 0, /* tp_members */
795 0, /* tp_members */
813 dirstate_item_getset, /* tp_getset */
796 dirstate_item_getset, /* tp_getset */
814 0, /* tp_base */
797 0, /* tp_base */
815 0, /* tp_dict */
798 0, /* tp_dict */
816 0, /* tp_descr_get */
799 0, /* tp_descr_get */
817 0, /* tp_descr_set */
800 0, /* tp_descr_set */
818 0, /* tp_dictoffset */
801 0, /* tp_dictoffset */
819 0, /* tp_init */
802 0, /* tp_init */
820 0, /* tp_alloc */
803 0, /* tp_alloc */
821 dirstate_item_new, /* tp_new */
804 dirstate_item_new, /* tp_new */
822 };
805 };
823
806
824 static PyObject *parse_dirstate(PyObject *self, PyObject *args)
807 static PyObject *parse_dirstate(PyObject *self, PyObject *args)
825 {
808 {
826 PyObject *dmap, *cmap, *parents = NULL, *ret = NULL;
809 PyObject *dmap, *cmap, *parents = NULL, *ret = NULL;
827 PyObject *fname = NULL, *cname = NULL, *entry = NULL;
810 PyObject *fname = NULL, *cname = NULL, *entry = NULL;
828 char state, *cur, *str, *cpos;
811 char state, *cur, *str, *cpos;
829 int mode, size, mtime;
812 int mode, size, mtime;
830 unsigned int flen, pos = 40;
813 unsigned int flen, pos = 40;
831 Py_ssize_t len = 40;
814 Py_ssize_t len = 40;
832 Py_ssize_t readlen;
815 Py_ssize_t readlen;
833
816
834 if (!PyArg_ParseTuple(
817 if (!PyArg_ParseTuple(
835 args, PY23("O!O!s#:parse_dirstate", "O!O!y#:parse_dirstate"),
818 args, PY23("O!O!s#:parse_dirstate", "O!O!y#:parse_dirstate"),
836 &PyDict_Type, &dmap, &PyDict_Type, &cmap, &str, &readlen)) {
819 &PyDict_Type, &dmap, &PyDict_Type, &cmap, &str, &readlen)) {
837 goto quit;
820 goto quit;
838 }
821 }
839
822
840 len = readlen;
823 len = readlen;
841
824
842 /* read parents */
825 /* read parents */
843 if (len < 40) {
826 if (len < 40) {
844 PyErr_SetString(PyExc_ValueError,
827 PyErr_SetString(PyExc_ValueError,
845 "too little data for parents");
828 "too little data for parents");
846 goto quit;
829 goto quit;
847 }
830 }
848
831
849 parents = Py_BuildValue(PY23("s#s#", "y#y#"), str, (Py_ssize_t)20,
832 parents = Py_BuildValue(PY23("s#s#", "y#y#"), str, (Py_ssize_t)20,
850 str + 20, (Py_ssize_t)20);
833 str + 20, (Py_ssize_t)20);
851 if (!parents) {
834 if (!parents) {
852 goto quit;
835 goto quit;
853 }
836 }
854
837
855 /* read filenames */
838 /* read filenames */
856 while (pos >= 40 && pos < len) {
839 while (pos >= 40 && pos < len) {
857 if (pos + 17 > len) {
840 if (pos + 17 > len) {
858 PyErr_SetString(PyExc_ValueError,
841 PyErr_SetString(PyExc_ValueError,
859 "overflow in dirstate");
842 "overflow in dirstate");
860 goto quit;
843 goto quit;
861 }
844 }
862 cur = str + pos;
845 cur = str + pos;
863 /* unpack header */
846 /* unpack header */
864 state = *cur;
847 state = *cur;
865 mode = getbe32(cur + 1);
848 mode = getbe32(cur + 1);
866 size = getbe32(cur + 5);
849 size = getbe32(cur + 5);
867 mtime = getbe32(cur + 9);
850 mtime = getbe32(cur + 9);
868 flen = getbe32(cur + 13);
851 flen = getbe32(cur + 13);
869 pos += 17;
852 pos += 17;
870 cur += 17;
853 cur += 17;
871 if (flen > len - pos) {
854 if (flen > len - pos) {
872 PyErr_SetString(PyExc_ValueError,
855 PyErr_SetString(PyExc_ValueError,
873 "overflow in dirstate");
856 "overflow in dirstate");
874 goto quit;
857 goto quit;
875 }
858 }
876
859
877 entry = (PyObject *)dirstate_item_from_v1_data(state, mode,
860 entry = (PyObject *)dirstate_item_from_v1_data(state, mode,
878 size, mtime);
861 size, mtime);
879 if (!entry)
862 if (!entry)
880 goto quit;
863 goto quit;
881 cpos = memchr(cur, 0, flen);
864 cpos = memchr(cur, 0, flen);
882 if (cpos) {
865 if (cpos) {
883 fname = PyBytes_FromStringAndSize(cur, cpos - cur);
866 fname = PyBytes_FromStringAndSize(cur, cpos - cur);
884 cname = PyBytes_FromStringAndSize(
867 cname = PyBytes_FromStringAndSize(
885 cpos + 1, flen - (cpos - cur) - 1);
868 cpos + 1, flen - (cpos - cur) - 1);
886 if (!fname || !cname ||
869 if (!fname || !cname ||
887 PyDict_SetItem(cmap, fname, cname) == -1 ||
870 PyDict_SetItem(cmap, fname, cname) == -1 ||
888 PyDict_SetItem(dmap, fname, entry) == -1) {
871 PyDict_SetItem(dmap, fname, entry) == -1) {
889 goto quit;
872 goto quit;
890 }
873 }
891 Py_DECREF(cname);
874 Py_DECREF(cname);
892 } else {
875 } else {
893 fname = PyBytes_FromStringAndSize(cur, flen);
876 fname = PyBytes_FromStringAndSize(cur, flen);
894 if (!fname ||
877 if (!fname ||
895 PyDict_SetItem(dmap, fname, entry) == -1) {
878 PyDict_SetItem(dmap, fname, entry) == -1) {
896 goto quit;
879 goto quit;
897 }
880 }
898 }
881 }
899 Py_DECREF(fname);
882 Py_DECREF(fname);
900 Py_DECREF(entry);
883 Py_DECREF(entry);
901 fname = cname = entry = NULL;
884 fname = cname = entry = NULL;
902 pos += flen;
885 pos += flen;
903 }
886 }
904
887
905 ret = parents;
888 ret = parents;
906 Py_INCREF(ret);
889 Py_INCREF(ret);
907 quit:
890 quit:
908 Py_XDECREF(fname);
891 Py_XDECREF(fname);
909 Py_XDECREF(cname);
892 Py_XDECREF(cname);
910 Py_XDECREF(entry);
893 Py_XDECREF(entry);
911 Py_XDECREF(parents);
894 Py_XDECREF(parents);
912 return ret;
895 return ret;
913 }
896 }
914
897
915 /*
898 /*
916 * Efficiently pack a dirstate object into its on-disk format.
899 * Efficiently pack a dirstate object into its on-disk format.
917 */
900 */
918 static PyObject *pack_dirstate(PyObject *self, PyObject *args)
901 static PyObject *pack_dirstate(PyObject *self, PyObject *args)
919 {
902 {
920 PyObject *packobj = NULL;
903 PyObject *packobj = NULL;
921 PyObject *map, *copymap, *pl, *mtime_unset = NULL;
904 PyObject *map, *copymap, *pl, *mtime_unset = NULL;
922 Py_ssize_t nbytes, pos, l;
905 Py_ssize_t nbytes, pos, l;
923 PyObject *k, *v = NULL, *pn;
906 PyObject *k, *v = NULL, *pn;
924 char *p, *s;
907 char *p, *s;
925 int now_s;
926 int now_ns;
927
908
928 if (!PyArg_ParseTuple(args, "O!O!O!(ii):pack_dirstate", &PyDict_Type,
909 if (!PyArg_ParseTuple(args, "O!O!O!:pack_dirstate", &PyDict_Type, &map,
929 &map, &PyDict_Type, &copymap, &PyTuple_Type, &pl,
910 &PyDict_Type, &copymap, &PyTuple_Type, &pl)) {
930 &now_s, &now_ns)) {
931 return NULL;
911 return NULL;
932 }
912 }
933
913
934 if (PyTuple_Size(pl) != 2) {
914 if (PyTuple_Size(pl) != 2) {
935 PyErr_SetString(PyExc_TypeError, "expected 2-element tuple");
915 PyErr_SetString(PyExc_TypeError, "expected 2-element tuple");
936 return NULL;
916 return NULL;
937 }
917 }
938
918
939 /* Figure out how much we need to allocate. */
919 /* Figure out how much we need to allocate. */
940 for (nbytes = 40, pos = 0; PyDict_Next(map, &pos, &k, &v);) {
920 for (nbytes = 40, pos = 0; PyDict_Next(map, &pos, &k, &v);) {
941 PyObject *c;
921 PyObject *c;
942 if (!PyBytes_Check(k)) {
922 if (!PyBytes_Check(k)) {
943 PyErr_SetString(PyExc_TypeError, "expected string key");
923 PyErr_SetString(PyExc_TypeError, "expected string key");
944 goto bail;
924 goto bail;
945 }
925 }
946 nbytes += PyBytes_GET_SIZE(k) + 17;
926 nbytes += PyBytes_GET_SIZE(k) + 17;
947 c = PyDict_GetItem(copymap, k);
927 c = PyDict_GetItem(copymap, k);
948 if (c) {
928 if (c) {
949 if (!PyBytes_Check(c)) {
929 if (!PyBytes_Check(c)) {
950 PyErr_SetString(PyExc_TypeError,
930 PyErr_SetString(PyExc_TypeError,
951 "expected string key");
931 "expected string key");
952 goto bail;
932 goto bail;
953 }
933 }
954 nbytes += PyBytes_GET_SIZE(c) + 1;
934 nbytes += PyBytes_GET_SIZE(c) + 1;
955 }
935 }
956 }
936 }
957
937
958 packobj = PyBytes_FromStringAndSize(NULL, nbytes);
938 packobj = PyBytes_FromStringAndSize(NULL, nbytes);
959 if (packobj == NULL) {
939 if (packobj == NULL) {
960 goto bail;
940 goto bail;
961 }
941 }
962
942
963 p = PyBytes_AS_STRING(packobj);
943 p = PyBytes_AS_STRING(packobj);
964
944
965 pn = PyTuple_GET_ITEM(pl, 0);
945 pn = PyTuple_GET_ITEM(pl, 0);
966 if (PyBytes_AsStringAndSize(pn, &s, &l) == -1 || l != 20) {
946 if (PyBytes_AsStringAndSize(pn, &s, &l) == -1 || l != 20) {
967 PyErr_SetString(PyExc_TypeError, "expected a 20-byte hash");
947 PyErr_SetString(PyExc_TypeError, "expected a 20-byte hash");
968 goto bail;
948 goto bail;
969 }
949 }
970 memcpy(p, s, l);
950 memcpy(p, s, l);
971 p += 20;
951 p += 20;
972 pn = PyTuple_GET_ITEM(pl, 1);
952 pn = PyTuple_GET_ITEM(pl, 1);
973 if (PyBytes_AsStringAndSize(pn, &s, &l) == -1 || l != 20) {
953 if (PyBytes_AsStringAndSize(pn, &s, &l) == -1 || l != 20) {
974 PyErr_SetString(PyExc_TypeError, "expected a 20-byte hash");
954 PyErr_SetString(PyExc_TypeError, "expected a 20-byte hash");
975 goto bail;
955 goto bail;
976 }
956 }
977 memcpy(p, s, l);
957 memcpy(p, s, l);
978 p += 20;
958 p += 20;
979
959
980 for (pos = 0; PyDict_Next(map, &pos, &k, &v);) {
960 for (pos = 0; PyDict_Next(map, &pos, &k, &v);) {
981 dirstateItemObject *tuple;
961 dirstateItemObject *tuple;
982 char state;
962 char state;
983 int mode, size, mtime;
963 int mode, size, mtime;
984 Py_ssize_t len, l;
964 Py_ssize_t len, l;
985 PyObject *o;
965 PyObject *o;
986 char *t;
966 char *t;
987
967
988 if (!dirstate_tuple_check(v)) {
968 if (!dirstate_tuple_check(v)) {
989 PyErr_SetString(PyExc_TypeError,
969 PyErr_SetString(PyExc_TypeError,
990 "expected a dirstate tuple");
970 "expected a dirstate tuple");
991 goto bail;
971 goto bail;
992 }
972 }
993 tuple = (dirstateItemObject *)v;
973 tuple = (dirstateItemObject *)v;
994
974
995 state = dirstate_item_c_v1_state(tuple);
975 state = dirstate_item_c_v1_state(tuple);
996 mode = dirstate_item_c_v1_mode(tuple);
976 mode = dirstate_item_c_v1_mode(tuple);
997 size = dirstate_item_c_v1_size(tuple);
977 size = dirstate_item_c_v1_size(tuple);
998 mtime = dirstate_item_c_v1_mtime(tuple);
978 mtime = dirstate_item_c_v1_mtime(tuple);
999 if (state == 'n' && tuple->mtime_s == now_s) {
1000 /* See pure/parsers.py:pack_dirstate for why we do
1001 * this. */
1002 mtime = -1;
1003 mtime_unset = (PyObject *)dirstate_item_from_v1_data(
1004 state, mode, size, mtime);
1005 if (!mtime_unset) {
1006 goto bail;
1007 }
1008 if (PyDict_SetItem(map, k, mtime_unset) == -1) {
1009 goto bail;
1010 }
1011 Py_DECREF(mtime_unset);
1012 mtime_unset = NULL;
1013 }
1014 *p++ = state;
979 *p++ = state;
1015 putbe32((uint32_t)mode, p);
980 putbe32((uint32_t)mode, p);
1016 putbe32((uint32_t)size, p + 4);
981 putbe32((uint32_t)size, p + 4);
1017 putbe32((uint32_t)mtime, p + 8);
982 putbe32((uint32_t)mtime, p + 8);
1018 t = p + 12;
983 t = p + 12;
1019 p += 16;
984 p += 16;
1020 len = PyBytes_GET_SIZE(k);
985 len = PyBytes_GET_SIZE(k);
1021 memcpy(p, PyBytes_AS_STRING(k), len);
986 memcpy(p, PyBytes_AS_STRING(k), len);
1022 p += len;
987 p += len;
1023 o = PyDict_GetItem(copymap, k);
988 o = PyDict_GetItem(copymap, k);
1024 if (o) {
989 if (o) {
1025 *p++ = '\0';
990 *p++ = '\0';
1026 l = PyBytes_GET_SIZE(o);
991 l = PyBytes_GET_SIZE(o);
1027 memcpy(p, PyBytes_AS_STRING(o), l);
992 memcpy(p, PyBytes_AS_STRING(o), l);
1028 p += l;
993 p += l;
1029 len += l + 1;
994 len += l + 1;
1030 }
995 }
1031 putbe32((uint32_t)len, t);
996 putbe32((uint32_t)len, t);
1032 }
997 }
1033
998
1034 pos = p - PyBytes_AS_STRING(packobj);
999 pos = p - PyBytes_AS_STRING(packobj);
1035 if (pos != nbytes) {
1000 if (pos != nbytes) {
1036 PyErr_Format(PyExc_SystemError, "bad dirstate size: %ld != %ld",
1001 PyErr_Format(PyExc_SystemError, "bad dirstate size: %ld != %ld",
1037 (long)pos, (long)nbytes);
1002 (long)pos, (long)nbytes);
1038 goto bail;
1003 goto bail;
1039 }
1004 }
1040
1005
1041 return packobj;
1006 return packobj;
1042 bail:
1007 bail:
1043 Py_XDECREF(mtime_unset);
1008 Py_XDECREF(mtime_unset);
1044 Py_XDECREF(packobj);
1009 Py_XDECREF(packobj);
1045 Py_XDECREF(v);
1010 Py_XDECREF(v);
1046 return NULL;
1011 return NULL;
1047 }
1012 }
1048
1013
1049 #define BUMPED_FIX 1
1014 #define BUMPED_FIX 1
1050 #define USING_SHA_256 2
1015 #define USING_SHA_256 2
1051 #define FM1_HEADER_SIZE (4 + 8 + 2 + 2 + 1 + 1 + 1)
1016 #define FM1_HEADER_SIZE (4 + 8 + 2 + 2 + 1 + 1 + 1)
1052
1017
1053 static PyObject *readshas(const char *source, unsigned char num,
1018 static PyObject *readshas(const char *source, unsigned char num,
1054 Py_ssize_t hashwidth)
1019 Py_ssize_t hashwidth)
1055 {
1020 {
1056 int i;
1021 int i;
1057 PyObject *list = PyTuple_New(num);
1022 PyObject *list = PyTuple_New(num);
1058 if (list == NULL) {
1023 if (list == NULL) {
1059 return NULL;
1024 return NULL;
1060 }
1025 }
1061 for (i = 0; i < num; i++) {
1026 for (i = 0; i < num; i++) {
1062 PyObject *hash = PyBytes_FromStringAndSize(source, hashwidth);
1027 PyObject *hash = PyBytes_FromStringAndSize(source, hashwidth);
1063 if (hash == NULL) {
1028 if (hash == NULL) {
1064 Py_DECREF(list);
1029 Py_DECREF(list);
1065 return NULL;
1030 return NULL;
1066 }
1031 }
1067 PyTuple_SET_ITEM(list, i, hash);
1032 PyTuple_SET_ITEM(list, i, hash);
1068 source += hashwidth;
1033 source += hashwidth;
1069 }
1034 }
1070 return list;
1035 return list;
1071 }
1036 }
1072
1037
1073 static PyObject *fm1readmarker(const char *databegin, const char *dataend,
1038 static PyObject *fm1readmarker(const char *databegin, const char *dataend,
1074 uint32_t *msize)
1039 uint32_t *msize)
1075 {
1040 {
1076 const char *data = databegin;
1041 const char *data = databegin;
1077 const char *meta;
1042 const char *meta;
1078
1043
1079 double mtime;
1044 double mtime;
1080 int16_t tz;
1045 int16_t tz;
1081 uint16_t flags;
1046 uint16_t flags;
1082 unsigned char nsuccs, nparents, nmetadata;
1047 unsigned char nsuccs, nparents, nmetadata;
1083 Py_ssize_t hashwidth = 20;
1048 Py_ssize_t hashwidth = 20;
1084
1049
1085 PyObject *prec = NULL, *parents = NULL, *succs = NULL;
1050 PyObject *prec = NULL, *parents = NULL, *succs = NULL;
1086 PyObject *metadata = NULL, *ret = NULL;
1051 PyObject *metadata = NULL, *ret = NULL;
1087 int i;
1052 int i;
1088
1053
1089 if (data + FM1_HEADER_SIZE > dataend) {
1054 if (data + FM1_HEADER_SIZE > dataend) {
1090 goto overflow;
1055 goto overflow;
1091 }
1056 }
1092
1057
1093 *msize = getbe32(data);
1058 *msize = getbe32(data);
1094 data += 4;
1059 data += 4;
1095 mtime = getbefloat64(data);
1060 mtime = getbefloat64(data);
1096 data += 8;
1061 data += 8;
1097 tz = getbeint16(data);
1062 tz = getbeint16(data);
1098 data += 2;
1063 data += 2;
1099 flags = getbeuint16(data);
1064 flags = getbeuint16(data);
1100 data += 2;
1065 data += 2;
1101
1066
1102 if (flags & USING_SHA_256) {
1067 if (flags & USING_SHA_256) {
1103 hashwidth = 32;
1068 hashwidth = 32;
1104 }
1069 }
1105
1070
1106 nsuccs = (unsigned char)(*data++);
1071 nsuccs = (unsigned char)(*data++);
1107 nparents = (unsigned char)(*data++);
1072 nparents = (unsigned char)(*data++);
1108 nmetadata = (unsigned char)(*data++);
1073 nmetadata = (unsigned char)(*data++);
1109
1074
1110 if (databegin + *msize > dataend) {
1075 if (databegin + *msize > dataend) {
1111 goto overflow;
1076 goto overflow;
1112 }
1077 }
1113 dataend = databegin + *msize; /* narrow down to marker size */
1078 dataend = databegin + *msize; /* narrow down to marker size */
1114
1079
1115 if (data + hashwidth > dataend) {
1080 if (data + hashwidth > dataend) {
1116 goto overflow;
1081 goto overflow;
1117 }
1082 }
1118 prec = PyBytes_FromStringAndSize(data, hashwidth);
1083 prec = PyBytes_FromStringAndSize(data, hashwidth);
1119 data += hashwidth;
1084 data += hashwidth;
1120 if (prec == NULL) {
1085 if (prec == NULL) {
1121 goto bail;
1086 goto bail;
1122 }
1087 }
1123
1088
1124 if (data + nsuccs * hashwidth > dataend) {
1089 if (data + nsuccs * hashwidth > dataend) {
1125 goto overflow;
1090 goto overflow;
1126 }
1091 }
1127 succs = readshas(data, nsuccs, hashwidth);
1092 succs = readshas(data, nsuccs, hashwidth);
1128 if (succs == NULL) {
1093 if (succs == NULL) {
1129 goto bail;
1094 goto bail;
1130 }
1095 }
1131 data += nsuccs * hashwidth;
1096 data += nsuccs * hashwidth;
1132
1097
1133 if (nparents == 1 || nparents == 2) {
1098 if (nparents == 1 || nparents == 2) {
1134 if (data + nparents * hashwidth > dataend) {
1099 if (data + nparents * hashwidth > dataend) {
1135 goto overflow;
1100 goto overflow;
1136 }
1101 }
1137 parents = readshas(data, nparents, hashwidth);
1102 parents = readshas(data, nparents, hashwidth);
1138 if (parents == NULL) {
1103 if (parents == NULL) {
1139 goto bail;
1104 goto bail;
1140 }
1105 }
1141 data += nparents * hashwidth;
1106 data += nparents * hashwidth;
1142 } else {
1107 } else {
1143 parents = Py_None;
1108 parents = Py_None;
1144 Py_INCREF(parents);
1109 Py_INCREF(parents);
1145 }
1110 }
1146
1111
1147 if (data + 2 * nmetadata > dataend) {
1112 if (data + 2 * nmetadata > dataend) {
1148 goto overflow;
1113 goto overflow;
1149 }
1114 }
1150 meta = data + (2 * nmetadata);
1115 meta = data + (2 * nmetadata);
1151 metadata = PyTuple_New(nmetadata);
1116 metadata = PyTuple_New(nmetadata);
1152 if (metadata == NULL) {
1117 if (metadata == NULL) {
1153 goto bail;
1118 goto bail;
1154 }
1119 }
1155 for (i = 0; i < nmetadata; i++) {
1120 for (i = 0; i < nmetadata; i++) {
1156 PyObject *tmp, *left = NULL, *right = NULL;
1121 PyObject *tmp, *left = NULL, *right = NULL;
1157 Py_ssize_t leftsize = (unsigned char)(*data++);
1122 Py_ssize_t leftsize = (unsigned char)(*data++);
1158 Py_ssize_t rightsize = (unsigned char)(*data++);
1123 Py_ssize_t rightsize = (unsigned char)(*data++);
1159 if (meta + leftsize + rightsize > dataend) {
1124 if (meta + leftsize + rightsize > dataend) {
1160 goto overflow;
1125 goto overflow;
1161 }
1126 }
1162 left = PyBytes_FromStringAndSize(meta, leftsize);
1127 left = PyBytes_FromStringAndSize(meta, leftsize);
1163 meta += leftsize;
1128 meta += leftsize;
1164 right = PyBytes_FromStringAndSize(meta, rightsize);
1129 right = PyBytes_FromStringAndSize(meta, rightsize);
1165 meta += rightsize;
1130 meta += rightsize;
1166 tmp = PyTuple_New(2);
1131 tmp = PyTuple_New(2);
1167 if (!left || !right || !tmp) {
1132 if (!left || !right || !tmp) {
1168 Py_XDECREF(left);
1133 Py_XDECREF(left);
1169 Py_XDECREF(right);
1134 Py_XDECREF(right);
1170 Py_XDECREF(tmp);
1135 Py_XDECREF(tmp);
1171 goto bail;
1136 goto bail;
1172 }
1137 }
1173 PyTuple_SET_ITEM(tmp, 0, left);
1138 PyTuple_SET_ITEM(tmp, 0, left);
1174 PyTuple_SET_ITEM(tmp, 1, right);
1139 PyTuple_SET_ITEM(tmp, 1, right);
1175 PyTuple_SET_ITEM(metadata, i, tmp);
1140 PyTuple_SET_ITEM(metadata, i, tmp);
1176 }
1141 }
1177 ret = Py_BuildValue("(OOHO(di)O)", prec, succs, flags, metadata, mtime,
1142 ret = Py_BuildValue("(OOHO(di)O)", prec, succs, flags, metadata, mtime,
1178 (int)tz * 60, parents);
1143 (int)tz * 60, parents);
1179 goto bail; /* return successfully */
1144 goto bail; /* return successfully */
1180
1145
1181 overflow:
1146 overflow:
1182 PyErr_SetString(PyExc_ValueError, "overflow in obsstore");
1147 PyErr_SetString(PyExc_ValueError, "overflow in obsstore");
1183 bail:
1148 bail:
1184 Py_XDECREF(prec);
1149 Py_XDECREF(prec);
1185 Py_XDECREF(succs);
1150 Py_XDECREF(succs);
1186 Py_XDECREF(metadata);
1151 Py_XDECREF(metadata);
1187 Py_XDECREF(parents);
1152 Py_XDECREF(parents);
1188 return ret;
1153 return ret;
1189 }
1154 }
1190
1155
1191 static PyObject *fm1readmarkers(PyObject *self, PyObject *args)
1156 static PyObject *fm1readmarkers(PyObject *self, PyObject *args)
1192 {
1157 {
1193 const char *data, *dataend;
1158 const char *data, *dataend;
1194 Py_ssize_t datalen, offset, stop;
1159 Py_ssize_t datalen, offset, stop;
1195 PyObject *markers = NULL;
1160 PyObject *markers = NULL;
1196
1161
1197 if (!PyArg_ParseTuple(args, PY23("s#nn", "y#nn"), &data, &datalen,
1162 if (!PyArg_ParseTuple(args, PY23("s#nn", "y#nn"), &data, &datalen,
1198 &offset, &stop)) {
1163 &offset, &stop)) {
1199 return NULL;
1164 return NULL;
1200 }
1165 }
1201 if (offset < 0) {
1166 if (offset < 0) {
1202 PyErr_SetString(PyExc_ValueError,
1167 PyErr_SetString(PyExc_ValueError,
1203 "invalid negative offset in fm1readmarkers");
1168 "invalid negative offset in fm1readmarkers");
1204 return NULL;
1169 return NULL;
1205 }
1170 }
1206 if (stop > datalen) {
1171 if (stop > datalen) {
1207 PyErr_SetString(
1172 PyErr_SetString(
1208 PyExc_ValueError,
1173 PyExc_ValueError,
1209 "stop longer than data length in fm1readmarkers");
1174 "stop longer than data length in fm1readmarkers");
1210 return NULL;
1175 return NULL;
1211 }
1176 }
1212 dataend = data + datalen;
1177 dataend = data + datalen;
1213 data += offset;
1178 data += offset;
1214 markers = PyList_New(0);
1179 markers = PyList_New(0);
1215 if (!markers) {
1180 if (!markers) {
1216 return NULL;
1181 return NULL;
1217 }
1182 }
1218 while (offset < stop) {
1183 while (offset < stop) {
1219 uint32_t msize;
1184 uint32_t msize;
1220 int error;
1185 int error;
1221 PyObject *record = fm1readmarker(data, dataend, &msize);
1186 PyObject *record = fm1readmarker(data, dataend, &msize);
1222 if (!record) {
1187 if (!record) {
1223 goto bail;
1188 goto bail;
1224 }
1189 }
1225 error = PyList_Append(markers, record);
1190 error = PyList_Append(markers, record);
1226 Py_DECREF(record);
1191 Py_DECREF(record);
1227 if (error) {
1192 if (error) {
1228 goto bail;
1193 goto bail;
1229 }
1194 }
1230 data += msize;
1195 data += msize;
1231 offset += msize;
1196 offset += msize;
1232 }
1197 }
1233 return markers;
1198 return markers;
1234 bail:
1199 bail:
1235 Py_DECREF(markers);
1200 Py_DECREF(markers);
1236 return NULL;
1201 return NULL;
1237 }
1202 }
1238
1203
1239 static char parsers_doc[] = "Efficient content parsing.";
1204 static char parsers_doc[] = "Efficient content parsing.";
1240
1205
1241 PyObject *encodedir(PyObject *self, PyObject *args);
1206 PyObject *encodedir(PyObject *self, PyObject *args);
1242 PyObject *pathencode(PyObject *self, PyObject *args);
1207 PyObject *pathencode(PyObject *self, PyObject *args);
1243 PyObject *lowerencode(PyObject *self, PyObject *args);
1208 PyObject *lowerencode(PyObject *self, PyObject *args);
1244 PyObject *parse_index2(PyObject *self, PyObject *args, PyObject *kwargs);
1209 PyObject *parse_index2(PyObject *self, PyObject *args, PyObject *kwargs);
1245
1210
1246 static PyMethodDef methods[] = {
1211 static PyMethodDef methods[] = {
1247 {"pack_dirstate", pack_dirstate, METH_VARARGS, "pack a dirstate\n"},
1212 {"pack_dirstate", pack_dirstate, METH_VARARGS, "pack a dirstate\n"},
1248 {"parse_dirstate", parse_dirstate, METH_VARARGS, "parse a dirstate\n"},
1213 {"parse_dirstate", parse_dirstate, METH_VARARGS, "parse a dirstate\n"},
1249 {"parse_index2", (PyCFunction)parse_index2, METH_VARARGS | METH_KEYWORDS,
1214 {"parse_index2", (PyCFunction)parse_index2, METH_VARARGS | METH_KEYWORDS,
1250 "parse a revlog index\n"},
1215 "parse a revlog index\n"},
1251 {"isasciistr", isasciistr, METH_VARARGS, "check if an ASCII string\n"},
1216 {"isasciistr", isasciistr, METH_VARARGS, "check if an ASCII string\n"},
1252 {"asciilower", asciilower, METH_VARARGS, "lowercase an ASCII string\n"},
1217 {"asciilower", asciilower, METH_VARARGS, "lowercase an ASCII string\n"},
1253 {"asciiupper", asciiupper, METH_VARARGS, "uppercase an ASCII string\n"},
1218 {"asciiupper", asciiupper, METH_VARARGS, "uppercase an ASCII string\n"},
1254 {"dict_new_presized", dict_new_presized, METH_VARARGS,
1219 {"dict_new_presized", dict_new_presized, METH_VARARGS,
1255 "construct a dict with an expected size\n"},
1220 "construct a dict with an expected size\n"},
1256 {"make_file_foldmap", make_file_foldmap, METH_VARARGS,
1221 {"make_file_foldmap", make_file_foldmap, METH_VARARGS,
1257 "make file foldmap\n"},
1222 "make file foldmap\n"},
1258 {"jsonescapeu8fast", jsonescapeu8fast, METH_VARARGS,
1223 {"jsonescapeu8fast", jsonescapeu8fast, METH_VARARGS,
1259 "escape a UTF-8 byte string to JSON (fast path)\n"},
1224 "escape a UTF-8 byte string to JSON (fast path)\n"},
1260 {"encodedir", encodedir, METH_VARARGS, "encodedir a path\n"},
1225 {"encodedir", encodedir, METH_VARARGS, "encodedir a path\n"},
1261 {"pathencode", pathencode, METH_VARARGS, "fncache-encode a path\n"},
1226 {"pathencode", pathencode, METH_VARARGS, "fncache-encode a path\n"},
1262 {"lowerencode", lowerencode, METH_VARARGS, "lower-encode a path\n"},
1227 {"lowerencode", lowerencode, METH_VARARGS, "lower-encode a path\n"},
1263 {"fm1readmarkers", fm1readmarkers, METH_VARARGS,
1228 {"fm1readmarkers", fm1readmarkers, METH_VARARGS,
1264 "parse v1 obsolete markers\n"},
1229 "parse v1 obsolete markers\n"},
1265 {NULL, NULL}};
1230 {NULL, NULL}};
1266
1231
1267 void dirs_module_init(PyObject *mod);
1232 void dirs_module_init(PyObject *mod);
1268 void manifest_module_init(PyObject *mod);
1233 void manifest_module_init(PyObject *mod);
1269 void revlog_module_init(PyObject *mod);
1234 void revlog_module_init(PyObject *mod);
1270
1235
1271 static const int version = 20;
1236 static const int version = 20;
1272
1237
1273 static void module_init(PyObject *mod)
1238 static void module_init(PyObject *mod)
1274 {
1239 {
1275 PyModule_AddIntConstant(mod, "version", version);
1240 PyModule_AddIntConstant(mod, "version", version);
1276
1241
1277 /* This module constant has two purposes. First, it lets us unit test
1242 /* This module constant has two purposes. First, it lets us unit test
1278 * the ImportError raised without hard-coding any error text. This
1243 * the ImportError raised without hard-coding any error text. This
1279 * means we can change the text in the future without breaking tests,
1244 * means we can change the text in the future without breaking tests,
1280 * even across changesets without a recompile. Second, its presence
1245 * even across changesets without a recompile. Second, its presence
1281 * can be used to determine whether the version-checking logic is
1246 * can be used to determine whether the version-checking logic is
1282 * present, which also helps in testing across changesets without a
1247 * present, which also helps in testing across changesets without a
1283 * recompile. Note that this means the pure-Python version of parsers
1248 * recompile. Note that this means the pure-Python version of parsers
1284 * should not have this module constant. */
1249 * should not have this module constant. */
1285 PyModule_AddStringConstant(mod, "versionerrortext", versionerrortext);
1250 PyModule_AddStringConstant(mod, "versionerrortext", versionerrortext);
1286
1251
1287 dirs_module_init(mod);
1252 dirs_module_init(mod);
1288 manifest_module_init(mod);
1253 manifest_module_init(mod);
1289 revlog_module_init(mod);
1254 revlog_module_init(mod);
1290
1255
1291 if (PyType_Ready(&dirstateItemType) < 0) {
1256 if (PyType_Ready(&dirstateItemType) < 0) {
1292 return;
1257 return;
1293 }
1258 }
1294 Py_INCREF(&dirstateItemType);
1259 Py_INCREF(&dirstateItemType);
1295 PyModule_AddObject(mod, "DirstateItem", (PyObject *)&dirstateItemType);
1260 PyModule_AddObject(mod, "DirstateItem", (PyObject *)&dirstateItemType);
1296 }
1261 }
1297
1262
1298 static int check_python_version(void)
1263 static int check_python_version(void)
1299 {
1264 {
1300 PyObject *sys = PyImport_ImportModule("sys"), *ver;
1265 PyObject *sys = PyImport_ImportModule("sys"), *ver;
1301 long hexversion;
1266 long hexversion;
1302 if (!sys) {
1267 if (!sys) {
1303 return -1;
1268 return -1;
1304 }
1269 }
1305 ver = PyObject_GetAttrString(sys, "hexversion");
1270 ver = PyObject_GetAttrString(sys, "hexversion");
1306 Py_DECREF(sys);
1271 Py_DECREF(sys);
1307 if (!ver) {
1272 if (!ver) {
1308 return -1;
1273 return -1;
1309 }
1274 }
1310 hexversion = PyInt_AsLong(ver);
1275 hexversion = PyInt_AsLong(ver);
1311 Py_DECREF(ver);
1276 Py_DECREF(ver);
1312 /* sys.hexversion is a 32-bit number by default, so the -1 case
1277 /* sys.hexversion is a 32-bit number by default, so the -1 case
1313 * should only occur in unusual circumstances (e.g. if sys.hexversion
1278 * should only occur in unusual circumstances (e.g. if sys.hexversion
1314 * is manually set to an invalid value). */
1279 * is manually set to an invalid value). */
1315 if ((hexversion == -1) || (hexversion >> 16 != PY_VERSION_HEX >> 16)) {
1280 if ((hexversion == -1) || (hexversion >> 16 != PY_VERSION_HEX >> 16)) {
1316 PyErr_Format(PyExc_ImportError,
1281 PyErr_Format(PyExc_ImportError,
1317 "%s: The Mercurial extension "
1282 "%s: The Mercurial extension "
1318 "modules were compiled with Python " PY_VERSION
1283 "modules were compiled with Python " PY_VERSION
1319 ", but "
1284 ", but "
1320 "Mercurial is currently using Python with "
1285 "Mercurial is currently using Python with "
1321 "sys.hexversion=%ld: "
1286 "sys.hexversion=%ld: "
1322 "Python %s\n at: %s",
1287 "Python %s\n at: %s",
1323 versionerrortext, hexversion, Py_GetVersion(),
1288 versionerrortext, hexversion, Py_GetVersion(),
1324 Py_GetProgramFullPath());
1289 Py_GetProgramFullPath());
1325 return -1;
1290 return -1;
1326 }
1291 }
1327 return 0;
1292 return 0;
1328 }
1293 }
1329
1294
1330 #ifdef IS_PY3K
1295 #ifdef IS_PY3K
1331 static struct PyModuleDef parsers_module = {PyModuleDef_HEAD_INIT, "parsers",
1296 static struct PyModuleDef parsers_module = {PyModuleDef_HEAD_INIT, "parsers",
1332 parsers_doc, -1, methods};
1297 parsers_doc, -1, methods};
1333
1298
1334 PyMODINIT_FUNC PyInit_parsers(void)
1299 PyMODINIT_FUNC PyInit_parsers(void)
1335 {
1300 {
1336 PyObject *mod;
1301 PyObject *mod;
1337
1302
1338 if (check_python_version() == -1)
1303 if (check_python_version() == -1)
1339 return NULL;
1304 return NULL;
1340 mod = PyModule_Create(&parsers_module);
1305 mod = PyModule_Create(&parsers_module);
1341 module_init(mod);
1306 module_init(mod);
1342 return mod;
1307 return mod;
1343 }
1308 }
1344 #else
1309 #else
1345 PyMODINIT_FUNC initparsers(void)
1310 PyMODINIT_FUNC initparsers(void)
1346 {
1311 {
1347 PyObject *mod;
1312 PyObject *mod;
1348
1313
1349 if (check_python_version() == -1) {
1314 if (check_python_version() == -1) {
1350 return;
1315 return;
1351 }
1316 }
1352 mod = Py_InitModule3("parsers", methods, parsers_doc);
1317 mod = Py_InitModule3("parsers", methods, parsers_doc);
1353 module_init(mod);
1318 module_init(mod);
1354 }
1319 }
1355 #endif
1320 #endif
@@ -1,1491 +1,1472 b''
1 # dirstate.py - working directory tracking for mercurial
1 # dirstate.py - working directory tracking for mercurial
2 #
2 #
3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import collections
10 import collections
11 import contextlib
11 import contextlib
12 import errno
12 import errno
13 import os
13 import os
14 import stat
14 import stat
15
15
16 from .i18n import _
16 from .i18n import _
17 from .pycompat import delattr
17 from .pycompat import delattr
18
18
19 from hgdemandimport import tracing
19 from hgdemandimport import tracing
20
20
21 from . import (
21 from . import (
22 dirstatemap,
22 dirstatemap,
23 encoding,
23 encoding,
24 error,
24 error,
25 match as matchmod,
25 match as matchmod,
26 pathutil,
26 pathutil,
27 policy,
27 policy,
28 pycompat,
28 pycompat,
29 scmutil,
29 scmutil,
30 sparse,
30 sparse,
31 util,
31 util,
32 )
32 )
33
33
34 from .dirstateutils import (
34 from .dirstateutils import (
35 timestamp,
35 timestamp,
36 )
36 )
37
37
38 from .interfaces import (
38 from .interfaces import (
39 dirstate as intdirstate,
39 dirstate as intdirstate,
40 util as interfaceutil,
40 util as interfaceutil,
41 )
41 )
42
42
43 parsers = policy.importmod('parsers')
43 parsers = policy.importmod('parsers')
44 rustmod = policy.importrust('dirstate')
44 rustmod = policy.importrust('dirstate')
45
45
46 HAS_FAST_DIRSTATE_V2 = rustmod is not None
46 HAS_FAST_DIRSTATE_V2 = rustmod is not None
47
47
48 propertycache = util.propertycache
48 propertycache = util.propertycache
49 filecache = scmutil.filecache
49 filecache = scmutil.filecache
50 _rangemask = dirstatemap.rangemask
50 _rangemask = dirstatemap.rangemask
51
51
52 DirstateItem = dirstatemap.DirstateItem
52 DirstateItem = dirstatemap.DirstateItem
53
53
54
54
55 class repocache(filecache):
55 class repocache(filecache):
56 """filecache for files in .hg/"""
56 """filecache for files in .hg/"""
57
57
58 def join(self, obj, fname):
58 def join(self, obj, fname):
59 return obj._opener.join(fname)
59 return obj._opener.join(fname)
60
60
61
61
62 class rootcache(filecache):
62 class rootcache(filecache):
63 """filecache for files in the repository root"""
63 """filecache for files in the repository root"""
64
64
65 def join(self, obj, fname):
65 def join(self, obj, fname):
66 return obj._join(fname)
66 return obj._join(fname)
67
67
68
68
69 def requires_parents_change(func):
69 def requires_parents_change(func):
70 def wrap(self, *args, **kwargs):
70 def wrap(self, *args, **kwargs):
71 if not self.pendingparentchange():
71 if not self.pendingparentchange():
72 msg = 'calling `%s` outside of a parentchange context'
72 msg = 'calling `%s` outside of a parentchange context'
73 msg %= func.__name__
73 msg %= func.__name__
74 raise error.ProgrammingError(msg)
74 raise error.ProgrammingError(msg)
75 return func(self, *args, **kwargs)
75 return func(self, *args, **kwargs)
76
76
77 return wrap
77 return wrap
78
78
79
79
80 def requires_no_parents_change(func):
80 def requires_no_parents_change(func):
81 def wrap(self, *args, **kwargs):
81 def wrap(self, *args, **kwargs):
82 if self.pendingparentchange():
82 if self.pendingparentchange():
83 msg = 'calling `%s` inside of a parentchange context'
83 msg = 'calling `%s` inside of a parentchange context'
84 msg %= func.__name__
84 msg %= func.__name__
85 raise error.ProgrammingError(msg)
85 raise error.ProgrammingError(msg)
86 return func(self, *args, **kwargs)
86 return func(self, *args, **kwargs)
87
87
88 return wrap
88 return wrap
89
89
90
90
91 @interfaceutil.implementer(intdirstate.idirstate)
91 @interfaceutil.implementer(intdirstate.idirstate)
92 class dirstate(object):
92 class dirstate(object):
93 def __init__(
93 def __init__(
94 self,
94 self,
95 opener,
95 opener,
96 ui,
96 ui,
97 root,
97 root,
98 validate,
98 validate,
99 sparsematchfn,
99 sparsematchfn,
100 nodeconstants,
100 nodeconstants,
101 use_dirstate_v2,
101 use_dirstate_v2,
102 ):
102 ):
103 """Create a new dirstate object.
103 """Create a new dirstate object.
104
104
105 opener is an open()-like callable that can be used to open the
105 opener is an open()-like callable that can be used to open the
106 dirstate file; root is the root of the directory tracked by
106 dirstate file; root is the root of the directory tracked by
107 the dirstate.
107 the dirstate.
108 """
108 """
109 self._use_dirstate_v2 = use_dirstate_v2
109 self._use_dirstate_v2 = use_dirstate_v2
110 self._nodeconstants = nodeconstants
110 self._nodeconstants = nodeconstants
111 self._opener = opener
111 self._opener = opener
112 self._validate = validate
112 self._validate = validate
113 self._root = root
113 self._root = root
114 self._sparsematchfn = sparsematchfn
114 self._sparsematchfn = sparsematchfn
115 # ntpath.join(root, '') of Python 2.7.9 does not add sep if root is
115 # ntpath.join(root, '') of Python 2.7.9 does not add sep if root is
116 # UNC path pointing to root share (issue4557)
116 # UNC path pointing to root share (issue4557)
117 self._rootdir = pathutil.normasprefix(root)
117 self._rootdir = pathutil.normasprefix(root)
118 self._dirty = False
118 self._dirty = False
119 self._ui = ui
119 self._ui = ui
120 self._filecache = {}
120 self._filecache = {}
121 self._parentwriters = 0
121 self._parentwriters = 0
122 self._filename = b'dirstate'
122 self._filename = b'dirstate'
123 self._pendingfilename = b'%s.pending' % self._filename
123 self._pendingfilename = b'%s.pending' % self._filename
124 self._plchangecallbacks = {}
124 self._plchangecallbacks = {}
125 self._origpl = None
125 self._origpl = None
126 self._mapcls = dirstatemap.dirstatemap
126 self._mapcls = dirstatemap.dirstatemap
127 # Access and cache cwd early, so we don't access it for the first time
127 # Access and cache cwd early, so we don't access it for the first time
128 # after a working-copy update caused it to not exist (accessing it then
128 # after a working-copy update caused it to not exist (accessing it then
129 # raises an exception).
129 # raises an exception).
130 self._cwd
130 self._cwd
131
131
132 def prefetch_parents(self):
132 def prefetch_parents(self):
133 """make sure the parents are loaded
133 """make sure the parents are loaded
134
134
135 Used to avoid a race condition.
135 Used to avoid a race condition.
136 """
136 """
137 self._pl
137 self._pl
138
138
139 @contextlib.contextmanager
139 @contextlib.contextmanager
140 def parentchange(self):
140 def parentchange(self):
141 """Context manager for handling dirstate parents.
141 """Context manager for handling dirstate parents.
142
142
143 If an exception occurs in the scope of the context manager,
143 If an exception occurs in the scope of the context manager,
144 the incoherent dirstate won't be written when wlock is
144 the incoherent dirstate won't be written when wlock is
145 released.
145 released.
146 """
146 """
147 self._parentwriters += 1
147 self._parentwriters += 1
148 yield
148 yield
149 # Typically we want the "undo" step of a context manager in a
149 # Typically we want the "undo" step of a context manager in a
150 # finally block so it happens even when an exception
150 # finally block so it happens even when an exception
151 # occurs. In this case, however, we only want to decrement
151 # occurs. In this case, however, we only want to decrement
152 # parentwriters if the code in the with statement exits
152 # parentwriters if the code in the with statement exits
153 # normally, so we don't have a try/finally here on purpose.
153 # normally, so we don't have a try/finally here on purpose.
154 self._parentwriters -= 1
154 self._parentwriters -= 1
155
155
156 def pendingparentchange(self):
156 def pendingparentchange(self):
157 """Returns true if the dirstate is in the middle of a set of changes
157 """Returns true if the dirstate is in the middle of a set of changes
158 that modify the dirstate parent.
158 that modify the dirstate parent.
159 """
159 """
160 return self._parentwriters > 0
160 return self._parentwriters > 0
161
161
162 @propertycache
162 @propertycache
163 def _map(self):
163 def _map(self):
164 """Return the dirstate contents (see documentation for dirstatemap)."""
164 """Return the dirstate contents (see documentation for dirstatemap)."""
165 self._map = self._mapcls(
165 self._map = self._mapcls(
166 self._ui,
166 self._ui,
167 self._opener,
167 self._opener,
168 self._root,
168 self._root,
169 self._nodeconstants,
169 self._nodeconstants,
170 self._use_dirstate_v2,
170 self._use_dirstate_v2,
171 )
171 )
172 return self._map
172 return self._map
173
173
174 @property
174 @property
175 def _sparsematcher(self):
175 def _sparsematcher(self):
176 """The matcher for the sparse checkout.
176 """The matcher for the sparse checkout.
177
177
178 The working directory may not include every file from a manifest. The
178 The working directory may not include every file from a manifest. The
179 matcher obtained by this property will match a path if it is to be
179 matcher obtained by this property will match a path if it is to be
180 included in the working directory.
180 included in the working directory.
181 """
181 """
182 # TODO there is potential to cache this property. For now, the matcher
182 # TODO there is potential to cache this property. For now, the matcher
183 # is resolved on every access. (But the called function does use a
183 # is resolved on every access. (But the called function does use a
184 # cache to keep the lookup fast.)
184 # cache to keep the lookup fast.)
185 return self._sparsematchfn()
185 return self._sparsematchfn()
186
186
187 @repocache(b'branch')
187 @repocache(b'branch')
188 def _branch(self):
188 def _branch(self):
189 try:
189 try:
190 return self._opener.read(b"branch").strip() or b"default"
190 return self._opener.read(b"branch").strip() or b"default"
191 except IOError as inst:
191 except IOError as inst:
192 if inst.errno != errno.ENOENT:
192 if inst.errno != errno.ENOENT:
193 raise
193 raise
194 return b"default"
194 return b"default"
195
195
196 @property
196 @property
197 def _pl(self):
197 def _pl(self):
198 return self._map.parents()
198 return self._map.parents()
199
199
200 def hasdir(self, d):
200 def hasdir(self, d):
201 return self._map.hastrackeddir(d)
201 return self._map.hastrackeddir(d)
202
202
203 @rootcache(b'.hgignore')
203 @rootcache(b'.hgignore')
204 def _ignore(self):
204 def _ignore(self):
205 files = self._ignorefiles()
205 files = self._ignorefiles()
206 if not files:
206 if not files:
207 return matchmod.never()
207 return matchmod.never()
208
208
209 pats = [b'include:%s' % f for f in files]
209 pats = [b'include:%s' % f for f in files]
210 return matchmod.match(self._root, b'', [], pats, warn=self._ui.warn)
210 return matchmod.match(self._root, b'', [], pats, warn=self._ui.warn)
211
211
212 @propertycache
212 @propertycache
213 def _slash(self):
213 def _slash(self):
214 return self._ui.configbool(b'ui', b'slash') and pycompat.ossep != b'/'
214 return self._ui.configbool(b'ui', b'slash') and pycompat.ossep != b'/'
215
215
216 @propertycache
216 @propertycache
217 def _checklink(self):
217 def _checklink(self):
218 return util.checklink(self._root)
218 return util.checklink(self._root)
219
219
220 @propertycache
220 @propertycache
221 def _checkexec(self):
221 def _checkexec(self):
222 return bool(util.checkexec(self._root))
222 return bool(util.checkexec(self._root))
223
223
224 @propertycache
224 @propertycache
225 def _checkcase(self):
225 def _checkcase(self):
226 return not util.fscasesensitive(self._join(b'.hg'))
226 return not util.fscasesensitive(self._join(b'.hg'))
227
227
228 def _join(self, f):
228 def _join(self, f):
229 # much faster than os.path.join()
229 # much faster than os.path.join()
230 # it's safe because f is always a relative path
230 # it's safe because f is always a relative path
231 return self._rootdir + f
231 return self._rootdir + f
232
232
233 def flagfunc(self, buildfallback):
233 def flagfunc(self, buildfallback):
234 """build a callable that returns flags associated with a filename
234 """build a callable that returns flags associated with a filename
235
235
236 The information is extracted from three possible layers:
236 The information is extracted from three possible layers:
237 1. the file system if it supports the information
237 1. the file system if it supports the information
238 2. the "fallback" information stored in the dirstate if any
238 2. the "fallback" information stored in the dirstate if any
239 3. a more expensive mechanism inferring the flags from the parents.
239 3. a more expensive mechanism inferring the flags from the parents.
240 """
240 """
241
241
242 # small hack to cache the result of buildfallback()
242 # small hack to cache the result of buildfallback()
243 fallback_func = []
243 fallback_func = []
244
244
245 def get_flags(x):
245 def get_flags(x):
246 entry = None
246 entry = None
247 fallback_value = None
247 fallback_value = None
248 try:
248 try:
249 st = os.lstat(self._join(x))
249 st = os.lstat(self._join(x))
250 except OSError:
250 except OSError:
251 return b''
251 return b''
252
252
253 if self._checklink:
253 if self._checklink:
254 if util.statislink(st):
254 if util.statislink(st):
255 return b'l'
255 return b'l'
256 else:
256 else:
257 entry = self.get_entry(x)
257 entry = self.get_entry(x)
258 if entry.has_fallback_symlink:
258 if entry.has_fallback_symlink:
259 if entry.fallback_symlink:
259 if entry.fallback_symlink:
260 return b'l'
260 return b'l'
261 else:
261 else:
262 if not fallback_func:
262 if not fallback_func:
263 fallback_func.append(buildfallback())
263 fallback_func.append(buildfallback())
264 fallback_value = fallback_func[0](x)
264 fallback_value = fallback_func[0](x)
265 if b'l' in fallback_value:
265 if b'l' in fallback_value:
266 return b'l'
266 return b'l'
267
267
268 if self._checkexec:
268 if self._checkexec:
269 if util.statisexec(st):
269 if util.statisexec(st):
270 return b'x'
270 return b'x'
271 else:
271 else:
272 if entry is None:
272 if entry is None:
273 entry = self.get_entry(x)
273 entry = self.get_entry(x)
274 if entry.has_fallback_exec:
274 if entry.has_fallback_exec:
275 if entry.fallback_exec:
275 if entry.fallback_exec:
276 return b'x'
276 return b'x'
277 else:
277 else:
278 if fallback_value is None:
278 if fallback_value is None:
279 if not fallback_func:
279 if not fallback_func:
280 fallback_func.append(buildfallback())
280 fallback_func.append(buildfallback())
281 fallback_value = fallback_func[0](x)
281 fallback_value = fallback_func[0](x)
282 if b'x' in fallback_value:
282 if b'x' in fallback_value:
283 return b'x'
283 return b'x'
284 return b''
284 return b''
285
285
286 return get_flags
286 return get_flags
287
287
288 @propertycache
288 @propertycache
289 def _cwd(self):
289 def _cwd(self):
290 # internal config: ui.forcecwd
290 # internal config: ui.forcecwd
291 forcecwd = self._ui.config(b'ui', b'forcecwd')
291 forcecwd = self._ui.config(b'ui', b'forcecwd')
292 if forcecwd:
292 if forcecwd:
293 return forcecwd
293 return forcecwd
294 return encoding.getcwd()
294 return encoding.getcwd()
295
295
296 def getcwd(self):
296 def getcwd(self):
297 """Return the path from which a canonical path is calculated.
297 """Return the path from which a canonical path is calculated.
298
298
299 This path should be used to resolve file patterns or to convert
299 This path should be used to resolve file patterns or to convert
300 canonical paths back to file paths for display. It shouldn't be
300 canonical paths back to file paths for display. It shouldn't be
301 used to get real file paths. Use vfs functions instead.
301 used to get real file paths. Use vfs functions instead.
302 """
302 """
303 cwd = self._cwd
303 cwd = self._cwd
304 if cwd == self._root:
304 if cwd == self._root:
305 return b''
305 return b''
306 # self._root ends with a path separator if self._root is '/' or 'C:\'
306 # self._root ends with a path separator if self._root is '/' or 'C:\'
307 rootsep = self._root
307 rootsep = self._root
308 if not util.endswithsep(rootsep):
308 if not util.endswithsep(rootsep):
309 rootsep += pycompat.ossep
309 rootsep += pycompat.ossep
310 if cwd.startswith(rootsep):
310 if cwd.startswith(rootsep):
311 return cwd[len(rootsep) :]
311 return cwd[len(rootsep) :]
312 else:
312 else:
313 # we're outside the repo. return an absolute path.
313 # we're outside the repo. return an absolute path.
314 return cwd
314 return cwd
315
315
316 def pathto(self, f, cwd=None):
316 def pathto(self, f, cwd=None):
317 if cwd is None:
317 if cwd is None:
318 cwd = self.getcwd()
318 cwd = self.getcwd()
319 path = util.pathto(self._root, cwd, f)
319 path = util.pathto(self._root, cwd, f)
320 if self._slash:
320 if self._slash:
321 return util.pconvert(path)
321 return util.pconvert(path)
322 return path
322 return path
323
323
324 def __getitem__(self, key):
324 def __getitem__(self, key):
325 """Return the current state of key (a filename) in the dirstate.
325 """Return the current state of key (a filename) in the dirstate.
326
326
327 States are:
327 States are:
328 n normal
328 n normal
329 m needs merging
329 m needs merging
330 r marked for removal
330 r marked for removal
331 a marked for addition
331 a marked for addition
332 ? not tracked
332 ? not tracked
333
333
334 XXX The "state" is a bit obscure to be in the "public" API. we should
334 XXX The "state" is a bit obscure to be in the "public" API. we should
335 consider migrating all user of this to going through the dirstate entry
335 consider migrating all user of this to going through the dirstate entry
336 instead.
336 instead.
337 """
337 """
338 msg = b"don't use dirstate[file], use dirstate.get_entry(file)"
338 msg = b"don't use dirstate[file], use dirstate.get_entry(file)"
339 util.nouideprecwarn(msg, b'6.1', stacklevel=2)
339 util.nouideprecwarn(msg, b'6.1', stacklevel=2)
340 entry = self._map.get(key)
340 entry = self._map.get(key)
341 if entry is not None:
341 if entry is not None:
342 return entry.state
342 return entry.state
343 return b'?'
343 return b'?'
344
344
345 def get_entry(self, path):
345 def get_entry(self, path):
346 """return a DirstateItem for the associated path"""
346 """return a DirstateItem for the associated path"""
347 entry = self._map.get(path)
347 entry = self._map.get(path)
348 if entry is None:
348 if entry is None:
349 return DirstateItem()
349 return DirstateItem()
350 return entry
350 return entry
351
351
352 def __contains__(self, key):
352 def __contains__(self, key):
353 return key in self._map
353 return key in self._map
354
354
355 def __iter__(self):
355 def __iter__(self):
356 return iter(sorted(self._map))
356 return iter(sorted(self._map))
357
357
358 def items(self):
358 def items(self):
359 return pycompat.iteritems(self._map)
359 return pycompat.iteritems(self._map)
360
360
361 iteritems = items
361 iteritems = items
362
362
363 def parents(self):
363 def parents(self):
364 return [self._validate(p) for p in self._pl]
364 return [self._validate(p) for p in self._pl]
365
365
366 def p1(self):
366 def p1(self):
367 return self._validate(self._pl[0])
367 return self._validate(self._pl[0])
368
368
369 def p2(self):
369 def p2(self):
370 return self._validate(self._pl[1])
370 return self._validate(self._pl[1])
371
371
372 @property
372 @property
373 def in_merge(self):
373 def in_merge(self):
374 """True if a merge is in progress"""
374 """True if a merge is in progress"""
375 return self._pl[1] != self._nodeconstants.nullid
375 return self._pl[1] != self._nodeconstants.nullid
376
376
377 def branch(self):
377 def branch(self):
378 return encoding.tolocal(self._branch)
378 return encoding.tolocal(self._branch)
379
379
380 def setparents(self, p1, p2=None):
380 def setparents(self, p1, p2=None):
381 """Set dirstate parents to p1 and p2.
381 """Set dirstate parents to p1 and p2.
382
382
383 When moving from two parents to one, "merged" entries a
383 When moving from two parents to one, "merged" entries a
384 adjusted to normal and previous copy records discarded and
384 adjusted to normal and previous copy records discarded and
385 returned by the call.
385 returned by the call.
386
386
387 See localrepo.setparents()
387 See localrepo.setparents()
388 """
388 """
389 if p2 is None:
389 if p2 is None:
390 p2 = self._nodeconstants.nullid
390 p2 = self._nodeconstants.nullid
391 if self._parentwriters == 0:
391 if self._parentwriters == 0:
392 raise ValueError(
392 raise ValueError(
393 b"cannot set dirstate parent outside of "
393 b"cannot set dirstate parent outside of "
394 b"dirstate.parentchange context manager"
394 b"dirstate.parentchange context manager"
395 )
395 )
396
396
397 self._dirty = True
397 self._dirty = True
398 oldp2 = self._pl[1]
398 oldp2 = self._pl[1]
399 if self._origpl is None:
399 if self._origpl is None:
400 self._origpl = self._pl
400 self._origpl = self._pl
401 nullid = self._nodeconstants.nullid
401 nullid = self._nodeconstants.nullid
402 # True if we need to fold p2 related state back to a linear case
402 # True if we need to fold p2 related state back to a linear case
403 fold_p2 = oldp2 != nullid and p2 == nullid
403 fold_p2 = oldp2 != nullid and p2 == nullid
404 return self._map.setparents(p1, p2, fold_p2=fold_p2)
404 return self._map.setparents(p1, p2, fold_p2=fold_p2)
405
405
406 def setbranch(self, branch):
406 def setbranch(self, branch):
407 self.__class__._branch.set(self, encoding.fromlocal(branch))
407 self.__class__._branch.set(self, encoding.fromlocal(branch))
408 f = self._opener(b'branch', b'w', atomictemp=True, checkambig=True)
408 f = self._opener(b'branch', b'w', atomictemp=True, checkambig=True)
409 try:
409 try:
410 f.write(self._branch + b'\n')
410 f.write(self._branch + b'\n')
411 f.close()
411 f.close()
412
412
413 # make sure filecache has the correct stat info for _branch after
413 # make sure filecache has the correct stat info for _branch after
414 # replacing the underlying file
414 # replacing the underlying file
415 ce = self._filecache[b'_branch']
415 ce = self._filecache[b'_branch']
416 if ce:
416 if ce:
417 ce.refresh()
417 ce.refresh()
418 except: # re-raises
418 except: # re-raises
419 f.discard()
419 f.discard()
420 raise
420 raise
421
421
422 def invalidate(self):
422 def invalidate(self):
423 """Causes the next access to reread the dirstate.
423 """Causes the next access to reread the dirstate.
424
424
425 This is different from localrepo.invalidatedirstate() because it always
425 This is different from localrepo.invalidatedirstate() because it always
426 rereads the dirstate. Use localrepo.invalidatedirstate() if you want to
426 rereads the dirstate. Use localrepo.invalidatedirstate() if you want to
427 check whether the dirstate has changed before rereading it."""
427 check whether the dirstate has changed before rereading it."""
428
428
429 for a in ("_map", "_branch", "_ignore"):
429 for a in ("_map", "_branch", "_ignore"):
430 if a in self.__dict__:
430 if a in self.__dict__:
431 delattr(self, a)
431 delattr(self, a)
432 self._dirty = False
432 self._dirty = False
433 self._parentwriters = 0
433 self._parentwriters = 0
434 self._origpl = None
434 self._origpl = None
435
435
436 def copy(self, source, dest):
436 def copy(self, source, dest):
437 """Mark dest as a copy of source. Unmark dest if source is None."""
437 """Mark dest as a copy of source. Unmark dest if source is None."""
438 if source == dest:
438 if source == dest:
439 return
439 return
440 self._dirty = True
440 self._dirty = True
441 if source is not None:
441 if source is not None:
442 self._map.copymap[dest] = source
442 self._map.copymap[dest] = source
443 else:
443 else:
444 self._map.copymap.pop(dest, None)
444 self._map.copymap.pop(dest, None)
445
445
446 def copied(self, file):
446 def copied(self, file):
447 return self._map.copymap.get(file, None)
447 return self._map.copymap.get(file, None)
448
448
449 def copies(self):
449 def copies(self):
450 return self._map.copymap
450 return self._map.copymap
451
451
452 @requires_no_parents_change
452 @requires_no_parents_change
453 def set_tracked(self, filename, reset_copy=False):
453 def set_tracked(self, filename, reset_copy=False):
454 """a "public" method for generic code to mark a file as tracked
454 """a "public" method for generic code to mark a file as tracked
455
455
456 This function is to be called outside of "update/merge" case. For
456 This function is to be called outside of "update/merge" case. For
457 example by a command like `hg add X`.
457 example by a command like `hg add X`.
458
458
459 if reset_copy is set, any existing copy information will be dropped.
459 if reset_copy is set, any existing copy information will be dropped.
460
460
461 return True the file was previously untracked, False otherwise.
461 return True the file was previously untracked, False otherwise.
462 """
462 """
463 self._dirty = True
463 self._dirty = True
464 entry = self._map.get(filename)
464 entry = self._map.get(filename)
465 if entry is None or not entry.tracked:
465 if entry is None or not entry.tracked:
466 self._check_new_tracked_filename(filename)
466 self._check_new_tracked_filename(filename)
467 pre_tracked = self._map.set_tracked(filename)
467 pre_tracked = self._map.set_tracked(filename)
468 if reset_copy:
468 if reset_copy:
469 self._map.copymap.pop(filename, None)
469 self._map.copymap.pop(filename, None)
470 return pre_tracked
470 return pre_tracked
471
471
472 @requires_no_parents_change
472 @requires_no_parents_change
473 def set_untracked(self, filename):
473 def set_untracked(self, filename):
474 """a "public" method for generic code to mark a file as untracked
474 """a "public" method for generic code to mark a file as untracked
475
475
476 This function is to be called outside of "update/merge" case. For
476 This function is to be called outside of "update/merge" case. For
477 example by a command like `hg remove X`.
477 example by a command like `hg remove X`.
478
478
479 return True the file was previously tracked, False otherwise.
479 return True the file was previously tracked, False otherwise.
480 """
480 """
481 ret = self._map.set_untracked(filename)
481 ret = self._map.set_untracked(filename)
482 if ret:
482 if ret:
483 self._dirty = True
483 self._dirty = True
484 return ret
484 return ret
485
485
486 @requires_no_parents_change
486 @requires_no_parents_change
487 def set_clean(self, filename, parentfiledata):
487 def set_clean(self, filename, parentfiledata):
488 """record that the current state of the file on disk is known to be clean"""
488 """record that the current state of the file on disk is known to be clean"""
489 self._dirty = True
489 self._dirty = True
490 if not self._map[filename].tracked:
490 if not self._map[filename].tracked:
491 self._check_new_tracked_filename(filename)
491 self._check_new_tracked_filename(filename)
492 (mode, size, mtime) = parentfiledata
492 (mode, size, mtime) = parentfiledata
493 self._map.set_clean(filename, mode, size, mtime)
493 self._map.set_clean(filename, mode, size, mtime)
494
494
495 @requires_no_parents_change
495 @requires_no_parents_change
496 def set_possibly_dirty(self, filename):
496 def set_possibly_dirty(self, filename):
497 """record that the current state of the file on disk is unknown"""
497 """record that the current state of the file on disk is unknown"""
498 self._dirty = True
498 self._dirty = True
499 self._map.set_possibly_dirty(filename)
499 self._map.set_possibly_dirty(filename)
500
500
501 @requires_parents_change
501 @requires_parents_change
502 def update_file_p1(
502 def update_file_p1(
503 self,
503 self,
504 filename,
504 filename,
505 p1_tracked,
505 p1_tracked,
506 ):
506 ):
507 """Set a file as tracked in the parent (or not)
507 """Set a file as tracked in the parent (or not)
508
508
509 This is to be called when adjust the dirstate to a new parent after an history
509 This is to be called when adjust the dirstate to a new parent after an history
510 rewriting operation.
510 rewriting operation.
511
511
512 It should not be called during a merge (p2 != nullid) and only within
512 It should not be called during a merge (p2 != nullid) and only within
513 a `with dirstate.parentchange():` context.
513 a `with dirstate.parentchange():` context.
514 """
514 """
515 if self.in_merge:
515 if self.in_merge:
516 msg = b'update_file_reference should not be called when merging'
516 msg = b'update_file_reference should not be called when merging'
517 raise error.ProgrammingError(msg)
517 raise error.ProgrammingError(msg)
518 entry = self._map.get(filename)
518 entry = self._map.get(filename)
519 if entry is None:
519 if entry is None:
520 wc_tracked = False
520 wc_tracked = False
521 else:
521 else:
522 wc_tracked = entry.tracked
522 wc_tracked = entry.tracked
523 if not (p1_tracked or wc_tracked):
523 if not (p1_tracked or wc_tracked):
524 # the file is no longer relevant to anyone
524 # the file is no longer relevant to anyone
525 if self._map.get(filename) is not None:
525 if self._map.get(filename) is not None:
526 self._map.reset_state(filename)
526 self._map.reset_state(filename)
527 self._dirty = True
527 self._dirty = True
528 elif (not p1_tracked) and wc_tracked:
528 elif (not p1_tracked) and wc_tracked:
529 if entry is not None and entry.added:
529 if entry is not None and entry.added:
530 return # avoid dropping copy information (maybe?)
530 return # avoid dropping copy information (maybe?)
531
531
532 self._map.reset_state(
532 self._map.reset_state(
533 filename,
533 filename,
534 wc_tracked,
534 wc_tracked,
535 p1_tracked,
535 p1_tracked,
536 # the underlying reference might have changed, we will have to
536 # the underlying reference might have changed, we will have to
537 # check it.
537 # check it.
538 has_meaningful_mtime=False,
538 has_meaningful_mtime=False,
539 )
539 )
540
540
541 @requires_parents_change
541 @requires_parents_change
542 def update_file(
542 def update_file(
543 self,
543 self,
544 filename,
544 filename,
545 wc_tracked,
545 wc_tracked,
546 p1_tracked,
546 p1_tracked,
547 p2_info=False,
547 p2_info=False,
548 possibly_dirty=False,
548 possibly_dirty=False,
549 parentfiledata=None,
549 parentfiledata=None,
550 ):
550 ):
551 """update the information about a file in the dirstate
551 """update the information about a file in the dirstate
552
552
553 This is to be called when the direstates parent changes to keep track
553 This is to be called when the direstates parent changes to keep track
554 of what is the file situation in regards to the working copy and its parent.
554 of what is the file situation in regards to the working copy and its parent.
555
555
556 This function must be called within a `dirstate.parentchange` context.
556 This function must be called within a `dirstate.parentchange` context.
557
557
558 note: the API is at an early stage and we might need to adjust it
558 note: the API is at an early stage and we might need to adjust it
559 depending of what information ends up being relevant and useful to
559 depending of what information ends up being relevant and useful to
560 other processing.
560 other processing.
561 """
561 """
562
562
563 # note: I do not think we need to double check name clash here since we
563 # note: I do not think we need to double check name clash here since we
564 # are in a update/merge case that should already have taken care of
564 # are in a update/merge case that should already have taken care of
565 # this. The test agrees
565 # this. The test agrees
566
566
567 self._dirty = True
567 self._dirty = True
568
568
569 self._map.reset_state(
569 self._map.reset_state(
570 filename,
570 filename,
571 wc_tracked,
571 wc_tracked,
572 p1_tracked,
572 p1_tracked,
573 p2_info=p2_info,
573 p2_info=p2_info,
574 has_meaningful_mtime=not possibly_dirty,
574 has_meaningful_mtime=not possibly_dirty,
575 parentfiledata=parentfiledata,
575 parentfiledata=parentfiledata,
576 )
576 )
577
577
578 def _check_new_tracked_filename(self, filename):
578 def _check_new_tracked_filename(self, filename):
579 scmutil.checkfilename(filename)
579 scmutil.checkfilename(filename)
580 if self._map.hastrackeddir(filename):
580 if self._map.hastrackeddir(filename):
581 msg = _(b'directory %r already in dirstate')
581 msg = _(b'directory %r already in dirstate')
582 msg %= pycompat.bytestr(filename)
582 msg %= pycompat.bytestr(filename)
583 raise error.Abort(msg)
583 raise error.Abort(msg)
584 # shadows
584 # shadows
585 for d in pathutil.finddirs(filename):
585 for d in pathutil.finddirs(filename):
586 if self._map.hastrackeddir(d):
586 if self._map.hastrackeddir(d):
587 break
587 break
588 entry = self._map.get(d)
588 entry = self._map.get(d)
589 if entry is not None and not entry.removed:
589 if entry is not None and not entry.removed:
590 msg = _(b'file %r in dirstate clashes with %r')
590 msg = _(b'file %r in dirstate clashes with %r')
591 msg %= (pycompat.bytestr(d), pycompat.bytestr(filename))
591 msg %= (pycompat.bytestr(d), pycompat.bytestr(filename))
592 raise error.Abort(msg)
592 raise error.Abort(msg)
593
593
594 def _get_filedata(self, filename):
594 def _get_filedata(self, filename):
595 """returns"""
595 """returns"""
596 s = os.lstat(self._join(filename))
596 s = os.lstat(self._join(filename))
597 mode = s.st_mode
597 mode = s.st_mode
598 size = s.st_size
598 size = s.st_size
599 mtime = timestamp.mtime_of(s)
599 mtime = timestamp.mtime_of(s)
600 return (mode, size, mtime)
600 return (mode, size, mtime)
601
601
602 def _discoverpath(self, path, normed, ignoremissing, exists, storemap):
602 def _discoverpath(self, path, normed, ignoremissing, exists, storemap):
603 if exists is None:
603 if exists is None:
604 exists = os.path.lexists(os.path.join(self._root, path))
604 exists = os.path.lexists(os.path.join(self._root, path))
605 if not exists:
605 if not exists:
606 # Maybe a path component exists
606 # Maybe a path component exists
607 if not ignoremissing and b'/' in path:
607 if not ignoremissing and b'/' in path:
608 d, f = path.rsplit(b'/', 1)
608 d, f = path.rsplit(b'/', 1)
609 d = self._normalize(d, False, ignoremissing, None)
609 d = self._normalize(d, False, ignoremissing, None)
610 folded = d + b"/" + f
610 folded = d + b"/" + f
611 else:
611 else:
612 # No path components, preserve original case
612 # No path components, preserve original case
613 folded = path
613 folded = path
614 else:
614 else:
615 # recursively normalize leading directory components
615 # recursively normalize leading directory components
616 # against dirstate
616 # against dirstate
617 if b'/' in normed:
617 if b'/' in normed:
618 d, f = normed.rsplit(b'/', 1)
618 d, f = normed.rsplit(b'/', 1)
619 d = self._normalize(d, False, ignoremissing, True)
619 d = self._normalize(d, False, ignoremissing, True)
620 r = self._root + b"/" + d
620 r = self._root + b"/" + d
621 folded = d + b"/" + util.fspath(f, r)
621 folded = d + b"/" + util.fspath(f, r)
622 else:
622 else:
623 folded = util.fspath(normed, self._root)
623 folded = util.fspath(normed, self._root)
624 storemap[normed] = folded
624 storemap[normed] = folded
625
625
626 return folded
626 return folded
627
627
628 def _normalizefile(self, path, isknown, ignoremissing=False, exists=None):
628 def _normalizefile(self, path, isknown, ignoremissing=False, exists=None):
629 normed = util.normcase(path)
629 normed = util.normcase(path)
630 folded = self._map.filefoldmap.get(normed, None)
630 folded = self._map.filefoldmap.get(normed, None)
631 if folded is None:
631 if folded is None:
632 if isknown:
632 if isknown:
633 folded = path
633 folded = path
634 else:
634 else:
635 folded = self._discoverpath(
635 folded = self._discoverpath(
636 path, normed, ignoremissing, exists, self._map.filefoldmap
636 path, normed, ignoremissing, exists, self._map.filefoldmap
637 )
637 )
638 return folded
638 return folded
639
639
640 def _normalize(self, path, isknown, ignoremissing=False, exists=None):
640 def _normalize(self, path, isknown, ignoremissing=False, exists=None):
641 normed = util.normcase(path)
641 normed = util.normcase(path)
642 folded = self._map.filefoldmap.get(normed, None)
642 folded = self._map.filefoldmap.get(normed, None)
643 if folded is None:
643 if folded is None:
644 folded = self._map.dirfoldmap.get(normed, None)
644 folded = self._map.dirfoldmap.get(normed, None)
645 if folded is None:
645 if folded is None:
646 if isknown:
646 if isknown:
647 folded = path
647 folded = path
648 else:
648 else:
649 # store discovered result in dirfoldmap so that future
649 # store discovered result in dirfoldmap so that future
650 # normalizefile calls don't start matching directories
650 # normalizefile calls don't start matching directories
651 folded = self._discoverpath(
651 folded = self._discoverpath(
652 path, normed, ignoremissing, exists, self._map.dirfoldmap
652 path, normed, ignoremissing, exists, self._map.dirfoldmap
653 )
653 )
654 return folded
654 return folded
655
655
656 def normalize(self, path, isknown=False, ignoremissing=False):
656 def normalize(self, path, isknown=False, ignoremissing=False):
657 """
657 """
658 normalize the case of a pathname when on a casefolding filesystem
658 normalize the case of a pathname when on a casefolding filesystem
659
659
660 isknown specifies whether the filename came from walking the
660 isknown specifies whether the filename came from walking the
661 disk, to avoid extra filesystem access.
661 disk, to avoid extra filesystem access.
662
662
663 If ignoremissing is True, missing path are returned
663 If ignoremissing is True, missing path are returned
664 unchanged. Otherwise, we try harder to normalize possibly
664 unchanged. Otherwise, we try harder to normalize possibly
665 existing path components.
665 existing path components.
666
666
667 The normalized case is determined based on the following precedence:
667 The normalized case is determined based on the following precedence:
668
668
669 - version of name already stored in the dirstate
669 - version of name already stored in the dirstate
670 - version of name stored on disk
670 - version of name stored on disk
671 - version provided via command arguments
671 - version provided via command arguments
672 """
672 """
673
673
674 if self._checkcase:
674 if self._checkcase:
675 return self._normalize(path, isknown, ignoremissing)
675 return self._normalize(path, isknown, ignoremissing)
676 return path
676 return path
677
677
678 def clear(self):
678 def clear(self):
679 self._map.clear()
679 self._map.clear()
680 self._dirty = True
680 self._dirty = True
681
681
682 def rebuild(self, parent, allfiles, changedfiles=None):
682 def rebuild(self, parent, allfiles, changedfiles=None):
683 if changedfiles is None:
683 if changedfiles is None:
684 # Rebuild entire dirstate
684 # Rebuild entire dirstate
685 to_lookup = allfiles
685 to_lookup = allfiles
686 to_drop = []
686 to_drop = []
687 self.clear()
687 self.clear()
688 elif len(changedfiles) < 10:
688 elif len(changedfiles) < 10:
689 # Avoid turning allfiles into a set, which can be expensive if it's
689 # Avoid turning allfiles into a set, which can be expensive if it's
690 # large.
690 # large.
691 to_lookup = []
691 to_lookup = []
692 to_drop = []
692 to_drop = []
693 for f in changedfiles:
693 for f in changedfiles:
694 if f in allfiles:
694 if f in allfiles:
695 to_lookup.append(f)
695 to_lookup.append(f)
696 else:
696 else:
697 to_drop.append(f)
697 to_drop.append(f)
698 else:
698 else:
699 changedfilesset = set(changedfiles)
699 changedfilesset = set(changedfiles)
700 to_lookup = changedfilesset & set(allfiles)
700 to_lookup = changedfilesset & set(allfiles)
701 to_drop = changedfilesset - to_lookup
701 to_drop = changedfilesset - to_lookup
702
702
703 if self._origpl is None:
703 if self._origpl is None:
704 self._origpl = self._pl
704 self._origpl = self._pl
705 self._map.setparents(parent, self._nodeconstants.nullid)
705 self._map.setparents(parent, self._nodeconstants.nullid)
706
706
707 for f in to_lookup:
707 for f in to_lookup:
708
708
709 if self.in_merge:
709 if self.in_merge:
710 self.set_tracked(f)
710 self.set_tracked(f)
711 else:
711 else:
712 self._map.reset_state(
712 self._map.reset_state(
713 f,
713 f,
714 wc_tracked=True,
714 wc_tracked=True,
715 p1_tracked=True,
715 p1_tracked=True,
716 )
716 )
717 for f in to_drop:
717 for f in to_drop:
718 self._map.reset_state(f)
718 self._map.reset_state(f)
719
719
720 self._dirty = True
720 self._dirty = True
721
721
722 def identity(self):
722 def identity(self):
723 """Return identity of dirstate itself to detect changing in storage
723 """Return identity of dirstate itself to detect changing in storage
724
724
725 If identity of previous dirstate is equal to this, writing
725 If identity of previous dirstate is equal to this, writing
726 changes based on the former dirstate out can keep consistency.
726 changes based on the former dirstate out can keep consistency.
727 """
727 """
728 return self._map.identity
728 return self._map.identity
729
729
730 def write(self, tr):
730 def write(self, tr):
731 if not self._dirty:
731 if not self._dirty:
732 return
732 return
733
733
734 filename = self._filename
734 filename = self._filename
735 if tr:
735 if tr:
736 # 'dirstate.write()' is not only for writing in-memory
736 # 'dirstate.write()' is not only for writing in-memory
737 # changes out, but also for dropping ambiguous timestamp.
737 # changes out, but also for dropping ambiguous timestamp.
738 # delayed writing re-raise "ambiguous timestamp issue".
738 # delayed writing re-raise "ambiguous timestamp issue".
739 # See also the wiki page below for detail:
739 # See also the wiki page below for detail:
740 # https://www.mercurial-scm.org/wiki/DirstateTransactionPlan
740 # https://www.mercurial-scm.org/wiki/DirstateTransactionPlan
741
741
742 # record when mtime start to be ambiguous
742 # record when mtime start to be ambiguous
743 now = timestamp.get_fs_now(self._opener)
743 now = timestamp.get_fs_now(self._opener)
744
744
745 # delay writing in-memory changes out
745 # delay writing in-memory changes out
746 tr.addfilegenerator(
746 tr.addfilegenerator(
747 b'dirstate',
747 b'dirstate',
748 (self._filename,),
748 (self._filename,),
749 lambda f: self._writedirstate(tr, f, now=now),
749 lambda f: self._writedirstate(tr, f, now=now),
750 location=b'plain',
750 location=b'plain',
751 )
751 )
752 return
752 return
753
753
754 st = self._opener(filename, b"w", atomictemp=True, checkambig=True)
754 st = self._opener(filename, b"w", atomictemp=True, checkambig=True)
755 self._writedirstate(tr, st)
755 self._writedirstate(tr, st)
756
756
757 def addparentchangecallback(self, category, callback):
757 def addparentchangecallback(self, category, callback):
758 """add a callback to be called when the wd parents are changed
758 """add a callback to be called when the wd parents are changed
759
759
760 Callback will be called with the following arguments:
760 Callback will be called with the following arguments:
761 dirstate, (oldp1, oldp2), (newp1, newp2)
761 dirstate, (oldp1, oldp2), (newp1, newp2)
762
762
763 Category is a unique identifier to allow overwriting an old callback
763 Category is a unique identifier to allow overwriting an old callback
764 with a newer callback.
764 with a newer callback.
765 """
765 """
766 self._plchangecallbacks[category] = callback
766 self._plchangecallbacks[category] = callback
767
767
768 def _writedirstate(self, tr, st, now=None):
768 def _writedirstate(self, tr, st, now=None):
769 # notify callbacks about parents change
769 # notify callbacks about parents change
770 if self._origpl is not None and self._origpl != self._pl:
770 if self._origpl is not None and self._origpl != self._pl:
771 for c, callback in sorted(
771 for c, callback in sorted(
772 pycompat.iteritems(self._plchangecallbacks)
772 pycompat.iteritems(self._plchangecallbacks)
773 ):
773 ):
774 callback(self, self._origpl, self._pl)
774 callback(self, self._origpl, self._pl)
775 self._origpl = None
775 self._origpl = None
776
776
777 if now is None:
777 if now is None:
778 # use the modification time of the newly created temporary file as the
778 # use the modification time of the newly created temporary file as the
779 # filesystem's notion of 'now'
779 # filesystem's notion of 'now'
780 now = timestamp.mtime_of(util.fstat(st))
780 now = timestamp.mtime_of(util.fstat(st))
781
781
782 # enough 'delaywrite' prevents 'pack_dirstate' from dropping
783 # timestamp of each entries in dirstate, because of 'now > mtime'
784 delaywrite = self._ui.configint(b'debug', b'dirstate.delaywrite')
785 if delaywrite > 0:
786 # do we have any files to delay for?
787 for f, e in pycompat.iteritems(self._map):
788 if e.need_delay(now):
789 import time # to avoid useless import
790
791 # rather than sleep n seconds, sleep until the next
792 # multiple of n seconds
793 clock = time.time()
794 start = int(clock) - (int(clock) % delaywrite)
795 end = start + delaywrite
796 time.sleep(end - clock)
797 # trust our estimate that the end is near now
798 now = timestamp.timestamp((end, 0))
799 break
800
801 self._map.write(tr, st, now)
782 self._map.write(tr, st, now)
802 self._dirty = False
783 self._dirty = False
803
784
804 def _dirignore(self, f):
785 def _dirignore(self, f):
805 if self._ignore(f):
786 if self._ignore(f):
806 return True
787 return True
807 for p in pathutil.finddirs(f):
788 for p in pathutil.finddirs(f):
808 if self._ignore(p):
789 if self._ignore(p):
809 return True
790 return True
810 return False
791 return False
811
792
812 def _ignorefiles(self):
793 def _ignorefiles(self):
813 files = []
794 files = []
814 if os.path.exists(self._join(b'.hgignore')):
795 if os.path.exists(self._join(b'.hgignore')):
815 files.append(self._join(b'.hgignore'))
796 files.append(self._join(b'.hgignore'))
816 for name, path in self._ui.configitems(b"ui"):
797 for name, path in self._ui.configitems(b"ui"):
817 if name == b'ignore' or name.startswith(b'ignore.'):
798 if name == b'ignore' or name.startswith(b'ignore.'):
818 # we need to use os.path.join here rather than self._join
799 # we need to use os.path.join here rather than self._join
819 # because path is arbitrary and user-specified
800 # because path is arbitrary and user-specified
820 files.append(os.path.join(self._rootdir, util.expandpath(path)))
801 files.append(os.path.join(self._rootdir, util.expandpath(path)))
821 return files
802 return files
822
803
823 def _ignorefileandline(self, f):
804 def _ignorefileandline(self, f):
824 files = collections.deque(self._ignorefiles())
805 files = collections.deque(self._ignorefiles())
825 visited = set()
806 visited = set()
826 while files:
807 while files:
827 i = files.popleft()
808 i = files.popleft()
828 patterns = matchmod.readpatternfile(
809 patterns = matchmod.readpatternfile(
829 i, self._ui.warn, sourceinfo=True
810 i, self._ui.warn, sourceinfo=True
830 )
811 )
831 for pattern, lineno, line in patterns:
812 for pattern, lineno, line in patterns:
832 kind, p = matchmod._patsplit(pattern, b'glob')
813 kind, p = matchmod._patsplit(pattern, b'glob')
833 if kind == b"subinclude":
814 if kind == b"subinclude":
834 if p not in visited:
815 if p not in visited:
835 files.append(p)
816 files.append(p)
836 continue
817 continue
837 m = matchmod.match(
818 m = matchmod.match(
838 self._root, b'', [], [pattern], warn=self._ui.warn
819 self._root, b'', [], [pattern], warn=self._ui.warn
839 )
820 )
840 if m(f):
821 if m(f):
841 return (i, lineno, line)
822 return (i, lineno, line)
842 visited.add(i)
823 visited.add(i)
843 return (None, -1, b"")
824 return (None, -1, b"")
844
825
845 def _walkexplicit(self, match, subrepos):
826 def _walkexplicit(self, match, subrepos):
846 """Get stat data about the files explicitly specified by match.
827 """Get stat data about the files explicitly specified by match.
847
828
848 Return a triple (results, dirsfound, dirsnotfound).
829 Return a triple (results, dirsfound, dirsnotfound).
849 - results is a mapping from filename to stat result. It also contains
830 - results is a mapping from filename to stat result. It also contains
850 listings mapping subrepos and .hg to None.
831 listings mapping subrepos and .hg to None.
851 - dirsfound is a list of files found to be directories.
832 - dirsfound is a list of files found to be directories.
852 - dirsnotfound is a list of files that the dirstate thinks are
833 - dirsnotfound is a list of files that the dirstate thinks are
853 directories and that were not found."""
834 directories and that were not found."""
854
835
855 def badtype(mode):
836 def badtype(mode):
856 kind = _(b'unknown')
837 kind = _(b'unknown')
857 if stat.S_ISCHR(mode):
838 if stat.S_ISCHR(mode):
858 kind = _(b'character device')
839 kind = _(b'character device')
859 elif stat.S_ISBLK(mode):
840 elif stat.S_ISBLK(mode):
860 kind = _(b'block device')
841 kind = _(b'block device')
861 elif stat.S_ISFIFO(mode):
842 elif stat.S_ISFIFO(mode):
862 kind = _(b'fifo')
843 kind = _(b'fifo')
863 elif stat.S_ISSOCK(mode):
844 elif stat.S_ISSOCK(mode):
864 kind = _(b'socket')
845 kind = _(b'socket')
865 elif stat.S_ISDIR(mode):
846 elif stat.S_ISDIR(mode):
866 kind = _(b'directory')
847 kind = _(b'directory')
867 return _(b'unsupported file type (type is %s)') % kind
848 return _(b'unsupported file type (type is %s)') % kind
868
849
869 badfn = match.bad
850 badfn = match.bad
870 dmap = self._map
851 dmap = self._map
871 lstat = os.lstat
852 lstat = os.lstat
872 getkind = stat.S_IFMT
853 getkind = stat.S_IFMT
873 dirkind = stat.S_IFDIR
854 dirkind = stat.S_IFDIR
874 regkind = stat.S_IFREG
855 regkind = stat.S_IFREG
875 lnkkind = stat.S_IFLNK
856 lnkkind = stat.S_IFLNK
876 join = self._join
857 join = self._join
877 dirsfound = []
858 dirsfound = []
878 foundadd = dirsfound.append
859 foundadd = dirsfound.append
879 dirsnotfound = []
860 dirsnotfound = []
880 notfoundadd = dirsnotfound.append
861 notfoundadd = dirsnotfound.append
881
862
882 if not match.isexact() and self._checkcase:
863 if not match.isexact() and self._checkcase:
883 normalize = self._normalize
864 normalize = self._normalize
884 else:
865 else:
885 normalize = None
866 normalize = None
886
867
887 files = sorted(match.files())
868 files = sorted(match.files())
888 subrepos.sort()
869 subrepos.sort()
889 i, j = 0, 0
870 i, j = 0, 0
890 while i < len(files) and j < len(subrepos):
871 while i < len(files) and j < len(subrepos):
891 subpath = subrepos[j] + b"/"
872 subpath = subrepos[j] + b"/"
892 if files[i] < subpath:
873 if files[i] < subpath:
893 i += 1
874 i += 1
894 continue
875 continue
895 while i < len(files) and files[i].startswith(subpath):
876 while i < len(files) and files[i].startswith(subpath):
896 del files[i]
877 del files[i]
897 j += 1
878 j += 1
898
879
899 if not files or b'' in files:
880 if not files or b'' in files:
900 files = [b'']
881 files = [b'']
901 # constructing the foldmap is expensive, so don't do it for the
882 # constructing the foldmap is expensive, so don't do it for the
902 # common case where files is ['']
883 # common case where files is ['']
903 normalize = None
884 normalize = None
904 results = dict.fromkeys(subrepos)
885 results = dict.fromkeys(subrepos)
905 results[b'.hg'] = None
886 results[b'.hg'] = None
906
887
907 for ff in files:
888 for ff in files:
908 if normalize:
889 if normalize:
909 nf = normalize(ff, False, True)
890 nf = normalize(ff, False, True)
910 else:
891 else:
911 nf = ff
892 nf = ff
912 if nf in results:
893 if nf in results:
913 continue
894 continue
914
895
915 try:
896 try:
916 st = lstat(join(nf))
897 st = lstat(join(nf))
917 kind = getkind(st.st_mode)
898 kind = getkind(st.st_mode)
918 if kind == dirkind:
899 if kind == dirkind:
919 if nf in dmap:
900 if nf in dmap:
920 # file replaced by dir on disk but still in dirstate
901 # file replaced by dir on disk but still in dirstate
921 results[nf] = None
902 results[nf] = None
922 foundadd((nf, ff))
903 foundadd((nf, ff))
923 elif kind == regkind or kind == lnkkind:
904 elif kind == regkind or kind == lnkkind:
924 results[nf] = st
905 results[nf] = st
925 else:
906 else:
926 badfn(ff, badtype(kind))
907 badfn(ff, badtype(kind))
927 if nf in dmap:
908 if nf in dmap:
928 results[nf] = None
909 results[nf] = None
929 except OSError as inst: # nf not found on disk - it is dirstate only
910 except OSError as inst: # nf not found on disk - it is dirstate only
930 if nf in dmap: # does it exactly match a missing file?
911 if nf in dmap: # does it exactly match a missing file?
931 results[nf] = None
912 results[nf] = None
932 else: # does it match a missing directory?
913 else: # does it match a missing directory?
933 if self._map.hasdir(nf):
914 if self._map.hasdir(nf):
934 notfoundadd(nf)
915 notfoundadd(nf)
935 else:
916 else:
936 badfn(ff, encoding.strtolocal(inst.strerror))
917 badfn(ff, encoding.strtolocal(inst.strerror))
937
918
938 # match.files() may contain explicitly-specified paths that shouldn't
919 # match.files() may contain explicitly-specified paths that shouldn't
939 # be taken; drop them from the list of files found. dirsfound/notfound
920 # be taken; drop them from the list of files found. dirsfound/notfound
940 # aren't filtered here because they will be tested later.
921 # aren't filtered here because they will be tested later.
941 if match.anypats():
922 if match.anypats():
942 for f in list(results):
923 for f in list(results):
943 if f == b'.hg' or f in subrepos:
924 if f == b'.hg' or f in subrepos:
944 # keep sentinel to disable further out-of-repo walks
925 # keep sentinel to disable further out-of-repo walks
945 continue
926 continue
946 if not match(f):
927 if not match(f):
947 del results[f]
928 del results[f]
948
929
949 # Case insensitive filesystems cannot rely on lstat() failing to detect
930 # Case insensitive filesystems cannot rely on lstat() failing to detect
950 # a case-only rename. Prune the stat object for any file that does not
931 # a case-only rename. Prune the stat object for any file that does not
951 # match the case in the filesystem, if there are multiple files that
932 # match the case in the filesystem, if there are multiple files that
952 # normalize to the same path.
933 # normalize to the same path.
953 if match.isexact() and self._checkcase:
934 if match.isexact() and self._checkcase:
954 normed = {}
935 normed = {}
955
936
956 for f, st in pycompat.iteritems(results):
937 for f, st in pycompat.iteritems(results):
957 if st is None:
938 if st is None:
958 continue
939 continue
959
940
960 nc = util.normcase(f)
941 nc = util.normcase(f)
961 paths = normed.get(nc)
942 paths = normed.get(nc)
962
943
963 if paths is None:
944 if paths is None:
964 paths = set()
945 paths = set()
965 normed[nc] = paths
946 normed[nc] = paths
966
947
967 paths.add(f)
948 paths.add(f)
968
949
969 for norm, paths in pycompat.iteritems(normed):
950 for norm, paths in pycompat.iteritems(normed):
970 if len(paths) > 1:
951 if len(paths) > 1:
971 for path in paths:
952 for path in paths:
972 folded = self._discoverpath(
953 folded = self._discoverpath(
973 path, norm, True, None, self._map.dirfoldmap
954 path, norm, True, None, self._map.dirfoldmap
974 )
955 )
975 if path != folded:
956 if path != folded:
976 results[path] = None
957 results[path] = None
977
958
978 return results, dirsfound, dirsnotfound
959 return results, dirsfound, dirsnotfound
979
960
980 def walk(self, match, subrepos, unknown, ignored, full=True):
961 def walk(self, match, subrepos, unknown, ignored, full=True):
981 """
962 """
982 Walk recursively through the directory tree, finding all files
963 Walk recursively through the directory tree, finding all files
983 matched by match.
964 matched by match.
984
965
985 If full is False, maybe skip some known-clean files.
966 If full is False, maybe skip some known-clean files.
986
967
987 Return a dict mapping filename to stat-like object (either
968 Return a dict mapping filename to stat-like object (either
988 mercurial.osutil.stat instance or return value of os.stat()).
969 mercurial.osutil.stat instance or return value of os.stat()).
989
970
990 """
971 """
991 # full is a flag that extensions that hook into walk can use -- this
972 # full is a flag that extensions that hook into walk can use -- this
992 # implementation doesn't use it at all. This satisfies the contract
973 # implementation doesn't use it at all. This satisfies the contract
993 # because we only guarantee a "maybe".
974 # because we only guarantee a "maybe".
994
975
995 if ignored:
976 if ignored:
996 ignore = util.never
977 ignore = util.never
997 dirignore = util.never
978 dirignore = util.never
998 elif unknown:
979 elif unknown:
999 ignore = self._ignore
980 ignore = self._ignore
1000 dirignore = self._dirignore
981 dirignore = self._dirignore
1001 else:
982 else:
1002 # if not unknown and not ignored, drop dir recursion and step 2
983 # if not unknown and not ignored, drop dir recursion and step 2
1003 ignore = util.always
984 ignore = util.always
1004 dirignore = util.always
985 dirignore = util.always
1005
986
1006 matchfn = match.matchfn
987 matchfn = match.matchfn
1007 matchalways = match.always()
988 matchalways = match.always()
1008 matchtdir = match.traversedir
989 matchtdir = match.traversedir
1009 dmap = self._map
990 dmap = self._map
1010 listdir = util.listdir
991 listdir = util.listdir
1011 lstat = os.lstat
992 lstat = os.lstat
1012 dirkind = stat.S_IFDIR
993 dirkind = stat.S_IFDIR
1013 regkind = stat.S_IFREG
994 regkind = stat.S_IFREG
1014 lnkkind = stat.S_IFLNK
995 lnkkind = stat.S_IFLNK
1015 join = self._join
996 join = self._join
1016
997
1017 exact = skipstep3 = False
998 exact = skipstep3 = False
1018 if match.isexact(): # match.exact
999 if match.isexact(): # match.exact
1019 exact = True
1000 exact = True
1020 dirignore = util.always # skip step 2
1001 dirignore = util.always # skip step 2
1021 elif match.prefix(): # match.match, no patterns
1002 elif match.prefix(): # match.match, no patterns
1022 skipstep3 = True
1003 skipstep3 = True
1023
1004
1024 if not exact and self._checkcase:
1005 if not exact and self._checkcase:
1025 normalize = self._normalize
1006 normalize = self._normalize
1026 normalizefile = self._normalizefile
1007 normalizefile = self._normalizefile
1027 skipstep3 = False
1008 skipstep3 = False
1028 else:
1009 else:
1029 normalize = self._normalize
1010 normalize = self._normalize
1030 normalizefile = None
1011 normalizefile = None
1031
1012
1032 # step 1: find all explicit files
1013 # step 1: find all explicit files
1033 results, work, dirsnotfound = self._walkexplicit(match, subrepos)
1014 results, work, dirsnotfound = self._walkexplicit(match, subrepos)
1034 if matchtdir:
1015 if matchtdir:
1035 for d in work:
1016 for d in work:
1036 matchtdir(d[0])
1017 matchtdir(d[0])
1037 for d in dirsnotfound:
1018 for d in dirsnotfound:
1038 matchtdir(d)
1019 matchtdir(d)
1039
1020
1040 skipstep3 = skipstep3 and not (work or dirsnotfound)
1021 skipstep3 = skipstep3 and not (work or dirsnotfound)
1041 work = [d for d in work if not dirignore(d[0])]
1022 work = [d for d in work if not dirignore(d[0])]
1042
1023
1043 # step 2: visit subdirectories
1024 # step 2: visit subdirectories
1044 def traverse(work, alreadynormed):
1025 def traverse(work, alreadynormed):
1045 wadd = work.append
1026 wadd = work.append
1046 while work:
1027 while work:
1047 tracing.counter('dirstate.walk work', len(work))
1028 tracing.counter('dirstate.walk work', len(work))
1048 nd = work.pop()
1029 nd = work.pop()
1049 visitentries = match.visitchildrenset(nd)
1030 visitentries = match.visitchildrenset(nd)
1050 if not visitentries:
1031 if not visitentries:
1051 continue
1032 continue
1052 if visitentries == b'this' or visitentries == b'all':
1033 if visitentries == b'this' or visitentries == b'all':
1053 visitentries = None
1034 visitentries = None
1054 skip = None
1035 skip = None
1055 if nd != b'':
1036 if nd != b'':
1056 skip = b'.hg'
1037 skip = b'.hg'
1057 try:
1038 try:
1058 with tracing.log('dirstate.walk.traverse listdir %s', nd):
1039 with tracing.log('dirstate.walk.traverse listdir %s', nd):
1059 entries = listdir(join(nd), stat=True, skip=skip)
1040 entries = listdir(join(nd), stat=True, skip=skip)
1060 except OSError as inst:
1041 except OSError as inst:
1061 if inst.errno in (errno.EACCES, errno.ENOENT):
1042 if inst.errno in (errno.EACCES, errno.ENOENT):
1062 match.bad(
1043 match.bad(
1063 self.pathto(nd), encoding.strtolocal(inst.strerror)
1044 self.pathto(nd), encoding.strtolocal(inst.strerror)
1064 )
1045 )
1065 continue
1046 continue
1066 raise
1047 raise
1067 for f, kind, st in entries:
1048 for f, kind, st in entries:
1068 # Some matchers may return files in the visitentries set,
1049 # Some matchers may return files in the visitentries set,
1069 # instead of 'this', if the matcher explicitly mentions them
1050 # instead of 'this', if the matcher explicitly mentions them
1070 # and is not an exactmatcher. This is acceptable; we do not
1051 # and is not an exactmatcher. This is acceptable; we do not
1071 # make any hard assumptions about file-or-directory below
1052 # make any hard assumptions about file-or-directory below
1072 # based on the presence of `f` in visitentries. If
1053 # based on the presence of `f` in visitentries. If
1073 # visitchildrenset returned a set, we can always skip the
1054 # visitchildrenset returned a set, we can always skip the
1074 # entries *not* in the set it provided regardless of whether
1055 # entries *not* in the set it provided regardless of whether
1075 # they're actually a file or a directory.
1056 # they're actually a file or a directory.
1076 if visitentries and f not in visitentries:
1057 if visitentries and f not in visitentries:
1077 continue
1058 continue
1078 if normalizefile:
1059 if normalizefile:
1079 # even though f might be a directory, we're only
1060 # even though f might be a directory, we're only
1080 # interested in comparing it to files currently in the
1061 # interested in comparing it to files currently in the
1081 # dmap -- therefore normalizefile is enough
1062 # dmap -- therefore normalizefile is enough
1082 nf = normalizefile(
1063 nf = normalizefile(
1083 nd and (nd + b"/" + f) or f, True, True
1064 nd and (nd + b"/" + f) or f, True, True
1084 )
1065 )
1085 else:
1066 else:
1086 nf = nd and (nd + b"/" + f) or f
1067 nf = nd and (nd + b"/" + f) or f
1087 if nf not in results:
1068 if nf not in results:
1088 if kind == dirkind:
1069 if kind == dirkind:
1089 if not ignore(nf):
1070 if not ignore(nf):
1090 if matchtdir:
1071 if matchtdir:
1091 matchtdir(nf)
1072 matchtdir(nf)
1092 wadd(nf)
1073 wadd(nf)
1093 if nf in dmap and (matchalways or matchfn(nf)):
1074 if nf in dmap and (matchalways or matchfn(nf)):
1094 results[nf] = None
1075 results[nf] = None
1095 elif kind == regkind or kind == lnkkind:
1076 elif kind == regkind or kind == lnkkind:
1096 if nf in dmap:
1077 if nf in dmap:
1097 if matchalways or matchfn(nf):
1078 if matchalways or matchfn(nf):
1098 results[nf] = st
1079 results[nf] = st
1099 elif (matchalways or matchfn(nf)) and not ignore(
1080 elif (matchalways or matchfn(nf)) and not ignore(
1100 nf
1081 nf
1101 ):
1082 ):
1102 # unknown file -- normalize if necessary
1083 # unknown file -- normalize if necessary
1103 if not alreadynormed:
1084 if not alreadynormed:
1104 nf = normalize(nf, False, True)
1085 nf = normalize(nf, False, True)
1105 results[nf] = st
1086 results[nf] = st
1106 elif nf in dmap and (matchalways or matchfn(nf)):
1087 elif nf in dmap and (matchalways or matchfn(nf)):
1107 results[nf] = None
1088 results[nf] = None
1108
1089
1109 for nd, d in work:
1090 for nd, d in work:
1110 # alreadynormed means that processwork doesn't have to do any
1091 # alreadynormed means that processwork doesn't have to do any
1111 # expensive directory normalization
1092 # expensive directory normalization
1112 alreadynormed = not normalize or nd == d
1093 alreadynormed = not normalize or nd == d
1113 traverse([d], alreadynormed)
1094 traverse([d], alreadynormed)
1114
1095
1115 for s in subrepos:
1096 for s in subrepos:
1116 del results[s]
1097 del results[s]
1117 del results[b'.hg']
1098 del results[b'.hg']
1118
1099
1119 # step 3: visit remaining files from dmap
1100 # step 3: visit remaining files from dmap
1120 if not skipstep3 and not exact:
1101 if not skipstep3 and not exact:
1121 # If a dmap file is not in results yet, it was either
1102 # If a dmap file is not in results yet, it was either
1122 # a) not matching matchfn b) ignored, c) missing, or d) under a
1103 # a) not matching matchfn b) ignored, c) missing, or d) under a
1123 # symlink directory.
1104 # symlink directory.
1124 if not results and matchalways:
1105 if not results and matchalways:
1125 visit = [f for f in dmap]
1106 visit = [f for f in dmap]
1126 else:
1107 else:
1127 visit = [f for f in dmap if f not in results and matchfn(f)]
1108 visit = [f for f in dmap if f not in results and matchfn(f)]
1128 visit.sort()
1109 visit.sort()
1129
1110
1130 if unknown:
1111 if unknown:
1131 # unknown == True means we walked all dirs under the roots
1112 # unknown == True means we walked all dirs under the roots
1132 # that wasn't ignored, and everything that matched was stat'ed
1113 # that wasn't ignored, and everything that matched was stat'ed
1133 # and is already in results.
1114 # and is already in results.
1134 # The rest must thus be ignored or under a symlink.
1115 # The rest must thus be ignored or under a symlink.
1135 audit_path = pathutil.pathauditor(self._root, cached=True)
1116 audit_path = pathutil.pathauditor(self._root, cached=True)
1136
1117
1137 for nf in iter(visit):
1118 for nf in iter(visit):
1138 # If a stat for the same file was already added with a
1119 # If a stat for the same file was already added with a
1139 # different case, don't add one for this, since that would
1120 # different case, don't add one for this, since that would
1140 # make it appear as if the file exists under both names
1121 # make it appear as if the file exists under both names
1141 # on disk.
1122 # on disk.
1142 if (
1123 if (
1143 normalizefile
1124 normalizefile
1144 and normalizefile(nf, True, True) in results
1125 and normalizefile(nf, True, True) in results
1145 ):
1126 ):
1146 results[nf] = None
1127 results[nf] = None
1147 # Report ignored items in the dmap as long as they are not
1128 # Report ignored items in the dmap as long as they are not
1148 # under a symlink directory.
1129 # under a symlink directory.
1149 elif audit_path.check(nf):
1130 elif audit_path.check(nf):
1150 try:
1131 try:
1151 results[nf] = lstat(join(nf))
1132 results[nf] = lstat(join(nf))
1152 # file was just ignored, no links, and exists
1133 # file was just ignored, no links, and exists
1153 except OSError:
1134 except OSError:
1154 # file doesn't exist
1135 # file doesn't exist
1155 results[nf] = None
1136 results[nf] = None
1156 else:
1137 else:
1157 # It's either missing or under a symlink directory
1138 # It's either missing or under a symlink directory
1158 # which we in this case report as missing
1139 # which we in this case report as missing
1159 results[nf] = None
1140 results[nf] = None
1160 else:
1141 else:
1161 # We may not have walked the full directory tree above,
1142 # We may not have walked the full directory tree above,
1162 # so stat and check everything we missed.
1143 # so stat and check everything we missed.
1163 iv = iter(visit)
1144 iv = iter(visit)
1164 for st in util.statfiles([join(i) for i in visit]):
1145 for st in util.statfiles([join(i) for i in visit]):
1165 results[next(iv)] = st
1146 results[next(iv)] = st
1166 return results
1147 return results
1167
1148
1168 def _rust_status(self, matcher, list_clean, list_ignored, list_unknown):
1149 def _rust_status(self, matcher, list_clean, list_ignored, list_unknown):
1169 # Force Rayon (Rust parallelism library) to respect the number of
1150 # Force Rayon (Rust parallelism library) to respect the number of
1170 # workers. This is a temporary workaround until Rust code knows
1151 # workers. This is a temporary workaround until Rust code knows
1171 # how to read the config file.
1152 # how to read the config file.
1172 numcpus = self._ui.configint(b"worker", b"numcpus")
1153 numcpus = self._ui.configint(b"worker", b"numcpus")
1173 if numcpus is not None:
1154 if numcpus is not None:
1174 encoding.environ.setdefault(b'RAYON_NUM_THREADS', b'%d' % numcpus)
1155 encoding.environ.setdefault(b'RAYON_NUM_THREADS', b'%d' % numcpus)
1175
1156
1176 workers_enabled = self._ui.configbool(b"worker", b"enabled", True)
1157 workers_enabled = self._ui.configbool(b"worker", b"enabled", True)
1177 if not workers_enabled:
1158 if not workers_enabled:
1178 encoding.environ[b"RAYON_NUM_THREADS"] = b"1"
1159 encoding.environ[b"RAYON_NUM_THREADS"] = b"1"
1179
1160
1180 (
1161 (
1181 lookup,
1162 lookup,
1182 modified,
1163 modified,
1183 added,
1164 added,
1184 removed,
1165 removed,
1185 deleted,
1166 deleted,
1186 clean,
1167 clean,
1187 ignored,
1168 ignored,
1188 unknown,
1169 unknown,
1189 warnings,
1170 warnings,
1190 bad,
1171 bad,
1191 traversed,
1172 traversed,
1192 dirty,
1173 dirty,
1193 ) = rustmod.status(
1174 ) = rustmod.status(
1194 self._map._map,
1175 self._map._map,
1195 matcher,
1176 matcher,
1196 self._rootdir,
1177 self._rootdir,
1197 self._ignorefiles(),
1178 self._ignorefiles(),
1198 self._checkexec,
1179 self._checkexec,
1199 bool(list_clean),
1180 bool(list_clean),
1200 bool(list_ignored),
1181 bool(list_ignored),
1201 bool(list_unknown),
1182 bool(list_unknown),
1202 bool(matcher.traversedir),
1183 bool(matcher.traversedir),
1203 )
1184 )
1204
1185
1205 self._dirty |= dirty
1186 self._dirty |= dirty
1206
1187
1207 if matcher.traversedir:
1188 if matcher.traversedir:
1208 for dir in traversed:
1189 for dir in traversed:
1209 matcher.traversedir(dir)
1190 matcher.traversedir(dir)
1210
1191
1211 if self._ui.warn:
1192 if self._ui.warn:
1212 for item in warnings:
1193 for item in warnings:
1213 if isinstance(item, tuple):
1194 if isinstance(item, tuple):
1214 file_path, syntax = item
1195 file_path, syntax = item
1215 msg = _(b"%s: ignoring invalid syntax '%s'\n") % (
1196 msg = _(b"%s: ignoring invalid syntax '%s'\n") % (
1216 file_path,
1197 file_path,
1217 syntax,
1198 syntax,
1218 )
1199 )
1219 self._ui.warn(msg)
1200 self._ui.warn(msg)
1220 else:
1201 else:
1221 msg = _(b"skipping unreadable pattern file '%s': %s\n")
1202 msg = _(b"skipping unreadable pattern file '%s': %s\n")
1222 self._ui.warn(
1203 self._ui.warn(
1223 msg
1204 msg
1224 % (
1205 % (
1225 pathutil.canonpath(
1206 pathutil.canonpath(
1226 self._rootdir, self._rootdir, item
1207 self._rootdir, self._rootdir, item
1227 ),
1208 ),
1228 b"No such file or directory",
1209 b"No such file or directory",
1229 )
1210 )
1230 )
1211 )
1231
1212
1232 for (fn, message) in bad:
1213 for (fn, message) in bad:
1233 matcher.bad(fn, encoding.strtolocal(message))
1214 matcher.bad(fn, encoding.strtolocal(message))
1234
1215
1235 status = scmutil.status(
1216 status = scmutil.status(
1236 modified=modified,
1217 modified=modified,
1237 added=added,
1218 added=added,
1238 removed=removed,
1219 removed=removed,
1239 deleted=deleted,
1220 deleted=deleted,
1240 unknown=unknown,
1221 unknown=unknown,
1241 ignored=ignored,
1222 ignored=ignored,
1242 clean=clean,
1223 clean=clean,
1243 )
1224 )
1244 return (lookup, status)
1225 return (lookup, status)
1245
1226
1246 def status(self, match, subrepos, ignored, clean, unknown):
1227 def status(self, match, subrepos, ignored, clean, unknown):
1247 """Determine the status of the working copy relative to the
1228 """Determine the status of the working copy relative to the
1248 dirstate and return a pair of (unsure, status), where status is of type
1229 dirstate and return a pair of (unsure, status), where status is of type
1249 scmutil.status and:
1230 scmutil.status and:
1250
1231
1251 unsure:
1232 unsure:
1252 files that might have been modified since the dirstate was
1233 files that might have been modified since the dirstate was
1253 written, but need to be read to be sure (size is the same
1234 written, but need to be read to be sure (size is the same
1254 but mtime differs)
1235 but mtime differs)
1255 status.modified:
1236 status.modified:
1256 files that have definitely been modified since the dirstate
1237 files that have definitely been modified since the dirstate
1257 was written (different size or mode)
1238 was written (different size or mode)
1258 status.clean:
1239 status.clean:
1259 files that have definitely not been modified since the
1240 files that have definitely not been modified since the
1260 dirstate was written
1241 dirstate was written
1261 """
1242 """
1262 listignored, listclean, listunknown = ignored, clean, unknown
1243 listignored, listclean, listunknown = ignored, clean, unknown
1263 lookup, modified, added, unknown, ignored = [], [], [], [], []
1244 lookup, modified, added, unknown, ignored = [], [], [], [], []
1264 removed, deleted, clean = [], [], []
1245 removed, deleted, clean = [], [], []
1265
1246
1266 dmap = self._map
1247 dmap = self._map
1267 dmap.preload()
1248 dmap.preload()
1268
1249
1269 use_rust = True
1250 use_rust = True
1270
1251
1271 allowed_matchers = (
1252 allowed_matchers = (
1272 matchmod.alwaysmatcher,
1253 matchmod.alwaysmatcher,
1273 matchmod.exactmatcher,
1254 matchmod.exactmatcher,
1274 matchmod.includematcher,
1255 matchmod.includematcher,
1275 )
1256 )
1276
1257
1277 if rustmod is None:
1258 if rustmod is None:
1278 use_rust = False
1259 use_rust = False
1279 elif self._checkcase:
1260 elif self._checkcase:
1280 # Case-insensitive filesystems are not handled yet
1261 # Case-insensitive filesystems are not handled yet
1281 use_rust = False
1262 use_rust = False
1282 elif subrepos:
1263 elif subrepos:
1283 use_rust = False
1264 use_rust = False
1284 elif sparse.enabled:
1265 elif sparse.enabled:
1285 use_rust = False
1266 use_rust = False
1286 elif not isinstance(match, allowed_matchers):
1267 elif not isinstance(match, allowed_matchers):
1287 # Some matchers have yet to be implemented
1268 # Some matchers have yet to be implemented
1288 use_rust = False
1269 use_rust = False
1289
1270
1290 # Get the time from the filesystem so we can disambiguate files that
1271 # Get the time from the filesystem so we can disambiguate files that
1291 # appear modified in the present or future.
1272 # appear modified in the present or future.
1292 try:
1273 try:
1293 mtime_boundary = timestamp.get_fs_now(self._opener)
1274 mtime_boundary = timestamp.get_fs_now(self._opener)
1294 except OSError:
1275 except OSError:
1295 # In largefiles or readonly context
1276 # In largefiles or readonly context
1296 mtime_boundary = None
1277 mtime_boundary = None
1297
1278
1298 if use_rust:
1279 if use_rust:
1299 try:
1280 try:
1300 res = self._rust_status(
1281 res = self._rust_status(
1301 match, listclean, listignored, listunknown
1282 match, listclean, listignored, listunknown
1302 )
1283 )
1303 return res + (mtime_boundary,)
1284 return res + (mtime_boundary,)
1304 except rustmod.FallbackError:
1285 except rustmod.FallbackError:
1305 pass
1286 pass
1306
1287
1307 def noop(f):
1288 def noop(f):
1308 pass
1289 pass
1309
1290
1310 dcontains = dmap.__contains__
1291 dcontains = dmap.__contains__
1311 dget = dmap.__getitem__
1292 dget = dmap.__getitem__
1312 ladd = lookup.append # aka "unsure"
1293 ladd = lookup.append # aka "unsure"
1313 madd = modified.append
1294 madd = modified.append
1314 aadd = added.append
1295 aadd = added.append
1315 uadd = unknown.append if listunknown else noop
1296 uadd = unknown.append if listunknown else noop
1316 iadd = ignored.append if listignored else noop
1297 iadd = ignored.append if listignored else noop
1317 radd = removed.append
1298 radd = removed.append
1318 dadd = deleted.append
1299 dadd = deleted.append
1319 cadd = clean.append if listclean else noop
1300 cadd = clean.append if listclean else noop
1320 mexact = match.exact
1301 mexact = match.exact
1321 dirignore = self._dirignore
1302 dirignore = self._dirignore
1322 checkexec = self._checkexec
1303 checkexec = self._checkexec
1323 checklink = self._checklink
1304 checklink = self._checklink
1324 copymap = self._map.copymap
1305 copymap = self._map.copymap
1325
1306
1326 # We need to do full walks when either
1307 # We need to do full walks when either
1327 # - we're listing all clean files, or
1308 # - we're listing all clean files, or
1328 # - match.traversedir does something, because match.traversedir should
1309 # - match.traversedir does something, because match.traversedir should
1329 # be called for every dir in the working dir
1310 # be called for every dir in the working dir
1330 full = listclean or match.traversedir is not None
1311 full = listclean or match.traversedir is not None
1331 for fn, st in pycompat.iteritems(
1312 for fn, st in pycompat.iteritems(
1332 self.walk(match, subrepos, listunknown, listignored, full=full)
1313 self.walk(match, subrepos, listunknown, listignored, full=full)
1333 ):
1314 ):
1334 if not dcontains(fn):
1315 if not dcontains(fn):
1335 if (listignored or mexact(fn)) and dirignore(fn):
1316 if (listignored or mexact(fn)) and dirignore(fn):
1336 if listignored:
1317 if listignored:
1337 iadd(fn)
1318 iadd(fn)
1338 else:
1319 else:
1339 uadd(fn)
1320 uadd(fn)
1340 continue
1321 continue
1341
1322
1342 t = dget(fn)
1323 t = dget(fn)
1343 mode = t.mode
1324 mode = t.mode
1344 size = t.size
1325 size = t.size
1345
1326
1346 if not st and t.tracked:
1327 if not st and t.tracked:
1347 dadd(fn)
1328 dadd(fn)
1348 elif t.p2_info:
1329 elif t.p2_info:
1349 madd(fn)
1330 madd(fn)
1350 elif t.added:
1331 elif t.added:
1351 aadd(fn)
1332 aadd(fn)
1352 elif t.removed:
1333 elif t.removed:
1353 radd(fn)
1334 radd(fn)
1354 elif t.tracked:
1335 elif t.tracked:
1355 if not checklink and t.has_fallback_symlink:
1336 if not checklink and t.has_fallback_symlink:
1356 # If the file system does not support symlink, the mode
1337 # If the file system does not support symlink, the mode
1357 # might not be correctly stored in the dirstate, so do not
1338 # might not be correctly stored in the dirstate, so do not
1358 # trust it.
1339 # trust it.
1359 ladd(fn)
1340 ladd(fn)
1360 elif not checkexec and t.has_fallback_exec:
1341 elif not checkexec and t.has_fallback_exec:
1361 # If the file system does not support exec bits, the mode
1342 # If the file system does not support exec bits, the mode
1362 # might not be correctly stored in the dirstate, so do not
1343 # might not be correctly stored in the dirstate, so do not
1363 # trust it.
1344 # trust it.
1364 ladd(fn)
1345 ladd(fn)
1365 elif (
1346 elif (
1366 size >= 0
1347 size >= 0
1367 and (
1348 and (
1368 (size != st.st_size and size != st.st_size & _rangemask)
1349 (size != st.st_size and size != st.st_size & _rangemask)
1369 or ((mode ^ st.st_mode) & 0o100 and checkexec)
1350 or ((mode ^ st.st_mode) & 0o100 and checkexec)
1370 )
1351 )
1371 or fn in copymap
1352 or fn in copymap
1372 ):
1353 ):
1373 if stat.S_ISLNK(st.st_mode) and size != st.st_size:
1354 if stat.S_ISLNK(st.st_mode) and size != st.st_size:
1374 # issue6456: Size returned may be longer due to
1355 # issue6456: Size returned may be longer due to
1375 # encryption on EXT-4 fscrypt, undecided.
1356 # encryption on EXT-4 fscrypt, undecided.
1376 ladd(fn)
1357 ladd(fn)
1377 else:
1358 else:
1378 madd(fn)
1359 madd(fn)
1379 elif not t.mtime_likely_equal_to(timestamp.mtime_of(st)):
1360 elif not t.mtime_likely_equal_to(timestamp.mtime_of(st)):
1380 # There might be a change in the future if for example the
1361 # There might be a change in the future if for example the
1381 # internal clock is off, but this is a case where the issues
1362 # internal clock is off, but this is a case where the issues
1382 # the user would face would be a lot worse and there is
1363 # the user would face would be a lot worse and there is
1383 # nothing we can really do.
1364 # nothing we can really do.
1384 ladd(fn)
1365 ladd(fn)
1385 elif listclean:
1366 elif listclean:
1386 cadd(fn)
1367 cadd(fn)
1387 status = scmutil.status(
1368 status = scmutil.status(
1388 modified, added, removed, deleted, unknown, ignored, clean
1369 modified, added, removed, deleted, unknown, ignored, clean
1389 )
1370 )
1390 return (lookup, status, mtime_boundary)
1371 return (lookup, status, mtime_boundary)
1391
1372
1392 def matches(self, match):
1373 def matches(self, match):
1393 """
1374 """
1394 return files in the dirstate (in whatever state) filtered by match
1375 return files in the dirstate (in whatever state) filtered by match
1395 """
1376 """
1396 dmap = self._map
1377 dmap = self._map
1397 if rustmod is not None:
1378 if rustmod is not None:
1398 dmap = self._map._map
1379 dmap = self._map._map
1399
1380
1400 if match.always():
1381 if match.always():
1401 return dmap.keys()
1382 return dmap.keys()
1402 files = match.files()
1383 files = match.files()
1403 if match.isexact():
1384 if match.isexact():
1404 # fast path -- filter the other way around, since typically files is
1385 # fast path -- filter the other way around, since typically files is
1405 # much smaller than dmap
1386 # much smaller than dmap
1406 return [f for f in files if f in dmap]
1387 return [f for f in files if f in dmap]
1407 if match.prefix() and all(fn in dmap for fn in files):
1388 if match.prefix() and all(fn in dmap for fn in files):
1408 # fast path -- all the values are known to be files, so just return
1389 # fast path -- all the values are known to be files, so just return
1409 # that
1390 # that
1410 return list(files)
1391 return list(files)
1411 return [f for f in dmap if match(f)]
1392 return [f for f in dmap if match(f)]
1412
1393
1413 def _actualfilename(self, tr):
1394 def _actualfilename(self, tr):
1414 if tr:
1395 if tr:
1415 return self._pendingfilename
1396 return self._pendingfilename
1416 else:
1397 else:
1417 return self._filename
1398 return self._filename
1418
1399
1419 def savebackup(self, tr, backupname):
1400 def savebackup(self, tr, backupname):
1420 '''Save current dirstate into backup file'''
1401 '''Save current dirstate into backup file'''
1421 filename = self._actualfilename(tr)
1402 filename = self._actualfilename(tr)
1422 assert backupname != filename
1403 assert backupname != filename
1423
1404
1424 # use '_writedirstate' instead of 'write' to write changes certainly,
1405 # use '_writedirstate' instead of 'write' to write changes certainly,
1425 # because the latter omits writing out if transaction is running.
1406 # because the latter omits writing out if transaction is running.
1426 # output file will be used to create backup of dirstate at this point.
1407 # output file will be used to create backup of dirstate at this point.
1427 if self._dirty or not self._opener.exists(filename):
1408 if self._dirty or not self._opener.exists(filename):
1428 self._writedirstate(
1409 self._writedirstate(
1429 tr,
1410 tr,
1430 self._opener(filename, b"w", atomictemp=True, checkambig=True),
1411 self._opener(filename, b"w", atomictemp=True, checkambig=True),
1431 )
1412 )
1432
1413
1433 if tr:
1414 if tr:
1434 # ensure that subsequent tr.writepending returns True for
1415 # ensure that subsequent tr.writepending returns True for
1435 # changes written out above, even if dirstate is never
1416 # changes written out above, even if dirstate is never
1436 # changed after this
1417 # changed after this
1437 tr.addfilegenerator(
1418 tr.addfilegenerator(
1438 b'dirstate',
1419 b'dirstate',
1439 (self._filename,),
1420 (self._filename,),
1440 lambda f: self._writedirstate(tr, f),
1421 lambda f: self._writedirstate(tr, f),
1441 location=b'plain',
1422 location=b'plain',
1442 )
1423 )
1443
1424
1444 # ensure that pending file written above is unlinked at
1425 # ensure that pending file written above is unlinked at
1445 # failure, even if tr.writepending isn't invoked until the
1426 # failure, even if tr.writepending isn't invoked until the
1446 # end of this transaction
1427 # end of this transaction
1447 tr.registertmp(filename, location=b'plain')
1428 tr.registertmp(filename, location=b'plain')
1448
1429
1449 self._opener.tryunlink(backupname)
1430 self._opener.tryunlink(backupname)
1450 # hardlink backup is okay because _writedirstate is always called
1431 # hardlink backup is okay because _writedirstate is always called
1451 # with an "atomictemp=True" file.
1432 # with an "atomictemp=True" file.
1452 util.copyfile(
1433 util.copyfile(
1453 self._opener.join(filename),
1434 self._opener.join(filename),
1454 self._opener.join(backupname),
1435 self._opener.join(backupname),
1455 hardlink=True,
1436 hardlink=True,
1456 )
1437 )
1457
1438
1458 def restorebackup(self, tr, backupname):
1439 def restorebackup(self, tr, backupname):
1459 '''Restore dirstate by backup file'''
1440 '''Restore dirstate by backup file'''
1460 # this "invalidate()" prevents "wlock.release()" from writing
1441 # this "invalidate()" prevents "wlock.release()" from writing
1461 # changes of dirstate out after restoring from backup file
1442 # changes of dirstate out after restoring from backup file
1462 self.invalidate()
1443 self.invalidate()
1463 filename = self._actualfilename(tr)
1444 filename = self._actualfilename(tr)
1464 o = self._opener
1445 o = self._opener
1465 if util.samefile(o.join(backupname), o.join(filename)):
1446 if util.samefile(o.join(backupname), o.join(filename)):
1466 o.unlink(backupname)
1447 o.unlink(backupname)
1467 else:
1448 else:
1468 o.rename(backupname, filename, checkambig=True)
1449 o.rename(backupname, filename, checkambig=True)
1469
1450
1470 def clearbackup(self, tr, backupname):
1451 def clearbackup(self, tr, backupname):
1471 '''Clear backup file'''
1452 '''Clear backup file'''
1472 self._opener.unlink(backupname)
1453 self._opener.unlink(backupname)
1473
1454
1474 def verify(self, m1, m2):
1455 def verify(self, m1, m2):
1475 """check the dirstate content again the parent manifest and yield errors"""
1456 """check the dirstate content again the parent manifest and yield errors"""
1476 missing_from_p1 = b"%s in state %s, but not in manifest1\n"
1457 missing_from_p1 = b"%s in state %s, but not in manifest1\n"
1477 unexpected_in_p1 = b"%s in state %s, but also in manifest1\n"
1458 unexpected_in_p1 = b"%s in state %s, but also in manifest1\n"
1478 missing_from_ps = b"%s in state %s, but not in either manifest\n"
1459 missing_from_ps = b"%s in state %s, but not in either manifest\n"
1479 missing_from_ds = b"%s in manifest1, but listed as state %s\n"
1460 missing_from_ds = b"%s in manifest1, but listed as state %s\n"
1480 for f, entry in self.items():
1461 for f, entry in self.items():
1481 state = entry.state
1462 state = entry.state
1482 if state in b"nr" and f not in m1:
1463 if state in b"nr" and f not in m1:
1483 yield (missing_from_p1, f, state)
1464 yield (missing_from_p1, f, state)
1484 if state in b"a" and f in m1:
1465 if state in b"a" and f in m1:
1485 yield (unexpected_in_p1, f, state)
1466 yield (unexpected_in_p1, f, state)
1486 if state in b"m" and f not in m1 and f not in m2:
1467 if state in b"m" and f not in m1 and f not in m2:
1487 yield (missing_from_ps, f, state)
1468 yield (missing_from_ps, f, state)
1488 for f in m1:
1469 for f in m1:
1489 state = self.get_entry(f).state
1470 state = self.get_entry(f).state
1490 if state not in b"nrm":
1471 if state not in b"nrm":
1491 yield (missing_from_ds, f, state)
1472 yield (missing_from_ds, f, state)
@@ -1,732 +1,732 b''
1 # dirstatemap.py
1 # dirstatemap.py
2 #
2 #
3 # This software may be used and distributed according to the terms of the
3 # This software may be used and distributed according to the terms of the
4 # GNU General Public License version 2 or any later version.
4 # GNU General Public License version 2 or any later version.
5
5
6 from __future__ import absolute_import
6 from __future__ import absolute_import
7
7
8 import errno
8 import errno
9
9
10 from .i18n import _
10 from .i18n import _
11
11
12 from . import (
12 from . import (
13 error,
13 error,
14 pathutil,
14 pathutil,
15 policy,
15 policy,
16 pycompat,
16 pycompat,
17 txnutil,
17 txnutil,
18 util,
18 util,
19 )
19 )
20
20
21 from .dirstateutils import (
21 from .dirstateutils import (
22 docket as docketmod,
22 docket as docketmod,
23 v2,
23 v2,
24 )
24 )
25
25
26 parsers = policy.importmod('parsers')
26 parsers = policy.importmod('parsers')
27 rustmod = policy.importrust('dirstate')
27 rustmod = policy.importrust('dirstate')
28
28
29 propertycache = util.propertycache
29 propertycache = util.propertycache
30
30
31 if rustmod is None:
31 if rustmod is None:
32 DirstateItem = parsers.DirstateItem
32 DirstateItem = parsers.DirstateItem
33 else:
33 else:
34 DirstateItem = rustmod.DirstateItem
34 DirstateItem = rustmod.DirstateItem
35
35
36 rangemask = 0x7FFFFFFF
36 rangemask = 0x7FFFFFFF
37
37
38
38
39 class _dirstatemapcommon(object):
39 class _dirstatemapcommon(object):
40 """
40 """
41 Methods that are identical for both implementations of the dirstatemap
41 Methods that are identical for both implementations of the dirstatemap
42 class, with and without Rust extensions enabled.
42 class, with and without Rust extensions enabled.
43 """
43 """
44
44
45 # please pytype
45 # please pytype
46
46
47 _map = None
47 _map = None
48 copymap = None
48 copymap = None
49
49
50 def __init__(self, ui, opener, root, nodeconstants, use_dirstate_v2):
50 def __init__(self, ui, opener, root, nodeconstants, use_dirstate_v2):
51 self._use_dirstate_v2 = use_dirstate_v2
51 self._use_dirstate_v2 = use_dirstate_v2
52 self._nodeconstants = nodeconstants
52 self._nodeconstants = nodeconstants
53 self._ui = ui
53 self._ui = ui
54 self._opener = opener
54 self._opener = opener
55 self._root = root
55 self._root = root
56 self._filename = b'dirstate'
56 self._filename = b'dirstate'
57 self._nodelen = 20 # Also update Rust code when changing this!
57 self._nodelen = 20 # Also update Rust code when changing this!
58 self._parents = None
58 self._parents = None
59 self._dirtyparents = False
59 self._dirtyparents = False
60 self._docket = None
60 self._docket = None
61
61
62 # for consistent view between _pl() and _read() invocations
62 # for consistent view between _pl() and _read() invocations
63 self._pendingmode = None
63 self._pendingmode = None
64
64
65 def preload(self):
65 def preload(self):
66 """Loads the underlying data, if it's not already loaded"""
66 """Loads the underlying data, if it's not already loaded"""
67 self._map
67 self._map
68
68
69 def get(self, key, default=None):
69 def get(self, key, default=None):
70 return self._map.get(key, default)
70 return self._map.get(key, default)
71
71
72 def __len__(self):
72 def __len__(self):
73 return len(self._map)
73 return len(self._map)
74
74
75 def __iter__(self):
75 def __iter__(self):
76 return iter(self._map)
76 return iter(self._map)
77
77
78 def __contains__(self, key):
78 def __contains__(self, key):
79 return key in self._map
79 return key in self._map
80
80
81 def __getitem__(self, item):
81 def __getitem__(self, item):
82 return self._map[item]
82 return self._map[item]
83
83
84 ### sub-class utility method
84 ### sub-class utility method
85 #
85 #
86 # Use to allow for generic implementation of some method while still coping
86 # Use to allow for generic implementation of some method while still coping
87 # with minor difference between implementation.
87 # with minor difference between implementation.
88
88
89 def _dirs_incr(self, filename, old_entry=None):
89 def _dirs_incr(self, filename, old_entry=None):
90 """incremente the dirstate counter if applicable
90 """incremente the dirstate counter if applicable
91
91
92 This might be a no-op for some subclass who deal with directory
92 This might be a no-op for some subclass who deal with directory
93 tracking in a different way.
93 tracking in a different way.
94 """
94 """
95
95
96 def _dirs_decr(self, filename, old_entry=None, remove_variant=False):
96 def _dirs_decr(self, filename, old_entry=None, remove_variant=False):
97 """decremente the dirstate counter if applicable
97 """decremente the dirstate counter if applicable
98
98
99 This might be a no-op for some subclass who deal with directory
99 This might be a no-op for some subclass who deal with directory
100 tracking in a different way.
100 tracking in a different way.
101 """
101 """
102
102
103 def _refresh_entry(self, f, entry):
103 def _refresh_entry(self, f, entry):
104 """record updated state of an entry"""
104 """record updated state of an entry"""
105
105
106 def _insert_entry(self, f, entry):
106 def _insert_entry(self, f, entry):
107 """add a new dirstate entry (or replace an unrelated one)
107 """add a new dirstate entry (or replace an unrelated one)
108
108
109 The fact it is actually new is the responsability of the caller
109 The fact it is actually new is the responsability of the caller
110 """
110 """
111
111
112 def _drop_entry(self, f):
112 def _drop_entry(self, f):
113 """remove any entry for file f
113 """remove any entry for file f
114
114
115 This should also drop associated copy information
115 This should also drop associated copy information
116
116
117 The fact we actually need to drop it is the responsability of the caller"""
117 The fact we actually need to drop it is the responsability of the caller"""
118
118
119 ### method to manipulate the entries
119 ### method to manipulate the entries
120
120
121 def set_possibly_dirty(self, filename):
121 def set_possibly_dirty(self, filename):
122 """record that the current state of the file on disk is unknown"""
122 """record that the current state of the file on disk is unknown"""
123 entry = self[filename]
123 entry = self[filename]
124 entry.set_possibly_dirty()
124 entry.set_possibly_dirty()
125 self._refresh_entry(filename, entry)
125 self._refresh_entry(filename, entry)
126
126
127 def set_clean(self, filename, mode, size, mtime):
127 def set_clean(self, filename, mode, size, mtime):
128 """mark a file as back to a clean state"""
128 """mark a file as back to a clean state"""
129 entry = self[filename]
129 entry = self[filename]
130 size = size & rangemask
130 size = size & rangemask
131 entry.set_clean(mode, size, mtime)
131 entry.set_clean(mode, size, mtime)
132 self._refresh_entry(filename, entry)
132 self._refresh_entry(filename, entry)
133 self.copymap.pop(filename, None)
133 self.copymap.pop(filename, None)
134
134
135 def set_tracked(self, filename):
135 def set_tracked(self, filename):
136 new = False
136 new = False
137 entry = self.get(filename)
137 entry = self.get(filename)
138 if entry is None:
138 if entry is None:
139 self._dirs_incr(filename)
139 self._dirs_incr(filename)
140 entry = DirstateItem(
140 entry = DirstateItem(
141 wc_tracked=True,
141 wc_tracked=True,
142 )
142 )
143
143
144 self._insert_entry(filename, entry)
144 self._insert_entry(filename, entry)
145 new = True
145 new = True
146 elif not entry.tracked:
146 elif not entry.tracked:
147 self._dirs_incr(filename, entry)
147 self._dirs_incr(filename, entry)
148 entry.set_tracked()
148 entry.set_tracked()
149 self._refresh_entry(filename, entry)
149 self._refresh_entry(filename, entry)
150 new = True
150 new = True
151 else:
151 else:
152 # XXX This is probably overkill for more case, but we need this to
152 # XXX This is probably overkill for more case, but we need this to
153 # fully replace the `normallookup` call with `set_tracked` one.
153 # fully replace the `normallookup` call with `set_tracked` one.
154 # Consider smoothing this in the future.
154 # Consider smoothing this in the future.
155 entry.set_possibly_dirty()
155 entry.set_possibly_dirty()
156 self._refresh_entry(filename, entry)
156 self._refresh_entry(filename, entry)
157 return new
157 return new
158
158
159 def set_untracked(self, f):
159 def set_untracked(self, f):
160 """Mark a file as no longer tracked in the dirstate map"""
160 """Mark a file as no longer tracked in the dirstate map"""
161 entry = self.get(f)
161 entry = self.get(f)
162 if entry is None:
162 if entry is None:
163 return False
163 return False
164 else:
164 else:
165 self._dirs_decr(f, old_entry=entry, remove_variant=not entry.added)
165 self._dirs_decr(f, old_entry=entry, remove_variant=not entry.added)
166 if not entry.p2_info:
166 if not entry.p2_info:
167 self.copymap.pop(f, None)
167 self.copymap.pop(f, None)
168 entry.set_untracked()
168 entry.set_untracked()
169 self._refresh_entry(f, entry)
169 self._refresh_entry(f, entry)
170 return True
170 return True
171
171
172 def reset_state(
172 def reset_state(
173 self,
173 self,
174 filename,
174 filename,
175 wc_tracked=False,
175 wc_tracked=False,
176 p1_tracked=False,
176 p1_tracked=False,
177 p2_info=False,
177 p2_info=False,
178 has_meaningful_mtime=True,
178 has_meaningful_mtime=True,
179 has_meaningful_data=True,
179 has_meaningful_data=True,
180 parentfiledata=None,
180 parentfiledata=None,
181 ):
181 ):
182 """Set a entry to a given state, diregarding all previous state
182 """Set a entry to a given state, diregarding all previous state
183
183
184 This is to be used by the part of the dirstate API dedicated to
184 This is to be used by the part of the dirstate API dedicated to
185 adjusting the dirstate after a update/merge.
185 adjusting the dirstate after a update/merge.
186
186
187 note: calling this might result to no entry existing at all if the
187 note: calling this might result to no entry existing at all if the
188 dirstate map does not see any point at having one for this file
188 dirstate map does not see any point at having one for this file
189 anymore.
189 anymore.
190 """
190 """
191 # copy information are now outdated
191 # copy information are now outdated
192 # (maybe new information should be in directly passed to this function)
192 # (maybe new information should be in directly passed to this function)
193 self.copymap.pop(filename, None)
193 self.copymap.pop(filename, None)
194
194
195 if not (p1_tracked or p2_info or wc_tracked):
195 if not (p1_tracked or p2_info or wc_tracked):
196 old_entry = self._map.get(filename)
196 old_entry = self._map.get(filename)
197 self._drop_entry(filename)
197 self._drop_entry(filename)
198 self._dirs_decr(filename, old_entry=old_entry)
198 self._dirs_decr(filename, old_entry=old_entry)
199 return
199 return
200
200
201 old_entry = self._map.get(filename)
201 old_entry = self._map.get(filename)
202 self._dirs_incr(filename, old_entry)
202 self._dirs_incr(filename, old_entry)
203 entry = DirstateItem(
203 entry = DirstateItem(
204 wc_tracked=wc_tracked,
204 wc_tracked=wc_tracked,
205 p1_tracked=p1_tracked,
205 p1_tracked=p1_tracked,
206 p2_info=p2_info,
206 p2_info=p2_info,
207 has_meaningful_mtime=has_meaningful_mtime,
207 has_meaningful_mtime=has_meaningful_mtime,
208 parentfiledata=parentfiledata,
208 parentfiledata=parentfiledata,
209 )
209 )
210 self._insert_entry(filename, entry)
210 self._insert_entry(filename, entry)
211
211
212 ### disk interaction
212 ### disk interaction
213
213
214 def _opendirstatefile(self):
214 def _opendirstatefile(self):
215 fp, mode = txnutil.trypending(self._root, self._opener, self._filename)
215 fp, mode = txnutil.trypending(self._root, self._opener, self._filename)
216 if self._pendingmode is not None and self._pendingmode != mode:
216 if self._pendingmode is not None and self._pendingmode != mode:
217 fp.close()
217 fp.close()
218 raise error.Abort(
218 raise error.Abort(
219 _(b'working directory state may be changed parallelly')
219 _(b'working directory state may be changed parallelly')
220 )
220 )
221 self._pendingmode = mode
221 self._pendingmode = mode
222 return fp
222 return fp
223
223
224 def _readdirstatefile(self, size=-1):
224 def _readdirstatefile(self, size=-1):
225 try:
225 try:
226 with self._opendirstatefile() as fp:
226 with self._opendirstatefile() as fp:
227 return fp.read(size)
227 return fp.read(size)
228 except IOError as err:
228 except IOError as err:
229 if err.errno != errno.ENOENT:
229 if err.errno != errno.ENOENT:
230 raise
230 raise
231 # File doesn't exist, so the current state is empty
231 # File doesn't exist, so the current state is empty
232 return b''
232 return b''
233
233
234 @property
234 @property
235 def docket(self):
235 def docket(self):
236 if not self._docket:
236 if not self._docket:
237 if not self._use_dirstate_v2:
237 if not self._use_dirstate_v2:
238 raise error.ProgrammingError(
238 raise error.ProgrammingError(
239 b'dirstate only has a docket in v2 format'
239 b'dirstate only has a docket in v2 format'
240 )
240 )
241 self._docket = docketmod.DirstateDocket.parse(
241 self._docket = docketmod.DirstateDocket.parse(
242 self._readdirstatefile(), self._nodeconstants
242 self._readdirstatefile(), self._nodeconstants
243 )
243 )
244 return self._docket
244 return self._docket
245
245
246 def write_v2_no_append(self, tr, st, meta, packed):
246 def write_v2_no_append(self, tr, st, meta, packed):
247 old_docket = self.docket
247 old_docket = self.docket
248 new_docket = docketmod.DirstateDocket.with_new_uuid(
248 new_docket = docketmod.DirstateDocket.with_new_uuid(
249 self.parents(), len(packed), meta
249 self.parents(), len(packed), meta
250 )
250 )
251 data_filename = new_docket.data_filename()
251 data_filename = new_docket.data_filename()
252 if tr:
252 if tr:
253 tr.add(data_filename, 0)
253 tr.add(data_filename, 0)
254 self._opener.write(data_filename, packed)
254 self._opener.write(data_filename, packed)
255 # Write the new docket after the new data file has been
255 # Write the new docket after the new data file has been
256 # written. Because `st` was opened with `atomictemp=True`,
256 # written. Because `st` was opened with `atomictemp=True`,
257 # the actual `.hg/dirstate` file is only affected on close.
257 # the actual `.hg/dirstate` file is only affected on close.
258 st.write(new_docket.serialize())
258 st.write(new_docket.serialize())
259 st.close()
259 st.close()
260 # Remove the old data file after the new docket pointing to
260 # Remove the old data file after the new docket pointing to
261 # the new data file was written.
261 # the new data file was written.
262 if old_docket.uuid:
262 if old_docket.uuid:
263 data_filename = old_docket.data_filename()
263 data_filename = old_docket.data_filename()
264 unlink = lambda _tr=None: self._opener.unlink(data_filename)
264 unlink = lambda _tr=None: self._opener.unlink(data_filename)
265 if tr:
265 if tr:
266 category = b"dirstate-v2-clean-" + old_docket.uuid
266 category = b"dirstate-v2-clean-" + old_docket.uuid
267 tr.addpostclose(category, unlink)
267 tr.addpostclose(category, unlink)
268 else:
268 else:
269 unlink()
269 unlink()
270 self._docket = new_docket
270 self._docket = new_docket
271
271
272 ### reading/setting parents
272 ### reading/setting parents
273
273
274 def parents(self):
274 def parents(self):
275 if not self._parents:
275 if not self._parents:
276 if self._use_dirstate_v2:
276 if self._use_dirstate_v2:
277 self._parents = self.docket.parents
277 self._parents = self.docket.parents
278 else:
278 else:
279 read_len = self._nodelen * 2
279 read_len = self._nodelen * 2
280 st = self._readdirstatefile(read_len)
280 st = self._readdirstatefile(read_len)
281 l = len(st)
281 l = len(st)
282 if l == read_len:
282 if l == read_len:
283 self._parents = (
283 self._parents = (
284 st[: self._nodelen],
284 st[: self._nodelen],
285 st[self._nodelen : 2 * self._nodelen],
285 st[self._nodelen : 2 * self._nodelen],
286 )
286 )
287 elif l == 0:
287 elif l == 0:
288 self._parents = (
288 self._parents = (
289 self._nodeconstants.nullid,
289 self._nodeconstants.nullid,
290 self._nodeconstants.nullid,
290 self._nodeconstants.nullid,
291 )
291 )
292 else:
292 else:
293 raise error.Abort(
293 raise error.Abort(
294 _(b'working directory state appears damaged!')
294 _(b'working directory state appears damaged!')
295 )
295 )
296
296
297 return self._parents
297 return self._parents
298
298
299
299
300 class dirstatemap(_dirstatemapcommon):
300 class dirstatemap(_dirstatemapcommon):
301 """Map encapsulating the dirstate's contents.
301 """Map encapsulating the dirstate's contents.
302
302
303 The dirstate contains the following state:
303 The dirstate contains the following state:
304
304
305 - `identity` is the identity of the dirstate file, which can be used to
305 - `identity` is the identity of the dirstate file, which can be used to
306 detect when changes have occurred to the dirstate file.
306 detect when changes have occurred to the dirstate file.
307
307
308 - `parents` is a pair containing the parents of the working copy. The
308 - `parents` is a pair containing the parents of the working copy. The
309 parents are updated by calling `setparents`.
309 parents are updated by calling `setparents`.
310
310
311 - the state map maps filenames to tuples of (state, mode, size, mtime),
311 - the state map maps filenames to tuples of (state, mode, size, mtime),
312 where state is a single character representing 'normal', 'added',
312 where state is a single character representing 'normal', 'added',
313 'removed', or 'merged'. It is read by treating the dirstate as a
313 'removed', or 'merged'. It is read by treating the dirstate as a
314 dict. File state is updated by calling various methods (see each
314 dict. File state is updated by calling various methods (see each
315 documentation for details):
315 documentation for details):
316
316
317 - `reset_state`,
317 - `reset_state`,
318 - `set_tracked`
318 - `set_tracked`
319 - `set_untracked`
319 - `set_untracked`
320 - `set_clean`
320 - `set_clean`
321 - `set_possibly_dirty`
321 - `set_possibly_dirty`
322
322
323 - `copymap` maps destination filenames to their source filename.
323 - `copymap` maps destination filenames to their source filename.
324
324
325 The dirstate also provides the following views onto the state:
325 The dirstate also provides the following views onto the state:
326
326
327 - `filefoldmap` is a dict mapping normalized filenames to the denormalized
327 - `filefoldmap` is a dict mapping normalized filenames to the denormalized
328 form that they appear as in the dirstate.
328 form that they appear as in the dirstate.
329
329
330 - `dirfoldmap` is a dict mapping normalized directory names to the
330 - `dirfoldmap` is a dict mapping normalized directory names to the
331 denormalized form that they appear as in the dirstate.
331 denormalized form that they appear as in the dirstate.
332 """
332 """
333
333
334 ### Core data storage and access
334 ### Core data storage and access
335
335
336 @propertycache
336 @propertycache
337 def _map(self):
337 def _map(self):
338 self._map = {}
338 self._map = {}
339 self.read()
339 self.read()
340 return self._map
340 return self._map
341
341
342 @propertycache
342 @propertycache
343 def copymap(self):
343 def copymap(self):
344 self.copymap = {}
344 self.copymap = {}
345 self._map
345 self._map
346 return self.copymap
346 return self.copymap
347
347
348 def clear(self):
348 def clear(self):
349 self._map.clear()
349 self._map.clear()
350 self.copymap.clear()
350 self.copymap.clear()
351 self.setparents(self._nodeconstants.nullid, self._nodeconstants.nullid)
351 self.setparents(self._nodeconstants.nullid, self._nodeconstants.nullid)
352 util.clearcachedproperty(self, b"_dirs")
352 util.clearcachedproperty(self, b"_dirs")
353 util.clearcachedproperty(self, b"_alldirs")
353 util.clearcachedproperty(self, b"_alldirs")
354 util.clearcachedproperty(self, b"filefoldmap")
354 util.clearcachedproperty(self, b"filefoldmap")
355 util.clearcachedproperty(self, b"dirfoldmap")
355 util.clearcachedproperty(self, b"dirfoldmap")
356
356
357 def items(self):
357 def items(self):
358 return pycompat.iteritems(self._map)
358 return pycompat.iteritems(self._map)
359
359
360 # forward for python2,3 compat
360 # forward for python2,3 compat
361 iteritems = items
361 iteritems = items
362
362
363 def debug_iter(self, all):
363 def debug_iter(self, all):
364 """
364 """
365 Return an iterator of (filename, state, mode, size, mtime) tuples
365 Return an iterator of (filename, state, mode, size, mtime) tuples
366
366
367 `all` is unused when Rust is not enabled
367 `all` is unused when Rust is not enabled
368 """
368 """
369 for (filename, item) in self.items():
369 for (filename, item) in self.items():
370 yield (filename, item.state, item.mode, item.size, item.mtime)
370 yield (filename, item.state, item.mode, item.size, item.mtime)
371
371
372 def keys(self):
372 def keys(self):
373 return self._map.keys()
373 return self._map.keys()
374
374
375 ### reading/setting parents
375 ### reading/setting parents
376
376
377 def setparents(self, p1, p2, fold_p2=False):
377 def setparents(self, p1, p2, fold_p2=False):
378 self._parents = (p1, p2)
378 self._parents = (p1, p2)
379 self._dirtyparents = True
379 self._dirtyparents = True
380 copies = {}
380 copies = {}
381 if fold_p2:
381 if fold_p2:
382 for f, s in pycompat.iteritems(self._map):
382 for f, s in pycompat.iteritems(self._map):
383 # Discard "merged" markers when moving away from a merge state
383 # Discard "merged" markers when moving away from a merge state
384 if s.p2_info:
384 if s.p2_info:
385 source = self.copymap.pop(f, None)
385 source = self.copymap.pop(f, None)
386 if source:
386 if source:
387 copies[f] = source
387 copies[f] = source
388 s.drop_merge_data()
388 s.drop_merge_data()
389 return copies
389 return copies
390
390
391 ### disk interaction
391 ### disk interaction
392
392
393 def read(self):
393 def read(self):
394 # ignore HG_PENDING because identity is used only for writing
394 # ignore HG_PENDING because identity is used only for writing
395 self.identity = util.filestat.frompath(
395 self.identity = util.filestat.frompath(
396 self._opener.join(self._filename)
396 self._opener.join(self._filename)
397 )
397 )
398
398
399 if self._use_dirstate_v2:
399 if self._use_dirstate_v2:
400 if not self.docket.uuid:
400 if not self.docket.uuid:
401 return
401 return
402 st = self._opener.read(self.docket.data_filename())
402 st = self._opener.read(self.docket.data_filename())
403 else:
403 else:
404 st = self._readdirstatefile()
404 st = self._readdirstatefile()
405
405
406 if not st:
406 if not st:
407 return
407 return
408
408
409 # TODO: adjust this estimate for dirstate-v2
409 # TODO: adjust this estimate for dirstate-v2
410 if util.safehasattr(parsers, b'dict_new_presized'):
410 if util.safehasattr(parsers, b'dict_new_presized'):
411 # Make an estimate of the number of files in the dirstate based on
411 # Make an estimate of the number of files in the dirstate based on
412 # its size. This trades wasting some memory for avoiding costly
412 # its size. This trades wasting some memory for avoiding costly
413 # resizes. Each entry have a prefix of 17 bytes followed by one or
413 # resizes. Each entry have a prefix of 17 bytes followed by one or
414 # two path names. Studies on various large-scale real-world repositories
414 # two path names. Studies on various large-scale real-world repositories
415 # found 54 bytes a reasonable upper limit for the average path names.
415 # found 54 bytes a reasonable upper limit for the average path names.
416 # Copy entries are ignored for the sake of this estimate.
416 # Copy entries are ignored for the sake of this estimate.
417 self._map = parsers.dict_new_presized(len(st) // 71)
417 self._map = parsers.dict_new_presized(len(st) // 71)
418
418
419 # Python's garbage collector triggers a GC each time a certain number
419 # Python's garbage collector triggers a GC each time a certain number
420 # of container objects (the number being defined by
420 # of container objects (the number being defined by
421 # gc.get_threshold()) are allocated. parse_dirstate creates a tuple
421 # gc.get_threshold()) are allocated. parse_dirstate creates a tuple
422 # for each file in the dirstate. The C version then immediately marks
422 # for each file in the dirstate. The C version then immediately marks
423 # them as not to be tracked by the collector. However, this has no
423 # them as not to be tracked by the collector. However, this has no
424 # effect on when GCs are triggered, only on what objects the GC looks
424 # effect on when GCs are triggered, only on what objects the GC looks
425 # into. This means that O(number of files) GCs are unavoidable.
425 # into. This means that O(number of files) GCs are unavoidable.
426 # Depending on when in the process's lifetime the dirstate is parsed,
426 # Depending on when in the process's lifetime the dirstate is parsed,
427 # this can get very expensive. As a workaround, disable GC while
427 # this can get very expensive. As a workaround, disable GC while
428 # parsing the dirstate.
428 # parsing the dirstate.
429 #
429 #
430 # (we cannot decorate the function directly since it is in a C module)
430 # (we cannot decorate the function directly since it is in a C module)
431 if self._use_dirstate_v2:
431 if self._use_dirstate_v2:
432 p = self.docket.parents
432 p = self.docket.parents
433 meta = self.docket.tree_metadata
433 meta = self.docket.tree_metadata
434 parse_dirstate = util.nogc(v2.parse_dirstate)
434 parse_dirstate = util.nogc(v2.parse_dirstate)
435 parse_dirstate(self._map, self.copymap, st, meta)
435 parse_dirstate(self._map, self.copymap, st, meta)
436 else:
436 else:
437 parse_dirstate = util.nogc(parsers.parse_dirstate)
437 parse_dirstate = util.nogc(parsers.parse_dirstate)
438 p = parse_dirstate(self._map, self.copymap, st)
438 p = parse_dirstate(self._map, self.copymap, st)
439 if not self._dirtyparents:
439 if not self._dirtyparents:
440 self.setparents(*p)
440 self.setparents(*p)
441
441
442 # Avoid excess attribute lookups by fast pathing certain checks
442 # Avoid excess attribute lookups by fast pathing certain checks
443 self.__contains__ = self._map.__contains__
443 self.__contains__ = self._map.__contains__
444 self.__getitem__ = self._map.__getitem__
444 self.__getitem__ = self._map.__getitem__
445 self.get = self._map.get
445 self.get = self._map.get
446
446
447 def write(self, tr, st, now):
447 def write(self, tr, st, now):
448 if self._use_dirstate_v2:
448 if self._use_dirstate_v2:
449 packed, meta = v2.pack_dirstate(self._map, self.copymap, now)
449 packed, meta = v2.pack_dirstate(self._map, self.copymap)
450 self.write_v2_no_append(tr, st, meta, packed)
450 self.write_v2_no_append(tr, st, meta, packed)
451 else:
451 else:
452 packed = parsers.pack_dirstate(
452 packed = parsers.pack_dirstate(
453 self._map, self.copymap, self.parents(), now
453 self._map, self.copymap, self.parents()
454 )
454 )
455 st.write(packed)
455 st.write(packed)
456 st.close()
456 st.close()
457 self._dirtyparents = False
457 self._dirtyparents = False
458
458
459 @propertycache
459 @propertycache
460 def identity(self):
460 def identity(self):
461 self._map
461 self._map
462 return self.identity
462 return self.identity
463
463
464 ### code related to maintaining and accessing "extra" property
464 ### code related to maintaining and accessing "extra" property
465 # (e.g. "has_dir")
465 # (e.g. "has_dir")
466
466
467 def _dirs_incr(self, filename, old_entry=None):
467 def _dirs_incr(self, filename, old_entry=None):
468 """incremente the dirstate counter if applicable"""
468 """incremente the dirstate counter if applicable"""
469 if (
469 if (
470 old_entry is None or old_entry.removed
470 old_entry is None or old_entry.removed
471 ) and "_dirs" in self.__dict__:
471 ) and "_dirs" in self.__dict__:
472 self._dirs.addpath(filename)
472 self._dirs.addpath(filename)
473 if old_entry is None and "_alldirs" in self.__dict__:
473 if old_entry is None and "_alldirs" in self.__dict__:
474 self._alldirs.addpath(filename)
474 self._alldirs.addpath(filename)
475
475
476 def _dirs_decr(self, filename, old_entry=None, remove_variant=False):
476 def _dirs_decr(self, filename, old_entry=None, remove_variant=False):
477 """decremente the dirstate counter if applicable"""
477 """decremente the dirstate counter if applicable"""
478 if old_entry is not None:
478 if old_entry is not None:
479 if "_dirs" in self.__dict__ and not old_entry.removed:
479 if "_dirs" in self.__dict__ and not old_entry.removed:
480 self._dirs.delpath(filename)
480 self._dirs.delpath(filename)
481 if "_alldirs" in self.__dict__ and not remove_variant:
481 if "_alldirs" in self.__dict__ and not remove_variant:
482 self._alldirs.delpath(filename)
482 self._alldirs.delpath(filename)
483 elif remove_variant and "_alldirs" in self.__dict__:
483 elif remove_variant and "_alldirs" in self.__dict__:
484 self._alldirs.addpath(filename)
484 self._alldirs.addpath(filename)
485 if "filefoldmap" in self.__dict__:
485 if "filefoldmap" in self.__dict__:
486 normed = util.normcase(filename)
486 normed = util.normcase(filename)
487 self.filefoldmap.pop(normed, None)
487 self.filefoldmap.pop(normed, None)
488
488
489 @propertycache
489 @propertycache
490 def filefoldmap(self):
490 def filefoldmap(self):
491 """Returns a dictionary mapping normalized case paths to their
491 """Returns a dictionary mapping normalized case paths to their
492 non-normalized versions.
492 non-normalized versions.
493 """
493 """
494 try:
494 try:
495 makefilefoldmap = parsers.make_file_foldmap
495 makefilefoldmap = parsers.make_file_foldmap
496 except AttributeError:
496 except AttributeError:
497 pass
497 pass
498 else:
498 else:
499 return makefilefoldmap(
499 return makefilefoldmap(
500 self._map, util.normcasespec, util.normcasefallback
500 self._map, util.normcasespec, util.normcasefallback
501 )
501 )
502
502
503 f = {}
503 f = {}
504 normcase = util.normcase
504 normcase = util.normcase
505 for name, s in pycompat.iteritems(self._map):
505 for name, s in pycompat.iteritems(self._map):
506 if not s.removed:
506 if not s.removed:
507 f[normcase(name)] = name
507 f[normcase(name)] = name
508 f[b'.'] = b'.' # prevents useless util.fspath() invocation
508 f[b'.'] = b'.' # prevents useless util.fspath() invocation
509 return f
509 return f
510
510
511 @propertycache
511 @propertycache
512 def dirfoldmap(self):
512 def dirfoldmap(self):
513 f = {}
513 f = {}
514 normcase = util.normcase
514 normcase = util.normcase
515 for name in self._dirs:
515 for name in self._dirs:
516 f[normcase(name)] = name
516 f[normcase(name)] = name
517 return f
517 return f
518
518
519 def hastrackeddir(self, d):
519 def hastrackeddir(self, d):
520 """
520 """
521 Returns True if the dirstate contains a tracked (not removed) file
521 Returns True if the dirstate contains a tracked (not removed) file
522 in this directory.
522 in this directory.
523 """
523 """
524 return d in self._dirs
524 return d in self._dirs
525
525
526 def hasdir(self, d):
526 def hasdir(self, d):
527 """
527 """
528 Returns True if the dirstate contains a file (tracked or removed)
528 Returns True if the dirstate contains a file (tracked or removed)
529 in this directory.
529 in this directory.
530 """
530 """
531 return d in self._alldirs
531 return d in self._alldirs
532
532
533 @propertycache
533 @propertycache
534 def _dirs(self):
534 def _dirs(self):
535 return pathutil.dirs(self._map, only_tracked=True)
535 return pathutil.dirs(self._map, only_tracked=True)
536
536
537 @propertycache
537 @propertycache
538 def _alldirs(self):
538 def _alldirs(self):
539 return pathutil.dirs(self._map)
539 return pathutil.dirs(self._map)
540
540
541 ### code related to manipulation of entries and copy-sources
541 ### code related to manipulation of entries and copy-sources
542
542
543 def _refresh_entry(self, f, entry):
543 def _refresh_entry(self, f, entry):
544 if not entry.any_tracked:
544 if not entry.any_tracked:
545 self._map.pop(f, None)
545 self._map.pop(f, None)
546
546
547 def _insert_entry(self, f, entry):
547 def _insert_entry(self, f, entry):
548 self._map[f] = entry
548 self._map[f] = entry
549
549
550 def _drop_entry(self, f):
550 def _drop_entry(self, f):
551 self._map.pop(f, None)
551 self._map.pop(f, None)
552 self.copymap.pop(f, None)
552 self.copymap.pop(f, None)
553
553
554
554
555 if rustmod is not None:
555 if rustmod is not None:
556
556
557 class dirstatemap(_dirstatemapcommon):
557 class dirstatemap(_dirstatemapcommon):
558
558
559 ### Core data storage and access
559 ### Core data storage and access
560
560
561 @propertycache
561 @propertycache
562 def _map(self):
562 def _map(self):
563 """
563 """
564 Fills the Dirstatemap when called.
564 Fills the Dirstatemap when called.
565 """
565 """
566 # ignore HG_PENDING because identity is used only for writing
566 # ignore HG_PENDING because identity is used only for writing
567 self.identity = util.filestat.frompath(
567 self.identity = util.filestat.frompath(
568 self._opener.join(self._filename)
568 self._opener.join(self._filename)
569 )
569 )
570
570
571 if self._use_dirstate_v2:
571 if self._use_dirstate_v2:
572 if self.docket.uuid:
572 if self.docket.uuid:
573 # TODO: use mmap when possible
573 # TODO: use mmap when possible
574 data = self._opener.read(self.docket.data_filename())
574 data = self._opener.read(self.docket.data_filename())
575 else:
575 else:
576 data = b''
576 data = b''
577 self._map = rustmod.DirstateMap.new_v2(
577 self._map = rustmod.DirstateMap.new_v2(
578 data, self.docket.data_size, self.docket.tree_metadata
578 data, self.docket.data_size, self.docket.tree_metadata
579 )
579 )
580 parents = self.docket.parents
580 parents = self.docket.parents
581 else:
581 else:
582 self._map, parents = rustmod.DirstateMap.new_v1(
582 self._map, parents = rustmod.DirstateMap.new_v1(
583 self._readdirstatefile()
583 self._readdirstatefile()
584 )
584 )
585
585
586 if parents and not self._dirtyparents:
586 if parents and not self._dirtyparents:
587 self.setparents(*parents)
587 self.setparents(*parents)
588
588
589 self.__contains__ = self._map.__contains__
589 self.__contains__ = self._map.__contains__
590 self.__getitem__ = self._map.__getitem__
590 self.__getitem__ = self._map.__getitem__
591 self.get = self._map.get
591 self.get = self._map.get
592 return self._map
592 return self._map
593
593
594 @property
594 @property
595 def copymap(self):
595 def copymap(self):
596 return self._map.copymap()
596 return self._map.copymap()
597
597
598 def debug_iter(self, all):
598 def debug_iter(self, all):
599 """
599 """
600 Return an iterator of (filename, state, mode, size, mtime) tuples
600 Return an iterator of (filename, state, mode, size, mtime) tuples
601
601
602 `all`: also include with `state == b' '` dirstate tree nodes that
602 `all`: also include with `state == b' '` dirstate tree nodes that
603 don't have an associated `DirstateItem`.
603 don't have an associated `DirstateItem`.
604
604
605 """
605 """
606 return self._map.debug_iter(all)
606 return self._map.debug_iter(all)
607
607
608 def clear(self):
608 def clear(self):
609 self._map.clear()
609 self._map.clear()
610 self.setparents(
610 self.setparents(
611 self._nodeconstants.nullid, self._nodeconstants.nullid
611 self._nodeconstants.nullid, self._nodeconstants.nullid
612 )
612 )
613 util.clearcachedproperty(self, b"_dirs")
613 util.clearcachedproperty(self, b"_dirs")
614 util.clearcachedproperty(self, b"_alldirs")
614 util.clearcachedproperty(self, b"_alldirs")
615 util.clearcachedproperty(self, b"dirfoldmap")
615 util.clearcachedproperty(self, b"dirfoldmap")
616
616
617 def items(self):
617 def items(self):
618 return self._map.items()
618 return self._map.items()
619
619
620 # forward for python2,3 compat
620 # forward for python2,3 compat
621 iteritems = items
621 iteritems = items
622
622
623 def keys(self):
623 def keys(self):
624 return iter(self._map)
624 return iter(self._map)
625
625
626 ### reading/setting parents
626 ### reading/setting parents
627
627
628 def setparents(self, p1, p2, fold_p2=False):
628 def setparents(self, p1, p2, fold_p2=False):
629 self._parents = (p1, p2)
629 self._parents = (p1, p2)
630 self._dirtyparents = True
630 self._dirtyparents = True
631 copies = {}
631 copies = {}
632 if fold_p2:
632 if fold_p2:
633 # Collect into an intermediate list to avoid a `RuntimeError`
633 # Collect into an intermediate list to avoid a `RuntimeError`
634 # exception due to mutation during iteration.
634 # exception due to mutation during iteration.
635 # TODO: move this the whole loop to Rust where `iter_mut`
635 # TODO: move this the whole loop to Rust where `iter_mut`
636 # enables in-place mutation of elements of a collection while
636 # enables in-place mutation of elements of a collection while
637 # iterating it, without mutating the collection itself.
637 # iterating it, without mutating the collection itself.
638 files_with_p2_info = [
638 files_with_p2_info = [
639 f for f, s in self._map.items() if s.p2_info
639 f for f, s in self._map.items() if s.p2_info
640 ]
640 ]
641 rust_map = self._map
641 rust_map = self._map
642 for f in files_with_p2_info:
642 for f in files_with_p2_info:
643 e = rust_map.get(f)
643 e = rust_map.get(f)
644 source = self.copymap.pop(f, None)
644 source = self.copymap.pop(f, None)
645 if source:
645 if source:
646 copies[f] = source
646 copies[f] = source
647 e.drop_merge_data()
647 e.drop_merge_data()
648 rust_map.set_dirstate_item(f, e)
648 rust_map.set_dirstate_item(f, e)
649 return copies
649 return copies
650
650
651 ### disk interaction
651 ### disk interaction
652
652
653 @propertycache
653 @propertycache
654 def identity(self):
654 def identity(self):
655 self._map
655 self._map
656 return self.identity
656 return self.identity
657
657
658 def write(self, tr, st, now):
658 def write(self, tr, st, now):
659 if not self._use_dirstate_v2:
659 if not self._use_dirstate_v2:
660 p1, p2 = self.parents()
660 p1, p2 = self.parents()
661 packed = self._map.write_v1(p1, p2, now)
661 packed = self._map.write_v1(p1, p2)
662 st.write(packed)
662 st.write(packed)
663 st.close()
663 st.close()
664 self._dirtyparents = False
664 self._dirtyparents = False
665 return
665 return
666
666
667 # We can only append to an existing data file if there is one
667 # We can only append to an existing data file if there is one
668 can_append = self.docket.uuid is not None
668 can_append = self.docket.uuid is not None
669 packed, meta, append = self._map.write_v2(now, can_append)
669 packed, meta, append = self._map.write_v2(can_append)
670 if append:
670 if append:
671 docket = self.docket
671 docket = self.docket
672 data_filename = docket.data_filename()
672 data_filename = docket.data_filename()
673 if tr:
673 if tr:
674 tr.add(data_filename, docket.data_size)
674 tr.add(data_filename, docket.data_size)
675 with self._opener(data_filename, b'r+b') as fp:
675 with self._opener(data_filename, b'r+b') as fp:
676 fp.seek(docket.data_size)
676 fp.seek(docket.data_size)
677 assert fp.tell() == docket.data_size
677 assert fp.tell() == docket.data_size
678 written = fp.write(packed)
678 written = fp.write(packed)
679 if written is not None: # py2 may return None
679 if written is not None: # py2 may return None
680 assert written == len(packed), (written, len(packed))
680 assert written == len(packed), (written, len(packed))
681 docket.data_size += len(packed)
681 docket.data_size += len(packed)
682 docket.parents = self.parents()
682 docket.parents = self.parents()
683 docket.tree_metadata = meta
683 docket.tree_metadata = meta
684 st.write(docket.serialize())
684 st.write(docket.serialize())
685 st.close()
685 st.close()
686 else:
686 else:
687 self.write_v2_no_append(tr, st, meta, packed)
687 self.write_v2_no_append(tr, st, meta, packed)
688 # Reload from the newly-written file
688 # Reload from the newly-written file
689 util.clearcachedproperty(self, b"_map")
689 util.clearcachedproperty(self, b"_map")
690 self._dirtyparents = False
690 self._dirtyparents = False
691
691
692 ### code related to maintaining and accessing "extra" property
692 ### code related to maintaining and accessing "extra" property
693 # (e.g. "has_dir")
693 # (e.g. "has_dir")
694
694
695 @propertycache
695 @propertycache
696 def filefoldmap(self):
696 def filefoldmap(self):
697 """Returns a dictionary mapping normalized case paths to their
697 """Returns a dictionary mapping normalized case paths to their
698 non-normalized versions.
698 non-normalized versions.
699 """
699 """
700 return self._map.filefoldmapasdict()
700 return self._map.filefoldmapasdict()
701
701
702 def hastrackeddir(self, d):
702 def hastrackeddir(self, d):
703 return self._map.hastrackeddir(d)
703 return self._map.hastrackeddir(d)
704
704
705 def hasdir(self, d):
705 def hasdir(self, d):
706 return self._map.hasdir(d)
706 return self._map.hasdir(d)
707
707
708 @propertycache
708 @propertycache
709 def dirfoldmap(self):
709 def dirfoldmap(self):
710 f = {}
710 f = {}
711 normcase = util.normcase
711 normcase = util.normcase
712 for name in self._map.tracked_dirs():
712 for name in self._map.tracked_dirs():
713 f[normcase(name)] = name
713 f[normcase(name)] = name
714 return f
714 return f
715
715
716 ### code related to manipulation of entries and copy-sources
716 ### code related to manipulation of entries and copy-sources
717
717
718 def _refresh_entry(self, f, entry):
718 def _refresh_entry(self, f, entry):
719 if not entry.any_tracked:
719 if not entry.any_tracked:
720 self._map.drop_item_and_copy_source(f)
720 self._map.drop_item_and_copy_source(f)
721 else:
721 else:
722 self._map.addfile(f, entry)
722 self._map.addfile(f, entry)
723
723
724 def _insert_entry(self, f, entry):
724 def _insert_entry(self, f, entry):
725 self._map.addfile(f, entry)
725 self._map.addfile(f, entry)
726
726
727 def _drop_entry(self, f):
727 def _drop_entry(self, f):
728 self._map.drop_item_and_copy_source(f)
728 self._map.drop_item_and_copy_source(f)
729
729
730 def __setitem__(self, key, value):
730 def __setitem__(self, key, value):
731 assert isinstance(value, DirstateItem)
731 assert isinstance(value, DirstateItem)
732 self._map.set_dirstate_item(key, value)
732 self._map.set_dirstate_item(key, value)
@@ -1,414 +1,401 b''
1 # v2.py - Pure-Python implementation of the dirstate-v2 file format
1 # v2.py - Pure-Python implementation of the dirstate-v2 file format
2 #
2 #
3 # Copyright Mercurial Contributors
3 # Copyright Mercurial Contributors
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import struct
10 import struct
11
11
12 from ..thirdparty import attr
12 from ..thirdparty import attr
13 from .. import error, policy
13 from .. import error, policy
14
14
15 parsers = policy.importmod('parsers')
15 parsers = policy.importmod('parsers')
16
16
17
17
18 # Must match the constant of the same name in
18 # Must match the constant of the same name in
19 # `rust/hg-core/src/dirstate_tree/on_disk.rs`
19 # `rust/hg-core/src/dirstate_tree/on_disk.rs`
20 TREE_METADATA_SIZE = 44
20 TREE_METADATA_SIZE = 44
21 NODE_SIZE = 44
21 NODE_SIZE = 44
22
22
23
23
24 # Must match the `TreeMetadata` Rust struct in
24 # Must match the `TreeMetadata` Rust struct in
25 # `rust/hg-core/src/dirstate_tree/on_disk.rs`. See doc-comments there.
25 # `rust/hg-core/src/dirstate_tree/on_disk.rs`. See doc-comments there.
26 #
26 #
27 # * 4 bytes: start offset of root nodes
27 # * 4 bytes: start offset of root nodes
28 # * 4 bytes: number of root nodes
28 # * 4 bytes: number of root nodes
29 # * 4 bytes: total number of nodes in the tree that have an entry
29 # * 4 bytes: total number of nodes in the tree that have an entry
30 # * 4 bytes: total number of nodes in the tree that have a copy source
30 # * 4 bytes: total number of nodes in the tree that have a copy source
31 # * 4 bytes: number of bytes in the data file that are not used anymore
31 # * 4 bytes: number of bytes in the data file that are not used anymore
32 # * 4 bytes: unused
32 # * 4 bytes: unused
33 # * 20 bytes: SHA-1 hash of ignore patterns
33 # * 20 bytes: SHA-1 hash of ignore patterns
34 TREE_METADATA = struct.Struct('>LLLLL4s20s')
34 TREE_METADATA = struct.Struct('>LLLLL4s20s')
35
35
36
36
37 # Must match the `Node` Rust struct in
37 # Must match the `Node` Rust struct in
38 # `rust/hg-core/src/dirstate_tree/on_disk.rs`. See doc-comments there.
38 # `rust/hg-core/src/dirstate_tree/on_disk.rs`. See doc-comments there.
39 #
39 #
40 # * 4 bytes: start offset of full path
40 # * 4 bytes: start offset of full path
41 # * 2 bytes: length of the full path
41 # * 2 bytes: length of the full path
42 # * 2 bytes: length within the full path before its "base name"
42 # * 2 bytes: length within the full path before its "base name"
43 # * 4 bytes: start offset of the copy source if any, or zero for no copy source
43 # * 4 bytes: start offset of the copy source if any, or zero for no copy source
44 # * 2 bytes: length of the copy source if any, or unused
44 # * 2 bytes: length of the copy source if any, or unused
45 # * 4 bytes: start offset of child nodes
45 # * 4 bytes: start offset of child nodes
46 # * 4 bytes: number of child nodes
46 # * 4 bytes: number of child nodes
47 # * 4 bytes: number of descendant nodes that have an entry
47 # * 4 bytes: number of descendant nodes that have an entry
48 # * 4 bytes: number of descendant nodes that have a "tracked" state
48 # * 4 bytes: number of descendant nodes that have a "tracked" state
49 # * 1 byte: flags
49 # * 1 byte: flags
50 # * 4 bytes: expected size
50 # * 4 bytes: expected size
51 # * 4 bytes: mtime seconds
51 # * 4 bytes: mtime seconds
52 # * 4 bytes: mtime nanoseconds
52 # * 4 bytes: mtime nanoseconds
53 NODE = struct.Struct('>LHHLHLLLLHlll')
53 NODE = struct.Struct('>LHHLHLLLLHlll')
54
54
55
55
56 assert TREE_METADATA_SIZE == TREE_METADATA.size
56 assert TREE_METADATA_SIZE == TREE_METADATA.size
57 assert NODE_SIZE == NODE.size
57 assert NODE_SIZE == NODE.size
58
58
59 # match constant in mercurial/pure/parsers.py
59 # match constant in mercurial/pure/parsers.py
60 DIRSTATE_V2_DIRECTORY = 1 << 5
60 DIRSTATE_V2_DIRECTORY = 1 << 5
61
61
62
62
63 def parse_dirstate(map, copy_map, data, tree_metadata):
63 def parse_dirstate(map, copy_map, data, tree_metadata):
64 """parse a full v2-dirstate from a binary data into dictionnaries:
64 """parse a full v2-dirstate from a binary data into dictionnaries:
65
65
66 - map: a {path: entry} mapping that will be filled
66 - map: a {path: entry} mapping that will be filled
67 - copy_map: a {path: copy-source} mapping that will be filled
67 - copy_map: a {path: copy-source} mapping that will be filled
68 - data: a binary blob contains v2 nodes data
68 - data: a binary blob contains v2 nodes data
69 - tree_metadata:: a binary blob of the top level node (from the docket)
69 - tree_metadata:: a binary blob of the top level node (from the docket)
70 """
70 """
71 (
71 (
72 root_nodes_start,
72 root_nodes_start,
73 root_nodes_len,
73 root_nodes_len,
74 _nodes_with_entry_count,
74 _nodes_with_entry_count,
75 _nodes_with_copy_source_count,
75 _nodes_with_copy_source_count,
76 _unreachable_bytes,
76 _unreachable_bytes,
77 _unused,
77 _unused,
78 _ignore_patterns_hash,
78 _ignore_patterns_hash,
79 ) = TREE_METADATA.unpack(tree_metadata)
79 ) = TREE_METADATA.unpack(tree_metadata)
80 parse_nodes(map, copy_map, data, root_nodes_start, root_nodes_len)
80 parse_nodes(map, copy_map, data, root_nodes_start, root_nodes_len)
81
81
82
82
83 def parse_nodes(map, copy_map, data, start, len):
83 def parse_nodes(map, copy_map, data, start, len):
84 """parse <len> nodes from <data> starting at offset <start>
84 """parse <len> nodes from <data> starting at offset <start>
85
85
86 This is used by parse_dirstate to recursively fill `map` and `copy_map`.
86 This is used by parse_dirstate to recursively fill `map` and `copy_map`.
87
87
88 All directory specific information is ignored and do not need any
88 All directory specific information is ignored and do not need any
89 processing (DIRECTORY, ALL_UNKNOWN_RECORDED, ALL_IGNORED_RECORDED)
89 processing (DIRECTORY, ALL_UNKNOWN_RECORDED, ALL_IGNORED_RECORDED)
90 """
90 """
91 for i in range(len):
91 for i in range(len):
92 node_start = start + NODE_SIZE * i
92 node_start = start + NODE_SIZE * i
93 node_bytes = slice_with_len(data, node_start, NODE_SIZE)
93 node_bytes = slice_with_len(data, node_start, NODE_SIZE)
94 (
94 (
95 path_start,
95 path_start,
96 path_len,
96 path_len,
97 _basename_start,
97 _basename_start,
98 copy_source_start,
98 copy_source_start,
99 copy_source_len,
99 copy_source_len,
100 children_start,
100 children_start,
101 children_count,
101 children_count,
102 _descendants_with_entry_count,
102 _descendants_with_entry_count,
103 _tracked_descendants_count,
103 _tracked_descendants_count,
104 flags,
104 flags,
105 size,
105 size,
106 mtime_s,
106 mtime_s,
107 mtime_ns,
107 mtime_ns,
108 ) = NODE.unpack(node_bytes)
108 ) = NODE.unpack(node_bytes)
109
109
110 # Parse child nodes of this node recursively
110 # Parse child nodes of this node recursively
111 parse_nodes(map, copy_map, data, children_start, children_count)
111 parse_nodes(map, copy_map, data, children_start, children_count)
112
112
113 item = parsers.DirstateItem.from_v2_data(flags, size, mtime_s, mtime_ns)
113 item = parsers.DirstateItem.from_v2_data(flags, size, mtime_s, mtime_ns)
114 if not item.any_tracked:
114 if not item.any_tracked:
115 continue
115 continue
116 path = slice_with_len(data, path_start, path_len)
116 path = slice_with_len(data, path_start, path_len)
117 map[path] = item
117 map[path] = item
118 if copy_source_start:
118 if copy_source_start:
119 copy_map[path] = slice_with_len(
119 copy_map[path] = slice_with_len(
120 data, copy_source_start, copy_source_len
120 data, copy_source_start, copy_source_len
121 )
121 )
122
122
123
123
124 def slice_with_len(data, start, len):
124 def slice_with_len(data, start, len):
125 return data[start : start + len]
125 return data[start : start + len]
126
126
127
127
128 @attr.s
128 @attr.s
129 class Node(object):
129 class Node(object):
130 path = attr.ib()
130 path = attr.ib()
131 entry = attr.ib()
131 entry = attr.ib()
132 parent = attr.ib(default=None)
132 parent = attr.ib(default=None)
133 children_count = attr.ib(default=0)
133 children_count = attr.ib(default=0)
134 children_offset = attr.ib(default=0)
134 children_offset = attr.ib(default=0)
135 descendants_with_entry = attr.ib(default=0)
135 descendants_with_entry = attr.ib(default=0)
136 tracked_descendants = attr.ib(default=0)
136 tracked_descendants = attr.ib(default=0)
137
137
138 def pack(self, copy_map, paths_offset):
138 def pack(self, copy_map, paths_offset):
139 path = self.path
139 path = self.path
140 copy = copy_map.get(path)
140 copy = copy_map.get(path)
141 entry = self.entry
141 entry = self.entry
142
142
143 path_start = paths_offset
143 path_start = paths_offset
144 path_len = len(path)
144 path_len = len(path)
145 basename_start = path.rfind(b'/') + 1 # 0 if rfind returns -1
145 basename_start = path.rfind(b'/') + 1 # 0 if rfind returns -1
146 if copy is not None:
146 if copy is not None:
147 copy_source_start = paths_offset + len(path)
147 copy_source_start = paths_offset + len(path)
148 copy_source_len = len(copy)
148 copy_source_len = len(copy)
149 else:
149 else:
150 copy_source_start = 0
150 copy_source_start = 0
151 copy_source_len = 0
151 copy_source_len = 0
152 if entry is not None:
152 if entry is not None:
153 flags, size, mtime_s, mtime_ns = entry.v2_data()
153 flags, size, mtime_s, mtime_ns = entry.v2_data()
154 else:
154 else:
155 # There are no mtime-cached directories in the Python implementation
155 # There are no mtime-cached directories in the Python implementation
156 flags = DIRSTATE_V2_DIRECTORY
156 flags = DIRSTATE_V2_DIRECTORY
157 size = 0
157 size = 0
158 mtime_s = 0
158 mtime_s = 0
159 mtime_ns = 0
159 mtime_ns = 0
160 return NODE.pack(
160 return NODE.pack(
161 path_start,
161 path_start,
162 path_len,
162 path_len,
163 basename_start,
163 basename_start,
164 copy_source_start,
164 copy_source_start,
165 copy_source_len,
165 copy_source_len,
166 self.children_offset,
166 self.children_offset,
167 self.children_count,
167 self.children_count,
168 self.descendants_with_entry,
168 self.descendants_with_entry,
169 self.tracked_descendants,
169 self.tracked_descendants,
170 flags,
170 flags,
171 size,
171 size,
172 mtime_s,
172 mtime_s,
173 mtime_ns,
173 mtime_ns,
174 )
174 )
175
175
176
176
177 def pack_dirstate(map, copy_map, now):
177 def pack_dirstate(map, copy_map):
178 """
178 """
179 Pack `map` and `copy_map` into the dirstate v2 binary format and return
179 Pack `map` and `copy_map` into the dirstate v2 binary format and return
180 the bytearray.
180 the bytearray.
181 `now` is a timestamp of the current filesystem time used to detect race
182 conditions in writing the dirstate to disk, see inline comment.
183
181
184 The on-disk format expects a tree-like structure where the leaves are
182 The on-disk format expects a tree-like structure where the leaves are
185 written first (and sorted per-directory), going up levels until the root
183 written first (and sorted per-directory), going up levels until the root
186 node and writing that one to the docket. See more details on the on-disk
184 node and writing that one to the docket. See more details on the on-disk
187 format in `mercurial/helptext/internals/dirstate-v2`.
185 format in `mercurial/helptext/internals/dirstate-v2`.
188
186
189 Since both `map` and `copy_map` are flat dicts we need to figure out the
187 Since both `map` and `copy_map` are flat dicts we need to figure out the
190 hierarchy. This algorithm does so without having to build the entire tree
188 hierarchy. This algorithm does so without having to build the entire tree
191 in-memory: it only keeps the minimum number of nodes around to satisfy the
189 in-memory: it only keeps the minimum number of nodes around to satisfy the
192 format.
190 format.
193
191
194 # Algorithm explanation
192 # Algorithm explanation
195
193
196 This explanation does not talk about the different counters for tracked
194 This explanation does not talk about the different counters for tracked
197 descendents and storing the copies, but that work is pretty simple once this
195 descendents and storing the copies, but that work is pretty simple once this
198 algorithm is in place.
196 algorithm is in place.
199
197
200 ## Building a subtree
198 ## Building a subtree
201
199
202 First, sort `map`: this makes it so the leaves of the tree are contiguous
200 First, sort `map`: this makes it so the leaves of the tree are contiguous
203 per directory (i.e. a/b/c and a/b/d will be next to each other in the list),
201 per directory (i.e. a/b/c and a/b/d will be next to each other in the list),
204 and enables us to use the ordering of folders to have a "cursor" of the
202 and enables us to use the ordering of folders to have a "cursor" of the
205 current folder we're in without ever going twice in the same branch of the
203 current folder we're in without ever going twice in the same branch of the
206 tree. The cursor is a node that remembers its parent and any information
204 tree. The cursor is a node that remembers its parent and any information
207 relevant to the format (see the `Node` class), building the relevant part
205 relevant to the format (see the `Node` class), building the relevant part
208 of the tree lazily.
206 of the tree lazily.
209 Then, for each file in `map`, move the cursor into the tree to the
207 Then, for each file in `map`, move the cursor into the tree to the
210 corresponding folder of the file: for example, if the very first file
208 corresponding folder of the file: for example, if the very first file
211 is "a/b/c", we start from `Node[""]`, create `Node["a"]` which points to
209 is "a/b/c", we start from `Node[""]`, create `Node["a"]` which points to
212 its parent `Node[""]`, then create `Node["a/b"]`, which points to its parent
210 its parent `Node[""]`, then create `Node["a/b"]`, which points to its parent
213 `Node["a"]`. These nodes are kept around in a stack.
211 `Node["a"]`. These nodes are kept around in a stack.
214 If the next file in `map` is in the same subtree ("a/b/d" or "a/b/e/f"), we
212 If the next file in `map` is in the same subtree ("a/b/d" or "a/b/e/f"), we
215 add it to the stack and keep looping with the same logic of creating the
213 add it to the stack and keep looping with the same logic of creating the
216 tree nodes as needed. If however the next file in `map` is *not* in the same
214 tree nodes as needed. If however the next file in `map` is *not* in the same
217 subtree ("a/other", if we're still in the "a/b" folder), then we know that
215 subtree ("a/other", if we're still in the "a/b" folder), then we know that
218 the subtree we're in is complete.
216 the subtree we're in is complete.
219
217
220 ## Writing the subtree
218 ## Writing the subtree
221
219
222 We have the entire subtree in the stack, so we start writing it to disk
220 We have the entire subtree in the stack, so we start writing it to disk
223 folder by folder. The way we write a folder is to pop the stack into a list
221 folder by folder. The way we write a folder is to pop the stack into a list
224 until the folder changes, revert this list of direct children (to satisfy
222 until the folder changes, revert this list of direct children (to satisfy
225 the format requirement that children be sorted). This process repeats until
223 the format requirement that children be sorted). This process repeats until
226 we hit the "other" subtree.
224 we hit the "other" subtree.
227
225
228 An example:
226 An example:
229 a
227 a
230 dir1/b
228 dir1/b
231 dir1/c
229 dir1/c
232 dir2/dir3/d
230 dir2/dir3/d
233 dir2/dir3/e
231 dir2/dir3/e
234 dir2/f
232 dir2/f
235
233
236 Would have us:
234 Would have us:
237 - add to the stack until "dir2/dir3/e"
235 - add to the stack until "dir2/dir3/e"
238 - realize that "dir2/f" is in a different subtree
236 - realize that "dir2/f" is in a different subtree
239 - pop "dir2/dir3/e", "dir2/dir3/d", reverse them so they're sorted and
237 - pop "dir2/dir3/e", "dir2/dir3/d", reverse them so they're sorted and
240 pack them since the next entry is "dir2/dir3"
238 pack them since the next entry is "dir2/dir3"
241 - go back up to "dir2"
239 - go back up to "dir2"
242 - add "dir2/f" to the stack
240 - add "dir2/f" to the stack
243 - realize we're done with the map
241 - realize we're done with the map
244 - pop "dir2/f", "dir2/dir3" from the stack, reverse and pack them
242 - pop "dir2/f", "dir2/dir3" from the stack, reverse and pack them
245 - go up to the root node, do the same to write "a", "dir1" and "dir2" in
243 - go up to the root node, do the same to write "a", "dir1" and "dir2" in
246 that order
244 that order
247
245
248 ## Special case for the root node
246 ## Special case for the root node
249
247
250 The root node is not serialized in the format, but its information is
248 The root node is not serialized in the format, but its information is
251 written to the docket. Again, see more details on the on-disk format in
249 written to the docket. Again, see more details on the on-disk format in
252 `mercurial/helptext/internals/dirstate-v2`.
250 `mercurial/helptext/internals/dirstate-v2`.
253 """
251 """
254 data = bytearray()
252 data = bytearray()
255 root_nodes_start = 0
253 root_nodes_start = 0
256 root_nodes_len = 0
254 root_nodes_len = 0
257 nodes_with_entry_count = 0
255 nodes_with_entry_count = 0
258 nodes_with_copy_source_count = 0
256 nodes_with_copy_source_count = 0
259 # Will always be 0 since this implementation always re-writes everything
257 # Will always be 0 since this implementation always re-writes everything
260 # to disk
258 # to disk
261 unreachable_bytes = 0
259 unreachable_bytes = 0
262 unused = b'\x00' * 4
260 unused = b'\x00' * 4
263 # This is an optimization that's only useful for the Rust implementation
261 # This is an optimization that's only useful for the Rust implementation
264 ignore_patterns_hash = b'\x00' * 20
262 ignore_patterns_hash = b'\x00' * 20
265
263
266 if len(map) == 0:
264 if len(map) == 0:
267 tree_metadata = TREE_METADATA.pack(
265 tree_metadata = TREE_METADATA.pack(
268 root_nodes_start,
266 root_nodes_start,
269 root_nodes_len,
267 root_nodes_len,
270 nodes_with_entry_count,
268 nodes_with_entry_count,
271 nodes_with_copy_source_count,
269 nodes_with_copy_source_count,
272 unreachable_bytes,
270 unreachable_bytes,
273 unused,
271 unused,
274 ignore_patterns_hash,
272 ignore_patterns_hash,
275 )
273 )
276 return data, tree_metadata
274 return data, tree_metadata
277
275
278 sorted_map = sorted(map.items(), key=lambda x: x[0])
276 sorted_map = sorted(map.items(), key=lambda x: x[0])
279
277
280 # Use a stack to not have to only remember the nodes we currently need
278 # Use a stack to not have to only remember the nodes we currently need
281 # instead of building the entire tree in memory
279 # instead of building the entire tree in memory
282 stack = []
280 stack = []
283 current_node = Node(b"", None)
281 current_node = Node(b"", None)
284 stack.append(current_node)
282 stack.append(current_node)
285
283
286 for index, (path, entry) in enumerate(sorted_map, 1):
284 for index, (path, entry) in enumerate(sorted_map, 1):
287 if entry.need_delay(now):
288 # The file was last modified "simultaneously" with the current
289 # write to dirstate (i.e. within the same second for file-
290 # systems with a granularity of 1 sec). This commonly happens
291 # for at least a couple of files on 'update'.
292 # The user could change the file without changing its size
293 # within the same second. Invalidate the file's mtime in
294 # dirstate, forcing future 'status' calls to compare the
295 # contents of the file if the size is the same. This prevents
296 # mistakenly treating such files as clean.
297 entry.set_possibly_dirty()
298 nodes_with_entry_count += 1
285 nodes_with_entry_count += 1
299 if path in copy_map:
286 if path in copy_map:
300 nodes_with_copy_source_count += 1
287 nodes_with_copy_source_count += 1
301 current_folder = get_folder(path)
288 current_folder = get_folder(path)
302 current_node = move_to_correct_node_in_tree(
289 current_node = move_to_correct_node_in_tree(
303 current_folder, current_node, stack
290 current_folder, current_node, stack
304 )
291 )
305
292
306 current_node.children_count += 1
293 current_node.children_count += 1
307 # Entries from `map` are never `None`
294 # Entries from `map` are never `None`
308 if entry.tracked:
295 if entry.tracked:
309 current_node.tracked_descendants += 1
296 current_node.tracked_descendants += 1
310 current_node.descendants_with_entry += 1
297 current_node.descendants_with_entry += 1
311 stack.append(Node(path, entry, current_node))
298 stack.append(Node(path, entry, current_node))
312
299
313 should_pack = True
300 should_pack = True
314 next_path = None
301 next_path = None
315 if index < len(sorted_map):
302 if index < len(sorted_map):
316 # Determine if the next entry is in the same sub-tree, if so don't
303 # Determine if the next entry is in the same sub-tree, if so don't
317 # pack yet
304 # pack yet
318 next_path = sorted_map[index][0]
305 next_path = sorted_map[index][0]
319 should_pack = not get_folder(next_path).startswith(current_folder)
306 should_pack = not get_folder(next_path).startswith(current_folder)
320 if should_pack:
307 if should_pack:
321 pack_directory_children(current_node, copy_map, data, stack)
308 pack_directory_children(current_node, copy_map, data, stack)
322 while stack and current_node.path != b"":
309 while stack and current_node.path != b"":
323 # Go up the tree and write until we reach the folder of the next
310 # Go up the tree and write until we reach the folder of the next
324 # entry (if any, otherwise the root)
311 # entry (if any, otherwise the root)
325 parent = current_node.parent
312 parent = current_node.parent
326 in_parent_folder_of_next_entry = next_path is not None and (
313 in_parent_folder_of_next_entry = next_path is not None and (
327 get_folder(next_path).startswith(get_folder(stack[-1].path))
314 get_folder(next_path).startswith(get_folder(stack[-1].path))
328 )
315 )
329 if parent is None or in_parent_folder_of_next_entry:
316 if parent is None or in_parent_folder_of_next_entry:
330 break
317 break
331 pack_directory_children(parent, copy_map, data, stack)
318 pack_directory_children(parent, copy_map, data, stack)
332 current_node = parent
319 current_node = parent
333
320
334 # Special case for the root node since we don't write it to disk, only its
321 # Special case for the root node since we don't write it to disk, only its
335 # children to the docket
322 # children to the docket
336 current_node = stack.pop()
323 current_node = stack.pop()
337 assert current_node.path == b"", current_node.path
324 assert current_node.path == b"", current_node.path
338 assert len(stack) == 0, len(stack)
325 assert len(stack) == 0, len(stack)
339
326
340 tree_metadata = TREE_METADATA.pack(
327 tree_metadata = TREE_METADATA.pack(
341 current_node.children_offset,
328 current_node.children_offset,
342 current_node.children_count,
329 current_node.children_count,
343 nodes_with_entry_count,
330 nodes_with_entry_count,
344 nodes_with_copy_source_count,
331 nodes_with_copy_source_count,
345 unreachable_bytes,
332 unreachable_bytes,
346 unused,
333 unused,
347 ignore_patterns_hash,
334 ignore_patterns_hash,
348 )
335 )
349
336
350 return data, tree_metadata
337 return data, tree_metadata
351
338
352
339
353 def get_folder(path):
340 def get_folder(path):
354 """
341 """
355 Return the folder of the path that's given, an empty string for root paths.
342 Return the folder of the path that's given, an empty string for root paths.
356 """
343 """
357 return path.rsplit(b'/', 1)[0] if b'/' in path else b''
344 return path.rsplit(b'/', 1)[0] if b'/' in path else b''
358
345
359
346
360 def move_to_correct_node_in_tree(target_folder, current_node, stack):
347 def move_to_correct_node_in_tree(target_folder, current_node, stack):
361 """
348 """
362 Move inside the dirstate node tree to the node corresponding to
349 Move inside the dirstate node tree to the node corresponding to
363 `target_folder`, creating the missing nodes along the way if needed.
350 `target_folder`, creating the missing nodes along the way if needed.
364 """
351 """
365 while target_folder != current_node.path:
352 while target_folder != current_node.path:
366 if target_folder.startswith(current_node.path):
353 if target_folder.startswith(current_node.path):
367 # We need to go down a folder
354 # We need to go down a folder
368 prefix = target_folder[len(current_node.path) :].lstrip(b'/')
355 prefix = target_folder[len(current_node.path) :].lstrip(b'/')
369 subfolder_name = prefix.split(b'/', 1)[0]
356 subfolder_name = prefix.split(b'/', 1)[0]
370 if current_node.path:
357 if current_node.path:
371 subfolder_path = current_node.path + b'/' + subfolder_name
358 subfolder_path = current_node.path + b'/' + subfolder_name
372 else:
359 else:
373 subfolder_path = subfolder_name
360 subfolder_path = subfolder_name
374 next_node = stack[-1]
361 next_node = stack[-1]
375 if next_node.path == target_folder:
362 if next_node.path == target_folder:
376 # This folder is now a file and only contains removed entries
363 # This folder is now a file and only contains removed entries
377 # merge with the last node
364 # merge with the last node
378 current_node = next_node
365 current_node = next_node
379 else:
366 else:
380 current_node.children_count += 1
367 current_node.children_count += 1
381 current_node = Node(subfolder_path, None, current_node)
368 current_node = Node(subfolder_path, None, current_node)
382 stack.append(current_node)
369 stack.append(current_node)
383 else:
370 else:
384 # We need to go up a folder
371 # We need to go up a folder
385 current_node = current_node.parent
372 current_node = current_node.parent
386 return current_node
373 return current_node
387
374
388
375
389 def pack_directory_children(node, copy_map, data, stack):
376 def pack_directory_children(node, copy_map, data, stack):
390 """
377 """
391 Write the binary representation of the direct sorted children of `node` to
378 Write the binary representation of the direct sorted children of `node` to
392 `data`
379 `data`
393 """
380 """
394 direct_children = []
381 direct_children = []
395
382
396 while stack[-1].path != b"" and get_folder(stack[-1].path) == node.path:
383 while stack[-1].path != b"" and get_folder(stack[-1].path) == node.path:
397 direct_children.append(stack.pop())
384 direct_children.append(stack.pop())
398 if not direct_children:
385 if not direct_children:
399 raise error.ProgrammingError(b"no direct children for %r" % node.path)
386 raise error.ProgrammingError(b"no direct children for %r" % node.path)
400
387
401 # Reverse the stack to get the correct sorted order
388 # Reverse the stack to get the correct sorted order
402 direct_children.reverse()
389 direct_children.reverse()
403 packed_children = bytearray()
390 packed_children = bytearray()
404 # Write the paths to `data`. Pack child nodes but don't write them yet
391 # Write the paths to `data`. Pack child nodes but don't write them yet
405 for child in direct_children:
392 for child in direct_children:
406 packed = child.pack(copy_map=copy_map, paths_offset=len(data))
393 packed = child.pack(copy_map=copy_map, paths_offset=len(data))
407 packed_children.extend(packed)
394 packed_children.extend(packed)
408 data.extend(child.path)
395 data.extend(child.path)
409 data.extend(copy_map.get(child.path, b""))
396 data.extend(copy_map.get(child.path, b""))
410 node.tracked_descendants += child.tracked_descendants
397 node.tracked_descendants += child.tracked_descendants
411 node.descendants_with_entry += child.descendants_with_entry
398 node.descendants_with_entry += child.descendants_with_entry
412 # Write the fixed-size child nodes all together
399 # Write the fixed-size child nodes all together
413 node.children_offset = len(data)
400 node.children_offset = len(data)
414 data.extend(packed_children)
401 data.extend(packed_children)
@@ -1,937 +1,921 b''
1 # parsers.py - Python implementation of parsers.c
1 # parsers.py - Python implementation of parsers.c
2 #
2 #
3 # Copyright 2009 Olivia Mackall <olivia@selenic.com> and others
3 # Copyright 2009 Olivia Mackall <olivia@selenic.com> and others
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import stat
10 import stat
11 import struct
11 import struct
12 import zlib
12 import zlib
13
13
14 from ..node import (
14 from ..node import (
15 nullrev,
15 nullrev,
16 sha1nodeconstants,
16 sha1nodeconstants,
17 )
17 )
18 from ..thirdparty import attr
18 from ..thirdparty import attr
19 from .. import (
19 from .. import (
20 error,
20 error,
21 pycompat,
21 pycompat,
22 revlogutils,
22 revlogutils,
23 util,
23 util,
24 )
24 )
25
25
26 from ..revlogutils import nodemap as nodemaputil
26 from ..revlogutils import nodemap as nodemaputil
27 from ..revlogutils import constants as revlog_constants
27 from ..revlogutils import constants as revlog_constants
28
28
29 stringio = pycompat.bytesio
29 stringio = pycompat.bytesio
30
30
31
31
32 _pack = struct.pack
32 _pack = struct.pack
33 _unpack = struct.unpack
33 _unpack = struct.unpack
34 _compress = zlib.compress
34 _compress = zlib.compress
35 _decompress = zlib.decompress
35 _decompress = zlib.decompress
36
36
37
37
38 # a special value used internally for `size` if the file come from the other parent
38 # a special value used internally for `size` if the file come from the other parent
39 FROM_P2 = -2
39 FROM_P2 = -2
40
40
41 # a special value used internally for `size` if the file is modified/merged/added
41 # a special value used internally for `size` if the file is modified/merged/added
42 NONNORMAL = -1
42 NONNORMAL = -1
43
43
44 # a special value used internally for `time` if the time is ambigeous
44 # a special value used internally for `time` if the time is ambigeous
45 AMBIGUOUS_TIME = -1
45 AMBIGUOUS_TIME = -1
46
46
47 # Bits of the `flags` byte inside a node in the file format
47 # Bits of the `flags` byte inside a node in the file format
48 DIRSTATE_V2_WDIR_TRACKED = 1 << 0
48 DIRSTATE_V2_WDIR_TRACKED = 1 << 0
49 DIRSTATE_V2_P1_TRACKED = 1 << 1
49 DIRSTATE_V2_P1_TRACKED = 1 << 1
50 DIRSTATE_V2_P2_INFO = 1 << 2
50 DIRSTATE_V2_P2_INFO = 1 << 2
51 DIRSTATE_V2_MODE_EXEC_PERM = 1 << 3
51 DIRSTATE_V2_MODE_EXEC_PERM = 1 << 3
52 DIRSTATE_V2_MODE_IS_SYMLINK = 1 << 4
52 DIRSTATE_V2_MODE_IS_SYMLINK = 1 << 4
53 DIRSTATE_V2_HAS_FALLBACK_EXEC = 1 << 5
53 DIRSTATE_V2_HAS_FALLBACK_EXEC = 1 << 5
54 DIRSTATE_V2_FALLBACK_EXEC = 1 << 6
54 DIRSTATE_V2_FALLBACK_EXEC = 1 << 6
55 DIRSTATE_V2_HAS_FALLBACK_SYMLINK = 1 << 7
55 DIRSTATE_V2_HAS_FALLBACK_SYMLINK = 1 << 7
56 DIRSTATE_V2_FALLBACK_SYMLINK = 1 << 8
56 DIRSTATE_V2_FALLBACK_SYMLINK = 1 << 8
57 DIRSTATE_V2_EXPECTED_STATE_IS_MODIFIED = 1 << 9
57 DIRSTATE_V2_EXPECTED_STATE_IS_MODIFIED = 1 << 9
58 DIRSTATE_V2_HAS_MODE_AND_SIZE = 1 << 10
58 DIRSTATE_V2_HAS_MODE_AND_SIZE = 1 << 10
59 DIRSTATE_V2_HAS_MTIME = 1 << 11
59 DIRSTATE_V2_HAS_MTIME = 1 << 11
60 DIRSTATE_V2_MTIME_SECOND_AMBIGUOUS = 1 << 12
60 DIRSTATE_V2_MTIME_SECOND_AMBIGUOUS = 1 << 12
61 DIRSTATE_V2_DIRECTORY = 1 << 13
61 DIRSTATE_V2_DIRECTORY = 1 << 13
62 DIRSTATE_V2_ALL_UNKNOWN_RECORDED = 1 << 14
62 DIRSTATE_V2_ALL_UNKNOWN_RECORDED = 1 << 14
63 DIRSTATE_V2_ALL_IGNORED_RECORDED = 1 << 15
63 DIRSTATE_V2_ALL_IGNORED_RECORDED = 1 << 15
64
64
65
65
66 @attr.s(slots=True, init=False)
66 @attr.s(slots=True, init=False)
67 class DirstateItem(object):
67 class DirstateItem(object):
68 """represent a dirstate entry
68 """represent a dirstate entry
69
69
70 It hold multiple attributes
70 It hold multiple attributes
71
71
72 # about file tracking
72 # about file tracking
73 - wc_tracked: is the file tracked by the working copy
73 - wc_tracked: is the file tracked by the working copy
74 - p1_tracked: is the file tracked in working copy first parent
74 - p1_tracked: is the file tracked in working copy first parent
75 - p2_info: the file has been involved in some merge operation. Either
75 - p2_info: the file has been involved in some merge operation. Either
76 because it was actually merged, or because the p2 version was
76 because it was actually merged, or because the p2 version was
77 ahead, or because some rename moved it there. In either case
77 ahead, or because some rename moved it there. In either case
78 `hg status` will want it displayed as modified.
78 `hg status` will want it displayed as modified.
79
79
80 # about the file state expected from p1 manifest:
80 # about the file state expected from p1 manifest:
81 - mode: the file mode in p1
81 - mode: the file mode in p1
82 - size: the file size in p1
82 - size: the file size in p1
83
83
84 These value can be set to None, which mean we don't have a meaningful value
84 These value can be set to None, which mean we don't have a meaningful value
85 to compare with. Either because we don't really care about them as there
85 to compare with. Either because we don't really care about them as there
86 `status` is known without having to look at the disk or because we don't
86 `status` is known without having to look at the disk or because we don't
87 know these right now and a full comparison will be needed to find out if
87 know these right now and a full comparison will be needed to find out if
88 the file is clean.
88 the file is clean.
89
89
90 # about the file state on disk last time we saw it:
90 # about the file state on disk last time we saw it:
91 - mtime: the last known clean mtime for the file.
91 - mtime: the last known clean mtime for the file.
92
92
93 This value can be set to None if no cachable state exist. Either because we
93 This value can be set to None if no cachable state exist. Either because we
94 do not care (see previous section) or because we could not cache something
94 do not care (see previous section) or because we could not cache something
95 yet.
95 yet.
96 """
96 """
97
97
98 _wc_tracked = attr.ib()
98 _wc_tracked = attr.ib()
99 _p1_tracked = attr.ib()
99 _p1_tracked = attr.ib()
100 _p2_info = attr.ib()
100 _p2_info = attr.ib()
101 _mode = attr.ib()
101 _mode = attr.ib()
102 _size = attr.ib()
102 _size = attr.ib()
103 _mtime_s = attr.ib()
103 _mtime_s = attr.ib()
104 _mtime_ns = attr.ib()
104 _mtime_ns = attr.ib()
105 _fallback_exec = attr.ib()
105 _fallback_exec = attr.ib()
106 _fallback_symlink = attr.ib()
106 _fallback_symlink = attr.ib()
107
107
108 def __init__(
108 def __init__(
109 self,
109 self,
110 wc_tracked=False,
110 wc_tracked=False,
111 p1_tracked=False,
111 p1_tracked=False,
112 p2_info=False,
112 p2_info=False,
113 has_meaningful_data=True,
113 has_meaningful_data=True,
114 has_meaningful_mtime=True,
114 has_meaningful_mtime=True,
115 parentfiledata=None,
115 parentfiledata=None,
116 fallback_exec=None,
116 fallback_exec=None,
117 fallback_symlink=None,
117 fallback_symlink=None,
118 ):
118 ):
119 self._wc_tracked = wc_tracked
119 self._wc_tracked = wc_tracked
120 self._p1_tracked = p1_tracked
120 self._p1_tracked = p1_tracked
121 self._p2_info = p2_info
121 self._p2_info = p2_info
122
122
123 self._fallback_exec = fallback_exec
123 self._fallback_exec = fallback_exec
124 self._fallback_symlink = fallback_symlink
124 self._fallback_symlink = fallback_symlink
125
125
126 self._mode = None
126 self._mode = None
127 self._size = None
127 self._size = None
128 self._mtime_s = None
128 self._mtime_s = None
129 self._mtime_ns = None
129 self._mtime_ns = None
130 if parentfiledata is None:
130 if parentfiledata is None:
131 has_meaningful_mtime = False
131 has_meaningful_mtime = False
132 has_meaningful_data = False
132 has_meaningful_data = False
133 elif parentfiledata[2] is None:
133 elif parentfiledata[2] is None:
134 has_meaningful_mtime = False
134 has_meaningful_mtime = False
135 if has_meaningful_data:
135 if has_meaningful_data:
136 self._mode = parentfiledata[0]
136 self._mode = parentfiledata[0]
137 self._size = parentfiledata[1]
137 self._size = parentfiledata[1]
138 if has_meaningful_mtime:
138 if has_meaningful_mtime:
139 self._mtime_s, self._mtime_ns = parentfiledata[2]
139 self._mtime_s, self._mtime_ns = parentfiledata[2]
140
140
141 @classmethod
141 @classmethod
142 def from_v2_data(cls, flags, size, mtime_s, mtime_ns):
142 def from_v2_data(cls, flags, size, mtime_s, mtime_ns):
143 """Build a new DirstateItem object from V2 data"""
143 """Build a new DirstateItem object from V2 data"""
144 has_mode_size = bool(flags & DIRSTATE_V2_HAS_MODE_AND_SIZE)
144 has_mode_size = bool(flags & DIRSTATE_V2_HAS_MODE_AND_SIZE)
145 has_meaningful_mtime = bool(flags & DIRSTATE_V2_HAS_MTIME)
145 has_meaningful_mtime = bool(flags & DIRSTATE_V2_HAS_MTIME)
146 if flags & DIRSTATE_V2_MTIME_SECOND_AMBIGUOUS:
146 if flags & DIRSTATE_V2_MTIME_SECOND_AMBIGUOUS:
147 # The current code is not able to do the more subtle comparison that the
147 # The current code is not able to do the more subtle comparison that the
148 # MTIME_SECOND_AMBIGUOUS requires. So we ignore the mtime
148 # MTIME_SECOND_AMBIGUOUS requires. So we ignore the mtime
149 has_meaningful_mtime = False
149 has_meaningful_mtime = False
150 mode = None
150 mode = None
151
151
152 if flags & +DIRSTATE_V2_EXPECTED_STATE_IS_MODIFIED:
152 if flags & +DIRSTATE_V2_EXPECTED_STATE_IS_MODIFIED:
153 # we do not have support for this flag in the code yet,
153 # we do not have support for this flag in the code yet,
154 # force a lookup for this file.
154 # force a lookup for this file.
155 has_mode_size = False
155 has_mode_size = False
156 has_meaningful_mtime = False
156 has_meaningful_mtime = False
157
157
158 fallback_exec = None
158 fallback_exec = None
159 if flags & DIRSTATE_V2_HAS_FALLBACK_EXEC:
159 if flags & DIRSTATE_V2_HAS_FALLBACK_EXEC:
160 fallback_exec = flags & DIRSTATE_V2_FALLBACK_EXEC
160 fallback_exec = flags & DIRSTATE_V2_FALLBACK_EXEC
161
161
162 fallback_symlink = None
162 fallback_symlink = None
163 if flags & DIRSTATE_V2_HAS_FALLBACK_SYMLINK:
163 if flags & DIRSTATE_V2_HAS_FALLBACK_SYMLINK:
164 fallback_symlink = flags & DIRSTATE_V2_FALLBACK_SYMLINK
164 fallback_symlink = flags & DIRSTATE_V2_FALLBACK_SYMLINK
165
165
166 if has_mode_size:
166 if has_mode_size:
167 assert stat.S_IXUSR == 0o100
167 assert stat.S_IXUSR == 0o100
168 if flags & DIRSTATE_V2_MODE_EXEC_PERM:
168 if flags & DIRSTATE_V2_MODE_EXEC_PERM:
169 mode = 0o755
169 mode = 0o755
170 else:
170 else:
171 mode = 0o644
171 mode = 0o644
172 if flags & DIRSTATE_V2_MODE_IS_SYMLINK:
172 if flags & DIRSTATE_V2_MODE_IS_SYMLINK:
173 mode |= stat.S_IFLNK
173 mode |= stat.S_IFLNK
174 else:
174 else:
175 mode |= stat.S_IFREG
175 mode |= stat.S_IFREG
176 return cls(
176 return cls(
177 wc_tracked=bool(flags & DIRSTATE_V2_WDIR_TRACKED),
177 wc_tracked=bool(flags & DIRSTATE_V2_WDIR_TRACKED),
178 p1_tracked=bool(flags & DIRSTATE_V2_P1_TRACKED),
178 p1_tracked=bool(flags & DIRSTATE_V2_P1_TRACKED),
179 p2_info=bool(flags & DIRSTATE_V2_P2_INFO),
179 p2_info=bool(flags & DIRSTATE_V2_P2_INFO),
180 has_meaningful_data=has_mode_size,
180 has_meaningful_data=has_mode_size,
181 has_meaningful_mtime=has_meaningful_mtime,
181 has_meaningful_mtime=has_meaningful_mtime,
182 parentfiledata=(mode, size, (mtime_s, mtime_ns)),
182 parentfiledata=(mode, size, (mtime_s, mtime_ns)),
183 fallback_exec=fallback_exec,
183 fallback_exec=fallback_exec,
184 fallback_symlink=fallback_symlink,
184 fallback_symlink=fallback_symlink,
185 )
185 )
186
186
187 @classmethod
187 @classmethod
188 def from_v1_data(cls, state, mode, size, mtime):
188 def from_v1_data(cls, state, mode, size, mtime):
189 """Build a new DirstateItem object from V1 data
189 """Build a new DirstateItem object from V1 data
190
190
191 Since the dirstate-v1 format is frozen, the signature of this function
191 Since the dirstate-v1 format is frozen, the signature of this function
192 is not expected to change, unlike the __init__ one.
192 is not expected to change, unlike the __init__ one.
193 """
193 """
194 if state == b'm':
194 if state == b'm':
195 return cls(wc_tracked=True, p1_tracked=True, p2_info=True)
195 return cls(wc_tracked=True, p1_tracked=True, p2_info=True)
196 elif state == b'a':
196 elif state == b'a':
197 return cls(wc_tracked=True)
197 return cls(wc_tracked=True)
198 elif state == b'r':
198 elif state == b'r':
199 if size == NONNORMAL:
199 if size == NONNORMAL:
200 p1_tracked = True
200 p1_tracked = True
201 p2_info = True
201 p2_info = True
202 elif size == FROM_P2:
202 elif size == FROM_P2:
203 p1_tracked = False
203 p1_tracked = False
204 p2_info = True
204 p2_info = True
205 else:
205 else:
206 p1_tracked = True
206 p1_tracked = True
207 p2_info = False
207 p2_info = False
208 return cls(p1_tracked=p1_tracked, p2_info=p2_info)
208 return cls(p1_tracked=p1_tracked, p2_info=p2_info)
209 elif state == b'n':
209 elif state == b'n':
210 if size == FROM_P2:
210 if size == FROM_P2:
211 return cls(wc_tracked=True, p2_info=True)
211 return cls(wc_tracked=True, p2_info=True)
212 elif size == NONNORMAL:
212 elif size == NONNORMAL:
213 return cls(wc_tracked=True, p1_tracked=True)
213 return cls(wc_tracked=True, p1_tracked=True)
214 elif mtime == AMBIGUOUS_TIME:
214 elif mtime == AMBIGUOUS_TIME:
215 return cls(
215 return cls(
216 wc_tracked=True,
216 wc_tracked=True,
217 p1_tracked=True,
217 p1_tracked=True,
218 has_meaningful_mtime=False,
218 has_meaningful_mtime=False,
219 parentfiledata=(mode, size, (42, 0)),
219 parentfiledata=(mode, size, (42, 0)),
220 )
220 )
221 else:
221 else:
222 return cls(
222 return cls(
223 wc_tracked=True,
223 wc_tracked=True,
224 p1_tracked=True,
224 p1_tracked=True,
225 parentfiledata=(mode, size, (mtime, 0)),
225 parentfiledata=(mode, size, (mtime, 0)),
226 )
226 )
227 else:
227 else:
228 raise RuntimeError(b'unknown state: %s' % state)
228 raise RuntimeError(b'unknown state: %s' % state)
229
229
230 def set_possibly_dirty(self):
230 def set_possibly_dirty(self):
231 """Mark a file as "possibly dirty"
231 """Mark a file as "possibly dirty"
232
232
233 This means the next status call will have to actually check its content
233 This means the next status call will have to actually check its content
234 to make sure it is correct.
234 to make sure it is correct.
235 """
235 """
236 self._mtime_s = None
236 self._mtime_s = None
237 self._mtime_ns = None
237 self._mtime_ns = None
238
238
239 def set_clean(self, mode, size, mtime):
239 def set_clean(self, mode, size, mtime):
240 """mark a file as "clean" cancelling potential "possibly dirty call"
240 """mark a file as "clean" cancelling potential "possibly dirty call"
241
241
242 Note: this function is a descendant of `dirstate.normal` and is
242 Note: this function is a descendant of `dirstate.normal` and is
243 currently expected to be call on "normal" entry only. There are not
243 currently expected to be call on "normal" entry only. There are not
244 reason for this to not change in the future as long as the ccode is
244 reason for this to not change in the future as long as the ccode is
245 updated to preserve the proper state of the non-normal files.
245 updated to preserve the proper state of the non-normal files.
246 """
246 """
247 self._wc_tracked = True
247 self._wc_tracked = True
248 self._p1_tracked = True
248 self._p1_tracked = True
249 self._mode = mode
249 self._mode = mode
250 self._size = size
250 self._size = size
251 self._mtime_s, self._mtime_ns = mtime
251 self._mtime_s, self._mtime_ns = mtime
252
252
253 def set_tracked(self):
253 def set_tracked(self):
254 """mark a file as tracked in the working copy
254 """mark a file as tracked in the working copy
255
255
256 This will ultimately be called by command like `hg add`.
256 This will ultimately be called by command like `hg add`.
257 """
257 """
258 self._wc_tracked = True
258 self._wc_tracked = True
259 # `set_tracked` is replacing various `normallookup` call. So we mark
259 # `set_tracked` is replacing various `normallookup` call. So we mark
260 # the files as needing lookup
260 # the files as needing lookup
261 #
261 #
262 # Consider dropping this in the future in favor of something less broad.
262 # Consider dropping this in the future in favor of something less broad.
263 self._mtime_s = None
263 self._mtime_s = None
264 self._mtime_ns = None
264 self._mtime_ns = None
265
265
266 def set_untracked(self):
266 def set_untracked(self):
267 """mark a file as untracked in the working copy
267 """mark a file as untracked in the working copy
268
268
269 This will ultimately be called by command like `hg remove`.
269 This will ultimately be called by command like `hg remove`.
270 """
270 """
271 self._wc_tracked = False
271 self._wc_tracked = False
272 self._mode = None
272 self._mode = None
273 self._size = None
273 self._size = None
274 self._mtime_s = None
274 self._mtime_s = None
275 self._mtime_ns = None
275 self._mtime_ns = None
276
276
277 def drop_merge_data(self):
277 def drop_merge_data(self):
278 """remove all "merge-only" from a DirstateItem
278 """remove all "merge-only" from a DirstateItem
279
279
280 This is to be call by the dirstatemap code when the second parent is dropped
280 This is to be call by the dirstatemap code when the second parent is dropped
281 """
281 """
282 if self._p2_info:
282 if self._p2_info:
283 self._p2_info = False
283 self._p2_info = False
284 self._mode = None
284 self._mode = None
285 self._size = None
285 self._size = None
286 self._mtime_s = None
286 self._mtime_s = None
287 self._mtime_ns = None
287 self._mtime_ns = None
288
288
289 @property
289 @property
290 def mode(self):
290 def mode(self):
291 return self.v1_mode()
291 return self.v1_mode()
292
292
293 @property
293 @property
294 def size(self):
294 def size(self):
295 return self.v1_size()
295 return self.v1_size()
296
296
297 @property
297 @property
298 def mtime(self):
298 def mtime(self):
299 return self.v1_mtime()
299 return self.v1_mtime()
300
300
301 def mtime_likely_equal_to(self, other_mtime):
301 def mtime_likely_equal_to(self, other_mtime):
302 self_sec = self._mtime_s
302 self_sec = self._mtime_s
303 if self_sec is None:
303 if self_sec is None:
304 return False
304 return False
305 self_ns = self._mtime_ns
305 self_ns = self._mtime_ns
306 other_sec, other_ns = other_mtime
306 other_sec, other_ns = other_mtime
307 return self_sec == other_sec and (
307 return self_sec == other_sec and (
308 self_ns == other_ns or self_ns == 0 or other_ns == 0
308 self_ns == other_ns or self_ns == 0 or other_ns == 0
309 )
309 )
310
310
311 @property
311 @property
312 def state(self):
312 def state(self):
313 """
313 """
314 States are:
314 States are:
315 n normal
315 n normal
316 m needs merging
316 m needs merging
317 r marked for removal
317 r marked for removal
318 a marked for addition
318 a marked for addition
319
319
320 XXX This "state" is a bit obscure and mostly a direct expression of the
320 XXX This "state" is a bit obscure and mostly a direct expression of the
321 dirstatev1 format. It would make sense to ultimately deprecate it in
321 dirstatev1 format. It would make sense to ultimately deprecate it in
322 favor of the more "semantic" attributes.
322 favor of the more "semantic" attributes.
323 """
323 """
324 if not self.any_tracked:
324 if not self.any_tracked:
325 return b'?'
325 return b'?'
326 return self.v1_state()
326 return self.v1_state()
327
327
328 @property
328 @property
329 def has_fallback_exec(self):
329 def has_fallback_exec(self):
330 """True if "fallback" information are available for the "exec" bit
330 """True if "fallback" information are available for the "exec" bit
331
331
332 Fallback information can be stored in the dirstate to keep track of
332 Fallback information can be stored in the dirstate to keep track of
333 filesystem attribute tracked by Mercurial when the underlying file
333 filesystem attribute tracked by Mercurial when the underlying file
334 system or operating system does not support that property, (e.g.
334 system or operating system does not support that property, (e.g.
335 Windows).
335 Windows).
336
336
337 Not all version of the dirstate on-disk storage support preserving this
337 Not all version of the dirstate on-disk storage support preserving this
338 information.
338 information.
339 """
339 """
340 return self._fallback_exec is not None
340 return self._fallback_exec is not None
341
341
342 @property
342 @property
343 def fallback_exec(self):
343 def fallback_exec(self):
344 """ "fallback" information for the executable bit
344 """ "fallback" information for the executable bit
345
345
346 True if the file should be considered executable when we cannot get
346 True if the file should be considered executable when we cannot get
347 this information from the files system. False if it should be
347 this information from the files system. False if it should be
348 considered non-executable.
348 considered non-executable.
349
349
350 See has_fallback_exec for details."""
350 See has_fallback_exec for details."""
351 return self._fallback_exec
351 return self._fallback_exec
352
352
353 @fallback_exec.setter
353 @fallback_exec.setter
354 def set_fallback_exec(self, value):
354 def set_fallback_exec(self, value):
355 """control "fallback" executable bit
355 """control "fallback" executable bit
356
356
357 Set to:
357 Set to:
358 - True if the file should be considered executable,
358 - True if the file should be considered executable,
359 - False if the file should be considered non-executable,
359 - False if the file should be considered non-executable,
360 - None if we do not have valid fallback data.
360 - None if we do not have valid fallback data.
361
361
362 See has_fallback_exec for details."""
362 See has_fallback_exec for details."""
363 if value is None:
363 if value is None:
364 self._fallback_exec = None
364 self._fallback_exec = None
365 else:
365 else:
366 self._fallback_exec = bool(value)
366 self._fallback_exec = bool(value)
367
367
368 @property
368 @property
369 def has_fallback_symlink(self):
369 def has_fallback_symlink(self):
370 """True if "fallback" information are available for symlink status
370 """True if "fallback" information are available for symlink status
371
371
372 Fallback information can be stored in the dirstate to keep track of
372 Fallback information can be stored in the dirstate to keep track of
373 filesystem attribute tracked by Mercurial when the underlying file
373 filesystem attribute tracked by Mercurial when the underlying file
374 system or operating system does not support that property, (e.g.
374 system or operating system does not support that property, (e.g.
375 Windows).
375 Windows).
376
376
377 Not all version of the dirstate on-disk storage support preserving this
377 Not all version of the dirstate on-disk storage support preserving this
378 information."""
378 information."""
379 return self._fallback_symlink is not None
379 return self._fallback_symlink is not None
380
380
381 @property
381 @property
382 def fallback_symlink(self):
382 def fallback_symlink(self):
383 """ "fallback" information for symlink status
383 """ "fallback" information for symlink status
384
384
385 True if the file should be considered executable when we cannot get
385 True if the file should be considered executable when we cannot get
386 this information from the files system. False if it should be
386 this information from the files system. False if it should be
387 considered non-executable.
387 considered non-executable.
388
388
389 See has_fallback_exec for details."""
389 See has_fallback_exec for details."""
390 return self._fallback_symlink
390 return self._fallback_symlink
391
391
392 @fallback_symlink.setter
392 @fallback_symlink.setter
393 def set_fallback_symlink(self, value):
393 def set_fallback_symlink(self, value):
394 """control "fallback" symlink status
394 """control "fallback" symlink status
395
395
396 Set to:
396 Set to:
397 - True if the file should be considered a symlink,
397 - True if the file should be considered a symlink,
398 - False if the file should be considered not a symlink,
398 - False if the file should be considered not a symlink,
399 - None if we do not have valid fallback data.
399 - None if we do not have valid fallback data.
400
400
401 See has_fallback_symlink for details."""
401 See has_fallback_symlink for details."""
402 if value is None:
402 if value is None:
403 self._fallback_symlink = None
403 self._fallback_symlink = None
404 else:
404 else:
405 self._fallback_symlink = bool(value)
405 self._fallback_symlink = bool(value)
406
406
407 @property
407 @property
408 def tracked(self):
408 def tracked(self):
409 """True is the file is tracked in the working copy"""
409 """True is the file is tracked in the working copy"""
410 return self._wc_tracked
410 return self._wc_tracked
411
411
412 @property
412 @property
413 def any_tracked(self):
413 def any_tracked(self):
414 """True is the file is tracked anywhere (wc or parents)"""
414 """True is the file is tracked anywhere (wc or parents)"""
415 return self._wc_tracked or self._p1_tracked or self._p2_info
415 return self._wc_tracked or self._p1_tracked or self._p2_info
416
416
417 @property
417 @property
418 def added(self):
418 def added(self):
419 """True if the file has been added"""
419 """True if the file has been added"""
420 return self._wc_tracked and not (self._p1_tracked or self._p2_info)
420 return self._wc_tracked and not (self._p1_tracked or self._p2_info)
421
421
422 @property
422 @property
423 def maybe_clean(self):
423 def maybe_clean(self):
424 """True if the file has a chance to be in the "clean" state"""
424 """True if the file has a chance to be in the "clean" state"""
425 if not self._wc_tracked:
425 if not self._wc_tracked:
426 return False
426 return False
427 elif not self._p1_tracked:
427 elif not self._p1_tracked:
428 return False
428 return False
429 elif self._p2_info:
429 elif self._p2_info:
430 return False
430 return False
431 return True
431 return True
432
432
433 @property
433 @property
434 def p1_tracked(self):
434 def p1_tracked(self):
435 """True if the file is tracked in the first parent manifest"""
435 """True if the file is tracked in the first parent manifest"""
436 return self._p1_tracked
436 return self._p1_tracked
437
437
438 @property
438 @property
439 def p2_info(self):
439 def p2_info(self):
440 """True if the file needed to merge or apply any input from p2
440 """True if the file needed to merge or apply any input from p2
441
441
442 See the class documentation for details.
442 See the class documentation for details.
443 """
443 """
444 return self._wc_tracked and self._p2_info
444 return self._wc_tracked and self._p2_info
445
445
446 @property
446 @property
447 def removed(self):
447 def removed(self):
448 """True if the file has been removed"""
448 """True if the file has been removed"""
449 return not self._wc_tracked and (self._p1_tracked or self._p2_info)
449 return not self._wc_tracked and (self._p1_tracked or self._p2_info)
450
450
451 def v2_data(self):
451 def v2_data(self):
452 """Returns (flags, mode, size, mtime) for v2 serialization"""
452 """Returns (flags, mode, size, mtime) for v2 serialization"""
453 flags = 0
453 flags = 0
454 if self._wc_tracked:
454 if self._wc_tracked:
455 flags |= DIRSTATE_V2_WDIR_TRACKED
455 flags |= DIRSTATE_V2_WDIR_TRACKED
456 if self._p1_tracked:
456 if self._p1_tracked:
457 flags |= DIRSTATE_V2_P1_TRACKED
457 flags |= DIRSTATE_V2_P1_TRACKED
458 if self._p2_info:
458 if self._p2_info:
459 flags |= DIRSTATE_V2_P2_INFO
459 flags |= DIRSTATE_V2_P2_INFO
460 if self._mode is not None and self._size is not None:
460 if self._mode is not None and self._size is not None:
461 flags |= DIRSTATE_V2_HAS_MODE_AND_SIZE
461 flags |= DIRSTATE_V2_HAS_MODE_AND_SIZE
462 if self.mode & stat.S_IXUSR:
462 if self.mode & stat.S_IXUSR:
463 flags |= DIRSTATE_V2_MODE_EXEC_PERM
463 flags |= DIRSTATE_V2_MODE_EXEC_PERM
464 if stat.S_ISLNK(self.mode):
464 if stat.S_ISLNK(self.mode):
465 flags |= DIRSTATE_V2_MODE_IS_SYMLINK
465 flags |= DIRSTATE_V2_MODE_IS_SYMLINK
466 if self._mtime_s is not None:
466 if self._mtime_s is not None:
467 flags |= DIRSTATE_V2_HAS_MTIME
467 flags |= DIRSTATE_V2_HAS_MTIME
468
468
469 if self._fallback_exec is not None:
469 if self._fallback_exec is not None:
470 flags |= DIRSTATE_V2_HAS_FALLBACK_EXEC
470 flags |= DIRSTATE_V2_HAS_FALLBACK_EXEC
471 if self._fallback_exec:
471 if self._fallback_exec:
472 flags |= DIRSTATE_V2_FALLBACK_EXEC
472 flags |= DIRSTATE_V2_FALLBACK_EXEC
473
473
474 if self._fallback_symlink is not None:
474 if self._fallback_symlink is not None:
475 flags |= DIRSTATE_V2_HAS_FALLBACK_SYMLINK
475 flags |= DIRSTATE_V2_HAS_FALLBACK_SYMLINK
476 if self._fallback_symlink:
476 if self._fallback_symlink:
477 flags |= DIRSTATE_V2_FALLBACK_SYMLINK
477 flags |= DIRSTATE_V2_FALLBACK_SYMLINK
478
478
479 # Note: we do not need to do anything regarding
479 # Note: we do not need to do anything regarding
480 # DIRSTATE_V2_ALL_UNKNOWN_RECORDED and DIRSTATE_V2_ALL_IGNORED_RECORDED
480 # DIRSTATE_V2_ALL_UNKNOWN_RECORDED and DIRSTATE_V2_ALL_IGNORED_RECORDED
481 # since we never set _DIRSTATE_V2_HAS_DIRCTORY_MTIME
481 # since we never set _DIRSTATE_V2_HAS_DIRCTORY_MTIME
482 return (flags, self._size or 0, self._mtime_s or 0, self._mtime_ns or 0)
482 return (flags, self._size or 0, self._mtime_s or 0, self._mtime_ns or 0)
483
483
484 def v1_state(self):
484 def v1_state(self):
485 """return a "state" suitable for v1 serialization"""
485 """return a "state" suitable for v1 serialization"""
486 if not self.any_tracked:
486 if not self.any_tracked:
487 # the object has no state to record, this is -currently-
487 # the object has no state to record, this is -currently-
488 # unsupported
488 # unsupported
489 raise RuntimeError('untracked item')
489 raise RuntimeError('untracked item')
490 elif self.removed:
490 elif self.removed:
491 return b'r'
491 return b'r'
492 elif self._p1_tracked and self._p2_info:
492 elif self._p1_tracked and self._p2_info:
493 return b'm'
493 return b'm'
494 elif self.added:
494 elif self.added:
495 return b'a'
495 return b'a'
496 else:
496 else:
497 return b'n'
497 return b'n'
498
498
499 def v1_mode(self):
499 def v1_mode(self):
500 """return a "mode" suitable for v1 serialization"""
500 """return a "mode" suitable for v1 serialization"""
501 return self._mode if self._mode is not None else 0
501 return self._mode if self._mode is not None else 0
502
502
503 def v1_size(self):
503 def v1_size(self):
504 """return a "size" suitable for v1 serialization"""
504 """return a "size" suitable for v1 serialization"""
505 if not self.any_tracked:
505 if not self.any_tracked:
506 # the object has no state to record, this is -currently-
506 # the object has no state to record, this is -currently-
507 # unsupported
507 # unsupported
508 raise RuntimeError('untracked item')
508 raise RuntimeError('untracked item')
509 elif self.removed and self._p1_tracked and self._p2_info:
509 elif self.removed and self._p1_tracked and self._p2_info:
510 return NONNORMAL
510 return NONNORMAL
511 elif self._p2_info:
511 elif self._p2_info:
512 return FROM_P2
512 return FROM_P2
513 elif self.removed:
513 elif self.removed:
514 return 0
514 return 0
515 elif self.added:
515 elif self.added:
516 return NONNORMAL
516 return NONNORMAL
517 elif self._size is None:
517 elif self._size is None:
518 return NONNORMAL
518 return NONNORMAL
519 else:
519 else:
520 return self._size
520 return self._size
521
521
522 def v1_mtime(self):
522 def v1_mtime(self):
523 """return a "mtime" suitable for v1 serialization"""
523 """return a "mtime" suitable for v1 serialization"""
524 if not self.any_tracked:
524 if not self.any_tracked:
525 # the object has no state to record, this is -currently-
525 # the object has no state to record, this is -currently-
526 # unsupported
526 # unsupported
527 raise RuntimeError('untracked item')
527 raise RuntimeError('untracked item')
528 elif self.removed:
528 elif self.removed:
529 return 0
529 return 0
530 elif self._mtime_s is None:
530 elif self._mtime_s is None:
531 return AMBIGUOUS_TIME
531 return AMBIGUOUS_TIME
532 elif self._p2_info:
532 elif self._p2_info:
533 return AMBIGUOUS_TIME
533 return AMBIGUOUS_TIME
534 elif not self._p1_tracked:
534 elif not self._p1_tracked:
535 return AMBIGUOUS_TIME
535 return AMBIGUOUS_TIME
536 else:
536 else:
537 return self._mtime_s
537 return self._mtime_s
538
538
539 def need_delay(self, now):
540 """True if the stored mtime would be ambiguous with the current time"""
541 return self.v1_state() == b'n' and self._mtime_s == now[0]
542
543
539
544 def gettype(q):
540 def gettype(q):
545 return int(q & 0xFFFF)
541 return int(q & 0xFFFF)
546
542
547
543
548 class BaseIndexObject(object):
544 class BaseIndexObject(object):
549 # Can I be passed to an algorithme implemented in Rust ?
545 # Can I be passed to an algorithme implemented in Rust ?
550 rust_ext_compat = 0
546 rust_ext_compat = 0
551 # Format of an index entry according to Python's `struct` language
547 # Format of an index entry according to Python's `struct` language
552 index_format = revlog_constants.INDEX_ENTRY_V1
548 index_format = revlog_constants.INDEX_ENTRY_V1
553 # Size of a C unsigned long long int, platform independent
549 # Size of a C unsigned long long int, platform independent
554 big_int_size = struct.calcsize(b'>Q')
550 big_int_size = struct.calcsize(b'>Q')
555 # Size of a C long int, platform independent
551 # Size of a C long int, platform independent
556 int_size = struct.calcsize(b'>i')
552 int_size = struct.calcsize(b'>i')
557 # An empty index entry, used as a default value to be overridden, or nullrev
553 # An empty index entry, used as a default value to be overridden, or nullrev
558 null_item = (
554 null_item = (
559 0,
555 0,
560 0,
556 0,
561 0,
557 0,
562 -1,
558 -1,
563 -1,
559 -1,
564 -1,
560 -1,
565 -1,
561 -1,
566 sha1nodeconstants.nullid,
562 sha1nodeconstants.nullid,
567 0,
563 0,
568 0,
564 0,
569 revlog_constants.COMP_MODE_INLINE,
565 revlog_constants.COMP_MODE_INLINE,
570 revlog_constants.COMP_MODE_INLINE,
566 revlog_constants.COMP_MODE_INLINE,
571 )
567 )
572
568
573 @util.propertycache
569 @util.propertycache
574 def entry_size(self):
570 def entry_size(self):
575 return self.index_format.size
571 return self.index_format.size
576
572
577 @property
573 @property
578 def nodemap(self):
574 def nodemap(self):
579 msg = b"index.nodemap is deprecated, use index.[has_node|rev|get_rev]"
575 msg = b"index.nodemap is deprecated, use index.[has_node|rev|get_rev]"
580 util.nouideprecwarn(msg, b'5.3', stacklevel=2)
576 util.nouideprecwarn(msg, b'5.3', stacklevel=2)
581 return self._nodemap
577 return self._nodemap
582
578
583 @util.propertycache
579 @util.propertycache
584 def _nodemap(self):
580 def _nodemap(self):
585 nodemap = nodemaputil.NodeMap({sha1nodeconstants.nullid: nullrev})
581 nodemap = nodemaputil.NodeMap({sha1nodeconstants.nullid: nullrev})
586 for r in range(0, len(self)):
582 for r in range(0, len(self)):
587 n = self[r][7]
583 n = self[r][7]
588 nodemap[n] = r
584 nodemap[n] = r
589 return nodemap
585 return nodemap
590
586
591 def has_node(self, node):
587 def has_node(self, node):
592 """return True if the node exist in the index"""
588 """return True if the node exist in the index"""
593 return node in self._nodemap
589 return node in self._nodemap
594
590
595 def rev(self, node):
591 def rev(self, node):
596 """return a revision for a node
592 """return a revision for a node
597
593
598 If the node is unknown, raise a RevlogError"""
594 If the node is unknown, raise a RevlogError"""
599 return self._nodemap[node]
595 return self._nodemap[node]
600
596
601 def get_rev(self, node):
597 def get_rev(self, node):
602 """return a revision for a node
598 """return a revision for a node
603
599
604 If the node is unknown, return None"""
600 If the node is unknown, return None"""
605 return self._nodemap.get(node)
601 return self._nodemap.get(node)
606
602
607 def _stripnodes(self, start):
603 def _stripnodes(self, start):
608 if '_nodemap' in vars(self):
604 if '_nodemap' in vars(self):
609 for r in range(start, len(self)):
605 for r in range(start, len(self)):
610 n = self[r][7]
606 n = self[r][7]
611 del self._nodemap[n]
607 del self._nodemap[n]
612
608
613 def clearcaches(self):
609 def clearcaches(self):
614 self.__dict__.pop('_nodemap', None)
610 self.__dict__.pop('_nodemap', None)
615
611
616 def __len__(self):
612 def __len__(self):
617 return self._lgt + len(self._extra)
613 return self._lgt + len(self._extra)
618
614
619 def append(self, tup):
615 def append(self, tup):
620 if '_nodemap' in vars(self):
616 if '_nodemap' in vars(self):
621 self._nodemap[tup[7]] = len(self)
617 self._nodemap[tup[7]] = len(self)
622 data = self._pack_entry(len(self), tup)
618 data = self._pack_entry(len(self), tup)
623 self._extra.append(data)
619 self._extra.append(data)
624
620
625 def _pack_entry(self, rev, entry):
621 def _pack_entry(self, rev, entry):
626 assert entry[8] == 0
622 assert entry[8] == 0
627 assert entry[9] == 0
623 assert entry[9] == 0
628 return self.index_format.pack(*entry[:8])
624 return self.index_format.pack(*entry[:8])
629
625
630 def _check_index(self, i):
626 def _check_index(self, i):
631 if not isinstance(i, int):
627 if not isinstance(i, int):
632 raise TypeError(b"expecting int indexes")
628 raise TypeError(b"expecting int indexes")
633 if i < 0 or i >= len(self):
629 if i < 0 or i >= len(self):
634 raise IndexError
630 raise IndexError
635
631
636 def __getitem__(self, i):
632 def __getitem__(self, i):
637 if i == -1:
633 if i == -1:
638 return self.null_item
634 return self.null_item
639 self._check_index(i)
635 self._check_index(i)
640 if i >= self._lgt:
636 if i >= self._lgt:
641 data = self._extra[i - self._lgt]
637 data = self._extra[i - self._lgt]
642 else:
638 else:
643 index = self._calculate_index(i)
639 index = self._calculate_index(i)
644 data = self._data[index : index + self.entry_size]
640 data = self._data[index : index + self.entry_size]
645 r = self._unpack_entry(i, data)
641 r = self._unpack_entry(i, data)
646 if self._lgt and i == 0:
642 if self._lgt and i == 0:
647 offset = revlogutils.offset_type(0, gettype(r[0]))
643 offset = revlogutils.offset_type(0, gettype(r[0]))
648 r = (offset,) + r[1:]
644 r = (offset,) + r[1:]
649 return r
645 return r
650
646
651 def _unpack_entry(self, rev, data):
647 def _unpack_entry(self, rev, data):
652 r = self.index_format.unpack(data)
648 r = self.index_format.unpack(data)
653 r = r + (
649 r = r + (
654 0,
650 0,
655 0,
651 0,
656 revlog_constants.COMP_MODE_INLINE,
652 revlog_constants.COMP_MODE_INLINE,
657 revlog_constants.COMP_MODE_INLINE,
653 revlog_constants.COMP_MODE_INLINE,
658 )
654 )
659 return r
655 return r
660
656
661 def pack_header(self, header):
657 def pack_header(self, header):
662 """pack header information as binary"""
658 """pack header information as binary"""
663 v_fmt = revlog_constants.INDEX_HEADER
659 v_fmt = revlog_constants.INDEX_HEADER
664 return v_fmt.pack(header)
660 return v_fmt.pack(header)
665
661
666 def entry_binary(self, rev):
662 def entry_binary(self, rev):
667 """return the raw binary string representing a revision"""
663 """return the raw binary string representing a revision"""
668 entry = self[rev]
664 entry = self[rev]
669 p = revlog_constants.INDEX_ENTRY_V1.pack(*entry[:8])
665 p = revlog_constants.INDEX_ENTRY_V1.pack(*entry[:8])
670 if rev == 0:
666 if rev == 0:
671 p = p[revlog_constants.INDEX_HEADER.size :]
667 p = p[revlog_constants.INDEX_HEADER.size :]
672 return p
668 return p
673
669
674
670
675 class IndexObject(BaseIndexObject):
671 class IndexObject(BaseIndexObject):
676 def __init__(self, data):
672 def __init__(self, data):
677 assert len(data) % self.entry_size == 0, (
673 assert len(data) % self.entry_size == 0, (
678 len(data),
674 len(data),
679 self.entry_size,
675 self.entry_size,
680 len(data) % self.entry_size,
676 len(data) % self.entry_size,
681 )
677 )
682 self._data = data
678 self._data = data
683 self._lgt = len(data) // self.entry_size
679 self._lgt = len(data) // self.entry_size
684 self._extra = []
680 self._extra = []
685
681
686 def _calculate_index(self, i):
682 def _calculate_index(self, i):
687 return i * self.entry_size
683 return i * self.entry_size
688
684
689 def __delitem__(self, i):
685 def __delitem__(self, i):
690 if not isinstance(i, slice) or not i.stop == -1 or i.step is not None:
686 if not isinstance(i, slice) or not i.stop == -1 or i.step is not None:
691 raise ValueError(b"deleting slices only supports a:-1 with step 1")
687 raise ValueError(b"deleting slices only supports a:-1 with step 1")
692 i = i.start
688 i = i.start
693 self._check_index(i)
689 self._check_index(i)
694 self._stripnodes(i)
690 self._stripnodes(i)
695 if i < self._lgt:
691 if i < self._lgt:
696 self._data = self._data[: i * self.entry_size]
692 self._data = self._data[: i * self.entry_size]
697 self._lgt = i
693 self._lgt = i
698 self._extra = []
694 self._extra = []
699 else:
695 else:
700 self._extra = self._extra[: i - self._lgt]
696 self._extra = self._extra[: i - self._lgt]
701
697
702
698
703 class PersistentNodeMapIndexObject(IndexObject):
699 class PersistentNodeMapIndexObject(IndexObject):
704 """a Debug oriented class to test persistent nodemap
700 """a Debug oriented class to test persistent nodemap
705
701
706 We need a simple python object to test API and higher level behavior. See
702 We need a simple python object to test API and higher level behavior. See
707 the Rust implementation for more serious usage. This should be used only
703 the Rust implementation for more serious usage. This should be used only
708 through the dedicated `devel.persistent-nodemap` config.
704 through the dedicated `devel.persistent-nodemap` config.
709 """
705 """
710
706
711 def nodemap_data_all(self):
707 def nodemap_data_all(self):
712 """Return bytes containing a full serialization of a nodemap
708 """Return bytes containing a full serialization of a nodemap
713
709
714 The nodemap should be valid for the full set of revisions in the
710 The nodemap should be valid for the full set of revisions in the
715 index."""
711 index."""
716 return nodemaputil.persistent_data(self)
712 return nodemaputil.persistent_data(self)
717
713
718 def nodemap_data_incremental(self):
714 def nodemap_data_incremental(self):
719 """Return bytes containing a incremental update to persistent nodemap
715 """Return bytes containing a incremental update to persistent nodemap
720
716
721 This containst the data for an append-only update of the data provided
717 This containst the data for an append-only update of the data provided
722 in the last call to `update_nodemap_data`.
718 in the last call to `update_nodemap_data`.
723 """
719 """
724 if self._nm_root is None:
720 if self._nm_root is None:
725 return None
721 return None
726 docket = self._nm_docket
722 docket = self._nm_docket
727 changed, data = nodemaputil.update_persistent_data(
723 changed, data = nodemaputil.update_persistent_data(
728 self, self._nm_root, self._nm_max_idx, self._nm_docket.tip_rev
724 self, self._nm_root, self._nm_max_idx, self._nm_docket.tip_rev
729 )
725 )
730
726
731 self._nm_root = self._nm_max_idx = self._nm_docket = None
727 self._nm_root = self._nm_max_idx = self._nm_docket = None
732 return docket, changed, data
728 return docket, changed, data
733
729
734 def update_nodemap_data(self, docket, nm_data):
730 def update_nodemap_data(self, docket, nm_data):
735 """provide full block of persisted binary data for a nodemap
731 """provide full block of persisted binary data for a nodemap
736
732
737 The data are expected to come from disk. See `nodemap_data_all` for a
733 The data are expected to come from disk. See `nodemap_data_all` for a
738 produceur of such data."""
734 produceur of such data."""
739 if nm_data is not None:
735 if nm_data is not None:
740 self._nm_root, self._nm_max_idx = nodemaputil.parse_data(nm_data)
736 self._nm_root, self._nm_max_idx = nodemaputil.parse_data(nm_data)
741 if self._nm_root:
737 if self._nm_root:
742 self._nm_docket = docket
738 self._nm_docket = docket
743 else:
739 else:
744 self._nm_root = self._nm_max_idx = self._nm_docket = None
740 self._nm_root = self._nm_max_idx = self._nm_docket = None
745
741
746
742
747 class InlinedIndexObject(BaseIndexObject):
743 class InlinedIndexObject(BaseIndexObject):
748 def __init__(self, data, inline=0):
744 def __init__(self, data, inline=0):
749 self._data = data
745 self._data = data
750 self._lgt = self._inline_scan(None)
746 self._lgt = self._inline_scan(None)
751 self._inline_scan(self._lgt)
747 self._inline_scan(self._lgt)
752 self._extra = []
748 self._extra = []
753
749
754 def _inline_scan(self, lgt):
750 def _inline_scan(self, lgt):
755 off = 0
751 off = 0
756 if lgt is not None:
752 if lgt is not None:
757 self._offsets = [0] * lgt
753 self._offsets = [0] * lgt
758 count = 0
754 count = 0
759 while off <= len(self._data) - self.entry_size:
755 while off <= len(self._data) - self.entry_size:
760 start = off + self.big_int_size
756 start = off + self.big_int_size
761 (s,) = struct.unpack(
757 (s,) = struct.unpack(
762 b'>i',
758 b'>i',
763 self._data[start : start + self.int_size],
759 self._data[start : start + self.int_size],
764 )
760 )
765 if lgt is not None:
761 if lgt is not None:
766 self._offsets[count] = off
762 self._offsets[count] = off
767 count += 1
763 count += 1
768 off += self.entry_size + s
764 off += self.entry_size + s
769 if off != len(self._data):
765 if off != len(self._data):
770 raise ValueError(b"corrupted data")
766 raise ValueError(b"corrupted data")
771 return count
767 return count
772
768
773 def __delitem__(self, i):
769 def __delitem__(self, i):
774 if not isinstance(i, slice) or not i.stop == -1 or i.step is not None:
770 if not isinstance(i, slice) or not i.stop == -1 or i.step is not None:
775 raise ValueError(b"deleting slices only supports a:-1 with step 1")
771 raise ValueError(b"deleting slices only supports a:-1 with step 1")
776 i = i.start
772 i = i.start
777 self._check_index(i)
773 self._check_index(i)
778 self._stripnodes(i)
774 self._stripnodes(i)
779 if i < self._lgt:
775 if i < self._lgt:
780 self._offsets = self._offsets[:i]
776 self._offsets = self._offsets[:i]
781 self._lgt = i
777 self._lgt = i
782 self._extra = []
778 self._extra = []
783 else:
779 else:
784 self._extra = self._extra[: i - self._lgt]
780 self._extra = self._extra[: i - self._lgt]
785
781
786 def _calculate_index(self, i):
782 def _calculate_index(self, i):
787 return self._offsets[i]
783 return self._offsets[i]
788
784
789
785
790 def parse_index2(data, inline, revlogv2=False):
786 def parse_index2(data, inline, revlogv2=False):
791 if not inline:
787 if not inline:
792 cls = IndexObject2 if revlogv2 else IndexObject
788 cls = IndexObject2 if revlogv2 else IndexObject
793 return cls(data), None
789 return cls(data), None
794 cls = InlinedIndexObject
790 cls = InlinedIndexObject
795 return cls(data, inline), (0, data)
791 return cls(data, inline), (0, data)
796
792
797
793
798 def parse_index_cl_v2(data):
794 def parse_index_cl_v2(data):
799 return IndexChangelogV2(data), None
795 return IndexChangelogV2(data), None
800
796
801
797
802 class IndexObject2(IndexObject):
798 class IndexObject2(IndexObject):
803 index_format = revlog_constants.INDEX_ENTRY_V2
799 index_format = revlog_constants.INDEX_ENTRY_V2
804
800
805 def replace_sidedata_info(
801 def replace_sidedata_info(
806 self,
802 self,
807 rev,
803 rev,
808 sidedata_offset,
804 sidedata_offset,
809 sidedata_length,
805 sidedata_length,
810 offset_flags,
806 offset_flags,
811 compression_mode,
807 compression_mode,
812 ):
808 ):
813 """
809 """
814 Replace an existing index entry's sidedata offset and length with new
810 Replace an existing index entry's sidedata offset and length with new
815 ones.
811 ones.
816 This cannot be used outside of the context of sidedata rewriting,
812 This cannot be used outside of the context of sidedata rewriting,
817 inside the transaction that creates the revision `rev`.
813 inside the transaction that creates the revision `rev`.
818 """
814 """
819 if rev < 0:
815 if rev < 0:
820 raise KeyError
816 raise KeyError
821 self._check_index(rev)
817 self._check_index(rev)
822 if rev < self._lgt:
818 if rev < self._lgt:
823 msg = b"cannot rewrite entries outside of this transaction"
819 msg = b"cannot rewrite entries outside of this transaction"
824 raise KeyError(msg)
820 raise KeyError(msg)
825 else:
821 else:
826 entry = list(self[rev])
822 entry = list(self[rev])
827 entry[0] = offset_flags
823 entry[0] = offset_flags
828 entry[8] = sidedata_offset
824 entry[8] = sidedata_offset
829 entry[9] = sidedata_length
825 entry[9] = sidedata_length
830 entry[11] = compression_mode
826 entry[11] = compression_mode
831 entry = tuple(entry)
827 entry = tuple(entry)
832 new = self._pack_entry(rev, entry)
828 new = self._pack_entry(rev, entry)
833 self._extra[rev - self._lgt] = new
829 self._extra[rev - self._lgt] = new
834
830
835 def _unpack_entry(self, rev, data):
831 def _unpack_entry(self, rev, data):
836 data = self.index_format.unpack(data)
832 data = self.index_format.unpack(data)
837 entry = data[:10]
833 entry = data[:10]
838 data_comp = data[10] & 3
834 data_comp = data[10] & 3
839 sidedata_comp = (data[10] & (3 << 2)) >> 2
835 sidedata_comp = (data[10] & (3 << 2)) >> 2
840 return entry + (data_comp, sidedata_comp)
836 return entry + (data_comp, sidedata_comp)
841
837
842 def _pack_entry(self, rev, entry):
838 def _pack_entry(self, rev, entry):
843 data = entry[:10]
839 data = entry[:10]
844 data_comp = entry[10] & 3
840 data_comp = entry[10] & 3
845 sidedata_comp = (entry[11] & 3) << 2
841 sidedata_comp = (entry[11] & 3) << 2
846 data += (data_comp | sidedata_comp,)
842 data += (data_comp | sidedata_comp,)
847
843
848 return self.index_format.pack(*data)
844 return self.index_format.pack(*data)
849
845
850 def entry_binary(self, rev):
846 def entry_binary(self, rev):
851 """return the raw binary string representing a revision"""
847 """return the raw binary string representing a revision"""
852 entry = self[rev]
848 entry = self[rev]
853 return self._pack_entry(rev, entry)
849 return self._pack_entry(rev, entry)
854
850
855 def pack_header(self, header):
851 def pack_header(self, header):
856 """pack header information as binary"""
852 """pack header information as binary"""
857 msg = 'version header should go in the docket, not the index: %d'
853 msg = 'version header should go in the docket, not the index: %d'
858 msg %= header
854 msg %= header
859 raise error.ProgrammingError(msg)
855 raise error.ProgrammingError(msg)
860
856
861
857
862 class IndexChangelogV2(IndexObject2):
858 class IndexChangelogV2(IndexObject2):
863 index_format = revlog_constants.INDEX_ENTRY_CL_V2
859 index_format = revlog_constants.INDEX_ENTRY_CL_V2
864
860
865 def _unpack_entry(self, rev, data, r=True):
861 def _unpack_entry(self, rev, data, r=True):
866 items = self.index_format.unpack(data)
862 items = self.index_format.unpack(data)
867 entry = items[:3] + (rev, rev) + items[3:8]
863 entry = items[:3] + (rev, rev) + items[3:8]
868 data_comp = items[8] & 3
864 data_comp = items[8] & 3
869 sidedata_comp = (items[8] >> 2) & 3
865 sidedata_comp = (items[8] >> 2) & 3
870 return entry + (data_comp, sidedata_comp)
866 return entry + (data_comp, sidedata_comp)
871
867
872 def _pack_entry(self, rev, entry):
868 def _pack_entry(self, rev, entry):
873 assert entry[3] == rev, entry[3]
869 assert entry[3] == rev, entry[3]
874 assert entry[4] == rev, entry[4]
870 assert entry[4] == rev, entry[4]
875 data = entry[:3] + entry[5:10]
871 data = entry[:3] + entry[5:10]
876 data_comp = entry[10] & 3
872 data_comp = entry[10] & 3
877 sidedata_comp = (entry[11] & 3) << 2
873 sidedata_comp = (entry[11] & 3) << 2
878 data += (data_comp | sidedata_comp,)
874 data += (data_comp | sidedata_comp,)
879 return self.index_format.pack(*data)
875 return self.index_format.pack(*data)
880
876
881
877
882 def parse_index_devel_nodemap(data, inline):
878 def parse_index_devel_nodemap(data, inline):
883 """like parse_index2, but alway return a PersistentNodeMapIndexObject"""
879 """like parse_index2, but alway return a PersistentNodeMapIndexObject"""
884 return PersistentNodeMapIndexObject(data), None
880 return PersistentNodeMapIndexObject(data), None
885
881
886
882
887 def parse_dirstate(dmap, copymap, st):
883 def parse_dirstate(dmap, copymap, st):
888 parents = [st[:20], st[20:40]]
884 parents = [st[:20], st[20:40]]
889 # dereference fields so they will be local in loop
885 # dereference fields so they will be local in loop
890 format = b">cllll"
886 format = b">cllll"
891 e_size = struct.calcsize(format)
887 e_size = struct.calcsize(format)
892 pos1 = 40
888 pos1 = 40
893 l = len(st)
889 l = len(st)
894
890
895 # the inner loop
891 # the inner loop
896 while pos1 < l:
892 while pos1 < l:
897 pos2 = pos1 + e_size
893 pos2 = pos1 + e_size
898 e = _unpack(b">cllll", st[pos1:pos2]) # a literal here is faster
894 e = _unpack(b">cllll", st[pos1:pos2]) # a literal here is faster
899 pos1 = pos2 + e[4]
895 pos1 = pos2 + e[4]
900 f = st[pos2:pos1]
896 f = st[pos2:pos1]
901 if b'\0' in f:
897 if b'\0' in f:
902 f, c = f.split(b'\0')
898 f, c = f.split(b'\0')
903 copymap[f] = c
899 copymap[f] = c
904 dmap[f] = DirstateItem.from_v1_data(*e[:4])
900 dmap[f] = DirstateItem.from_v1_data(*e[:4])
905 return parents
901 return parents
906
902
907
903
908 def pack_dirstate(dmap, copymap, pl, now):
904 def pack_dirstate(dmap, copymap, pl):
909 cs = stringio()
905 cs = stringio()
910 write = cs.write
906 write = cs.write
911 write(b"".join(pl))
907 write(b"".join(pl))
912 for f, e in pycompat.iteritems(dmap):
908 for f, e in pycompat.iteritems(dmap):
913 if e.need_delay(now):
914 # The file was last modified "simultaneously" with the current
915 # write to dirstate (i.e. within the same second for file-
916 # systems with a granularity of 1 sec). This commonly happens
917 # for at least a couple of files on 'update'.
918 # The user could change the file without changing its size
919 # within the same second. Invalidate the file's mtime in
920 # dirstate, forcing future 'status' calls to compare the
921 # contents of the file if the size is the same. This prevents
922 # mistakenly treating such files as clean.
923 e.set_possibly_dirty()
924
925 if f in copymap:
909 if f in copymap:
926 f = b"%s\0%s" % (f, copymap[f])
910 f = b"%s\0%s" % (f, copymap[f])
927 e = _pack(
911 e = _pack(
928 b">cllll",
912 b">cllll",
929 e.v1_state(),
913 e.v1_state(),
930 e.v1_mode(),
914 e.v1_mode(),
931 e.v1_size(),
915 e.v1_size(),
932 e.v1_mtime(),
916 e.v1_mtime(),
933 len(f),
917 len(f),
934 )
918 )
935 write(e)
919 write(e)
936 write(f)
920 write(f)
937 return cs.getvalue()
921 return cs.getvalue()
@@ -1,649 +1,639 b''
1 use crate::dirstate_tree::on_disk::DirstateV2ParseError;
1 use crate::dirstate_tree::on_disk::DirstateV2ParseError;
2 use crate::errors::HgError;
2 use crate::errors::HgError;
3 use bitflags::bitflags;
3 use bitflags::bitflags;
4 use std::convert::{TryFrom, TryInto};
4 use std::convert::{TryFrom, TryInto};
5 use std::fs;
5 use std::fs;
6 use std::io;
6 use std::io;
7 use std::time::{SystemTime, UNIX_EPOCH};
7 use std::time::{SystemTime, UNIX_EPOCH};
8
8
9 #[derive(Copy, Clone, Debug, Eq, PartialEq)]
9 #[derive(Copy, Clone, Debug, Eq, PartialEq)]
10 pub enum EntryState {
10 pub enum EntryState {
11 Normal,
11 Normal,
12 Added,
12 Added,
13 Removed,
13 Removed,
14 Merged,
14 Merged,
15 }
15 }
16
16
17 /// `size` and `mtime.seconds` are truncated to 31 bits.
17 /// `size` and `mtime.seconds` are truncated to 31 bits.
18 ///
18 ///
19 /// TODO: double-check status algorithm correctness for files
19 /// TODO: double-check status algorithm correctness for files
20 /// larger than 2 GiB or modified after 2038.
20 /// larger than 2 GiB or modified after 2038.
21 #[derive(Debug, Copy, Clone)]
21 #[derive(Debug, Copy, Clone)]
22 pub struct DirstateEntry {
22 pub struct DirstateEntry {
23 pub(crate) flags: Flags,
23 pub(crate) flags: Flags,
24 mode_size: Option<(u32, u32)>,
24 mode_size: Option<(u32, u32)>,
25 mtime: Option<TruncatedTimestamp>,
25 mtime: Option<TruncatedTimestamp>,
26 }
26 }
27
27
28 bitflags! {
28 bitflags! {
29 pub(crate) struct Flags: u8 {
29 pub(crate) struct Flags: u8 {
30 const WDIR_TRACKED = 1 << 0;
30 const WDIR_TRACKED = 1 << 0;
31 const P1_TRACKED = 1 << 1;
31 const P1_TRACKED = 1 << 1;
32 const P2_INFO = 1 << 2;
32 const P2_INFO = 1 << 2;
33 const HAS_FALLBACK_EXEC = 1 << 3;
33 const HAS_FALLBACK_EXEC = 1 << 3;
34 const FALLBACK_EXEC = 1 << 4;
34 const FALLBACK_EXEC = 1 << 4;
35 const HAS_FALLBACK_SYMLINK = 1 << 5;
35 const HAS_FALLBACK_SYMLINK = 1 << 5;
36 const FALLBACK_SYMLINK = 1 << 6;
36 const FALLBACK_SYMLINK = 1 << 6;
37 }
37 }
38 }
38 }
39
39
40 /// A Unix timestamp with nanoseconds precision
40 /// A Unix timestamp with nanoseconds precision
41 #[derive(Debug, Copy, Clone)]
41 #[derive(Debug, Copy, Clone)]
42 pub struct TruncatedTimestamp {
42 pub struct TruncatedTimestamp {
43 truncated_seconds: u32,
43 truncated_seconds: u32,
44 /// Always in the `0 .. 1_000_000_000` range.
44 /// Always in the `0 .. 1_000_000_000` range.
45 nanoseconds: u32,
45 nanoseconds: u32,
46 }
46 }
47
47
48 impl TruncatedTimestamp {
48 impl TruncatedTimestamp {
49 /// Constructs from a timestamp potentially outside of the supported range,
49 /// Constructs from a timestamp potentially outside of the supported range,
50 /// and truncate the seconds components to its lower 31 bits.
50 /// and truncate the seconds components to its lower 31 bits.
51 ///
51 ///
52 /// Panics if the nanoseconds components is not in the expected range.
52 /// Panics if the nanoseconds components is not in the expected range.
53 pub fn new_truncate(seconds: i64, nanoseconds: u32) -> Self {
53 pub fn new_truncate(seconds: i64, nanoseconds: u32) -> Self {
54 assert!(nanoseconds < NSEC_PER_SEC);
54 assert!(nanoseconds < NSEC_PER_SEC);
55 Self {
55 Self {
56 truncated_seconds: seconds as u32 & RANGE_MASK_31BIT,
56 truncated_seconds: seconds as u32 & RANGE_MASK_31BIT,
57 nanoseconds,
57 nanoseconds,
58 }
58 }
59 }
59 }
60
60
61 /// Construct from components. Returns an error if they are not in the
61 /// Construct from components. Returns an error if they are not in the
62 /// expcted range.
62 /// expcted range.
63 pub fn from_already_truncated(
63 pub fn from_already_truncated(
64 truncated_seconds: u32,
64 truncated_seconds: u32,
65 nanoseconds: u32,
65 nanoseconds: u32,
66 ) -> Result<Self, DirstateV2ParseError> {
66 ) -> Result<Self, DirstateV2ParseError> {
67 if truncated_seconds & !RANGE_MASK_31BIT == 0
67 if truncated_seconds & !RANGE_MASK_31BIT == 0
68 && nanoseconds < NSEC_PER_SEC
68 && nanoseconds < NSEC_PER_SEC
69 {
69 {
70 Ok(Self {
70 Ok(Self {
71 truncated_seconds,
71 truncated_seconds,
72 nanoseconds,
72 nanoseconds,
73 })
73 })
74 } else {
74 } else {
75 Err(DirstateV2ParseError)
75 Err(DirstateV2ParseError)
76 }
76 }
77 }
77 }
78
78
79 pub fn for_mtime_of(metadata: &fs::Metadata) -> io::Result<Self> {
79 pub fn for_mtime_of(metadata: &fs::Metadata) -> io::Result<Self> {
80 #[cfg(unix)]
80 #[cfg(unix)]
81 {
81 {
82 use std::os::unix::fs::MetadataExt;
82 use std::os::unix::fs::MetadataExt;
83 let seconds = metadata.mtime();
83 let seconds = metadata.mtime();
84 // i64 -> u32 with value always in the `0 .. NSEC_PER_SEC` range
84 // i64 -> u32 with value always in the `0 .. NSEC_PER_SEC` range
85 let nanoseconds = metadata.mtime_nsec().try_into().unwrap();
85 let nanoseconds = metadata.mtime_nsec().try_into().unwrap();
86 Ok(Self::new_truncate(seconds, nanoseconds))
86 Ok(Self::new_truncate(seconds, nanoseconds))
87 }
87 }
88 #[cfg(not(unix))]
88 #[cfg(not(unix))]
89 {
89 {
90 metadata.modified().map(Self::from)
90 metadata.modified().map(Self::from)
91 }
91 }
92 }
92 }
93
93
94 /// The lower 31 bits of the number of seconds since the epoch.
94 /// The lower 31 bits of the number of seconds since the epoch.
95 pub fn truncated_seconds(&self) -> u32 {
95 pub fn truncated_seconds(&self) -> u32 {
96 self.truncated_seconds
96 self.truncated_seconds
97 }
97 }
98
98
99 /// The sub-second component of this timestamp, in nanoseconds.
99 /// The sub-second component of this timestamp, in nanoseconds.
100 /// Always in the `0 .. 1_000_000_000` range.
100 /// Always in the `0 .. 1_000_000_000` range.
101 ///
101 ///
102 /// This timestamp is after `(seconds, 0)` by this many nanoseconds.
102 /// This timestamp is after `(seconds, 0)` by this many nanoseconds.
103 pub fn nanoseconds(&self) -> u32 {
103 pub fn nanoseconds(&self) -> u32 {
104 self.nanoseconds
104 self.nanoseconds
105 }
105 }
106
106
107 /// Returns whether two timestamps are equal modulo 2**31 seconds.
107 /// Returns whether two timestamps are equal modulo 2**31 seconds.
108 ///
108 ///
109 /// If this returns `true`, the original values converted from `SystemTime`
109 /// If this returns `true`, the original values converted from `SystemTime`
110 /// or given to `new_truncate` were very likely equal. A false positive is
110 /// or given to `new_truncate` were very likely equal. A false positive is
111 /// possible if they were exactly a multiple of 2**31 seconds apart (around
111 /// possible if they were exactly a multiple of 2**31 seconds apart (around
112 /// 68 years). This is deemed very unlikely to happen by chance, especially
112 /// 68 years). This is deemed very unlikely to happen by chance, especially
113 /// on filesystems that support sub-second precision.
113 /// on filesystems that support sub-second precision.
114 ///
114 ///
115 /// If someone is manipulating the modification times of some files to
115 /// If someone is manipulating the modification times of some files to
116 /// intentionally make `hg status` return incorrect results, not truncating
116 /// intentionally make `hg status` return incorrect results, not truncating
117 /// wouldn’t help much since they can set exactly the expected timestamp.
117 /// wouldn’t help much since they can set exactly the expected timestamp.
118 ///
118 ///
119 /// Sub-second precision is ignored if it is zero in either value.
119 /// Sub-second precision is ignored if it is zero in either value.
120 /// Some APIs simply return zero when more precision is not available.
120 /// Some APIs simply return zero when more precision is not available.
121 /// When comparing values from different sources, if only one is truncated
121 /// When comparing values from different sources, if only one is truncated
122 /// in that way, doing a simple comparison would cause many false
122 /// in that way, doing a simple comparison would cause many false
123 /// negatives.
123 /// negatives.
124 pub fn likely_equal(self, other: Self) -> bool {
124 pub fn likely_equal(self, other: Self) -> bool {
125 self.truncated_seconds == other.truncated_seconds
125 self.truncated_seconds == other.truncated_seconds
126 && (self.nanoseconds == other.nanoseconds
126 && (self.nanoseconds == other.nanoseconds
127 || self.nanoseconds == 0
127 || self.nanoseconds == 0
128 || other.nanoseconds == 0)
128 || other.nanoseconds == 0)
129 }
129 }
130
130
131 pub fn likely_equal_to_mtime_of(
131 pub fn likely_equal_to_mtime_of(
132 self,
132 self,
133 metadata: &fs::Metadata,
133 metadata: &fs::Metadata,
134 ) -> io::Result<bool> {
134 ) -> io::Result<bool> {
135 Ok(self.likely_equal(Self::for_mtime_of(metadata)?))
135 Ok(self.likely_equal(Self::for_mtime_of(metadata)?))
136 }
136 }
137 }
137 }
138
138
139 impl From<SystemTime> for TruncatedTimestamp {
139 impl From<SystemTime> for TruncatedTimestamp {
140 fn from(system_time: SystemTime) -> Self {
140 fn from(system_time: SystemTime) -> Self {
141 // On Unix, `SystemTime` is a wrapper for the `timespec` C struct:
141 // On Unix, `SystemTime` is a wrapper for the `timespec` C struct:
142 // https://www.gnu.org/software/libc/manual/html_node/Time-Types.html#index-struct-timespec
142 // https://www.gnu.org/software/libc/manual/html_node/Time-Types.html#index-struct-timespec
143 // We want to effectively access its fields, but the Rust standard
143 // We want to effectively access its fields, but the Rust standard
144 // library does not expose them. The best we can do is:
144 // library does not expose them. The best we can do is:
145 let seconds;
145 let seconds;
146 let nanoseconds;
146 let nanoseconds;
147 match system_time.duration_since(UNIX_EPOCH) {
147 match system_time.duration_since(UNIX_EPOCH) {
148 Ok(duration) => {
148 Ok(duration) => {
149 seconds = duration.as_secs() as i64;
149 seconds = duration.as_secs() as i64;
150 nanoseconds = duration.subsec_nanos();
150 nanoseconds = duration.subsec_nanos();
151 }
151 }
152 Err(error) => {
152 Err(error) => {
153 // `system_time` is before `UNIX_EPOCH`.
153 // `system_time` is before `UNIX_EPOCH`.
154 // We need to undo this algorithm:
154 // We need to undo this algorithm:
155 // https://github.com/rust-lang/rust/blob/6bed1f0bc3cc50c10aab26d5f94b16a00776b8a5/library/std/src/sys/unix/time.rs#L40-L41
155 // https://github.com/rust-lang/rust/blob/6bed1f0bc3cc50c10aab26d5f94b16a00776b8a5/library/std/src/sys/unix/time.rs#L40-L41
156 let negative = error.duration();
156 let negative = error.duration();
157 let negative_secs = negative.as_secs() as i64;
157 let negative_secs = negative.as_secs() as i64;
158 let negative_nanos = negative.subsec_nanos();
158 let negative_nanos = negative.subsec_nanos();
159 if negative_nanos == 0 {
159 if negative_nanos == 0 {
160 seconds = -negative_secs;
160 seconds = -negative_secs;
161 nanoseconds = 0;
161 nanoseconds = 0;
162 } else {
162 } else {
163 // For example if `system_time` was 4.3 seconds before
163 // For example if `system_time` was 4.3 seconds before
164 // the Unix epoch we get a Duration that represents
164 // the Unix epoch we get a Duration that represents
165 // `(-4, -0.3)` but we want `(-5, +0.7)`:
165 // `(-4, -0.3)` but we want `(-5, +0.7)`:
166 seconds = -1 - negative_secs;
166 seconds = -1 - negative_secs;
167 nanoseconds = NSEC_PER_SEC - negative_nanos;
167 nanoseconds = NSEC_PER_SEC - negative_nanos;
168 }
168 }
169 }
169 }
170 };
170 };
171 Self::new_truncate(seconds, nanoseconds)
171 Self::new_truncate(seconds, nanoseconds)
172 }
172 }
173 }
173 }
174
174
175 const NSEC_PER_SEC: u32 = 1_000_000_000;
175 const NSEC_PER_SEC: u32 = 1_000_000_000;
176 const RANGE_MASK_31BIT: u32 = 0x7FFF_FFFF;
176 const RANGE_MASK_31BIT: u32 = 0x7FFF_FFFF;
177
177
178 pub const MTIME_UNSET: i32 = -1;
178 pub const MTIME_UNSET: i32 = -1;
179
179
180 /// A `DirstateEntry` with a size of `-2` means that it was merged from the
180 /// A `DirstateEntry` with a size of `-2` means that it was merged from the
181 /// other parent. This allows revert to pick the right status back during a
181 /// other parent. This allows revert to pick the right status back during a
182 /// merge.
182 /// merge.
183 pub const SIZE_FROM_OTHER_PARENT: i32 = -2;
183 pub const SIZE_FROM_OTHER_PARENT: i32 = -2;
184 /// A special value used for internal representation of special case in
184 /// A special value used for internal representation of special case in
185 /// dirstate v1 format.
185 /// dirstate v1 format.
186 pub const SIZE_NON_NORMAL: i32 = -1;
186 pub const SIZE_NON_NORMAL: i32 = -1;
187
187
188 impl DirstateEntry {
188 impl DirstateEntry {
189 pub fn from_v2_data(
189 pub fn from_v2_data(
190 wdir_tracked: bool,
190 wdir_tracked: bool,
191 p1_tracked: bool,
191 p1_tracked: bool,
192 p2_info: bool,
192 p2_info: bool,
193 mode_size: Option<(u32, u32)>,
193 mode_size: Option<(u32, u32)>,
194 mtime: Option<TruncatedTimestamp>,
194 mtime: Option<TruncatedTimestamp>,
195 fallback_exec: Option<bool>,
195 fallback_exec: Option<bool>,
196 fallback_symlink: Option<bool>,
196 fallback_symlink: Option<bool>,
197 ) -> Self {
197 ) -> Self {
198 if let Some((mode, size)) = mode_size {
198 if let Some((mode, size)) = mode_size {
199 // TODO: return an error for out of range values?
199 // TODO: return an error for out of range values?
200 assert!(mode & !RANGE_MASK_31BIT == 0);
200 assert!(mode & !RANGE_MASK_31BIT == 0);
201 assert!(size & !RANGE_MASK_31BIT == 0);
201 assert!(size & !RANGE_MASK_31BIT == 0);
202 }
202 }
203 let mut flags = Flags::empty();
203 let mut flags = Flags::empty();
204 flags.set(Flags::WDIR_TRACKED, wdir_tracked);
204 flags.set(Flags::WDIR_TRACKED, wdir_tracked);
205 flags.set(Flags::P1_TRACKED, p1_tracked);
205 flags.set(Flags::P1_TRACKED, p1_tracked);
206 flags.set(Flags::P2_INFO, p2_info);
206 flags.set(Flags::P2_INFO, p2_info);
207 if let Some(exec) = fallback_exec {
207 if let Some(exec) = fallback_exec {
208 flags.insert(Flags::HAS_FALLBACK_EXEC);
208 flags.insert(Flags::HAS_FALLBACK_EXEC);
209 if exec {
209 if exec {
210 flags.insert(Flags::FALLBACK_EXEC);
210 flags.insert(Flags::FALLBACK_EXEC);
211 }
211 }
212 }
212 }
213 if let Some(exec) = fallback_symlink {
213 if let Some(exec) = fallback_symlink {
214 flags.insert(Flags::HAS_FALLBACK_SYMLINK);
214 flags.insert(Flags::HAS_FALLBACK_SYMLINK);
215 if exec {
215 if exec {
216 flags.insert(Flags::FALLBACK_SYMLINK);
216 flags.insert(Flags::FALLBACK_SYMLINK);
217 }
217 }
218 }
218 }
219 Self {
219 Self {
220 flags,
220 flags,
221 mode_size,
221 mode_size,
222 mtime,
222 mtime,
223 }
223 }
224 }
224 }
225
225
226 pub fn from_v1_data(
226 pub fn from_v1_data(
227 state: EntryState,
227 state: EntryState,
228 mode: i32,
228 mode: i32,
229 size: i32,
229 size: i32,
230 mtime: i32,
230 mtime: i32,
231 ) -> Self {
231 ) -> Self {
232 match state {
232 match state {
233 EntryState::Normal => {
233 EntryState::Normal => {
234 if size == SIZE_FROM_OTHER_PARENT {
234 if size == SIZE_FROM_OTHER_PARENT {
235 Self {
235 Self {
236 // might be missing P1_TRACKED
236 // might be missing P1_TRACKED
237 flags: Flags::WDIR_TRACKED | Flags::P2_INFO,
237 flags: Flags::WDIR_TRACKED | Flags::P2_INFO,
238 mode_size: None,
238 mode_size: None,
239 mtime: None,
239 mtime: None,
240 }
240 }
241 } else if size == SIZE_NON_NORMAL {
241 } else if size == SIZE_NON_NORMAL {
242 Self {
242 Self {
243 flags: Flags::WDIR_TRACKED | Flags::P1_TRACKED,
243 flags: Flags::WDIR_TRACKED | Flags::P1_TRACKED,
244 mode_size: None,
244 mode_size: None,
245 mtime: None,
245 mtime: None,
246 }
246 }
247 } else if mtime == MTIME_UNSET {
247 } else if mtime == MTIME_UNSET {
248 // TODO: return an error for negative values?
248 // TODO: return an error for negative values?
249 let mode = u32::try_from(mode).unwrap();
249 let mode = u32::try_from(mode).unwrap();
250 let size = u32::try_from(size).unwrap();
250 let size = u32::try_from(size).unwrap();
251 Self {
251 Self {
252 flags: Flags::WDIR_TRACKED | Flags::P1_TRACKED,
252 flags: Flags::WDIR_TRACKED | Flags::P1_TRACKED,
253 mode_size: Some((mode, size)),
253 mode_size: Some((mode, size)),
254 mtime: None,
254 mtime: None,
255 }
255 }
256 } else {
256 } else {
257 // TODO: return an error for negative values?
257 // TODO: return an error for negative values?
258 let mode = u32::try_from(mode).unwrap();
258 let mode = u32::try_from(mode).unwrap();
259 let size = u32::try_from(size).unwrap();
259 let size = u32::try_from(size).unwrap();
260 let mtime = u32::try_from(mtime).unwrap();
260 let mtime = u32::try_from(mtime).unwrap();
261 let mtime =
261 let mtime =
262 TruncatedTimestamp::from_already_truncated(mtime, 0)
262 TruncatedTimestamp::from_already_truncated(mtime, 0)
263 .unwrap();
263 .unwrap();
264 Self {
264 Self {
265 flags: Flags::WDIR_TRACKED | Flags::P1_TRACKED,
265 flags: Flags::WDIR_TRACKED | Flags::P1_TRACKED,
266 mode_size: Some((mode, size)),
266 mode_size: Some((mode, size)),
267 mtime: Some(mtime),
267 mtime: Some(mtime),
268 }
268 }
269 }
269 }
270 }
270 }
271 EntryState::Added => Self {
271 EntryState::Added => Self {
272 flags: Flags::WDIR_TRACKED,
272 flags: Flags::WDIR_TRACKED,
273 mode_size: None,
273 mode_size: None,
274 mtime: None,
274 mtime: None,
275 },
275 },
276 EntryState::Removed => Self {
276 EntryState::Removed => Self {
277 flags: if size == SIZE_NON_NORMAL {
277 flags: if size == SIZE_NON_NORMAL {
278 Flags::P1_TRACKED | Flags::P2_INFO
278 Flags::P1_TRACKED | Flags::P2_INFO
279 } else if size == SIZE_FROM_OTHER_PARENT {
279 } else if size == SIZE_FROM_OTHER_PARENT {
280 // We don’t know if P1_TRACKED should be set (file history)
280 // We don’t know if P1_TRACKED should be set (file history)
281 Flags::P2_INFO
281 Flags::P2_INFO
282 } else {
282 } else {
283 Flags::P1_TRACKED
283 Flags::P1_TRACKED
284 },
284 },
285 mode_size: None,
285 mode_size: None,
286 mtime: None,
286 mtime: None,
287 },
287 },
288 EntryState::Merged => Self {
288 EntryState::Merged => Self {
289 flags: Flags::WDIR_TRACKED
289 flags: Flags::WDIR_TRACKED
290 | Flags::P1_TRACKED // might not be true because of rename ?
290 | Flags::P1_TRACKED // might not be true because of rename ?
291 | Flags::P2_INFO, // might not be true because of rename ?
291 | Flags::P2_INFO, // might not be true because of rename ?
292 mode_size: None,
292 mode_size: None,
293 mtime: None,
293 mtime: None,
294 },
294 },
295 }
295 }
296 }
296 }
297
297
298 /// Creates a new entry in "removed" state.
298 /// Creates a new entry in "removed" state.
299 ///
299 ///
300 /// `size` is expected to be zero, `SIZE_NON_NORMAL`, or
300 /// `size` is expected to be zero, `SIZE_NON_NORMAL`, or
301 /// `SIZE_FROM_OTHER_PARENT`
301 /// `SIZE_FROM_OTHER_PARENT`
302 pub fn new_removed(size: i32) -> Self {
302 pub fn new_removed(size: i32) -> Self {
303 Self::from_v1_data(EntryState::Removed, 0, size, 0)
303 Self::from_v1_data(EntryState::Removed, 0, size, 0)
304 }
304 }
305
305
306 pub fn tracked(&self) -> bool {
306 pub fn tracked(&self) -> bool {
307 self.flags.contains(Flags::WDIR_TRACKED)
307 self.flags.contains(Flags::WDIR_TRACKED)
308 }
308 }
309
309
310 pub fn p1_tracked(&self) -> bool {
310 pub fn p1_tracked(&self) -> bool {
311 self.flags.contains(Flags::P1_TRACKED)
311 self.flags.contains(Flags::P1_TRACKED)
312 }
312 }
313
313
314 fn in_either_parent(&self) -> bool {
314 fn in_either_parent(&self) -> bool {
315 self.flags.intersects(Flags::P1_TRACKED | Flags::P2_INFO)
315 self.flags.intersects(Flags::P1_TRACKED | Flags::P2_INFO)
316 }
316 }
317
317
318 pub fn removed(&self) -> bool {
318 pub fn removed(&self) -> bool {
319 self.in_either_parent() && !self.flags.contains(Flags::WDIR_TRACKED)
319 self.in_either_parent() && !self.flags.contains(Flags::WDIR_TRACKED)
320 }
320 }
321
321
322 pub fn p2_info(&self) -> bool {
322 pub fn p2_info(&self) -> bool {
323 self.flags.contains(Flags::WDIR_TRACKED | Flags::P2_INFO)
323 self.flags.contains(Flags::WDIR_TRACKED | Flags::P2_INFO)
324 }
324 }
325
325
326 pub fn added(&self) -> bool {
326 pub fn added(&self) -> bool {
327 self.flags.contains(Flags::WDIR_TRACKED) && !self.in_either_parent()
327 self.flags.contains(Flags::WDIR_TRACKED) && !self.in_either_parent()
328 }
328 }
329
329
330 pub fn maybe_clean(&self) -> bool {
330 pub fn maybe_clean(&self) -> bool {
331 if !self.flags.contains(Flags::WDIR_TRACKED) {
331 if !self.flags.contains(Flags::WDIR_TRACKED) {
332 false
332 false
333 } else if !self.flags.contains(Flags::P1_TRACKED) {
333 } else if !self.flags.contains(Flags::P1_TRACKED) {
334 false
334 false
335 } else if self.flags.contains(Flags::P2_INFO) {
335 } else if self.flags.contains(Flags::P2_INFO) {
336 false
336 false
337 } else {
337 } else {
338 true
338 true
339 }
339 }
340 }
340 }
341
341
342 pub fn any_tracked(&self) -> bool {
342 pub fn any_tracked(&self) -> bool {
343 self.flags.intersects(
343 self.flags.intersects(
344 Flags::WDIR_TRACKED | Flags::P1_TRACKED | Flags::P2_INFO,
344 Flags::WDIR_TRACKED | Flags::P1_TRACKED | Flags::P2_INFO,
345 )
345 )
346 }
346 }
347
347
348 /// Returns `(wdir_tracked, p1_tracked, p2_info, mode_size, mtime)`
348 /// Returns `(wdir_tracked, p1_tracked, p2_info, mode_size, mtime)`
349 pub(crate) fn v2_data(
349 pub(crate) fn v2_data(
350 &self,
350 &self,
351 ) -> (
351 ) -> (
352 bool,
352 bool,
353 bool,
353 bool,
354 bool,
354 bool,
355 Option<(u32, u32)>,
355 Option<(u32, u32)>,
356 Option<TruncatedTimestamp>,
356 Option<TruncatedTimestamp>,
357 Option<bool>,
357 Option<bool>,
358 Option<bool>,
358 Option<bool>,
359 ) {
359 ) {
360 if !self.any_tracked() {
360 if !self.any_tracked() {
361 // TODO: return an Option instead?
361 // TODO: return an Option instead?
362 panic!("Accessing v1_state of an untracked DirstateEntry")
362 panic!("Accessing v1_state of an untracked DirstateEntry")
363 }
363 }
364 let wdir_tracked = self.flags.contains(Flags::WDIR_TRACKED);
364 let wdir_tracked = self.flags.contains(Flags::WDIR_TRACKED);
365 let p1_tracked = self.flags.contains(Flags::P1_TRACKED);
365 let p1_tracked = self.flags.contains(Flags::P1_TRACKED);
366 let p2_info = self.flags.contains(Flags::P2_INFO);
366 let p2_info = self.flags.contains(Flags::P2_INFO);
367 let mode_size = self.mode_size;
367 let mode_size = self.mode_size;
368 let mtime = self.mtime;
368 let mtime = self.mtime;
369 (
369 (
370 wdir_tracked,
370 wdir_tracked,
371 p1_tracked,
371 p1_tracked,
372 p2_info,
372 p2_info,
373 mode_size,
373 mode_size,
374 mtime,
374 mtime,
375 self.get_fallback_exec(),
375 self.get_fallback_exec(),
376 self.get_fallback_symlink(),
376 self.get_fallback_symlink(),
377 )
377 )
378 }
378 }
379
379
380 fn v1_state(&self) -> EntryState {
380 fn v1_state(&self) -> EntryState {
381 if !self.any_tracked() {
381 if !self.any_tracked() {
382 // TODO: return an Option instead?
382 // TODO: return an Option instead?
383 panic!("Accessing v1_state of an untracked DirstateEntry")
383 panic!("Accessing v1_state of an untracked DirstateEntry")
384 }
384 }
385 if self.removed() {
385 if self.removed() {
386 EntryState::Removed
386 EntryState::Removed
387 } else if self
387 } else if self
388 .flags
388 .flags
389 .contains(Flags::WDIR_TRACKED | Flags::P1_TRACKED | Flags::P2_INFO)
389 .contains(Flags::WDIR_TRACKED | Flags::P1_TRACKED | Flags::P2_INFO)
390 {
390 {
391 EntryState::Merged
391 EntryState::Merged
392 } else if self.added() {
392 } else if self.added() {
393 EntryState::Added
393 EntryState::Added
394 } else {
394 } else {
395 EntryState::Normal
395 EntryState::Normal
396 }
396 }
397 }
397 }
398
398
399 fn v1_mode(&self) -> i32 {
399 fn v1_mode(&self) -> i32 {
400 if let Some((mode, _size)) = self.mode_size {
400 if let Some((mode, _size)) = self.mode_size {
401 i32::try_from(mode).unwrap()
401 i32::try_from(mode).unwrap()
402 } else {
402 } else {
403 0
403 0
404 }
404 }
405 }
405 }
406
406
407 fn v1_size(&self) -> i32 {
407 fn v1_size(&self) -> i32 {
408 if !self.any_tracked() {
408 if !self.any_tracked() {
409 // TODO: return an Option instead?
409 // TODO: return an Option instead?
410 panic!("Accessing v1_size of an untracked DirstateEntry")
410 panic!("Accessing v1_size of an untracked DirstateEntry")
411 }
411 }
412 if self.removed()
412 if self.removed()
413 && self.flags.contains(Flags::P1_TRACKED | Flags::P2_INFO)
413 && self.flags.contains(Flags::P1_TRACKED | Flags::P2_INFO)
414 {
414 {
415 SIZE_NON_NORMAL
415 SIZE_NON_NORMAL
416 } else if self.flags.contains(Flags::P2_INFO) {
416 } else if self.flags.contains(Flags::P2_INFO) {
417 SIZE_FROM_OTHER_PARENT
417 SIZE_FROM_OTHER_PARENT
418 } else if self.removed() {
418 } else if self.removed() {
419 0
419 0
420 } else if self.added() {
420 } else if self.added() {
421 SIZE_NON_NORMAL
421 SIZE_NON_NORMAL
422 } else if let Some((_mode, size)) = self.mode_size {
422 } else if let Some((_mode, size)) = self.mode_size {
423 i32::try_from(size).unwrap()
423 i32::try_from(size).unwrap()
424 } else {
424 } else {
425 SIZE_NON_NORMAL
425 SIZE_NON_NORMAL
426 }
426 }
427 }
427 }
428
428
429 fn v1_mtime(&self) -> i32 {
429 fn v1_mtime(&self) -> i32 {
430 if !self.any_tracked() {
430 if !self.any_tracked() {
431 // TODO: return an Option instead?
431 // TODO: return an Option instead?
432 panic!("Accessing v1_mtime of an untracked DirstateEntry")
432 panic!("Accessing v1_mtime of an untracked DirstateEntry")
433 }
433 }
434 if self.removed() {
434 if self.removed() {
435 0
435 0
436 } else if self.flags.contains(Flags::P2_INFO) {
436 } else if self.flags.contains(Flags::P2_INFO) {
437 MTIME_UNSET
437 MTIME_UNSET
438 } else if !self.flags.contains(Flags::P1_TRACKED) {
438 } else if !self.flags.contains(Flags::P1_TRACKED) {
439 MTIME_UNSET
439 MTIME_UNSET
440 } else if let Some(mtime) = self.mtime {
440 } else if let Some(mtime) = self.mtime {
441 i32::try_from(mtime.truncated_seconds()).unwrap()
441 i32::try_from(mtime.truncated_seconds()).unwrap()
442 } else {
442 } else {
443 MTIME_UNSET
443 MTIME_UNSET
444 }
444 }
445 }
445 }
446
446
447 // TODO: return `Option<EntryState>`? None when `!self.any_tracked`
447 // TODO: return `Option<EntryState>`? None when `!self.any_tracked`
448 pub fn state(&self) -> EntryState {
448 pub fn state(&self) -> EntryState {
449 self.v1_state()
449 self.v1_state()
450 }
450 }
451
451
452 // TODO: return Option?
452 // TODO: return Option?
453 pub fn mode(&self) -> i32 {
453 pub fn mode(&self) -> i32 {
454 self.v1_mode()
454 self.v1_mode()
455 }
455 }
456
456
457 // TODO: return Option?
457 // TODO: return Option?
458 pub fn size(&self) -> i32 {
458 pub fn size(&self) -> i32 {
459 self.v1_size()
459 self.v1_size()
460 }
460 }
461
461
462 // TODO: return Option?
462 // TODO: return Option?
463 pub fn mtime(&self) -> i32 {
463 pub fn mtime(&self) -> i32 {
464 self.v1_mtime()
464 self.v1_mtime()
465 }
465 }
466
466
467 pub fn get_fallback_exec(&self) -> Option<bool> {
467 pub fn get_fallback_exec(&self) -> Option<bool> {
468 if self.flags.contains(Flags::HAS_FALLBACK_EXEC) {
468 if self.flags.contains(Flags::HAS_FALLBACK_EXEC) {
469 Some(self.flags.contains(Flags::FALLBACK_EXEC))
469 Some(self.flags.contains(Flags::FALLBACK_EXEC))
470 } else {
470 } else {
471 None
471 None
472 }
472 }
473 }
473 }
474
474
475 pub fn set_fallback_exec(&mut self, value: Option<bool>) {
475 pub fn set_fallback_exec(&mut self, value: Option<bool>) {
476 match value {
476 match value {
477 None => {
477 None => {
478 self.flags.remove(Flags::HAS_FALLBACK_EXEC);
478 self.flags.remove(Flags::HAS_FALLBACK_EXEC);
479 self.flags.remove(Flags::FALLBACK_EXEC);
479 self.flags.remove(Flags::FALLBACK_EXEC);
480 }
480 }
481 Some(exec) => {
481 Some(exec) => {
482 self.flags.insert(Flags::HAS_FALLBACK_EXEC);
482 self.flags.insert(Flags::HAS_FALLBACK_EXEC);
483 if exec {
483 if exec {
484 self.flags.insert(Flags::FALLBACK_EXEC);
484 self.flags.insert(Flags::FALLBACK_EXEC);
485 }
485 }
486 }
486 }
487 }
487 }
488 }
488 }
489
489
490 pub fn get_fallback_symlink(&self) -> Option<bool> {
490 pub fn get_fallback_symlink(&self) -> Option<bool> {
491 if self.flags.contains(Flags::HAS_FALLBACK_SYMLINK) {
491 if self.flags.contains(Flags::HAS_FALLBACK_SYMLINK) {
492 Some(self.flags.contains(Flags::FALLBACK_SYMLINK))
492 Some(self.flags.contains(Flags::FALLBACK_SYMLINK))
493 } else {
493 } else {
494 None
494 None
495 }
495 }
496 }
496 }
497
497
498 pub fn set_fallback_symlink(&mut self, value: Option<bool>) {
498 pub fn set_fallback_symlink(&mut self, value: Option<bool>) {
499 match value {
499 match value {
500 None => {
500 None => {
501 self.flags.remove(Flags::HAS_FALLBACK_SYMLINK);
501 self.flags.remove(Flags::HAS_FALLBACK_SYMLINK);
502 self.flags.remove(Flags::FALLBACK_SYMLINK);
502 self.flags.remove(Flags::FALLBACK_SYMLINK);
503 }
503 }
504 Some(symlink) => {
504 Some(symlink) => {
505 self.flags.insert(Flags::HAS_FALLBACK_SYMLINK);
505 self.flags.insert(Flags::HAS_FALLBACK_SYMLINK);
506 if symlink {
506 if symlink {
507 self.flags.insert(Flags::FALLBACK_SYMLINK);
507 self.flags.insert(Flags::FALLBACK_SYMLINK);
508 }
508 }
509 }
509 }
510 }
510 }
511 }
511 }
512
512
513 pub fn truncated_mtime(&self) -> Option<TruncatedTimestamp> {
513 pub fn truncated_mtime(&self) -> Option<TruncatedTimestamp> {
514 self.mtime
514 self.mtime
515 }
515 }
516
516
517 pub fn drop_merge_data(&mut self) {
517 pub fn drop_merge_data(&mut self) {
518 if self.flags.contains(Flags::P2_INFO) {
518 if self.flags.contains(Flags::P2_INFO) {
519 self.flags.remove(Flags::P2_INFO);
519 self.flags.remove(Flags::P2_INFO);
520 self.mode_size = None;
520 self.mode_size = None;
521 self.mtime = None;
521 self.mtime = None;
522 }
522 }
523 }
523 }
524
524
525 pub fn set_possibly_dirty(&mut self) {
525 pub fn set_possibly_dirty(&mut self) {
526 self.mtime = None
526 self.mtime = None
527 }
527 }
528
528
529 pub fn set_clean(
529 pub fn set_clean(
530 &mut self,
530 &mut self,
531 mode: u32,
531 mode: u32,
532 size: u32,
532 size: u32,
533 mtime: TruncatedTimestamp,
533 mtime: TruncatedTimestamp,
534 ) {
534 ) {
535 let size = size & RANGE_MASK_31BIT;
535 let size = size & RANGE_MASK_31BIT;
536 self.flags.insert(Flags::WDIR_TRACKED | Flags::P1_TRACKED);
536 self.flags.insert(Flags::WDIR_TRACKED | Flags::P1_TRACKED);
537 self.mode_size = Some((mode, size));
537 self.mode_size = Some((mode, size));
538 self.mtime = Some(mtime);
538 self.mtime = Some(mtime);
539 }
539 }
540
540
541 pub fn set_tracked(&mut self) {
541 pub fn set_tracked(&mut self) {
542 self.flags.insert(Flags::WDIR_TRACKED);
542 self.flags.insert(Flags::WDIR_TRACKED);
543 // `set_tracked` is replacing various `normallookup` call. So we mark
543 // `set_tracked` is replacing various `normallookup` call. So we mark
544 // the files as needing lookup
544 // the files as needing lookup
545 //
545 //
546 // Consider dropping this in the future in favor of something less
546 // Consider dropping this in the future in favor of something less
547 // broad.
547 // broad.
548 self.mtime = None;
548 self.mtime = None;
549 }
549 }
550
550
551 pub fn set_untracked(&mut self) {
551 pub fn set_untracked(&mut self) {
552 self.flags.remove(Flags::WDIR_TRACKED);
552 self.flags.remove(Flags::WDIR_TRACKED);
553 self.mode_size = None;
553 self.mode_size = None;
554 self.mtime = None;
554 self.mtime = None;
555 }
555 }
556
556
557 /// Returns `(state, mode, size, mtime)` for the puprose of serialization
557 /// Returns `(state, mode, size, mtime)` for the puprose of serialization
558 /// in the dirstate-v1 format.
558 /// in the dirstate-v1 format.
559 ///
559 ///
560 /// This includes marker values such as `mtime == -1`. In the future we may
560 /// This includes marker values such as `mtime == -1`. In the future we may
561 /// want to not represent these cases that way in memory, but serialization
561 /// want to not represent these cases that way in memory, but serialization
562 /// will need to keep the same format.
562 /// will need to keep the same format.
563 pub fn v1_data(&self) -> (u8, i32, i32, i32) {
563 pub fn v1_data(&self) -> (u8, i32, i32, i32) {
564 (
564 (
565 self.v1_state().into(),
565 self.v1_state().into(),
566 self.v1_mode(),
566 self.v1_mode(),
567 self.v1_size(),
567 self.v1_size(),
568 self.v1_mtime(),
568 self.v1_mtime(),
569 )
569 )
570 }
570 }
571
571
572 pub(crate) fn is_from_other_parent(&self) -> bool {
572 pub(crate) fn is_from_other_parent(&self) -> bool {
573 self.state() == EntryState::Normal
573 self.state() == EntryState::Normal
574 && self.size() == SIZE_FROM_OTHER_PARENT
574 && self.size() == SIZE_FROM_OTHER_PARENT
575 }
575 }
576
576
577 // TODO: other platforms
577 // TODO: other platforms
578 #[cfg(unix)]
578 #[cfg(unix)]
579 pub fn mode_changed(
579 pub fn mode_changed(
580 &self,
580 &self,
581 filesystem_metadata: &std::fs::Metadata,
581 filesystem_metadata: &std::fs::Metadata,
582 ) -> bool {
582 ) -> bool {
583 let dirstate_exec_bit = (self.mode() as u32 & EXEC_BIT_MASK) != 0;
583 let dirstate_exec_bit = (self.mode() as u32 & EXEC_BIT_MASK) != 0;
584 let fs_exec_bit = has_exec_bit(filesystem_metadata);
584 let fs_exec_bit = has_exec_bit(filesystem_metadata);
585 dirstate_exec_bit != fs_exec_bit
585 dirstate_exec_bit != fs_exec_bit
586 }
586 }
587
587
588 /// Returns a `(state, mode, size, mtime)` tuple as for
588 /// Returns a `(state, mode, size, mtime)` tuple as for
589 /// `DirstateMapMethods::debug_iter`.
589 /// `DirstateMapMethods::debug_iter`.
590 pub fn debug_tuple(&self) -> (u8, i32, i32, i32) {
590 pub fn debug_tuple(&self) -> (u8, i32, i32, i32) {
591 (self.state().into(), self.mode(), self.size(), self.mtime())
591 (self.state().into(), self.mode(), self.size(), self.mtime())
592 }
592 }
593
594 /// True if the stored mtime would be ambiguous with the current time
595 pub fn need_delay(&self, now: TruncatedTimestamp) -> bool {
596 if let Some(mtime) = self.mtime {
597 self.state() == EntryState::Normal
598 && mtime.truncated_seconds() == now.truncated_seconds()
599 } else {
600 false
601 }
602 }
603 }
593 }
604
594
605 impl EntryState {
595 impl EntryState {
606 pub fn is_tracked(self) -> bool {
596 pub fn is_tracked(self) -> bool {
607 use EntryState::*;
597 use EntryState::*;
608 match self {
598 match self {
609 Normal | Added | Merged => true,
599 Normal | Added | Merged => true,
610 Removed => false,
600 Removed => false,
611 }
601 }
612 }
602 }
613 }
603 }
614
604
615 impl TryFrom<u8> for EntryState {
605 impl TryFrom<u8> for EntryState {
616 type Error = HgError;
606 type Error = HgError;
617
607
618 fn try_from(value: u8) -> Result<Self, Self::Error> {
608 fn try_from(value: u8) -> Result<Self, Self::Error> {
619 match value {
609 match value {
620 b'n' => Ok(EntryState::Normal),
610 b'n' => Ok(EntryState::Normal),
621 b'a' => Ok(EntryState::Added),
611 b'a' => Ok(EntryState::Added),
622 b'r' => Ok(EntryState::Removed),
612 b'r' => Ok(EntryState::Removed),
623 b'm' => Ok(EntryState::Merged),
613 b'm' => Ok(EntryState::Merged),
624 _ => Err(HgError::CorruptedRepository(format!(
614 _ => Err(HgError::CorruptedRepository(format!(
625 "Incorrect dirstate entry state {}",
615 "Incorrect dirstate entry state {}",
626 value
616 value
627 ))),
617 ))),
628 }
618 }
629 }
619 }
630 }
620 }
631
621
632 impl Into<u8> for EntryState {
622 impl Into<u8> for EntryState {
633 fn into(self) -> u8 {
623 fn into(self) -> u8 {
634 match self {
624 match self {
635 EntryState::Normal => b'n',
625 EntryState::Normal => b'n',
636 EntryState::Added => b'a',
626 EntryState::Added => b'a',
637 EntryState::Removed => b'r',
627 EntryState::Removed => b'r',
638 EntryState::Merged => b'm',
628 EntryState::Merged => b'm',
639 }
629 }
640 }
630 }
641 }
631 }
642
632
643 const EXEC_BIT_MASK: u32 = 0o100;
633 const EXEC_BIT_MASK: u32 = 0o100;
644
634
645 pub fn has_exec_bit(metadata: &std::fs::Metadata) -> bool {
635 pub fn has_exec_bit(metadata: &std::fs::Metadata) -> bool {
646 // TODO: How to handle executable permissions on Windows?
636 // TODO: How to handle executable permissions on Windows?
647 use std::os::unix::fs::MetadataExt;
637 use std::os::unix::fs::MetadataExt;
648 (metadata.mode() & EXEC_BIT_MASK) != 0
638 (metadata.mode() & EXEC_BIT_MASK) != 0
649 }
639 }
@@ -1,1184 +1,1139 b''
1 use bytes_cast::BytesCast;
1 use bytes_cast::BytesCast;
2 use micro_timer::timed;
2 use micro_timer::timed;
3 use std::borrow::Cow;
3 use std::borrow::Cow;
4 use std::path::PathBuf;
4 use std::path::PathBuf;
5
5
6 use super::on_disk;
6 use super::on_disk;
7 use super::on_disk::DirstateV2ParseError;
7 use super::on_disk::DirstateV2ParseError;
8 use super::owning::OwningDirstateMap;
8 use super::owning::OwningDirstateMap;
9 use super::path_with_basename::WithBasename;
9 use super::path_with_basename::WithBasename;
10 use crate::dirstate::parsers::pack_entry;
10 use crate::dirstate::parsers::pack_entry;
11 use crate::dirstate::parsers::packed_entry_size;
11 use crate::dirstate::parsers::packed_entry_size;
12 use crate::dirstate::parsers::parse_dirstate_entries;
12 use crate::dirstate::parsers::parse_dirstate_entries;
13 use crate::dirstate::CopyMapIter;
13 use crate::dirstate::CopyMapIter;
14 use crate::dirstate::StateMapIter;
14 use crate::dirstate::StateMapIter;
15 use crate::dirstate::TruncatedTimestamp;
15 use crate::dirstate::TruncatedTimestamp;
16 use crate::dirstate::SIZE_FROM_OTHER_PARENT;
16 use crate::dirstate::SIZE_FROM_OTHER_PARENT;
17 use crate::dirstate::SIZE_NON_NORMAL;
17 use crate::dirstate::SIZE_NON_NORMAL;
18 use crate::matchers::Matcher;
18 use crate::matchers::Matcher;
19 use crate::utils::hg_path::{HgPath, HgPathBuf};
19 use crate::utils::hg_path::{HgPath, HgPathBuf};
20 use crate::DirstateEntry;
20 use crate::DirstateEntry;
21 use crate::DirstateError;
21 use crate::DirstateError;
22 use crate::DirstateParents;
22 use crate::DirstateParents;
23 use crate::DirstateStatus;
23 use crate::DirstateStatus;
24 use crate::EntryState;
24 use crate::EntryState;
25 use crate::FastHashMap;
25 use crate::FastHashMap;
26 use crate::PatternFileWarning;
26 use crate::PatternFileWarning;
27 use crate::StatusError;
27 use crate::StatusError;
28 use crate::StatusOptions;
28 use crate::StatusOptions;
29
29
30 /// Append to an existing data file if the amount of unreachable data (not used
30 /// Append to an existing data file if the amount of unreachable data (not used
31 /// anymore) is less than this fraction of the total amount of existing data.
31 /// anymore) is less than this fraction of the total amount of existing data.
32 const ACCEPTABLE_UNREACHABLE_BYTES_RATIO: f32 = 0.5;
32 const ACCEPTABLE_UNREACHABLE_BYTES_RATIO: f32 = 0.5;
33
33
34 pub struct DirstateMap<'on_disk> {
34 pub struct DirstateMap<'on_disk> {
35 /// Contents of the `.hg/dirstate` file
35 /// Contents of the `.hg/dirstate` file
36 pub(super) on_disk: &'on_disk [u8],
36 pub(super) on_disk: &'on_disk [u8],
37
37
38 pub(super) root: ChildNodes<'on_disk>,
38 pub(super) root: ChildNodes<'on_disk>,
39
39
40 /// Number of nodes anywhere in the tree that have `.entry.is_some()`.
40 /// Number of nodes anywhere in the tree that have `.entry.is_some()`.
41 pub(super) nodes_with_entry_count: u32,
41 pub(super) nodes_with_entry_count: u32,
42
42
43 /// Number of nodes anywhere in the tree that have
43 /// Number of nodes anywhere in the tree that have
44 /// `.copy_source.is_some()`.
44 /// `.copy_source.is_some()`.
45 pub(super) nodes_with_copy_source_count: u32,
45 pub(super) nodes_with_copy_source_count: u32,
46
46
47 /// See on_disk::Header
47 /// See on_disk::Header
48 pub(super) ignore_patterns_hash: on_disk::IgnorePatternsHash,
48 pub(super) ignore_patterns_hash: on_disk::IgnorePatternsHash,
49
49
50 /// How many bytes of `on_disk` are not used anymore
50 /// How many bytes of `on_disk` are not used anymore
51 pub(super) unreachable_bytes: u32,
51 pub(super) unreachable_bytes: u32,
52 }
52 }
53
53
54 /// Using a plain `HgPathBuf` of the full path from the repository root as a
54 /// Using a plain `HgPathBuf` of the full path from the repository root as a
55 /// map key would also work: all paths in a given map have the same parent
55 /// map key would also work: all paths in a given map have the same parent
56 /// path, so comparing full paths gives the same result as comparing base
56 /// path, so comparing full paths gives the same result as comparing base
57 /// names. However `HashMap` would waste time always re-hashing the same
57 /// names. However `HashMap` would waste time always re-hashing the same
58 /// string prefix.
58 /// string prefix.
59 pub(super) type NodeKey<'on_disk> = WithBasename<Cow<'on_disk, HgPath>>;
59 pub(super) type NodeKey<'on_disk> = WithBasename<Cow<'on_disk, HgPath>>;
60
60
61 /// Similar to `&'tree Cow<'on_disk, HgPath>`, but can also be returned
61 /// Similar to `&'tree Cow<'on_disk, HgPath>`, but can also be returned
62 /// for on-disk nodes that don’t actually have a `Cow` to borrow.
62 /// for on-disk nodes that don’t actually have a `Cow` to borrow.
63 pub(super) enum BorrowedPath<'tree, 'on_disk> {
63 pub(super) enum BorrowedPath<'tree, 'on_disk> {
64 InMemory(&'tree HgPathBuf),
64 InMemory(&'tree HgPathBuf),
65 OnDisk(&'on_disk HgPath),
65 OnDisk(&'on_disk HgPath),
66 }
66 }
67
67
68 pub(super) enum ChildNodes<'on_disk> {
68 pub(super) enum ChildNodes<'on_disk> {
69 InMemory(FastHashMap<NodeKey<'on_disk>, Node<'on_disk>>),
69 InMemory(FastHashMap<NodeKey<'on_disk>, Node<'on_disk>>),
70 OnDisk(&'on_disk [on_disk::Node]),
70 OnDisk(&'on_disk [on_disk::Node]),
71 }
71 }
72
72
73 pub(super) enum ChildNodesRef<'tree, 'on_disk> {
73 pub(super) enum ChildNodesRef<'tree, 'on_disk> {
74 InMemory(&'tree FastHashMap<NodeKey<'on_disk>, Node<'on_disk>>),
74 InMemory(&'tree FastHashMap<NodeKey<'on_disk>, Node<'on_disk>>),
75 OnDisk(&'on_disk [on_disk::Node]),
75 OnDisk(&'on_disk [on_disk::Node]),
76 }
76 }
77
77
78 pub(super) enum NodeRef<'tree, 'on_disk> {
78 pub(super) enum NodeRef<'tree, 'on_disk> {
79 InMemory(&'tree NodeKey<'on_disk>, &'tree Node<'on_disk>),
79 InMemory(&'tree NodeKey<'on_disk>, &'tree Node<'on_disk>),
80 OnDisk(&'on_disk on_disk::Node),
80 OnDisk(&'on_disk on_disk::Node),
81 }
81 }
82
82
83 impl<'tree, 'on_disk> BorrowedPath<'tree, 'on_disk> {
83 impl<'tree, 'on_disk> BorrowedPath<'tree, 'on_disk> {
84 pub fn detach_from_tree(&self) -> Cow<'on_disk, HgPath> {
84 pub fn detach_from_tree(&self) -> Cow<'on_disk, HgPath> {
85 match *self {
85 match *self {
86 BorrowedPath::InMemory(in_memory) => Cow::Owned(in_memory.clone()),
86 BorrowedPath::InMemory(in_memory) => Cow::Owned(in_memory.clone()),
87 BorrowedPath::OnDisk(on_disk) => Cow::Borrowed(on_disk),
87 BorrowedPath::OnDisk(on_disk) => Cow::Borrowed(on_disk),
88 }
88 }
89 }
89 }
90 }
90 }
91
91
92 impl<'tree, 'on_disk> std::ops::Deref for BorrowedPath<'tree, 'on_disk> {
92 impl<'tree, 'on_disk> std::ops::Deref for BorrowedPath<'tree, 'on_disk> {
93 type Target = HgPath;
93 type Target = HgPath;
94
94
95 fn deref(&self) -> &HgPath {
95 fn deref(&self) -> &HgPath {
96 match *self {
96 match *self {
97 BorrowedPath::InMemory(in_memory) => in_memory,
97 BorrowedPath::InMemory(in_memory) => in_memory,
98 BorrowedPath::OnDisk(on_disk) => on_disk,
98 BorrowedPath::OnDisk(on_disk) => on_disk,
99 }
99 }
100 }
100 }
101 }
101 }
102
102
103 impl Default for ChildNodes<'_> {
103 impl Default for ChildNodes<'_> {
104 fn default() -> Self {
104 fn default() -> Self {
105 ChildNodes::InMemory(Default::default())
105 ChildNodes::InMemory(Default::default())
106 }
106 }
107 }
107 }
108
108
109 impl<'on_disk> ChildNodes<'on_disk> {
109 impl<'on_disk> ChildNodes<'on_disk> {
110 pub(super) fn as_ref<'tree>(
110 pub(super) fn as_ref<'tree>(
111 &'tree self,
111 &'tree self,
112 ) -> ChildNodesRef<'tree, 'on_disk> {
112 ) -> ChildNodesRef<'tree, 'on_disk> {
113 match self {
113 match self {
114 ChildNodes::InMemory(nodes) => ChildNodesRef::InMemory(nodes),
114 ChildNodes::InMemory(nodes) => ChildNodesRef::InMemory(nodes),
115 ChildNodes::OnDisk(nodes) => ChildNodesRef::OnDisk(nodes),
115 ChildNodes::OnDisk(nodes) => ChildNodesRef::OnDisk(nodes),
116 }
116 }
117 }
117 }
118
118
119 pub(super) fn is_empty(&self) -> bool {
119 pub(super) fn is_empty(&self) -> bool {
120 match self {
120 match self {
121 ChildNodes::InMemory(nodes) => nodes.is_empty(),
121 ChildNodes::InMemory(nodes) => nodes.is_empty(),
122 ChildNodes::OnDisk(nodes) => nodes.is_empty(),
122 ChildNodes::OnDisk(nodes) => nodes.is_empty(),
123 }
123 }
124 }
124 }
125
125
126 fn make_mut(
126 fn make_mut(
127 &mut self,
127 &mut self,
128 on_disk: &'on_disk [u8],
128 on_disk: &'on_disk [u8],
129 unreachable_bytes: &mut u32,
129 unreachable_bytes: &mut u32,
130 ) -> Result<
130 ) -> Result<
131 &mut FastHashMap<NodeKey<'on_disk>, Node<'on_disk>>,
131 &mut FastHashMap<NodeKey<'on_disk>, Node<'on_disk>>,
132 DirstateV2ParseError,
132 DirstateV2ParseError,
133 > {
133 > {
134 match self {
134 match self {
135 ChildNodes::InMemory(nodes) => Ok(nodes),
135 ChildNodes::InMemory(nodes) => Ok(nodes),
136 ChildNodes::OnDisk(nodes) => {
136 ChildNodes::OnDisk(nodes) => {
137 *unreachable_bytes +=
137 *unreachable_bytes +=
138 std::mem::size_of_val::<[on_disk::Node]>(nodes) as u32;
138 std::mem::size_of_val::<[on_disk::Node]>(nodes) as u32;
139 let nodes = nodes
139 let nodes = nodes
140 .iter()
140 .iter()
141 .map(|node| {
141 .map(|node| {
142 Ok((
142 Ok((
143 node.path(on_disk)?,
143 node.path(on_disk)?,
144 node.to_in_memory_node(on_disk)?,
144 node.to_in_memory_node(on_disk)?,
145 ))
145 ))
146 })
146 })
147 .collect::<Result<_, _>>()?;
147 .collect::<Result<_, _>>()?;
148 *self = ChildNodes::InMemory(nodes);
148 *self = ChildNodes::InMemory(nodes);
149 match self {
149 match self {
150 ChildNodes::InMemory(nodes) => Ok(nodes),
150 ChildNodes::InMemory(nodes) => Ok(nodes),
151 ChildNodes::OnDisk(_) => unreachable!(),
151 ChildNodes::OnDisk(_) => unreachable!(),
152 }
152 }
153 }
153 }
154 }
154 }
155 }
155 }
156 }
156 }
157
157
158 impl<'tree, 'on_disk> ChildNodesRef<'tree, 'on_disk> {
158 impl<'tree, 'on_disk> ChildNodesRef<'tree, 'on_disk> {
159 pub(super) fn get(
159 pub(super) fn get(
160 &self,
160 &self,
161 base_name: &HgPath,
161 base_name: &HgPath,
162 on_disk: &'on_disk [u8],
162 on_disk: &'on_disk [u8],
163 ) -> Result<Option<NodeRef<'tree, 'on_disk>>, DirstateV2ParseError> {
163 ) -> Result<Option<NodeRef<'tree, 'on_disk>>, DirstateV2ParseError> {
164 match self {
164 match self {
165 ChildNodesRef::InMemory(nodes) => Ok(nodes
165 ChildNodesRef::InMemory(nodes) => Ok(nodes
166 .get_key_value(base_name)
166 .get_key_value(base_name)
167 .map(|(k, v)| NodeRef::InMemory(k, v))),
167 .map(|(k, v)| NodeRef::InMemory(k, v))),
168 ChildNodesRef::OnDisk(nodes) => {
168 ChildNodesRef::OnDisk(nodes) => {
169 let mut parse_result = Ok(());
169 let mut parse_result = Ok(());
170 let search_result = nodes.binary_search_by(|node| {
170 let search_result = nodes.binary_search_by(|node| {
171 match node.base_name(on_disk) {
171 match node.base_name(on_disk) {
172 Ok(node_base_name) => node_base_name.cmp(base_name),
172 Ok(node_base_name) => node_base_name.cmp(base_name),
173 Err(e) => {
173 Err(e) => {
174 parse_result = Err(e);
174 parse_result = Err(e);
175 // Dummy comparison result, `search_result` won’t
175 // Dummy comparison result, `search_result` won’t
176 // be used since `parse_result` is an error
176 // be used since `parse_result` is an error
177 std::cmp::Ordering::Equal
177 std::cmp::Ordering::Equal
178 }
178 }
179 }
179 }
180 });
180 });
181 parse_result.map(|()| {
181 parse_result.map(|()| {
182 search_result.ok().map(|i| NodeRef::OnDisk(&nodes[i]))
182 search_result.ok().map(|i| NodeRef::OnDisk(&nodes[i]))
183 })
183 })
184 }
184 }
185 }
185 }
186 }
186 }
187
187
188 /// Iterate in undefined order
188 /// Iterate in undefined order
189 pub(super) fn iter(
189 pub(super) fn iter(
190 &self,
190 &self,
191 ) -> impl Iterator<Item = NodeRef<'tree, 'on_disk>> {
191 ) -> impl Iterator<Item = NodeRef<'tree, 'on_disk>> {
192 match self {
192 match self {
193 ChildNodesRef::InMemory(nodes) => itertools::Either::Left(
193 ChildNodesRef::InMemory(nodes) => itertools::Either::Left(
194 nodes.iter().map(|(k, v)| NodeRef::InMemory(k, v)),
194 nodes.iter().map(|(k, v)| NodeRef::InMemory(k, v)),
195 ),
195 ),
196 ChildNodesRef::OnDisk(nodes) => {
196 ChildNodesRef::OnDisk(nodes) => {
197 itertools::Either::Right(nodes.iter().map(NodeRef::OnDisk))
197 itertools::Either::Right(nodes.iter().map(NodeRef::OnDisk))
198 }
198 }
199 }
199 }
200 }
200 }
201
201
202 /// Iterate in parallel in undefined order
202 /// Iterate in parallel in undefined order
203 pub(super) fn par_iter(
203 pub(super) fn par_iter(
204 &self,
204 &self,
205 ) -> impl rayon::iter::ParallelIterator<Item = NodeRef<'tree, 'on_disk>>
205 ) -> impl rayon::iter::ParallelIterator<Item = NodeRef<'tree, 'on_disk>>
206 {
206 {
207 use rayon::prelude::*;
207 use rayon::prelude::*;
208 match self {
208 match self {
209 ChildNodesRef::InMemory(nodes) => rayon::iter::Either::Left(
209 ChildNodesRef::InMemory(nodes) => rayon::iter::Either::Left(
210 nodes.par_iter().map(|(k, v)| NodeRef::InMemory(k, v)),
210 nodes.par_iter().map(|(k, v)| NodeRef::InMemory(k, v)),
211 ),
211 ),
212 ChildNodesRef::OnDisk(nodes) => rayon::iter::Either::Right(
212 ChildNodesRef::OnDisk(nodes) => rayon::iter::Either::Right(
213 nodes.par_iter().map(NodeRef::OnDisk),
213 nodes.par_iter().map(NodeRef::OnDisk),
214 ),
214 ),
215 }
215 }
216 }
216 }
217
217
218 pub(super) fn sorted(&self) -> Vec<NodeRef<'tree, 'on_disk>> {
218 pub(super) fn sorted(&self) -> Vec<NodeRef<'tree, 'on_disk>> {
219 match self {
219 match self {
220 ChildNodesRef::InMemory(nodes) => {
220 ChildNodesRef::InMemory(nodes) => {
221 let mut vec: Vec<_> = nodes
221 let mut vec: Vec<_> = nodes
222 .iter()
222 .iter()
223 .map(|(k, v)| NodeRef::InMemory(k, v))
223 .map(|(k, v)| NodeRef::InMemory(k, v))
224 .collect();
224 .collect();
225 fn sort_key<'a>(node: &'a NodeRef) -> &'a HgPath {
225 fn sort_key<'a>(node: &'a NodeRef) -> &'a HgPath {
226 match node {
226 match node {
227 NodeRef::InMemory(path, _node) => path.base_name(),
227 NodeRef::InMemory(path, _node) => path.base_name(),
228 NodeRef::OnDisk(_) => unreachable!(),
228 NodeRef::OnDisk(_) => unreachable!(),
229 }
229 }
230 }
230 }
231 // `sort_unstable_by_key` doesn’t allow keys borrowing from the
231 // `sort_unstable_by_key` doesn’t allow keys borrowing from the
232 // value: https://github.com/rust-lang/rust/issues/34162
232 // value: https://github.com/rust-lang/rust/issues/34162
233 vec.sort_unstable_by(|a, b| sort_key(a).cmp(sort_key(b)));
233 vec.sort_unstable_by(|a, b| sort_key(a).cmp(sort_key(b)));
234 vec
234 vec
235 }
235 }
236 ChildNodesRef::OnDisk(nodes) => {
236 ChildNodesRef::OnDisk(nodes) => {
237 // Nodes on disk are already sorted
237 // Nodes on disk are already sorted
238 nodes.iter().map(NodeRef::OnDisk).collect()
238 nodes.iter().map(NodeRef::OnDisk).collect()
239 }
239 }
240 }
240 }
241 }
241 }
242 }
242 }
243
243
244 impl<'tree, 'on_disk> NodeRef<'tree, 'on_disk> {
244 impl<'tree, 'on_disk> NodeRef<'tree, 'on_disk> {
245 pub(super) fn full_path(
245 pub(super) fn full_path(
246 &self,
246 &self,
247 on_disk: &'on_disk [u8],
247 on_disk: &'on_disk [u8],
248 ) -> Result<&'tree HgPath, DirstateV2ParseError> {
248 ) -> Result<&'tree HgPath, DirstateV2ParseError> {
249 match self {
249 match self {
250 NodeRef::InMemory(path, _node) => Ok(path.full_path()),
250 NodeRef::InMemory(path, _node) => Ok(path.full_path()),
251 NodeRef::OnDisk(node) => node.full_path(on_disk),
251 NodeRef::OnDisk(node) => node.full_path(on_disk),
252 }
252 }
253 }
253 }
254
254
255 /// Returns a `BorrowedPath`, which can be turned into a `Cow<'on_disk,
255 /// Returns a `BorrowedPath`, which can be turned into a `Cow<'on_disk,
256 /// HgPath>` detached from `'tree`
256 /// HgPath>` detached from `'tree`
257 pub(super) fn full_path_borrowed(
257 pub(super) fn full_path_borrowed(
258 &self,
258 &self,
259 on_disk: &'on_disk [u8],
259 on_disk: &'on_disk [u8],
260 ) -> Result<BorrowedPath<'tree, 'on_disk>, DirstateV2ParseError> {
260 ) -> Result<BorrowedPath<'tree, 'on_disk>, DirstateV2ParseError> {
261 match self {
261 match self {
262 NodeRef::InMemory(path, _node) => match path.full_path() {
262 NodeRef::InMemory(path, _node) => match path.full_path() {
263 Cow::Borrowed(on_disk) => Ok(BorrowedPath::OnDisk(on_disk)),
263 Cow::Borrowed(on_disk) => Ok(BorrowedPath::OnDisk(on_disk)),
264 Cow::Owned(in_memory) => Ok(BorrowedPath::InMemory(in_memory)),
264 Cow::Owned(in_memory) => Ok(BorrowedPath::InMemory(in_memory)),
265 },
265 },
266 NodeRef::OnDisk(node) => {
266 NodeRef::OnDisk(node) => {
267 Ok(BorrowedPath::OnDisk(node.full_path(on_disk)?))
267 Ok(BorrowedPath::OnDisk(node.full_path(on_disk)?))
268 }
268 }
269 }
269 }
270 }
270 }
271
271
272 pub(super) fn base_name(
272 pub(super) fn base_name(
273 &self,
273 &self,
274 on_disk: &'on_disk [u8],
274 on_disk: &'on_disk [u8],
275 ) -> Result<&'tree HgPath, DirstateV2ParseError> {
275 ) -> Result<&'tree HgPath, DirstateV2ParseError> {
276 match self {
276 match self {
277 NodeRef::InMemory(path, _node) => Ok(path.base_name()),
277 NodeRef::InMemory(path, _node) => Ok(path.base_name()),
278 NodeRef::OnDisk(node) => node.base_name(on_disk),
278 NodeRef::OnDisk(node) => node.base_name(on_disk),
279 }
279 }
280 }
280 }
281
281
282 pub(super) fn children(
282 pub(super) fn children(
283 &self,
283 &self,
284 on_disk: &'on_disk [u8],
284 on_disk: &'on_disk [u8],
285 ) -> Result<ChildNodesRef<'tree, 'on_disk>, DirstateV2ParseError> {
285 ) -> Result<ChildNodesRef<'tree, 'on_disk>, DirstateV2ParseError> {
286 match self {
286 match self {
287 NodeRef::InMemory(_path, node) => Ok(node.children.as_ref()),
287 NodeRef::InMemory(_path, node) => Ok(node.children.as_ref()),
288 NodeRef::OnDisk(node) => {
288 NodeRef::OnDisk(node) => {
289 Ok(ChildNodesRef::OnDisk(node.children(on_disk)?))
289 Ok(ChildNodesRef::OnDisk(node.children(on_disk)?))
290 }
290 }
291 }
291 }
292 }
292 }
293
293
294 pub(super) fn has_copy_source(&self) -> bool {
294 pub(super) fn has_copy_source(&self) -> bool {
295 match self {
295 match self {
296 NodeRef::InMemory(_path, node) => node.copy_source.is_some(),
296 NodeRef::InMemory(_path, node) => node.copy_source.is_some(),
297 NodeRef::OnDisk(node) => node.has_copy_source(),
297 NodeRef::OnDisk(node) => node.has_copy_source(),
298 }
298 }
299 }
299 }
300
300
301 pub(super) fn copy_source(
301 pub(super) fn copy_source(
302 &self,
302 &self,
303 on_disk: &'on_disk [u8],
303 on_disk: &'on_disk [u8],
304 ) -> Result<Option<&'tree HgPath>, DirstateV2ParseError> {
304 ) -> Result<Option<&'tree HgPath>, DirstateV2ParseError> {
305 match self {
305 match self {
306 NodeRef::InMemory(_path, node) => {
306 NodeRef::InMemory(_path, node) => {
307 Ok(node.copy_source.as_ref().map(|s| &**s))
307 Ok(node.copy_source.as_ref().map(|s| &**s))
308 }
308 }
309 NodeRef::OnDisk(node) => node.copy_source(on_disk),
309 NodeRef::OnDisk(node) => node.copy_source(on_disk),
310 }
310 }
311 }
311 }
312
312
313 pub(super) fn entry(
313 pub(super) fn entry(
314 &self,
314 &self,
315 ) -> Result<Option<DirstateEntry>, DirstateV2ParseError> {
315 ) -> Result<Option<DirstateEntry>, DirstateV2ParseError> {
316 match self {
316 match self {
317 NodeRef::InMemory(_path, node) => {
317 NodeRef::InMemory(_path, node) => {
318 Ok(node.data.as_entry().copied())
318 Ok(node.data.as_entry().copied())
319 }
319 }
320 NodeRef::OnDisk(node) => node.entry(),
320 NodeRef::OnDisk(node) => node.entry(),
321 }
321 }
322 }
322 }
323
323
324 pub(super) fn state(
324 pub(super) fn state(
325 &self,
325 &self,
326 ) -> Result<Option<EntryState>, DirstateV2ParseError> {
326 ) -> Result<Option<EntryState>, DirstateV2ParseError> {
327 Ok(self.entry()?.map(|e| e.state()))
327 Ok(self.entry()?.map(|e| e.state()))
328 }
328 }
329
329
330 pub(super) fn cached_directory_mtime(
330 pub(super) fn cached_directory_mtime(
331 &self,
331 &self,
332 ) -> Result<Option<TruncatedTimestamp>, DirstateV2ParseError> {
332 ) -> Result<Option<TruncatedTimestamp>, DirstateV2ParseError> {
333 match self {
333 match self {
334 NodeRef::InMemory(_path, node) => Ok(match node.data {
334 NodeRef::InMemory(_path, node) => Ok(match node.data {
335 NodeData::CachedDirectory { mtime } => Some(mtime),
335 NodeData::CachedDirectory { mtime } => Some(mtime),
336 _ => None,
336 _ => None,
337 }),
337 }),
338 NodeRef::OnDisk(node) => node.cached_directory_mtime(),
338 NodeRef::OnDisk(node) => node.cached_directory_mtime(),
339 }
339 }
340 }
340 }
341
341
342 pub(super) fn descendants_with_entry_count(&self) -> u32 {
342 pub(super) fn descendants_with_entry_count(&self) -> u32 {
343 match self {
343 match self {
344 NodeRef::InMemory(_path, node) => {
344 NodeRef::InMemory(_path, node) => {
345 node.descendants_with_entry_count
345 node.descendants_with_entry_count
346 }
346 }
347 NodeRef::OnDisk(node) => node.descendants_with_entry_count.get(),
347 NodeRef::OnDisk(node) => node.descendants_with_entry_count.get(),
348 }
348 }
349 }
349 }
350
350
351 pub(super) fn tracked_descendants_count(&self) -> u32 {
351 pub(super) fn tracked_descendants_count(&self) -> u32 {
352 match self {
352 match self {
353 NodeRef::InMemory(_path, node) => node.tracked_descendants_count,
353 NodeRef::InMemory(_path, node) => node.tracked_descendants_count,
354 NodeRef::OnDisk(node) => node.tracked_descendants_count.get(),
354 NodeRef::OnDisk(node) => node.tracked_descendants_count.get(),
355 }
355 }
356 }
356 }
357 }
357 }
358
358
359 /// Represents a file or a directory
359 /// Represents a file or a directory
360 #[derive(Default)]
360 #[derive(Default)]
361 pub(super) struct Node<'on_disk> {
361 pub(super) struct Node<'on_disk> {
362 pub(super) data: NodeData,
362 pub(super) data: NodeData,
363
363
364 pub(super) copy_source: Option<Cow<'on_disk, HgPath>>,
364 pub(super) copy_source: Option<Cow<'on_disk, HgPath>>,
365
365
366 pub(super) children: ChildNodes<'on_disk>,
366 pub(super) children: ChildNodes<'on_disk>,
367
367
368 /// How many (non-inclusive) descendants of this node have an entry.
368 /// How many (non-inclusive) descendants of this node have an entry.
369 pub(super) descendants_with_entry_count: u32,
369 pub(super) descendants_with_entry_count: u32,
370
370
371 /// How many (non-inclusive) descendants of this node have an entry whose
371 /// How many (non-inclusive) descendants of this node have an entry whose
372 /// state is "tracked".
372 /// state is "tracked".
373 pub(super) tracked_descendants_count: u32,
373 pub(super) tracked_descendants_count: u32,
374 }
374 }
375
375
376 pub(super) enum NodeData {
376 pub(super) enum NodeData {
377 Entry(DirstateEntry),
377 Entry(DirstateEntry),
378 CachedDirectory { mtime: TruncatedTimestamp },
378 CachedDirectory { mtime: TruncatedTimestamp },
379 None,
379 None,
380 }
380 }
381
381
382 impl Default for NodeData {
382 impl Default for NodeData {
383 fn default() -> Self {
383 fn default() -> Self {
384 NodeData::None
384 NodeData::None
385 }
385 }
386 }
386 }
387
387
388 impl NodeData {
388 impl NodeData {
389 fn has_entry(&self) -> bool {
389 fn has_entry(&self) -> bool {
390 match self {
390 match self {
391 NodeData::Entry(_) => true,
391 NodeData::Entry(_) => true,
392 _ => false,
392 _ => false,
393 }
393 }
394 }
394 }
395
395
396 fn as_entry(&self) -> Option<&DirstateEntry> {
396 fn as_entry(&self) -> Option<&DirstateEntry> {
397 match self {
397 match self {
398 NodeData::Entry(entry) => Some(entry),
398 NodeData::Entry(entry) => Some(entry),
399 _ => None,
399 _ => None,
400 }
400 }
401 }
401 }
402 }
402 }
403
403
404 impl<'on_disk> DirstateMap<'on_disk> {
404 impl<'on_disk> DirstateMap<'on_disk> {
405 pub(super) fn empty(on_disk: &'on_disk [u8]) -> Self {
405 pub(super) fn empty(on_disk: &'on_disk [u8]) -> Self {
406 Self {
406 Self {
407 on_disk,
407 on_disk,
408 root: ChildNodes::default(),
408 root: ChildNodes::default(),
409 nodes_with_entry_count: 0,
409 nodes_with_entry_count: 0,
410 nodes_with_copy_source_count: 0,
410 nodes_with_copy_source_count: 0,
411 ignore_patterns_hash: [0; on_disk::IGNORE_PATTERNS_HASH_LEN],
411 ignore_patterns_hash: [0; on_disk::IGNORE_PATTERNS_HASH_LEN],
412 unreachable_bytes: 0,
412 unreachable_bytes: 0,
413 }
413 }
414 }
414 }
415
415
416 #[timed]
416 #[timed]
417 pub fn new_v2(
417 pub fn new_v2(
418 on_disk: &'on_disk [u8],
418 on_disk: &'on_disk [u8],
419 data_size: usize,
419 data_size: usize,
420 metadata: &[u8],
420 metadata: &[u8],
421 ) -> Result<Self, DirstateError> {
421 ) -> Result<Self, DirstateError> {
422 if let Some(data) = on_disk.get(..data_size) {
422 if let Some(data) = on_disk.get(..data_size) {
423 Ok(on_disk::read(data, metadata)?)
423 Ok(on_disk::read(data, metadata)?)
424 } else {
424 } else {
425 Err(DirstateV2ParseError.into())
425 Err(DirstateV2ParseError.into())
426 }
426 }
427 }
427 }
428
428
429 #[timed]
429 #[timed]
430 pub fn new_v1(
430 pub fn new_v1(
431 on_disk: &'on_disk [u8],
431 on_disk: &'on_disk [u8],
432 ) -> Result<(Self, Option<DirstateParents>), DirstateError> {
432 ) -> Result<(Self, Option<DirstateParents>), DirstateError> {
433 let mut map = Self::empty(on_disk);
433 let mut map = Self::empty(on_disk);
434 if map.on_disk.is_empty() {
434 if map.on_disk.is_empty() {
435 return Ok((map, None));
435 return Ok((map, None));
436 }
436 }
437
437
438 let parents = parse_dirstate_entries(
438 let parents = parse_dirstate_entries(
439 map.on_disk,
439 map.on_disk,
440 |path, entry, copy_source| {
440 |path, entry, copy_source| {
441 let tracked = entry.state().is_tracked();
441 let tracked = entry.state().is_tracked();
442 let node = Self::get_or_insert_node(
442 let node = Self::get_or_insert_node(
443 map.on_disk,
443 map.on_disk,
444 &mut map.unreachable_bytes,
444 &mut map.unreachable_bytes,
445 &mut map.root,
445 &mut map.root,
446 path,
446 path,
447 WithBasename::to_cow_borrowed,
447 WithBasename::to_cow_borrowed,
448 |ancestor| {
448 |ancestor| {
449 if tracked {
449 if tracked {
450 ancestor.tracked_descendants_count += 1
450 ancestor.tracked_descendants_count += 1
451 }
451 }
452 ancestor.descendants_with_entry_count += 1
452 ancestor.descendants_with_entry_count += 1
453 },
453 },
454 )?;
454 )?;
455 assert!(
455 assert!(
456 !node.data.has_entry(),
456 !node.data.has_entry(),
457 "duplicate dirstate entry in read"
457 "duplicate dirstate entry in read"
458 );
458 );
459 assert!(
459 assert!(
460 node.copy_source.is_none(),
460 node.copy_source.is_none(),
461 "duplicate dirstate entry in read"
461 "duplicate dirstate entry in read"
462 );
462 );
463 node.data = NodeData::Entry(*entry);
463 node.data = NodeData::Entry(*entry);
464 node.copy_source = copy_source.map(Cow::Borrowed);
464 node.copy_source = copy_source.map(Cow::Borrowed);
465 map.nodes_with_entry_count += 1;
465 map.nodes_with_entry_count += 1;
466 if copy_source.is_some() {
466 if copy_source.is_some() {
467 map.nodes_with_copy_source_count += 1
467 map.nodes_with_copy_source_count += 1
468 }
468 }
469 Ok(())
469 Ok(())
470 },
470 },
471 )?;
471 )?;
472 let parents = Some(parents.clone());
472 let parents = Some(parents.clone());
473
473
474 Ok((map, parents))
474 Ok((map, parents))
475 }
475 }
476
476
477 /// Assuming dirstate-v2 format, returns whether the next write should
477 /// Assuming dirstate-v2 format, returns whether the next write should
478 /// append to the existing data file that contains `self.on_disk` (true),
478 /// append to the existing data file that contains `self.on_disk` (true),
479 /// or create a new data file from scratch (false).
479 /// or create a new data file from scratch (false).
480 pub(super) fn write_should_append(&self) -> bool {
480 pub(super) fn write_should_append(&self) -> bool {
481 let ratio = self.unreachable_bytes as f32 / self.on_disk.len() as f32;
481 let ratio = self.unreachable_bytes as f32 / self.on_disk.len() as f32;
482 ratio < ACCEPTABLE_UNREACHABLE_BYTES_RATIO
482 ratio < ACCEPTABLE_UNREACHABLE_BYTES_RATIO
483 }
483 }
484
484
485 fn get_node<'tree>(
485 fn get_node<'tree>(
486 &'tree self,
486 &'tree self,
487 path: &HgPath,
487 path: &HgPath,
488 ) -> Result<Option<NodeRef<'tree, 'on_disk>>, DirstateV2ParseError> {
488 ) -> Result<Option<NodeRef<'tree, 'on_disk>>, DirstateV2ParseError> {
489 let mut children = self.root.as_ref();
489 let mut children = self.root.as_ref();
490 let mut components = path.components();
490 let mut components = path.components();
491 let mut component =
491 let mut component =
492 components.next().expect("expected at least one components");
492 components.next().expect("expected at least one components");
493 loop {
493 loop {
494 if let Some(child) = children.get(component, self.on_disk)? {
494 if let Some(child) = children.get(component, self.on_disk)? {
495 if let Some(next_component) = components.next() {
495 if let Some(next_component) = components.next() {
496 component = next_component;
496 component = next_component;
497 children = child.children(self.on_disk)?;
497 children = child.children(self.on_disk)?;
498 } else {
498 } else {
499 return Ok(Some(child));
499 return Ok(Some(child));
500 }
500 }
501 } else {
501 } else {
502 return Ok(None);
502 return Ok(None);
503 }
503 }
504 }
504 }
505 }
505 }
506
506
507 /// Returns a mutable reference to the node at `path` if it exists
507 /// Returns a mutable reference to the node at `path` if it exists
508 ///
508 ///
509 /// This takes `root` instead of `&mut self` so that callers can mutate
509 /// This takes `root` instead of `&mut self` so that callers can mutate
510 /// other fields while the returned borrow is still valid
510 /// other fields while the returned borrow is still valid
511 fn get_node_mut<'tree>(
511 fn get_node_mut<'tree>(
512 on_disk: &'on_disk [u8],
512 on_disk: &'on_disk [u8],
513 unreachable_bytes: &mut u32,
513 unreachable_bytes: &mut u32,
514 root: &'tree mut ChildNodes<'on_disk>,
514 root: &'tree mut ChildNodes<'on_disk>,
515 path: &HgPath,
515 path: &HgPath,
516 ) -> Result<Option<&'tree mut Node<'on_disk>>, DirstateV2ParseError> {
516 ) -> Result<Option<&'tree mut Node<'on_disk>>, DirstateV2ParseError> {
517 let mut children = root;
517 let mut children = root;
518 let mut components = path.components();
518 let mut components = path.components();
519 let mut component =
519 let mut component =
520 components.next().expect("expected at least one components");
520 components.next().expect("expected at least one components");
521 loop {
521 loop {
522 if let Some(child) = children
522 if let Some(child) = children
523 .make_mut(on_disk, unreachable_bytes)?
523 .make_mut(on_disk, unreachable_bytes)?
524 .get_mut(component)
524 .get_mut(component)
525 {
525 {
526 if let Some(next_component) = components.next() {
526 if let Some(next_component) = components.next() {
527 component = next_component;
527 component = next_component;
528 children = &mut child.children;
528 children = &mut child.children;
529 } else {
529 } else {
530 return Ok(Some(child));
530 return Ok(Some(child));
531 }
531 }
532 } else {
532 } else {
533 return Ok(None);
533 return Ok(None);
534 }
534 }
535 }
535 }
536 }
536 }
537
537
538 pub(super) fn get_or_insert<'tree, 'path>(
538 pub(super) fn get_or_insert<'tree, 'path>(
539 &'tree mut self,
539 &'tree mut self,
540 path: &HgPath,
540 path: &HgPath,
541 ) -> Result<&'tree mut Node<'on_disk>, DirstateV2ParseError> {
541 ) -> Result<&'tree mut Node<'on_disk>, DirstateV2ParseError> {
542 Self::get_or_insert_node(
542 Self::get_or_insert_node(
543 self.on_disk,
543 self.on_disk,
544 &mut self.unreachable_bytes,
544 &mut self.unreachable_bytes,
545 &mut self.root,
545 &mut self.root,
546 path,
546 path,
547 WithBasename::to_cow_owned,
547 WithBasename::to_cow_owned,
548 |_| {},
548 |_| {},
549 )
549 )
550 }
550 }
551
551
552 fn get_or_insert_node<'tree, 'path>(
552 fn get_or_insert_node<'tree, 'path>(
553 on_disk: &'on_disk [u8],
553 on_disk: &'on_disk [u8],
554 unreachable_bytes: &mut u32,
554 unreachable_bytes: &mut u32,
555 root: &'tree mut ChildNodes<'on_disk>,
555 root: &'tree mut ChildNodes<'on_disk>,
556 path: &'path HgPath,
556 path: &'path HgPath,
557 to_cow: impl Fn(
557 to_cow: impl Fn(
558 WithBasename<&'path HgPath>,
558 WithBasename<&'path HgPath>,
559 ) -> WithBasename<Cow<'on_disk, HgPath>>,
559 ) -> WithBasename<Cow<'on_disk, HgPath>>,
560 mut each_ancestor: impl FnMut(&mut Node),
560 mut each_ancestor: impl FnMut(&mut Node),
561 ) -> Result<&'tree mut Node<'on_disk>, DirstateV2ParseError> {
561 ) -> Result<&'tree mut Node<'on_disk>, DirstateV2ParseError> {
562 let mut child_nodes = root;
562 let mut child_nodes = root;
563 let mut inclusive_ancestor_paths =
563 let mut inclusive_ancestor_paths =
564 WithBasename::inclusive_ancestors_of(path);
564 WithBasename::inclusive_ancestors_of(path);
565 let mut ancestor_path = inclusive_ancestor_paths
565 let mut ancestor_path = inclusive_ancestor_paths
566 .next()
566 .next()
567 .expect("expected at least one inclusive ancestor");
567 .expect("expected at least one inclusive ancestor");
568 loop {
568 loop {
569 // TODO: can we avoid allocating an owned key in cases where the
569 // TODO: can we avoid allocating an owned key in cases where the
570 // map already contains that key, without introducing double
570 // map already contains that key, without introducing double
571 // lookup?
571 // lookup?
572 let child_node = child_nodes
572 let child_node = child_nodes
573 .make_mut(on_disk, unreachable_bytes)?
573 .make_mut(on_disk, unreachable_bytes)?
574 .entry(to_cow(ancestor_path))
574 .entry(to_cow(ancestor_path))
575 .or_default();
575 .or_default();
576 if let Some(next) = inclusive_ancestor_paths.next() {
576 if let Some(next) = inclusive_ancestor_paths.next() {
577 each_ancestor(child_node);
577 each_ancestor(child_node);
578 ancestor_path = next;
578 ancestor_path = next;
579 child_nodes = &mut child_node.children;
579 child_nodes = &mut child_node.children;
580 } else {
580 } else {
581 return Ok(child_node);
581 return Ok(child_node);
582 }
582 }
583 }
583 }
584 }
584 }
585
585
586 fn add_or_remove_file(
586 fn add_or_remove_file(
587 &mut self,
587 &mut self,
588 path: &HgPath,
588 path: &HgPath,
589 old_state: Option<EntryState>,
589 old_state: Option<EntryState>,
590 new_entry: DirstateEntry,
590 new_entry: DirstateEntry,
591 ) -> Result<(), DirstateV2ParseError> {
591 ) -> Result<(), DirstateV2ParseError> {
592 let had_entry = old_state.is_some();
592 let had_entry = old_state.is_some();
593 let was_tracked = old_state.map_or(false, |s| s.is_tracked());
593 let was_tracked = old_state.map_or(false, |s| s.is_tracked());
594 let tracked_count_increment =
594 let tracked_count_increment =
595 match (was_tracked, new_entry.state().is_tracked()) {
595 match (was_tracked, new_entry.state().is_tracked()) {
596 (false, true) => 1,
596 (false, true) => 1,
597 (true, false) => -1,
597 (true, false) => -1,
598 _ => 0,
598 _ => 0,
599 };
599 };
600
600
601 let node = Self::get_or_insert_node(
601 let node = Self::get_or_insert_node(
602 self.on_disk,
602 self.on_disk,
603 &mut self.unreachable_bytes,
603 &mut self.unreachable_bytes,
604 &mut self.root,
604 &mut self.root,
605 path,
605 path,
606 WithBasename::to_cow_owned,
606 WithBasename::to_cow_owned,
607 |ancestor| {
607 |ancestor| {
608 if !had_entry {
608 if !had_entry {
609 ancestor.descendants_with_entry_count += 1;
609 ancestor.descendants_with_entry_count += 1;
610 }
610 }
611
611
612 // We can’t use `+= increment` because the counter is unsigned,
612 // We can’t use `+= increment` because the counter is unsigned,
613 // and we want debug builds to detect accidental underflow
613 // and we want debug builds to detect accidental underflow
614 // through zero
614 // through zero
615 match tracked_count_increment {
615 match tracked_count_increment {
616 1 => ancestor.tracked_descendants_count += 1,
616 1 => ancestor.tracked_descendants_count += 1,
617 -1 => ancestor.tracked_descendants_count -= 1,
617 -1 => ancestor.tracked_descendants_count -= 1,
618 _ => {}
618 _ => {}
619 }
619 }
620 },
620 },
621 )?;
621 )?;
622 if !had_entry {
622 if !had_entry {
623 self.nodes_with_entry_count += 1
623 self.nodes_with_entry_count += 1
624 }
624 }
625 node.data = NodeData::Entry(new_entry);
625 node.data = NodeData::Entry(new_entry);
626 Ok(())
626 Ok(())
627 }
627 }
628
628
629 fn iter_nodes<'tree>(
629 fn iter_nodes<'tree>(
630 &'tree self,
630 &'tree self,
631 ) -> impl Iterator<
631 ) -> impl Iterator<
632 Item = Result<NodeRef<'tree, 'on_disk>, DirstateV2ParseError>,
632 Item = Result<NodeRef<'tree, 'on_disk>, DirstateV2ParseError>,
633 > + 'tree {
633 > + 'tree {
634 // Depth first tree traversal.
634 // Depth first tree traversal.
635 //
635 //
636 // If we could afford internal iteration and recursion,
636 // If we could afford internal iteration and recursion,
637 // this would look like:
637 // this would look like:
638 //
638 //
639 // ```
639 // ```
640 // fn traverse_children(
640 // fn traverse_children(
641 // children: &ChildNodes,
641 // children: &ChildNodes,
642 // each: &mut impl FnMut(&Node),
642 // each: &mut impl FnMut(&Node),
643 // ) {
643 // ) {
644 // for child in children.values() {
644 // for child in children.values() {
645 // traverse_children(&child.children, each);
645 // traverse_children(&child.children, each);
646 // each(child);
646 // each(child);
647 // }
647 // }
648 // }
648 // }
649 // ```
649 // ```
650 //
650 //
651 // However we want an external iterator and therefore can’t use the
651 // However we want an external iterator and therefore can’t use the
652 // call stack. Use an explicit stack instead:
652 // call stack. Use an explicit stack instead:
653 let mut stack = Vec::new();
653 let mut stack = Vec::new();
654 let mut iter = self.root.as_ref().iter();
654 let mut iter = self.root.as_ref().iter();
655 std::iter::from_fn(move || {
655 std::iter::from_fn(move || {
656 while let Some(child_node) = iter.next() {
656 while let Some(child_node) = iter.next() {
657 let children = match child_node.children(self.on_disk) {
657 let children = match child_node.children(self.on_disk) {
658 Ok(children) => children,
658 Ok(children) => children,
659 Err(error) => return Some(Err(error)),
659 Err(error) => return Some(Err(error)),
660 };
660 };
661 // Pseudo-recursion
661 // Pseudo-recursion
662 let new_iter = children.iter();
662 let new_iter = children.iter();
663 let old_iter = std::mem::replace(&mut iter, new_iter);
663 let old_iter = std::mem::replace(&mut iter, new_iter);
664 stack.push((child_node, old_iter));
664 stack.push((child_node, old_iter));
665 }
665 }
666 // Found the end of a `children.iter()` iterator.
666 // Found the end of a `children.iter()` iterator.
667 if let Some((child_node, next_iter)) = stack.pop() {
667 if let Some((child_node, next_iter)) = stack.pop() {
668 // "Return" from pseudo-recursion by restoring state from the
668 // "Return" from pseudo-recursion by restoring state from the
669 // explicit stack
669 // explicit stack
670 iter = next_iter;
670 iter = next_iter;
671
671
672 Some(Ok(child_node))
672 Some(Ok(child_node))
673 } else {
673 } else {
674 // Reached the bottom of the stack, we’re done
674 // Reached the bottom of the stack, we’re done
675 None
675 None
676 }
676 }
677 })
677 })
678 }
678 }
679
679
680 fn clear_known_ambiguous_mtimes(
681 &mut self,
682 paths: &[impl AsRef<HgPath>],
683 ) -> Result<(), DirstateV2ParseError> {
684 for path in paths {
685 if let Some(node) = Self::get_node_mut(
686 self.on_disk,
687 &mut self.unreachable_bytes,
688 &mut self.root,
689 path.as_ref(),
690 )? {
691 if let NodeData::Entry(entry) = &mut node.data {
692 entry.set_possibly_dirty();
693 }
694 }
695 }
696 Ok(())
697 }
698
699 fn count_dropped_path(unreachable_bytes: &mut u32, path: &Cow<HgPath>) {
680 fn count_dropped_path(unreachable_bytes: &mut u32, path: &Cow<HgPath>) {
700 if let Cow::Borrowed(path) = path {
681 if let Cow::Borrowed(path) = path {
701 *unreachable_bytes += path.len() as u32
682 *unreachable_bytes += path.len() as u32
702 }
683 }
703 }
684 }
704 }
685 }
705
686
706 /// Like `Iterator::filter_map`, but over a fallible iterator of `Result`s.
687 /// Like `Iterator::filter_map`, but over a fallible iterator of `Result`s.
707 ///
688 ///
708 /// The callback is only called for incoming `Ok` values. Errors are passed
689 /// The callback is only called for incoming `Ok` values. Errors are passed
709 /// through as-is. In order to let it use the `?` operator the callback is
690 /// through as-is. In order to let it use the `?` operator the callback is
710 /// expected to return a `Result` of `Option`, instead of an `Option` of
691 /// expected to return a `Result` of `Option`, instead of an `Option` of
711 /// `Result`.
692 /// `Result`.
712 fn filter_map_results<'a, I, F, A, B, E>(
693 fn filter_map_results<'a, I, F, A, B, E>(
713 iter: I,
694 iter: I,
714 f: F,
695 f: F,
715 ) -> impl Iterator<Item = Result<B, E>> + 'a
696 ) -> impl Iterator<Item = Result<B, E>> + 'a
716 where
697 where
717 I: Iterator<Item = Result<A, E>> + 'a,
698 I: Iterator<Item = Result<A, E>> + 'a,
718 F: Fn(A) -> Result<Option<B>, E> + 'a,
699 F: Fn(A) -> Result<Option<B>, E> + 'a,
719 {
700 {
720 iter.filter_map(move |result| match result {
701 iter.filter_map(move |result| match result {
721 Ok(node) => f(node).transpose(),
702 Ok(node) => f(node).transpose(),
722 Err(e) => Some(Err(e)),
703 Err(e) => Some(Err(e)),
723 })
704 })
724 }
705 }
725
706
726 impl OwningDirstateMap {
707 impl OwningDirstateMap {
727 pub fn clear(&mut self) {
708 pub fn clear(&mut self) {
728 let map = self.get_map_mut();
709 let map = self.get_map_mut();
729 map.root = Default::default();
710 map.root = Default::default();
730 map.nodes_with_entry_count = 0;
711 map.nodes_with_entry_count = 0;
731 map.nodes_with_copy_source_count = 0;
712 map.nodes_with_copy_source_count = 0;
732 }
713 }
733
714
734 pub fn set_entry(
715 pub fn set_entry(
735 &mut self,
716 &mut self,
736 filename: &HgPath,
717 filename: &HgPath,
737 entry: DirstateEntry,
718 entry: DirstateEntry,
738 ) -> Result<(), DirstateV2ParseError> {
719 ) -> Result<(), DirstateV2ParseError> {
739 let map = self.get_map_mut();
720 let map = self.get_map_mut();
740 map.get_or_insert(&filename)?.data = NodeData::Entry(entry);
721 map.get_or_insert(&filename)?.data = NodeData::Entry(entry);
741 Ok(())
722 Ok(())
742 }
723 }
743
724
744 pub fn add_file(
725 pub fn add_file(
745 &mut self,
726 &mut self,
746 filename: &HgPath,
727 filename: &HgPath,
747 entry: DirstateEntry,
728 entry: DirstateEntry,
748 ) -> Result<(), DirstateError> {
729 ) -> Result<(), DirstateError> {
749 let old_state = self.get(filename)?.map(|e| e.state());
730 let old_state = self.get(filename)?.map(|e| e.state());
750 let map = self.get_map_mut();
731 let map = self.get_map_mut();
751 Ok(map.add_or_remove_file(filename, old_state, entry)?)
732 Ok(map.add_or_remove_file(filename, old_state, entry)?)
752 }
733 }
753
734
754 pub fn remove_file(
735 pub fn remove_file(
755 &mut self,
736 &mut self,
756 filename: &HgPath,
737 filename: &HgPath,
757 in_merge: bool,
738 in_merge: bool,
758 ) -> Result<(), DirstateError> {
739 ) -> Result<(), DirstateError> {
759 let old_entry_opt = self.get(filename)?;
740 let old_entry_opt = self.get(filename)?;
760 let old_state = old_entry_opt.map(|e| e.state());
741 let old_state = old_entry_opt.map(|e| e.state());
761 let mut size = 0;
742 let mut size = 0;
762 if in_merge {
743 if in_merge {
763 // XXX we should not be able to have 'm' state and 'FROM_P2' if not
744 // XXX we should not be able to have 'm' state and 'FROM_P2' if not
764 // during a merge. So I (marmoute) am not sure we need the
745 // during a merge. So I (marmoute) am not sure we need the
765 // conditionnal at all. Adding double checking this with assert
746 // conditionnal at all. Adding double checking this with assert
766 // would be nice.
747 // would be nice.
767 if let Some(old_entry) = old_entry_opt {
748 if let Some(old_entry) = old_entry_opt {
768 // backup the previous state
749 // backup the previous state
769 if old_entry.state() == EntryState::Merged {
750 if old_entry.state() == EntryState::Merged {
770 size = SIZE_NON_NORMAL;
751 size = SIZE_NON_NORMAL;
771 } else if old_entry.state() == EntryState::Normal
752 } else if old_entry.state() == EntryState::Normal
772 && old_entry.size() == SIZE_FROM_OTHER_PARENT
753 && old_entry.size() == SIZE_FROM_OTHER_PARENT
773 {
754 {
774 // other parent
755 // other parent
775 size = SIZE_FROM_OTHER_PARENT;
756 size = SIZE_FROM_OTHER_PARENT;
776 }
757 }
777 }
758 }
778 }
759 }
779 if size == 0 {
760 if size == 0 {
780 self.copy_map_remove(filename)?;
761 self.copy_map_remove(filename)?;
781 }
762 }
782 let map = self.get_map_mut();
763 let map = self.get_map_mut();
783 let entry = DirstateEntry::new_removed(size);
764 let entry = DirstateEntry::new_removed(size);
784 Ok(map.add_or_remove_file(filename, old_state, entry)?)
765 Ok(map.add_or_remove_file(filename, old_state, entry)?)
785 }
766 }
786
767
787 pub fn drop_entry_and_copy_source(
768 pub fn drop_entry_and_copy_source(
788 &mut self,
769 &mut self,
789 filename: &HgPath,
770 filename: &HgPath,
790 ) -> Result<(), DirstateError> {
771 ) -> Result<(), DirstateError> {
791 let was_tracked = self
772 let was_tracked = self
792 .get(filename)?
773 .get(filename)?
793 .map_or(false, |e| e.state().is_tracked());
774 .map_or(false, |e| e.state().is_tracked());
794 let map = self.get_map_mut();
775 let map = self.get_map_mut();
795 struct Dropped {
776 struct Dropped {
796 was_tracked: bool,
777 was_tracked: bool,
797 had_entry: bool,
778 had_entry: bool,
798 had_copy_source: bool,
779 had_copy_source: bool,
799 }
780 }
800
781
801 /// If this returns `Ok(Some((dropped, removed)))`, then
782 /// If this returns `Ok(Some((dropped, removed)))`, then
802 ///
783 ///
803 /// * `dropped` is about the leaf node that was at `filename`
784 /// * `dropped` is about the leaf node that was at `filename`
804 /// * `removed` is whether this particular level of recursion just
785 /// * `removed` is whether this particular level of recursion just
805 /// removed a node in `nodes`.
786 /// removed a node in `nodes`.
806 fn recur<'on_disk>(
787 fn recur<'on_disk>(
807 on_disk: &'on_disk [u8],
788 on_disk: &'on_disk [u8],
808 unreachable_bytes: &mut u32,
789 unreachable_bytes: &mut u32,
809 nodes: &mut ChildNodes<'on_disk>,
790 nodes: &mut ChildNodes<'on_disk>,
810 path: &HgPath,
791 path: &HgPath,
811 ) -> Result<Option<(Dropped, bool)>, DirstateV2ParseError> {
792 ) -> Result<Option<(Dropped, bool)>, DirstateV2ParseError> {
812 let (first_path_component, rest_of_path) =
793 let (first_path_component, rest_of_path) =
813 path.split_first_component();
794 path.split_first_component();
814 let nodes = nodes.make_mut(on_disk, unreachable_bytes)?;
795 let nodes = nodes.make_mut(on_disk, unreachable_bytes)?;
815 let node = if let Some(node) = nodes.get_mut(first_path_component)
796 let node = if let Some(node) = nodes.get_mut(first_path_component)
816 {
797 {
817 node
798 node
818 } else {
799 } else {
819 return Ok(None);
800 return Ok(None);
820 };
801 };
821 let dropped;
802 let dropped;
822 if let Some(rest) = rest_of_path {
803 if let Some(rest) = rest_of_path {
823 if let Some((d, removed)) = recur(
804 if let Some((d, removed)) = recur(
824 on_disk,
805 on_disk,
825 unreachable_bytes,
806 unreachable_bytes,
826 &mut node.children,
807 &mut node.children,
827 rest,
808 rest,
828 )? {
809 )? {
829 dropped = d;
810 dropped = d;
830 if dropped.had_entry {
811 if dropped.had_entry {
831 node.descendants_with_entry_count -= 1;
812 node.descendants_with_entry_count -= 1;
832 }
813 }
833 if dropped.was_tracked {
814 if dropped.was_tracked {
834 node.tracked_descendants_count -= 1;
815 node.tracked_descendants_count -= 1;
835 }
816 }
836
817
837 // Directory caches must be invalidated when removing a
818 // Directory caches must be invalidated when removing a
838 // child node
819 // child node
839 if removed {
820 if removed {
840 if let NodeData::CachedDirectory { .. } = &node.data {
821 if let NodeData::CachedDirectory { .. } = &node.data {
841 node.data = NodeData::None
822 node.data = NodeData::None
842 }
823 }
843 }
824 }
844 } else {
825 } else {
845 return Ok(None);
826 return Ok(None);
846 }
827 }
847 } else {
828 } else {
848 let had_entry = node.data.has_entry();
829 let had_entry = node.data.has_entry();
849 if had_entry {
830 if had_entry {
850 node.data = NodeData::None
831 node.data = NodeData::None
851 }
832 }
852 if let Some(source) = &node.copy_source {
833 if let Some(source) = &node.copy_source {
853 DirstateMap::count_dropped_path(unreachable_bytes, source);
834 DirstateMap::count_dropped_path(unreachable_bytes, source);
854 node.copy_source = None
835 node.copy_source = None
855 }
836 }
856 dropped = Dropped {
837 dropped = Dropped {
857 was_tracked: node
838 was_tracked: node
858 .data
839 .data
859 .as_entry()
840 .as_entry()
860 .map_or(false, |entry| entry.state().is_tracked()),
841 .map_or(false, |entry| entry.state().is_tracked()),
861 had_entry,
842 had_entry,
862 had_copy_source: node.copy_source.take().is_some(),
843 had_copy_source: node.copy_source.take().is_some(),
863 };
844 };
864 }
845 }
865 // After recursion, for both leaf (rest_of_path is None) nodes and
846 // After recursion, for both leaf (rest_of_path is None) nodes and
866 // parent nodes, remove a node if it just became empty.
847 // parent nodes, remove a node if it just became empty.
867 let remove = !node.data.has_entry()
848 let remove = !node.data.has_entry()
868 && node.copy_source.is_none()
849 && node.copy_source.is_none()
869 && node.children.is_empty();
850 && node.children.is_empty();
870 if remove {
851 if remove {
871 let (key, _) =
852 let (key, _) =
872 nodes.remove_entry(first_path_component).unwrap();
853 nodes.remove_entry(first_path_component).unwrap();
873 DirstateMap::count_dropped_path(
854 DirstateMap::count_dropped_path(
874 unreachable_bytes,
855 unreachable_bytes,
875 key.full_path(),
856 key.full_path(),
876 )
857 )
877 }
858 }
878 Ok(Some((dropped, remove)))
859 Ok(Some((dropped, remove)))
879 }
860 }
880
861
881 if let Some((dropped, _removed)) = recur(
862 if let Some((dropped, _removed)) = recur(
882 map.on_disk,
863 map.on_disk,
883 &mut map.unreachable_bytes,
864 &mut map.unreachable_bytes,
884 &mut map.root,
865 &mut map.root,
885 filename,
866 filename,
886 )? {
867 )? {
887 if dropped.had_entry {
868 if dropped.had_entry {
888 map.nodes_with_entry_count -= 1
869 map.nodes_with_entry_count -= 1
889 }
870 }
890 if dropped.had_copy_source {
871 if dropped.had_copy_source {
891 map.nodes_with_copy_source_count -= 1
872 map.nodes_with_copy_source_count -= 1
892 }
873 }
893 } else {
874 } else {
894 debug_assert!(!was_tracked);
875 debug_assert!(!was_tracked);
895 }
876 }
896 Ok(())
877 Ok(())
897 }
878 }
898
879
899 pub fn has_tracked_dir(
880 pub fn has_tracked_dir(
900 &mut self,
881 &mut self,
901 directory: &HgPath,
882 directory: &HgPath,
902 ) -> Result<bool, DirstateError> {
883 ) -> Result<bool, DirstateError> {
903 let map = self.get_map_mut();
884 let map = self.get_map_mut();
904 if let Some(node) = map.get_node(directory)? {
885 if let Some(node) = map.get_node(directory)? {
905 // A node without a `DirstateEntry` was created to hold child
886 // A node without a `DirstateEntry` was created to hold child
906 // nodes, and is therefore a directory.
887 // nodes, and is therefore a directory.
907 let state = node.state()?;
888 let state = node.state()?;
908 Ok(state.is_none() && node.tracked_descendants_count() > 0)
889 Ok(state.is_none() && node.tracked_descendants_count() > 0)
909 } else {
890 } else {
910 Ok(false)
891 Ok(false)
911 }
892 }
912 }
893 }
913
894
914 pub fn has_dir(
895 pub fn has_dir(
915 &mut self,
896 &mut self,
916 directory: &HgPath,
897 directory: &HgPath,
917 ) -> Result<bool, DirstateError> {
898 ) -> Result<bool, DirstateError> {
918 let map = self.get_map_mut();
899 let map = self.get_map_mut();
919 if let Some(node) = map.get_node(directory)? {
900 if let Some(node) = map.get_node(directory)? {
920 // A node without a `DirstateEntry` was created to hold child
901 // A node without a `DirstateEntry` was created to hold child
921 // nodes, and is therefore a directory.
902 // nodes, and is therefore a directory.
922 let state = node.state()?;
903 let state = node.state()?;
923 Ok(state.is_none() && node.descendants_with_entry_count() > 0)
904 Ok(state.is_none() && node.descendants_with_entry_count() > 0)
924 } else {
905 } else {
925 Ok(false)
906 Ok(false)
926 }
907 }
927 }
908 }
928
909
929 #[timed]
910 #[timed]
930 pub fn pack_v1(
911 pub fn pack_v1(
931 &mut self,
912 &mut self,
932 parents: DirstateParents,
913 parents: DirstateParents,
933 now: TruncatedTimestamp,
934 ) -> Result<Vec<u8>, DirstateError> {
914 ) -> Result<Vec<u8>, DirstateError> {
935 let map = self.get_map_mut();
915 let map = self.get_map_mut();
936 let mut ambiguous_mtimes = Vec::new();
937 // Optizimation (to be measured?): pre-compute size to avoid `Vec`
916 // Optizimation (to be measured?): pre-compute size to avoid `Vec`
938 // reallocations
917 // reallocations
939 let mut size = parents.as_bytes().len();
918 let mut size = parents.as_bytes().len();
940 for node in map.iter_nodes() {
919 for node in map.iter_nodes() {
941 let node = node?;
920 let node = node?;
942 if let Some(entry) = node.entry()? {
921 if node.entry()?.is_some() {
943 size += packed_entry_size(
922 size += packed_entry_size(
944 node.full_path(map.on_disk)?,
923 node.full_path(map.on_disk)?,
945 node.copy_source(map.on_disk)?,
924 node.copy_source(map.on_disk)?,
946 );
925 );
947 if entry.need_delay(now) {
948 ambiguous_mtimes.push(
949 node.full_path_borrowed(map.on_disk)?
950 .detach_from_tree(),
951 )
952 }
953 }
926 }
954 }
927 }
955 map.clear_known_ambiguous_mtimes(&ambiguous_mtimes)?;
956
928
957 let mut packed = Vec::with_capacity(size);
929 let mut packed = Vec::with_capacity(size);
958 packed.extend(parents.as_bytes());
930 packed.extend(parents.as_bytes());
959
931
960 for node in map.iter_nodes() {
932 for node in map.iter_nodes() {
961 let node = node?;
933 let node = node?;
962 if let Some(entry) = node.entry()? {
934 if let Some(entry) = node.entry()? {
963 pack_entry(
935 pack_entry(
964 node.full_path(map.on_disk)?,
936 node.full_path(map.on_disk)?,
965 &entry,
937 &entry,
966 node.copy_source(map.on_disk)?,
938 node.copy_source(map.on_disk)?,
967 &mut packed,
939 &mut packed,
968 );
940 );
969 }
941 }
970 }
942 }
971 Ok(packed)
943 Ok(packed)
972 }
944 }
973
945
974 /// Returns new data and metadata together with whether that data should be
946 /// Returns new data and metadata together with whether that data should be
975 /// appended to the existing data file whose content is at
947 /// appended to the existing data file whose content is at
976 /// `map.on_disk` (true), instead of written to a new data file
948 /// `map.on_disk` (true), instead of written to a new data file
977 /// (false).
949 /// (false).
978 #[timed]
950 #[timed]
979 pub fn pack_v2(
951 pub fn pack_v2(
980 &mut self,
952 &mut self,
981 now: TruncatedTimestamp,
982 can_append: bool,
953 can_append: bool,
983 ) -> Result<(Vec<u8>, Vec<u8>, bool), DirstateError> {
954 ) -> Result<(Vec<u8>, Vec<u8>, bool), DirstateError> {
984 let map = self.get_map_mut();
955 let map = self.get_map_mut();
985 let mut paths = Vec::new();
986 for node in map.iter_nodes() {
987 let node = node?;
988 if let Some(entry) = node.entry()? {
989 if entry.need_delay(now) {
990 paths.push(
991 node.full_path_borrowed(map.on_disk)?
992 .detach_from_tree(),
993 )
994 }
995 }
996 }
997 // Borrow of `self` ends here since we collect cloned paths
998
999 map.clear_known_ambiguous_mtimes(&paths)?;
1000
1001 on_disk::write(map, can_append)
956 on_disk::write(map, can_append)
1002 }
957 }
1003
958
1004 pub fn status<'a>(
959 pub fn status<'a>(
1005 &'a mut self,
960 &'a mut self,
1006 matcher: &'a (dyn Matcher + Sync),
961 matcher: &'a (dyn Matcher + Sync),
1007 root_dir: PathBuf,
962 root_dir: PathBuf,
1008 ignore_files: Vec<PathBuf>,
963 ignore_files: Vec<PathBuf>,
1009 options: StatusOptions,
964 options: StatusOptions,
1010 ) -> Result<(DirstateStatus<'a>, Vec<PatternFileWarning>), StatusError>
965 ) -> Result<(DirstateStatus<'a>, Vec<PatternFileWarning>), StatusError>
1011 {
966 {
1012 let map = self.get_map_mut();
967 let map = self.get_map_mut();
1013 super::status::status(map, matcher, root_dir, ignore_files, options)
968 super::status::status(map, matcher, root_dir, ignore_files, options)
1014 }
969 }
1015
970
1016 pub fn copy_map_len(&self) -> usize {
971 pub fn copy_map_len(&self) -> usize {
1017 let map = self.get_map();
972 let map = self.get_map();
1018 map.nodes_with_copy_source_count as usize
973 map.nodes_with_copy_source_count as usize
1019 }
974 }
1020
975
1021 pub fn copy_map_iter(&self) -> CopyMapIter<'_> {
976 pub fn copy_map_iter(&self) -> CopyMapIter<'_> {
1022 let map = self.get_map();
977 let map = self.get_map();
1023 Box::new(filter_map_results(map.iter_nodes(), move |node| {
978 Box::new(filter_map_results(map.iter_nodes(), move |node| {
1024 Ok(if let Some(source) = node.copy_source(map.on_disk)? {
979 Ok(if let Some(source) = node.copy_source(map.on_disk)? {
1025 Some((node.full_path(map.on_disk)?, source))
980 Some((node.full_path(map.on_disk)?, source))
1026 } else {
981 } else {
1027 None
982 None
1028 })
983 })
1029 }))
984 }))
1030 }
985 }
1031
986
1032 pub fn copy_map_contains_key(
987 pub fn copy_map_contains_key(
1033 &self,
988 &self,
1034 key: &HgPath,
989 key: &HgPath,
1035 ) -> Result<bool, DirstateV2ParseError> {
990 ) -> Result<bool, DirstateV2ParseError> {
1036 let map = self.get_map();
991 let map = self.get_map();
1037 Ok(if let Some(node) = map.get_node(key)? {
992 Ok(if let Some(node) = map.get_node(key)? {
1038 node.has_copy_source()
993 node.has_copy_source()
1039 } else {
994 } else {
1040 false
995 false
1041 })
996 })
1042 }
997 }
1043
998
1044 pub fn copy_map_get(
999 pub fn copy_map_get(
1045 &self,
1000 &self,
1046 key: &HgPath,
1001 key: &HgPath,
1047 ) -> Result<Option<&HgPath>, DirstateV2ParseError> {
1002 ) -> Result<Option<&HgPath>, DirstateV2ParseError> {
1048 let map = self.get_map();
1003 let map = self.get_map();
1049 if let Some(node) = map.get_node(key)? {
1004 if let Some(node) = map.get_node(key)? {
1050 if let Some(source) = node.copy_source(map.on_disk)? {
1005 if let Some(source) = node.copy_source(map.on_disk)? {
1051 return Ok(Some(source));
1006 return Ok(Some(source));
1052 }
1007 }
1053 }
1008 }
1054 Ok(None)
1009 Ok(None)
1055 }
1010 }
1056
1011
1057 pub fn copy_map_remove(
1012 pub fn copy_map_remove(
1058 &mut self,
1013 &mut self,
1059 key: &HgPath,
1014 key: &HgPath,
1060 ) -> Result<Option<HgPathBuf>, DirstateV2ParseError> {
1015 ) -> Result<Option<HgPathBuf>, DirstateV2ParseError> {
1061 let map = self.get_map_mut();
1016 let map = self.get_map_mut();
1062 let count = &mut map.nodes_with_copy_source_count;
1017 let count = &mut map.nodes_with_copy_source_count;
1063 let unreachable_bytes = &mut map.unreachable_bytes;
1018 let unreachable_bytes = &mut map.unreachable_bytes;
1064 Ok(DirstateMap::get_node_mut(
1019 Ok(DirstateMap::get_node_mut(
1065 map.on_disk,
1020 map.on_disk,
1066 unreachable_bytes,
1021 unreachable_bytes,
1067 &mut map.root,
1022 &mut map.root,
1068 key,
1023 key,
1069 )?
1024 )?
1070 .and_then(|node| {
1025 .and_then(|node| {
1071 if let Some(source) = &node.copy_source {
1026 if let Some(source) = &node.copy_source {
1072 *count -= 1;
1027 *count -= 1;
1073 DirstateMap::count_dropped_path(unreachable_bytes, source);
1028 DirstateMap::count_dropped_path(unreachable_bytes, source);
1074 }
1029 }
1075 node.copy_source.take().map(Cow::into_owned)
1030 node.copy_source.take().map(Cow::into_owned)
1076 }))
1031 }))
1077 }
1032 }
1078
1033
1079 pub fn copy_map_insert(
1034 pub fn copy_map_insert(
1080 &mut self,
1035 &mut self,
1081 key: HgPathBuf,
1036 key: HgPathBuf,
1082 value: HgPathBuf,
1037 value: HgPathBuf,
1083 ) -> Result<Option<HgPathBuf>, DirstateV2ParseError> {
1038 ) -> Result<Option<HgPathBuf>, DirstateV2ParseError> {
1084 let map = self.get_map_mut();
1039 let map = self.get_map_mut();
1085 let node = DirstateMap::get_or_insert_node(
1040 let node = DirstateMap::get_or_insert_node(
1086 map.on_disk,
1041 map.on_disk,
1087 &mut map.unreachable_bytes,
1042 &mut map.unreachable_bytes,
1088 &mut map.root,
1043 &mut map.root,
1089 &key,
1044 &key,
1090 WithBasename::to_cow_owned,
1045 WithBasename::to_cow_owned,
1091 |_ancestor| {},
1046 |_ancestor| {},
1092 )?;
1047 )?;
1093 if node.copy_source.is_none() {
1048 if node.copy_source.is_none() {
1094 map.nodes_with_copy_source_count += 1
1049 map.nodes_with_copy_source_count += 1
1095 }
1050 }
1096 Ok(node.copy_source.replace(value.into()).map(Cow::into_owned))
1051 Ok(node.copy_source.replace(value.into()).map(Cow::into_owned))
1097 }
1052 }
1098
1053
1099 pub fn len(&self) -> usize {
1054 pub fn len(&self) -> usize {
1100 let map = self.get_map();
1055 let map = self.get_map();
1101 map.nodes_with_entry_count as usize
1056 map.nodes_with_entry_count as usize
1102 }
1057 }
1103
1058
1104 pub fn contains_key(
1059 pub fn contains_key(
1105 &self,
1060 &self,
1106 key: &HgPath,
1061 key: &HgPath,
1107 ) -> Result<bool, DirstateV2ParseError> {
1062 ) -> Result<bool, DirstateV2ParseError> {
1108 Ok(self.get(key)?.is_some())
1063 Ok(self.get(key)?.is_some())
1109 }
1064 }
1110
1065
1111 pub fn get(
1066 pub fn get(
1112 &self,
1067 &self,
1113 key: &HgPath,
1068 key: &HgPath,
1114 ) -> Result<Option<DirstateEntry>, DirstateV2ParseError> {
1069 ) -> Result<Option<DirstateEntry>, DirstateV2ParseError> {
1115 let map = self.get_map();
1070 let map = self.get_map();
1116 Ok(if let Some(node) = map.get_node(key)? {
1071 Ok(if let Some(node) = map.get_node(key)? {
1117 node.entry()?
1072 node.entry()?
1118 } else {
1073 } else {
1119 None
1074 None
1120 })
1075 })
1121 }
1076 }
1122
1077
1123 pub fn iter(&self) -> StateMapIter<'_> {
1078 pub fn iter(&self) -> StateMapIter<'_> {
1124 let map = self.get_map();
1079 let map = self.get_map();
1125 Box::new(filter_map_results(map.iter_nodes(), move |node| {
1080 Box::new(filter_map_results(map.iter_nodes(), move |node| {
1126 Ok(if let Some(entry) = node.entry()? {
1081 Ok(if let Some(entry) = node.entry()? {
1127 Some((node.full_path(map.on_disk)?, entry))
1082 Some((node.full_path(map.on_disk)?, entry))
1128 } else {
1083 } else {
1129 None
1084 None
1130 })
1085 })
1131 }))
1086 }))
1132 }
1087 }
1133
1088
1134 pub fn iter_tracked_dirs(
1089 pub fn iter_tracked_dirs(
1135 &mut self,
1090 &mut self,
1136 ) -> Result<
1091 ) -> Result<
1137 Box<
1092 Box<
1138 dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>>
1093 dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>>
1139 + Send
1094 + Send
1140 + '_,
1095 + '_,
1141 >,
1096 >,
1142 DirstateError,
1097 DirstateError,
1143 > {
1098 > {
1144 let map = self.get_map_mut();
1099 let map = self.get_map_mut();
1145 let on_disk = map.on_disk;
1100 let on_disk = map.on_disk;
1146 Ok(Box::new(filter_map_results(
1101 Ok(Box::new(filter_map_results(
1147 map.iter_nodes(),
1102 map.iter_nodes(),
1148 move |node| {
1103 move |node| {
1149 Ok(if node.tracked_descendants_count() > 0 {
1104 Ok(if node.tracked_descendants_count() > 0 {
1150 Some(node.full_path(on_disk)?)
1105 Some(node.full_path(on_disk)?)
1151 } else {
1106 } else {
1152 None
1107 None
1153 })
1108 })
1154 },
1109 },
1155 )))
1110 )))
1156 }
1111 }
1157
1112
1158 pub fn debug_iter(
1113 pub fn debug_iter(
1159 &self,
1114 &self,
1160 all: bool,
1115 all: bool,
1161 ) -> Box<
1116 ) -> Box<
1162 dyn Iterator<
1117 dyn Iterator<
1163 Item = Result<
1118 Item = Result<
1164 (&HgPath, (u8, i32, i32, i32)),
1119 (&HgPath, (u8, i32, i32, i32)),
1165 DirstateV2ParseError,
1120 DirstateV2ParseError,
1166 >,
1121 >,
1167 > + Send
1122 > + Send
1168 + '_,
1123 + '_,
1169 > {
1124 > {
1170 let map = self.get_map();
1125 let map = self.get_map();
1171 Box::new(filter_map_results(map.iter_nodes(), move |node| {
1126 Box::new(filter_map_results(map.iter_nodes(), move |node| {
1172 let debug_tuple = if let Some(entry) = node.entry()? {
1127 let debug_tuple = if let Some(entry) = node.entry()? {
1173 entry.debug_tuple()
1128 entry.debug_tuple()
1174 } else if !all {
1129 } else if !all {
1175 return Ok(None);
1130 return Ok(None);
1176 } else if let Some(mtime) = node.cached_directory_mtime()? {
1131 } else if let Some(mtime) = node.cached_directory_mtime()? {
1177 (b' ', 0, -1, mtime.truncated_seconds() as i32)
1132 (b' ', 0, -1, mtime.truncated_seconds() as i32)
1178 } else {
1133 } else {
1179 (b' ', 0, -1, -1)
1134 (b' ', 0, -1, -1)
1180 };
1135 };
1181 Ok(Some((node.full_path(map.on_disk)?, debug_tuple)))
1136 Ok(Some((node.full_path(map.on_disk)?, debug_tuple)))
1182 }))
1137 }))
1183 }
1138 }
1184 }
1139 }
@@ -1,505 +1,499 b''
1 // dirstate_map.rs
1 // dirstate_map.rs
2 //
2 //
3 // Copyright 2019 Raphaël Gomès <rgomes@octobus.net>
3 // Copyright 2019 Raphaël Gomès <rgomes@octobus.net>
4 //
4 //
5 // This software may be used and distributed according to the terms of the
5 // This software may be used and distributed according to the terms of the
6 // GNU General Public License version 2 or any later version.
6 // GNU General Public License version 2 or any later version.
7
7
8 //! Bindings for the `hg::dirstate::dirstate_map` file provided by the
8 //! Bindings for the `hg::dirstate::dirstate_map` file provided by the
9 //! `hg-core` package.
9 //! `hg-core` package.
10
10
11 use std::cell::{RefCell, RefMut};
11 use std::cell::{RefCell, RefMut};
12 use std::convert::TryInto;
12 use std::convert::TryInto;
13
13
14 use cpython::{
14 use cpython::{
15 exc, PyBool, PyBytes, PyClone, PyDict, PyErr, PyList, PyNone, PyObject,
15 exc, PyBool, PyBytes, PyClone, PyDict, PyErr, PyList, PyNone, PyObject,
16 PyResult, Python, PythonObject, ToPyObject, UnsafePyLeaked,
16 PyResult, Python, PythonObject, ToPyObject, UnsafePyLeaked,
17 };
17 };
18
18
19 use crate::{
19 use crate::{
20 dirstate::copymap::{CopyMap, CopyMapItemsIterator, CopyMapKeysIterator},
20 dirstate::copymap::{CopyMap, CopyMapItemsIterator, CopyMapKeysIterator},
21 dirstate::item::{timestamp, DirstateItem},
21 dirstate::item::DirstateItem,
22 pybytes_deref::PyBytesDeref,
22 pybytes_deref::PyBytesDeref,
23 };
23 };
24 use hg::{
24 use hg::{
25 dirstate::StateMapIter,
25 dirstate::StateMapIter,
26 dirstate_tree::dirstate_map::DirstateMap as TreeDirstateMap,
26 dirstate_tree::dirstate_map::DirstateMap as TreeDirstateMap,
27 dirstate_tree::on_disk::DirstateV2ParseError,
27 dirstate_tree::on_disk::DirstateV2ParseError,
28 dirstate_tree::owning::OwningDirstateMap,
28 dirstate_tree::owning::OwningDirstateMap,
29 revlog::Node,
29 revlog::Node,
30 utils::files::normalize_case,
30 utils::files::normalize_case,
31 utils::hg_path::{HgPath, HgPathBuf},
31 utils::hg_path::{HgPath, HgPathBuf},
32 DirstateEntry, DirstateError, DirstateParents, EntryState,
32 DirstateEntry, DirstateError, DirstateParents, EntryState,
33 };
33 };
34
34
35 // TODO
35 // TODO
36 // This object needs to share references to multiple members of its Rust
36 // This object needs to share references to multiple members of its Rust
37 // inner struct, namely `copy_map`, `dirs` and `all_dirs`.
37 // inner struct, namely `copy_map`, `dirs` and `all_dirs`.
38 // Right now `CopyMap` is done, but it needs to have an explicit reference
38 // Right now `CopyMap` is done, but it needs to have an explicit reference
39 // to `RustDirstateMap` which itself needs to have an encapsulation for
39 // to `RustDirstateMap` which itself needs to have an encapsulation for
40 // every method in `CopyMap` (copymapcopy, etc.).
40 // every method in `CopyMap` (copymapcopy, etc.).
41 // This is ugly and hard to maintain.
41 // This is ugly and hard to maintain.
42 // The same logic applies to `dirs` and `all_dirs`, however the `Dirs`
42 // The same logic applies to `dirs` and `all_dirs`, however the `Dirs`
43 // `py_class!` is already implemented and does not mention
43 // `py_class!` is already implemented and does not mention
44 // `RustDirstateMap`, rightfully so.
44 // `RustDirstateMap`, rightfully so.
45 // All attributes also have to have a separate refcount data attribute for
45 // All attributes also have to have a separate refcount data attribute for
46 // leaks, with all methods that go along for reference sharing.
46 // leaks, with all methods that go along for reference sharing.
47 py_class!(pub class DirstateMap |py| {
47 py_class!(pub class DirstateMap |py| {
48 @shared data inner: OwningDirstateMap;
48 @shared data inner: OwningDirstateMap;
49
49
50 /// Returns a `(dirstate_map, parents)` tuple
50 /// Returns a `(dirstate_map, parents)` tuple
51 @staticmethod
51 @staticmethod
52 def new_v1(
52 def new_v1(
53 on_disk: PyBytes,
53 on_disk: PyBytes,
54 ) -> PyResult<PyObject> {
54 ) -> PyResult<PyObject> {
55 let on_disk = PyBytesDeref::new(py, on_disk);
55 let on_disk = PyBytesDeref::new(py, on_disk);
56 let mut map = OwningDirstateMap::new_empty(on_disk);
56 let mut map = OwningDirstateMap::new_empty(on_disk);
57 let (on_disk, map_placeholder) = map.get_pair_mut();
57 let (on_disk, map_placeholder) = map.get_pair_mut();
58
58
59 let (actual_map, parents) = TreeDirstateMap::new_v1(on_disk)
59 let (actual_map, parents) = TreeDirstateMap::new_v1(on_disk)
60 .map_err(|e| dirstate_error(py, e))?;
60 .map_err(|e| dirstate_error(py, e))?;
61 *map_placeholder = actual_map;
61 *map_placeholder = actual_map;
62 let map = Self::create_instance(py, map)?;
62 let map = Self::create_instance(py, map)?;
63 let parents = parents.map(|p| {
63 let parents = parents.map(|p| {
64 let p1 = PyBytes::new(py, p.p1.as_bytes());
64 let p1 = PyBytes::new(py, p.p1.as_bytes());
65 let p2 = PyBytes::new(py, p.p2.as_bytes());
65 let p2 = PyBytes::new(py, p.p2.as_bytes());
66 (p1, p2)
66 (p1, p2)
67 });
67 });
68 Ok((map, parents).to_py_object(py).into_object())
68 Ok((map, parents).to_py_object(py).into_object())
69 }
69 }
70
70
71 /// Returns a DirstateMap
71 /// Returns a DirstateMap
72 @staticmethod
72 @staticmethod
73 def new_v2(
73 def new_v2(
74 on_disk: PyBytes,
74 on_disk: PyBytes,
75 data_size: usize,
75 data_size: usize,
76 tree_metadata: PyBytes,
76 tree_metadata: PyBytes,
77 ) -> PyResult<PyObject> {
77 ) -> PyResult<PyObject> {
78 let dirstate_error = |e: DirstateError| {
78 let dirstate_error = |e: DirstateError| {
79 PyErr::new::<exc::OSError, _>(py, format!("Dirstate error: {:?}", e))
79 PyErr::new::<exc::OSError, _>(py, format!("Dirstate error: {:?}", e))
80 };
80 };
81 let on_disk = PyBytesDeref::new(py, on_disk);
81 let on_disk = PyBytesDeref::new(py, on_disk);
82 let mut map = OwningDirstateMap::new_empty(on_disk);
82 let mut map = OwningDirstateMap::new_empty(on_disk);
83 let (on_disk, map_placeholder) = map.get_pair_mut();
83 let (on_disk, map_placeholder) = map.get_pair_mut();
84 *map_placeholder = TreeDirstateMap::new_v2(
84 *map_placeholder = TreeDirstateMap::new_v2(
85 on_disk, data_size, tree_metadata.data(py),
85 on_disk, data_size, tree_metadata.data(py),
86 ).map_err(dirstate_error)?;
86 ).map_err(dirstate_error)?;
87 let map = Self::create_instance(py, map)?;
87 let map = Self::create_instance(py, map)?;
88 Ok(map.into_object())
88 Ok(map.into_object())
89 }
89 }
90
90
91 def clear(&self) -> PyResult<PyObject> {
91 def clear(&self) -> PyResult<PyObject> {
92 self.inner(py).borrow_mut().clear();
92 self.inner(py).borrow_mut().clear();
93 Ok(py.None())
93 Ok(py.None())
94 }
94 }
95
95
96 def get(
96 def get(
97 &self,
97 &self,
98 key: PyObject,
98 key: PyObject,
99 default: Option<PyObject> = None
99 default: Option<PyObject> = None
100 ) -> PyResult<Option<PyObject>> {
100 ) -> PyResult<Option<PyObject>> {
101 let key = key.extract::<PyBytes>(py)?;
101 let key = key.extract::<PyBytes>(py)?;
102 match self
102 match self
103 .inner(py)
103 .inner(py)
104 .borrow()
104 .borrow()
105 .get(HgPath::new(key.data(py)))
105 .get(HgPath::new(key.data(py)))
106 .map_err(|e| v2_error(py, e))?
106 .map_err(|e| v2_error(py, e))?
107 {
107 {
108 Some(entry) => {
108 Some(entry) => {
109 Ok(Some(DirstateItem::new_as_pyobject(py, entry)?))
109 Ok(Some(DirstateItem::new_as_pyobject(py, entry)?))
110 },
110 },
111 None => Ok(default)
111 None => Ok(default)
112 }
112 }
113 }
113 }
114
114
115 def set_dirstate_item(
115 def set_dirstate_item(
116 &self,
116 &self,
117 path: PyObject,
117 path: PyObject,
118 item: DirstateItem
118 item: DirstateItem
119 ) -> PyResult<PyObject> {
119 ) -> PyResult<PyObject> {
120 let f = path.extract::<PyBytes>(py)?;
120 let f = path.extract::<PyBytes>(py)?;
121 let filename = HgPath::new(f.data(py));
121 let filename = HgPath::new(f.data(py));
122 self.inner(py)
122 self.inner(py)
123 .borrow_mut()
123 .borrow_mut()
124 .set_entry(filename, item.get_entry(py))
124 .set_entry(filename, item.get_entry(py))
125 .map_err(|e| v2_error(py, e))?;
125 .map_err(|e| v2_error(py, e))?;
126 Ok(py.None())
126 Ok(py.None())
127 }
127 }
128
128
129 def addfile(
129 def addfile(
130 &self,
130 &self,
131 f: PyBytes,
131 f: PyBytes,
132 item: DirstateItem,
132 item: DirstateItem,
133 ) -> PyResult<PyNone> {
133 ) -> PyResult<PyNone> {
134 let filename = HgPath::new(f.data(py));
134 let filename = HgPath::new(f.data(py));
135 let entry = item.get_entry(py);
135 let entry = item.get_entry(py);
136 self.inner(py)
136 self.inner(py)
137 .borrow_mut()
137 .borrow_mut()
138 .add_file(filename, entry)
138 .add_file(filename, entry)
139 .map_err(|e |dirstate_error(py, e))?;
139 .map_err(|e |dirstate_error(py, e))?;
140 Ok(PyNone)
140 Ok(PyNone)
141 }
141 }
142
142
143 def removefile(
143 def removefile(
144 &self,
144 &self,
145 f: PyObject,
145 f: PyObject,
146 in_merge: PyObject
146 in_merge: PyObject
147 ) -> PyResult<PyObject> {
147 ) -> PyResult<PyObject> {
148 self.inner(py).borrow_mut()
148 self.inner(py).borrow_mut()
149 .remove_file(
149 .remove_file(
150 HgPath::new(f.extract::<PyBytes>(py)?.data(py)),
150 HgPath::new(f.extract::<PyBytes>(py)?.data(py)),
151 in_merge.extract::<PyBool>(py)?.is_true(),
151 in_merge.extract::<PyBool>(py)?.is_true(),
152 )
152 )
153 .or_else(|_| {
153 .or_else(|_| {
154 Err(PyErr::new::<exc::OSError, _>(
154 Err(PyErr::new::<exc::OSError, _>(
155 py,
155 py,
156 "Dirstate error".to_string(),
156 "Dirstate error".to_string(),
157 ))
157 ))
158 })?;
158 })?;
159 Ok(py.None())
159 Ok(py.None())
160 }
160 }
161
161
162 def drop_item_and_copy_source(
162 def drop_item_and_copy_source(
163 &self,
163 &self,
164 f: PyBytes,
164 f: PyBytes,
165 ) -> PyResult<PyNone> {
165 ) -> PyResult<PyNone> {
166 self.inner(py)
166 self.inner(py)
167 .borrow_mut()
167 .borrow_mut()
168 .drop_entry_and_copy_source(HgPath::new(f.data(py)))
168 .drop_entry_and_copy_source(HgPath::new(f.data(py)))
169 .map_err(|e |dirstate_error(py, e))?;
169 .map_err(|e |dirstate_error(py, e))?;
170 Ok(PyNone)
170 Ok(PyNone)
171 }
171 }
172
172
173 def hastrackeddir(&self, d: PyObject) -> PyResult<PyBool> {
173 def hastrackeddir(&self, d: PyObject) -> PyResult<PyBool> {
174 let d = d.extract::<PyBytes>(py)?;
174 let d = d.extract::<PyBytes>(py)?;
175 Ok(self.inner(py).borrow_mut()
175 Ok(self.inner(py).borrow_mut()
176 .has_tracked_dir(HgPath::new(d.data(py)))
176 .has_tracked_dir(HgPath::new(d.data(py)))
177 .map_err(|e| {
177 .map_err(|e| {
178 PyErr::new::<exc::ValueError, _>(py, e.to_string())
178 PyErr::new::<exc::ValueError, _>(py, e.to_string())
179 })?
179 })?
180 .to_py_object(py))
180 .to_py_object(py))
181 }
181 }
182
182
183 def hasdir(&self, d: PyObject) -> PyResult<PyBool> {
183 def hasdir(&self, d: PyObject) -> PyResult<PyBool> {
184 let d = d.extract::<PyBytes>(py)?;
184 let d = d.extract::<PyBytes>(py)?;
185 Ok(self.inner(py).borrow_mut()
185 Ok(self.inner(py).borrow_mut()
186 .has_dir(HgPath::new(d.data(py)))
186 .has_dir(HgPath::new(d.data(py)))
187 .map_err(|e| {
187 .map_err(|e| {
188 PyErr::new::<exc::ValueError, _>(py, e.to_string())
188 PyErr::new::<exc::ValueError, _>(py, e.to_string())
189 })?
189 })?
190 .to_py_object(py))
190 .to_py_object(py))
191 }
191 }
192
192
193 def write_v1(
193 def write_v1(
194 &self,
194 &self,
195 p1: PyObject,
195 p1: PyObject,
196 p2: PyObject,
196 p2: PyObject,
197 now: (u32, u32)
198 ) -> PyResult<PyBytes> {
197 ) -> PyResult<PyBytes> {
199 let now = timestamp(py, now)?;
200
201 let mut inner = self.inner(py).borrow_mut();
198 let mut inner = self.inner(py).borrow_mut();
202 let parents = DirstateParents {
199 let parents = DirstateParents {
203 p1: extract_node_id(py, &p1)?,
200 p1: extract_node_id(py, &p1)?,
204 p2: extract_node_id(py, &p2)?,
201 p2: extract_node_id(py, &p2)?,
205 };
202 };
206 let result = inner.pack_v1(parents, now);
203 let result = inner.pack_v1(parents);
207 match result {
204 match result {
208 Ok(packed) => Ok(PyBytes::new(py, &packed)),
205 Ok(packed) => Ok(PyBytes::new(py, &packed)),
209 Err(_) => Err(PyErr::new::<exc::OSError, _>(
206 Err(_) => Err(PyErr::new::<exc::OSError, _>(
210 py,
207 py,
211 "Dirstate error".to_string(),
208 "Dirstate error".to_string(),
212 )),
209 )),
213 }
210 }
214 }
211 }
215
212
216 /// Returns new data together with whether that data should be appended to
213 /// Returns new data together with whether that data should be appended to
217 /// the existing data file whose content is at `self.on_disk` (True),
214 /// the existing data file whose content is at `self.on_disk` (True),
218 /// instead of written to a new data file (False).
215 /// instead of written to a new data file (False).
219 def write_v2(
216 def write_v2(
220 &self,
217 &self,
221 now: (u32, u32),
222 can_append: bool,
218 can_append: bool,
223 ) -> PyResult<PyObject> {
219 ) -> PyResult<PyObject> {
224 let now = timestamp(py, now)?;
225
226 let mut inner = self.inner(py).borrow_mut();
220 let mut inner = self.inner(py).borrow_mut();
227 let result = inner.pack_v2(now, can_append);
221 let result = inner.pack_v2(can_append);
228 match result {
222 match result {
229 Ok((packed, tree_metadata, append)) => {
223 Ok((packed, tree_metadata, append)) => {
230 let packed = PyBytes::new(py, &packed);
224 let packed = PyBytes::new(py, &packed);
231 let tree_metadata = PyBytes::new(py, &tree_metadata);
225 let tree_metadata = PyBytes::new(py, &tree_metadata);
232 let tuple = (packed, tree_metadata, append);
226 let tuple = (packed, tree_metadata, append);
233 Ok(tuple.to_py_object(py).into_object())
227 Ok(tuple.to_py_object(py).into_object())
234 },
228 },
235 Err(_) => Err(PyErr::new::<exc::OSError, _>(
229 Err(_) => Err(PyErr::new::<exc::OSError, _>(
236 py,
230 py,
237 "Dirstate error".to_string(),
231 "Dirstate error".to_string(),
238 )),
232 )),
239 }
233 }
240 }
234 }
241
235
242 def filefoldmapasdict(&self) -> PyResult<PyDict> {
236 def filefoldmapasdict(&self) -> PyResult<PyDict> {
243 let dict = PyDict::new(py);
237 let dict = PyDict::new(py);
244 for item in self.inner(py).borrow_mut().iter() {
238 for item in self.inner(py).borrow_mut().iter() {
245 let (path, entry) = item.map_err(|e| v2_error(py, e))?;
239 let (path, entry) = item.map_err(|e| v2_error(py, e))?;
246 if entry.state() != EntryState::Removed {
240 if entry.state() != EntryState::Removed {
247 let key = normalize_case(path);
241 let key = normalize_case(path);
248 let value = path;
242 let value = path;
249 dict.set_item(
243 dict.set_item(
250 py,
244 py,
251 PyBytes::new(py, key.as_bytes()).into_object(),
245 PyBytes::new(py, key.as_bytes()).into_object(),
252 PyBytes::new(py, value.as_bytes()).into_object(),
246 PyBytes::new(py, value.as_bytes()).into_object(),
253 )?;
247 )?;
254 }
248 }
255 }
249 }
256 Ok(dict)
250 Ok(dict)
257 }
251 }
258
252
259 def __len__(&self) -> PyResult<usize> {
253 def __len__(&self) -> PyResult<usize> {
260 Ok(self.inner(py).borrow().len())
254 Ok(self.inner(py).borrow().len())
261 }
255 }
262
256
263 def __contains__(&self, key: PyObject) -> PyResult<bool> {
257 def __contains__(&self, key: PyObject) -> PyResult<bool> {
264 let key = key.extract::<PyBytes>(py)?;
258 let key = key.extract::<PyBytes>(py)?;
265 self.inner(py)
259 self.inner(py)
266 .borrow()
260 .borrow()
267 .contains_key(HgPath::new(key.data(py)))
261 .contains_key(HgPath::new(key.data(py)))
268 .map_err(|e| v2_error(py, e))
262 .map_err(|e| v2_error(py, e))
269 }
263 }
270
264
271 def __getitem__(&self, key: PyObject) -> PyResult<PyObject> {
265 def __getitem__(&self, key: PyObject) -> PyResult<PyObject> {
272 let key = key.extract::<PyBytes>(py)?;
266 let key = key.extract::<PyBytes>(py)?;
273 let key = HgPath::new(key.data(py));
267 let key = HgPath::new(key.data(py));
274 match self
268 match self
275 .inner(py)
269 .inner(py)
276 .borrow()
270 .borrow()
277 .get(key)
271 .get(key)
278 .map_err(|e| v2_error(py, e))?
272 .map_err(|e| v2_error(py, e))?
279 {
273 {
280 Some(entry) => {
274 Some(entry) => {
281 Ok(DirstateItem::new_as_pyobject(py, entry)?)
275 Ok(DirstateItem::new_as_pyobject(py, entry)?)
282 },
276 },
283 None => Err(PyErr::new::<exc::KeyError, _>(
277 None => Err(PyErr::new::<exc::KeyError, _>(
284 py,
278 py,
285 String::from_utf8_lossy(key.as_bytes()),
279 String::from_utf8_lossy(key.as_bytes()),
286 )),
280 )),
287 }
281 }
288 }
282 }
289
283
290 def keys(&self) -> PyResult<DirstateMapKeysIterator> {
284 def keys(&self) -> PyResult<DirstateMapKeysIterator> {
291 let leaked_ref = self.inner(py).leak_immutable();
285 let leaked_ref = self.inner(py).leak_immutable();
292 DirstateMapKeysIterator::from_inner(
286 DirstateMapKeysIterator::from_inner(
293 py,
287 py,
294 unsafe { leaked_ref.map(py, |o| o.iter()) },
288 unsafe { leaked_ref.map(py, |o| o.iter()) },
295 )
289 )
296 }
290 }
297
291
298 def items(&self) -> PyResult<DirstateMapItemsIterator> {
292 def items(&self) -> PyResult<DirstateMapItemsIterator> {
299 let leaked_ref = self.inner(py).leak_immutable();
293 let leaked_ref = self.inner(py).leak_immutable();
300 DirstateMapItemsIterator::from_inner(
294 DirstateMapItemsIterator::from_inner(
301 py,
295 py,
302 unsafe { leaked_ref.map(py, |o| o.iter()) },
296 unsafe { leaked_ref.map(py, |o| o.iter()) },
303 )
297 )
304 }
298 }
305
299
306 def __iter__(&self) -> PyResult<DirstateMapKeysIterator> {
300 def __iter__(&self) -> PyResult<DirstateMapKeysIterator> {
307 let leaked_ref = self.inner(py).leak_immutable();
301 let leaked_ref = self.inner(py).leak_immutable();
308 DirstateMapKeysIterator::from_inner(
302 DirstateMapKeysIterator::from_inner(
309 py,
303 py,
310 unsafe { leaked_ref.map(py, |o| o.iter()) },
304 unsafe { leaked_ref.map(py, |o| o.iter()) },
311 )
305 )
312 }
306 }
313
307
314 // TODO all copymap* methods, see docstring above
308 // TODO all copymap* methods, see docstring above
315 def copymapcopy(&self) -> PyResult<PyDict> {
309 def copymapcopy(&self) -> PyResult<PyDict> {
316 let dict = PyDict::new(py);
310 let dict = PyDict::new(py);
317 for item in self.inner(py).borrow().copy_map_iter() {
311 for item in self.inner(py).borrow().copy_map_iter() {
318 let (key, value) = item.map_err(|e| v2_error(py, e))?;
312 let (key, value) = item.map_err(|e| v2_error(py, e))?;
319 dict.set_item(
313 dict.set_item(
320 py,
314 py,
321 PyBytes::new(py, key.as_bytes()),
315 PyBytes::new(py, key.as_bytes()),
322 PyBytes::new(py, value.as_bytes()),
316 PyBytes::new(py, value.as_bytes()),
323 )?;
317 )?;
324 }
318 }
325 Ok(dict)
319 Ok(dict)
326 }
320 }
327
321
328 def copymapgetitem(&self, key: PyObject) -> PyResult<PyBytes> {
322 def copymapgetitem(&self, key: PyObject) -> PyResult<PyBytes> {
329 let key = key.extract::<PyBytes>(py)?;
323 let key = key.extract::<PyBytes>(py)?;
330 match self
324 match self
331 .inner(py)
325 .inner(py)
332 .borrow()
326 .borrow()
333 .copy_map_get(HgPath::new(key.data(py)))
327 .copy_map_get(HgPath::new(key.data(py)))
334 .map_err(|e| v2_error(py, e))?
328 .map_err(|e| v2_error(py, e))?
335 {
329 {
336 Some(copy) => Ok(PyBytes::new(py, copy.as_bytes())),
330 Some(copy) => Ok(PyBytes::new(py, copy.as_bytes())),
337 None => Err(PyErr::new::<exc::KeyError, _>(
331 None => Err(PyErr::new::<exc::KeyError, _>(
338 py,
332 py,
339 String::from_utf8_lossy(key.data(py)),
333 String::from_utf8_lossy(key.data(py)),
340 )),
334 )),
341 }
335 }
342 }
336 }
343 def copymap(&self) -> PyResult<CopyMap> {
337 def copymap(&self) -> PyResult<CopyMap> {
344 CopyMap::from_inner(py, self.clone_ref(py))
338 CopyMap::from_inner(py, self.clone_ref(py))
345 }
339 }
346
340
347 def copymaplen(&self) -> PyResult<usize> {
341 def copymaplen(&self) -> PyResult<usize> {
348 Ok(self.inner(py).borrow().copy_map_len())
342 Ok(self.inner(py).borrow().copy_map_len())
349 }
343 }
350 def copymapcontains(&self, key: PyObject) -> PyResult<bool> {
344 def copymapcontains(&self, key: PyObject) -> PyResult<bool> {
351 let key = key.extract::<PyBytes>(py)?;
345 let key = key.extract::<PyBytes>(py)?;
352 self.inner(py)
346 self.inner(py)
353 .borrow()
347 .borrow()
354 .copy_map_contains_key(HgPath::new(key.data(py)))
348 .copy_map_contains_key(HgPath::new(key.data(py)))
355 .map_err(|e| v2_error(py, e))
349 .map_err(|e| v2_error(py, e))
356 }
350 }
357 def copymapget(
351 def copymapget(
358 &self,
352 &self,
359 key: PyObject,
353 key: PyObject,
360 default: Option<PyObject>
354 default: Option<PyObject>
361 ) -> PyResult<Option<PyObject>> {
355 ) -> PyResult<Option<PyObject>> {
362 let key = key.extract::<PyBytes>(py)?;
356 let key = key.extract::<PyBytes>(py)?;
363 match self
357 match self
364 .inner(py)
358 .inner(py)
365 .borrow()
359 .borrow()
366 .copy_map_get(HgPath::new(key.data(py)))
360 .copy_map_get(HgPath::new(key.data(py)))
367 .map_err(|e| v2_error(py, e))?
361 .map_err(|e| v2_error(py, e))?
368 {
362 {
369 Some(copy) => Ok(Some(
363 Some(copy) => Ok(Some(
370 PyBytes::new(py, copy.as_bytes()).into_object(),
364 PyBytes::new(py, copy.as_bytes()).into_object(),
371 )),
365 )),
372 None => Ok(default),
366 None => Ok(default),
373 }
367 }
374 }
368 }
375 def copymapsetitem(
369 def copymapsetitem(
376 &self,
370 &self,
377 key: PyObject,
371 key: PyObject,
378 value: PyObject
372 value: PyObject
379 ) -> PyResult<PyObject> {
373 ) -> PyResult<PyObject> {
380 let key = key.extract::<PyBytes>(py)?;
374 let key = key.extract::<PyBytes>(py)?;
381 let value = value.extract::<PyBytes>(py)?;
375 let value = value.extract::<PyBytes>(py)?;
382 self.inner(py)
376 self.inner(py)
383 .borrow_mut()
377 .borrow_mut()
384 .copy_map_insert(
378 .copy_map_insert(
385 HgPathBuf::from_bytes(key.data(py)),
379 HgPathBuf::from_bytes(key.data(py)),
386 HgPathBuf::from_bytes(value.data(py)),
380 HgPathBuf::from_bytes(value.data(py)),
387 )
381 )
388 .map_err(|e| v2_error(py, e))?;
382 .map_err(|e| v2_error(py, e))?;
389 Ok(py.None())
383 Ok(py.None())
390 }
384 }
391 def copymappop(
385 def copymappop(
392 &self,
386 &self,
393 key: PyObject,
387 key: PyObject,
394 default: Option<PyObject>
388 default: Option<PyObject>
395 ) -> PyResult<Option<PyObject>> {
389 ) -> PyResult<Option<PyObject>> {
396 let key = key.extract::<PyBytes>(py)?;
390 let key = key.extract::<PyBytes>(py)?;
397 match self
391 match self
398 .inner(py)
392 .inner(py)
399 .borrow_mut()
393 .borrow_mut()
400 .copy_map_remove(HgPath::new(key.data(py)))
394 .copy_map_remove(HgPath::new(key.data(py)))
401 .map_err(|e| v2_error(py, e))?
395 .map_err(|e| v2_error(py, e))?
402 {
396 {
403 Some(copy) => Ok(Some(
397 Some(copy) => Ok(Some(
404 PyBytes::new(py, copy.as_bytes()).into_object(),
398 PyBytes::new(py, copy.as_bytes()).into_object(),
405 )),
399 )),
406 None => Ok(default),
400 None => Ok(default),
407 }
401 }
408 }
402 }
409
403
410 def copymapiter(&self) -> PyResult<CopyMapKeysIterator> {
404 def copymapiter(&self) -> PyResult<CopyMapKeysIterator> {
411 let leaked_ref = self.inner(py).leak_immutable();
405 let leaked_ref = self.inner(py).leak_immutable();
412 CopyMapKeysIterator::from_inner(
406 CopyMapKeysIterator::from_inner(
413 py,
407 py,
414 unsafe { leaked_ref.map(py, |o| o.copy_map_iter()) },
408 unsafe { leaked_ref.map(py, |o| o.copy_map_iter()) },
415 )
409 )
416 }
410 }
417
411
418 def copymapitemsiter(&self) -> PyResult<CopyMapItemsIterator> {
412 def copymapitemsiter(&self) -> PyResult<CopyMapItemsIterator> {
419 let leaked_ref = self.inner(py).leak_immutable();
413 let leaked_ref = self.inner(py).leak_immutable();
420 CopyMapItemsIterator::from_inner(
414 CopyMapItemsIterator::from_inner(
421 py,
415 py,
422 unsafe { leaked_ref.map(py, |o| o.copy_map_iter()) },
416 unsafe { leaked_ref.map(py, |o| o.copy_map_iter()) },
423 )
417 )
424 }
418 }
425
419
426 def tracked_dirs(&self) -> PyResult<PyList> {
420 def tracked_dirs(&self) -> PyResult<PyList> {
427 let dirs = PyList::new(py, &[]);
421 let dirs = PyList::new(py, &[]);
428 for path in self.inner(py).borrow_mut().iter_tracked_dirs()
422 for path in self.inner(py).borrow_mut().iter_tracked_dirs()
429 .map_err(|e |dirstate_error(py, e))?
423 .map_err(|e |dirstate_error(py, e))?
430 {
424 {
431 let path = path.map_err(|e| v2_error(py, e))?;
425 let path = path.map_err(|e| v2_error(py, e))?;
432 let path = PyBytes::new(py, path.as_bytes());
426 let path = PyBytes::new(py, path.as_bytes());
433 dirs.append(py, path.into_object())
427 dirs.append(py, path.into_object())
434 }
428 }
435 Ok(dirs)
429 Ok(dirs)
436 }
430 }
437
431
438 def debug_iter(&self, all: bool) -> PyResult<PyList> {
432 def debug_iter(&self, all: bool) -> PyResult<PyList> {
439 let dirs = PyList::new(py, &[]);
433 let dirs = PyList::new(py, &[]);
440 for item in self.inner(py).borrow().debug_iter(all) {
434 for item in self.inner(py).borrow().debug_iter(all) {
441 let (path, (state, mode, size, mtime)) =
435 let (path, (state, mode, size, mtime)) =
442 item.map_err(|e| v2_error(py, e))?;
436 item.map_err(|e| v2_error(py, e))?;
443 let path = PyBytes::new(py, path.as_bytes());
437 let path = PyBytes::new(py, path.as_bytes());
444 let item = (path, state, mode, size, mtime);
438 let item = (path, state, mode, size, mtime);
445 dirs.append(py, item.to_py_object(py).into_object())
439 dirs.append(py, item.to_py_object(py).into_object())
446 }
440 }
447 Ok(dirs)
441 Ok(dirs)
448 }
442 }
449 });
443 });
450
444
451 impl DirstateMap {
445 impl DirstateMap {
452 pub fn get_inner_mut<'a>(
446 pub fn get_inner_mut<'a>(
453 &'a self,
447 &'a self,
454 py: Python<'a>,
448 py: Python<'a>,
455 ) -> RefMut<'a, OwningDirstateMap> {
449 ) -> RefMut<'a, OwningDirstateMap> {
456 self.inner(py).borrow_mut()
450 self.inner(py).borrow_mut()
457 }
451 }
458 fn translate_key(
452 fn translate_key(
459 py: Python,
453 py: Python,
460 res: Result<(&HgPath, DirstateEntry), DirstateV2ParseError>,
454 res: Result<(&HgPath, DirstateEntry), DirstateV2ParseError>,
461 ) -> PyResult<Option<PyBytes>> {
455 ) -> PyResult<Option<PyBytes>> {
462 let (f, _entry) = res.map_err(|e| v2_error(py, e))?;
456 let (f, _entry) = res.map_err(|e| v2_error(py, e))?;
463 Ok(Some(PyBytes::new(py, f.as_bytes())))
457 Ok(Some(PyBytes::new(py, f.as_bytes())))
464 }
458 }
465 fn translate_key_value(
459 fn translate_key_value(
466 py: Python,
460 py: Python,
467 res: Result<(&HgPath, DirstateEntry), DirstateV2ParseError>,
461 res: Result<(&HgPath, DirstateEntry), DirstateV2ParseError>,
468 ) -> PyResult<Option<(PyBytes, PyObject)>> {
462 ) -> PyResult<Option<(PyBytes, PyObject)>> {
469 let (f, entry) = res.map_err(|e| v2_error(py, e))?;
463 let (f, entry) = res.map_err(|e| v2_error(py, e))?;
470 Ok(Some((
464 Ok(Some((
471 PyBytes::new(py, f.as_bytes()),
465 PyBytes::new(py, f.as_bytes()),
472 DirstateItem::new_as_pyobject(py, entry)?,
466 DirstateItem::new_as_pyobject(py, entry)?,
473 )))
467 )))
474 }
468 }
475 }
469 }
476
470
477 py_shared_iterator!(
471 py_shared_iterator!(
478 DirstateMapKeysIterator,
472 DirstateMapKeysIterator,
479 UnsafePyLeaked<StateMapIter<'static>>,
473 UnsafePyLeaked<StateMapIter<'static>>,
480 DirstateMap::translate_key,
474 DirstateMap::translate_key,
481 Option<PyBytes>
475 Option<PyBytes>
482 );
476 );
483
477
484 py_shared_iterator!(
478 py_shared_iterator!(
485 DirstateMapItemsIterator,
479 DirstateMapItemsIterator,
486 UnsafePyLeaked<StateMapIter<'static>>,
480 UnsafePyLeaked<StateMapIter<'static>>,
487 DirstateMap::translate_key_value,
481 DirstateMap::translate_key_value,
488 Option<(PyBytes, PyObject)>
482 Option<(PyBytes, PyObject)>
489 );
483 );
490
484
491 fn extract_node_id(py: Python, obj: &PyObject) -> PyResult<Node> {
485 fn extract_node_id(py: Python, obj: &PyObject) -> PyResult<Node> {
492 let bytes = obj.extract::<PyBytes>(py)?;
486 let bytes = obj.extract::<PyBytes>(py)?;
493 match bytes.data(py).try_into() {
487 match bytes.data(py).try_into() {
494 Ok(s) => Ok(s),
488 Ok(s) => Ok(s),
495 Err(e) => Err(PyErr::new::<exc::ValueError, _>(py, e.to_string())),
489 Err(e) => Err(PyErr::new::<exc::ValueError, _>(py, e.to_string())),
496 }
490 }
497 }
491 }
498
492
499 pub(super) fn v2_error(py: Python<'_>, _: DirstateV2ParseError) -> PyErr {
493 pub(super) fn v2_error(py: Python<'_>, _: DirstateV2ParseError) -> PyErr {
500 PyErr::new::<exc::ValueError, _>(py, "corrupted dirstate-v2")
494 PyErr::new::<exc::ValueError, _>(py, "corrupted dirstate-v2")
501 }
495 }
502
496
503 fn dirstate_error(py: Python<'_>, e: DirstateError) -> PyErr {
497 fn dirstate_error(py: Python<'_>, e: DirstateError) -> PyErr {
504 PyErr::new::<exc::OSError, _>(py, format!("Dirstate error: {:?}", e))
498 PyErr::new::<exc::OSError, _>(py, format!("Dirstate error: {:?}", e))
505 }
499 }
@@ -1,288 +1,283 b''
1 use cpython::exc;
1 use cpython::exc;
2 use cpython::ObjectProtocol;
2 use cpython::ObjectProtocol;
3 use cpython::PyBytes;
3 use cpython::PyBytes;
4 use cpython::PyErr;
4 use cpython::PyErr;
5 use cpython::PyNone;
5 use cpython::PyNone;
6 use cpython::PyObject;
6 use cpython::PyObject;
7 use cpython::PyResult;
7 use cpython::PyResult;
8 use cpython::Python;
8 use cpython::Python;
9 use cpython::PythonObject;
9 use cpython::PythonObject;
10 use hg::dirstate::DirstateEntry;
10 use hg::dirstate::DirstateEntry;
11 use hg::dirstate::EntryState;
11 use hg::dirstate::EntryState;
12 use hg::dirstate::TruncatedTimestamp;
12 use hg::dirstate::TruncatedTimestamp;
13 use std::cell::Cell;
13 use std::cell::Cell;
14 use std::convert::TryFrom;
14 use std::convert::TryFrom;
15
15
16 py_class!(pub class DirstateItem |py| {
16 py_class!(pub class DirstateItem |py| {
17 data entry: Cell<DirstateEntry>;
17 data entry: Cell<DirstateEntry>;
18
18
19 def __new__(
19 def __new__(
20 _cls,
20 _cls,
21 wc_tracked: bool = false,
21 wc_tracked: bool = false,
22 p1_tracked: bool = false,
22 p1_tracked: bool = false,
23 p2_info: bool = false,
23 p2_info: bool = false,
24 has_meaningful_data: bool = true,
24 has_meaningful_data: bool = true,
25 has_meaningful_mtime: bool = true,
25 has_meaningful_mtime: bool = true,
26 parentfiledata: Option<(u32, u32, Option<(u32, u32)>)> = None,
26 parentfiledata: Option<(u32, u32, Option<(u32, u32)>)> = None,
27 fallback_exec: Option<bool> = None,
27 fallback_exec: Option<bool> = None,
28 fallback_symlink: Option<bool> = None,
28 fallback_symlink: Option<bool> = None,
29
29
30 ) -> PyResult<DirstateItem> {
30 ) -> PyResult<DirstateItem> {
31 let mut mode_size_opt = None;
31 let mut mode_size_opt = None;
32 let mut mtime_opt = None;
32 let mut mtime_opt = None;
33 if let Some((mode, size, mtime)) = parentfiledata {
33 if let Some((mode, size, mtime)) = parentfiledata {
34 if has_meaningful_data {
34 if has_meaningful_data {
35 mode_size_opt = Some((mode, size))
35 mode_size_opt = Some((mode, size))
36 }
36 }
37 if has_meaningful_mtime {
37 if has_meaningful_mtime {
38 if let Some(m) = mtime {
38 if let Some(m) = mtime {
39 mtime_opt = Some(timestamp(py, m)?);
39 mtime_opt = Some(timestamp(py, m)?);
40 }
40 }
41 }
41 }
42 }
42 }
43 let entry = DirstateEntry::from_v2_data(
43 let entry = DirstateEntry::from_v2_data(
44 wc_tracked,
44 wc_tracked,
45 p1_tracked,
45 p1_tracked,
46 p2_info,
46 p2_info,
47 mode_size_opt,
47 mode_size_opt,
48 mtime_opt,
48 mtime_opt,
49 fallback_exec,
49 fallback_exec,
50 fallback_symlink,
50 fallback_symlink,
51 );
51 );
52 DirstateItem::create_instance(py, Cell::new(entry))
52 DirstateItem::create_instance(py, Cell::new(entry))
53 }
53 }
54
54
55 @property
55 @property
56 def state(&self) -> PyResult<PyBytes> {
56 def state(&self) -> PyResult<PyBytes> {
57 let state_byte: u8 = self.entry(py).get().state().into();
57 let state_byte: u8 = self.entry(py).get().state().into();
58 Ok(PyBytes::new(py, &[state_byte]))
58 Ok(PyBytes::new(py, &[state_byte]))
59 }
59 }
60
60
61 @property
61 @property
62 def mode(&self) -> PyResult<i32> {
62 def mode(&self) -> PyResult<i32> {
63 Ok(self.entry(py).get().mode())
63 Ok(self.entry(py).get().mode())
64 }
64 }
65
65
66 @property
66 @property
67 def size(&self) -> PyResult<i32> {
67 def size(&self) -> PyResult<i32> {
68 Ok(self.entry(py).get().size())
68 Ok(self.entry(py).get().size())
69 }
69 }
70
70
71 @property
71 @property
72 def mtime(&self) -> PyResult<i32> {
72 def mtime(&self) -> PyResult<i32> {
73 Ok(self.entry(py).get().mtime())
73 Ok(self.entry(py).get().mtime())
74 }
74 }
75
75
76 @property
76 @property
77 def has_fallback_exec(&self) -> PyResult<bool> {
77 def has_fallback_exec(&self) -> PyResult<bool> {
78 match self.entry(py).get().get_fallback_exec() {
78 match self.entry(py).get().get_fallback_exec() {
79 Some(_) => Ok(true),
79 Some(_) => Ok(true),
80 None => Ok(false),
80 None => Ok(false),
81 }
81 }
82 }
82 }
83
83
84 @property
84 @property
85 def fallback_exec(&self) -> PyResult<Option<bool>> {
85 def fallback_exec(&self) -> PyResult<Option<bool>> {
86 match self.entry(py).get().get_fallback_exec() {
86 match self.entry(py).get().get_fallback_exec() {
87 Some(exec) => Ok(Some(exec)),
87 Some(exec) => Ok(Some(exec)),
88 None => Ok(None),
88 None => Ok(None),
89 }
89 }
90 }
90 }
91
91
92 @fallback_exec.setter
92 @fallback_exec.setter
93 def set_fallback_exec(&self, value: Option<PyObject>) -> PyResult<()> {
93 def set_fallback_exec(&self, value: Option<PyObject>) -> PyResult<()> {
94 match value {
94 match value {
95 None => {self.entry(py).get().set_fallback_exec(None);},
95 None => {self.entry(py).get().set_fallback_exec(None);},
96 Some(value) => {
96 Some(value) => {
97 if value.is_none(py) {
97 if value.is_none(py) {
98 self.entry(py).get().set_fallback_exec(None);
98 self.entry(py).get().set_fallback_exec(None);
99 } else {
99 } else {
100 self.entry(py).get().set_fallback_exec(
100 self.entry(py).get().set_fallback_exec(
101 Some(value.is_true(py)?)
101 Some(value.is_true(py)?)
102 );
102 );
103 }},
103 }},
104 }
104 }
105 Ok(())
105 Ok(())
106 }
106 }
107
107
108 @property
108 @property
109 def has_fallback_symlink(&self) -> PyResult<bool> {
109 def has_fallback_symlink(&self) -> PyResult<bool> {
110 match self.entry(py).get().get_fallback_symlink() {
110 match self.entry(py).get().get_fallback_symlink() {
111 Some(_) => Ok(true),
111 Some(_) => Ok(true),
112 None => Ok(false),
112 None => Ok(false),
113 }
113 }
114 }
114 }
115
115
116 @property
116 @property
117 def fallback_symlink(&self) -> PyResult<Option<bool>> {
117 def fallback_symlink(&self) -> PyResult<Option<bool>> {
118 match self.entry(py).get().get_fallback_symlink() {
118 match self.entry(py).get().get_fallback_symlink() {
119 Some(symlink) => Ok(Some(symlink)),
119 Some(symlink) => Ok(Some(symlink)),
120 None => Ok(None),
120 None => Ok(None),
121 }
121 }
122 }
122 }
123
123
124 @fallback_symlink.setter
124 @fallback_symlink.setter
125 def set_fallback_symlink(&self, value: Option<PyObject>) -> PyResult<()> {
125 def set_fallback_symlink(&self, value: Option<PyObject>) -> PyResult<()> {
126 match value {
126 match value {
127 None => {self.entry(py).get().set_fallback_symlink(None);},
127 None => {self.entry(py).get().set_fallback_symlink(None);},
128 Some(value) => {
128 Some(value) => {
129 if value.is_none(py) {
129 if value.is_none(py) {
130 self.entry(py).get().set_fallback_symlink(None);
130 self.entry(py).get().set_fallback_symlink(None);
131 } else {
131 } else {
132 self.entry(py).get().set_fallback_symlink(
132 self.entry(py).get().set_fallback_symlink(
133 Some(value.is_true(py)?)
133 Some(value.is_true(py)?)
134 );
134 );
135 }},
135 }},
136 }
136 }
137 Ok(())
137 Ok(())
138 }
138 }
139
139
140 @property
140 @property
141 def tracked(&self) -> PyResult<bool> {
141 def tracked(&self) -> PyResult<bool> {
142 Ok(self.entry(py).get().tracked())
142 Ok(self.entry(py).get().tracked())
143 }
143 }
144
144
145 @property
145 @property
146 def p1_tracked(&self) -> PyResult<bool> {
146 def p1_tracked(&self) -> PyResult<bool> {
147 Ok(self.entry(py).get().p1_tracked())
147 Ok(self.entry(py).get().p1_tracked())
148 }
148 }
149
149
150 @property
150 @property
151 def added(&self) -> PyResult<bool> {
151 def added(&self) -> PyResult<bool> {
152 Ok(self.entry(py).get().added())
152 Ok(self.entry(py).get().added())
153 }
153 }
154
154
155
155
156 @property
156 @property
157 def p2_info(&self) -> PyResult<bool> {
157 def p2_info(&self) -> PyResult<bool> {
158 Ok(self.entry(py).get().p2_info())
158 Ok(self.entry(py).get().p2_info())
159 }
159 }
160
160
161 @property
161 @property
162 def removed(&self) -> PyResult<bool> {
162 def removed(&self) -> PyResult<bool> {
163 Ok(self.entry(py).get().removed())
163 Ok(self.entry(py).get().removed())
164 }
164 }
165
165
166 @property
166 @property
167 def maybe_clean(&self) -> PyResult<bool> {
167 def maybe_clean(&self) -> PyResult<bool> {
168 Ok(self.entry(py).get().maybe_clean())
168 Ok(self.entry(py).get().maybe_clean())
169 }
169 }
170
170
171 @property
171 @property
172 def any_tracked(&self) -> PyResult<bool> {
172 def any_tracked(&self) -> PyResult<bool> {
173 Ok(self.entry(py).get().any_tracked())
173 Ok(self.entry(py).get().any_tracked())
174 }
174 }
175
175
176 def v1_state(&self) -> PyResult<PyBytes> {
176 def v1_state(&self) -> PyResult<PyBytes> {
177 let (state, _mode, _size, _mtime) = self.entry(py).get().v1_data();
177 let (state, _mode, _size, _mtime) = self.entry(py).get().v1_data();
178 let state_byte: u8 = state.into();
178 let state_byte: u8 = state.into();
179 Ok(PyBytes::new(py, &[state_byte]))
179 Ok(PyBytes::new(py, &[state_byte]))
180 }
180 }
181
181
182 def v1_mode(&self) -> PyResult<i32> {
182 def v1_mode(&self) -> PyResult<i32> {
183 let (_state, mode, _size, _mtime) = self.entry(py).get().v1_data();
183 let (_state, mode, _size, _mtime) = self.entry(py).get().v1_data();
184 Ok(mode)
184 Ok(mode)
185 }
185 }
186
186
187 def v1_size(&self) -> PyResult<i32> {
187 def v1_size(&self) -> PyResult<i32> {
188 let (_state, _mode, size, _mtime) = self.entry(py).get().v1_data();
188 let (_state, _mode, size, _mtime) = self.entry(py).get().v1_data();
189 Ok(size)
189 Ok(size)
190 }
190 }
191
191
192 def v1_mtime(&self) -> PyResult<i32> {
192 def v1_mtime(&self) -> PyResult<i32> {
193 let (_state, _mode, _size, mtime) = self.entry(py).get().v1_data();
193 let (_state, _mode, _size, mtime) = self.entry(py).get().v1_data();
194 Ok(mtime)
194 Ok(mtime)
195 }
195 }
196
196
197 def need_delay(&self, now: (u32, u32)) -> PyResult<bool> {
198 let now = timestamp(py, now)?;
199 Ok(self.entry(py).get().need_delay(now))
200 }
201
202 def mtime_likely_equal_to(&self, other: (u32, u32)) -> PyResult<bool> {
197 def mtime_likely_equal_to(&self, other: (u32, u32)) -> PyResult<bool> {
203 if let Some(mtime) = self.entry(py).get().truncated_mtime() {
198 if let Some(mtime) = self.entry(py).get().truncated_mtime() {
204 Ok(mtime.likely_equal(timestamp(py, other)?))
199 Ok(mtime.likely_equal(timestamp(py, other)?))
205 } else {
200 } else {
206 Ok(false)
201 Ok(false)
207 }
202 }
208 }
203 }
209
204
210 @classmethod
205 @classmethod
211 def from_v1_data(
206 def from_v1_data(
212 _cls,
207 _cls,
213 state: PyBytes,
208 state: PyBytes,
214 mode: i32,
209 mode: i32,
215 size: i32,
210 size: i32,
216 mtime: i32,
211 mtime: i32,
217 ) -> PyResult<Self> {
212 ) -> PyResult<Self> {
218 let state = <[u8; 1]>::try_from(state.data(py))
213 let state = <[u8; 1]>::try_from(state.data(py))
219 .ok()
214 .ok()
220 .and_then(|state| EntryState::try_from(state[0]).ok())
215 .and_then(|state| EntryState::try_from(state[0]).ok())
221 .ok_or_else(|| PyErr::new::<exc::ValueError, _>(py, "invalid state"))?;
216 .ok_or_else(|| PyErr::new::<exc::ValueError, _>(py, "invalid state"))?;
222 let entry = DirstateEntry::from_v1_data(state, mode, size, mtime);
217 let entry = DirstateEntry::from_v1_data(state, mode, size, mtime);
223 DirstateItem::create_instance(py, Cell::new(entry))
218 DirstateItem::create_instance(py, Cell::new(entry))
224 }
219 }
225
220
226 def drop_merge_data(&self) -> PyResult<PyNone> {
221 def drop_merge_data(&self) -> PyResult<PyNone> {
227 self.update(py, |entry| entry.drop_merge_data());
222 self.update(py, |entry| entry.drop_merge_data());
228 Ok(PyNone)
223 Ok(PyNone)
229 }
224 }
230
225
231 def set_clean(
226 def set_clean(
232 &self,
227 &self,
233 mode: u32,
228 mode: u32,
234 size: u32,
229 size: u32,
235 mtime: (u32, u32),
230 mtime: (u32, u32),
236 ) -> PyResult<PyNone> {
231 ) -> PyResult<PyNone> {
237 let mtime = timestamp(py, mtime)?;
232 let mtime = timestamp(py, mtime)?;
238 self.update(py, |entry| entry.set_clean(mode, size, mtime));
233 self.update(py, |entry| entry.set_clean(mode, size, mtime));
239 Ok(PyNone)
234 Ok(PyNone)
240 }
235 }
241
236
242 def set_possibly_dirty(&self) -> PyResult<PyNone> {
237 def set_possibly_dirty(&self) -> PyResult<PyNone> {
243 self.update(py, |entry| entry.set_possibly_dirty());
238 self.update(py, |entry| entry.set_possibly_dirty());
244 Ok(PyNone)
239 Ok(PyNone)
245 }
240 }
246
241
247 def set_tracked(&self) -> PyResult<PyNone> {
242 def set_tracked(&self) -> PyResult<PyNone> {
248 self.update(py, |entry| entry.set_tracked());
243 self.update(py, |entry| entry.set_tracked());
249 Ok(PyNone)
244 Ok(PyNone)
250 }
245 }
251
246
252 def set_untracked(&self) -> PyResult<PyNone> {
247 def set_untracked(&self) -> PyResult<PyNone> {
253 self.update(py, |entry| entry.set_untracked());
248 self.update(py, |entry| entry.set_untracked());
254 Ok(PyNone)
249 Ok(PyNone)
255 }
250 }
256 });
251 });
257
252
258 impl DirstateItem {
253 impl DirstateItem {
259 pub fn new_as_pyobject(
254 pub fn new_as_pyobject(
260 py: Python<'_>,
255 py: Python<'_>,
261 entry: DirstateEntry,
256 entry: DirstateEntry,
262 ) -> PyResult<PyObject> {
257 ) -> PyResult<PyObject> {
263 Ok(DirstateItem::create_instance(py, Cell::new(entry))?.into_object())
258 Ok(DirstateItem::create_instance(py, Cell::new(entry))?.into_object())
264 }
259 }
265
260
266 pub fn get_entry(&self, py: Python<'_>) -> DirstateEntry {
261 pub fn get_entry(&self, py: Python<'_>) -> DirstateEntry {
267 self.entry(py).get()
262 self.entry(py).get()
268 }
263 }
269
264
270 // TODO: Use https://doc.rust-lang.org/std/cell/struct.Cell.html#method.update instead when it’s stable
265 // TODO: Use https://doc.rust-lang.org/std/cell/struct.Cell.html#method.update instead when it’s stable
271 pub fn update(&self, py: Python<'_>, f: impl FnOnce(&mut DirstateEntry)) {
266 pub fn update(&self, py: Python<'_>, f: impl FnOnce(&mut DirstateEntry)) {
272 let mut entry = self.entry(py).get();
267 let mut entry = self.entry(py).get();
273 f(&mut entry);
268 f(&mut entry);
274 self.entry(py).set(entry)
269 self.entry(py).set(entry)
275 }
270 }
276 }
271 }
277
272
278 pub(crate) fn timestamp(
273 pub(crate) fn timestamp(
279 py: Python<'_>,
274 py: Python<'_>,
280 (s, ns): (u32, u32),
275 (s, ns): (u32, u32),
281 ) -> PyResult<TruncatedTimestamp> {
276 ) -> PyResult<TruncatedTimestamp> {
282 TruncatedTimestamp::from_already_truncated(s, ns).map_err(|_| {
277 TruncatedTimestamp::from_already_truncated(s, ns).map_err(|_| {
283 PyErr::new::<exc::ValueError, _>(
278 PyErr::new::<exc::ValueError, _>(
284 py,
279 py,
285 "expected mtime truncated to 31 bits",
280 "expected mtime truncated to 31 bits",
286 )
281 )
287 })
282 })
288 }
283 }
@@ -1,106 +1,100 b''
1 # extension to emulate invoking 'dirstate.write()' at the time
1 # extension to emulate invoking 'dirstate.write()' at the time
2 # specified by '[fakedirstatewritetime] fakenow', only when
2 # specified by '[fakedirstatewritetime] fakenow', only when
3 # 'dirstate.write()' is invoked via functions below:
3 # 'dirstate.write()' is invoked via functions below:
4 #
4 #
5 # - 'workingctx._poststatusfixup()' (= 'repo.status()')
5 # - 'workingctx._poststatusfixup()' (= 'repo.status()')
6 # - 'committablectx.markcommitted()'
6 # - 'committablectx.markcommitted()'
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 from mercurial import (
10 from mercurial import (
11 context,
11 context,
12 dirstatemap as dirstatemapmod,
12 dirstatemap as dirstatemapmod,
13 extensions,
13 extensions,
14 policy,
14 policy,
15 registrar,
15 registrar,
16 )
16 )
17 from mercurial.dirstateutils import timestamp
17 from mercurial.dirstateutils import timestamp
18 from mercurial.utils import dateutil
18 from mercurial.utils import dateutil
19
19
20 try:
20 try:
21 from mercurial import rustext
21 from mercurial import rustext
22
22
23 rustext.__name__ # force actual import (see hgdemandimport)
23 rustext.__name__ # force actual import (see hgdemandimport)
24 except ImportError:
24 except ImportError:
25 rustext = None
25 rustext = None
26
26
27 configtable = {}
27 configtable = {}
28 configitem = registrar.configitem(configtable)
28 configitem = registrar.configitem(configtable)
29
29
30 configitem(
30 configitem(
31 b'fakedirstatewritetime',
31 b'fakedirstatewritetime',
32 b'fakenow',
32 b'fakenow',
33 default=None,
33 default=None,
34 )
34 )
35
35
36 parsers = policy.importmod('parsers')
36 parsers = policy.importmod('parsers')
37 has_rust_dirstate = policy.importrust('dirstate') is not None
37 has_rust_dirstate = policy.importrust('dirstate') is not None
38
38
39
39
40 def pack_dirstate(fakenow, orig, dmap, copymap, pl, now):
40 def pack_dirstate(orig, dmap, copymap, pl):
41 # execute what original parsers.pack_dirstate should do actually
41 return orig(dmap, copymap, pl)
42 # for consistency
43 for f, e in dmap.items():
44 if e.need_delay(now):
45 e.set_possibly_dirty()
46
47 return orig(dmap, copymap, pl, fakenow)
48
42
49
43
50 def fakewrite(ui, func):
44 def fakewrite(ui, func):
51 # fake "now" of 'pack_dirstate' only if it is invoked while 'func'
45 # fake "now" of 'pack_dirstate' only if it is invoked while 'func'
52
46
53 fakenow = ui.config(b'fakedirstatewritetime', b'fakenow')
47 fakenow = ui.config(b'fakedirstatewritetime', b'fakenow')
54 if not fakenow:
48 if not fakenow:
55 # Execute original one, if fakenow isn't configured. This is
49 # Execute original one, if fakenow isn't configured. This is
56 # useful to prevent subrepos from executing replaced one,
50 # useful to prevent subrepos from executing replaced one,
57 # because replacing 'parsers.pack_dirstate' is also effective
51 # because replacing 'parsers.pack_dirstate' is also effective
58 # in subrepos.
52 # in subrepos.
59 return func()
53 return func()
60
54
61 # parsing 'fakenow' in YYYYmmddHHMM format makes comparison between
55 # parsing 'fakenow' in YYYYmmddHHMM format makes comparison between
62 # 'fakenow' value and 'touch -t YYYYmmddHHMM' argument easy
56 # 'fakenow' value and 'touch -t YYYYmmddHHMM' argument easy
63 fakenow = dateutil.parsedate(fakenow, [b'%Y%m%d%H%M'])[0]
57 fakenow = dateutil.parsedate(fakenow, [b'%Y%m%d%H%M'])[0]
64 fakenow = timestamp.timestamp((fakenow, 0))
58 fakenow = timestamp.timestamp((fakenow, 0))
65
59
66 if has_rust_dirstate:
60 if has_rust_dirstate:
67 # The Rust implementation does not use public parse/pack dirstate
61 # The Rust implementation does not use public parse/pack dirstate
68 # to prevent conversion round-trips
62 # to prevent conversion round-trips
69 orig_dirstatemap_write = dirstatemapmod.dirstatemap.write
63 orig_dirstatemap_write = dirstatemapmod.dirstatemap.write
70 wrapper = lambda self, tr, st, now: orig_dirstatemap_write(
64 wrapper = lambda self, tr, st: orig_dirstatemap_write(self, tr, st)
71 self, tr, st, fakenow
72 )
73 dirstatemapmod.dirstatemap.write = wrapper
65 dirstatemapmod.dirstatemap.write = wrapper
74
66
75 orig_get_fs_now = timestamp.get_fs_now
67 orig_get_fs_now = timestamp.get_fs_now
76 wrapper = lambda *args: pack_dirstate(fakenow, orig_pack_dirstate, *args)
68 wrapper = lambda *args: pack_dirstate(orig_pack_dirstate, *args)
77
69
78 orig_module = parsers
70 orig_module = parsers
79 orig_pack_dirstate = parsers.pack_dirstate
71 orig_pack_dirstate = parsers.pack_dirstate
80
72
81 orig_module.pack_dirstate = wrapper
73 orig_module.pack_dirstate = wrapper
82 timestamp.get_fs_now = lambda *args: fakenow
74 timestamp.get_fs_now = (
75 lambda *args: fakenow
76 ) # XXX useless for this purpose now
83 try:
77 try:
84 return func()
78 return func()
85 finally:
79 finally:
86 orig_module.pack_dirstate = orig_pack_dirstate
80 orig_module.pack_dirstate = orig_pack_dirstate
87 timestamp.get_fs_now = orig_get_fs_now
81 timestamp.get_fs_now = orig_get_fs_now
88 if has_rust_dirstate:
82 if has_rust_dirstate:
89 dirstatemapmod.dirstatemap.write = orig_dirstatemap_write
83 dirstatemapmod.dirstatemap.write = orig_dirstatemap_write
90
84
91
85
92 def _poststatusfixup(orig, workingctx, status, fixup):
86 def _poststatusfixup(orig, workingctx, status, fixup):
93 ui = workingctx.repo().ui
87 ui = workingctx.repo().ui
94 return fakewrite(ui, lambda: orig(workingctx, status, fixup))
88 return fakewrite(ui, lambda: orig(workingctx, status, fixup))
95
89
96
90
97 def markcommitted(orig, committablectx, node):
91 def markcommitted(orig, committablectx, node):
98 ui = committablectx.repo().ui
92 ui = committablectx.repo().ui
99 return fakewrite(ui, lambda: orig(committablectx, node))
93 return fakewrite(ui, lambda: orig(committablectx, node))
100
94
101
95
102 def extsetup(ui):
96 def extsetup(ui):
103 extensions.wrapfunction(
97 extensions.wrapfunction(
104 context.workingctx, '_poststatusfixup', _poststatusfixup
98 context.workingctx, '_poststatusfixup', _poststatusfixup
105 )
99 )
106 extensions.wrapfunction(context.workingctx, 'markcommitted', markcommitted)
100 extensions.wrapfunction(context.workingctx, 'markcommitted', markcommitted)
@@ -1,439 +1,426 b''
1 $ cat <<EOF > merge
1 $ cat <<EOF > merge
2 > from __future__ import print_function
2 > from __future__ import print_function
3 > import sys, os
3 > import sys, os
4 >
4 >
5 > try:
5 > try:
6 > import msvcrt
6 > import msvcrt
7 > msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
7 > msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
8 > msvcrt.setmode(sys.stderr.fileno(), os.O_BINARY)
8 > msvcrt.setmode(sys.stderr.fileno(), os.O_BINARY)
9 > except ImportError:
9 > except ImportError:
10 > pass
10 > pass
11 >
11 >
12 > print("merging for", os.path.basename(sys.argv[1]))
12 > print("merging for", os.path.basename(sys.argv[1]))
13 > EOF
13 > EOF
14 $ HGMERGE="\"$PYTHON\" ../merge"; export HGMERGE
14 $ HGMERGE="\"$PYTHON\" ../merge"; export HGMERGE
15
15
16 $ hg init t
16 $ hg init t
17 $ cd t
17 $ cd t
18 $ echo This is file a1 > a
18 $ echo This is file a1 > a
19 $ hg add a
19 $ hg add a
20 $ hg commit -m "commit #0"
20 $ hg commit -m "commit #0"
21 $ echo This is file b1 > b
21 $ echo This is file b1 > b
22 $ hg add b
22 $ hg add b
23 $ hg commit -m "commit #1"
23 $ hg commit -m "commit #1"
24
24
25 $ hg update 0
25 $ hg update 0
26 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
26 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
27
27
28 Test interrupted updates by having a non-empty dir with the same name as one
28 Test interrupted updates by having a non-empty dir with the same name as one
29 of the files in a commit we're updating to
29 of the files in a commit we're updating to
30
30
31 $ mkdir b && touch b/nonempty
31 $ mkdir b && touch b/nonempty
32 $ hg up
32 $ hg up
33 abort: Unlinking directory not permitted: *$TESTTMP/t/b* (glob) (windows !)
33 abort: Unlinking directory not permitted: *$TESTTMP/t/b* (glob) (windows !)
34 abort: Directory not empty: '?\$TESTTMP/t/b'? (re) (no-windows !)
34 abort: Directory not empty: '?\$TESTTMP/t/b'? (re) (no-windows !)
35 [255]
35 [255]
36 $ hg ci
36 $ hg ci
37 abort: last update was interrupted
37 abort: last update was interrupted
38 (use 'hg update' to get a consistent checkout)
38 (use 'hg update' to get a consistent checkout)
39 [20]
39 [20]
40 $ hg sum
40 $ hg sum
41 parent: 0:538afb845929
41 parent: 0:538afb845929
42 commit #0
42 commit #0
43 branch: default
43 branch: default
44 commit: 1 unknown (interrupted update)
44 commit: 1 unknown (interrupted update)
45 update: 1 new changesets (update)
45 update: 1 new changesets (update)
46 phases: 2 draft
46 phases: 2 draft
47 Detect interrupted update by hg status --verbose
47 Detect interrupted update by hg status --verbose
48 $ hg status -v
48 $ hg status -v
49 ? b/nonempty
49 ? b/nonempty
50 # The repository is in an unfinished *update* state.
50 # The repository is in an unfinished *update* state.
51
51
52 # To continue: hg update .
52 # To continue: hg update .
53
53
54
54
55 $ rm b/nonempty
55 $ rm b/nonempty
56
56
57 $ hg up
57 $ hg up
58 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
58 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
59 $ hg sum
59 $ hg sum
60 parent: 1:b8bb4a988f25 tip
60 parent: 1:b8bb4a988f25 tip
61 commit #1
61 commit #1
62 branch: default
62 branch: default
63 commit: (clean)
63 commit: (clean)
64 update: (current)
64 update: (current)
65 phases: 2 draft
65 phases: 2 draft
66
66
67 Prepare a basic merge
67 Prepare a basic merge
68
68
69 $ hg up 0
69 $ hg up 0
70 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
70 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
71 $ echo This is file c1 > c
71 $ echo This is file c1 > c
72 $ hg add c
72 $ hg add c
73 $ hg commit -m "commit #2"
73 $ hg commit -m "commit #2"
74 created new head
74 created new head
75 $ echo This is file b1 > b
75 $ echo This is file b1 > b
76 no merges expected
76 no merges expected
77 $ hg merge -P 1
77 $ hg merge -P 1
78 changeset: 1:b8bb4a988f25
78 changeset: 1:b8bb4a988f25
79 user: test
79 user: test
80 date: Thu Jan 01 00:00:00 1970 +0000
80 date: Thu Jan 01 00:00:00 1970 +0000
81 summary: commit #1
81 summary: commit #1
82
82
83 $ hg merge 1
83 $ hg merge 1
84 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
84 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
85 (branch merge, don't forget to commit)
85 (branch merge, don't forget to commit)
86 $ hg diff --nodates
86 $ hg diff --nodates
87 diff -r 49035e18a8e6 b
87 diff -r 49035e18a8e6 b
88 --- /dev/null
88 --- /dev/null
89 +++ b/b
89 +++ b/b
90 @@ -0,0 +1,1 @@
90 @@ -0,0 +1,1 @@
91 +This is file b1
91 +This is file b1
92 $ hg status
92 $ hg status
93 M b
93 M b
94 $ cd ..; rm -r t
94 $ cd ..; rm -r t
95
95
96 $ hg init t
96 $ hg init t
97 $ cd t
97 $ cd t
98 $ echo This is file a1 > a
98 $ echo This is file a1 > a
99 $ hg add a
99 $ hg add a
100 $ hg commit -m "commit #0"
100 $ hg commit -m "commit #0"
101 $ echo This is file b1 > b
101 $ echo This is file b1 > b
102 $ hg add b
102 $ hg add b
103 $ hg commit -m "commit #1"
103 $ hg commit -m "commit #1"
104
104
105 $ hg update 0
105 $ hg update 0
106 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
106 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
107 $ echo This is file c1 > c
107 $ echo This is file c1 > c
108 $ hg add c
108 $ hg add c
109 $ hg commit -m "commit #2"
109 $ hg commit -m "commit #2"
110 created new head
110 created new head
111 $ echo This is file b2 > b
111 $ echo This is file b2 > b
112 merge should fail
112 merge should fail
113 $ hg merge 1
113 $ hg merge 1
114 b: untracked file differs
114 b: untracked file differs
115 abort: untracked files in working directory differ from files in requested revision
115 abort: untracked files in working directory differ from files in requested revision
116 [20]
116 [20]
117
117
118 #if symlink
118 #if symlink
119 symlinks to directories should be treated as regular files (issue5027)
119 symlinks to directories should be treated as regular files (issue5027)
120 $ rm b
120 $ rm b
121 $ ln -s 'This is file b2' b
121 $ ln -s 'This is file b2' b
122 $ hg merge 1
122 $ hg merge 1
123 b: untracked file differs
123 b: untracked file differs
124 abort: untracked files in working directory differ from files in requested revision
124 abort: untracked files in working directory differ from files in requested revision
125 [20]
125 [20]
126 symlinks shouldn't be followed
126 symlinks shouldn't be followed
127 $ rm b
127 $ rm b
128 $ echo This is file b1 > .hg/b
128 $ echo This is file b1 > .hg/b
129 $ ln -s .hg/b b
129 $ ln -s .hg/b b
130 $ hg merge 1
130 $ hg merge 1
131 b: untracked file differs
131 b: untracked file differs
132 abort: untracked files in working directory differ from files in requested revision
132 abort: untracked files in working directory differ from files in requested revision
133 [20]
133 [20]
134
134
135 $ rm b
135 $ rm b
136 $ echo This is file b2 > b
136 $ echo This is file b2 > b
137 #endif
137 #endif
138
138
139 bad config
139 bad config
140 $ hg merge 1 --config merge.checkunknown=x
140 $ hg merge 1 --config merge.checkunknown=x
141 config error: merge.checkunknown not valid ('x' is none of 'abort', 'ignore', 'warn')
141 config error: merge.checkunknown not valid ('x' is none of 'abort', 'ignore', 'warn')
142 [30]
142 [30]
143 this merge should fail
143 this merge should fail
144 $ hg merge 1 --config merge.checkunknown=abort
144 $ hg merge 1 --config merge.checkunknown=abort
145 b: untracked file differs
145 b: untracked file differs
146 abort: untracked files in working directory differ from files in requested revision
146 abort: untracked files in working directory differ from files in requested revision
147 [20]
147 [20]
148
148
149 this merge should warn
149 this merge should warn
150 $ hg merge 1 --config merge.checkunknown=warn
150 $ hg merge 1 --config merge.checkunknown=warn
151 b: replacing untracked file
151 b: replacing untracked file
152 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
152 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
153 (branch merge, don't forget to commit)
153 (branch merge, don't forget to commit)
154 $ cat b.orig
154 $ cat b.orig
155 This is file b2
155 This is file b2
156 $ hg up --clean 2
156 $ hg up --clean 2
157 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
157 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
158 $ mv b.orig b
158 $ mv b.orig b
159
159
160 this merge should silently ignore
160 this merge should silently ignore
161 $ cat b
161 $ cat b
162 This is file b2
162 This is file b2
163 $ hg merge 1 --config merge.checkunknown=ignore
163 $ hg merge 1 --config merge.checkunknown=ignore
164 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
164 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
165 (branch merge, don't forget to commit)
165 (branch merge, don't forget to commit)
166
166
167 merge.checkignored
167 merge.checkignored
168 $ hg up --clean 1
168 $ hg up --clean 1
169 1 files updated, 0 files merged, 1 files removed, 0 files unresolved
169 1 files updated, 0 files merged, 1 files removed, 0 files unresolved
170 $ cat >> .hgignore << EOF
170 $ cat >> .hgignore << EOF
171 > remoteignored
171 > remoteignored
172 > EOF
172 > EOF
173 $ echo This is file localignored3 > localignored
173 $ echo This is file localignored3 > localignored
174 $ echo This is file remoteignored3 > remoteignored
174 $ echo This is file remoteignored3 > remoteignored
175 $ hg add .hgignore localignored remoteignored
175 $ hg add .hgignore localignored remoteignored
176 $ hg commit -m "commit #3"
176 $ hg commit -m "commit #3"
177
177
178 $ hg up 2
178 $ hg up 2
179 1 files updated, 0 files merged, 4 files removed, 0 files unresolved
179 1 files updated, 0 files merged, 4 files removed, 0 files unresolved
180 $ cat >> .hgignore << EOF
180 $ cat >> .hgignore << EOF
181 > localignored
181 > localignored
182 > EOF
182 > EOF
183 $ hg add .hgignore
183 $ hg add .hgignore
184 $ hg commit -m "commit #4"
184 $ hg commit -m "commit #4"
185
185
186 remote .hgignore shouldn't be used for determining whether a file is ignored
186 remote .hgignore shouldn't be used for determining whether a file is ignored
187 $ echo This is file remoteignored4 > remoteignored
187 $ echo This is file remoteignored4 > remoteignored
188 $ hg merge 3 --config merge.checkignored=ignore --config merge.checkunknown=abort
188 $ hg merge 3 --config merge.checkignored=ignore --config merge.checkunknown=abort
189 remoteignored: untracked file differs
189 remoteignored: untracked file differs
190 abort: untracked files in working directory differ from files in requested revision
190 abort: untracked files in working directory differ from files in requested revision
191 [20]
191 [20]
192 $ hg merge 3 --config merge.checkignored=abort --config merge.checkunknown=ignore
192 $ hg merge 3 --config merge.checkignored=abort --config merge.checkunknown=ignore
193 merging .hgignore
193 merging .hgignore
194 merging for .hgignore
194 merging for .hgignore
195 3 files updated, 1 files merged, 0 files removed, 0 files unresolved
195 3 files updated, 1 files merged, 0 files removed, 0 files unresolved
196 (branch merge, don't forget to commit)
196 (branch merge, don't forget to commit)
197 $ cat remoteignored
197 $ cat remoteignored
198 This is file remoteignored3
198 This is file remoteignored3
199 $ cat remoteignored.orig
199 $ cat remoteignored.orig
200 This is file remoteignored4
200 This is file remoteignored4
201 $ rm remoteignored.orig
201 $ rm remoteignored.orig
202
202
203 local .hgignore should be used for that
203 local .hgignore should be used for that
204 $ hg up --clean 4
204 $ hg up --clean 4
205 1 files updated, 0 files merged, 3 files removed, 0 files unresolved
205 1 files updated, 0 files merged, 3 files removed, 0 files unresolved
206 $ echo This is file localignored4 > localignored
206 $ echo This is file localignored4 > localignored
207 also test other conflicting files to see we output the full set of warnings
207 also test other conflicting files to see we output the full set of warnings
208 $ echo This is file b2 > b
208 $ echo This is file b2 > b
209 $ hg merge 3 --config merge.checkignored=abort --config merge.checkunknown=abort
209 $ hg merge 3 --config merge.checkignored=abort --config merge.checkunknown=abort
210 b: untracked file differs
210 b: untracked file differs
211 localignored: untracked file differs
211 localignored: untracked file differs
212 abort: untracked files in working directory differ from files in requested revision
212 abort: untracked files in working directory differ from files in requested revision
213 [20]
213 [20]
214 $ hg merge 3 --config merge.checkignored=abort --config merge.checkunknown=ignore
214 $ hg merge 3 --config merge.checkignored=abort --config merge.checkunknown=ignore
215 localignored: untracked file differs
215 localignored: untracked file differs
216 abort: untracked files in working directory differ from files in requested revision
216 abort: untracked files in working directory differ from files in requested revision
217 [20]
217 [20]
218 $ hg merge 3 --config merge.checkignored=warn --config merge.checkunknown=abort
218 $ hg merge 3 --config merge.checkignored=warn --config merge.checkunknown=abort
219 b: untracked file differs
219 b: untracked file differs
220 abort: untracked files in working directory differ from files in requested revision
220 abort: untracked files in working directory differ from files in requested revision
221 [20]
221 [20]
222 $ hg merge 3 --config merge.checkignored=warn --config merge.checkunknown=warn
222 $ hg merge 3 --config merge.checkignored=warn --config merge.checkunknown=warn
223 b: replacing untracked file
223 b: replacing untracked file
224 localignored: replacing untracked file
224 localignored: replacing untracked file
225 merging .hgignore
225 merging .hgignore
226 merging for .hgignore
226 merging for .hgignore
227 3 files updated, 1 files merged, 0 files removed, 0 files unresolved
227 3 files updated, 1 files merged, 0 files removed, 0 files unresolved
228 (branch merge, don't forget to commit)
228 (branch merge, don't forget to commit)
229 $ cat localignored
229 $ cat localignored
230 This is file localignored3
230 This is file localignored3
231 $ cat localignored.orig
231 $ cat localignored.orig
232 This is file localignored4
232 This is file localignored4
233 $ rm localignored.orig
233 $ rm localignored.orig
234
234
235 $ cat b.orig
235 $ cat b.orig
236 This is file b2
236 This is file b2
237 $ hg up --clean 2
237 $ hg up --clean 2
238 0 files updated, 0 files merged, 4 files removed, 0 files unresolved
238 0 files updated, 0 files merged, 4 files removed, 0 files unresolved
239 $ mv b.orig b
239 $ mv b.orig b
240
240
241 this merge of b should work
241 this merge of b should work
242 $ cat b
242 $ cat b
243 This is file b2
243 This is file b2
244 $ hg merge -f 1
244 $ hg merge -f 1
245 merging b
245 merging b
246 merging for b
246 merging for b
247 0 files updated, 1 files merged, 0 files removed, 0 files unresolved
247 0 files updated, 1 files merged, 0 files removed, 0 files unresolved
248 (branch merge, don't forget to commit)
248 (branch merge, don't forget to commit)
249 $ hg diff --nodates
249 $ hg diff --nodates
250 diff -r 49035e18a8e6 b
250 diff -r 49035e18a8e6 b
251 --- /dev/null
251 --- /dev/null
252 +++ b/b
252 +++ b/b
253 @@ -0,0 +1,1 @@
253 @@ -0,0 +1,1 @@
254 +This is file b2
254 +This is file b2
255 $ hg status
255 $ hg status
256 M b
256 M b
257 $ cd ..; rm -r t
257 $ cd ..; rm -r t
258
258
259 $ hg init t
259 $ hg init t
260 $ cd t
260 $ cd t
261 $ echo This is file a1 > a
261 $ echo This is file a1 > a
262 $ hg add a
262 $ hg add a
263 $ hg commit -m "commit #0"
263 $ hg commit -m "commit #0"
264 $ echo This is file b1 > b
264 $ echo This is file b1 > b
265 $ hg add b
265 $ hg add b
266 $ hg commit -m "commit #1"
266 $ hg commit -m "commit #1"
267 $ echo This is file b22 > b
267 $ echo This is file b22 > b
268 $ hg commit -m "commit #2"
268 $ hg commit -m "commit #2"
269 $ hg update 1
269 $ hg update 1
270 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
270 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
271 $ echo This is file c1 > c
271 $ echo This is file c1 > c
272 $ hg add c
272 $ hg add c
273 $ hg commit -m "commit #3"
273 $ hg commit -m "commit #3"
274 created new head
274 created new head
275
275
276 Contents of b should be "this is file b1"
276 Contents of b should be "this is file b1"
277 $ cat b
277 $ cat b
278 This is file b1
278 This is file b1
279
279
280 $ echo This is file b22 > b
280 $ echo This is file b22 > b
281 merge fails
281 merge fails
282 $ hg merge 2
282 $ hg merge 2
283 abort: uncommitted changes
283 abort: uncommitted changes
284 (use 'hg status' to list changes)
284 (use 'hg status' to list changes)
285 [20]
285 [20]
286 merge expected!
286 merge expected!
287 $ hg merge -f 2
287 $ hg merge -f 2
288 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
288 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
289 (branch merge, don't forget to commit)
289 (branch merge, don't forget to commit)
290 $ hg diff --nodates
290 $ hg diff --nodates
291 diff -r 85de557015a8 b
291 diff -r 85de557015a8 b
292 --- a/b
292 --- a/b
293 +++ b/b
293 +++ b/b
294 @@ -1,1 +1,1 @@
294 @@ -1,1 +1,1 @@
295 -This is file b1
295 -This is file b1
296 +This is file b22
296 +This is file b22
297 $ hg status
297 $ hg status
298 M b
298 M b
299 $ cd ..; rm -r t
299 $ cd ..; rm -r t
300
300
301 $ hg init t
301 $ hg init t
302 $ cd t
302 $ cd t
303 $ echo This is file a1 > a
303 $ echo This is file a1 > a
304 $ hg add a
304 $ hg add a
305 $ hg commit -m "commit #0"
305 $ hg commit -m "commit #0"
306 $ echo This is file b1 > b
306 $ echo This is file b1 > b
307 $ hg add b
307 $ hg add b
308 $ hg commit -m "commit #1"
308 $ hg commit -m "commit #1"
309 $ echo This is file b22 > b
309 $ echo This is file b22 > b
310 $ hg commit -m "commit #2"
310 $ hg commit -m "commit #2"
311 $ hg update 1
311 $ hg update 1
312 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
312 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
313 $ echo This is file c1 > c
313 $ echo This is file c1 > c
314 $ hg add c
314 $ hg add c
315 $ hg commit -m "commit #3"
315 $ hg commit -m "commit #3"
316 created new head
316 created new head
317 $ echo This is file b33 > b
317 $ echo This is file b33 > b
318 merge of b should fail
318 merge of b should fail
319 $ hg merge 2
319 $ hg merge 2
320 abort: uncommitted changes
320 abort: uncommitted changes
321 (use 'hg status' to list changes)
321 (use 'hg status' to list changes)
322 [20]
322 [20]
323 merge of b expected
323 merge of b expected
324 $ hg merge -f 2
324 $ hg merge -f 2
325 merging b
325 merging b
326 merging for b
326 merging for b
327 0 files updated, 1 files merged, 0 files removed, 0 files unresolved
327 0 files updated, 1 files merged, 0 files removed, 0 files unresolved
328 (branch merge, don't forget to commit)
328 (branch merge, don't forget to commit)
329 $ hg diff --nodates
329 $ hg diff --nodates
330 diff -r 85de557015a8 b
330 diff -r 85de557015a8 b
331 --- a/b
331 --- a/b
332 +++ b/b
332 +++ b/b
333 @@ -1,1 +1,1 @@
333 @@ -1,1 +1,1 @@
334 -This is file b1
334 -This is file b1
335 +This is file b33
335 +This is file b33
336 $ hg status
336 $ hg status
337 M b
337 M b
338
338
339 Test for issue2364
339 Test for issue2364
340
340
341 $ hg up -qC .
341 $ hg up -qC .
342 $ hg rm b
342 $ hg rm b
343 $ hg ci -md
343 $ hg ci -md
344 $ hg revert -r -2 b
344 $ hg revert -r -2 b
345 $ hg up -q -- -2
345 $ hg up -q -- -2
346
346
347 Test that updated files are treated as "modified", when
347 Test that updated files are treated as "modified", when
348 'merge.update()' is aborted before 'merge.recordupdates()' (= parents
348 'merge.update()' is aborted before 'merge.recordupdates()' (= parents
349 aren't changed), even if none of mode, size and timestamp of them
349 aren't changed), even if none of mode, size and timestamp of them
350 isn't changed on the filesystem (see also issue4583).
350 isn't changed on the filesystem (see also issue4583).
351
351
352 This test is now "best effort" as the mechanism to prevent such race are
353 getting better, it get more complicated to test a specific scenario that would
354 trigger it. If you see flakyness here, there is a race.
355
352 $ cat > $TESTTMP/abort.py <<EOF
356 $ cat > $TESTTMP/abort.py <<EOF
353 > from __future__ import absolute_import
357 > from __future__ import absolute_import
354 > # emulate aborting before "recordupdates()". in this case, files
358 > # emulate aborting before "recordupdates()". in this case, files
355 > # are changed without updating dirstate
359 > # are changed without updating dirstate
356 > from mercurial import (
360 > from mercurial import (
357 > error,
361 > error,
358 > extensions,
362 > extensions,
359 > merge,
363 > merge,
360 > )
364 > )
361 > def applyupdates(orig, *args, **kwargs):
365 > def applyupdates(orig, *args, **kwargs):
362 > orig(*args, **kwargs)
366 > orig(*args, **kwargs)
363 > raise error.Abort(b'intentional aborting')
367 > raise error.Abort(b'intentional aborting')
364 > def extsetup(ui):
368 > def extsetup(ui):
365 > extensions.wrapfunction(merge, "applyupdates", applyupdates)
369 > extensions.wrapfunction(merge, "applyupdates", applyupdates)
366 > EOF
370 > EOF
367
371
368 $ cat >> .hg/hgrc <<EOF
369 > [fakedirstatewritetime]
370 > # emulate invoking dirstate.write() via repo.status()
371 > # at 2000-01-01 00:00
372 > fakenow = 200001010000
373 > EOF
374
375 (file gotten from other revision)
372 (file gotten from other revision)
376
373
377 $ hg update -q -C 2
374 $ hg update -q -C 2
378 $ echo 'THIS IS FILE B5' > b
375 $ echo 'THIS IS FILE B5' > b
379 $ hg commit -m 'commit #5'
376 $ hg commit -m 'commit #5'
380
377
381 $ hg update -q -C 3
378 $ hg update -q -C 3
382 $ cat b
379 $ cat b
383 This is file b1
380 This is file b1
384 $ touch -t 200001010000 b
385 $ hg debugrebuildstate
386
387 $ cat >> .hg/hgrc <<EOF
381 $ cat >> .hg/hgrc <<EOF
388 > [extensions]
382 > [extensions]
389 > fakedirstatewritetime = $TESTDIR/fakedirstatewritetime.py
390 > abort = $TESTTMP/abort.py
383 > abort = $TESTTMP/abort.py
391 > EOF
384 > EOF
392 $ hg merge 5
385 $ hg merge 5
393 abort: intentional aborting
386 abort: intentional aborting
394 [255]
387 [255]
395 $ cat >> .hg/hgrc <<EOF
388 $ cat >> .hg/hgrc <<EOF
396 > [extensions]
389 > [extensions]
397 > fakedirstatewritetime = !
398 > abort = !
390 > abort = !
399 > EOF
391 > EOF
400
392
401 $ cat b
393 $ cat b
402 THIS IS FILE B5
394 THIS IS FILE B5
403 $ touch -t 200001010000 b
404 $ hg status -A b
395 $ hg status -A b
405 M b
396 M b
406
397
407 (file merged from other revision)
398 (file merged from other revision)
408
399
409 $ hg update -q -C 3
400 $ hg update -q -C 3
410 $ echo 'this is file b6' > b
401 $ echo 'this is file b6' > b
411 $ hg commit -m 'commit #6'
402 $ hg commit -m 'commit #6'
412 created new head
403 created new head
413
404
414 $ cat b
405 $ cat b
415 this is file b6
406 this is file b6
416 $ touch -t 200001010000 b
407 $ hg status
417 $ hg debugrebuildstate
418
408
419 $ cat >> .hg/hgrc <<EOF
409 $ cat >> .hg/hgrc <<EOF
420 > [extensions]
410 > [extensions]
421 > fakedirstatewritetime = $TESTDIR/fakedirstatewritetime.py
422 > abort = $TESTTMP/abort.py
411 > abort = $TESTTMP/abort.py
423 > EOF
412 > EOF
424 $ hg merge --tool internal:other 5
413 $ hg merge --tool internal:other 5
425 abort: intentional aborting
414 abort: intentional aborting
426 [255]
415 [255]
427 $ cat >> .hg/hgrc <<EOF
416 $ cat >> .hg/hgrc <<EOF
428 > [extensions]
417 > [extensions]
429 > fakedirstatewritetime = !
430 > abort = !
418 > abort = !
431 > EOF
419 > EOF
432
420
433 $ cat b
421 $ cat b
434 THIS IS FILE B5
422 THIS IS FILE B5
435 $ touch -t 200001010000 b
436 $ hg status -A b
423 $ hg status -A b
437 M b
424 M b
438
425
439 $ cd ..
426 $ cd ..
@@ -1,2035 +1,2019 b''
1 Let commit recurse into subrepos by default to match pre-2.0 behavior:
1 Let commit recurse into subrepos by default to match pre-2.0 behavior:
2
2
3 $ echo "[ui]" >> $HGRCPATH
3 $ echo "[ui]" >> $HGRCPATH
4 $ echo "commitsubrepos = Yes" >> $HGRCPATH
4 $ echo "commitsubrepos = Yes" >> $HGRCPATH
5
5
6 $ hg init t
6 $ hg init t
7 $ cd t
7 $ cd t
8
8
9 first revision, no sub
9 first revision, no sub
10
10
11 $ echo a > a
11 $ echo a > a
12 $ hg ci -Am0
12 $ hg ci -Am0
13 adding a
13 adding a
14
14
15 add first sub
15 add first sub
16
16
17 $ echo s = s > .hgsub
17 $ echo s = s > .hgsub
18 $ hg add .hgsub
18 $ hg add .hgsub
19 $ hg init s
19 $ hg init s
20 $ echo a > s/a
20 $ echo a > s/a
21
21
22 Issue2232: committing a subrepo without .hgsub
22 Issue2232: committing a subrepo without .hgsub
23
23
24 $ hg ci -mbad s
24 $ hg ci -mbad s
25 abort: can't commit subrepos without .hgsub
25 abort: can't commit subrepos without .hgsub
26 [255]
26 [255]
27
27
28 $ hg -R s add s/a
28 $ hg -R s add s/a
29 $ hg files -S
29 $ hg files -S
30 .hgsub
30 .hgsub
31 a
31 a
32 s/a
32 s/a
33
33
34 `hg files` respects ui.relative-paths
34 `hg files` respects ui.relative-paths
35 BROKEN: shows subrepo paths relative to the subrepo
35 BROKEN: shows subrepo paths relative to the subrepo
36 $ hg files -S --config ui.relative-paths=no
36 $ hg files -S --config ui.relative-paths=no
37 .hgsub
37 .hgsub
38 a
38 a
39 s/a
39 s/a
40
40
41 $ hg -R s ci -Ams0
41 $ hg -R s ci -Ams0
42 $ hg sum
42 $ hg sum
43 parent: 0:f7b1eb17ad24 tip
43 parent: 0:f7b1eb17ad24 tip
44 0
44 0
45 branch: default
45 branch: default
46 commit: 1 added, 1 subrepos
46 commit: 1 added, 1 subrepos
47 update: (current)
47 update: (current)
48 phases: 1 draft
48 phases: 1 draft
49 $ hg ci -m1
49 $ hg ci -m1
50
50
51 test handling .hgsubstate "added" explicitly.
51 test handling .hgsubstate "added" explicitly.
52
52
53 $ hg parents --template '{node}\n{files}\n'
53 $ hg parents --template '{node}\n{files}\n'
54 7cf8cfea66e410e8e3336508dfeec07b3192de51
54 7cf8cfea66e410e8e3336508dfeec07b3192de51
55 .hgsub .hgsubstate
55 .hgsub .hgsubstate
56 $ hg rollback -q
56 $ hg rollback -q
57 $ hg add .hgsubstate
57 $ hg add .hgsubstate
58 $ hg ci -m1
58 $ hg ci -m1
59 $ hg parents --template '{node}\n{files}\n'
59 $ hg parents --template '{node}\n{files}\n'
60 7cf8cfea66e410e8e3336508dfeec07b3192de51
60 7cf8cfea66e410e8e3336508dfeec07b3192de51
61 .hgsub .hgsubstate
61 .hgsub .hgsubstate
62
62
63 Subrepopath which overlaps with filepath, does not change warnings in remove()
63 Subrepopath which overlaps with filepath, does not change warnings in remove()
64
64
65 $ mkdir snot
65 $ mkdir snot
66 $ touch snot/file
66 $ touch snot/file
67 $ hg remove -S snot/file
67 $ hg remove -S snot/file
68 not removing snot/file: file is untracked
68 not removing snot/file: file is untracked
69 [1]
69 [1]
70 $ hg cat snot/filenot
70 $ hg cat snot/filenot
71 snot/filenot: no such file in rev 7cf8cfea66e4
71 snot/filenot: no such file in rev 7cf8cfea66e4
72 [1]
72 [1]
73 $ rm -r snot
73 $ rm -r snot
74
74
75 Revert subrepo and test subrepo fileset keyword:
75 Revert subrepo and test subrepo fileset keyword:
76
76
77 $ echo b > s/a
77 $ echo b > s/a
78 $ hg revert --dry-run "set:subrepo('glob:s*')"
78 $ hg revert --dry-run "set:subrepo('glob:s*')"
79 reverting subrepo s
79 reverting subrepo s
80 reverting s/a
80 reverting s/a
81 $ cat s/a
81 $ cat s/a
82 b
82 b
83 $ hg revert "set:subrepo('glob:s*')"
83 $ hg revert "set:subrepo('glob:s*')"
84 reverting subrepo s
84 reverting subrepo s
85 reverting s/a
85 reverting s/a
86 $ cat s/a
86 $ cat s/a
87 a
87 a
88 $ rm s/a.orig
88 $ rm s/a.orig
89
89
90 Revert subrepo with no backup. The "reverting s/a" line is gone since
90 Revert subrepo with no backup. The "reverting s/a" line is gone since
91 we're really running 'hg update' in the subrepo:
91 we're really running 'hg update' in the subrepo:
92
92
93 $ echo b > s/a
93 $ echo b > s/a
94 $ hg revert --no-backup s
94 $ hg revert --no-backup s
95 reverting subrepo s
95 reverting subrepo s
96
96
97 Issue2022: update -C
97 Issue2022: update -C
98
98
99 $ echo b > s/a
99 $ echo b > s/a
100 $ hg sum
100 $ hg sum
101 parent: 1:7cf8cfea66e4 tip
101 parent: 1:7cf8cfea66e4 tip
102 1
102 1
103 branch: default
103 branch: default
104 commit: 1 subrepos
104 commit: 1 subrepos
105 update: (current)
105 update: (current)
106 phases: 2 draft
106 phases: 2 draft
107 $ hg co -C 1
107 $ hg co -C 1
108 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
108 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
109 $ hg sum
109 $ hg sum
110 parent: 1:7cf8cfea66e4 tip
110 parent: 1:7cf8cfea66e4 tip
111 1
111 1
112 branch: default
112 branch: default
113 commit: (clean)
113 commit: (clean)
114 update: (current)
114 update: (current)
115 phases: 2 draft
115 phases: 2 draft
116
116
117 commands that require a clean repo should respect subrepos
117 commands that require a clean repo should respect subrepos
118
118
119 $ echo b >> s/a
119 $ echo b >> s/a
120 $ hg backout tip
120 $ hg backout tip
121 abort: uncommitted changes in subrepository "s"
121 abort: uncommitted changes in subrepository "s"
122 [255]
122 [255]
123 $ hg revert -C -R s s/a
123 $ hg revert -C -R s s/a
124
124
125 add sub sub
125 add sub sub
126
126
127 $ echo ss = ss > s/.hgsub
127 $ echo ss = ss > s/.hgsub
128 $ hg init s/ss
128 $ hg init s/ss
129 $ echo a > s/ss/a
129 $ echo a > s/ss/a
130 $ hg -R s add s/.hgsub
130 $ hg -R s add s/.hgsub
131 $ hg -R s/ss add s/ss/a
131 $ hg -R s/ss add s/ss/a
132 $ hg sum
132 $ hg sum
133 parent: 1:7cf8cfea66e4 tip
133 parent: 1:7cf8cfea66e4 tip
134 1
134 1
135 branch: default
135 branch: default
136 commit: 1 subrepos
136 commit: 1 subrepos
137 update: (current)
137 update: (current)
138 phases: 2 draft
138 phases: 2 draft
139 $ hg ci -m2
139 $ hg ci -m2
140 committing subrepository s
140 committing subrepository s
141 committing subrepository s/ss
141 committing subrepository s/ss
142 $ hg sum
142 $ hg sum
143 parent: 2:df30734270ae tip
143 parent: 2:df30734270ae tip
144 2
144 2
145 branch: default
145 branch: default
146 commit: (clean)
146 commit: (clean)
147 update: (current)
147 update: (current)
148 phases: 3 draft
148 phases: 3 draft
149
149
150 test handling .hgsubstate "modified" explicitly.
150 test handling .hgsubstate "modified" explicitly.
151
151
152 $ hg parents --template '{node}\n{files}\n'
152 $ hg parents --template '{node}\n{files}\n'
153 df30734270ae757feb35e643b7018e818e78a9aa
153 df30734270ae757feb35e643b7018e818e78a9aa
154 .hgsubstate
154 .hgsubstate
155 $ hg rollback -q
155 $ hg rollback -q
156 $ hg status -A .hgsubstate
156 $ hg status -A .hgsubstate
157 M .hgsubstate
157 M .hgsubstate
158 $ hg ci -m2
158 $ hg ci -m2
159 $ hg parents --template '{node}\n{files}\n'
159 $ hg parents --template '{node}\n{files}\n'
160 df30734270ae757feb35e643b7018e818e78a9aa
160 df30734270ae757feb35e643b7018e818e78a9aa
161 .hgsubstate
161 .hgsubstate
162
162
163 bump sub rev (and check it is ignored by ui.commitsubrepos)
163 bump sub rev (and check it is ignored by ui.commitsubrepos)
164
164
165 $ echo b > s/a
165 $ echo b > s/a
166 $ hg -R s ci -ms1
166 $ hg -R s ci -ms1
167 $ hg --config ui.commitsubrepos=no ci -m3
167 $ hg --config ui.commitsubrepos=no ci -m3
168
168
169 leave sub dirty (and check ui.commitsubrepos=no aborts the commit)
169 leave sub dirty (and check ui.commitsubrepos=no aborts the commit)
170
170
171 $ echo c > s/a
171 $ echo c > s/a
172 $ hg --config ui.commitsubrepos=no ci -m4
172 $ hg --config ui.commitsubrepos=no ci -m4
173 abort: uncommitted changes in subrepository "s"
173 abort: uncommitted changes in subrepository "s"
174 (use --subrepos for recursive commit)
174 (use --subrepos for recursive commit)
175 [255]
175 [255]
176 $ hg id
176 $ hg id
177 f6affe3fbfaa+ tip
177 f6affe3fbfaa+ tip
178 $ hg -R s ci -mc
178 $ hg -R s ci -mc
179 $ hg id
179 $ hg id
180 f6affe3fbfaa+ tip
180 f6affe3fbfaa+ tip
181 $ echo d > s/a
181 $ echo d > s/a
182 $ hg ci -m4
182 $ hg ci -m4
183 committing subrepository s
183 committing subrepository s
184 $ hg tip -R s
184 $ hg tip -R s
185 changeset: 4:02dcf1d70411
185 changeset: 4:02dcf1d70411
186 tag: tip
186 tag: tip
187 user: test
187 user: test
188 date: Thu Jan 01 00:00:00 1970 +0000
188 date: Thu Jan 01 00:00:00 1970 +0000
189 summary: 4
189 summary: 4
190
190
191
191
192 check caching
192 check caching
193
193
194 $ hg co 0
194 $ hg co 0
195 0 files updated, 0 files merged, 2 files removed, 0 files unresolved
195 0 files updated, 0 files merged, 2 files removed, 0 files unresolved
196 $ hg debugsub
196 $ hg debugsub
197
197
198 restore
198 restore
199
199
200 $ hg co
200 $ hg co
201 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
201 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
202 $ hg debugsub
202 $ hg debugsub
203 path s
203 path s
204 source s
204 source s
205 revision 02dcf1d704118aee3ee306ccfa1910850d5b05ef
205 revision 02dcf1d704118aee3ee306ccfa1910850d5b05ef
206
206
207 new branch for merge tests
207 new branch for merge tests
208
208
209 $ hg co 1
209 $ hg co 1
210 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
210 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
211 $ echo t = t >> .hgsub
211 $ echo t = t >> .hgsub
212 $ hg init t
212 $ hg init t
213 $ echo t > t/t
213 $ echo t > t/t
214 $ hg -R t add t
214 $ hg -R t add t
215 adding t/t
215 adding t/t
216
216
217 5
217 5
218
218
219 $ hg ci -m5 # add sub
219 $ hg ci -m5 # add sub
220 committing subrepository t
220 committing subrepository t
221 created new head
221 created new head
222 $ echo t2 > t/t
222 $ echo t2 > t/t
223
223
224 6
224 6
225
225
226 $ hg st -R s
226 $ hg st -R s
227 $ hg ci -m6 # change sub
227 $ hg ci -m6 # change sub
228 committing subrepository t
228 committing subrepository t
229 $ hg debugsub
229 $ hg debugsub
230 path s
230 path s
231 source s
231 source s
232 revision e4ece1bf43360ddc8f6a96432201a37b7cd27ae4
232 revision e4ece1bf43360ddc8f6a96432201a37b7cd27ae4
233 path t
233 path t
234 source t
234 source t
235 revision 6747d179aa9a688023c4b0cad32e4c92bb7f34ad
235 revision 6747d179aa9a688023c4b0cad32e4c92bb7f34ad
236 $ echo t3 > t/t
236 $ echo t3 > t/t
237
237
238 7
238 7
239
239
240 $ hg ci -m7 # change sub again for conflict test
240 $ hg ci -m7 # change sub again for conflict test
241 committing subrepository t
241 committing subrepository t
242 $ hg rm .hgsub
242 $ hg rm .hgsub
243
243
244 8
244 8
245
245
246 $ hg ci -m8 # remove sub
246 $ hg ci -m8 # remove sub
247
247
248 test handling .hgsubstate "removed" explicitly.
248 test handling .hgsubstate "removed" explicitly.
249
249
250 $ hg parents --template '{node}\n{files}\n'
250 $ hg parents --template '{node}\n{files}\n'
251 96615c1dad2dc8e3796d7332c77ce69156f7b78e
251 96615c1dad2dc8e3796d7332c77ce69156f7b78e
252 .hgsub .hgsubstate
252 .hgsub .hgsubstate
253 $ hg rollback -q
253 $ hg rollback -q
254 $ hg remove .hgsubstate
254 $ hg remove .hgsubstate
255 $ hg ci -m8
255 $ hg ci -m8
256 $ hg parents --template '{node}\n{files}\n'
256 $ hg parents --template '{node}\n{files}\n'
257 96615c1dad2dc8e3796d7332c77ce69156f7b78e
257 96615c1dad2dc8e3796d7332c77ce69156f7b78e
258 .hgsub .hgsubstate
258 .hgsub .hgsubstate
259
259
260 merge tests
260 merge tests
261
261
262 $ hg co -C 3
262 $ hg co -C 3
263 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
263 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
264 $ hg merge 5 # test adding
264 $ hg merge 5 # test adding
265 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
265 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
266 (branch merge, don't forget to commit)
266 (branch merge, don't forget to commit)
267 $ hg debugsub
267 $ hg debugsub
268 path s
268 path s
269 source s
269 source s
270 revision fc627a69481fcbe5f1135069e8a3881c023e4cf5
270 revision fc627a69481fcbe5f1135069e8a3881c023e4cf5
271 path t
271 path t
272 source t
272 source t
273 revision 60ca1237c19474e7a3978b0dc1ca4e6f36d51382
273 revision 60ca1237c19474e7a3978b0dc1ca4e6f36d51382
274 $ hg ci -m9
274 $ hg ci -m9
275 created new head
275 created new head
276 $ hg merge 6 --debug # test change
276 $ hg merge 6 --debug # test change
277 resolving manifests
277 resolving manifests
278 branchmerge: True, force: False, partial: False
278 branchmerge: True, force: False, partial: False
279 ancestor: 1f14a2e2d3ec, local: f0d2028bf86d+, remote: 1831e14459c4
279 ancestor: 1f14a2e2d3ec, local: f0d2028bf86d+, remote: 1831e14459c4
280 starting 4 threads for background file closing (?)
280 starting 4 threads for background file closing (?)
281 .hgsubstate: versions differ -> m (premerge)
281 .hgsubstate: versions differ -> m (premerge)
282 subrepo merge f0d2028bf86d+ 1831e14459c4 1f14a2e2d3ec
282 subrepo merge f0d2028bf86d+ 1831e14459c4 1f14a2e2d3ec
283 subrepo t: other changed, get t:6747d179aa9a688023c4b0cad32e4c92bb7f34ad:hg
283 subrepo t: other changed, get t:6747d179aa9a688023c4b0cad32e4c92bb7f34ad:hg
284 getting subrepo t
284 getting subrepo t
285 resolving manifests
285 resolving manifests
286 branchmerge: False, force: False, partial: False
286 branchmerge: False, force: False, partial: False
287 ancestor: 60ca1237c194, local: 60ca1237c194+, remote: 6747d179aa9a
287 ancestor: 60ca1237c194, local: 60ca1237c194+, remote: 6747d179aa9a
288 t: remote is newer -> g
288 t: remote is newer -> g
289 getting t
289 getting t
290 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
290 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
291 (branch merge, don't forget to commit)
291 (branch merge, don't forget to commit)
292 $ hg debugsub
292 $ hg debugsub
293 path s
293 path s
294 source s
294 source s
295 revision fc627a69481fcbe5f1135069e8a3881c023e4cf5
295 revision fc627a69481fcbe5f1135069e8a3881c023e4cf5
296 path t
296 path t
297 source t
297 source t
298 revision 6747d179aa9a688023c4b0cad32e4c92bb7f34ad
298 revision 6747d179aa9a688023c4b0cad32e4c92bb7f34ad
299 $ echo conflict > t/t
299 $ echo conflict > t/t
300 $ hg ci -m10
300 $ hg ci -m10
301 committing subrepository t
301 committing subrepository t
302 $ HGMERGE=internal:merge hg merge --debug 7 # test conflict
302 $ HGMERGE=internal:merge hg merge --debug 7 # test conflict
303 resolving manifests
303 resolving manifests
304 branchmerge: True, force: False, partial: False
304 branchmerge: True, force: False, partial: False
305 ancestor: 1831e14459c4, local: e45c8b14af55+, remote: f94576341bcf
305 ancestor: 1831e14459c4, local: e45c8b14af55+, remote: f94576341bcf
306 starting 4 threads for background file closing (?)
306 starting 4 threads for background file closing (?)
307 .hgsubstate: versions differ -> m (premerge)
307 .hgsubstate: versions differ -> m (premerge)
308 subrepo merge e45c8b14af55+ f94576341bcf 1831e14459c4
308 subrepo merge e45c8b14af55+ f94576341bcf 1831e14459c4
309 subrepo t: both sides changed
309 subrepo t: both sides changed
310 subrepository t diverged (local revision: 20a0db6fbf6c, remote revision: 7af322bc1198)
310 subrepository t diverged (local revision: 20a0db6fbf6c, remote revision: 7af322bc1198)
311 starting 4 threads for background file closing (?)
311 starting 4 threads for background file closing (?)
312 you can (m)erge, keep (l)ocal [working copy] or keep (r)emote [merge rev].
312 you can (m)erge, keep (l)ocal [working copy] or keep (r)emote [merge rev].
313 what do you want to do? m
313 what do you want to do? m
314 merging subrepository "t"
314 merging subrepository "t"
315 resolving manifests
315 resolving manifests
316 branchmerge: True, force: False, partial: False
316 branchmerge: True, force: False, partial: False
317 ancestor: 6747d179aa9a, local: 20a0db6fbf6c+, remote: 7af322bc1198
317 ancestor: 6747d179aa9a, local: 20a0db6fbf6c+, remote: 7af322bc1198
318 starting 4 threads for background file closing (?)
318 starting 4 threads for background file closing (?)
319 preserving t for resolve of t
319 preserving t for resolve of t
320 t: versions differ -> m (premerge)
320 t: versions differ -> m (premerge)
321 picked tool ':merge' for t (binary False symlink False changedelete False)
321 picked tool ':merge' for t (binary False symlink False changedelete False)
322 merging t
322 merging t
323 my t@20a0db6fbf6c+ other t@7af322bc1198 ancestor t@6747d179aa9a
323 my t@20a0db6fbf6c+ other t@7af322bc1198 ancestor t@6747d179aa9a
324 t: versions differ -> m (merge)
324 t: versions differ -> m (merge)
325 picked tool ':merge' for t (binary False symlink False changedelete False)
325 picked tool ':merge' for t (binary False symlink False changedelete False)
326 my t@20a0db6fbf6c+ other t@7af322bc1198 ancestor t@6747d179aa9a
326 my t@20a0db6fbf6c+ other t@7af322bc1198 ancestor t@6747d179aa9a
327 warning: conflicts while merging t! (edit, then use 'hg resolve --mark')
327 warning: conflicts while merging t! (edit, then use 'hg resolve --mark')
328 0 files updated, 0 files merged, 0 files removed, 1 files unresolved
328 0 files updated, 0 files merged, 0 files removed, 1 files unresolved
329 use 'hg resolve' to retry unresolved file merges or 'hg merge --abort' to abandon
329 use 'hg resolve' to retry unresolved file merges or 'hg merge --abort' to abandon
330 subrepo t: merge with t:7af322bc1198a32402fe903e0b7ebcfc5c9bf8f4:hg
330 subrepo t: merge with t:7af322bc1198a32402fe903e0b7ebcfc5c9bf8f4:hg
331 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
331 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
332 (branch merge, don't forget to commit)
332 (branch merge, don't forget to commit)
333
333
334 should conflict
334 should conflict
335
335
336 $ cat t/t
336 $ cat t/t
337 <<<<<<< local: 20a0db6fbf6c - test: 10
337 <<<<<<< local: 20a0db6fbf6c - test: 10
338 conflict
338 conflict
339 =======
339 =======
340 t3
340 t3
341 >>>>>>> other: 7af322bc1198 - test: 7
341 >>>>>>> other: 7af322bc1198 - test: 7
342
342
343 11: remove subrepo t
343 11: remove subrepo t
344
344
345 $ hg co -C 5
345 $ hg co -C 5
346 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
346 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
347 $ hg revert -r 4 .hgsub # remove t
347 $ hg revert -r 4 .hgsub # remove t
348 $ hg ci -m11
348 $ hg ci -m11
349 created new head
349 created new head
350 $ hg debugsub
350 $ hg debugsub
351 path s
351 path s
352 source s
352 source s
353 revision e4ece1bf43360ddc8f6a96432201a37b7cd27ae4
353 revision e4ece1bf43360ddc8f6a96432201a37b7cd27ae4
354
354
355 local removed, remote changed, keep changed
355 local removed, remote changed, keep changed
356
356
357 $ hg merge 6
357 $ hg merge 6
358 remote [merge rev] changed subrepository t which local [working copy] removed
358 remote [merge rev] changed subrepository t which local [working copy] removed
359 use (c)hanged version or (d)elete? c
359 use (c)hanged version or (d)elete? c
360 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
360 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
361 (branch merge, don't forget to commit)
361 (branch merge, don't forget to commit)
362 BROKEN: should include subrepo t
362 BROKEN: should include subrepo t
363 $ hg debugsub
363 $ hg debugsub
364 path s
364 path s
365 source s
365 source s
366 revision e4ece1bf43360ddc8f6a96432201a37b7cd27ae4
366 revision e4ece1bf43360ddc8f6a96432201a37b7cd27ae4
367 $ cat .hgsubstate
367 $ cat .hgsubstate
368 e4ece1bf43360ddc8f6a96432201a37b7cd27ae4 s
368 e4ece1bf43360ddc8f6a96432201a37b7cd27ae4 s
369 6747d179aa9a688023c4b0cad32e4c92bb7f34ad t
369 6747d179aa9a688023c4b0cad32e4c92bb7f34ad t
370 $ hg ci -m 'local removed, remote changed, keep changed'
370 $ hg ci -m 'local removed, remote changed, keep changed'
371 BROKEN: should include subrepo t
371 BROKEN: should include subrepo t
372 $ hg debugsub
372 $ hg debugsub
373 path s
373 path s
374 source s
374 source s
375 revision e4ece1bf43360ddc8f6a96432201a37b7cd27ae4
375 revision e4ece1bf43360ddc8f6a96432201a37b7cd27ae4
376 BROKEN: should include subrepo t
376 BROKEN: should include subrepo t
377 $ cat .hgsubstate
377 $ cat .hgsubstate
378 e4ece1bf43360ddc8f6a96432201a37b7cd27ae4 s
378 e4ece1bf43360ddc8f6a96432201a37b7cd27ae4 s
379 $ cat t/t
379 $ cat t/t
380 t2
380 t2
381
381
382 local removed, remote changed, keep removed
382 local removed, remote changed, keep removed
383
383
384 $ hg co -C 11
384 $ hg co -C 11
385 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
385 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
386 $ hg merge --config ui.interactive=true 6 <<EOF
386 $ hg merge --config ui.interactive=true 6 <<EOF
387 > d
387 > d
388 > EOF
388 > EOF
389 remote [merge rev] changed subrepository t which local [working copy] removed
389 remote [merge rev] changed subrepository t which local [working copy] removed
390 use (c)hanged version or (d)elete? d
390 use (c)hanged version or (d)elete? d
391 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
391 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
392 (branch merge, don't forget to commit)
392 (branch merge, don't forget to commit)
393 $ hg debugsub
393 $ hg debugsub
394 path s
394 path s
395 source s
395 source s
396 revision e4ece1bf43360ddc8f6a96432201a37b7cd27ae4
396 revision e4ece1bf43360ddc8f6a96432201a37b7cd27ae4
397 $ cat .hgsubstate
397 $ cat .hgsubstate
398 e4ece1bf43360ddc8f6a96432201a37b7cd27ae4 s
398 e4ece1bf43360ddc8f6a96432201a37b7cd27ae4 s
399 $ hg ci -m 'local removed, remote changed, keep removed'
399 $ hg ci -m 'local removed, remote changed, keep removed'
400 created new head
400 created new head
401 $ hg debugsub
401 $ hg debugsub
402 path s
402 path s
403 source s
403 source s
404 revision e4ece1bf43360ddc8f6a96432201a37b7cd27ae4
404 revision e4ece1bf43360ddc8f6a96432201a37b7cd27ae4
405 $ cat .hgsubstate
405 $ cat .hgsubstate
406 e4ece1bf43360ddc8f6a96432201a37b7cd27ae4 s
406 e4ece1bf43360ddc8f6a96432201a37b7cd27ae4 s
407
407
408 local changed, remote removed, keep changed
408 local changed, remote removed, keep changed
409
409
410 $ hg co -C 6
410 $ hg co -C 6
411 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
411 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
412 $ hg merge 11
412 $ hg merge 11
413 local [working copy] changed subrepository t which remote [merge rev] removed
413 local [working copy] changed subrepository t which remote [merge rev] removed
414 use (c)hanged version or (d)elete? c
414 use (c)hanged version or (d)elete? c
415 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
415 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
416 (branch merge, don't forget to commit)
416 (branch merge, don't forget to commit)
417 BROKEN: should include subrepo t
417 BROKEN: should include subrepo t
418 $ hg debugsub
418 $ hg debugsub
419 path s
419 path s
420 source s
420 source s
421 revision e4ece1bf43360ddc8f6a96432201a37b7cd27ae4
421 revision e4ece1bf43360ddc8f6a96432201a37b7cd27ae4
422 BROKEN: should include subrepo t
422 BROKEN: should include subrepo t
423 $ cat .hgsubstate
423 $ cat .hgsubstate
424 e4ece1bf43360ddc8f6a96432201a37b7cd27ae4 s
424 e4ece1bf43360ddc8f6a96432201a37b7cd27ae4 s
425 $ hg ci -m 'local changed, remote removed, keep changed'
425 $ hg ci -m 'local changed, remote removed, keep changed'
426 created new head
426 created new head
427 BROKEN: should include subrepo t
427 BROKEN: should include subrepo t
428 $ hg debugsub
428 $ hg debugsub
429 path s
429 path s
430 source s
430 source s
431 revision e4ece1bf43360ddc8f6a96432201a37b7cd27ae4
431 revision e4ece1bf43360ddc8f6a96432201a37b7cd27ae4
432 BROKEN: should include subrepo t
432 BROKEN: should include subrepo t
433 $ cat .hgsubstate
433 $ cat .hgsubstate
434 e4ece1bf43360ddc8f6a96432201a37b7cd27ae4 s
434 e4ece1bf43360ddc8f6a96432201a37b7cd27ae4 s
435 $ cat t/t
435 $ cat t/t
436 t2
436 t2
437
437
438 local changed, remote removed, keep removed
438 local changed, remote removed, keep removed
439
439
440 $ hg co -C 6
440 $ hg co -C 6
441 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
441 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
442 $ hg merge --config ui.interactive=true 11 <<EOF
442 $ hg merge --config ui.interactive=true 11 <<EOF
443 > d
443 > d
444 > EOF
444 > EOF
445 local [working copy] changed subrepository t which remote [merge rev] removed
445 local [working copy] changed subrepository t which remote [merge rev] removed
446 use (c)hanged version or (d)elete? d
446 use (c)hanged version or (d)elete? d
447 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
447 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
448 (branch merge, don't forget to commit)
448 (branch merge, don't forget to commit)
449 $ hg debugsub
449 $ hg debugsub
450 path s
450 path s
451 source s
451 source s
452 revision e4ece1bf43360ddc8f6a96432201a37b7cd27ae4
452 revision e4ece1bf43360ddc8f6a96432201a37b7cd27ae4
453 $ cat .hgsubstate
453 $ cat .hgsubstate
454 e4ece1bf43360ddc8f6a96432201a37b7cd27ae4 s
454 e4ece1bf43360ddc8f6a96432201a37b7cd27ae4 s
455 $ hg ci -m 'local changed, remote removed, keep removed'
455 $ hg ci -m 'local changed, remote removed, keep removed'
456 created new head
456 created new head
457 $ hg debugsub
457 $ hg debugsub
458 path s
458 path s
459 source s
459 source s
460 revision e4ece1bf43360ddc8f6a96432201a37b7cd27ae4
460 revision e4ece1bf43360ddc8f6a96432201a37b7cd27ae4
461 $ cat .hgsubstate
461 $ cat .hgsubstate
462 e4ece1bf43360ddc8f6a96432201a37b7cd27ae4 s
462 e4ece1bf43360ddc8f6a96432201a37b7cd27ae4 s
463
463
464 clean up to avoid having to fix up the tests below
464 clean up to avoid having to fix up the tests below
465
465
466 $ hg co -C 10
466 $ hg co -C 10
467 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
467 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
468 $ cat >> $HGRCPATH <<EOF
468 $ cat >> $HGRCPATH <<EOF
469 > [extensions]
469 > [extensions]
470 > strip=
470 > strip=
471 > EOF
471 > EOF
472 $ hg strip -r 11:15
472 $ hg strip -r 11:15
473 saved backup bundle to $TESTTMP/t/.hg/strip-backup/*-backup.hg (glob)
473 saved backup bundle to $TESTTMP/t/.hg/strip-backup/*-backup.hg (glob)
474
474
475 clone
475 clone
476
476
477 $ cd ..
477 $ cd ..
478 $ hg clone t tc
478 $ hg clone t tc
479 updating to branch default
479 updating to branch default
480 cloning subrepo s from $TESTTMP/t/s
480 cloning subrepo s from $TESTTMP/t/s
481 cloning subrepo s/ss from $TESTTMP/t/s/ss
481 cloning subrepo s/ss from $TESTTMP/t/s/ss
482 cloning subrepo t from $TESTTMP/t/t
482 cloning subrepo t from $TESTTMP/t/t
483 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
483 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
484 $ cd tc
484 $ cd tc
485 $ hg debugsub
485 $ hg debugsub
486 path s
486 path s
487 source s
487 source s
488 revision fc627a69481fcbe5f1135069e8a3881c023e4cf5
488 revision fc627a69481fcbe5f1135069e8a3881c023e4cf5
489 path t
489 path t
490 source t
490 source t
491 revision 20a0db6fbf6c3d2836e6519a642ae929bfc67c0e
491 revision 20a0db6fbf6c3d2836e6519a642ae929bfc67c0e
492 $ cd ..
492 $ cd ..
493
493
494 clone with subrepo disabled (update should fail)
494 clone with subrepo disabled (update should fail)
495
495
496 $ hg clone t -U tc2 --config subrepos.allowed=false
496 $ hg clone t -U tc2 --config subrepos.allowed=false
497 $ hg update -R tc2 --config subrepos.allowed=false
497 $ hg update -R tc2 --config subrepos.allowed=false
498 abort: subrepos not enabled
498 abort: subrepos not enabled
499 (see 'hg help config.subrepos' for details)
499 (see 'hg help config.subrepos' for details)
500 [255]
500 [255]
501 $ ls -A tc2
501 $ ls -A tc2
502 .hg
502 .hg
503 .hgsub
503 .hgsub
504 .hgsubstate
504 .hgsubstate
505 a
505 a
506
506
507 $ hg clone t tc3 --config subrepos.allowed=false
507 $ hg clone t tc3 --config subrepos.allowed=false
508 updating to branch default
508 updating to branch default
509 abort: subrepos not enabled
509 abort: subrepos not enabled
510 (see 'hg help config.subrepos' for details)
510 (see 'hg help config.subrepos' for details)
511 [255]
511 [255]
512 $ ls -A tc3
512 $ ls -A tc3
513 .hg
513 .hg
514 .hgsub
514 .hgsub
515 .hgsubstate
515 .hgsubstate
516 a
516 a
517
517
518 And again with just the hg type disabled
518 And again with just the hg type disabled
519
519
520 $ hg clone t -U tc4 --config subrepos.hg:allowed=false
520 $ hg clone t -U tc4 --config subrepos.hg:allowed=false
521 $ hg update -R tc4 --config subrepos.hg:allowed=false
521 $ hg update -R tc4 --config subrepos.hg:allowed=false
522 abort: hg subrepos not allowed
522 abort: hg subrepos not allowed
523 (see 'hg help config.subrepos' for details)
523 (see 'hg help config.subrepos' for details)
524 [255]
524 [255]
525 $ ls -A tc4
525 $ ls -A tc4
526 .hg
526 .hg
527 .hgsub
527 .hgsub
528 .hgsubstate
528 .hgsubstate
529 a
529 a
530
530
531 $ hg clone t tc5 --config subrepos.hg:allowed=false
531 $ hg clone t tc5 --config subrepos.hg:allowed=false
532 updating to branch default
532 updating to branch default
533 abort: hg subrepos not allowed
533 abort: hg subrepos not allowed
534 (see 'hg help config.subrepos' for details)
534 (see 'hg help config.subrepos' for details)
535 [255]
535 [255]
536 $ ls -A tc5
536 $ ls -A tc5
537 .hg
537 .hg
538 .hgsub
538 .hgsub
539 .hgsubstate
539 .hgsubstate
540 a
540 a
541
541
542 push
542 push
543
543
544 $ cd tc
544 $ cd tc
545 $ echo bah > t/t
545 $ echo bah > t/t
546 $ hg ci -m11
546 $ hg ci -m11
547 committing subrepository t
547 committing subrepository t
548 $ hg push
548 $ hg push
549 pushing to $TESTTMP/t
549 pushing to $TESTTMP/t
550 no changes made to subrepo s/ss since last push to $TESTTMP/t/s/ss
550 no changes made to subrepo s/ss since last push to $TESTTMP/t/s/ss
551 no changes made to subrepo s since last push to $TESTTMP/t/s
551 no changes made to subrepo s since last push to $TESTTMP/t/s
552 pushing subrepo t to $TESTTMP/t/t
552 pushing subrepo t to $TESTTMP/t/t
553 searching for changes
553 searching for changes
554 adding changesets
554 adding changesets
555 adding manifests
555 adding manifests
556 adding file changes
556 adding file changes
557 added 1 changesets with 1 changes to 1 files
557 added 1 changesets with 1 changes to 1 files
558 searching for changes
558 searching for changes
559 adding changesets
559 adding changesets
560 adding manifests
560 adding manifests
561 adding file changes
561 adding file changes
562 added 1 changesets with 1 changes to 1 files
562 added 1 changesets with 1 changes to 1 files
563
563
564 push -f
564 push -f
565
565
566 $ echo bah > s/a
566 $ echo bah > s/a
567 $ hg ci -m12
567 $ hg ci -m12
568 committing subrepository s
568 committing subrepository s
569 $ hg push
569 $ hg push
570 pushing to $TESTTMP/t
570 pushing to $TESTTMP/t
571 no changes made to subrepo s/ss since last push to $TESTTMP/t/s/ss
571 no changes made to subrepo s/ss since last push to $TESTTMP/t/s/ss
572 pushing subrepo s to $TESTTMP/t/s
572 pushing subrepo s to $TESTTMP/t/s
573 searching for changes
573 searching for changes
574 abort: push creates new remote head 12a213df6fa9 (in subrepository "s")
574 abort: push creates new remote head 12a213df6fa9 (in subrepository "s")
575 (merge or see 'hg help push' for details about pushing new heads)
575 (merge or see 'hg help push' for details about pushing new heads)
576 [255]
576 [255]
577 $ hg push -f
577 $ hg push -f
578 pushing to $TESTTMP/t
578 pushing to $TESTTMP/t
579 pushing subrepo s/ss to $TESTTMP/t/s/ss
579 pushing subrepo s/ss to $TESTTMP/t/s/ss
580 searching for changes
580 searching for changes
581 no changes found
581 no changes found
582 pushing subrepo s to $TESTTMP/t/s
582 pushing subrepo s to $TESTTMP/t/s
583 searching for changes
583 searching for changes
584 adding changesets
584 adding changesets
585 adding manifests
585 adding manifests
586 adding file changes
586 adding file changes
587 added 1 changesets with 1 changes to 1 files (+1 heads)
587 added 1 changesets with 1 changes to 1 files (+1 heads)
588 pushing subrepo t to $TESTTMP/t/t
588 pushing subrepo t to $TESTTMP/t/t
589 searching for changes
589 searching for changes
590 no changes found
590 no changes found
591 searching for changes
591 searching for changes
592 adding changesets
592 adding changesets
593 adding manifests
593 adding manifests
594 adding file changes
594 adding file changes
595 added 1 changesets with 1 changes to 1 files
595 added 1 changesets with 1 changes to 1 files
596
596
597 check that unmodified subrepos are not pushed
597 check that unmodified subrepos are not pushed
598
598
599 $ hg clone . ../tcc
599 $ hg clone . ../tcc
600 updating to branch default
600 updating to branch default
601 cloning subrepo s from $TESTTMP/tc/s
601 cloning subrepo s from $TESTTMP/tc/s
602 cloning subrepo s/ss from $TESTTMP/tc/s/ss
602 cloning subrepo s/ss from $TESTTMP/tc/s/ss
603 cloning subrepo t from $TESTTMP/tc/t
603 cloning subrepo t from $TESTTMP/tc/t
604 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
604 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
605
605
606 the subrepos on the new clone have nothing to push to its source
606 the subrepos on the new clone have nothing to push to its source
607
607
608 $ hg push -R ../tcc .
608 $ hg push -R ../tcc .
609 pushing to .
609 pushing to .
610 no changes made to subrepo s/ss since last push to s/ss
610 no changes made to subrepo s/ss since last push to s/ss
611 no changes made to subrepo s since last push to s
611 no changes made to subrepo s since last push to s
612 no changes made to subrepo t since last push to t
612 no changes made to subrepo t since last push to t
613 searching for changes
613 searching for changes
614 no changes found
614 no changes found
615 [1]
615 [1]
616
616
617 the subrepos on the source do not have a clean store versus the clone target
617 the subrepos on the source do not have a clean store versus the clone target
618 because they were never explicitly pushed to the source
618 because they were never explicitly pushed to the source
619
619
620 $ hg push ../tcc
620 $ hg push ../tcc
621 pushing to ../tcc
621 pushing to ../tcc
622 pushing subrepo s/ss to ../tcc/s/ss
622 pushing subrepo s/ss to ../tcc/s/ss
623 searching for changes
623 searching for changes
624 no changes found
624 no changes found
625 pushing subrepo s to ../tcc/s
625 pushing subrepo s to ../tcc/s
626 searching for changes
626 searching for changes
627 no changes found
627 no changes found
628 pushing subrepo t to ../tcc/t
628 pushing subrepo t to ../tcc/t
629 searching for changes
629 searching for changes
630 no changes found
630 no changes found
631 searching for changes
631 searching for changes
632 no changes found
632 no changes found
633 [1]
633 [1]
634
634
635 after push their stores become clean
635 after push their stores become clean
636
636
637 $ hg push ../tcc
637 $ hg push ../tcc
638 pushing to ../tcc
638 pushing to ../tcc
639 no changes made to subrepo s/ss since last push to ../tcc/s/ss
639 no changes made to subrepo s/ss since last push to ../tcc/s/ss
640 no changes made to subrepo s since last push to ../tcc/s
640 no changes made to subrepo s since last push to ../tcc/s
641 no changes made to subrepo t since last push to ../tcc/t
641 no changes made to subrepo t since last push to ../tcc/t
642 searching for changes
642 searching for changes
643 no changes found
643 no changes found
644 [1]
644 [1]
645
645
646 updating a subrepo to a different revision or changing
646 updating a subrepo to a different revision or changing
647 its working directory does not make its store dirty
647 its working directory does not make its store dirty
648
648
649 $ hg -R s update '.^'
649 $ hg -R s update '.^'
650 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
650 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
651 $ hg push
651 $ hg push
652 pushing to $TESTTMP/t
652 pushing to $TESTTMP/t
653 no changes made to subrepo s/ss since last push to $TESTTMP/t/s/ss
653 no changes made to subrepo s/ss since last push to $TESTTMP/t/s/ss
654 no changes made to subrepo s since last push to $TESTTMP/t/s
654 no changes made to subrepo s since last push to $TESTTMP/t/s
655 no changes made to subrepo t since last push to $TESTTMP/t/t
655 no changes made to subrepo t since last push to $TESTTMP/t/t
656 searching for changes
656 searching for changes
657 no changes found
657 no changes found
658 [1]
658 [1]
659 $ echo foo >> s/a
659 $ echo foo >> s/a
660 $ hg push
660 $ hg push
661 pushing to $TESTTMP/t
661 pushing to $TESTTMP/t
662 no changes made to subrepo s/ss since last push to $TESTTMP/t/s/ss
662 no changes made to subrepo s/ss since last push to $TESTTMP/t/s/ss
663 no changes made to subrepo s since last push to $TESTTMP/t/s
663 no changes made to subrepo s since last push to $TESTTMP/t/s
664 no changes made to subrepo t since last push to $TESTTMP/t/t
664 no changes made to subrepo t since last push to $TESTTMP/t/t
665 searching for changes
665 searching for changes
666 no changes found
666 no changes found
667 [1]
667 [1]
668 $ hg -R s update -C tip
668 $ hg -R s update -C tip
669 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
669 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
670
670
671 committing into a subrepo makes its store (but not its parent's store) dirty
671 committing into a subrepo makes its store (but not its parent's store) dirty
672
672
673 $ echo foo >> s/ss/a
673 $ echo foo >> s/ss/a
674 $ hg -R s/ss commit -m 'test dirty store detection'
674 $ hg -R s/ss commit -m 'test dirty store detection'
675
675
676 $ hg out -S -r `hg log -r tip -T "{node|short}"`
676 $ hg out -S -r `hg log -r tip -T "{node|short}"`
677 comparing with $TESTTMP/t
677 comparing with $TESTTMP/t
678 searching for changes
678 searching for changes
679 no changes found
679 no changes found
680 comparing with $TESTTMP/t/s
680 comparing with $TESTTMP/t/s
681 searching for changes
681 searching for changes
682 no changes found
682 no changes found
683 comparing with $TESTTMP/t/s/ss
683 comparing with $TESTTMP/t/s/ss
684 searching for changes
684 searching for changes
685 changeset: 1:79ea5566a333
685 changeset: 1:79ea5566a333
686 tag: tip
686 tag: tip
687 user: test
687 user: test
688 date: Thu Jan 01 00:00:00 1970 +0000
688 date: Thu Jan 01 00:00:00 1970 +0000
689 summary: test dirty store detection
689 summary: test dirty store detection
690
690
691 comparing with $TESTTMP/t/t
691 comparing with $TESTTMP/t/t
692 searching for changes
692 searching for changes
693 no changes found
693 no changes found
694
694
695 $ hg push
695 $ hg push
696 pushing to $TESTTMP/t
696 pushing to $TESTTMP/t
697 pushing subrepo s/ss to $TESTTMP/t/s/ss
697 pushing subrepo s/ss to $TESTTMP/t/s/ss
698 searching for changes
698 searching for changes
699 adding changesets
699 adding changesets
700 adding manifests
700 adding manifests
701 adding file changes
701 adding file changes
702 added 1 changesets with 1 changes to 1 files
702 added 1 changesets with 1 changes to 1 files
703 no changes made to subrepo s since last push to $TESTTMP/t/s
703 no changes made to subrepo s since last push to $TESTTMP/t/s
704 no changes made to subrepo t since last push to $TESTTMP/t/t
704 no changes made to subrepo t since last push to $TESTTMP/t/t
705 searching for changes
705 searching for changes
706 no changes found
706 no changes found
707 [1]
707 [1]
708
708
709 a subrepo store may be clean versus one repo but not versus another
709 a subrepo store may be clean versus one repo but not versus another
710
710
711 $ hg push
711 $ hg push
712 pushing to $TESTTMP/t
712 pushing to $TESTTMP/t
713 no changes made to subrepo s/ss since last push to $TESTTMP/t/s/ss
713 no changes made to subrepo s/ss since last push to $TESTTMP/t/s/ss
714 no changes made to subrepo s since last push to $TESTTMP/t/s
714 no changes made to subrepo s since last push to $TESTTMP/t/s
715 no changes made to subrepo t since last push to $TESTTMP/t/t
715 no changes made to subrepo t since last push to $TESTTMP/t/t
716 searching for changes
716 searching for changes
717 no changes found
717 no changes found
718 [1]
718 [1]
719 $ hg push ../tcc
719 $ hg push ../tcc
720 pushing to ../tcc
720 pushing to ../tcc
721 pushing subrepo s/ss to ../tcc/s/ss
721 pushing subrepo s/ss to ../tcc/s/ss
722 searching for changes
722 searching for changes
723 adding changesets
723 adding changesets
724 adding manifests
724 adding manifests
725 adding file changes
725 adding file changes
726 added 1 changesets with 1 changes to 1 files
726 added 1 changesets with 1 changes to 1 files
727 no changes made to subrepo s since last push to ../tcc/s
727 no changes made to subrepo s since last push to ../tcc/s
728 no changes made to subrepo t since last push to ../tcc/t
728 no changes made to subrepo t since last push to ../tcc/t
729 searching for changes
729 searching for changes
730 no changes found
730 no changes found
731 [1]
731 [1]
732
732
733 update
733 update
734
734
735 $ cd ../t
735 $ cd ../t
736 $ hg up -C # discard our earlier merge
736 $ hg up -C # discard our earlier merge
737 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
737 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
738 updated to "c373c8102e68: 12"
738 updated to "c373c8102e68: 12"
739 2 other heads for branch "default"
739 2 other heads for branch "default"
740 $ echo blah > t/t
740 $ echo blah > t/t
741 $ hg ci -m13
741 $ hg ci -m13
742 committing subrepository t
742 committing subrepository t
743
743
744 backout calls revert internally with minimal opts, which should not raise
744 backout calls revert internally with minimal opts, which should not raise
745 KeyError
745 KeyError
746
746
747 $ hg backout ".^" --no-commit
747 $ hg backout ".^" --no-commit
748 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
748 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
749 changeset c373c8102e68 backed out, don't forget to commit.
749 changeset c373c8102e68 backed out, don't forget to commit.
750
750
751 $ hg up -C # discard changes
751 $ hg up -C # discard changes
752 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
752 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
753 updated to "925c17564ef8: 13"
753 updated to "925c17564ef8: 13"
754 2 other heads for branch "default"
754 2 other heads for branch "default"
755
755
756 pull
756 pull
757
757
758 $ cd ../tc
758 $ cd ../tc
759 $ hg pull
759 $ hg pull
760 pulling from $TESTTMP/t
760 pulling from $TESTTMP/t
761 searching for changes
761 searching for changes
762 adding changesets
762 adding changesets
763 adding manifests
763 adding manifests
764 adding file changes
764 adding file changes
765 added 1 changesets with 1 changes to 1 files
765 added 1 changesets with 1 changes to 1 files
766 new changesets 925c17564ef8
766 new changesets 925c17564ef8
767 (run 'hg update' to get a working copy)
767 (run 'hg update' to get a working copy)
768
768
769 should pull t
769 should pull t
770
770
771 $ hg incoming -S -r `hg log -r tip -T "{node|short}"`
771 $ hg incoming -S -r `hg log -r tip -T "{node|short}"`
772 comparing with $TESTTMP/t
772 comparing with $TESTTMP/t
773 no changes found
773 no changes found
774 comparing with $TESTTMP/t/s
774 comparing with $TESTTMP/t/s
775 searching for changes
775 searching for changes
776 no changes found
776 no changes found
777 comparing with $TESTTMP/t/s/ss
777 comparing with $TESTTMP/t/s/ss
778 searching for changes
778 searching for changes
779 no changes found
779 no changes found
780 comparing with $TESTTMP/t/t
780 comparing with $TESTTMP/t/t
781 searching for changes
781 searching for changes
782 changeset: 5:52c0adc0515a
782 changeset: 5:52c0adc0515a
783 tag: tip
783 tag: tip
784 user: test
784 user: test
785 date: Thu Jan 01 00:00:00 1970 +0000
785 date: Thu Jan 01 00:00:00 1970 +0000
786 summary: 13
786 summary: 13
787
787
788
788
789 $ hg up
789 $ hg up
790 pulling subrepo t from $TESTTMP/t/t
790 pulling subrepo t from $TESTTMP/t/t
791 searching for changes
791 searching for changes
792 adding changesets
792 adding changesets
793 adding manifests
793 adding manifests
794 adding file changes
794 adding file changes
795 added 1 changesets with 1 changes to 1 files
795 added 1 changesets with 1 changes to 1 files
796 new changesets 52c0adc0515a
796 new changesets 52c0adc0515a
797 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
797 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
798 updated to "925c17564ef8: 13"
798 updated to "925c17564ef8: 13"
799 2 other heads for branch "default"
799 2 other heads for branch "default"
800 $ cat t/t
800 $ cat t/t
801 blah
801 blah
802
802
803 bogus subrepo path aborts
803 bogus subrepo path aborts
804
804
805 $ echo 'bogus=[boguspath' >> .hgsub
805 $ echo 'bogus=[boguspath' >> .hgsub
806 $ hg ci -m 'bogus subrepo path'
806 $ hg ci -m 'bogus subrepo path'
807 abort: missing ] in subrepository source
807 abort: missing ] in subrepository source
808 [255]
808 [255]
809
809
810 Issue1986: merge aborts when trying to merge a subrepo that
810 Issue1986: merge aborts when trying to merge a subrepo that
811 shouldn't need merging
811 shouldn't need merging
812
812
813 # subrepo layout
813 # subrepo layout
814 #
814 #
815 # o 5 br
815 # o 5 br
816 # /|
816 # /|
817 # o | 4 default
817 # o | 4 default
818 # | |
818 # | |
819 # | o 3 br
819 # | o 3 br
820 # |/|
820 # |/|
821 # o | 2 default
821 # o | 2 default
822 # | |
822 # | |
823 # | o 1 br
823 # | o 1 br
824 # |/
824 # |/
825 # o 0 default
825 # o 0 default
826
826
827 $ cd ..
827 $ cd ..
828 $ rm -rf sub
828 $ rm -rf sub
829 $ hg init main
829 $ hg init main
830 $ cd main
830 $ cd main
831 $ hg init s
831 $ hg init s
832 $ cd s
832 $ cd s
833 $ echo a > a
833 $ echo a > a
834 $ hg ci -Am1
834 $ hg ci -Am1
835 adding a
835 adding a
836 $ hg branch br
836 $ hg branch br
837 marked working directory as branch br
837 marked working directory as branch br
838 (branches are permanent and global, did you want a bookmark?)
838 (branches are permanent and global, did you want a bookmark?)
839 $ echo a >> a
839 $ echo a >> a
840 $ hg ci -m1
840 $ hg ci -m1
841 $ hg up default
841 $ hg up default
842 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
842 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
843 $ echo b > b
843 $ echo b > b
844 $ hg ci -Am1
844 $ hg ci -Am1
845 adding b
845 adding b
846 $ hg up br
846 $ hg up br
847 1 files updated, 0 files merged, 1 files removed, 0 files unresolved
847 1 files updated, 0 files merged, 1 files removed, 0 files unresolved
848 $ hg merge tip
848 $ hg merge tip
849 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
849 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
850 (branch merge, don't forget to commit)
850 (branch merge, don't forget to commit)
851 $ hg ci -m1
851 $ hg ci -m1
852 $ hg up 2
852 $ hg up 2
853 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
853 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
854 $ echo c > c
854 $ echo c > c
855 $ hg ci -Am1
855 $ hg ci -Am1
856 adding c
856 adding c
857 $ hg up 3
857 $ hg up 3
858 1 files updated, 0 files merged, 1 files removed, 0 files unresolved
858 1 files updated, 0 files merged, 1 files removed, 0 files unresolved
859 $ hg merge 4
859 $ hg merge 4
860 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
860 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
861 (branch merge, don't forget to commit)
861 (branch merge, don't forget to commit)
862 $ hg ci -m1
862 $ hg ci -m1
863
863
864 # main repo layout:
864 # main repo layout:
865 #
865 #
866 # * <-- try to merge default into br again
866 # * <-- try to merge default into br again
867 # .`|
867 # .`|
868 # . o 5 br --> substate = 5
868 # . o 5 br --> substate = 5
869 # . |
869 # . |
870 # o | 4 default --> substate = 4
870 # o | 4 default --> substate = 4
871 # | |
871 # | |
872 # | o 3 br --> substate = 2
872 # | o 3 br --> substate = 2
873 # |/|
873 # |/|
874 # o | 2 default --> substate = 2
874 # o | 2 default --> substate = 2
875 # | |
875 # | |
876 # | o 1 br --> substate = 3
876 # | o 1 br --> substate = 3
877 # |/
877 # |/
878 # o 0 default --> substate = 2
878 # o 0 default --> substate = 2
879
879
880 $ cd ..
880 $ cd ..
881 $ echo 's = s' > .hgsub
881 $ echo 's = s' > .hgsub
882 $ hg -R s up 2
882 $ hg -R s up 2
883 1 files updated, 0 files merged, 1 files removed, 0 files unresolved
883 1 files updated, 0 files merged, 1 files removed, 0 files unresolved
884 $ hg ci -Am1
884 $ hg ci -Am1
885 adding .hgsub
885 adding .hgsub
886 $ hg branch br
886 $ hg branch br
887 marked working directory as branch br
887 marked working directory as branch br
888 (branches are permanent and global, did you want a bookmark?)
888 (branches are permanent and global, did you want a bookmark?)
889 $ echo b > b
889 $ echo b > b
890 $ hg -R s up 3
890 $ hg -R s up 3
891 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
891 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
892 $ hg ci -Am1
892 $ hg ci -Am1
893 adding b
893 adding b
894 $ hg up default
894 $ hg up default
895 1 files updated, 0 files merged, 1 files removed, 0 files unresolved
895 1 files updated, 0 files merged, 1 files removed, 0 files unresolved
896 $ echo c > c
896 $ echo c > c
897 $ hg ci -Am1
897 $ hg ci -Am1
898 adding c
898 adding c
899 $ hg up 1
899 $ hg up 1
900 2 files updated, 0 files merged, 1 files removed, 0 files unresolved
900 2 files updated, 0 files merged, 1 files removed, 0 files unresolved
901 $ hg merge 2
901 $ hg merge 2
902 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
902 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
903 (branch merge, don't forget to commit)
903 (branch merge, don't forget to commit)
904 $ hg ci -m1
904 $ hg ci -m1
905 $ hg up 2
905 $ hg up 2
906 1 files updated, 0 files merged, 1 files removed, 0 files unresolved
906 1 files updated, 0 files merged, 1 files removed, 0 files unresolved
907 $ hg -R s up 4
907 $ hg -R s up 4
908 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
908 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
909 $ echo d > d
909 $ echo d > d
910 $ hg ci -Am1
910 $ hg ci -Am1
911 adding d
911 adding d
912 $ hg up 3
912 $ hg up 3
913 2 files updated, 0 files merged, 1 files removed, 0 files unresolved
913 2 files updated, 0 files merged, 1 files removed, 0 files unresolved
914 $ hg -R s up 5
914 $ hg -R s up 5
915 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
915 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
916 $ echo e > e
916 $ echo e > e
917 $ hg ci -Am1
917 $ hg ci -Am1
918 adding e
918 adding e
919
919
920 $ hg up 5
920 $ hg up 5
921 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
921 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
922 $ hg merge 4 # try to merge default into br again
922 $ hg merge 4 # try to merge default into br again
923 subrepository s diverged (local revision: f8f13b33206e, remote revision: a3f9062a4f88)
923 subrepository s diverged (local revision: f8f13b33206e, remote revision: a3f9062a4f88)
924 you can (m)erge, keep (l)ocal [working copy] or keep (r)emote [merge rev].
924 you can (m)erge, keep (l)ocal [working copy] or keep (r)emote [merge rev].
925 what do you want to do? m
925 what do you want to do? m
926 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
926 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
927 (branch merge, don't forget to commit)
927 (branch merge, don't forget to commit)
928 $ cd ..
928 $ cd ..
929
929
930 test subrepo delete from .hgsubstate
930 test subrepo delete from .hgsubstate
931
931
932 $ hg init testdelete
932 $ hg init testdelete
933 $ mkdir testdelete/nested testdelete/nested2
933 $ mkdir testdelete/nested testdelete/nested2
934 $ hg init testdelete/nested
934 $ hg init testdelete/nested
935 $ hg init testdelete/nested2
935 $ hg init testdelete/nested2
936 $ echo test > testdelete/nested/foo
936 $ echo test > testdelete/nested/foo
937 $ echo test > testdelete/nested2/foo
937 $ echo test > testdelete/nested2/foo
938 $ hg -R testdelete/nested add
938 $ hg -R testdelete/nested add
939 adding testdelete/nested/foo
939 adding testdelete/nested/foo
940 $ hg -R testdelete/nested2 add
940 $ hg -R testdelete/nested2 add
941 adding testdelete/nested2/foo
941 adding testdelete/nested2/foo
942 $ hg -R testdelete/nested ci -m test
942 $ hg -R testdelete/nested ci -m test
943 $ hg -R testdelete/nested2 ci -m test
943 $ hg -R testdelete/nested2 ci -m test
944 $ echo nested = nested > testdelete/.hgsub
944 $ echo nested = nested > testdelete/.hgsub
945 $ echo nested2 = nested2 >> testdelete/.hgsub
945 $ echo nested2 = nested2 >> testdelete/.hgsub
946 $ hg -R testdelete add
946 $ hg -R testdelete add
947 adding testdelete/.hgsub
947 adding testdelete/.hgsub
948 $ hg -R testdelete ci -m "nested 1 & 2 added"
948 $ hg -R testdelete ci -m "nested 1 & 2 added"
949 $ echo nested = nested > testdelete/.hgsub
949 $ echo nested = nested > testdelete/.hgsub
950 $ hg -R testdelete ci -m "nested 2 deleted"
950 $ hg -R testdelete ci -m "nested 2 deleted"
951 $ cat testdelete/.hgsubstate
951 $ cat testdelete/.hgsubstate
952 bdf5c9a3103743d900b12ae0db3ffdcfd7b0d878 nested
952 bdf5c9a3103743d900b12ae0db3ffdcfd7b0d878 nested
953 $ hg -R testdelete remove testdelete/.hgsub
953 $ hg -R testdelete remove testdelete/.hgsub
954 $ hg -R testdelete ci -m ".hgsub deleted"
954 $ hg -R testdelete ci -m ".hgsub deleted"
955 $ cat testdelete/.hgsubstate
955 $ cat testdelete/.hgsubstate
956 bdf5c9a3103743d900b12ae0db3ffdcfd7b0d878 nested
956 bdf5c9a3103743d900b12ae0db3ffdcfd7b0d878 nested
957
957
958 test repository cloning
958 test repository cloning
959
959
960 $ mkdir mercurial mercurial2
960 $ mkdir mercurial mercurial2
961 $ hg init nested_absolute
961 $ hg init nested_absolute
962 $ echo test > nested_absolute/foo
962 $ echo test > nested_absolute/foo
963 $ hg -R nested_absolute add
963 $ hg -R nested_absolute add
964 adding nested_absolute/foo
964 adding nested_absolute/foo
965 $ hg -R nested_absolute ci -mtest
965 $ hg -R nested_absolute ci -mtest
966 $ cd mercurial
966 $ cd mercurial
967 $ hg init nested_relative
967 $ hg init nested_relative
968 $ echo test2 > nested_relative/foo2
968 $ echo test2 > nested_relative/foo2
969 $ hg -R nested_relative add
969 $ hg -R nested_relative add
970 adding nested_relative/foo2
970 adding nested_relative/foo2
971 $ hg -R nested_relative ci -mtest2
971 $ hg -R nested_relative ci -mtest2
972 $ hg init main
972 $ hg init main
973 $ echo "nested_relative = ../nested_relative" > main/.hgsub
973 $ echo "nested_relative = ../nested_relative" > main/.hgsub
974 $ echo "nested_absolute = `pwd`/nested_absolute" >> main/.hgsub
974 $ echo "nested_absolute = `pwd`/nested_absolute" >> main/.hgsub
975 $ hg -R main add
975 $ hg -R main add
976 adding main/.hgsub
976 adding main/.hgsub
977 $ hg -R main ci -m "add subrepos"
977 $ hg -R main ci -m "add subrepos"
978 $ cd ..
978 $ cd ..
979 $ hg clone mercurial/main mercurial2/main
979 $ hg clone mercurial/main mercurial2/main
980 updating to branch default
980 updating to branch default
981 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
981 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
982 $ cat mercurial2/main/nested_absolute/.hg/hgrc \
982 $ cat mercurial2/main/nested_absolute/.hg/hgrc \
983 > mercurial2/main/nested_relative/.hg/hgrc
983 > mercurial2/main/nested_relative/.hg/hgrc
984 [paths]
984 [paths]
985 default = $TESTTMP/mercurial/nested_absolute
985 default = $TESTTMP/mercurial/nested_absolute
986 [paths]
986 [paths]
987 default = $TESTTMP/mercurial/nested_relative
987 default = $TESTTMP/mercurial/nested_relative
988 $ rm -rf mercurial mercurial2
988 $ rm -rf mercurial mercurial2
989
989
990 Issue1977: multirepo push should fail if subrepo push fails
990 Issue1977: multirepo push should fail if subrepo push fails
991
991
992 $ hg init repo
992 $ hg init repo
993 $ hg init repo/s
993 $ hg init repo/s
994 $ echo a > repo/s/a
994 $ echo a > repo/s/a
995 $ hg -R repo/s ci -Am0
995 $ hg -R repo/s ci -Am0
996 adding a
996 adding a
997 $ echo s = s > repo/.hgsub
997 $ echo s = s > repo/.hgsub
998 $ hg -R repo ci -Am1
998 $ hg -R repo ci -Am1
999 adding .hgsub
999 adding .hgsub
1000 $ hg clone repo repo2
1000 $ hg clone repo repo2
1001 updating to branch default
1001 updating to branch default
1002 cloning subrepo s from $TESTTMP/repo/s
1002 cloning subrepo s from $TESTTMP/repo/s
1003 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
1003 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
1004 $ hg -q -R repo2 pull -u
1004 $ hg -q -R repo2 pull -u
1005 $ echo 1 > repo2/s/a
1005 $ echo 1 > repo2/s/a
1006 $ hg -R repo2/s ci -m2
1006 $ hg -R repo2/s ci -m2
1007 $ hg -q -R repo2/s push
1007 $ hg -q -R repo2/s push
1008 $ hg -R repo2/s up -C 0
1008 $ hg -R repo2/s up -C 0
1009 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
1009 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
1010 $ echo 2 > repo2/s/b
1010 $ echo 2 > repo2/s/b
1011 $ hg -R repo2/s ci -m3 -A
1011 $ hg -R repo2/s ci -m3 -A
1012 adding b
1012 adding b
1013 created new head
1013 created new head
1014 $ hg -R repo2 ci -m3
1014 $ hg -R repo2 ci -m3
1015 $ hg -q -R repo2 push
1015 $ hg -q -R repo2 push
1016 abort: push creates new remote head cc505f09a8b2 (in subrepository "s")
1016 abort: push creates new remote head cc505f09a8b2 (in subrepository "s")
1017 (merge or see 'hg help push' for details about pushing new heads)
1017 (merge or see 'hg help push' for details about pushing new heads)
1018 [255]
1018 [255]
1019 $ hg -R repo update
1019 $ hg -R repo update
1020 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
1020 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
1021
1021
1022 test if untracked file is not overwritten
1022 test if untracked file is not overwritten
1023
1023
1024 (this also tests that updated .hgsubstate is treated as "modified",
1024 (this tests also has a change to update .hgsubstate and merge it within the
1025 when 'merge.update()' is aborted before 'merge.recordupdates()', even
1025 same second. It should mark is are modified , even if none of mode, size and
1026 if none of mode, size and timestamp of it isn't changed on the
1026 timestamp of it isn't changed on the filesystem (see also issue4583))
1027 filesystem (see also issue4583))
1028
1027
1029 $ echo issue3276_ok > repo/s/b
1028 $ echo issue3276_ok > repo/s/b
1030 $ hg -R repo2 push -f -q
1029 $ hg -R repo2 push -f -q
1031 $ touch -t 200001010000 repo/.hgsubstate
1032
1030
1033 $ cat >> repo/.hg/hgrc <<EOF
1034 > [fakedirstatewritetime]
1035 > # emulate invoking dirstate.write() via repo.status()
1036 > # at 2000-01-01 00:00
1037 > fakenow = 200001010000
1038 >
1039 > [extensions]
1040 > fakedirstatewritetime = $TESTDIR/fakedirstatewritetime.py
1041 > EOF
1042 $ hg -R repo update
1031 $ hg -R repo update
1043 b: untracked file differs
1032 b: untracked file differs
1044 abort: untracked files in working directory differ from files in requested revision (in subrepository "s")
1033 abort: untracked files in working directory differ from files in requested revision (in subrepository "s")
1045 [255]
1034 [255]
1046 $ cat >> repo/.hg/hgrc <<EOF
1047 > [extensions]
1048 > fakedirstatewritetime = !
1049 > EOF
1050
1035
1051 $ cat repo/s/b
1036 $ cat repo/s/b
1052 issue3276_ok
1037 issue3276_ok
1053 $ rm repo/s/b
1038 $ rm repo/s/b
1054 $ touch -t 200001010000 repo/.hgsubstate
1055 $ hg -R repo revert --all
1039 $ hg -R repo revert --all
1056 reverting repo/.hgsubstate
1040 reverting repo/.hgsubstate
1057 reverting subrepo s
1041 reverting subrepo s
1058 $ hg -R repo update
1042 $ hg -R repo update
1059 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
1043 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
1060 $ cat repo/s/b
1044 $ cat repo/s/b
1061 2
1045 2
1062 $ rm -rf repo2 repo
1046 $ rm -rf repo2 repo
1063
1047
1064
1048
1065 Issue1852 subrepos with relative paths always push/pull relative to default
1049 Issue1852 subrepos with relative paths always push/pull relative to default
1066
1050
1067 Prepare a repo with subrepo
1051 Prepare a repo with subrepo
1068
1052
1069 $ hg init issue1852a
1053 $ hg init issue1852a
1070 $ cd issue1852a
1054 $ cd issue1852a
1071 $ hg init sub/repo
1055 $ hg init sub/repo
1072 $ echo test > sub/repo/foo
1056 $ echo test > sub/repo/foo
1073 $ hg -R sub/repo add sub/repo/foo
1057 $ hg -R sub/repo add sub/repo/foo
1074 $ echo sub/repo = sub/repo > .hgsub
1058 $ echo sub/repo = sub/repo > .hgsub
1075 $ hg add .hgsub
1059 $ hg add .hgsub
1076 $ hg ci -mtest
1060 $ hg ci -mtest
1077 committing subrepository sub/repo
1061 committing subrepository sub/repo
1078 $ echo test >> sub/repo/foo
1062 $ echo test >> sub/repo/foo
1079 $ hg ci -mtest
1063 $ hg ci -mtest
1080 committing subrepository sub/repo
1064 committing subrepository sub/repo
1081 $ hg cat sub/repo/foo
1065 $ hg cat sub/repo/foo
1082 test
1066 test
1083 test
1067 test
1084 $ hg cat sub/repo/foo -Tjson | sed 's|\\\\|/|g'
1068 $ hg cat sub/repo/foo -Tjson | sed 's|\\\\|/|g'
1085 [
1069 [
1086 {
1070 {
1087 "data": "test\ntest\n",
1071 "data": "test\ntest\n",
1088 "path": "foo"
1072 "path": "foo"
1089 }
1073 }
1090 ]
1074 ]
1091
1075
1092 non-exact match:
1076 non-exact match:
1093
1077
1094 $ hg cat -T '{path|relpath}\n' 'glob:**'
1078 $ hg cat -T '{path|relpath}\n' 'glob:**'
1095 .hgsub
1079 .hgsub
1096 .hgsubstate
1080 .hgsubstate
1097 sub/repo/foo
1081 sub/repo/foo
1098 $ hg cat -T '{path|relpath}\n' 're:^sub'
1082 $ hg cat -T '{path|relpath}\n' 're:^sub'
1099 sub/repo/foo
1083 sub/repo/foo
1100
1084
1101 missing subrepos in working directory:
1085 missing subrepos in working directory:
1102
1086
1103 $ mkdir -p tmp/sub/repo
1087 $ mkdir -p tmp/sub/repo
1104 $ hg cat -r 0 --output tmp/%p_p sub/repo/foo
1088 $ hg cat -r 0 --output tmp/%p_p sub/repo/foo
1105 $ cat tmp/sub/repo/foo_p
1089 $ cat tmp/sub/repo/foo_p
1106 test
1090 test
1107 $ mv sub/repo sub_
1091 $ mv sub/repo sub_
1108 $ hg cat sub/repo/baz
1092 $ hg cat sub/repo/baz
1109 skipping missing subrepository: sub/repo
1093 skipping missing subrepository: sub/repo
1110 [1]
1094 [1]
1111 $ rm -rf sub/repo
1095 $ rm -rf sub/repo
1112 $ mv sub_ sub/repo
1096 $ mv sub_ sub/repo
1113 $ cd ..
1097 $ cd ..
1114
1098
1115 Create repo without default path, pull top repo, and see what happens on update
1099 Create repo without default path, pull top repo, and see what happens on update
1116
1100
1117 $ hg init issue1852b
1101 $ hg init issue1852b
1118 $ hg -R issue1852b pull issue1852a
1102 $ hg -R issue1852b pull issue1852a
1119 pulling from issue1852a
1103 pulling from issue1852a
1120 requesting all changes
1104 requesting all changes
1121 adding changesets
1105 adding changesets
1122 adding manifests
1106 adding manifests
1123 adding file changes
1107 adding file changes
1124 added 2 changesets with 3 changes to 2 files
1108 added 2 changesets with 3 changes to 2 files
1125 new changesets 19487b456929:be5eb94e7215
1109 new changesets 19487b456929:be5eb94e7215
1126 (run 'hg update' to get a working copy)
1110 (run 'hg update' to get a working copy)
1127 $ hg -R issue1852b update
1111 $ hg -R issue1852b update
1128 abort: default path for subrepository not found (in subrepository "sub/repo")
1112 abort: default path for subrepository not found (in subrepository "sub/repo")
1129 [255]
1113 [255]
1130
1114
1131 Ensure a full traceback, not just the SubrepoAbort part
1115 Ensure a full traceback, not just the SubrepoAbort part
1132
1116
1133 $ hg -R issue1852b update --traceback 2>&1 | grep 'raise error\.Abort'
1117 $ hg -R issue1852b update --traceback 2>&1 | grep 'raise error\.Abort'
1134 raise error.Abort(_(b"default path for subrepository not found"))
1118 raise error.Abort(_(b"default path for subrepository not found"))
1135
1119
1136 Pull -u now doesn't help
1120 Pull -u now doesn't help
1137
1121
1138 $ hg -R issue1852b pull -u issue1852a
1122 $ hg -R issue1852b pull -u issue1852a
1139 pulling from issue1852a
1123 pulling from issue1852a
1140 searching for changes
1124 searching for changes
1141 no changes found
1125 no changes found
1142
1126
1143 Try the same, but with pull -u
1127 Try the same, but with pull -u
1144
1128
1145 $ hg init issue1852c
1129 $ hg init issue1852c
1146 $ hg -R issue1852c pull -r0 -u issue1852a
1130 $ hg -R issue1852c pull -r0 -u issue1852a
1147 pulling from issue1852a
1131 pulling from issue1852a
1148 adding changesets
1132 adding changesets
1149 adding manifests
1133 adding manifests
1150 adding file changes
1134 adding file changes
1151 added 1 changesets with 2 changes to 2 files
1135 added 1 changesets with 2 changes to 2 files
1152 new changesets 19487b456929
1136 new changesets 19487b456929
1153 cloning subrepo sub/repo from issue1852a/sub/repo
1137 cloning subrepo sub/repo from issue1852a/sub/repo
1154 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
1138 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
1155
1139
1156 Try to push from the other side
1140 Try to push from the other side
1157
1141
1158 $ hg -R issue1852a push `pwd`/issue1852c
1142 $ hg -R issue1852a push `pwd`/issue1852c
1159 pushing to $TESTTMP/issue1852c
1143 pushing to $TESTTMP/issue1852c
1160 pushing subrepo sub/repo to $TESTTMP/issue1852c/sub/repo
1144 pushing subrepo sub/repo to $TESTTMP/issue1852c/sub/repo
1161 searching for changes
1145 searching for changes
1162 no changes found
1146 no changes found
1163 searching for changes
1147 searching for changes
1164 adding changesets
1148 adding changesets
1165 adding manifests
1149 adding manifests
1166 adding file changes
1150 adding file changes
1167 added 1 changesets with 1 changes to 1 files
1151 added 1 changesets with 1 changes to 1 files
1168
1152
1169 Incoming and outgoing should not use the default path:
1153 Incoming and outgoing should not use the default path:
1170
1154
1171 $ hg clone -q issue1852a issue1852d
1155 $ hg clone -q issue1852a issue1852d
1172 $ hg -R issue1852d outgoing --subrepos issue1852c
1156 $ hg -R issue1852d outgoing --subrepos issue1852c
1173 comparing with issue1852c
1157 comparing with issue1852c
1174 searching for changes
1158 searching for changes
1175 no changes found
1159 no changes found
1176 comparing with issue1852c/sub/repo
1160 comparing with issue1852c/sub/repo
1177 searching for changes
1161 searching for changes
1178 no changes found
1162 no changes found
1179 [1]
1163 [1]
1180 $ hg -R issue1852d incoming --subrepos issue1852c
1164 $ hg -R issue1852d incoming --subrepos issue1852c
1181 comparing with issue1852c
1165 comparing with issue1852c
1182 searching for changes
1166 searching for changes
1183 no changes found
1167 no changes found
1184 comparing with issue1852c/sub/repo
1168 comparing with issue1852c/sub/repo
1185 searching for changes
1169 searching for changes
1186 no changes found
1170 no changes found
1187 [1]
1171 [1]
1188
1172
1189 Check that merge of a new subrepo doesn't write the uncommitted state to
1173 Check that merge of a new subrepo doesn't write the uncommitted state to
1190 .hgsubstate (issue4622)
1174 .hgsubstate (issue4622)
1191
1175
1192 $ hg init issue1852a/addedsub
1176 $ hg init issue1852a/addedsub
1193 $ echo zzz > issue1852a/addedsub/zz.txt
1177 $ echo zzz > issue1852a/addedsub/zz.txt
1194 $ hg -R issue1852a/addedsub ci -Aqm "initial ZZ"
1178 $ hg -R issue1852a/addedsub ci -Aqm "initial ZZ"
1195
1179
1196 $ hg clone issue1852a/addedsub issue1852d/addedsub
1180 $ hg clone issue1852a/addedsub issue1852d/addedsub
1197 updating to branch default
1181 updating to branch default
1198 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
1182 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
1199
1183
1200 $ echo def > issue1852a/sub/repo/foo
1184 $ echo def > issue1852a/sub/repo/foo
1201 $ hg -R issue1852a ci -SAm 'tweaked subrepo'
1185 $ hg -R issue1852a ci -SAm 'tweaked subrepo'
1202 adding tmp/sub/repo/foo_p
1186 adding tmp/sub/repo/foo_p
1203 committing subrepository sub/repo
1187 committing subrepository sub/repo
1204
1188
1205 $ echo 'addedsub = addedsub' >> issue1852d/.hgsub
1189 $ echo 'addedsub = addedsub' >> issue1852d/.hgsub
1206 $ echo xyz > issue1852d/sub/repo/foo
1190 $ echo xyz > issue1852d/sub/repo/foo
1207 $ hg -R issue1852d pull -u
1191 $ hg -R issue1852d pull -u
1208 pulling from $TESTTMP/issue1852a
1192 pulling from $TESTTMP/issue1852a
1209 searching for changes
1193 searching for changes
1210 adding changesets
1194 adding changesets
1211 adding manifests
1195 adding manifests
1212 adding file changes
1196 adding file changes
1213 added 1 changesets with 2 changes to 2 files
1197 added 1 changesets with 2 changes to 2 files
1214 new changesets c82b79fdcc5b
1198 new changesets c82b79fdcc5b
1215 subrepository sub/repo diverged (local revision: f42d5c7504a8, remote revision: 46cd4aac504c)
1199 subrepository sub/repo diverged (local revision: f42d5c7504a8, remote revision: 46cd4aac504c)
1216 you can (m)erge, keep (l)ocal [working copy] or keep (r)emote [destination].
1200 you can (m)erge, keep (l)ocal [working copy] or keep (r)emote [destination].
1217 what do you want to do? m
1201 what do you want to do? m
1218 pulling subrepo sub/repo from $TESTTMP/issue1852a/sub/repo
1202 pulling subrepo sub/repo from $TESTTMP/issue1852a/sub/repo
1219 searching for changes
1203 searching for changes
1220 adding changesets
1204 adding changesets
1221 adding manifests
1205 adding manifests
1222 adding file changes
1206 adding file changes
1223 added 1 changesets with 1 changes to 1 files
1207 added 1 changesets with 1 changes to 1 files
1224 new changesets 46cd4aac504c
1208 new changesets 46cd4aac504c
1225 subrepository sources for sub/repo differ
1209 subrepository sources for sub/repo differ
1226 you can use (l)ocal source (f42d5c7504a8) or (r)emote source (46cd4aac504c).
1210 you can use (l)ocal source (f42d5c7504a8) or (r)emote source (46cd4aac504c).
1227 what do you want to do? l
1211 what do you want to do? l
1228 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
1212 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
1229 $ cat issue1852d/.hgsubstate
1213 $ cat issue1852d/.hgsubstate
1230 f42d5c7504a811dda50f5cf3e5e16c3330b87172 sub/repo
1214 f42d5c7504a811dda50f5cf3e5e16c3330b87172 sub/repo
1231
1215
1232 Check status of files when none of them belong to the first
1216 Check status of files when none of them belong to the first
1233 subrepository:
1217 subrepository:
1234
1218
1235 $ hg init subrepo-status
1219 $ hg init subrepo-status
1236 $ cd subrepo-status
1220 $ cd subrepo-status
1237 $ hg init subrepo-1
1221 $ hg init subrepo-1
1238 $ hg init subrepo-2
1222 $ hg init subrepo-2
1239 $ cd subrepo-2
1223 $ cd subrepo-2
1240 $ touch file
1224 $ touch file
1241 $ hg add file
1225 $ hg add file
1242 $ cd ..
1226 $ cd ..
1243 $ echo subrepo-1 = subrepo-1 > .hgsub
1227 $ echo subrepo-1 = subrepo-1 > .hgsub
1244 $ echo subrepo-2 = subrepo-2 >> .hgsub
1228 $ echo subrepo-2 = subrepo-2 >> .hgsub
1245 $ hg add .hgsub
1229 $ hg add .hgsub
1246 $ hg ci -m 'Added subrepos'
1230 $ hg ci -m 'Added subrepos'
1247 committing subrepository subrepo-2
1231 committing subrepository subrepo-2
1248 $ hg st subrepo-2/file
1232 $ hg st subrepo-2/file
1249
1233
1250 Check that share works with subrepo
1234 Check that share works with subrepo
1251 $ hg --config extensions.share= share . ../shared
1235 $ hg --config extensions.share= share . ../shared
1252 updating working directory
1236 updating working directory
1253 sharing subrepo subrepo-1 from $TESTTMP/subrepo-status/subrepo-1
1237 sharing subrepo subrepo-1 from $TESTTMP/subrepo-status/subrepo-1
1254 sharing subrepo subrepo-2 from $TESTTMP/subrepo-status/subrepo-2
1238 sharing subrepo subrepo-2 from $TESTTMP/subrepo-status/subrepo-2
1255 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
1239 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
1256 $ find ../shared/* | sort
1240 $ find ../shared/* | sort
1257 ../shared/subrepo-1
1241 ../shared/subrepo-1
1258 ../shared/subrepo-1/.hg
1242 ../shared/subrepo-1/.hg
1259 ../shared/subrepo-1/.hg/cache
1243 ../shared/subrepo-1/.hg/cache
1260 ../shared/subrepo-1/.hg/cache/storehash
1244 ../shared/subrepo-1/.hg/cache/storehash
1261 ../shared/subrepo-1/.hg/cache/storehash/* (glob)
1245 ../shared/subrepo-1/.hg/cache/storehash/* (glob)
1262 ../shared/subrepo-1/.hg/hgrc
1246 ../shared/subrepo-1/.hg/hgrc
1263 ../shared/subrepo-1/.hg/requires
1247 ../shared/subrepo-1/.hg/requires
1264 ../shared/subrepo-1/.hg/sharedpath
1248 ../shared/subrepo-1/.hg/sharedpath
1265 ../shared/subrepo-1/.hg/wcache
1249 ../shared/subrepo-1/.hg/wcache
1266 ../shared/subrepo-2
1250 ../shared/subrepo-2
1267 ../shared/subrepo-2/.hg
1251 ../shared/subrepo-2/.hg
1268 ../shared/subrepo-2/.hg/branch
1252 ../shared/subrepo-2/.hg/branch
1269 ../shared/subrepo-2/.hg/cache
1253 ../shared/subrepo-2/.hg/cache
1270 ../shared/subrepo-2/.hg/cache/storehash
1254 ../shared/subrepo-2/.hg/cache/storehash
1271 ../shared/subrepo-2/.hg/cache/storehash/* (glob)
1255 ../shared/subrepo-2/.hg/cache/storehash/* (glob)
1272 ../shared/subrepo-2/.hg/dirstate
1256 ../shared/subrepo-2/.hg/dirstate
1273 ../shared/subrepo-2/.hg/hgrc
1257 ../shared/subrepo-2/.hg/hgrc
1274 ../shared/subrepo-2/.hg/requires
1258 ../shared/subrepo-2/.hg/requires
1275 ../shared/subrepo-2/.hg/sharedpath
1259 ../shared/subrepo-2/.hg/sharedpath
1276 ../shared/subrepo-2/.hg/wcache
1260 ../shared/subrepo-2/.hg/wcache
1277 ../shared/subrepo-2/.hg/wcache/checkisexec (execbit !)
1261 ../shared/subrepo-2/.hg/wcache/checkisexec (execbit !)
1278 ../shared/subrepo-2/.hg/wcache/checklink (symlink no-rust !)
1262 ../shared/subrepo-2/.hg/wcache/checklink (symlink no-rust !)
1279 ../shared/subrepo-2/.hg/wcache/checklink-target (symlink no-rust !)
1263 ../shared/subrepo-2/.hg/wcache/checklink-target (symlink no-rust !)
1280 ../shared/subrepo-2/.hg/wcache/manifestfulltextcache (reporevlogstore !)
1264 ../shared/subrepo-2/.hg/wcache/manifestfulltextcache (reporevlogstore !)
1281 ../shared/subrepo-2/file
1265 ../shared/subrepo-2/file
1282 $ hg -R ../shared in
1266 $ hg -R ../shared in
1283 abort: repository default not found
1267 abort: repository default not found
1284 [255]
1268 [255]
1285 $ hg -R ../shared/subrepo-2 showconfig paths
1269 $ hg -R ../shared/subrepo-2 showconfig paths
1286 paths.default=$TESTTMP/subrepo-status/subrepo-2
1270 paths.default=$TESTTMP/subrepo-status/subrepo-2
1287 $ hg -R ../shared/subrepo-1 sum --remote
1271 $ hg -R ../shared/subrepo-1 sum --remote
1288 parent: -1:000000000000 tip (empty repository)
1272 parent: -1:000000000000 tip (empty repository)
1289 branch: default
1273 branch: default
1290 commit: (clean)
1274 commit: (clean)
1291 update: (current)
1275 update: (current)
1292 remote: (synced)
1276 remote: (synced)
1293
1277
1294 Check hg update --clean
1278 Check hg update --clean
1295 $ cd $TESTTMP/t
1279 $ cd $TESTTMP/t
1296 $ rm -r t/t.orig
1280 $ rm -r t/t.orig
1297 $ hg status -S --all
1281 $ hg status -S --all
1298 C .hgsub
1282 C .hgsub
1299 C .hgsubstate
1283 C .hgsubstate
1300 C a
1284 C a
1301 C s/.hgsub
1285 C s/.hgsub
1302 C s/.hgsubstate
1286 C s/.hgsubstate
1303 C s/a
1287 C s/a
1304 C s/ss/a
1288 C s/ss/a
1305 C t/t
1289 C t/t
1306 $ echo c1 > s/a
1290 $ echo c1 > s/a
1307 $ cd s
1291 $ cd s
1308 $ echo c1 > b
1292 $ echo c1 > b
1309 $ echo c1 > c
1293 $ echo c1 > c
1310 $ hg add b
1294 $ hg add b
1311 $ cd ..
1295 $ cd ..
1312 $ hg status -S
1296 $ hg status -S
1313 M s/a
1297 M s/a
1314 A s/b
1298 A s/b
1315 ? s/c
1299 ? s/c
1316 $ hg update -C
1300 $ hg update -C
1317 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
1301 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
1318 updated to "925c17564ef8: 13"
1302 updated to "925c17564ef8: 13"
1319 2 other heads for branch "default"
1303 2 other heads for branch "default"
1320 $ hg status -S
1304 $ hg status -S
1321 ? s/b
1305 ? s/b
1322 ? s/c
1306 ? s/c
1323
1307
1324 Sticky subrepositories, no changes
1308 Sticky subrepositories, no changes
1325 $ cd $TESTTMP/t
1309 $ cd $TESTTMP/t
1326 $ hg id
1310 $ hg id
1327 925c17564ef8 tip
1311 925c17564ef8 tip
1328 $ hg -R s id
1312 $ hg -R s id
1329 12a213df6fa9 tip
1313 12a213df6fa9 tip
1330 $ hg -R t id
1314 $ hg -R t id
1331 52c0adc0515a tip
1315 52c0adc0515a tip
1332 $ hg update 11
1316 $ hg update 11
1333 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
1317 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
1334 $ hg id
1318 $ hg id
1335 365661e5936a
1319 365661e5936a
1336 $ hg -R s id
1320 $ hg -R s id
1337 fc627a69481f
1321 fc627a69481f
1338 $ hg -R t id
1322 $ hg -R t id
1339 e95bcfa18a35
1323 e95bcfa18a35
1340
1324
1341 Sticky subrepositories, file changes
1325 Sticky subrepositories, file changes
1342 $ touch s/f1
1326 $ touch s/f1
1343 $ touch t/f1
1327 $ touch t/f1
1344 $ hg add -S s/f1
1328 $ hg add -S s/f1
1345 $ hg add -S t/f1
1329 $ hg add -S t/f1
1346 $ hg id
1330 $ hg id
1347 365661e5936a+
1331 365661e5936a+
1348 $ hg -R s id
1332 $ hg -R s id
1349 fc627a69481f+
1333 fc627a69481f+
1350 $ hg -R t id
1334 $ hg -R t id
1351 e95bcfa18a35+
1335 e95bcfa18a35+
1352 $ hg update tip
1336 $ hg update tip
1353 subrepository s diverged (local revision: fc627a69481f, remote revision: 12a213df6fa9)
1337 subrepository s diverged (local revision: fc627a69481f, remote revision: 12a213df6fa9)
1354 you can (m)erge, keep (l)ocal [working copy] or keep (r)emote [destination].
1338 you can (m)erge, keep (l)ocal [working copy] or keep (r)emote [destination].
1355 what do you want to do? m
1339 what do you want to do? m
1356 subrepository sources for s differ
1340 subrepository sources for s differ
1357 you can use (l)ocal source (fc627a69481f) or (r)emote source (12a213df6fa9).
1341 you can use (l)ocal source (fc627a69481f) or (r)emote source (12a213df6fa9).
1358 what do you want to do? l
1342 what do you want to do? l
1359 subrepository t diverged (local revision: e95bcfa18a35, remote revision: 52c0adc0515a)
1343 subrepository t diverged (local revision: e95bcfa18a35, remote revision: 52c0adc0515a)
1360 you can (m)erge, keep (l)ocal [working copy] or keep (r)emote [destination].
1344 you can (m)erge, keep (l)ocal [working copy] or keep (r)emote [destination].
1361 what do you want to do? m
1345 what do you want to do? m
1362 subrepository sources for t differ
1346 subrepository sources for t differ
1363 you can use (l)ocal source (e95bcfa18a35) or (r)emote source (52c0adc0515a).
1347 you can use (l)ocal source (e95bcfa18a35) or (r)emote source (52c0adc0515a).
1364 what do you want to do? l
1348 what do you want to do? l
1365 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
1349 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
1366 $ hg id
1350 $ hg id
1367 925c17564ef8+ tip
1351 925c17564ef8+ tip
1368 $ hg -R s id
1352 $ hg -R s id
1369 fc627a69481f+
1353 fc627a69481f+
1370 $ hg -R t id
1354 $ hg -R t id
1371 e95bcfa18a35+
1355 e95bcfa18a35+
1372 $ hg update --clean tip
1356 $ hg update --clean tip
1373 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
1357 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
1374
1358
1375 Sticky subrepository, revision updates
1359 Sticky subrepository, revision updates
1376 $ hg id
1360 $ hg id
1377 925c17564ef8 tip
1361 925c17564ef8 tip
1378 $ hg -R s id
1362 $ hg -R s id
1379 12a213df6fa9 tip
1363 12a213df6fa9 tip
1380 $ hg -R t id
1364 $ hg -R t id
1381 52c0adc0515a tip
1365 52c0adc0515a tip
1382 $ cd s
1366 $ cd s
1383 $ hg update -r -2
1367 $ hg update -r -2
1384 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
1368 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
1385 $ cd ../t
1369 $ cd ../t
1386 $ hg update -r 2
1370 $ hg update -r 2
1387 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
1371 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
1388 $ cd ..
1372 $ cd ..
1389 $ hg update 10
1373 $ hg update 10
1390 subrepository s diverged (local revision: 12a213df6fa9, remote revision: fc627a69481f)
1374 subrepository s diverged (local revision: 12a213df6fa9, remote revision: fc627a69481f)
1391 you can (m)erge, keep (l)ocal [working copy] or keep (r)emote [destination].
1375 you can (m)erge, keep (l)ocal [working copy] or keep (r)emote [destination].
1392 what do you want to do? m
1376 what do you want to do? m
1393 subrepository t diverged (local revision: 52c0adc0515a, remote revision: 20a0db6fbf6c)
1377 subrepository t diverged (local revision: 52c0adc0515a, remote revision: 20a0db6fbf6c)
1394 you can (m)erge, keep (l)ocal [working copy] or keep (r)emote [destination].
1378 you can (m)erge, keep (l)ocal [working copy] or keep (r)emote [destination].
1395 what do you want to do? m
1379 what do you want to do? m
1396 subrepository sources for t differ (in checked out version)
1380 subrepository sources for t differ (in checked out version)
1397 you can use (l)ocal source (7af322bc1198) or (r)emote source (20a0db6fbf6c).
1381 you can use (l)ocal source (7af322bc1198) or (r)emote source (20a0db6fbf6c).
1398 what do you want to do? l
1382 what do you want to do? l
1399 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
1383 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
1400 $ hg id
1384 $ hg id
1401 e45c8b14af55+
1385 e45c8b14af55+
1402 $ hg -R s id
1386 $ hg -R s id
1403 02dcf1d70411
1387 02dcf1d70411
1404 $ hg -R t id
1388 $ hg -R t id
1405 7af322bc1198
1389 7af322bc1198
1406
1390
1407 Sticky subrepository, file changes and revision updates
1391 Sticky subrepository, file changes and revision updates
1408 $ touch s/f1
1392 $ touch s/f1
1409 $ touch t/f1
1393 $ touch t/f1
1410 $ hg add -S s/f1
1394 $ hg add -S s/f1
1411 $ hg add -S t/f1
1395 $ hg add -S t/f1
1412 $ hg id
1396 $ hg id
1413 e45c8b14af55+
1397 e45c8b14af55+
1414 $ hg -R s id
1398 $ hg -R s id
1415 02dcf1d70411+
1399 02dcf1d70411+
1416 $ hg -R t id
1400 $ hg -R t id
1417 7af322bc1198+
1401 7af322bc1198+
1418 $ hg update tip
1402 $ hg update tip
1419 subrepository s diverged (local revision: 12a213df6fa9, remote revision: 12a213df6fa9)
1403 subrepository s diverged (local revision: 12a213df6fa9, remote revision: 12a213df6fa9)
1420 you can (m)erge, keep (l)ocal [working copy] or keep (r)emote [destination].
1404 you can (m)erge, keep (l)ocal [working copy] or keep (r)emote [destination].
1421 what do you want to do? m
1405 what do you want to do? m
1422 subrepository sources for s differ
1406 subrepository sources for s differ
1423 you can use (l)ocal source (02dcf1d70411) or (r)emote source (12a213df6fa9).
1407 you can use (l)ocal source (02dcf1d70411) or (r)emote source (12a213df6fa9).
1424 what do you want to do? l
1408 what do you want to do? l
1425 subrepository t diverged (local revision: 52c0adc0515a, remote revision: 52c0adc0515a)
1409 subrepository t diverged (local revision: 52c0adc0515a, remote revision: 52c0adc0515a)
1426 you can (m)erge, keep (l)ocal [working copy] or keep (r)emote [destination].
1410 you can (m)erge, keep (l)ocal [working copy] or keep (r)emote [destination].
1427 what do you want to do? m
1411 what do you want to do? m
1428 subrepository sources for t differ
1412 subrepository sources for t differ
1429 you can use (l)ocal source (7af322bc1198) or (r)emote source (52c0adc0515a).
1413 you can use (l)ocal source (7af322bc1198) or (r)emote source (52c0adc0515a).
1430 what do you want to do? l
1414 what do you want to do? l
1431 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
1415 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
1432 $ hg id
1416 $ hg id
1433 925c17564ef8+ tip
1417 925c17564ef8+ tip
1434 $ hg -R s id
1418 $ hg -R s id
1435 02dcf1d70411+
1419 02dcf1d70411+
1436 $ hg -R t id
1420 $ hg -R t id
1437 7af322bc1198+
1421 7af322bc1198+
1438
1422
1439 Sticky repository, update --clean
1423 Sticky repository, update --clean
1440 $ hg update --clean tip
1424 $ hg update --clean tip
1441 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
1425 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
1442 $ hg id
1426 $ hg id
1443 925c17564ef8 tip
1427 925c17564ef8 tip
1444 $ hg -R s id
1428 $ hg -R s id
1445 12a213df6fa9 tip
1429 12a213df6fa9 tip
1446 $ hg -R t id
1430 $ hg -R t id
1447 52c0adc0515a tip
1431 52c0adc0515a tip
1448
1432
1449 Test subrepo already at intended revision:
1433 Test subrepo already at intended revision:
1450 $ cd s
1434 $ cd s
1451 $ hg update fc627a69481f
1435 $ hg update fc627a69481f
1452 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
1436 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
1453 $ cd ..
1437 $ cd ..
1454 $ hg update 11
1438 $ hg update 11
1455 subrepository s diverged (local revision: 12a213df6fa9, remote revision: fc627a69481f)
1439 subrepository s diverged (local revision: 12a213df6fa9, remote revision: fc627a69481f)
1456 you can (m)erge, keep (l)ocal [working copy] or keep (r)emote [destination].
1440 you can (m)erge, keep (l)ocal [working copy] or keep (r)emote [destination].
1457 what do you want to do? m
1441 what do you want to do? m
1458 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
1442 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
1459 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
1443 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
1460 $ hg id -n
1444 $ hg id -n
1461 11+
1445 11+
1462 $ hg -R s id
1446 $ hg -R s id
1463 fc627a69481f
1447 fc627a69481f
1464 $ hg -R t id
1448 $ hg -R t id
1465 e95bcfa18a35
1449 e95bcfa18a35
1466
1450
1467 Test that removing .hgsubstate doesn't break anything:
1451 Test that removing .hgsubstate doesn't break anything:
1468
1452
1469 $ hg rm -f .hgsubstate
1453 $ hg rm -f .hgsubstate
1470 $ hg ci -mrm
1454 $ hg ci -mrm
1471 nothing changed
1455 nothing changed
1472 [1]
1456 [1]
1473 $ hg log -vr tip
1457 $ hg log -vr tip
1474 changeset: 13:925c17564ef8
1458 changeset: 13:925c17564ef8
1475 tag: tip
1459 tag: tip
1476 user: test
1460 user: test
1477 date: Thu Jan 01 00:00:00 1970 +0000
1461 date: Thu Jan 01 00:00:00 1970 +0000
1478 files: .hgsubstate
1462 files: .hgsubstate
1479 description:
1463 description:
1480 13
1464 13
1481
1465
1482
1466
1483
1467
1484 Test that removing .hgsub removes .hgsubstate:
1468 Test that removing .hgsub removes .hgsubstate:
1485
1469
1486 $ hg rm .hgsub
1470 $ hg rm .hgsub
1487 $ hg ci -mrm2
1471 $ hg ci -mrm2
1488 created new head
1472 created new head
1489 $ hg log -vr tip
1473 $ hg log -vr tip
1490 changeset: 14:2400bccd50af
1474 changeset: 14:2400bccd50af
1491 tag: tip
1475 tag: tip
1492 parent: 11:365661e5936a
1476 parent: 11:365661e5936a
1493 user: test
1477 user: test
1494 date: Thu Jan 01 00:00:00 1970 +0000
1478 date: Thu Jan 01 00:00:00 1970 +0000
1495 files: .hgsub .hgsubstate
1479 files: .hgsub .hgsubstate
1496 description:
1480 description:
1497 rm2
1481 rm2
1498
1482
1499
1483
1500 Test issue3153: diff -S with deleted subrepos
1484 Test issue3153: diff -S with deleted subrepos
1501
1485
1502 $ hg diff --nodates -S -c .
1486 $ hg diff --nodates -S -c .
1503 diff -r 365661e5936a -r 2400bccd50af .hgsub
1487 diff -r 365661e5936a -r 2400bccd50af .hgsub
1504 --- a/.hgsub
1488 --- a/.hgsub
1505 +++ /dev/null
1489 +++ /dev/null
1506 @@ -1,2 +0,0 @@
1490 @@ -1,2 +0,0 @@
1507 -s = s
1491 -s = s
1508 -t = t
1492 -t = t
1509 diff -r 365661e5936a -r 2400bccd50af .hgsubstate
1493 diff -r 365661e5936a -r 2400bccd50af .hgsubstate
1510 --- a/.hgsubstate
1494 --- a/.hgsubstate
1511 +++ /dev/null
1495 +++ /dev/null
1512 @@ -1,2 +0,0 @@
1496 @@ -1,2 +0,0 @@
1513 -fc627a69481fcbe5f1135069e8a3881c023e4cf5 s
1497 -fc627a69481fcbe5f1135069e8a3881c023e4cf5 s
1514 -e95bcfa18a358dc4936da981ebf4147b4cad1362 t
1498 -e95bcfa18a358dc4936da981ebf4147b4cad1362 t
1515
1499
1516 Test behavior of add for explicit path in subrepo:
1500 Test behavior of add for explicit path in subrepo:
1517 $ cd ..
1501 $ cd ..
1518 $ hg init explicit
1502 $ hg init explicit
1519 $ cd explicit
1503 $ cd explicit
1520 $ echo s = s > .hgsub
1504 $ echo s = s > .hgsub
1521 $ hg add .hgsub
1505 $ hg add .hgsub
1522 $ hg init s
1506 $ hg init s
1523 $ hg ci -m0
1507 $ hg ci -m0
1524 Adding with an explicit path in a subrepo adds the file
1508 Adding with an explicit path in a subrepo adds the file
1525 $ echo c1 > f1
1509 $ echo c1 > f1
1526 $ echo c2 > s/f2
1510 $ echo c2 > s/f2
1527 $ hg st -S
1511 $ hg st -S
1528 ? f1
1512 ? f1
1529 ? s/f2
1513 ? s/f2
1530 $ hg add s/f2
1514 $ hg add s/f2
1531 $ hg st -S
1515 $ hg st -S
1532 A s/f2
1516 A s/f2
1533 ? f1
1517 ? f1
1534 $ hg ci -R s -m0
1518 $ hg ci -R s -m0
1535 $ hg ci -Am1
1519 $ hg ci -Am1
1536 adding f1
1520 adding f1
1537 Adding with an explicit path in a subrepo with -S has the same behavior
1521 Adding with an explicit path in a subrepo with -S has the same behavior
1538 $ echo c3 > f3
1522 $ echo c3 > f3
1539 $ echo c4 > s/f4
1523 $ echo c4 > s/f4
1540 $ hg st -S
1524 $ hg st -S
1541 ? f3
1525 ? f3
1542 ? s/f4
1526 ? s/f4
1543 $ hg add -S s/f4
1527 $ hg add -S s/f4
1544 $ hg st -S
1528 $ hg st -S
1545 A s/f4
1529 A s/f4
1546 ? f3
1530 ? f3
1547 $ hg ci -R s -m1
1531 $ hg ci -R s -m1
1548 $ hg ci -Ama2
1532 $ hg ci -Ama2
1549 adding f3
1533 adding f3
1550 Adding without a path or pattern silently ignores subrepos
1534 Adding without a path or pattern silently ignores subrepos
1551 $ echo c5 > f5
1535 $ echo c5 > f5
1552 $ echo c6 > s/f6
1536 $ echo c6 > s/f6
1553 $ echo c7 > s/f7
1537 $ echo c7 > s/f7
1554 $ hg st -S
1538 $ hg st -S
1555 ? f5
1539 ? f5
1556 ? s/f6
1540 ? s/f6
1557 ? s/f7
1541 ? s/f7
1558 $ hg add
1542 $ hg add
1559 adding f5
1543 adding f5
1560 $ hg st -S
1544 $ hg st -S
1561 A f5
1545 A f5
1562 ? s/f6
1546 ? s/f6
1563 ? s/f7
1547 ? s/f7
1564 $ hg ci -R s -Am2
1548 $ hg ci -R s -Am2
1565 adding f6
1549 adding f6
1566 adding f7
1550 adding f7
1567 $ hg ci -m3
1551 $ hg ci -m3
1568 Adding without a path or pattern with -S also adds files in subrepos
1552 Adding without a path or pattern with -S also adds files in subrepos
1569 $ echo c8 > f8
1553 $ echo c8 > f8
1570 $ echo c9 > s/f9
1554 $ echo c9 > s/f9
1571 $ echo c10 > s/f10
1555 $ echo c10 > s/f10
1572 $ hg st -S
1556 $ hg st -S
1573 ? f8
1557 ? f8
1574 ? s/f10
1558 ? s/f10
1575 ? s/f9
1559 ? s/f9
1576 $ hg add -S
1560 $ hg add -S
1577 adding f8
1561 adding f8
1578 adding s/f10
1562 adding s/f10
1579 adding s/f9
1563 adding s/f9
1580 $ hg st -S
1564 $ hg st -S
1581 A f8
1565 A f8
1582 A s/f10
1566 A s/f10
1583 A s/f9
1567 A s/f9
1584 $ hg ci -R s -m3
1568 $ hg ci -R s -m3
1585 $ hg ci -m4
1569 $ hg ci -m4
1586 Adding with a pattern silently ignores subrepos
1570 Adding with a pattern silently ignores subrepos
1587 $ echo c11 > fm11
1571 $ echo c11 > fm11
1588 $ echo c12 > fn12
1572 $ echo c12 > fn12
1589 $ echo c13 > s/fm13
1573 $ echo c13 > s/fm13
1590 $ echo c14 > s/fn14
1574 $ echo c14 > s/fn14
1591 $ hg st -S
1575 $ hg st -S
1592 ? fm11
1576 ? fm11
1593 ? fn12
1577 ? fn12
1594 ? s/fm13
1578 ? s/fm13
1595 ? s/fn14
1579 ? s/fn14
1596 $ hg add 'glob:**fm*'
1580 $ hg add 'glob:**fm*'
1597 adding fm11
1581 adding fm11
1598 $ hg st -S
1582 $ hg st -S
1599 A fm11
1583 A fm11
1600 ? fn12
1584 ? fn12
1601 ? s/fm13
1585 ? s/fm13
1602 ? s/fn14
1586 ? s/fn14
1603 $ hg ci -R s -Am4
1587 $ hg ci -R s -Am4
1604 adding fm13
1588 adding fm13
1605 adding fn14
1589 adding fn14
1606 $ hg ci -Am5
1590 $ hg ci -Am5
1607 adding fn12
1591 adding fn12
1608 Adding with a pattern with -S also adds matches in subrepos
1592 Adding with a pattern with -S also adds matches in subrepos
1609 $ echo c15 > fm15
1593 $ echo c15 > fm15
1610 $ echo c16 > fn16
1594 $ echo c16 > fn16
1611 $ echo c17 > s/fm17
1595 $ echo c17 > s/fm17
1612 $ echo c18 > s/fn18
1596 $ echo c18 > s/fn18
1613 $ hg st -S
1597 $ hg st -S
1614 ? fm15
1598 ? fm15
1615 ? fn16
1599 ? fn16
1616 ? s/fm17
1600 ? s/fm17
1617 ? s/fn18
1601 ? s/fn18
1618 $ hg add -S 'glob:**fm*'
1602 $ hg add -S 'glob:**fm*'
1619 adding fm15
1603 adding fm15
1620 adding s/fm17
1604 adding s/fm17
1621 $ hg st -S
1605 $ hg st -S
1622 A fm15
1606 A fm15
1623 A s/fm17
1607 A s/fm17
1624 ? fn16
1608 ? fn16
1625 ? s/fn18
1609 ? s/fn18
1626 $ hg ci -R s -Am5
1610 $ hg ci -R s -Am5
1627 adding fn18
1611 adding fn18
1628 $ hg ci -Am6
1612 $ hg ci -Am6
1629 adding fn16
1613 adding fn16
1630
1614
1631 Test behavior of forget for explicit path in subrepo:
1615 Test behavior of forget for explicit path in subrepo:
1632 Forgetting an explicit path in a subrepo untracks the file
1616 Forgetting an explicit path in a subrepo untracks the file
1633 $ echo c19 > s/f19
1617 $ echo c19 > s/f19
1634 $ hg add s/f19
1618 $ hg add s/f19
1635 $ hg st -S
1619 $ hg st -S
1636 A s/f19
1620 A s/f19
1637 $ hg forget s/f19
1621 $ hg forget s/f19
1638 $ hg st -S
1622 $ hg st -S
1639 ? s/f19
1623 ? s/f19
1640 $ rm s/f19
1624 $ rm s/f19
1641 $ cd ..
1625 $ cd ..
1642
1626
1643 Courtesy phases synchronisation to publishing server does not block the push
1627 Courtesy phases synchronisation to publishing server does not block the push
1644 (issue3781)
1628 (issue3781)
1645
1629
1646 $ cp -R main issue3781
1630 $ cp -R main issue3781
1647 $ cp -R main issue3781-dest
1631 $ cp -R main issue3781-dest
1648 $ cd issue3781-dest/s
1632 $ cd issue3781-dest/s
1649 $ hg phase tip # show we have draft changeset
1633 $ hg phase tip # show we have draft changeset
1650 5: draft
1634 5: draft
1651 $ chmod a-w .hg/store/phaseroots # prevent phase push
1635 $ chmod a-w .hg/store/phaseroots # prevent phase push
1652 $ cd ../../issue3781
1636 $ cd ../../issue3781
1653 $ cat >> .hg/hgrc << EOF
1637 $ cat >> .hg/hgrc << EOF
1654 > [paths]
1638 > [paths]
1655 > default=../issue3781-dest/
1639 > default=../issue3781-dest/
1656 > EOF
1640 > EOF
1657 $ hg push --config devel.legacy.exchange=bundle1
1641 $ hg push --config devel.legacy.exchange=bundle1
1658 pushing to $TESTTMP/issue3781-dest
1642 pushing to $TESTTMP/issue3781-dest
1659 pushing subrepo s to $TESTTMP/issue3781-dest/s
1643 pushing subrepo s to $TESTTMP/issue3781-dest/s
1660 searching for changes
1644 searching for changes
1661 no changes found
1645 no changes found
1662 searching for changes
1646 searching for changes
1663 no changes found
1647 no changes found
1664 [1]
1648 [1]
1665 # clean the push cache
1649 # clean the push cache
1666 $ rm s/.hg/cache/storehash/*
1650 $ rm s/.hg/cache/storehash/*
1667 $ hg push # bundle2+
1651 $ hg push # bundle2+
1668 pushing to $TESTTMP/issue3781-dest
1652 pushing to $TESTTMP/issue3781-dest
1669 pushing subrepo s to $TESTTMP/issue3781-dest/s
1653 pushing subrepo s to $TESTTMP/issue3781-dest/s
1670 searching for changes
1654 searching for changes
1671 no changes found
1655 no changes found
1672 searching for changes
1656 searching for changes
1673 no changes found
1657 no changes found
1674 [1]
1658 [1]
1675 $ cd ..
1659 $ cd ..
1676
1660
1677 Test phase choice for newly created commit with "phases.subrepochecks"
1661 Test phase choice for newly created commit with "phases.subrepochecks"
1678 configuration
1662 configuration
1679
1663
1680 $ cd t
1664 $ cd t
1681 $ hg update -q -r 12
1665 $ hg update -q -r 12
1682
1666
1683 $ cat >> s/ss/.hg/hgrc <<EOF
1667 $ cat >> s/ss/.hg/hgrc <<EOF
1684 > [phases]
1668 > [phases]
1685 > new-commit = secret
1669 > new-commit = secret
1686 > EOF
1670 > EOF
1687 $ cat >> s/.hg/hgrc <<EOF
1671 $ cat >> s/.hg/hgrc <<EOF
1688 > [phases]
1672 > [phases]
1689 > new-commit = draft
1673 > new-commit = draft
1690 > EOF
1674 > EOF
1691 $ echo phasecheck1 >> s/ss/a
1675 $ echo phasecheck1 >> s/ss/a
1692 $ hg -R s commit -S --config phases.checksubrepos=abort -m phasecheck1
1676 $ hg -R s commit -S --config phases.checksubrepos=abort -m phasecheck1
1693 committing subrepository ss
1677 committing subrepository ss
1694 transaction abort!
1678 transaction abort!
1695 rollback completed
1679 rollback completed
1696 abort: can't commit in draft phase conflicting secret from subrepository ss
1680 abort: can't commit in draft phase conflicting secret from subrepository ss
1697 [255]
1681 [255]
1698 $ echo phasecheck2 >> s/ss/a
1682 $ echo phasecheck2 >> s/ss/a
1699 $ hg -R s commit -S --config phases.checksubrepos=ignore -m phasecheck2
1683 $ hg -R s commit -S --config phases.checksubrepos=ignore -m phasecheck2
1700 committing subrepository ss
1684 committing subrepository ss
1701 $ hg -R s/ss phase tip
1685 $ hg -R s/ss phase tip
1702 3: secret
1686 3: secret
1703 $ hg -R s phase tip
1687 $ hg -R s phase tip
1704 6: draft
1688 6: draft
1705 $ echo phasecheck3 >> s/ss/a
1689 $ echo phasecheck3 >> s/ss/a
1706 $ hg -R s commit -S -m phasecheck3
1690 $ hg -R s commit -S -m phasecheck3
1707 committing subrepository ss
1691 committing subrepository ss
1708 warning: changes are committed in secret phase from subrepository ss
1692 warning: changes are committed in secret phase from subrepository ss
1709 $ hg -R s/ss phase tip
1693 $ hg -R s/ss phase tip
1710 4: secret
1694 4: secret
1711 $ hg -R s phase tip
1695 $ hg -R s phase tip
1712 7: secret
1696 7: secret
1713
1697
1714 $ cat >> t/.hg/hgrc <<EOF
1698 $ cat >> t/.hg/hgrc <<EOF
1715 > [phases]
1699 > [phases]
1716 > new-commit = draft
1700 > new-commit = draft
1717 > EOF
1701 > EOF
1718 $ cat >> .hg/hgrc <<EOF
1702 $ cat >> .hg/hgrc <<EOF
1719 > [phases]
1703 > [phases]
1720 > new-commit = public
1704 > new-commit = public
1721 > EOF
1705 > EOF
1722 $ echo phasecheck4 >> s/ss/a
1706 $ echo phasecheck4 >> s/ss/a
1723 $ echo phasecheck4 >> t/t
1707 $ echo phasecheck4 >> t/t
1724 $ hg commit -S -m phasecheck4
1708 $ hg commit -S -m phasecheck4
1725 committing subrepository s
1709 committing subrepository s
1726 committing subrepository s/ss
1710 committing subrepository s/ss
1727 warning: changes are committed in secret phase from subrepository ss
1711 warning: changes are committed in secret phase from subrepository ss
1728 committing subrepository t
1712 committing subrepository t
1729 warning: changes are committed in secret phase from subrepository s
1713 warning: changes are committed in secret phase from subrepository s
1730 created new head
1714 created new head
1731 $ hg -R s/ss phase tip
1715 $ hg -R s/ss phase tip
1732 5: secret
1716 5: secret
1733 $ hg -R s phase tip
1717 $ hg -R s phase tip
1734 8: secret
1718 8: secret
1735 $ hg -R t phase tip
1719 $ hg -R t phase tip
1736 6: draft
1720 6: draft
1737 $ hg phase tip
1721 $ hg phase tip
1738 15: secret
1722 15: secret
1739
1723
1740 $ cd ..
1724 $ cd ..
1741
1725
1742
1726
1743 Test that commit --secret works on both repo and subrepo (issue4182)
1727 Test that commit --secret works on both repo and subrepo (issue4182)
1744
1728
1745 $ cd main
1729 $ cd main
1746 $ echo secret >> b
1730 $ echo secret >> b
1747 $ echo secret >> s/b
1731 $ echo secret >> s/b
1748 $ hg commit --secret --subrepo -m "secret"
1732 $ hg commit --secret --subrepo -m "secret"
1749 committing subrepository s
1733 committing subrepository s
1750 $ hg phase -r .
1734 $ hg phase -r .
1751 6: secret
1735 6: secret
1752 $ cd s
1736 $ cd s
1753 $ hg phase -r .
1737 $ hg phase -r .
1754 6: secret
1738 6: secret
1755 $ cd ../../
1739 $ cd ../../
1756
1740
1757 Test "subrepos" template keyword
1741 Test "subrepos" template keyword
1758
1742
1759 $ cd t
1743 $ cd t
1760 $ hg update -q 15
1744 $ hg update -q 15
1761 $ cat > .hgsub <<EOF
1745 $ cat > .hgsub <<EOF
1762 > s = s
1746 > s = s
1763 > EOF
1747 > EOF
1764 $ hg commit -m "16"
1748 $ hg commit -m "16"
1765 warning: changes are committed in secret phase from subrepository s
1749 warning: changes are committed in secret phase from subrepository s
1766
1750
1767 (addition of ".hgsub" itself)
1751 (addition of ".hgsub" itself)
1768
1752
1769 $ hg diff --nodates -c 1 .hgsubstate
1753 $ hg diff --nodates -c 1 .hgsubstate
1770 diff -r f7b1eb17ad24 -r 7cf8cfea66e4 .hgsubstate
1754 diff -r f7b1eb17ad24 -r 7cf8cfea66e4 .hgsubstate
1771 --- /dev/null
1755 --- /dev/null
1772 +++ b/.hgsubstate
1756 +++ b/.hgsubstate
1773 @@ -0,0 +1,1 @@
1757 @@ -0,0 +1,1 @@
1774 +e4ece1bf43360ddc8f6a96432201a37b7cd27ae4 s
1758 +e4ece1bf43360ddc8f6a96432201a37b7cd27ae4 s
1775 $ hg log -r 1 --template "{p1node|short} {p2node|short}\n{subrepos % '{subrepo}\n'}"
1759 $ hg log -r 1 --template "{p1node|short} {p2node|short}\n{subrepos % '{subrepo}\n'}"
1776 f7b1eb17ad24 000000000000
1760 f7b1eb17ad24 000000000000
1777 s
1761 s
1778
1762
1779 (modification of existing entry)
1763 (modification of existing entry)
1780
1764
1781 $ hg diff --nodates -c 2 .hgsubstate
1765 $ hg diff --nodates -c 2 .hgsubstate
1782 diff -r 7cf8cfea66e4 -r df30734270ae .hgsubstate
1766 diff -r 7cf8cfea66e4 -r df30734270ae .hgsubstate
1783 --- a/.hgsubstate
1767 --- a/.hgsubstate
1784 +++ b/.hgsubstate
1768 +++ b/.hgsubstate
1785 @@ -1,1 +1,1 @@
1769 @@ -1,1 +1,1 @@
1786 -e4ece1bf43360ddc8f6a96432201a37b7cd27ae4 s
1770 -e4ece1bf43360ddc8f6a96432201a37b7cd27ae4 s
1787 +dc73e2e6d2675eb2e41e33c205f4bdab4ea5111d s
1771 +dc73e2e6d2675eb2e41e33c205f4bdab4ea5111d s
1788 $ hg log -r 2 --template "{p1node|short} {p2node|short}\n{subrepos % '{subrepo}\n'}"
1772 $ hg log -r 2 --template "{p1node|short} {p2node|short}\n{subrepos % '{subrepo}\n'}"
1789 7cf8cfea66e4 000000000000
1773 7cf8cfea66e4 000000000000
1790 s
1774 s
1791
1775
1792 (addition of entry)
1776 (addition of entry)
1793
1777
1794 $ hg diff --nodates -c 5 .hgsubstate
1778 $ hg diff --nodates -c 5 .hgsubstate
1795 diff -r 7cf8cfea66e4 -r 1f14a2e2d3ec .hgsubstate
1779 diff -r 7cf8cfea66e4 -r 1f14a2e2d3ec .hgsubstate
1796 --- a/.hgsubstate
1780 --- a/.hgsubstate
1797 +++ b/.hgsubstate
1781 +++ b/.hgsubstate
1798 @@ -1,1 +1,2 @@
1782 @@ -1,1 +1,2 @@
1799 e4ece1bf43360ddc8f6a96432201a37b7cd27ae4 s
1783 e4ece1bf43360ddc8f6a96432201a37b7cd27ae4 s
1800 +60ca1237c19474e7a3978b0dc1ca4e6f36d51382 t
1784 +60ca1237c19474e7a3978b0dc1ca4e6f36d51382 t
1801 $ hg log -r 5 --template "{p1node|short} {p2node|short}\n{subrepos % '{subrepo}\n'}"
1785 $ hg log -r 5 --template "{p1node|short} {p2node|short}\n{subrepos % '{subrepo}\n'}"
1802 7cf8cfea66e4 000000000000
1786 7cf8cfea66e4 000000000000
1803 t
1787 t
1804
1788
1805 (removal of existing entry)
1789 (removal of existing entry)
1806
1790
1807 $ hg diff --nodates -c 16 .hgsubstate
1791 $ hg diff --nodates -c 16 .hgsubstate
1808 diff -r 8bec38d2bd0b -r f2f70bc3d3c9 .hgsubstate
1792 diff -r 8bec38d2bd0b -r f2f70bc3d3c9 .hgsubstate
1809 --- a/.hgsubstate
1793 --- a/.hgsubstate
1810 +++ b/.hgsubstate
1794 +++ b/.hgsubstate
1811 @@ -1,2 +1,1 @@
1795 @@ -1,2 +1,1 @@
1812 0731af8ca9423976d3743119d0865097c07bdc1b s
1796 0731af8ca9423976d3743119d0865097c07bdc1b s
1813 -e202dc79b04c88a636ea8913d9182a1346d9b3dc t
1797 -e202dc79b04c88a636ea8913d9182a1346d9b3dc t
1814 $ hg log -r 16 --template "{p1node|short} {p2node|short}\n{subrepos % '{subrepo}\n'}"
1798 $ hg log -r 16 --template "{p1node|short} {p2node|short}\n{subrepos % '{subrepo}\n'}"
1815 8bec38d2bd0b 000000000000
1799 8bec38d2bd0b 000000000000
1816 t
1800 t
1817
1801
1818 (merging)
1802 (merging)
1819
1803
1820 $ hg diff --nodates -c 9 .hgsubstate
1804 $ hg diff --nodates -c 9 .hgsubstate
1821 diff -r f6affe3fbfaa -r f0d2028bf86d .hgsubstate
1805 diff -r f6affe3fbfaa -r f0d2028bf86d .hgsubstate
1822 --- a/.hgsubstate
1806 --- a/.hgsubstate
1823 +++ b/.hgsubstate
1807 +++ b/.hgsubstate
1824 @@ -1,1 +1,2 @@
1808 @@ -1,1 +1,2 @@
1825 fc627a69481fcbe5f1135069e8a3881c023e4cf5 s
1809 fc627a69481fcbe5f1135069e8a3881c023e4cf5 s
1826 +60ca1237c19474e7a3978b0dc1ca4e6f36d51382 t
1810 +60ca1237c19474e7a3978b0dc1ca4e6f36d51382 t
1827 $ hg log -r 9 --template "{p1node|short} {p2node|short}\n{subrepos % '{subrepo}\n'}"
1811 $ hg log -r 9 --template "{p1node|short} {p2node|short}\n{subrepos % '{subrepo}\n'}"
1828 f6affe3fbfaa 1f14a2e2d3ec
1812 f6affe3fbfaa 1f14a2e2d3ec
1829 t
1813 t
1830
1814
1831 (removal of ".hgsub" itself)
1815 (removal of ".hgsub" itself)
1832
1816
1833 $ hg diff --nodates -c 8 .hgsubstate
1817 $ hg diff --nodates -c 8 .hgsubstate
1834 diff -r f94576341bcf -r 96615c1dad2d .hgsubstate
1818 diff -r f94576341bcf -r 96615c1dad2d .hgsubstate
1835 --- a/.hgsubstate
1819 --- a/.hgsubstate
1836 +++ /dev/null
1820 +++ /dev/null
1837 @@ -1,2 +0,0 @@
1821 @@ -1,2 +0,0 @@
1838 -e4ece1bf43360ddc8f6a96432201a37b7cd27ae4 s
1822 -e4ece1bf43360ddc8f6a96432201a37b7cd27ae4 s
1839 -7af322bc1198a32402fe903e0b7ebcfc5c9bf8f4 t
1823 -7af322bc1198a32402fe903e0b7ebcfc5c9bf8f4 t
1840 $ hg log -r 8 --template "{p1node|short} {p2node|short}\n{subrepos % '{subrepo}\n'}"
1824 $ hg log -r 8 --template "{p1node|short} {p2node|short}\n{subrepos % '{subrepo}\n'}"
1841 f94576341bcf 000000000000
1825 f94576341bcf 000000000000
1842
1826
1843 Test that '[paths]' is configured correctly at subrepo creation
1827 Test that '[paths]' is configured correctly at subrepo creation
1844
1828
1845 $ cd $TESTTMP/tc
1829 $ cd $TESTTMP/tc
1846 $ cat > .hgsub <<EOF
1830 $ cat > .hgsub <<EOF
1847 > # to clear bogus subrepo path 'bogus=[boguspath'
1831 > # to clear bogus subrepo path 'bogus=[boguspath'
1848 > s = s
1832 > s = s
1849 > t = t
1833 > t = t
1850 > EOF
1834 > EOF
1851 $ hg update -q --clean null
1835 $ hg update -q --clean null
1852 $ rm -rf s t
1836 $ rm -rf s t
1853 $ cat >> .hg/hgrc <<EOF
1837 $ cat >> .hg/hgrc <<EOF
1854 > [paths]
1838 > [paths]
1855 > default-push = /foo/bar
1839 > default-push = /foo/bar
1856 > EOF
1840 > EOF
1857 $ hg update -q
1841 $ hg update -q
1858 $ cat s/.hg/hgrc
1842 $ cat s/.hg/hgrc
1859 [paths]
1843 [paths]
1860 default = $TESTTMP/t/s
1844 default = $TESTTMP/t/s
1861 default-push = /foo/bar/s
1845 default-push = /foo/bar/s
1862 $ cat s/ss/.hg/hgrc
1846 $ cat s/ss/.hg/hgrc
1863 [paths]
1847 [paths]
1864 default = $TESTTMP/t/s/ss
1848 default = $TESTTMP/t/s/ss
1865 default-push = /foo/bar/s/ss
1849 default-push = /foo/bar/s/ss
1866 $ cat t/.hg/hgrc
1850 $ cat t/.hg/hgrc
1867 [paths]
1851 [paths]
1868 default = $TESTTMP/t/t
1852 default = $TESTTMP/t/t
1869 default-push = /foo/bar/t
1853 default-push = /foo/bar/t
1870
1854
1871 $ cd $TESTTMP/t
1855 $ cd $TESTTMP/t
1872 $ hg up -qC 0
1856 $ hg up -qC 0
1873 $ echo 'bar' > bar.txt
1857 $ echo 'bar' > bar.txt
1874 $ hg ci -Am 'branch before subrepo add'
1858 $ hg ci -Am 'branch before subrepo add'
1875 adding bar.txt
1859 adding bar.txt
1876 created new head
1860 created new head
1877 $ hg merge -r "first(subrepo('s'))"
1861 $ hg merge -r "first(subrepo('s'))"
1878 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
1862 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
1879 (branch merge, don't forget to commit)
1863 (branch merge, don't forget to commit)
1880 $ hg status -S -X '.hgsub*'
1864 $ hg status -S -X '.hgsub*'
1881 A s/a
1865 A s/a
1882 ? s/b
1866 ? s/b
1883 ? s/c
1867 ? s/c
1884 ? s/f1
1868 ? s/f1
1885 $ hg status -S --rev 'p2()'
1869 $ hg status -S --rev 'p2()'
1886 A bar.txt
1870 A bar.txt
1887 ? s/b
1871 ? s/b
1888 ? s/c
1872 ? s/c
1889 ? s/f1
1873 ? s/f1
1890 $ hg diff -S -X '.hgsub*' --nodates
1874 $ hg diff -S -X '.hgsub*' --nodates
1891 diff -r 000000000000 s/a
1875 diff -r 000000000000 s/a
1892 --- /dev/null
1876 --- /dev/null
1893 +++ b/s/a
1877 +++ b/s/a
1894 @@ -0,0 +1,1 @@
1878 @@ -0,0 +1,1 @@
1895 +a
1879 +a
1896 $ hg diff -S --rev 'p2()' --nodates
1880 $ hg diff -S --rev 'p2()' --nodates
1897 diff -r 7cf8cfea66e4 bar.txt
1881 diff -r 7cf8cfea66e4 bar.txt
1898 --- /dev/null
1882 --- /dev/null
1899 +++ b/bar.txt
1883 +++ b/bar.txt
1900 @@ -0,0 +1,1 @@
1884 @@ -0,0 +1,1 @@
1901 +bar
1885 +bar
1902
1886
1903 $ hg diff -X '.hgsub*' --nodates s
1887 $ hg diff -X '.hgsub*' --nodates s
1904 diff -r 000000000000 s/a
1888 diff -r 000000000000 s/a
1905 --- /dev/null
1889 --- /dev/null
1906 +++ b/s/a
1890 +++ b/s/a
1907 @@ -0,0 +1,1 @@
1891 @@ -0,0 +1,1 @@
1908 +a
1892 +a
1909 $ hg diff -X '.hgsub*' --nodates s/a
1893 $ hg diff -X '.hgsub*' --nodates s/a
1910 diff -r 000000000000 s/a
1894 diff -r 000000000000 s/a
1911 --- /dev/null
1895 --- /dev/null
1912 +++ b/s/a
1896 +++ b/s/a
1913 @@ -0,0 +1,1 @@
1897 @@ -0,0 +1,1 @@
1914 +a
1898 +a
1915
1899
1916 $ cd ..
1900 $ cd ..
1917
1901
1918 test for ssh exploit 2017-07-25
1902 test for ssh exploit 2017-07-25
1919
1903
1920 $ cat >> $HGRCPATH << EOF
1904 $ cat >> $HGRCPATH << EOF
1921 > [ui]
1905 > [ui]
1922 > ssh = sh -c "read l; read l; read l"
1906 > ssh = sh -c "read l; read l; read l"
1923 > EOF
1907 > EOF
1924
1908
1925 $ hg init malicious-proxycommand
1909 $ hg init malicious-proxycommand
1926 $ cd malicious-proxycommand
1910 $ cd malicious-proxycommand
1927 $ echo 's = [hg]ssh://-oProxyCommand=touch${IFS}owned/path' > .hgsub
1911 $ echo 's = [hg]ssh://-oProxyCommand=touch${IFS}owned/path' > .hgsub
1928 $ hg init s
1912 $ hg init s
1929 $ cd s
1913 $ cd s
1930 $ echo init > init
1914 $ echo init > init
1931 $ hg add
1915 $ hg add
1932 adding init
1916 adding init
1933 $ hg commit -m init
1917 $ hg commit -m init
1934 $ cd ..
1918 $ cd ..
1935 $ hg add .hgsub
1919 $ hg add .hgsub
1936 $ hg ci -m 'add subrepo'
1920 $ hg ci -m 'add subrepo'
1937 $ cd ..
1921 $ cd ..
1938 $ hg clone malicious-proxycommand malicious-proxycommand-clone
1922 $ hg clone malicious-proxycommand malicious-proxycommand-clone
1939 updating to branch default
1923 updating to branch default
1940 cloning subrepo s from ssh://-oProxyCommand%3Dtouch%24%7BIFS%7Downed/path
1924 cloning subrepo s from ssh://-oProxyCommand%3Dtouch%24%7BIFS%7Downed/path
1941 abort: potentially unsafe url: 'ssh://-oProxyCommand=touch${IFS}owned/path' (in subrepository "s")
1925 abort: potentially unsafe url: 'ssh://-oProxyCommand=touch${IFS}owned/path' (in subrepository "s")
1942 [255]
1926 [255]
1943
1927
1944 also check that a percent encoded '-' (%2D) doesn't work
1928 also check that a percent encoded '-' (%2D) doesn't work
1945
1929
1946 $ cd malicious-proxycommand
1930 $ cd malicious-proxycommand
1947 $ echo 's = [hg]ssh://%2DoProxyCommand=touch${IFS}owned/path' > .hgsub
1931 $ echo 's = [hg]ssh://%2DoProxyCommand=touch${IFS}owned/path' > .hgsub
1948 $ hg ci -m 'change url to percent encoded'
1932 $ hg ci -m 'change url to percent encoded'
1949 $ cd ..
1933 $ cd ..
1950 $ rm -r malicious-proxycommand-clone
1934 $ rm -r malicious-proxycommand-clone
1951 $ hg clone malicious-proxycommand malicious-proxycommand-clone
1935 $ hg clone malicious-proxycommand malicious-proxycommand-clone
1952 updating to branch default
1936 updating to branch default
1953 cloning subrepo s from ssh://-oProxyCommand%3Dtouch%24%7BIFS%7Downed/path
1937 cloning subrepo s from ssh://-oProxyCommand%3Dtouch%24%7BIFS%7Downed/path
1954 abort: potentially unsafe url: 'ssh://-oProxyCommand=touch${IFS}owned/path' (in subrepository "s")
1938 abort: potentially unsafe url: 'ssh://-oProxyCommand=touch${IFS}owned/path' (in subrepository "s")
1955 [255]
1939 [255]
1956
1940
1957 also check for a pipe
1941 also check for a pipe
1958
1942
1959 $ cd malicious-proxycommand
1943 $ cd malicious-proxycommand
1960 $ echo 's = [hg]ssh://fakehost|touch${IFS}owned/path' > .hgsub
1944 $ echo 's = [hg]ssh://fakehost|touch${IFS}owned/path' > .hgsub
1961 $ hg ci -m 'change url to pipe'
1945 $ hg ci -m 'change url to pipe'
1962 $ cd ..
1946 $ cd ..
1963 $ rm -r malicious-proxycommand-clone
1947 $ rm -r malicious-proxycommand-clone
1964 $ hg clone malicious-proxycommand malicious-proxycommand-clone
1948 $ hg clone malicious-proxycommand malicious-proxycommand-clone
1965 updating to branch default
1949 updating to branch default
1966 cloning subrepo s from ssh://fakehost%7Ctouch%24%7BIFS%7Downed/path
1950 cloning subrepo s from ssh://fakehost%7Ctouch%24%7BIFS%7Downed/path
1967 abort: no suitable response from remote hg
1951 abort: no suitable response from remote hg
1968 [255]
1952 [255]
1969 $ [ ! -f owned ] || echo 'you got owned'
1953 $ [ ! -f owned ] || echo 'you got owned'
1970
1954
1971 also check that a percent encoded '|' (%7C) doesn't work
1955 also check that a percent encoded '|' (%7C) doesn't work
1972
1956
1973 $ cd malicious-proxycommand
1957 $ cd malicious-proxycommand
1974 $ echo 's = [hg]ssh://fakehost%7Ctouch%20owned/path' > .hgsub
1958 $ echo 's = [hg]ssh://fakehost%7Ctouch%20owned/path' > .hgsub
1975 $ hg ci -m 'change url to percent encoded pipe'
1959 $ hg ci -m 'change url to percent encoded pipe'
1976 $ cd ..
1960 $ cd ..
1977 $ rm -r malicious-proxycommand-clone
1961 $ rm -r malicious-proxycommand-clone
1978 $ hg clone malicious-proxycommand malicious-proxycommand-clone
1962 $ hg clone malicious-proxycommand malicious-proxycommand-clone
1979 updating to branch default
1963 updating to branch default
1980 cloning subrepo s from ssh://fakehost%7Ctouch%20owned/path
1964 cloning subrepo s from ssh://fakehost%7Ctouch%20owned/path
1981 abort: no suitable response from remote hg
1965 abort: no suitable response from remote hg
1982 [255]
1966 [255]
1983 $ [ ! -f owned ] || echo 'you got owned'
1967 $ [ ! -f owned ] || echo 'you got owned'
1984
1968
1985 and bad usernames:
1969 and bad usernames:
1986 $ cd malicious-proxycommand
1970 $ cd malicious-proxycommand
1987 $ echo 's = [hg]ssh://-oProxyCommand=touch owned@example.com/path' > .hgsub
1971 $ echo 's = [hg]ssh://-oProxyCommand=touch owned@example.com/path' > .hgsub
1988 $ hg ci -m 'owned username'
1972 $ hg ci -m 'owned username'
1989 $ cd ..
1973 $ cd ..
1990 $ rm -r malicious-proxycommand-clone
1974 $ rm -r malicious-proxycommand-clone
1991 $ hg clone malicious-proxycommand malicious-proxycommand-clone
1975 $ hg clone malicious-proxycommand malicious-proxycommand-clone
1992 updating to branch default
1976 updating to branch default
1993 cloning subrepo s from ssh://-oProxyCommand%3Dtouch%20owned@example.com/path
1977 cloning subrepo s from ssh://-oProxyCommand%3Dtouch%20owned@example.com/path
1994 abort: potentially unsafe url: 'ssh://-oProxyCommand=touch owned@example.com/path' (in subrepository "s")
1978 abort: potentially unsafe url: 'ssh://-oProxyCommand=touch owned@example.com/path' (in subrepository "s")
1995 [255]
1979 [255]
1996
1980
1997 Test convert subrepositories including merge (issue5526):
1981 Test convert subrepositories including merge (issue5526):
1998
1982
1999 $ hg init tconv
1983 $ hg init tconv
2000 $ hg convert --config extensions.convert= -q t/s tconv/s
1984 $ hg convert --config extensions.convert= -q t/s tconv/s
2001 $ hg convert --config extensions.convert= -q t/s/ss tconv/s/ss
1985 $ hg convert --config extensions.convert= -q t/s/ss tconv/s/ss
2002 $ hg convert --config extensions.convert= -q t/t tconv/t
1986 $ hg convert --config extensions.convert= -q t/t tconv/t
2003
1987
2004 convert shouldn't fail because of pseudo filenode:
1988 convert shouldn't fail because of pseudo filenode:
2005
1989
2006 $ hg convert --config extensions.convert= t tconv
1990 $ hg convert --config extensions.convert= t tconv
2007 scanning source...
1991 scanning source...
2008 sorting...
1992 sorting...
2009 converting...
1993 converting...
2010 17 0
1994 17 0
2011 16 1
1995 16 1
2012 15 2
1996 15 2
2013 14 3
1997 14 3
2014 13 4
1998 13 4
2015 12 5
1999 12 5
2016 11 6
2000 11 6
2017 10 7
2001 10 7
2018 9 8
2002 9 8
2019 8 9
2003 8 9
2020 7 10
2004 7 10
2021 6 11
2005 6 11
2022 5 12
2006 5 12
2023 4 13
2007 4 13
2024 3 rm2
2008 3 rm2
2025 2 phasecheck4
2009 2 phasecheck4
2026 1 16
2010 1 16
2027 0 branch before subrepo add
2011 0 branch before subrepo add
2028
2012
2029 converted .hgsubstate should point to valid nodes:
2013 converted .hgsubstate should point to valid nodes:
2030
2014
2031 $ hg up -R tconv 9
2015 $ hg up -R tconv 9
2032 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
2016 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
2033 $ cat tconv/.hgsubstate
2017 $ cat tconv/.hgsubstate
2034 fc627a69481fcbe5f1135069e8a3881c023e4cf5 s
2018 fc627a69481fcbe5f1135069e8a3881c023e4cf5 s
2035 60ca1237c19474e7a3978b0dc1ca4e6f36d51382 t
2019 60ca1237c19474e7a3978b0dc1ca4e6f36d51382 t
General Comments 0
You need to be logged in to leave comments. Login now