@@ -1,1355 +1,1320 b'' | |||
|
1 | 1 | /* |
|
2 | 2 | parsers.c - efficient content parsing |
|
3 | 3 | |
|
4 | 4 | Copyright 2008 Olivia Mackall <olivia@selenic.com> and others |
|
5 | 5 | |
|
6 | 6 | This software may be used and distributed according to the terms of |
|
7 | 7 | the GNU General Public License, incorporated herein by reference. |
|
8 | 8 | */ |
|
9 | 9 | |
|
10 | 10 | #define PY_SSIZE_T_CLEAN |
|
11 | 11 | #include <Python.h> |
|
12 | 12 | #include <ctype.h> |
|
13 | 13 | #include <stddef.h> |
|
14 | 14 | #include <string.h> |
|
15 | 15 | |
|
16 | 16 | #include "bitmanipulation.h" |
|
17 | 17 | #include "charencode.h" |
|
18 | 18 | #include "util.h" |
|
19 | 19 | |
|
20 | 20 | #ifdef IS_PY3K |
|
21 | 21 | /* The mapping of Python types is meant to be temporary to get Python |
|
22 | 22 | * 3 to compile. We should remove this once Python 3 is fully | 
|
23 | 23 | * supported and proper types are used in the extensions themselves. */ |
|
24 | 24 | #define PyInt_Check PyLong_Check |
|
25 | 25 | #define PyInt_FromLong PyLong_FromLong |
|
26 | 26 | #define PyInt_FromSsize_t PyLong_FromSsize_t |
|
27 | 27 | #define PyInt_AsLong PyLong_AsLong |
|
28 | 28 | #else |
|
29 | 29 | /* Windows on Python 2.7 doesn't define S_IFLNK. Python 3+ defines via |
|
30 | 30 | * pyport.h. */ |
|
31 | 31 | #ifndef S_IFLNK |
|
32 | 32 | #define S_IFLNK 0120000 |
|
33 | 33 | #endif |
|
34 | 34 | #endif |
|
35 | 35 | |
|
36 | 36 | static const char *const versionerrortext = "Python minor version mismatch"; |
|
37 | 37 | |
|
38 | 38 | static const int dirstate_v1_from_p2 = -2; |
|
39 | 39 | static const int dirstate_v1_nonnormal = -1; |
|
40 | 40 | static const int ambiguous_time = -1; |
|
41 | 41 | |
|
42 | 42 | static PyObject *dict_new_presized(PyObject *self, PyObject *args) |
|
43 | 43 | { |
|
44 | 44 | Py_ssize_t expected_size; |
|
45 | 45 | |
|
46 | 46 | if (!PyArg_ParseTuple(args, "n:make_presized_dict", &expected_size)) { |
|
47 | 47 | return NULL; |
|
48 | 48 | } |
|
49 | 49 | |
|
50 | 50 | return _dict_new_presized(expected_size); |
|
51 | 51 | } |
|
52 | 52 | |
|
53 | 53 | static PyObject *dirstate_item_new(PyTypeObject *subtype, PyObject *args, |
|
54 | 54 | PyObject *kwds) |
|
55 | 55 | { |
|
56 | 56 | /* We do all the initialization here and not a tp_init function because |
|
57 | 57 | * dirstate_item is immutable. */ |
|
58 | 58 | dirstateItemObject *t; |
|
59 | 59 | int wc_tracked; |
|
60 | 60 | int p1_tracked; |
|
61 | 61 | int p2_info; |
|
62 | 62 | int has_meaningful_data; |
|
63 | 63 | int has_meaningful_mtime; |
|
64 | 64 | int mode; |
|
65 | 65 | int size; |
|
66 | 66 | int mtime_s; |
|
67 | 67 | int mtime_ns; |
|
68 | 68 | PyObject *parentfiledata; |
|
69 | 69 | PyObject *mtime; |
|
70 | 70 | PyObject *fallback_exec; |
|
71 | 71 | PyObject *fallback_symlink; |
|
72 | 72 | static char *keywords_name[] = { |
|
73 | 73 | "wc_tracked", "p1_tracked", "p2_info", |
|
74 | 74 | "has_meaningful_data", "has_meaningful_mtime", "parentfiledata", |
|
75 | 75 | "fallback_exec", "fallback_symlink", NULL, |
|
76 | 76 | }; |
|
77 | 77 | wc_tracked = 0; |
|
78 | 78 | p1_tracked = 0; |
|
79 | 79 | p2_info = 0; |
|
80 | 80 | has_meaningful_mtime = 1; |
|
81 | 81 | has_meaningful_data = 1; |
|
82 | 82 | parentfiledata = Py_None; |
|
83 | 83 | fallback_exec = Py_None; |
|
84 | 84 | fallback_symlink = Py_None; |
|
85 | 85 | if (!PyArg_ParseTupleAndKeywords(args, kwds, "|iiiiiOOO", keywords_name, |
|
86 | 86 | &wc_tracked, &p1_tracked, &p2_info, |
|
87 | 87 | &has_meaningful_data, |
|
88 | 88 | &has_meaningful_mtime, &parentfiledata, |
|
89 | 89 | &fallback_exec, &fallback_symlink)) { |
|
90 | 90 | return NULL; |
|
91 | 91 | } |
|
92 | 92 | t = (dirstateItemObject *)subtype->tp_alloc(subtype, 1); |
|
93 | 93 | if (!t) { |
|
94 | 94 | return NULL; |
|
95 | 95 | } |
|
96 | 96 | |
|
97 | 97 | t->flags = 0; |
|
98 | 98 | if (wc_tracked) { |
|
99 | 99 | t->flags |= dirstate_flag_wc_tracked; |
|
100 | 100 | } |
|
101 | 101 | if (p1_tracked) { |
|
102 | 102 | t->flags |= dirstate_flag_p1_tracked; |
|
103 | 103 | } |
|
104 | 104 | if (p2_info) { |
|
105 | 105 | t->flags |= dirstate_flag_p2_info; |
|
106 | 106 | } |
|
107 | 107 | |
|
108 | 108 | if (fallback_exec != Py_None) { |
|
109 | 109 | t->flags |= dirstate_flag_has_fallback_exec; |
|
110 | 110 | if (PyObject_IsTrue(fallback_exec)) { |
|
111 | 111 | t->flags |= dirstate_flag_fallback_exec; |
|
112 | 112 | } |
|
113 | 113 | } |
|
114 | 114 | if (fallback_symlink != Py_None) { |
|
115 | 115 | t->flags |= dirstate_flag_has_fallback_symlink; |
|
116 | 116 | if (PyObject_IsTrue(fallback_symlink)) { |
|
117 | 117 | t->flags |= dirstate_flag_fallback_symlink; |
|
118 | 118 | } |
|
119 | 119 | } |
|
120 | 120 | |
|
121 | 121 | if (parentfiledata != Py_None) { |
|
122 | 122 | if (!PyArg_ParseTuple(parentfiledata, "iiO", &mode, &size, |
|
123 | 123 | &mtime)) { |
|
124 | 124 | return NULL; |
|
125 | 125 | } |
|
126 | 126 | if (mtime != Py_None) { |
|
127 | 127 | if (!PyArg_ParseTuple(mtime, "ii", &mtime_s, |
|
128 | 128 | &mtime_ns)) { |
|
129 | 129 | return NULL; |
|
130 | 130 | } |
|
131 | 131 | } else { |
|
132 | 132 | has_meaningful_mtime = 0; |
|
133 | 133 | } |
|
134 | 134 | } else { |
|
135 | 135 | has_meaningful_data = 0; |
|
136 | 136 | has_meaningful_mtime = 0; |
|
137 | 137 | } |
|
138 | 138 | if (has_meaningful_data) { |
|
139 | 139 | t->flags |= dirstate_flag_has_meaningful_data; |
|
140 | 140 | t->mode = mode; |
|
141 | 141 | t->size = size; |
|
142 | 142 | } else { |
|
143 | 143 | t->mode = 0; |
|
144 | 144 | t->size = 0; |
|
145 | 145 | } |
|
146 | 146 | if (has_meaningful_mtime) { |
|
147 | 147 | t->flags |= dirstate_flag_has_mtime; |
|
148 | 148 | t->mtime_s = mtime_s; |
|
149 | 149 | t->mtime_ns = mtime_ns; |
|
150 | 150 | } else { |
|
151 | 151 | t->mtime_s = 0; |
|
152 | 152 | t->mtime_ns = 0; |
|
153 | 153 | } |
|
154 | 154 | return (PyObject *)t; |
|
155 | 155 | } |
|
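Note: the constructor above folds the tracked-state booleans into a single `flags` word via bitwise ORs. A minimal Python sketch of the same packing follows; the flag values here are illustrative only (the real `dirstate_flag_*` constants are defined in the C headers and may differ).

```python
# Illustrative flag values only -- the real dirstate_flag_* constants
# are defined in the C headers and may differ.
WC_TRACKED = 1 << 0
P1_TRACKED = 1 << 1
P2_INFO = 1 << 2

def pack_flags(wc_tracked, p1_tracked, p2_info):
    """Pack the tracked-state booleans into one word, as the
    constructor above does with t->flags."""
    flags = 0
    if wc_tracked:
        flags |= WC_TRACKED
    if p1_tracked:
        flags |= P1_TRACKED
    if p2_info:
        flags |= P2_INFO
    return flags

assert pack_flags(True, False, True) == (WC_TRACKED | P2_INFO)
```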
156 | 156 | |
|
157 | 157 | static void dirstate_item_dealloc(PyObject *o) |
|
158 | 158 | { |
|
159 | 159 | PyObject_Del(o); |
|
160 | 160 | } |
|
161 | 161 | |
|
162 | 162 | static inline bool dirstate_item_c_tracked(dirstateItemObject *self) |
|
163 | 163 | { |
|
164 | 164 | return (self->flags & dirstate_flag_wc_tracked); |
|
165 | 165 | } |
|
166 | 166 | |
|
167 | 167 | static inline bool dirstate_item_c_any_tracked(dirstateItemObject *self) |
|
168 | 168 | { |
|
169 | 169 | const int mask = dirstate_flag_wc_tracked | dirstate_flag_p1_tracked | |
|
170 | 170 | dirstate_flag_p2_info; |
|
171 | 171 | return (self->flags & mask); |
|
172 | 172 | } |
|
173 | 173 | |
|
174 | 174 | static inline bool dirstate_item_c_added(dirstateItemObject *self) |
|
175 | 175 | { |
|
176 | 176 | const int mask = (dirstate_flag_wc_tracked | dirstate_flag_p1_tracked | |
|
177 | 177 | dirstate_flag_p2_info); |
|
178 | 178 | const int target = dirstate_flag_wc_tracked; |
|
179 | 179 | return (self->flags & mask) == target; |
|
180 | 180 | } |
|
181 | 181 | |
|
182 | 182 | static inline bool dirstate_item_c_removed(dirstateItemObject *self) |
|
183 | 183 | { |
|
184 | 184 | if (self->flags & dirstate_flag_wc_tracked) { |
|
185 | 185 | return false; |
|
186 | 186 | } |
|
187 | 187 | return (self->flags & |
|
188 | 188 | (dirstate_flag_p1_tracked | dirstate_flag_p2_info)); |
|
189 | 189 | } |
|
190 | 190 | |
|
191 | 191 | static inline bool dirstate_item_c_merged(dirstateItemObject *self) |
|
192 | 192 | { |
|
193 | 193 | return ((self->flags & dirstate_flag_wc_tracked) && |
|
194 | 194 | (self->flags & dirstate_flag_p1_tracked) && |
|
195 | 195 | (self->flags & dirstate_flag_p2_info)); |
|
196 | 196 | } |
|
197 | 197 | |
|
198 | 198 | static inline bool dirstate_item_c_from_p2(dirstateItemObject *self) |
|
199 | 199 | { |
|
200 | 200 | return ((self->flags & dirstate_flag_wc_tracked) && |
|
201 | 201 | !(self->flags & dirstate_flag_p1_tracked) && |
|
202 | 202 | (self->flags & dirstate_flag_p2_info)); |
|
203 | 203 | } |
|
204 | 204 | |
|
205 | 205 | static inline char dirstate_item_c_v1_state(dirstateItemObject *self) |
|
206 | 206 | { |
|
207 | 207 | if (dirstate_item_c_removed(self)) { |
|
208 | 208 | return 'r'; |
|
209 | 209 | } else if (dirstate_item_c_merged(self)) { |
|
210 | 210 | return 'm'; |
|
211 | 211 | } else if (dirstate_item_c_added(self)) { |
|
212 | 212 | return 'a'; |
|
213 | 213 | } else { |
|
214 | 214 | return 'n'; |
|
215 | 215 | } |
|
216 | 216 | } |
|
217 | 217 | |
|
218 | 218 | static inline bool dirstate_item_c_has_fallback_exec(dirstateItemObject *self) |
|
219 | 219 | { |
|
220 | 220 | return (bool)(self->flags & dirstate_flag_has_fallback_exec); | 
|
221 | 221 | } |
|
222 | 222 | |
|
223 | 223 | static inline bool |
|
224 | 224 | dirstate_item_c_has_fallback_symlink(dirstateItemObject *self) |
|
225 | 225 | { |
|
226 | 226 | return (bool)(self->flags & dirstate_flag_has_fallback_symlink); | 
|
227 | 227 | } |
|
228 | 228 | |
|
229 | 229 | static inline int dirstate_item_c_v1_mode(dirstateItemObject *self) |
|
230 | 230 | { |
|
231 | 231 | if (self->flags & dirstate_flag_has_meaningful_data) { |
|
232 | 232 | return self->mode; |
|
233 | 233 | } else { |
|
234 | 234 | return 0; |
|
235 | 235 | } |
|
236 | 236 | } |
|
237 | 237 | |
|
238 | 238 | static inline int dirstate_item_c_v1_size(dirstateItemObject *self) |
|
239 | 239 | { |
|
240 | 240 | if (!(self->flags & dirstate_flag_wc_tracked) && |
|
241 | 241 | (self->flags & dirstate_flag_p2_info)) { |
|
242 | 242 | if (self->flags & dirstate_flag_p1_tracked) { |
|
243 | 243 | return dirstate_v1_nonnormal; |
|
244 | 244 | } else { |
|
245 | 245 | return dirstate_v1_from_p2; |
|
246 | 246 | } |
|
247 | 247 | } else if (dirstate_item_c_removed(self)) { |
|
248 | 248 | return 0; |
|
249 | 249 | } else if (self->flags & dirstate_flag_p2_info) { |
|
250 | 250 | return dirstate_v1_from_p2; |
|
251 | 251 | } else if (dirstate_item_c_added(self)) { |
|
252 | 252 | return dirstate_v1_nonnormal; |
|
253 | 253 | } else if (self->flags & dirstate_flag_has_meaningful_data) { |
|
254 | 254 | return self->size; |
|
255 | 255 | } else { |
|
256 | 256 | return dirstate_v1_nonnormal; |
|
257 | 257 | } |
|
258 | 258 | } |
|
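Note: in the v1 format the "size" field doubles as an encoding channel: -1 (`dirstate_v1_nonnormal`) and -2 (`dirstate_v1_from_p2`) are sentinels rather than real file sizes. A rough Python sketch of the mapping above (it elides the `has_meaningful_data` distinction for brevity):

```python
NONNORMAL = -1  # dirstate_v1_nonnormal
FROM_P2 = -2    # dirstate_v1_from_p2

def v1_size(wc_tracked, p1_tracked, p2_info, size):
    """Simplified sketch of dirstate_item_c_v1_size."""
    if not wc_tracked and p2_info:
        return NONNORMAL if p1_tracked else FROM_P2
    if not wc_tracked and p1_tracked:  # the "removed" case
        return 0
    if p2_info:
        return FROM_P2
    if wc_tracked and not p1_tracked:  # the "added" case
        return NONNORMAL
    return size
```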
259 | 259 | |
|
260 | 260 | static inline int dirstate_item_c_v1_mtime(dirstateItemObject *self) |
|
261 | 261 | { |
|
262 | 262 | if (dirstate_item_c_removed(self)) { |
|
263 | 263 | return 0; |
|
264 | 264 | } else if (!(self->flags & dirstate_flag_has_mtime) || |
|
265 | 265 | !(self->flags & dirstate_flag_p1_tracked) || |
|
266 | 266 | !(self->flags & dirstate_flag_wc_tracked) || |
|
267 | 267 | (self->flags & dirstate_flag_p2_info)) { |
|
268 | 268 | return ambiguous_time; |
|
269 | 269 | } else { |
|
270 | 270 | return self->mtime_s; |
|
271 | 271 | } |
|
272 | 272 | } |
|
273 | 273 | |
|
274 | 274 | static PyObject *dirstate_item_v2_data(dirstateItemObject *self) |
|
275 | 275 | { |
|
276 | 276 | int flags = self->flags; |
|
277 | 277 | int mode = dirstate_item_c_v1_mode(self); |
|
278 | 278 | #ifdef S_IXUSR |
|
279 | 279 | /* This is for platforms with an exec bit */ |
|
280 | 280 | if ((mode & S_IXUSR) != 0) { |
|
281 | 281 | flags |= dirstate_flag_mode_exec_perm; |
|
282 | 282 | } else { |
|
283 | 283 | flags &= ~dirstate_flag_mode_exec_perm; |
|
284 | 284 | } |
|
285 | 285 | #else |
|
286 | 286 | flags &= ~dirstate_flag_mode_exec_perm; |
|
287 | 287 | #endif |
|
288 | 288 | #ifdef S_ISLNK |
|
289 | 289 | /* This is for platforms with support for symlinks */ |
|
290 | 290 | if (S_ISLNK(mode)) { |
|
291 | 291 | flags |= dirstate_flag_mode_is_symlink; |
|
292 | 292 | } else { |
|
293 | 293 | flags &= ~dirstate_flag_mode_is_symlink; |
|
294 | 294 | } |
|
295 | 295 | #else |
|
296 | 296 | flags &= ~dirstate_flag_mode_is_symlink; |
|
297 | 297 | #endif |
|
298 | 298 | return Py_BuildValue("iiii", flags, self->size, self->mtime_s, |
|
299 | 299 | self->mtime_ns); |
|
300 | 300 | }; |
|
301 | 301 | |
|
302 | 302 | static PyObject *dirstate_item_v1_state(dirstateItemObject *self) |
|
303 | 303 | { |
|
304 | 304 | char state = dirstate_item_c_v1_state(self); |
|
305 | 305 | return PyBytes_FromStringAndSize(&state, 1); |
|
306 | 306 | }; |
|
307 | 307 | |
|
308 | 308 | static PyObject *dirstate_item_v1_mode(dirstateItemObject *self) |
|
309 | 309 | { |
|
310 | 310 | return PyInt_FromLong(dirstate_item_c_v1_mode(self)); |
|
311 | 311 | }; |
|
312 | 312 | |
|
313 | 313 | static PyObject *dirstate_item_v1_size(dirstateItemObject *self) |
|
314 | 314 | { |
|
315 | 315 | return PyInt_FromLong(dirstate_item_c_v1_size(self)); |
|
316 | 316 | }; |
|
317 | 317 | |
|
318 | 318 | static PyObject *dirstate_item_v1_mtime(dirstateItemObject *self) |
|
319 | 319 | { |
|
320 | 320 | return PyInt_FromLong(dirstate_item_c_v1_mtime(self)); |
|
321 | 321 | }; |
|
322 | 322 | |
|
323 | static PyObject *dirstate_item_need_delay(dirstateItemObject *self, | |
|
324 | PyObject *now) | |
|
325 | { | |
|
326 | int now_s; | |
|
327 | int now_ns; | |
|
328 | if (!PyArg_ParseTuple(now, "ii", &now_s, &now_ns)) { | |
|
329 | return NULL; | |
|
330 | } | |
|
331 | if (dirstate_item_c_v1_state(self) == 'n' && self->mtime_s == now_s) { | |
|
332 | Py_RETURN_TRUE; | |
|
333 | } else { | |
|
334 | Py_RETURN_FALSE; | |
|
335 | } | |
|
336 | }; | |
|
337 | ||
|
338 | 323 | static PyObject *dirstate_item_mtime_likely_equal_to(dirstateItemObject *self, |
|
339 | 324 | PyObject *other) |
|
340 | 325 | { |
|
341 | 326 | int other_s; |
|
342 | 327 | int other_ns; |
|
343 | 328 | if (!PyArg_ParseTuple(other, "ii", &other_s, &other_ns)) { |
|
344 | 329 | return NULL; |
|
345 | 330 | } |
|
346 | 331 | if ((self->flags & dirstate_flag_has_mtime) && |
|
347 | 332 | self->mtime_s == other_s && |
|
348 | 333 | (self->mtime_ns == other_ns || self->mtime_ns == 0 || |
|
349 | 334 | other_ns == 0)) { |
|
350 | 335 | Py_RETURN_TRUE; |
|
351 | 336 | } else { |
|
352 | 337 | Py_RETURN_FALSE; |
|
353 | 338 | } |
|
354 | 339 | }; |
|
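Note: the comparison above treats a nanosecond field of 0 on either side as "nanoseconds unknown" (e.g. an mtime that came from v1 data), so in that case only the seconds have to match. A small sketch of the predicate:

```python
def mtime_likely_equal(a, b):
    """(seconds, nanoseconds) pairs are likely equal when seconds match
    and the nanosecond fields either match or one of them is 0."""
    a_s, a_ns = a
    b_s, b_ns = b
    return a_s == b_s and (a_ns == b_ns or a_ns == 0 or b_ns == 0)

assert mtime_likely_equal((10, 500), (10, 0))        # one side unknown
assert not mtime_likely_equal((10, 500), (11, 500))  # seconds differ
```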
355 | 340 | |
|
356 | 341 | /* This will never change since it's bound to V1 |
|
357 | 342 | */ |
|
358 | 343 | static inline dirstateItemObject * |
|
359 | 344 | dirstate_item_from_v1_data(char state, int mode, int size, int mtime) |
|
360 | 345 | { |
|
361 | 346 | dirstateItemObject *t = |
|
362 | 347 | PyObject_New(dirstateItemObject, &dirstateItemType); |
|
363 | 348 | if (!t) { |
|
364 | 349 | return NULL; |
|
365 | 350 | } |
|
366 | 351 | t->flags = 0; |
|
367 | 352 | t->mode = 0; |
|
368 | 353 | t->size = 0; |
|
369 | 354 | t->mtime_s = 0; |
|
370 | 355 | t->mtime_ns = 0; |
|
371 | 356 | |
|
372 | 357 | if (state == 'm') { |
|
373 | 358 | t->flags = (dirstate_flag_wc_tracked | |
|
374 | 359 | dirstate_flag_p1_tracked | dirstate_flag_p2_info); |
|
375 | 360 | } else if (state == 'a') { |
|
376 | 361 | t->flags = dirstate_flag_wc_tracked; |
|
377 | 362 | } else if (state == 'r') { |
|
378 | 363 | if (size == dirstate_v1_nonnormal) { |
|
379 | 364 | t->flags = |
|
380 | 365 | dirstate_flag_p1_tracked | dirstate_flag_p2_info; |
|
381 | 366 | } else if (size == dirstate_v1_from_p2) { |
|
382 | 367 | t->flags = dirstate_flag_p2_info; |
|
383 | 368 | } else { |
|
384 | 369 | t->flags = dirstate_flag_p1_tracked; |
|
385 | 370 | } |
|
386 | 371 | } else if (state == 'n') { |
|
387 | 372 | if (size == dirstate_v1_from_p2) { |
|
388 | 373 | t->flags = |
|
389 | 374 | dirstate_flag_wc_tracked | dirstate_flag_p2_info; |
|
390 | 375 | } else if (size == dirstate_v1_nonnormal) { |
|
391 | 376 | t->flags = |
|
392 | 377 | dirstate_flag_wc_tracked | dirstate_flag_p1_tracked; |
|
393 | 378 | } else if (mtime == ambiguous_time) { |
|
394 | 379 | t->flags = (dirstate_flag_wc_tracked | |
|
395 | 380 | dirstate_flag_p1_tracked | |
|
396 | 381 | dirstate_flag_has_meaningful_data); |
|
397 | 382 | t->mode = mode; |
|
398 | 383 | t->size = size; |
|
399 | 384 | } else { |
|
400 | 385 | t->flags = (dirstate_flag_wc_tracked | |
|
401 | 386 | dirstate_flag_p1_tracked | |
|
402 | 387 | dirstate_flag_has_meaningful_data | |
|
403 | 388 | dirstate_flag_has_mtime); |
|
404 | 389 | t->mode = mode; |
|
405 | 390 | t->size = size; |
|
406 | 391 | t->mtime_s = mtime; |
|
407 | 392 | } |
|
408 | 393 | } else { |
|
409 | 394 | PyErr_Format(PyExc_RuntimeError, |
|
410 | 395 | "unknown state: `%c` (%d, %d, %d)", state, mode, |
|
411 | 396 | size, mtime); | 
|
412 | 397 | Py_DECREF(t); |
|
413 | 398 | return NULL; |
|
414 | 399 | } |
|
415 | 400 | |
|
416 | 401 | return t; |
|
417 | 402 | } |
|
418 | 403 | |
|
419 | 404 | /* This will never change since it's bound to V1, unlike `dirstate_item_new` */ |
|
420 | 405 | static PyObject *dirstate_item_from_v1_meth(PyTypeObject *subtype, |
|
421 | 406 | PyObject *args) |
|
422 | 407 | { |
|
423 | 408 | /* We do all the initialization here and not a tp_init function because |
|
424 | 409 | * dirstate_item is immutable. */ |
|
425 | 410 | char state; |
|
426 | 411 | int size, mode, mtime; |
|
427 | 412 | if (!PyArg_ParseTuple(args, "ciii", &state, &mode, &size, &mtime)) { |
|
428 | 413 | return NULL; |
|
429 | 414 | } |
|
430 | 415 | return (PyObject *)dirstate_item_from_v1_data(state, mode, size, mtime); |
|
431 | 416 | }; |
|
432 | 417 | |
|
433 | 418 | static PyObject *dirstate_item_from_v2_meth(PyTypeObject *subtype, |
|
434 | 419 | PyObject *args) |
|
435 | 420 | { |
|
436 | 421 | dirstateItemObject *t = |
|
437 | 422 | PyObject_New(dirstateItemObject, &dirstateItemType); |
|
438 | 423 | if (!t) { |
|
439 | 424 | return NULL; |
|
440 | 425 | } |
|
441 | 426 | if (!PyArg_ParseTuple(args, "iiii", &t->flags, &t->size, &t->mtime_s, |
|
442 | 427 | &t->mtime_ns)) { |
|
443 | 428 | return NULL; |
|
444 | 429 | } |
|
445 | 430 | if (t->flags & dirstate_flag_expected_state_is_modified) { |
|
446 | 431 | t->flags &= ~(dirstate_flag_expected_state_is_modified | |
|
447 | 432 | dirstate_flag_has_meaningful_data | |
|
448 | 433 | dirstate_flag_has_mtime); |
|
449 | 434 | } |
|
450 | 435 | if (t->flags & dirstate_flag_mtime_second_ambiguous) { |
|
451 | 436 | /* The current code is not able to do the more subtle comparison |
|
452 | 437 | * that MTIME_SECOND_AMBIGUOUS requires, so we ignore the | 
|
453 | 438 | * mtime */ |
|
454 | 439 | t->flags &= ~(dirstate_flag_mtime_second_ambiguous | |
|
455 | 440 | dirstate_flag_has_meaningful_data | |
|
456 | 441 | dirstate_flag_has_mtime); |
|
457 | 442 | } |
|
458 | 443 | t->mode = 0; |
|
459 | 444 | if (t->flags & dirstate_flag_has_meaningful_data) { |
|
460 | 445 | if (t->flags & dirstate_flag_mode_exec_perm) { |
|
461 | 446 | t->mode = 0755; |
|
462 | 447 | } else { |
|
463 | 448 | t->mode = 0644; |
|
464 | 449 | } |
|
465 | 450 | if (t->flags & dirstate_flag_mode_is_symlink) { |
|
466 | 451 | t->mode |= S_IFLNK; |
|
467 | 452 | } else { |
|
468 | 453 | t->mode |= S_IFREG; |
|
469 | 454 | } |
|
470 | 455 | } |
|
471 | 456 | return (PyObject *)t; |
|
472 | 457 | }; |
|
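Note: dirstate-v2 stores only the two mode facts that matter (exec permission and symlink-ness), so the code above rebuilds a plausible full mode from them. A sketch of that reconstruction:

```python
import stat

def mode_from_v2_flags(exec_perm, is_symlink):
    """Rebuild a plausible mode from the two v2 mode flags, as
    dirstate_item_from_v2_meth does."""
    mode = 0o755 if exec_perm else 0o644
    mode |= stat.S_IFLNK if is_symlink else stat.S_IFREG
    return mode

assert mode_from_v2_flags(False, False) == 0o644 | stat.S_IFREG
assert stat.S_ISLNK(mode_from_v2_flags(True, True))
```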
473 | 458 | |
|
474 | 459 | /* This means the next status call will have to actually check its content |
|
475 | 460 | to make sure it is correct. */ |
|
476 | 461 | static PyObject *dirstate_item_set_possibly_dirty(dirstateItemObject *self) |
|
477 | 462 | { |
|
478 | 463 | self->flags &= ~dirstate_flag_has_mtime; |
|
479 | 464 | Py_RETURN_NONE; |
|
480 | 465 | } |
|
481 | 466 | |
|
482 | 467 | /* See docstring of the python implementation for details */ |
|
483 | 468 | static PyObject *dirstate_item_set_clean(dirstateItemObject *self, |
|
484 | 469 | PyObject *args) |
|
485 | 470 | { |
|
486 | 471 | int size, mode, mtime_s, mtime_ns; |
|
487 | 472 | PyObject *mtime; |
|
488 | 473 | mtime_s = 0; |
|
489 | 474 | mtime_ns = 0; |
|
490 | 475 | if (!PyArg_ParseTuple(args, "iiO", &mode, &size, &mtime)) { |
|
491 | 476 | return NULL; |
|
492 | 477 | } |
|
493 | 478 | if (mtime != Py_None) { |
|
494 | 479 | if (!PyArg_ParseTuple(mtime, "ii", &mtime_s, &mtime_ns)) { |
|
495 | 480 | return NULL; |
|
496 | 481 | } |
|
497 | 482 | } else { |
|
498 | 483 | self->flags &= ~dirstate_flag_has_mtime; |
|
499 | 484 | } |
|
500 | 485 | self->flags = dirstate_flag_wc_tracked | dirstate_flag_p1_tracked | |
|
501 | 486 | dirstate_flag_has_meaningful_data | |
|
502 | 487 | dirstate_flag_has_mtime; |
|
503 | 488 | self->mode = mode; |
|
504 | 489 | self->size = size; |
|
505 | 490 | self->mtime_s = mtime_s; |
|
506 | 491 | self->mtime_ns = mtime_ns; |
|
507 | 492 | Py_RETURN_NONE; |
|
508 | 493 | } |
|
509 | 494 | |
|
510 | 495 | static PyObject *dirstate_item_set_tracked(dirstateItemObject *self) |
|
511 | 496 | { |
|
512 | 497 | self->flags |= dirstate_flag_wc_tracked; |
|
513 | 498 | self->flags &= ~dirstate_flag_has_mtime; |
|
514 | 499 | Py_RETURN_NONE; |
|
515 | 500 | } |
|
516 | 501 | |
|
517 | 502 | static PyObject *dirstate_item_set_untracked(dirstateItemObject *self) |
|
518 | 503 | { |
|
519 | 504 | self->flags &= ~dirstate_flag_wc_tracked; |
|
520 | 505 | self->mode = 0; |
|
521 | 506 | self->size = 0; |
|
522 | 507 | self->mtime_s = 0; |
|
523 | 508 | self->mtime_ns = 0; |
|
524 | 509 | Py_RETURN_NONE; |
|
525 | 510 | } |
|
526 | 511 | |
|
527 | 512 | static PyObject *dirstate_item_drop_merge_data(dirstateItemObject *self) |
|
528 | 513 | { |
|
529 | 514 | if (self->flags & dirstate_flag_p2_info) { |
|
530 | 515 | self->flags &= ~(dirstate_flag_p2_info | |
|
531 | 516 | dirstate_flag_has_meaningful_data | |
|
532 | 517 | dirstate_flag_has_mtime); |
|
533 | 518 | self->mode = 0; |
|
534 | 519 | self->size = 0; |
|
535 | 520 | self->mtime_s = 0; |
|
536 | 521 | self->mtime_ns = 0; |
|
537 | 522 | } |
|
538 | 523 | Py_RETURN_NONE; |
|
539 | 524 | } |
|
540 | 525 | static PyMethodDef dirstate_item_methods[] = { |
|
541 | 526 | {"v2_data", (PyCFunction)dirstate_item_v2_data, METH_NOARGS, |
|
542 | 527 | "return data suitable for v2 serialization"}, |
|
543 | 528 | {"v1_state", (PyCFunction)dirstate_item_v1_state, METH_NOARGS, |
|
544 | 529 | "return a \"state\" suitable for v1 serialization"}, |
|
545 | 530 | {"v1_mode", (PyCFunction)dirstate_item_v1_mode, METH_NOARGS, |
|
546 | 531 | "return a \"mode\" suitable for v1 serialization"}, |
|
547 | 532 | {"v1_size", (PyCFunction)dirstate_item_v1_size, METH_NOARGS, |
|
548 | 533 | "return a \"size\" suitable for v1 serialization"}, |
|
549 | 534 | {"v1_mtime", (PyCFunction)dirstate_item_v1_mtime, METH_NOARGS, |
|
550 | 535 | "return a \"mtime\" suitable for v1 serialization"}, |
|
551 | {"need_delay", (PyCFunction)dirstate_item_need_delay, METH_O, | |
|
552 | "True if the stored mtime would be ambiguous with the current time"}, | |
|
553 | 536 | {"mtime_likely_equal_to", (PyCFunction)dirstate_item_mtime_likely_equal_to, |
|
554 | 537 | METH_O, "True if the stored mtime is likely equal to the given mtime"}, |
|
555 | 538 | {"from_v1_data", (PyCFunction)dirstate_item_from_v1_meth, |
|
556 | 539 | METH_VARARGS | METH_CLASS, "build a new DirstateItem object from V1 data"}, |
|
557 | 540 | {"from_v2_data", (PyCFunction)dirstate_item_from_v2_meth, |
|
558 | 541 | METH_VARARGS | METH_CLASS, "build a new DirstateItem object from V2 data"}, |
|
559 | 542 | {"set_possibly_dirty", (PyCFunction)dirstate_item_set_possibly_dirty, |
|
560 | 543 | METH_NOARGS, "mark a file as \"possibly dirty\""}, |
|
561 | 544 | {"set_clean", (PyCFunction)dirstate_item_set_clean, METH_VARARGS, |
|
562 | 545 | "mark a file as \"clean\""}, |
|
563 | 546 | {"set_tracked", (PyCFunction)dirstate_item_set_tracked, METH_NOARGS, |
|
564 | 547 | "mark a file as \"tracked\""}, |
|
565 | 548 | {"set_untracked", (PyCFunction)dirstate_item_set_untracked, METH_NOARGS, |
|
566 | 549 | "mark a file as \"untracked\""}, |
|
567 | 550 | {"drop_merge_data", (PyCFunction)dirstate_item_drop_merge_data, METH_NOARGS, |
|
568 | 551 | "remove all \"merge-only\" data from a DirstateItem"}, | 
|
569 | 552 | {NULL} /* Sentinel */ |
|
570 | 553 | }; |
|
571 | 554 | |
|
572 | 555 | static PyObject *dirstate_item_get_mode(dirstateItemObject *self) |
|
573 | 556 | { |
|
574 | 557 | return PyInt_FromLong(dirstate_item_c_v1_mode(self)); |
|
575 | 558 | }; |
|
576 | 559 | |
|
577 | 560 | static PyObject *dirstate_item_get_size(dirstateItemObject *self) |
|
578 | 561 | { |
|
579 | 562 | return PyInt_FromLong(dirstate_item_c_v1_size(self)); |
|
580 | 563 | }; |
|
581 | 564 | |
|
582 | 565 | static PyObject *dirstate_item_get_mtime(dirstateItemObject *self) |
|
583 | 566 | { |
|
584 | 567 | return PyInt_FromLong(dirstate_item_c_v1_mtime(self)); |
|
585 | 568 | }; |
|
586 | 569 | |
|
587 | 570 | static PyObject *dirstate_item_get_state(dirstateItemObject *self) |
|
588 | 571 | { |
|
589 | 572 | char state = dirstate_item_c_v1_state(self); |
|
590 | 573 | return PyBytes_FromStringAndSize(&state, 1); |
|
591 | 574 | }; |
|
592 | 575 | |
|
593 | 576 | static PyObject *dirstate_item_get_has_fallback_exec(dirstateItemObject *self) |
|
594 | 577 | { |
|
595 | 578 | if (dirstate_item_c_has_fallback_exec(self)) { |
|
596 | 579 | Py_RETURN_TRUE; |
|
597 | 580 | } else { |
|
598 | 581 | Py_RETURN_FALSE; |
|
599 | 582 | } |
|
600 | 583 | }; |
|
601 | 584 | |
|
602 | 585 | static PyObject *dirstate_item_get_fallback_exec(dirstateItemObject *self) |
|
603 | 586 | { |
|
604 | 587 | if (dirstate_item_c_has_fallback_exec(self)) { |
|
605 | 588 | if (self->flags & dirstate_flag_fallback_exec) { |
|
606 | 589 | Py_RETURN_TRUE; |
|
607 | 590 | } else { |
|
608 | 591 | Py_RETURN_FALSE; |
|
609 | 592 | } |
|
610 | 593 | } else { |
|
611 | 594 | Py_RETURN_NONE; |
|
612 | 595 | } |
|
613 | 596 | }; |
|
614 | 597 | |
|
615 | 598 | static int dirstate_item_set_fallback_exec(dirstateItemObject *self, |
|
616 | 599 | PyObject *value) |
|
617 | 600 | { |
|
618 | 601 | if ((value == Py_None) || (value == NULL)) { |
|
619 | 602 | self->flags &= ~dirstate_flag_has_fallback_exec; |
|
620 | 603 | } else { |
|
621 | 604 | self->flags |= dirstate_flag_has_fallback_exec; |
|
622 | 605 | if (PyObject_IsTrue(value)) { |
|
623 | 606 | self->flags |= dirstate_flag_fallback_exec; |
|
624 | 607 | } else { |
|
625 | 608 | self->flags &= ~dirstate_flag_fallback_exec; |
|
626 | 609 | } |
|
627 | 610 | } |
|
628 | 611 | return 0; |
|
629 | 612 | }; |
|
630 | 613 | |
|
631 | 614 | static PyObject * |
|
632 | 615 | dirstate_item_get_has_fallback_symlink(dirstateItemObject *self) |
|
633 | 616 | { |
|
634 | 617 | if (dirstate_item_c_has_fallback_symlink(self)) { |
|
635 | 618 | Py_RETURN_TRUE; |
|
636 | 619 | } else { |
|
637 | 620 | Py_RETURN_FALSE; |
|
638 | 621 | } |
|
639 | 622 | }; |
|
640 | 623 | |
|
641 | 624 | static PyObject *dirstate_item_get_fallback_symlink(dirstateItemObject *self) |
|
642 | 625 | { |
|
643 | 626 | if (dirstate_item_c_has_fallback_symlink(self)) { |
|
644 | 627 | if (self->flags & dirstate_flag_fallback_symlink) { |
|
645 | 628 | Py_RETURN_TRUE; |
|
646 | 629 | } else { |
|
647 | 630 | Py_RETURN_FALSE; |
|
648 | 631 | } |
|
649 | 632 | } else { |
|
650 | 633 | Py_RETURN_NONE; |
|
651 | 634 | } |
|
652 | 635 | }; |
|
653 | 636 | |
|
654 | 637 | static int dirstate_item_set_fallback_symlink(dirstateItemObject *self, |
|
655 | 638 | PyObject *value) |
|
656 | 639 | { |
|
657 | 640 | if ((value == Py_None) || (value == NULL)) { |
|
658 | 641 | self->flags &= ~dirstate_flag_has_fallback_symlink; |
|
659 | 642 | } else { |
|
660 | 643 | self->flags |= dirstate_flag_has_fallback_symlink; |
|
661 | 644 | if (PyObject_IsTrue(value)) { |
|
662 | 645 | self->flags |= dirstate_flag_fallback_symlink; |
|
663 | 646 | } else { |
|
664 | 647 | self->flags &= ~dirstate_flag_fallback_symlink; |
|
665 | 648 | } |
|
666 | 649 | } |
|
667 | 650 | return 0; |
|
668 | 651 | }; |
|
669 | 652 | |
|
670 | 653 | static PyObject *dirstate_item_get_tracked(dirstateItemObject *self) |
|
671 | 654 | { |
|
672 | 655 | if (dirstate_item_c_tracked(self)) { |
|
673 | 656 | Py_RETURN_TRUE; |
|
674 | 657 | } else { |
|
675 | 658 | Py_RETURN_FALSE; |
|
676 | 659 | } |
|
677 | 660 | }; |
|
678 | 661 | static PyObject *dirstate_item_get_p1_tracked(dirstateItemObject *self) |
|
679 | 662 | { |
|
680 | 663 | if (self->flags & dirstate_flag_p1_tracked) { |
|
681 | 664 | Py_RETURN_TRUE; |
|
682 | 665 | } else { |
|
683 | 666 | Py_RETURN_FALSE; |
|
684 | 667 | } |
|
685 | 668 | }; |
|
686 | 669 | |
|
687 | 670 | static PyObject *dirstate_item_get_added(dirstateItemObject *self) |
|
688 | 671 | { |
|
689 | 672 | if (dirstate_item_c_added(self)) { |
|
690 | 673 | Py_RETURN_TRUE; |
|
691 | 674 | } else { |
|
692 | 675 | Py_RETURN_FALSE; |
|
693 | 676 | } |
|
694 | 677 | }; |
|
695 | 678 | |
|
696 | 679 | static PyObject *dirstate_item_get_p2_info(dirstateItemObject *self) |
|
697 | 680 | { |
|
698 | 681 | if (self->flags & dirstate_flag_wc_tracked && |
|
699 | 682 | self->flags & dirstate_flag_p2_info) { |
|
700 | 683 | Py_RETURN_TRUE; |
|
701 | 684 | } else { |
|
702 | 685 | Py_RETURN_FALSE; |
|
703 | 686 | } |
|
704 | 687 | }; |
|
705 | 688 | |
|
706 | 689 | static PyObject *dirstate_item_get_merged(dirstateItemObject *self) |
|
707 | 690 | { |
|
708 | 691 | if (dirstate_item_c_merged(self)) { |
|
709 | 692 | Py_RETURN_TRUE; |
|
710 | 693 | } else { |
|
711 | 694 | Py_RETURN_FALSE; |
|
712 | 695 | } |
|
713 | 696 | }; |
|
714 | 697 | |
|
715 | 698 | static PyObject *dirstate_item_get_from_p2(dirstateItemObject *self) |
|
716 | 699 | { |
|
717 | 700 | if (dirstate_item_c_from_p2(self)) { |
|
718 | 701 | Py_RETURN_TRUE; |
|
719 | 702 | } else { |
|
720 | 703 | Py_RETURN_FALSE; |
|
721 | 704 | } |
|
722 | 705 | }; |
|
723 | 706 | |
|
724 | 707 | static PyObject *dirstate_item_get_maybe_clean(dirstateItemObject *self) |
|
725 | 708 | { |
|
726 | 709 | if (!(self->flags & dirstate_flag_wc_tracked)) { |
|
727 | 710 | Py_RETURN_FALSE; |
|
728 | 711 | } else if (!(self->flags & dirstate_flag_p1_tracked)) { |
|
729 | 712 | Py_RETURN_FALSE; |
|
730 | 713 | } else if (self->flags & dirstate_flag_p2_info) { |
|
731 | 714 | Py_RETURN_FALSE; |
|
732 | 715 | } else { |
|
733 | 716 | Py_RETURN_TRUE; |
|
734 | 717 | } |
|
735 | 718 | }; |
|
736 | 719 | |
|
737 | 720 | static PyObject *dirstate_item_get_any_tracked(dirstateItemObject *self) |
|
738 | 721 | { |
|
739 | 722 | if (dirstate_item_c_any_tracked(self)) { |
|
740 | 723 | Py_RETURN_TRUE; |
|
741 | 724 | } else { |
|
742 | 725 | Py_RETURN_FALSE; |
|
743 | 726 | } |
|
744 | 727 | }; |
|
745 | 728 | |
|
746 | 729 | static PyObject *dirstate_item_get_removed(dirstateItemObject *self) |
|
747 | 730 | { |
|
748 | 731 | if (dirstate_item_c_removed(self)) { |
|
749 | 732 | Py_RETURN_TRUE; |
|
750 | 733 | } else { |
|
751 | 734 | Py_RETURN_FALSE; |
|
752 | 735 | } |
|
753 | 736 | }; |
|
754 | 737 | |
|
755 | 738 | static PyGetSetDef dirstate_item_getset[] = { |
|
756 | 739 | {"mode", (getter)dirstate_item_get_mode, NULL, "mode", NULL}, |
|
757 | 740 | {"size", (getter)dirstate_item_get_size, NULL, "size", NULL}, |
|
758 | 741 | {"mtime", (getter)dirstate_item_get_mtime, NULL, "mtime", NULL}, |
|
759 | 742 | {"state", (getter)dirstate_item_get_state, NULL, "state", NULL}, |
|
760 | 743 | {"has_fallback_exec", (getter)dirstate_item_get_has_fallback_exec, NULL, |
|
761 | 744 | "has_fallback_exec", NULL}, |
|
762 | 745 | {"fallback_exec", (getter)dirstate_item_get_fallback_exec, |
|
763 | 746 | (setter)dirstate_item_set_fallback_exec, "fallback_exec", NULL}, |
|
764 | 747 | {"has_fallback_symlink", (getter)dirstate_item_get_has_fallback_symlink, |
|
765 | 748 | NULL, "has_fallback_symlink", NULL}, |
|
766 | 749 | {"fallback_symlink", (getter)dirstate_item_get_fallback_symlink, |
|
767 | 750 | (setter)dirstate_item_set_fallback_symlink, "fallback_symlink", NULL}, |
|
768 | 751 | {"tracked", (getter)dirstate_item_get_tracked, NULL, "tracked", NULL}, |
|
769 | 752 | {"p1_tracked", (getter)dirstate_item_get_p1_tracked, NULL, "p1_tracked", |
|
770 | 753 | NULL}, |
|
771 | 754 | {"added", (getter)dirstate_item_get_added, NULL, "added", NULL}, |
|
772 | 755 | {"p2_info", (getter)dirstate_item_get_p2_info, NULL, "p2_info", NULL}, |
|
773 | 756 | {"merged", (getter)dirstate_item_get_merged, NULL, "merged", NULL}, |
|
774 | 757 | {"from_p2", (getter)dirstate_item_get_from_p2, NULL, "from_p2", NULL}, |
|
775 | 758 | {"maybe_clean", (getter)dirstate_item_get_maybe_clean, NULL, "maybe_clean", |
|
776 | 759 | NULL}, |
|
777 | 760 | {"any_tracked", (getter)dirstate_item_get_any_tracked, NULL, "any_tracked", |
|
778 | 761 | NULL}, |
|
779 | 762 | {"removed", (getter)dirstate_item_get_removed, NULL, "removed", NULL}, |
|
780 | 763 | {NULL} /* Sentinel */ |
|
781 | 764 | }; |
|
782 | 765 | |
|
783 | 766 | PyTypeObject dirstateItemType = { |
|
784 | 767 | PyVarObject_HEAD_INIT(NULL, 0) /* header */ |
|
785 | 768 | "dirstate_tuple", /* tp_name */ |
|
786 | 769 | sizeof(dirstateItemObject), /* tp_basicsize */ |
|
787 | 770 | 0, /* tp_itemsize */ |
|
788 | 771 | (destructor)dirstate_item_dealloc, /* tp_dealloc */ |
|
789 | 772 | 0, /* tp_print */ |
|
790 | 773 | 0, /* tp_getattr */ |
|
791 | 774 | 0, /* tp_setattr */ |
|
792 | 775 | 0, /* tp_compare */ |
|
793 | 776 | 0, /* tp_repr */ |
|
794 | 777 | 0, /* tp_as_number */ |
|
795 | 778 | 0, /* tp_as_sequence */ |
|
796 | 779 | 0, /* tp_as_mapping */ |
|
797 | 780 | 0, /* tp_hash */ |
|
798 | 781 | 0, /* tp_call */ |
|
799 | 782 | 0, /* tp_str */ |
|
800 | 783 | 0, /* tp_getattro */ |
|
801 | 784 | 0, /* tp_setattro */ |
|
802 | 785 | 0, /* tp_as_buffer */ |
|
803 | 786 | Py_TPFLAGS_DEFAULT, /* tp_flags */ |
|
804 | 787 | "dirstate tuple", /* tp_doc */ |
|
805 | 788 | 0, /* tp_traverse */ |
|
806 | 789 | 0, /* tp_clear */ |
|
807 | 790 | 0, /* tp_richcompare */ |
|
808 | 791 | 0, /* tp_weaklistoffset */ |
|
809 | 792 | 0, /* tp_iter */ |
|
810 | 793 | 0, /* tp_iternext */ |
|
811 | 794 | dirstate_item_methods, /* tp_methods */ |
|
812 | 795 | 0, /* tp_members */ |
|
813 | 796 | dirstate_item_getset, /* tp_getset */ |
|
814 | 797 | 0, /* tp_base */ |
|
815 | 798 | 0, /* tp_dict */ |
|
816 | 799 | 0, /* tp_descr_get */ |
|
817 | 800 | 0, /* tp_descr_set */ |
|
818 | 801 | 0, /* tp_dictoffset */ |
|
819 | 802 | 0, /* tp_init */ |
|
820 | 803 | 0, /* tp_alloc */ |
|
821 | 804 | dirstate_item_new, /* tp_new */ |
|
822 | 805 | }; |
|
823 | 806 | |
|
824 | 807 | static PyObject *parse_dirstate(PyObject *self, PyObject *args) |
|
825 | 808 | { |
|
826 | 809 | PyObject *dmap, *cmap, *parents = NULL, *ret = NULL; |
|
827 | 810 | PyObject *fname = NULL, *cname = NULL, *entry = NULL; |
|
828 | 811 | char state, *cur, *str, *cpos; |
|
829 | 812 | int mode, size, mtime; |
|
830 | 813 | unsigned int flen, pos = 40; |
|
831 | 814 | Py_ssize_t len = 40; |
|
832 | 815 | Py_ssize_t readlen; |
|
833 | 816 | |
|
834 | 817 | if (!PyArg_ParseTuple( |
|
835 | 818 | args, PY23("O!O!s#:parse_dirstate", "O!O!y#:parse_dirstate"), |
|
836 | 819 | &PyDict_Type, &dmap, &PyDict_Type, &cmap, &str, &readlen)) { |
|
837 | 820 | goto quit; |
|
838 | 821 | } |
|
839 | 822 | |
|
840 | 823 | len = readlen; |
|
841 | 824 | |
|
842 | 825 | /* read parents */ |
|
843 | 826 | if (len < 40) { |
|
844 | 827 | PyErr_SetString(PyExc_ValueError, |
|
845 | 828 | "too little data for parents"); |
|
846 | 829 | goto quit; |
|
847 | 830 | } |
|
848 | 831 | |
|
849 | 832 | parents = Py_BuildValue(PY23("s#s#", "y#y#"), str, (Py_ssize_t)20, |
|
850 | 833 | str + 20, (Py_ssize_t)20); |
|
851 | 834 | if (!parents) { |
|
852 | 835 | goto quit; |
|
853 | 836 | } |
|
854 | 837 | |
|
855 | 838 | /* read filenames */ |
|
856 | 839 | while (pos >= 40 && pos < len) { |
|
857 | 840 | if (pos + 17 > len) { |
|
858 | 841 | PyErr_SetString(PyExc_ValueError, |
|
859 | 842 | "overflow in dirstate"); |
|
860 | 843 | goto quit; |
|
861 | 844 | } |
|
862 | 845 | cur = str + pos; |
|
863 | 846 | /* unpack header */ |
|
864 | 847 | state = *cur; |
|
865 | 848 | mode = getbe32(cur + 1); |
|
866 | 849 | size = getbe32(cur + 5); |
|
867 | 850 | mtime = getbe32(cur + 9); |
|
868 | 851 | flen = getbe32(cur + 13); |
|
869 | 852 | pos += 17; |
|
870 | 853 | cur += 17; |
|
871 | 854 | if (flen > len - pos) { |
|
872 | 855 | PyErr_SetString(PyExc_ValueError, |
|
873 | 856 | "overflow in dirstate"); |
|
874 | 857 | goto quit; |
|
875 | 858 | } |
|
876 | 859 | |
|
877 | 860 | entry = (PyObject *)dirstate_item_from_v1_data(state, mode, |
|
878 | 861 | size, mtime); |
|
879 | 862 | if (!entry) |
|
880 | 863 | goto quit; |
|
881 | 864 | cpos = memchr(cur, 0, flen); |
|
882 | 865 | if (cpos) { |
|
883 | 866 | fname = PyBytes_FromStringAndSize(cur, cpos - cur); |
|
884 | 867 | cname = PyBytes_FromStringAndSize( |
|
885 | 868 | cpos + 1, flen - (cpos - cur) - 1); |
|
886 | 869 | if (!fname || !cname || |
|
887 | 870 | PyDict_SetItem(cmap, fname, cname) == -1 || |
|
888 | 871 | PyDict_SetItem(dmap, fname, entry) == -1) { |
|
889 | 872 | goto quit; |
|
890 | 873 | } |
|
891 | 874 | Py_DECREF(cname); |
|
892 | 875 | } else { |
|
893 | 876 | fname = PyBytes_FromStringAndSize(cur, flen); |
|
894 | 877 | if (!fname || |
|
895 | 878 | PyDict_SetItem(dmap, fname, entry) == -1) { |
|
896 | 879 | goto quit; |
|
897 | 880 | } |
|
898 | 881 | } |
|
899 | 882 | Py_DECREF(fname); |
|
900 | 883 | Py_DECREF(entry); |
|
901 | 884 | fname = cname = entry = NULL; |
|
902 | 885 | pos += flen; |
|
903 | 886 | } |
|
904 | 887 | |
|
905 | 888 | ret = parents; |
|
906 | 889 | Py_INCREF(ret); |
|
907 | 890 | quit: |
|
908 | 891 | Py_XDECREF(fname); |
|
909 | 892 | Py_XDECREF(cname); |
|
910 | 893 | Py_XDECREF(entry); |
|
911 | 894 | Py_XDECREF(parents); |
|
912 | 895 | return ret; |
|
913 | 896 | } |
|
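Note: parse_dirstate walks fixed 17-byte record headers (a state byte, then mode, size, mtime and a filename-field length as big-endian 32-bit integers) followed by the filename, where "name\0copysource" encodes a copy. A simplified Python sketch of the same walk, without the overflow checks the C version performs:

```python
import struct

# 17-byte dirstate-v1 record header: state byte, then mode, size,
# mtime and the length of the filename field, all big-endian 32-bit.
_header = struct.Struct(b">cllll")
assert _header.size == 17

def iter_v1_records(data, pos=40):
    """Walk records after the 40 bytes of parent hashes; a simplified
    sketch of parse_dirstate."""
    while pos < len(data):
        state, mode, size, mtime, flen = _header.unpack_from(data, pos)
        pos += _header.size
        name = data[pos:pos + flen]
        pos += flen
        copysource = None
        if b"\0" in name:
            # "name\0copysource" records a copy, as in the cmap handling
            name, copysource = name.split(b"\0", 1)
        yield state, mode, size, mtime, name, copysource
```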
914 | 897 | |
|
915 | 898 | /* |
|
916 | 899 | * Efficiently pack a dirstate object into its on-disk format. |
|
917 | 900 | */ |
|
918 | 901 | static PyObject *pack_dirstate(PyObject *self, PyObject *args) |
|
919 | 902 | { |
|
920 | 903 | PyObject *packobj = NULL; |
|
921 | 904 | PyObject *map, *copymap, *pl, *mtime_unset = NULL; |
|
922 | 905 | Py_ssize_t nbytes, pos, l; |
|
923 | 906 | PyObject *k, *v = NULL, *pn; |
|
924 | 907 | char *p, *s; |
|
925 | int now_s; | |
|
926 | int now_ns; | |
|
927 | 908 | |
|
928 | if (!PyArg_ParseTuple(args, "O!O!O!(ii):pack_dirstate", &PyDict_Type, | 
|
929 | &map, &PyDict_Type, &copymap, &PyTuple_Type, &pl, | 
|
930 | &now_s, &now_ns)) { | 
|
909 | if (!PyArg_ParseTuple(args, "O!O!O!:pack_dirstate", &PyDict_Type, &map, | |
|
910 | &PyDict_Type, ©map, &PyTuple_Type, &pl)) { | |
|
931 | 911 | return NULL; |
|
932 | 912 | } |
|
933 | 913 | |
|
934 | 914 | if (PyTuple_Size(pl) != 2) { |
|
935 | 915 | PyErr_SetString(PyExc_TypeError, "expected 2-element tuple"); |
|
936 | 916 | return NULL; |
|
937 | 917 | } |
|
938 | 918 | |
|
939 | 919 | /* Figure out how much we need to allocate. */ |
|
940 | 920 | for (nbytes = 40, pos = 0; PyDict_Next(map, &pos, &k, &v);) { |
|
941 | 921 | PyObject *c; |
|
942 | 922 | if (!PyBytes_Check(k)) { |
|
943 | 923 | PyErr_SetString(PyExc_TypeError, "expected string key"); |
|
944 | 924 | goto bail; |
|
945 | 925 | } |
|
946 | 926 | nbytes += PyBytes_GET_SIZE(k) + 17; |
|
947 | 927 | c = PyDict_GetItem(copymap, k); |
|
948 | 928 | if (c) { |
|
949 | 929 | if (!PyBytes_Check(c)) { |
|
950 | 930 | PyErr_SetString(PyExc_TypeError, |
|
951 | 931 | "expected string key"); |
|
952 | 932 | goto bail; |
|
953 | 933 | } |
|
954 | 934 | nbytes += PyBytes_GET_SIZE(c) + 1; |
|
955 | 935 | } |
|
956 | 936 | } |
|
957 | 937 | |
|
958 | 938 | packobj = PyBytes_FromStringAndSize(NULL, nbytes); |
|
959 | 939 | if (packobj == NULL) { |
|
960 | 940 | goto bail; |
|
961 | 941 | } |
|
962 | 942 | |
|
963 | 943 | p = PyBytes_AS_STRING(packobj); |
|
964 | 944 | |
|
965 | 945 | pn = PyTuple_GET_ITEM(pl, 0); |
|
966 | 946 | if (PyBytes_AsStringAndSize(pn, &s, &l) == -1 || l != 20) { |
|
967 | 947 | PyErr_SetString(PyExc_TypeError, "expected a 20-byte hash"); |
|
968 | 948 | goto bail; |
|
969 | 949 | } |
|
970 | 950 | memcpy(p, s, l); |
|
971 | 951 | p += 20; |
|
972 | 952 | pn = PyTuple_GET_ITEM(pl, 1); |
|
973 | 953 | if (PyBytes_AsStringAndSize(pn, &s, &l) == -1 || l != 20) { |
|
974 | 954 | PyErr_SetString(PyExc_TypeError, "expected a 20-byte hash"); |
|
975 | 955 | goto bail; |
|
976 | 956 | } |
|
977 | 957 | memcpy(p, s, l); |
|
978 | 958 | p += 20; |
|
979 | 959 | |
|
980 | 960 | for (pos = 0; PyDict_Next(map, &pos, &k, &v);) { |
|
981 | 961 | dirstateItemObject *tuple; |
|
982 | 962 | char state; |
|
983 | 963 | int mode, size, mtime; |
|
984 | 964 | Py_ssize_t len, l; |
|
985 | 965 | PyObject *o; |
|
986 | 966 | char *t; |
|
987 | 967 | |
|
988 | 968 | if (!dirstate_tuple_check(v)) { |
|
989 | 969 | PyErr_SetString(PyExc_TypeError, |
|
990 | 970 | "expected a dirstate tuple"); |
|
991 | 971 | goto bail; |
|
992 | 972 | } |
|
993 | 973 | tuple = (dirstateItemObject *)v; |
|
994 | 974 | |
|
995 | 975 | state = dirstate_item_c_v1_state(tuple); |
|
996 | 976 | mode = dirstate_item_c_v1_mode(tuple); |
|
997 | 977 | size = dirstate_item_c_v1_size(tuple); |
|
998 | 978 | mtime = dirstate_item_c_v1_mtime(tuple); |
|
999 | if (state == 'n' && tuple->mtime_s == now_s) { | |
|
1000 | /* See pure/parsers.py:pack_dirstate for why we do | |
|
1001 | * this. */ | |
|
1002 | mtime = -1; | |
|
1003 | mtime_unset = (PyObject *)dirstate_item_from_v1_data( | |
|
1004 | state, mode, size, mtime); | |
|
1005 | if (!mtime_unset) { | |
|
1006 | goto bail; | |
|
1007 | } | |
|
1008 | if (PyDict_SetItem(map, k, mtime_unset) == -1) { | |
|
1009 | goto bail; | |
|
1010 | } | |
|
1011 | Py_DECREF(mtime_unset); | |
|
1012 | mtime_unset = NULL; | |
|
1013 | } | |
|
1014 | 979 | *p++ = state; |
|
1015 | 980 | putbe32((uint32_t)mode, p); |
|
1016 | 981 | putbe32((uint32_t)size, p + 4); |
|
1017 | 982 | putbe32((uint32_t)mtime, p + 8); |
|
1018 | 983 | t = p + 12; |
|
1019 | 984 | p += 16; |
|
1020 | 985 | len = PyBytes_GET_SIZE(k); |
|
1021 | 986 | memcpy(p, PyBytes_AS_STRING(k), len); |
|
1022 | 987 | p += len; |
|
1023 | 988 | o = PyDict_GetItem(copymap, k); |
|
1024 | 989 | if (o) { |
|
1025 | 990 | *p++ = '\0'; |
|
1026 | 991 | l = PyBytes_GET_SIZE(o); |
|
1027 | 992 | memcpy(p, PyBytes_AS_STRING(o), l); |
|
1028 | 993 | p += l; |
|
1029 | 994 | len += l + 1; |
|
1030 | 995 | } |
|
1031 | 996 | putbe32((uint32_t)len, t); |
|
1032 | 997 | } |
|
1033 | 998 | |
|
1034 | 999 | pos = p - PyBytes_AS_STRING(packobj); |
|
1035 | 1000 | if (pos != nbytes) { |
|
1036 | 1001 | PyErr_Format(PyExc_SystemError, "bad dirstate size: %ld != %ld", |
|
1037 | 1002 | (long)pos, (long)nbytes); |
|
1038 | 1003 | goto bail; |
|
1039 | 1004 | } |
|
1040 | 1005 | |
|
1041 | 1006 | return packobj; |
|
1042 | 1007 | bail: |
|
1043 | 1008 | Py_XDECREF(mtime_unset); |
|
1044 | 1009 | Py_XDECREF(packobj); |
|
1045 | 1010 | Py_XDECREF(v); |
|
1046 | 1011 | return NULL; |
|
1047 | 1012 | } |
|
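Note: pack_dirstate's first pass sizes the output buffer exactly: 40 bytes for the two parent hashes plus, for each entry, 17 header bytes, the filename, and `1 + len(copysource)` more when the file is a copy. A sketch of that sizing rule with a worked example:

```python
def packed_size(entries, copies):
    """Sketch of pack_dirstate's sizing pass: entries maps filename ->
    item, copies maps filename -> copy source."""
    nbytes = 40  # two 20-byte parent hashes
    for name in entries:
        nbytes += 17 + len(name)
        if name in copies:
            nbytes += 1 + len(copies[name])  # NUL separator + source
    return nbytes

# b"foo" copied from b"bar": 40 + (17 + 3) + (1 + 3) == 64 bytes
assert packed_size({b"foo": object()}, {b"foo": b"bar"}) == 64
```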
1048 | 1013 | |
|
1049 | 1014 | #define BUMPED_FIX 1 |
|
1050 | 1015 | #define USING_SHA_256 2 |
|
1051 | 1016 | #define FM1_HEADER_SIZE (4 + 8 + 2 + 2 + 1 + 1 + 1) |
|
1052 | 1017 | |
|
1053 | 1018 | static PyObject *readshas(const char *source, unsigned char num, |
|
1054 | 1019 | Py_ssize_t hashwidth) |
|
1055 | 1020 | { |
|
1056 | 1021 | int i; |
|
1057 | 1022 | PyObject *list = PyTuple_New(num); |
|
1058 | 1023 | if (list == NULL) { |
|
1059 | 1024 | return NULL; |
|
1060 | 1025 | } |
|
1061 | 1026 | for (i = 0; i < num; i++) { |
|
1062 | 1027 | PyObject *hash = PyBytes_FromStringAndSize(source, hashwidth); |
|
1063 | 1028 | if (hash == NULL) { |
|
1064 | 1029 | Py_DECREF(list); |
|
1065 | 1030 | return NULL; |
|
1066 | 1031 | } |
|
1067 | 1032 | PyTuple_SET_ITEM(list, i, hash); |
|
1068 | 1033 | source += hashwidth; |
|
1069 | 1034 | } |
|
1070 | 1035 | return list; |
|
1071 | 1036 | } |
|
1072 | 1037 | |
|
1073 | 1038 | static PyObject *fm1readmarker(const char *databegin, const char *dataend, |
|
1074 | 1039 | uint32_t *msize) |
|
1075 | 1040 | { |
|
1076 | 1041 | const char *data = databegin; |
|
1077 | 1042 | const char *meta; |
|
1078 | 1043 | |
|
1079 | 1044 | double mtime; |
|
1080 | 1045 | int16_t tz; |
|
1081 | 1046 | uint16_t flags; |
|
1082 | 1047 | unsigned char nsuccs, nparents, nmetadata; |
|
1083 | 1048 | Py_ssize_t hashwidth = 20; |
|
1084 | 1049 | |
|
1085 | 1050 | PyObject *prec = NULL, *parents = NULL, *succs = NULL; |
|
1086 | 1051 | PyObject *metadata = NULL, *ret = NULL; |
|
1087 | 1052 | int i; |
|
1088 | 1053 | |
|
1089 | 1054 | if (data + FM1_HEADER_SIZE > dataend) { |
|
1090 | 1055 | goto overflow; |
|
1091 | 1056 | } |
|
1092 | 1057 | |
|
1093 | 1058 | *msize = getbe32(data); |
|
1094 | 1059 | data += 4; |
|
1095 | 1060 | mtime = getbefloat64(data); |
|
1096 | 1061 | data += 8; |
|
1097 | 1062 | tz = getbeint16(data); |
|
1098 | 1063 | data += 2; |
|
1099 | 1064 | flags = getbeuint16(data); |
|
1100 | 1065 | data += 2; |
|
1101 | 1066 | |
|
1102 | 1067 | if (flags & USING_SHA_256) { |
|
1103 | 1068 | hashwidth = 32; |
|
1104 | 1069 | } |
|
1105 | 1070 | |
|
1106 | 1071 | nsuccs = (unsigned char)(*data++); |
|
1107 | 1072 | nparents = (unsigned char)(*data++); |
|
1108 | 1073 | nmetadata = (unsigned char)(*data++); |
|
1109 | 1074 | |
|
1110 | 1075 | if (databegin + *msize > dataend) { |
|
1111 | 1076 | goto overflow; |
|
1112 | 1077 | } |
|
1113 | 1078 | dataend = databegin + *msize; /* narrow down to marker size */ |
|
1114 | 1079 | |
|
1115 | 1080 | if (data + hashwidth > dataend) { |
|
1116 | 1081 | goto overflow; |
|
1117 | 1082 | } |
|
1118 | 1083 | prec = PyBytes_FromStringAndSize(data, hashwidth); |
|
1119 | 1084 | data += hashwidth; |
|
1120 | 1085 | if (prec == NULL) { |
|
1121 | 1086 | goto bail; |
|
1122 | 1087 | } |
|
1123 | 1088 | |
|
1124 | 1089 | if (data + nsuccs * hashwidth > dataend) { |
|
1125 | 1090 | goto overflow; |
|
1126 | 1091 | } |
|
1127 | 1092 | succs = readshas(data, nsuccs, hashwidth); |
|
1128 | 1093 | if (succs == NULL) { |
|
1129 | 1094 | goto bail; |
|
1130 | 1095 | } |
|
1131 | 1096 | data += nsuccs * hashwidth; |
|
1132 | 1097 | |
|
1133 | 1098 | if (nparents == 1 || nparents == 2) { |
|
1134 | 1099 | if (data + nparents * hashwidth > dataend) { |
|
1135 | 1100 | goto overflow; |
|
1136 | 1101 | } |
|
1137 | 1102 | parents = readshas(data, nparents, hashwidth); |
|
1138 | 1103 | if (parents == NULL) { |
|
1139 | 1104 | goto bail; |
|
1140 | 1105 | } |
|
1141 | 1106 | data += nparents * hashwidth; |
|
1142 | 1107 | } else { |
|
1143 | 1108 | parents = Py_None; |
|
1144 | 1109 | Py_INCREF(parents); |
|
1145 | 1110 | } |
|
1146 | 1111 | |
|
1147 | 1112 | if (data + 2 * nmetadata > dataend) { |
|
1148 | 1113 | goto overflow; |
|
1149 | 1114 | } |
|
1150 | 1115 | meta = data + (2 * nmetadata); |
|
1151 | 1116 | metadata = PyTuple_New(nmetadata); |
|
1152 | 1117 | if (metadata == NULL) { |
|
1153 | 1118 | goto bail; |
|
1154 | 1119 | } |
|
1155 | 1120 | for (i = 0; i < nmetadata; i++) { |
|
1156 | 1121 | PyObject *tmp, *left = NULL, *right = NULL; |
|
1157 | 1122 | Py_ssize_t leftsize = (unsigned char)(*data++); |
|
1158 | 1123 | Py_ssize_t rightsize = (unsigned char)(*data++); |
|
1159 | 1124 | if (meta + leftsize + rightsize > dataend) { |
|
1160 | 1125 | goto overflow; |
|
1161 | 1126 | } |
|
1162 | 1127 | left = PyBytes_FromStringAndSize(meta, leftsize); |
|
1163 | 1128 | meta += leftsize; |
|
1164 | 1129 | right = PyBytes_FromStringAndSize(meta, rightsize); |
|
1165 | 1130 | meta += rightsize; |
|
1166 | 1131 | tmp = PyTuple_New(2); |
|
1167 | 1132 | if (!left || !right || !tmp) { |
|
1168 | 1133 | Py_XDECREF(left); |
|
1169 | 1134 | Py_XDECREF(right); |
|
1170 | 1135 | Py_XDECREF(tmp); |
|
1171 | 1136 | goto bail; |
|
1172 | 1137 | } |
|
1173 | 1138 | PyTuple_SET_ITEM(tmp, 0, left); |
|
1174 | 1139 | PyTuple_SET_ITEM(tmp, 1, right); |
|
1175 | 1140 | PyTuple_SET_ITEM(metadata, i, tmp); |
|
1176 | 1141 | } |
|
1177 | 1142 | ret = Py_BuildValue("(OOHO(di)O)", prec, succs, flags, metadata, mtime, |
|
1178 | 1143 | (int)tz * 60, parents); |
|
1179 | 1144 | goto bail; /* return successfully */ |
|
1180 | 1145 | |
|
1181 | 1146 | overflow: |
|
1182 | 1147 | PyErr_SetString(PyExc_ValueError, "overflow in obsstore"); |
|
1183 | 1148 | bail: |
|
1184 | 1149 | Py_XDECREF(prec); |
|
1185 | 1150 | Py_XDECREF(succs); |
|
1186 | 1151 | Py_XDECREF(metadata); |
|
1187 | 1152 | Py_XDECREF(parents); |
|
1188 | 1153 | return ret; |
|
1189 | 1154 | } |
|
1190 | 1155 | |
|
1191 | 1156 | static PyObject *fm1readmarkers(PyObject *self, PyObject *args) |
|
1192 | 1157 | { |
|
1193 | 1158 | const char *data, *dataend; |
|
1194 | 1159 | Py_ssize_t datalen, offset, stop; |
|
1195 | 1160 | PyObject *markers = NULL; |
|
1196 | 1161 | |
|
1197 | 1162 | if (!PyArg_ParseTuple(args, PY23("s#nn", "y#nn"), &data, &datalen, |
|
1198 | 1163 | &offset, &stop)) { |
|
1199 | 1164 | return NULL; |
|
1200 | 1165 | } |
|
1201 | 1166 | if (offset < 0) { |
|
1202 | 1167 | PyErr_SetString(PyExc_ValueError, |
|
1203 | 1168 | "invalid negative offset in fm1readmarkers"); |
|
1204 | 1169 | return NULL; |
|
1205 | 1170 | } |
|
1206 | 1171 | if (stop > datalen) { |
|
1207 | 1172 | PyErr_SetString( |
|
1208 | 1173 | PyExc_ValueError, |
|
1209 | 1174 | "stop longer than data length in fm1readmarkers"); |
|
1210 | 1175 | return NULL; |
|
1211 | 1176 | } |
|
1212 | 1177 | dataend = data + datalen; |
|
1213 | 1178 | data += offset; |
|
1214 | 1179 | markers = PyList_New(0); |
|
1215 | 1180 | if (!markers) { |
|
1216 | 1181 | return NULL; |
|
1217 | 1182 | } |
|
1218 | 1183 | while (offset < stop) { |
|
1219 | 1184 | uint32_t msize; |
|
1220 | 1185 | int error; |
|
1221 | 1186 | PyObject *record = fm1readmarker(data, dataend, &msize); |
|
1222 | 1187 | if (!record) { |
|
1223 | 1188 | goto bail; |
|
1224 | 1189 | } |
|
1225 | 1190 | error = PyList_Append(markers, record); |
|
1226 | 1191 | Py_DECREF(record); |
|
1227 | 1192 | if (error) { |
|
1228 | 1193 | goto bail; |
|
1229 | 1194 | } |
|
1230 | 1195 | data += msize; |
|
1231 | 1196 | offset += msize; |
|
1232 | 1197 | } |
|
1233 | 1198 | return markers; |
|
1234 | 1199 | bail: |
|
1235 | 1200 | Py_DECREF(markers); |
|
1236 | 1201 | return NULL; |
|
1237 | 1202 | } |
|
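Note: each FM1 obsolescence marker starts with a fixed header whose fields sum to FM1_HEADER_SIZE (4 + 8 + 2 + 2 + 1 + 1 + 1 = 19 bytes), and the reader loop advances by the total size each marker declares for itself. A Python sketch of that layout and walk (the struct format mirrors the field-by-field reads above):

```python
import struct

# Fixed FM1 marker header: total size, float mtime, timezone offset,
# flags, then successor/parent/metadata counts -- 19 bytes, matching
# FM1_HEADER_SIZE above.
_fm1fixed = struct.Struct(b">IdhHBBB")
assert _fm1fixed.size == 19

def iter_marker_sizes(data, offset, stop):
    """Yield (offset, size) per marker, advancing by the size declared
    in each header -- the same walk fm1readmarkers performs."""
    while offset < stop:
        size = struct.unpack_from(b">I", data, offset)[0]
        yield offset, size
        offset += size
```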
1238 | 1203 | |
|
1239 | 1204 | static char parsers_doc[] = "Efficient content parsing."; |
|
1240 | 1205 | |
|
1241 | 1206 | PyObject *encodedir(PyObject *self, PyObject *args); |
|
1242 | 1207 | PyObject *pathencode(PyObject *self, PyObject *args); |
|
1243 | 1208 | PyObject *lowerencode(PyObject *self, PyObject *args); |
|
1244 | 1209 | PyObject *parse_index2(PyObject *self, PyObject *args, PyObject *kwargs); |
|
1245 | 1210 | |
|
1246 | 1211 | static PyMethodDef methods[] = { |
|
1247 | 1212 | {"pack_dirstate", pack_dirstate, METH_VARARGS, "pack a dirstate\n"}, |
|
1248 | 1213 | {"parse_dirstate", parse_dirstate, METH_VARARGS, "parse a dirstate\n"}, |
|
1249 | 1214 | {"parse_index2", (PyCFunction)parse_index2, METH_VARARGS | METH_KEYWORDS, |
|
1250 | 1215 | "parse a revlog index\n"}, |
|
1251 | 1216 | {"isasciistr", isasciistr, METH_VARARGS, "check if a string is ASCII\n"}, | 
|
1252 | 1217 | {"asciilower", asciilower, METH_VARARGS, "lowercase an ASCII string\n"}, |
|
1253 | 1218 | {"asciiupper", asciiupper, METH_VARARGS, "uppercase an ASCII string\n"}, |
|
1254 | 1219 | {"dict_new_presized", dict_new_presized, METH_VARARGS, |
|
1255 | 1220 | "construct a dict with an expected size\n"}, |
|
1256 | 1221 | {"make_file_foldmap", make_file_foldmap, METH_VARARGS, |
|
1257 | 1222 | "make file foldmap\n"}, |
|
1258 | 1223 | {"jsonescapeu8fast", jsonescapeu8fast, METH_VARARGS, |
|
1259 | 1224 | "escape a UTF-8 byte string to JSON (fast path)\n"}, |
|
1260 | 1225 | {"encodedir", encodedir, METH_VARARGS, "encodedir a path\n"}, |
|
1261 | 1226 | {"pathencode", pathencode, METH_VARARGS, "fncache-encode a path\n"}, |
|
1262 | 1227 | {"lowerencode", lowerencode, METH_VARARGS, "lower-encode a path\n"}, |
|
1263 | 1228 | {"fm1readmarkers", fm1readmarkers, METH_VARARGS, |
|
1264 | 1229 | "parse v1 obsolete markers\n"}, |
|
1265 | 1230 | {NULL, NULL}}; |
|
1266 | 1231 | |
|
1267 | 1232 | void dirs_module_init(PyObject *mod); |
|
1268 | 1233 | void manifest_module_init(PyObject *mod); |
|
1269 | 1234 | void revlog_module_init(PyObject *mod); |
|
1270 | 1235 | |
|
1271 | 1236 | static const int version = 20; |
|
1272 | 1237 | |
|
1273 | 1238 | static void module_init(PyObject *mod) |
|
1274 | 1239 | { |
|
1275 | 1240 | PyModule_AddIntConstant(mod, "version", version); |
|
1276 | 1241 | |
|
1277 | 1242 | /* This module constant has two purposes. First, it lets us unit test |
|
1278 | 1243 | * the ImportError raised without hard-coding any error text. This |
|
1279 | 1244 | * means we can change the text in the future without breaking tests, |
|
1280 | 1245 | * even across changesets without a recompile. Second, its presence |
|
1281 | 1246 | * can be used to determine whether the version-checking logic is |
|
1282 | 1247 | * present, which also helps in testing across changesets without a |
|
1283 | 1248 | * recompile. Note that this means the pure-Python version of parsers |
|
1284 | 1249 | * should not have this module constant. */ |
|
1285 | 1250 | PyModule_AddStringConstant(mod, "versionerrortext", versionerrortext); |
|
1286 | 1251 | |
|
1287 | 1252 | dirs_module_init(mod); |
|
1288 | 1253 | manifest_module_init(mod); |
|
1289 | 1254 | revlog_module_init(mod); |
|
1290 | 1255 | |
|
1291 | 1256 | if (PyType_Ready(&dirstateItemType) < 0) { |
|
1292 | 1257 | return; |
|
1293 | 1258 | } |
|
1294 | 1259 | Py_INCREF(&dirstateItemType); |
|
1295 | 1260 | PyModule_AddObject(mod, "DirstateItem", (PyObject *)&dirstateItemType); |
|
1296 | 1261 | } |
|
1297 | 1262 | |
|
1298 | 1263 | static int check_python_version(void) |
|
1299 | 1264 | { |
|
1300 | 1265 | PyObject *sys = PyImport_ImportModule("sys"), *ver; |
|
1301 | 1266 | long hexversion; |
|
1302 | 1267 | if (!sys) { |
|
1303 | 1268 | return -1; |
|
1304 | 1269 | } |
|
1305 | 1270 | ver = PyObject_GetAttrString(sys, "hexversion"); |
|
1306 | 1271 | Py_DECREF(sys); |
|
1307 | 1272 | if (!ver) { |
|
1308 | 1273 | return -1; |
|
1309 | 1274 | } |
|
1310 | 1275 | hexversion = PyInt_AsLong(ver); |
|
1311 | 1276 | Py_DECREF(ver); |
|
1312 | 1277 | /* sys.hexversion is a 32-bit number by default, so the -1 case |
|
1313 | 1278 | * should only occur in unusual circumstances (e.g. if sys.hexversion |
|
1314 | 1279 | * is manually set to an invalid value). */ |
|
1315 | 1280 | if ((hexversion == -1) || (hexversion >> 16 != PY_VERSION_HEX >> 16)) { |
|
1316 | 1281 | PyErr_Format(PyExc_ImportError, |
|
1317 | 1282 | "%s: The Mercurial extension " |
|
1318 | 1283 | "modules were compiled with Python " PY_VERSION |
|
1319 | 1284 | ", but " |
|
1320 | 1285 | "Mercurial is currently using Python with " |
|
1321 | 1286 | "sys.hexversion=%ld: " |
|
1322 | 1287 | "Python %s\n at: %s", |
|
1323 | 1288 | versionerrortext, hexversion, Py_GetVersion(), |
|
1324 | 1289 | Py_GetProgramFullPath()); |
|
1325 | 1290 | return -1; |
|
1326 | 1291 | } |
|
1327 | 1292 | return 0; |
|
1328 | 1293 | } |
|
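Note: shifting `sys.hexversion` right by 16 bits keeps only the major and minor version bytes, so any micro release of the Python the module was built against is accepted. A quick illustration (the version numbers are hypothetical):

```python
# hexversion is 0xMMmmppfn: major, minor, micro, release level/serial.
built = 0x030901f0    # hypothetical build against CPython 3.9.1
running = 0x030905f0  # hypothetical interpreter CPython 3.9.5
assert built >> 16 == running >> 16 == 0x0309  # 3.9.x matches 3.9.x
assert 0x030a00f0 >> 16 != built >> 16         # but 3.10 does not
```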
1329 | 1294 | |
|
1330 | 1295 | #ifdef IS_PY3K |
|
1331 | 1296 | static struct PyModuleDef parsers_module = {PyModuleDef_HEAD_INIT, "parsers", |
|
1332 | 1297 | parsers_doc, -1, methods}; |
|
1333 | 1298 | |
|
1334 | 1299 | PyMODINIT_FUNC PyInit_parsers(void) |
|
1335 | 1300 | { |
|
1336 | 1301 | PyObject *mod; |
|
1337 | 1302 | |
|
1338 | 1303 | if (check_python_version() == -1) |
|
1339 | 1304 | return NULL; |
|
1340 | 1305 | mod = PyModule_Create(&parsers_module); |
|
1341 | 1306 | module_init(mod); |
|
1342 | 1307 | return mod; |
|
1343 | 1308 | } |
|
1344 | 1309 | #else |
|
1345 | 1310 | PyMODINIT_FUNC initparsers(void) |
|
1346 | 1311 | { |
|
1347 | 1312 | PyObject *mod; |
|
1348 | 1313 | |
|
1349 | 1314 | if (check_python_version() == -1) { |
|
1350 | 1315 | return; |
|
1351 | 1316 | } |
|
1352 | 1317 | mod = Py_InitModule3("parsers", methods, parsers_doc); |
|
1353 | 1318 | module_init(mod); |
|
1354 | 1319 | } |
|
1355 | 1320 | #endif |
@@ -1,1491 +1,1472 b'' | |||
|
1 | 1 | # dirstate.py - working directory tracking for mercurial |
|
2 | 2 | # |
|
3 | 3 | # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com> |
|
4 | 4 | # |
|
5 | 5 | # This software may be used and distributed according to the terms of the |
|
6 | 6 | # GNU General Public License version 2 or any later version. |
|
7 | 7 | |
|
8 | 8 | from __future__ import absolute_import |
|
9 | 9 | |
|
10 | 10 | import collections |
|
11 | 11 | import contextlib |
|
12 | 12 | import errno |
|
13 | 13 | import os |
|
14 | 14 | import stat |
|
15 | 15 | |
|
16 | 16 | from .i18n import _ |
|
17 | 17 | from .pycompat import delattr |
|
18 | 18 | |
|
19 | 19 | from hgdemandimport import tracing |
|
20 | 20 | |
|
21 | 21 | from . import ( |
|
22 | 22 | dirstatemap, |
|
23 | 23 | encoding, |
|
24 | 24 | error, |
|
25 | 25 | match as matchmod, |
|
26 | 26 | pathutil, |
|
27 | 27 | policy, |
|
28 | 28 | pycompat, |
|
29 | 29 | scmutil, |
|
30 | 30 | sparse, |
|
31 | 31 | util, |
|
32 | 32 | ) |
|
33 | 33 | |
|
34 | 34 | from .dirstateutils import ( |
|
35 | 35 | timestamp, |
|
36 | 36 | ) |
|
37 | 37 | |
|
38 | 38 | from .interfaces import ( |
|
39 | 39 | dirstate as intdirstate, |
|
40 | 40 | util as interfaceutil, |
|
41 | 41 | ) |
|
42 | 42 | |
|
43 | 43 | parsers = policy.importmod('parsers') |
|
44 | 44 | rustmod = policy.importrust('dirstate') |
|
45 | 45 | |
|
46 | 46 | HAS_FAST_DIRSTATE_V2 = rustmod is not None |
|
47 | 47 | |
|
48 | 48 | propertycache = util.propertycache |
|
49 | 49 | filecache = scmutil.filecache |
|
50 | 50 | _rangemask = dirstatemap.rangemask |
|
51 | 51 | |
|
52 | 52 | DirstateItem = dirstatemap.DirstateItem |
|
53 | 53 | |
|
54 | 54 | |
|
55 | 55 | class repocache(filecache): |
|
56 | 56 | """filecache for files in .hg/""" |
|
57 | 57 | |
|
58 | 58 | def join(self, obj, fname): |
|
59 | 59 | return obj._opener.join(fname) |
|
60 | 60 | |
|
61 | 61 | |
|
62 | 62 | class rootcache(filecache): |
|
63 | 63 | """filecache for files in the repository root""" |
|
64 | 64 | |
|
65 | 65 | def join(self, obj, fname): |
|
66 | 66 | return obj._join(fname) |
|
67 | 67 | |
|
68 | 68 | |
|
69 | 69 | def requires_parents_change(func): |
|
70 | 70 | def wrap(self, *args, **kwargs): |
|
71 | 71 | if not self.pendingparentchange(): |
|
72 | 72 | msg = 'calling `%s` outside of a parentchange context' |
|
73 | 73 | msg %= func.__name__ |
|
74 | 74 | raise error.ProgrammingError(msg) |
|
75 | 75 | return func(self, *args, **kwargs) |
|
76 | 76 | |
|
77 | 77 | return wrap |
|
78 | 78 | |
|
79 | 79 | |
|
80 | 80 | def requires_no_parents_change(func): |
|
81 | 81 | def wrap(self, *args, **kwargs): |
|
82 | 82 | if self.pendingparentchange(): |
|
83 | 83 | msg = 'calling `%s` inside of a parentchange context' |
|
84 | 84 | msg %= func.__name__ |
|
85 | 85 | raise error.ProgrammingError(msg) |
|
86 | 86 | return func(self, *args, **kwargs) |
|
87 | 87 | |
|
88 | 88 | return wrap |
|
89 | 89 | |
|
90 | 90 | |
|
91 | 91 | @interfaceutil.implementer(intdirstate.idirstate) |
|
92 | 92 | class dirstate(object): |
|
93 | 93 | def __init__( |
|
94 | 94 | self, |
|
95 | 95 | opener, |
|
96 | 96 | ui, |
|
97 | 97 | root, |
|
98 | 98 | validate, |
|
99 | 99 | sparsematchfn, |
|
100 | 100 | nodeconstants, |
|
101 | 101 | use_dirstate_v2, |
|
102 | 102 | ): |
|
103 | 103 | """Create a new dirstate object. |
|
104 | 104 | |
|
105 | 105 | opener is an open()-like callable that can be used to open the |
|
106 | 106 | dirstate file; root is the root of the directory tracked by |
|
107 | 107 | the dirstate. |
|
108 | 108 | """ |
|
109 | 109 | self._use_dirstate_v2 = use_dirstate_v2 |
|
110 | 110 | self._nodeconstants = nodeconstants |
|
111 | 111 | self._opener = opener |
|
112 | 112 | self._validate = validate |
|
113 | 113 | self._root = root |
|
114 | 114 | self._sparsematchfn = sparsematchfn |
|
115 | 115 | # ntpath.join(root, '') of Python 2.7.9 does not add sep if root is |
|
116 | 116 | # UNC path pointing to root share (issue4557) |
|
117 | 117 | self._rootdir = pathutil.normasprefix(root) |
|
118 | 118 | self._dirty = False |
|
119 | 119 | self._ui = ui |
|
120 | 120 | self._filecache = {} |
|
121 | 121 | self._parentwriters = 0 |
|
122 | 122 | self._filename = b'dirstate' |
|
123 | 123 | self._pendingfilename = b'%s.pending' % self._filename |
|
124 | 124 | self._plchangecallbacks = {} |
|
125 | 125 | self._origpl = None |
|
126 | 126 | self._mapcls = dirstatemap.dirstatemap |
|
127 | 127 | # Access and cache cwd early, so we don't access it for the first time |
|
128 | 128 | # after a working-copy update caused it to not exist (accessing it then |
|
129 | 129 | # raises an exception). |
|
130 | 130 | self._cwd |
|
131 | 131 | |
|
132 | 132 | def prefetch_parents(self): |
|
133 | 133 | """make sure the parents are loaded |
|
134 | 134 | |
|
135 | 135 | Used to avoid a race condition. |
|
136 | 136 | """ |
|
137 | 137 | self._pl |
|
138 | 138 | |
|
139 | 139 | @contextlib.contextmanager |
|
140 | 140 | def parentchange(self): |
|
141 | 141 | """Context manager for handling dirstate parents. |
|
142 | 142 | |
|
143 | 143 | If an exception occurs in the scope of the context manager, |
|
144 | 144 | the incoherent dirstate won't be written when wlock is |
|
145 | 145 | released. |
|
146 | 146 | """ |
|
147 | 147 | self._parentwriters += 1 |
|
148 | 148 | yield |
|
149 | 149 | # Typically we want the "undo" step of a context manager in a |
|
150 | 150 | # finally block so it happens even when an exception |
|
151 | 151 | # occurs. In this case, however, we only want to decrement |
|
152 | 152 | # parentwriters if the code in the with statement exits |
|
153 | 153 | # normally, so we don't have a try/finally here on purpose. |
|
154 | 154 | self._parentwriters -= 1 |
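# A minimal usage sketch (hypothetical call site): this context manager
# gates the parent-mutating APIs below, e.g.
#
#     with dirstate.parentchange():
#         dirstate.setparents(p1, p2)
#         dirstate.update_file(f, wc_tracked=True, p1_tracked=True)
#
# Calling setparents() outside the context raises ValueError.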
|
155 | 155 | |
|
156 | 156 | def pendingparentchange(self): |
|
157 | 157 | """Returns true if the dirstate is in the middle of a set of changes |
|
158 | 158 | that modify the dirstate parent. |
|
159 | 159 | """ |
|
160 | 160 | return self._parentwriters > 0 |
|
161 | 161 | |
|
162 | 162 | @propertycache |
|
163 | 163 | def _map(self): |
|
164 | 164 | """Return the dirstate contents (see documentation for dirstatemap).""" |
|
165 | 165 | self._map = self._mapcls( |
|
166 | 166 | self._ui, |
|
167 | 167 | self._opener, |
|
168 | 168 | self._root, |
|
169 | 169 | self._nodeconstants, |
|
170 | 170 | self._use_dirstate_v2, |
|
171 | 171 | ) |
|
172 | 172 | return self._map |
|
173 | 173 | |
|
174 | 174 | @property |
|
175 | 175 | def _sparsematcher(self): |
|
176 | 176 | """The matcher for the sparse checkout. |
|
177 | 177 | |
|
178 | 178 | The working directory may not include every file from a manifest. The |
|
179 | 179 | matcher obtained by this property will match a path if it is to be |
|
180 | 180 | included in the working directory. |
|
181 | 181 | """ |
|
182 | 182 | # TODO there is potential to cache this property. For now, the matcher |
|
183 | 183 | # is resolved on every access. (But the called function does use a |
|
184 | 184 | # cache to keep the lookup fast.) |
|
185 | 185 | return self._sparsematchfn() |
|
186 | 186 | |
|
187 | 187 | @repocache(b'branch') |
|
188 | 188 | def _branch(self): |
|
189 | 189 | try: |
|
190 | 190 | return self._opener.read(b"branch").strip() or b"default" |
|
191 | 191 | except IOError as inst: |
|
192 | 192 | if inst.errno != errno.ENOENT: |
|
193 | 193 | raise |
|
194 | 194 | return b"default" |
|
195 | 195 | |
|
196 | 196 | @property |
|
197 | 197 | def _pl(self): |
|
198 | 198 | return self._map.parents() |
|
199 | 199 | |
|
200 | 200 | def hasdir(self, d): |
|
201 | 201 | return self._map.hastrackeddir(d) |
|
202 | 202 | |
|
203 | 203 | @rootcache(b'.hgignore') |
|
204 | 204 | def _ignore(self): |
|
205 | 205 | files = self._ignorefiles() |
|
206 | 206 | if not files: |
|
207 | 207 | return matchmod.never() |
|
208 | 208 | |
|
209 | 209 | pats = [b'include:%s' % f for f in files] |
|
210 | 210 | return matchmod.match(self._root, b'', [], pats, warn=self._ui.warn) |
|
211 | 211 | |
|
212 | 212 | @propertycache |
|
213 | 213 | def _slash(self): |
|
214 | 214 | return self._ui.configbool(b'ui', b'slash') and pycompat.ossep != b'/' |
|
215 | 215 | |
|
216 | 216 | @propertycache |
|
217 | 217 | def _checklink(self): |
|
218 | 218 | return util.checklink(self._root) |
|
219 | 219 | |
|
220 | 220 | @propertycache |
|
221 | 221 | def _checkexec(self): |
|
222 | 222 | return bool(util.checkexec(self._root)) |
|
223 | 223 | |
|
224 | 224 | @propertycache |
|
225 | 225 | def _checkcase(self): |
|
226 | 226 | return not util.fscasesensitive(self._join(b'.hg')) |
|
227 | 227 | |
|
228 | 228 | def _join(self, f): |
|
229 | 229 | # much faster than os.path.join() |
|
230 | 230 | # it's safe because f is always a relative path |
|
231 | 231 | return self._rootdir + f |
|
232 | 232 | |
|
233 | 233 | def flagfunc(self, buildfallback): |
|
234 | 234 | """build a callable that returns flags associated with a filename |
|
235 | 235 | |
|
236 | 236 | The information is extracted from three possible layers: |
|
237 | 237 | 1. the file system if it supports the information |
|
238 | 238 | 2. the "fallback" information stored in the dirstate if any |
|
239 | 239 | 3. a more expensive mechanism inferring the flags from the parents. |
|
240 | 240 | """ |
|
241 | 241 | |
|
242 | 242 | # small hack to cache the result of buildfallback() |
|
243 | 243 | fallback_func = [] |
|
244 | 244 | |
|
245 | 245 | def get_flags(x): |
|
246 | 246 | entry = None |
|
247 | 247 | fallback_value = None |
|
248 | 248 | try: |
|
249 | 249 | st = os.lstat(self._join(x)) |
|
250 | 250 | except OSError: |
|
251 | 251 | return b'' |
|
252 | 252 | |
|
253 | 253 | if self._checklink: |
|
254 | 254 | if util.statislink(st): |
|
255 | 255 | return b'l' |
|
256 | 256 | else: |
|
257 | 257 | entry = self.get_entry(x) |
|
258 | 258 | if entry.has_fallback_symlink: |
|
259 | 259 | if entry.fallback_symlink: |
|
260 | 260 | return b'l' |
|
261 | 261 | else: |
|
262 | 262 | if not fallback_func: |
|
263 | 263 | fallback_func.append(buildfallback()) |
|
264 | 264 | fallback_value = fallback_func[0](x) |
|
265 | 265 | if b'l' in fallback_value: |
|
266 | 266 | return b'l' |
|
267 | 267 | |
|
268 | 268 | if self._checkexec: |
|
269 | 269 | if util.statisexec(st): |
|
270 | 270 | return b'x' |
|
271 | 271 | else: |
|
272 | 272 | if entry is None: |
|
273 | 273 | entry = self.get_entry(x) |
|
274 | 274 | if entry.has_fallback_exec: |
|
275 | 275 | if entry.fallback_exec: |
|
276 | 276 | return b'x' |
|
277 | 277 | else: |
|
278 | 278 | if fallback_value is None: |
|
279 | 279 | if not fallback_func: |
|
280 | 280 | fallback_func.append(buildfallback()) |
|
281 | 281 | fallback_value = fallback_func[0](x) |
|
282 | 282 | if b'x' in fallback_value: |
|
283 | 283 | return b'x' |
|
284 | 284 | return b'' |
|
285 | 285 | |
|
286 | 286 | return get_flags |
|
287 | 287 | |
|
288 | 288 | @propertycache |
|
289 | 289 | def _cwd(self): |
|
290 | 290 | # internal config: ui.forcecwd |
|
291 | 291 | forcecwd = self._ui.config(b'ui', b'forcecwd') |
|
292 | 292 | if forcecwd: |
|
293 | 293 | return forcecwd |
|
294 | 294 | return encoding.getcwd() |
|
295 | 295 | |
|
296 | 296 | def getcwd(self): |
|
297 | 297 | """Return the path from which a canonical path is calculated. |
|
298 | 298 | |
|
299 | 299 | This path should be used to resolve file patterns or to convert |
|
300 | 300 | canonical paths back to file paths for display. It shouldn't be |
|
301 | 301 | used to get real file paths. Use vfs functions instead. |
|
302 | 302 | """ |
|
303 | 303 | cwd = self._cwd |
|
304 | 304 | if cwd == self._root: |
|
305 | 305 | return b'' |
|
306 | 306 | # self._root ends with a path separator if self._root is '/' or 'C:\' |
|
307 | 307 | rootsep = self._root |
|
308 | 308 | if not util.endswithsep(rootsep): |
|
309 | 309 | rootsep += pycompat.ossep |
|
310 | 310 | if cwd.startswith(rootsep): |
|
311 | 311 | return cwd[len(rootsep) :] |
|
312 | 312 | else: |
|
313 | 313 | # we're outside the repo. return an absolute path. |
|
314 | 314 | return cwd |
|
315 | 315 | |
|
316 | 316 | def pathto(self, f, cwd=None): |
|
317 | 317 | if cwd is None: |
|
318 | 318 | cwd = self.getcwd() |
|
319 | 319 | path = util.pathto(self._root, cwd, f) |
|
320 | 320 | if self._slash: |
|
321 | 321 | return util.pconvert(path) |
|
322 | 322 | return path |
|
323 | 323 | |
|
324 | 324 | def __getitem__(self, key): |
|
325 | 325 | """Return the current state of key (a filename) in the dirstate. |
|
326 | 326 | |
|
327 | 327 | States are: |
|
328 | 328 | n normal |
|
329 | 329 | m needs merging |
|
330 | 330 | r marked for removal |
|
331 | 331 | a marked for addition |
|
332 | 332 | ? not tracked |
|
333 | 333 | |
|
334 | 334 | XXX The "state" is a bit obscure to be in the "public" API. we should |
|
335 | 335 | consider migrating all user of this to going through the dirstate entry |
|
336 | 336 | instead. |
|
337 | 337 | """ |
|
338 | 338 | msg = b"don't use dirstate[file], use dirstate.get_entry(file)" |
|
339 | 339 | util.nouideprecwarn(msg, b'6.1', stacklevel=2) |
|
340 | 340 | entry = self._map.get(key) |
|
341 | 341 | if entry is not None: |
|
342 | 342 | return entry.state |
|
343 | 343 | return b'?' |
|
344 | 344 | |
|
345 | 345 | def get_entry(self, path): |
|
346 | 346 | """return a DirstateItem for the associated path""" |
|
347 | 347 | entry = self._map.get(path) |
|
348 | 348 | if entry is None: |
|
349 | 349 | return DirstateItem() |
|
350 | 350 | return entry |
|
351 | 351 | |
|
352 | 352 | def __contains__(self, key): |
|
353 | 353 | return key in self._map |
|
354 | 354 | |
|
355 | 355 | def __iter__(self): |
|
356 | 356 | return iter(sorted(self._map)) |
|
357 | 357 | |
|
358 | 358 | def items(self): |
|
359 | 359 | return pycompat.iteritems(self._map) |
|
360 | 360 | |
|
361 | 361 | iteritems = items |
|
362 | 362 | |
|
363 | 363 | def parents(self): |
|
364 | 364 | return [self._validate(p) for p in self._pl] |
|
365 | 365 | |
|
366 | 366 | def p1(self): |
|
367 | 367 | return self._validate(self._pl[0]) |
|
368 | 368 | |
|
369 | 369 | def p2(self): |
|
370 | 370 | return self._validate(self._pl[1]) |
|
371 | 371 | |
|
372 | 372 | @property |
|
373 | 373 | def in_merge(self): |
|
374 | 374 | """True if a merge is in progress""" |
|
375 | 375 | return self._pl[1] != self._nodeconstants.nullid |
|
376 | 376 | |
|
377 | 377 | def branch(self): |
|
378 | 378 | return encoding.tolocal(self._branch) |
|
379 | 379 | |
|
380 | 380 | def setparents(self, p1, p2=None): |
|
381 | 381 | """Set dirstate parents to p1 and p2. |
|
382 | 382 | |
|
383 | 383 | When moving from two parents to one, "merged" entries are

384 | 384 | adjusted to normal, and previous copy records are discarded and

385 | 385 | returned by the call.
|
386 | 386 | |
|
387 | 387 | See localrepo.setparents() |
|
388 | 388 | """ |
|
389 | 389 | if p2 is None: |
|
390 | 390 | p2 = self._nodeconstants.nullid |
|
391 | 391 | if self._parentwriters == 0: |
|
392 | 392 | raise ValueError( |
|
393 | 393 | b"cannot set dirstate parent outside of " |
|
394 | 394 | b"dirstate.parentchange context manager" |
|
395 | 395 | ) |
|
396 | 396 | |
|
397 | 397 | self._dirty = True |
|
398 | 398 | oldp2 = self._pl[1] |
|
399 | 399 | if self._origpl is None: |
|
400 | 400 | self._origpl = self._pl |
|
401 | 401 | nullid = self._nodeconstants.nullid |
|
402 | 402 | # True if we need to fold p2 related state back to a linear case |
|
403 | 403 | fold_p2 = oldp2 != nullid and p2 == nullid |
|
404 | 404 | return self._map.setparents(p1, p2, fold_p2=fold_p2) |
|
405 | 405 | |
|
406 | 406 | def setbranch(self, branch): |
|
407 | 407 | self.__class__._branch.set(self, encoding.fromlocal(branch)) |
|
408 | 408 | f = self._opener(b'branch', b'w', atomictemp=True, checkambig=True) |
|
409 | 409 | try: |
|
410 | 410 | f.write(self._branch + b'\n') |
|
411 | 411 | f.close() |
|
412 | 412 | |
|
413 | 413 | # make sure filecache has the correct stat info for _branch after |
|
414 | 414 | # replacing the underlying file |
|
415 | 415 | ce = self._filecache[b'_branch'] |
|
416 | 416 | if ce: |
|
417 | 417 | ce.refresh() |
|
418 | 418 | except: # re-raises |
|
419 | 419 | f.discard() |
|
420 | 420 | raise |
|
421 | 421 | |
|
422 | 422 | def invalidate(self): |
|
423 | 423 | """Causes the next access to reread the dirstate. |
|
424 | 424 | |
|
425 | 425 | This is different from localrepo.invalidatedirstate() because it always |
|
426 | 426 | rereads the dirstate. Use localrepo.invalidatedirstate() if you want to |
|
427 | 427 | check whether the dirstate has changed before rereading it.""" |
|
428 | 428 | |
|
429 | 429 | for a in ("_map", "_branch", "_ignore"): |
|
430 | 430 | if a in self.__dict__: |
|
431 | 431 | delattr(self, a) |
|
432 | 432 | self._dirty = False |
|
433 | 433 | self._parentwriters = 0 |
|
434 | 434 | self._origpl = None |
|
435 | 435 | |
|
436 | 436 | def copy(self, source, dest): |
|
437 | 437 | """Mark dest as a copy of source. Unmark dest if source is None.""" |
|
438 | 438 | if source == dest: |
|
439 | 439 | return |
|
440 | 440 | self._dirty = True |
|
441 | 441 | if source is not None: |
|
442 | 442 | self._map.copymap[dest] = source |
|
443 | 443 | else: |
|
444 | 444 | self._map.copymap.pop(dest, None) |
|
445 | 445 | |
|
446 | 446 | def copied(self, file): |
|
447 | 447 | return self._map.copymap.get(file, None) |
|
448 | 448 | |
|
449 | 449 | def copies(self): |
|
450 | 450 | return self._map.copymap |
|
451 | 451 | |
|
452 | 452 | @requires_no_parents_change |
|
453 | 453 | def set_tracked(self, filename, reset_copy=False): |
|
454 | 454 | """a "public" method for generic code to mark a file as tracked |
|
455 | 455 | |
|
456 | 456 | This function is to be called outside of "update/merge" case. For |
|
457 | 457 | example by a command like `hg add X`. |
|
458 | 458 | |
|
459 | 459 | if reset_copy is set, any existing copy information will be dropped. |
|
460 | 460 | |
|
461 | 461 | return True if the file was previously untracked, False otherwise.
|
462 | 462 | """ |
|
463 | 463 | self._dirty = True |
|
464 | 464 | entry = self._map.get(filename) |
|
465 | 465 | if entry is None or not entry.tracked: |
|
466 | 466 | self._check_new_tracked_filename(filename) |
|
467 | 467 | pre_tracked = self._map.set_tracked(filename) |
|
468 | 468 | if reset_copy: |
|
469 | 469 | self._map.copymap.pop(filename, None) |
|
470 | 470 | return pre_tracked |
|
471 | 471 | |
|
472 | 472 | @requires_no_parents_change |
|
473 | 473 | def set_untracked(self, filename): |
|
474 | 474 | """a "public" method for generic code to mark a file as untracked |
|
475 | 475 | |
|
476 | 476 | This function is to be called outside of "update/merge" case. For |
|
477 | 477 | example by a command like `hg remove X`. |
|
478 | 478 | |
|
479 | 479 | return True if the file was previously tracked, False otherwise.
|
480 | 480 | """ |
|
481 | 481 | ret = self._map.set_untracked(filename) |
|
482 | 482 | if ret: |
|
483 | 483 | self._dirty = True |
|
484 | 484 | return ret |
|
485 | 485 | |
|
486 | 486 | @requires_no_parents_change |
|
487 | 487 | def set_clean(self, filename, parentfiledata): |
|
488 | 488 | """record that the current state of the file on disk is known to be clean""" |
|
489 | 489 | self._dirty = True |
|
490 | 490 | if not self._map[filename].tracked: |
|
491 | 491 | self._check_new_tracked_filename(filename) |
|
492 | 492 | (mode, size, mtime) = parentfiledata |
|
493 | 493 | self._map.set_clean(filename, mode, size, mtime) |
|
494 | 494 | |
|
495 | 495 | @requires_no_parents_change |
|
496 | 496 | def set_possibly_dirty(self, filename): |
|
497 | 497 | """record that the current state of the file on disk is unknown""" |
|
498 | 498 | self._dirty = True |
|
499 | 499 | self._map.set_possibly_dirty(filename) |
|
500 | 500 | |
|
501 | 501 | @requires_parents_change |
|
502 | 502 | def update_file_p1( |
|
503 | 503 | self, |
|
504 | 504 | filename, |
|
505 | 505 | p1_tracked, |
|
506 | 506 | ): |
|
507 | 507 | """Set a file as tracked in the parent (or not) |
|
508 | 508 | |
|
509 | 509 | This is to be called when adjusting the dirstate to a new parent after a history
|
510 | 510 | rewriting operation. |
|
511 | 511 | |
|
512 | 512 | It should not be called during a merge (p2 != nullid) and only within |
|
513 | 513 | a `with dirstate.parentchange():` context. |
|
514 | 514 | """ |
|
515 | 515 | if self.in_merge: |
|
516 | 516 | msg = b'update_file_reference should not be called when merging' |
|
517 | 517 | raise error.ProgrammingError(msg) |
|
518 | 518 | entry = self._map.get(filename) |
|
519 | 519 | if entry is None: |
|
520 | 520 | wc_tracked = False |
|
521 | 521 | else: |
|
522 | 522 | wc_tracked = entry.tracked |
|
523 | 523 | if not (p1_tracked or wc_tracked): |
|
524 | 524 | # the file is no longer relevant to anyone |
|
525 | 525 | if self._map.get(filename) is not None: |
|
526 | 526 | self._map.reset_state(filename) |
|
527 | 527 | self._dirty = True |
|
528 | 528 | elif (not p1_tracked) and wc_tracked: |
|
529 | 529 | if entry is not None and entry.added: |
|
530 | 530 | return # avoid dropping copy information (maybe?) |
|
531 | 531 | |
|
532 | 532 | self._map.reset_state( |
|
533 | 533 | filename, |
|
534 | 534 | wc_tracked, |
|
535 | 535 | p1_tracked, |
|
536 | 536 | # the underlying reference might have changed, we will have to |
|
537 | 537 | # check it. |
|
538 | 538 | has_meaningful_mtime=False, |
|
539 | 539 | ) |
|
540 | 540 | |
|
541 | 541 | @requires_parents_change |
|
542 | 542 | def update_file( |
|
543 | 543 | self, |
|
544 | 544 | filename, |
|
545 | 545 | wc_tracked, |
|
546 | 546 | p1_tracked, |
|
547 | 547 | p2_info=False, |
|
548 | 548 | possibly_dirty=False, |
|
549 | 549 | parentfiledata=None, |
|
550 | 550 | ): |
|
551 | 551 | """update the information about a file in the dirstate |
|
552 | 552 | |
|
553 | 553 | This is to be called when the dirstate's parents change, to keep track

554 | 554 | of the file's situation in regard to the working copy and its parent.
|
555 | 555 | |
|
556 | 556 | This function must be called within a `dirstate.parentchange` context. |
|
557 | 557 | |
|
558 | 558 | note: the API is at an early stage and we might need to adjust it |
|
559 | 559 | depending on what information ends up being relevant and useful to
|
560 | 560 | other processing. |
|
561 | 561 | """ |
|
562 | 562 | |
|
563 | 563 | # note: I do not think we need to double check name clash here since we |
|
564 | 564 | # are in an update/merge case that should already have taken care of
|
565 | 565 | # this. The test agrees |
|
566 | 566 | |
|
567 | 567 | self._dirty = True |
|
568 | 568 | |
|
569 | 569 | self._map.reset_state( |
|
570 | 570 | filename, |
|
571 | 571 | wc_tracked, |
|
572 | 572 | p1_tracked, |
|
573 | 573 | p2_info=p2_info, |
|
574 | 574 | has_meaningful_mtime=not possibly_dirty, |
|
575 | 575 | parentfiledata=parentfiledata, |
|
576 | 576 | ) |
|
577 | 577 | |
|
578 | 578 | def _check_new_tracked_filename(self, filename): |
|
579 | 579 | scmutil.checkfilename(filename) |
|
580 | 580 | if self._map.hastrackeddir(filename): |
|
581 | 581 | msg = _(b'directory %r already in dirstate') |
|
582 | 582 | msg %= pycompat.bytestr(filename) |
|
583 | 583 | raise error.Abort(msg) |
|
584 | 584 | # shadows |
|
585 | 585 | for d in pathutil.finddirs(filename): |
|
586 | 586 | if self._map.hastrackeddir(d): |
|
587 | 587 | break |
|
588 | 588 | entry = self._map.get(d) |
|
589 | 589 | if entry is not None and not entry.removed: |
|
590 | 590 | msg = _(b'file %r in dirstate clashes with %r') |
|
591 | 591 | msg %= (pycompat.bytestr(d), pycompat.bytestr(filename)) |
|
592 | 592 | raise error.Abort(msg) |
|
593 | 593 | |
|
594 | 594 | def _get_filedata(self, filename): |
|
595 | 595 | """returns""" |
|
596 | 596 | s = os.lstat(self._join(filename)) |
|
597 | 597 | mode = s.st_mode |
|
598 | 598 | size = s.st_size |
|
599 | 599 | mtime = timestamp.mtime_of(s) |
|
600 | 600 | return (mode, size, mtime) |
|
601 | 601 | |
|
602 | 602 | def _discoverpath(self, path, normed, ignoremissing, exists, storemap): |
|
603 | 603 | if exists is None: |
|
604 | 604 | exists = os.path.lexists(os.path.join(self._root, path)) |
|
605 | 605 | if not exists: |
|
606 | 606 | # Maybe a path component exists |
|
607 | 607 | if not ignoremissing and b'/' in path: |
|
608 | 608 | d, f = path.rsplit(b'/', 1) |
|
609 | 609 | d = self._normalize(d, False, ignoremissing, None) |
|
610 | 610 | folded = d + b"/" + f |
|
611 | 611 | else: |
|
612 | 612 | # No path components, preserve original case |
|
613 | 613 | folded = path |
|
614 | 614 | else: |
|
615 | 615 | # recursively normalize leading directory components |
|
616 | 616 | # against dirstate |
|
617 | 617 | if b'/' in normed: |
|
618 | 618 | d, f = normed.rsplit(b'/', 1) |
|
619 | 619 | d = self._normalize(d, False, ignoremissing, True) |
|
620 | 620 | r = self._root + b"/" + d |
|
621 | 621 | folded = d + b"/" + util.fspath(f, r) |
|
622 | 622 | else: |
|
623 | 623 | folded = util.fspath(normed, self._root) |
|
624 | 624 | storemap[normed] = folded |
|
625 | 625 | |
|
626 | 626 | return folded |
|
627 | 627 | |
|
628 | 628 | def _normalizefile(self, path, isknown, ignoremissing=False, exists=None): |
|
629 | 629 | normed = util.normcase(path) |
|
630 | 630 | folded = self._map.filefoldmap.get(normed, None) |
|
631 | 631 | if folded is None: |
|
632 | 632 | if isknown: |
|
633 | 633 | folded = path |
|
634 | 634 | else: |
|
635 | 635 | folded = self._discoverpath( |
|
636 | 636 | path, normed, ignoremissing, exists, self._map.filefoldmap |
|
637 | 637 | ) |
|
638 | 638 | return folded |
|
639 | 639 | |
|
640 | 640 | def _normalize(self, path, isknown, ignoremissing=False, exists=None): |
|
641 | 641 | normed = util.normcase(path) |
|
642 | 642 | folded = self._map.filefoldmap.get(normed, None) |
|
643 | 643 | if folded is None: |
|
644 | 644 | folded = self._map.dirfoldmap.get(normed, None) |
|
645 | 645 | if folded is None: |
|
646 | 646 | if isknown: |
|
647 | 647 | folded = path |
|
648 | 648 | else: |
|
649 | 649 | # store discovered result in dirfoldmap so that future |
|
650 | 650 | # normalizefile calls don't start matching directories |
|
651 | 651 | folded = self._discoverpath( |
|
652 | 652 | path, normed, ignoremissing, exists, self._map.dirfoldmap |
|
653 | 653 | ) |
|
654 | 654 | return folded |
|
655 | 655 | |
|
656 | 656 | def normalize(self, path, isknown=False, ignoremissing=False): |
|
657 | 657 | """ |
|
658 | 658 | normalize the case of a pathname when on a casefolding filesystem |
|
659 | 659 | |
|
660 | 660 | isknown specifies whether the filename came from walking the |
|
661 | 661 | disk, to avoid extra filesystem access. |
|
662 | 662 | |
|
663 | 663 | If ignoremissing is True, missing paths are returned
|
664 | 664 | unchanged. Otherwise, we try harder to normalize possibly |
|
665 | 665 | existing path components. |
|
666 | 666 | |
|
667 | 667 | The normalized case is determined based on the following precedence: |
|
668 | 668 | |
|
669 | 669 | - version of name already stored in the dirstate |
|
670 | 670 | - version of name stored on disk |
|
671 | 671 | - version provided via command arguments |
|
672 | 672 | """ |
|
673 | 673 | |
|
674 | 674 | if self._checkcase: |
|
675 | 675 | return self._normalize(path, isknown, ignoremissing) |
|
676 | 676 | return path |
|
677 | 677 | |
|
678 | 678 | def clear(self): |
|
679 | 679 | self._map.clear() |
|
680 | 680 | self._dirty = True |
|
681 | 681 | |
|
682 | 682 | def rebuild(self, parent, allfiles, changedfiles=None): |
|
683 | 683 | if changedfiles is None: |
|
684 | 684 | # Rebuild entire dirstate |
|
685 | 685 | to_lookup = allfiles |
|
686 | 686 | to_drop = [] |
|
687 | 687 | self.clear() |
|
688 | 688 | elif len(changedfiles) < 10: |
|
689 | 689 | # Avoid turning allfiles into a set, which can be expensive if it's |
|
690 | 690 | # large. |
|
691 | 691 | to_lookup = [] |
|
692 | 692 | to_drop = [] |
|
693 | 693 | for f in changedfiles: |
|
694 | 694 | if f in allfiles: |
|
695 | 695 | to_lookup.append(f) |
|
696 | 696 | else: |
|
697 | 697 | to_drop.append(f) |
|
698 | 698 | else: |
|
699 | 699 | changedfilesset = set(changedfiles) |
|
700 | 700 | to_lookup = changedfilesset & set(allfiles) |
|
701 | 701 | to_drop = changedfilesset - to_lookup |
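# Sketch of this set-based path with hypothetical values:
# changedfiles = {b'a', b'b'} and allfiles = [b'a', b'c'] give
# to_lookup = {b'a'} (changed, still present) and
# to_drop = {b'b'} (changed, no longer in allfiles).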
|
702 | 702 | |
|
703 | 703 | if self._origpl is None: |
|
704 | 704 | self._origpl = self._pl |
|
705 | 705 | self._map.setparents(parent, self._nodeconstants.nullid) |
|
706 | 706 | |
|
707 | 707 | for f in to_lookup: |
|
708 | 708 | |
|
709 | 709 | if self.in_merge: |
|
710 | 710 | self.set_tracked(f) |
|
711 | 711 | else: |
|
712 | 712 | self._map.reset_state( |
|
713 | 713 | f, |
|
714 | 714 | wc_tracked=True, |
|
715 | 715 | p1_tracked=True, |
|
716 | 716 | ) |
|
717 | 717 | for f in to_drop: |
|
718 | 718 | self._map.reset_state(f) |
|
719 | 719 | |
|
720 | 720 | self._dirty = True |
|
721 | 721 | |
|
722 | 722 | def identity(self): |
|
723 | 723 | """Return identity of dirstate itself to detect changing in storage |
|
724 | 724 | |
|
725 | 725 | If identity of previous dirstate is equal to this, writing |
|
726 | 726 | changes based on the former dirstate out can keep consistency. |
|
727 | 727 | """ |
|
728 | 728 | return self._map.identity |
|
729 | 729 | |
|
730 | 730 | def write(self, tr): |
|
731 | 731 | if not self._dirty: |
|
732 | 732 | return |
|
733 | 733 | |
|
734 | 734 | filename = self._filename |
|
735 | 735 | if tr: |
|
736 | 736 | # 'dirstate.write()' is not only for writing in-memory |
|
737 | 737 | # changes out, but also for dropping ambiguous timestamps.

738 | 738 | # Delayed writing would re-raise the "ambiguous timestamp issue".
|
739 | 739 | # See also the wiki page below for detail: |
|
740 | 740 | # https://www.mercurial-scm.org/wiki/DirstateTransactionPlan |
|
741 | 741 | |
|
742 | 742 | # record when mtime start to be ambiguous |
|
743 | 743 | now = timestamp.get_fs_now(self._opener) |
|
744 | 744 | |
|
745 | 745 | # delay writing in-memory changes out |
|
746 | 746 | tr.addfilegenerator( |
|
747 | 747 | b'dirstate', |
|
748 | 748 | (self._filename,), |
|
749 | 749 | lambda f: self._writedirstate(tr, f, now=now), |
|
750 | 750 | location=b'plain', |
|
751 | 751 | ) |
|
752 | 752 | return |
|
753 | 753 | |
|
754 | 754 | st = self._opener(filename, b"w", atomictemp=True, checkambig=True) |
|
755 | 755 | self._writedirstate(tr, st) |
|
756 | 756 | |
|
757 | 757 | def addparentchangecallback(self, category, callback): |
|
758 | 758 | """add a callback to be called when the wd parents are changed |
|
759 | 759 | |
|
760 | 760 | Callback will be called with the following arguments: |
|
761 | 761 | dirstate, (oldp1, oldp2), (newp1, newp2) |
|
762 | 762 | |
|
763 | 763 | Category is a unique identifier to allow overwriting an old callback |
|
764 | 764 | with a newer callback. |
|
765 | 765 | """ |
|
766 | 766 | self._plchangecallbacks[category] = callback |
|
767 | 767 | |
|
768 | 768 | def _writedirstate(self, tr, st, now=None): |
|
769 | 769 | # notify callbacks about parents change |
|
770 | 770 | if self._origpl is not None and self._origpl != self._pl: |
|
771 | 771 | for c, callback in sorted( |
|
772 | 772 | pycompat.iteritems(self._plchangecallbacks) |
|
773 | 773 | ): |
|
774 | 774 | callback(self, self._origpl, self._pl) |
|
775 | 775 | self._origpl = None |
|
776 | 776 | |
|
777 | 777 | if now is None: |
|
778 | 778 | # use the modification time of the newly created temporary file as the |
|
779 | 779 | # filesystem's notion of 'now' |
|
780 | 780 | now = timestamp.mtime_of(util.fstat(st)) |
|
781 | 781 | |
|
782 | # a sufficiently large 'delaywrite' prevents 'pack_dirstate' from dropping | |

783 | # the timestamp of each entry in the dirstate, because of 'now > mtime' | |
|
784 | delaywrite = self._ui.configint(b'debug', b'dirstate.delaywrite') | |
|
785 | if delaywrite > 0: | |
|
786 | # do we have any files to delay for? | |
|
787 | for f, e in pycompat.iteritems(self._map): | |
|
788 | if e.need_delay(now): | |
|
789 | import time # to avoid useless import | |
|
790 | ||
|
791 | # rather than sleep n seconds, sleep until the next | |
|
792 | # multiple of n seconds | |
|
793 | clock = time.time() | |
|
794 | start = int(clock) - (int(clock) % delaywrite) | |
|
795 | end = start + delaywrite | |
|
796 | time.sleep(end - clock) | |
|
797 | # trust our estimate that the end is near now | |
|
798 | now = timestamp.timestamp((end, 0)) | |
|
799 | break | |
|
800 | ||
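# A worked example of the removed delay, assuming delaywrite = 2 and
# clock = 103.4 when the loop runs:
#     start = int(103.4) - (103 % 2) = 102
#     end   = start + 2              = 104
#     time.sleep(104 - 103.4)        # 0.6s, until the next multiple of 2
# after which 'end' is taken as the new 'now'.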
|
801 | 782 | self._map.write(tr, st, now) |
|
802 | 783 | self._dirty = False |
|
803 | 784 | |
|
804 | 785 | def _dirignore(self, f): |
|
805 | 786 | if self._ignore(f): |
|
806 | 787 | return True |
|
807 | 788 | for p in pathutil.finddirs(f): |
|
808 | 789 | if self._ignore(p): |
|
809 | 790 | return True |
|
810 | 791 | return False |
|
811 | 792 | |
|
812 | 793 | def _ignorefiles(self): |
|
813 | 794 | files = [] |
|
814 | 795 | if os.path.exists(self._join(b'.hgignore')): |
|
815 | 796 | files.append(self._join(b'.hgignore')) |
|
816 | 797 | for name, path in self._ui.configitems(b"ui"): |
|
817 | 798 | if name == b'ignore' or name.startswith(b'ignore.'): |
|
818 | 799 | # we need to use os.path.join here rather than self._join |
|
819 | 800 | # because path is arbitrary and user-specified |
|
820 | 801 | files.append(os.path.join(self._rootdir, util.expandpath(path))) |
|
821 | 802 | return files |
|
822 | 803 | |
|
823 | 804 | def _ignorefileandline(self, f): |
|
824 | 805 | files = collections.deque(self._ignorefiles()) |
|
825 | 806 | visited = set() |
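# Breadth-first walk over the ignore files: a 'subinclude:' pattern
# queues the referenced file, e.g. a line 'subinclude:sub/.hgignore'
# (hypothetical path) appends that file so its patterns are also
# searched for the one that ignores f.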
|
826 | 807 | while files: |
|
827 | 808 | i = files.popleft() |
|
828 | 809 | patterns = matchmod.readpatternfile( |
|
829 | 810 | i, self._ui.warn, sourceinfo=True |
|
830 | 811 | ) |
|
831 | 812 | for pattern, lineno, line in patterns: |
|
832 | 813 | kind, p = matchmod._patsplit(pattern, b'glob') |
|
833 | 814 | if kind == b"subinclude": |
|
834 | 815 | if p not in visited: |
|
835 | 816 | files.append(p) |
|
836 | 817 | continue |
|
837 | 818 | m = matchmod.match( |
|
838 | 819 | self._root, b'', [], [pattern], warn=self._ui.warn |
|
839 | 820 | ) |
|
840 | 821 | if m(f): |
|
841 | 822 | return (i, lineno, line) |
|
842 | 823 | visited.add(i) |
|
843 | 824 | return (None, -1, b"") |
|
844 | 825 | |
|
845 | 826 | def _walkexplicit(self, match, subrepos): |
|
846 | 827 | """Get stat data about the files explicitly specified by match. |
|
847 | 828 | |
|
848 | 829 | Return a triple (results, dirsfound, dirsnotfound). |
|
849 | 830 | - results is a mapping from filename to stat result. It also contains |
|
850 | 831 | listings mapping subrepos and .hg to None. |
|
851 | 832 | - dirsfound is a list of files found to be directories. |
|
852 | 833 | - dirsnotfound is a list of files that the dirstate thinks are |
|
853 | 834 | directories and that were not found.""" |
|
854 | 835 | |
|
855 | 836 | def badtype(mode): |
|
856 | 837 | kind = _(b'unknown') |
|
857 | 838 | if stat.S_ISCHR(mode): |
|
858 | 839 | kind = _(b'character device') |
|
859 | 840 | elif stat.S_ISBLK(mode): |
|
860 | 841 | kind = _(b'block device') |
|
861 | 842 | elif stat.S_ISFIFO(mode): |
|
862 | 843 | kind = _(b'fifo') |
|
863 | 844 | elif stat.S_ISSOCK(mode): |
|
864 | 845 | kind = _(b'socket') |
|
865 | 846 | elif stat.S_ISDIR(mode): |
|
866 | 847 | kind = _(b'directory') |
|
867 | 848 | return _(b'unsupported file type (type is %s)') % kind |
|
868 | 849 | |
|
869 | 850 | badfn = match.bad |
|
870 | 851 | dmap = self._map |
|
871 | 852 | lstat = os.lstat |
|
872 | 853 | getkind = stat.S_IFMT |
|
873 | 854 | dirkind = stat.S_IFDIR |
|
874 | 855 | regkind = stat.S_IFREG |
|
875 | 856 | lnkkind = stat.S_IFLNK |
|
876 | 857 | join = self._join |
|
877 | 858 | dirsfound = [] |
|
878 | 859 | foundadd = dirsfound.append |
|
879 | 860 | dirsnotfound = [] |
|
880 | 861 | notfoundadd = dirsnotfound.append |
|
881 | 862 | |
|
882 | 863 | if not match.isexact() and self._checkcase: |
|
883 | 864 | normalize = self._normalize |
|
884 | 865 | else: |
|
885 | 866 | normalize = None |
|
886 | 867 | |
|
887 | 868 | files = sorted(match.files()) |
|
888 | 869 | subrepos.sort() |
|
889 | 870 | i, j = 0, 0 |
|
890 | 871 | while i < len(files) and j < len(subrepos): |
|
891 | 872 | subpath = subrepos[j] + b"/" |
|
892 | 873 | if files[i] < subpath: |
|
893 | 874 | i += 1 |
|
894 | 875 | continue |
|
895 | 876 | while i < len(files) and files[i].startswith(subpath): |
|
896 | 877 | del files[i] |
|
897 | 878 | j += 1 |
|
898 | 879 | |
|
899 | 880 | if not files or b'' in files: |
|
900 | 881 | files = [b''] |
|
901 | 882 | # constructing the foldmap is expensive, so don't do it for the |
|
902 | 883 | # common case where files is [''] |
|
903 | 884 | normalize = None |
|
904 | 885 | results = dict.fromkeys(subrepos) |
|
905 | 886 | results[b'.hg'] = None |
|
906 | 887 | |
|
907 | 888 | for ff in files: |
|
908 | 889 | if normalize: |
|
909 | 890 | nf = normalize(ff, False, True) |
|
910 | 891 | else: |
|
911 | 892 | nf = ff |
|
912 | 893 | if nf in results: |
|
913 | 894 | continue |
|
914 | 895 | |
|
915 | 896 | try: |
|
916 | 897 | st = lstat(join(nf)) |
|
917 | 898 | kind = getkind(st.st_mode) |
|
918 | 899 | if kind == dirkind: |
|
919 | 900 | if nf in dmap: |
|
920 | 901 | # file replaced by dir on disk but still in dirstate |
|
921 | 902 | results[nf] = None |
|
922 | 903 | foundadd((nf, ff)) |
|
923 | 904 | elif kind == regkind or kind == lnkkind: |
|
924 | 905 | results[nf] = st |
|
925 | 906 | else: |
|
926 | 907 | badfn(ff, badtype(kind)) |
|
927 | 908 | if nf in dmap: |
|
928 | 909 | results[nf] = None |
|
929 | 910 | except OSError as inst: # nf not found on disk - it is dirstate only |
|
930 | 911 | if nf in dmap: # does it exactly match a missing file? |
|
931 | 912 | results[nf] = None |
|
932 | 913 | else: # does it match a missing directory? |
|
933 | 914 | if self._map.hasdir(nf): |
|
934 | 915 | notfoundadd(nf) |
|
935 | 916 | else: |
|
936 | 917 | badfn(ff, encoding.strtolocal(inst.strerror)) |
|
937 | 918 | |
|
938 | 919 | # match.files() may contain explicitly-specified paths that shouldn't |
|
939 | 920 | # be taken; drop them from the list of files found. dirsfound/notfound |
|
940 | 921 | # aren't filtered here because they will be tested later. |
|
941 | 922 | if match.anypats(): |
|
942 | 923 | for f in list(results): |
|
943 | 924 | if f == b'.hg' or f in subrepos: |
|
944 | 925 | # keep sentinel to disable further out-of-repo walks |
|
945 | 926 | continue |
|
946 | 927 | if not match(f): |
|
947 | 928 | del results[f] |
|
948 | 929 | |
|
949 | 930 | # Case insensitive filesystems cannot rely on lstat() failing to detect |
|
950 | 931 | # a case-only rename. Prune the stat object for any file that does not |
|
951 | 932 | # match the case in the filesystem, if there are multiple files that |
|
952 | 933 | # normalize to the same path. |
|
953 | 934 | if match.isexact() and self._checkcase: |
|
954 | 935 | normed = {} |
|
955 | 936 | |
|
956 | 937 | for f, st in pycompat.iteritems(results): |
|
957 | 938 | if st is None: |
|
958 | 939 | continue |
|
959 | 940 | |
|
960 | 941 | nc = util.normcase(f) |
|
961 | 942 | paths = normed.get(nc) |
|
962 | 943 | |
|
963 | 944 | if paths is None: |
|
964 | 945 | paths = set() |
|
965 | 946 | normed[nc] = paths |
|
966 | 947 | |
|
967 | 948 | paths.add(f) |
|
968 | 949 | |
|
969 | 950 | for norm, paths in pycompat.iteritems(normed): |
|
970 | 951 | if len(paths) > 1: |
|
971 | 952 | for path in paths: |
|
972 | 953 | folded = self._discoverpath( |
|
973 | 954 | path, norm, True, None, self._map.dirfoldmap |
|
974 | 955 | ) |
|
975 | 956 | if path != folded: |
|
976 | 957 | results[path] = None |
|
977 | 958 | |
|
978 | 959 | return results, dirsfound, dirsnotfound |
|
979 | 960 | |
|
980 | 961 | def walk(self, match, subrepos, unknown, ignored, full=True): |
|
981 | 962 | """ |
|
982 | 963 | Walk recursively through the directory tree, finding all files |
|
983 | 964 | matched by match. |
|
984 | 965 | |
|
985 | 966 | If full is False, maybe skip some known-clean files. |
|
986 | 967 | |
|
987 | 968 | Return a dict mapping filename to stat-like object (either |
|
988 | 969 | mercurial.osutil.stat instance or return value of os.stat()). |
|
989 | 970 | |
|
990 | 971 | """ |
|
991 | 972 | # full is a flag that extensions that hook into walk can use -- this |
|
992 | 973 | # implementation doesn't use it at all. This satisfies the contract |
|
993 | 974 | # because we only guarantee a "maybe". |
|
994 | 975 | |
|
995 | 976 | if ignored: |
|
996 | 977 | ignore = util.never |
|
997 | 978 | dirignore = util.never |
|
998 | 979 | elif unknown: |
|
999 | 980 | ignore = self._ignore |
|
1000 | 981 | dirignore = self._dirignore |
|
1001 | 982 | else: |
|
1002 | 983 | # if not unknown and not ignored, drop dir recursion and step 2 |
|
1003 | 984 | ignore = util.always |
|
1004 | 985 | dirignore = util.always |
|
1005 | 986 | |
|
1006 | 987 | matchfn = match.matchfn |
|
1007 | 988 | matchalways = match.always() |
|
1008 | 989 | matchtdir = match.traversedir |
|
1009 | 990 | dmap = self._map |
|
1010 | 991 | listdir = util.listdir |
|
1011 | 992 | lstat = os.lstat |
|
1012 | 993 | dirkind = stat.S_IFDIR |
|
1013 | 994 | regkind = stat.S_IFREG |
|
1014 | 995 | lnkkind = stat.S_IFLNK |
|
1015 | 996 | join = self._join |
|
1016 | 997 | |
|
1017 | 998 | exact = skipstep3 = False |
|
1018 | 999 | if match.isexact(): # match.exact |
|
1019 | 1000 | exact = True |
|
1020 | 1001 | dirignore = util.always # skip step 2 |
|
1021 | 1002 | elif match.prefix(): # match.match, no patterns |
|
1022 | 1003 | skipstep3 = True |
|
1023 | 1004 | |
|
1024 | 1005 | if not exact and self._checkcase: |
|
1025 | 1006 | normalize = self._normalize |
|
1026 | 1007 | normalizefile = self._normalizefile |
|
1027 | 1008 | skipstep3 = False |
|
1028 | 1009 | else: |
|
1029 | 1010 | normalize = self._normalize |
|
1030 | 1011 | normalizefile = None |
|
1031 | 1012 | |
|
1032 | 1013 | # step 1: find all explicit files |
|
1033 | 1014 | results, work, dirsnotfound = self._walkexplicit(match, subrepos) |
|
1034 | 1015 | if matchtdir: |
|
1035 | 1016 | for d in work: |
|
1036 | 1017 | matchtdir(d[0]) |
|
1037 | 1018 | for d in dirsnotfound: |
|
1038 | 1019 | matchtdir(d) |
|
1039 | 1020 | |
|
1040 | 1021 | skipstep3 = skipstep3 and not (work or dirsnotfound) |
|
1041 | 1022 | work = [d for d in work if not dirignore(d[0])] |
|
1042 | 1023 | |
|
1043 | 1024 | # step 2: visit subdirectories |
|
1044 | 1025 | def traverse(work, alreadynormed): |
|
1045 | 1026 | wadd = work.append |
|
1046 | 1027 | while work: |
|
1047 | 1028 | tracing.counter('dirstate.walk work', len(work)) |
|
1048 | 1029 | nd = work.pop() |
|
1049 | 1030 | visitentries = match.visitchildrenset(nd) |
|
1050 | 1031 | if not visitentries: |
|
1051 | 1032 | continue |
|
1052 | 1033 | if visitentries == b'this' or visitentries == b'all': |
|
1053 | 1034 | visitentries = None |
|
1054 | 1035 | skip = None |
|
1055 | 1036 | if nd != b'': |
|
1056 | 1037 | skip = b'.hg' |
|
1057 | 1038 | try: |
|
1058 | 1039 | with tracing.log('dirstate.walk.traverse listdir %s', nd): |
|
1059 | 1040 | entries = listdir(join(nd), stat=True, skip=skip) |
|
1060 | 1041 | except OSError as inst: |
|
1061 | 1042 | if inst.errno in (errno.EACCES, errno.ENOENT): |
|
1062 | 1043 | match.bad( |
|
1063 | 1044 | self.pathto(nd), encoding.strtolocal(inst.strerror) |
|
1064 | 1045 | ) |
|
1065 | 1046 | continue |
|
1066 | 1047 | raise |
|
1067 | 1048 | for f, kind, st in entries: |
|
1068 | 1049 | # Some matchers may return files in the visitentries set, |
|
1069 | 1050 | # instead of 'this', if the matcher explicitly mentions them |
|
1070 | 1051 | # and is not an exactmatcher. This is acceptable; we do not |
|
1071 | 1052 | # make any hard assumptions about file-or-directory below |
|
1072 | 1053 | # based on the presence of `f` in visitentries. If |
|
1073 | 1054 | # visitchildrenset returned a set, we can always skip the |
|
1074 | 1055 | # entries *not* in the set it provided regardless of whether |
|
1075 | 1056 | # they're actually a file or a directory. |
|
1076 | 1057 | if visitentries and f not in visitentries: |
|
1077 | 1058 | continue |
|
1078 | 1059 | if normalizefile: |
|
1079 | 1060 | # even though f might be a directory, we're only |
|
1080 | 1061 | # interested in comparing it to files currently in the |
|
1081 | 1062 | # dmap -- therefore normalizefile is enough |
|
1082 | 1063 | nf = normalizefile( |
|
1083 | 1064 | nd and (nd + b"/" + f) or f, True, True |
|
1084 | 1065 | ) |
|
1085 | 1066 | else: |
|
1086 | 1067 | nf = nd and (nd + b"/" + f) or f |
|
1087 | 1068 | if nf not in results: |
|
1088 | 1069 | if kind == dirkind: |
|
1089 | 1070 | if not ignore(nf): |
|
1090 | 1071 | if matchtdir: |
|
1091 | 1072 | matchtdir(nf) |
|
1092 | 1073 | wadd(nf) |
|
1093 | 1074 | if nf in dmap and (matchalways or matchfn(nf)): |
|
1094 | 1075 | results[nf] = None |
|
1095 | 1076 | elif kind == regkind or kind == lnkkind: |
|
1096 | 1077 | if nf in dmap: |
|
1097 | 1078 | if matchalways or matchfn(nf): |
|
1098 | 1079 | results[nf] = st |
|
1099 | 1080 | elif (matchalways or matchfn(nf)) and not ignore( |
|
1100 | 1081 | nf |
|
1101 | 1082 | ): |
|
1102 | 1083 | # unknown file -- normalize if necessary |
|
1103 | 1084 | if not alreadynormed: |
|
1104 | 1085 | nf = normalize(nf, False, True) |
|
1105 | 1086 | results[nf] = st |
|
1106 | 1087 | elif nf in dmap and (matchalways or matchfn(nf)): |
|
1107 | 1088 | results[nf] = None |
|
1108 | 1089 | |
|
1109 | 1090 | for nd, d in work: |
|
1110 | 1091 | # alreadynormed means that processwork doesn't have to do any |
|
1111 | 1092 | # expensive directory normalization |
|
1112 | 1093 | alreadynormed = not normalize or nd == d |
|
1113 | 1094 | traverse([d], alreadynormed) |
|
1114 | 1095 | |
|
1115 | 1096 | for s in subrepos: |
|
1116 | 1097 | del results[s] |
|
1117 | 1098 | del results[b'.hg'] |
|
1118 | 1099 | |
|
1119 | 1100 | # step 3: visit remaining files from dmap |
|
1120 | 1101 | if not skipstep3 and not exact: |
|
1121 | 1102 | # If a dmap file is not in results yet, it was either |
|
1122 | 1103 | # a) not matching matchfn b) ignored, c) missing, or d) under a |
|
1123 | 1104 | # symlink directory. |
|
1124 | 1105 | if not results and matchalways: |
|
1125 | 1106 | visit = [f for f in dmap] |
|
1126 | 1107 | else: |
|
1127 | 1108 | visit = [f for f in dmap if f not in results and matchfn(f)] |
|
1128 | 1109 | visit.sort() |
|
1129 | 1110 | |
|
1130 | 1111 | if unknown: |
|
1131 | 1112 | # unknown == True means we walked all dirs under the roots |
|
1132 | 1113 | # that wasn't ignored, and everything that matched was stat'ed |
|
1133 | 1114 | # and is already in results. |
|
1134 | 1115 | # The rest must thus be ignored or under a symlink. |
|
1135 | 1116 | audit_path = pathutil.pathauditor(self._root, cached=True) |
|
1136 | 1117 | |
|
1137 | 1118 | for nf in iter(visit): |
|
1138 | 1119 | # If a stat for the same file was already added with a |
|
1139 | 1120 | # different case, don't add one for this, since that would |
|
1140 | 1121 | # make it appear as if the file exists under both names |
|
1141 | 1122 | # on disk. |
|
1142 | 1123 | if ( |
|
1143 | 1124 | normalizefile |
|
1144 | 1125 | and normalizefile(nf, True, True) in results |
|
1145 | 1126 | ): |
|
1146 | 1127 | results[nf] = None |
|
1147 | 1128 | # Report ignored items in the dmap as long as they are not |
|
1148 | 1129 | # under a symlink directory. |
|
1149 | 1130 | elif audit_path.check(nf): |
|
1150 | 1131 | try: |
|
1151 | 1132 | results[nf] = lstat(join(nf)) |
|
1152 | 1133 | # file was just ignored, no links, and exists |
|
1153 | 1134 | except OSError: |
|
1154 | 1135 | # file doesn't exist |
|
1155 | 1136 | results[nf] = None |
|
1156 | 1137 | else: |
|
1157 | 1138 | # It's either missing or under a symlink directory |
|
1158 | 1139 | # which we in this case report as missing |
|
1159 | 1140 | results[nf] = None |
|
1160 | 1141 | else: |
|
1161 | 1142 | # We may not have walked the full directory tree above, |
|
1162 | 1143 | # so stat and check everything we missed. |
|
1163 | 1144 | iv = iter(visit) |
|
1164 | 1145 | for st in util.statfiles([join(i) for i in visit]): |
|
1165 | 1146 | results[next(iv)] = st |
|
1166 | 1147 | return results |
|
1167 | 1148 | |
|
1168 | 1149 | def _rust_status(self, matcher, list_clean, list_ignored, list_unknown): |
|
1169 | 1150 | # Force Rayon (Rust parallelism library) to respect the number of |
|
1170 | 1151 | # workers. This is a temporary workaround until Rust code knows |
|
1171 | 1152 | # how to read the config file. |
|
1172 | 1153 | numcpus = self._ui.configint(b"worker", b"numcpus") |
|
1173 | 1154 | if numcpus is not None: |
|
1174 | 1155 | encoding.environ.setdefault(b'RAYON_NUM_THREADS', b'%d' % numcpus) |
|
1175 | 1156 | |
|
1176 | 1157 | workers_enabled = self._ui.configbool(b"worker", b"enabled", True) |
|
1177 | 1158 | if not workers_enabled: |
|
1178 | 1159 | encoding.environ[b"RAYON_NUM_THREADS"] = b"1" |
|
1179 | 1160 | |
|
1180 | 1161 | ( |
|
1181 | 1162 | lookup, |
|
1182 | 1163 | modified, |
|
1183 | 1164 | added, |
|
1184 | 1165 | removed, |
|
1185 | 1166 | deleted, |
|
1186 | 1167 | clean, |
|
1187 | 1168 | ignored, |
|
1188 | 1169 | unknown, |
|
1189 | 1170 | warnings, |
|
1190 | 1171 | bad, |
|
1191 | 1172 | traversed, |
|
1192 | 1173 | dirty, |
|
1193 | 1174 | ) = rustmod.status( |
|
1194 | 1175 | self._map._map, |
|
1195 | 1176 | matcher, |
|
1196 | 1177 | self._rootdir, |
|
1197 | 1178 | self._ignorefiles(), |
|
1198 | 1179 | self._checkexec, |
|
1199 | 1180 | bool(list_clean), |
|
1200 | 1181 | bool(list_ignored), |
|
1201 | 1182 | bool(list_unknown), |
|
1202 | 1183 | bool(matcher.traversedir), |
|
1203 | 1184 | ) |
|
1204 | 1185 | |
|
1205 | 1186 | self._dirty |= dirty |
|
1206 | 1187 | |
|
1207 | 1188 | if matcher.traversedir: |
|
1208 | 1189 | for dir in traversed: |
|
1209 | 1190 | matcher.traversedir(dir) |
|
1210 | 1191 | |
|
1211 | 1192 | if self._ui.warn: |
|
1212 | 1193 | for item in warnings: |
|
1213 | 1194 | if isinstance(item, tuple): |
|
1214 | 1195 | file_path, syntax = item |
|
1215 | 1196 | msg = _(b"%s: ignoring invalid syntax '%s'\n") % ( |
|
1216 | 1197 | file_path, |
|
1217 | 1198 | syntax, |
|
1218 | 1199 | ) |
|
1219 | 1200 | self._ui.warn(msg) |
|
1220 | 1201 | else: |
|
1221 | 1202 | msg = _(b"skipping unreadable pattern file '%s': %s\n") |
|
1222 | 1203 | self._ui.warn( |
|
1223 | 1204 | msg |
|
1224 | 1205 | % ( |
|
1225 | 1206 | pathutil.canonpath( |
|
1226 | 1207 | self._rootdir, self._rootdir, item |
|
1227 | 1208 | ), |
|
1228 | 1209 | b"No such file or directory", |
|
1229 | 1210 | ) |
|
1230 | 1211 | ) |
|
1231 | 1212 | |
|
1232 | 1213 | for (fn, message) in bad: |
|
1233 | 1214 | matcher.bad(fn, encoding.strtolocal(message)) |
|
1234 | 1215 | |
|
1235 | 1216 | status = scmutil.status( |
|
1236 | 1217 | modified=modified, |
|
1237 | 1218 | added=added, |
|
1238 | 1219 | removed=removed, |
|
1239 | 1220 | deleted=deleted, |
|
1240 | 1221 | unknown=unknown, |
|
1241 | 1222 | ignored=ignored, |
|
1242 | 1223 | clean=clean, |
|
1243 | 1224 | ) |
|
1244 | 1225 | return (lookup, status) |
|
1245 | 1226 | |
|
1246 | 1227 | def status(self, match, subrepos, ignored, clean, unknown): |
|
1247 | 1228 | """Determine the status of the working copy relative to the |
|
1248 | 1229 | dirstate and return a pair of (unsure, status), where status is of type |
|
1249 | 1230 | scmutil.status and: |
|
1250 | 1231 | |
|
1251 | 1232 | unsure: |
|
1252 | 1233 | files that might have been modified since the dirstate was |
|
1253 | 1234 | written, but need to be read to be sure (size is the same |
|
1254 | 1235 | but mtime differs) |
|
1255 | 1236 | status.modified: |
|
1256 | 1237 | files that have definitely been modified since the dirstate |
|
1257 | 1238 | was written (different size or mode) |
|
1258 | 1239 | status.clean: |
|
1259 | 1240 | files that have definitely not been modified since the |
|
1260 | 1241 | dirstate was written |
|
1261 | 1242 | """ |
|
1262 | 1243 | listignored, listclean, listunknown = ignored, clean, unknown |
|
1263 | 1244 | lookup, modified, added, unknown, ignored = [], [], [], [], [] |
|
1264 | 1245 | removed, deleted, clean = [], [], [] |
|
1265 | 1246 | |
|
1266 | 1247 | dmap = self._map |
|
1267 | 1248 | dmap.preload() |
|
1268 | 1249 | |
|
1269 | 1250 | use_rust = True |
|
1270 | 1251 | |
|
1271 | 1252 | allowed_matchers = ( |
|
1272 | 1253 | matchmod.alwaysmatcher, |
|
1273 | 1254 | matchmod.exactmatcher, |
|
1274 | 1255 | matchmod.includematcher, |
|
1275 | 1256 | ) |
|
1276 | 1257 | |
|
1277 | 1258 | if rustmod is None: |
|
1278 | 1259 | use_rust = False |
|
1279 | 1260 | elif self._checkcase: |
|
1280 | 1261 | # Case-insensitive filesystems are not handled yet |
|
1281 | 1262 | use_rust = False |
|
1282 | 1263 | elif subrepos: |
|
1283 | 1264 | use_rust = False |
|
1284 | 1265 | elif sparse.enabled: |
|
1285 | 1266 | use_rust = False |
|
1286 | 1267 | elif not isinstance(match, allowed_matchers): |
|
1287 | 1268 | # Some matchers have yet to be implemented |
|
1288 | 1269 | use_rust = False |
|
1289 | 1270 | |
|
1290 | 1271 | # Get the time from the filesystem so we can disambiguate files that |
|
1291 | 1272 | # appear modified in the present or future. |
|
1292 | 1273 | try: |
|
1293 | 1274 | mtime_boundary = timestamp.get_fs_now(self._opener) |
|
1294 | 1275 | except OSError: |
|
1295 | 1276 | # In largefiles or readonly context |
|
1296 | 1277 | mtime_boundary = None |
|
1297 | 1278 | |
|
1298 | 1279 | if use_rust: |
|
1299 | 1280 | try: |
|
1300 | 1281 | res = self._rust_status( |
|
1301 | 1282 | match, listclean, listignored, listunknown |
|
1302 | 1283 | ) |
|
1303 | 1284 | return res + (mtime_boundary,) |
|
1304 | 1285 | except rustmod.FallbackError: |
|
1305 | 1286 | pass |
|
1306 | 1287 | |
|
1307 | 1288 | def noop(f): |
|
1308 | 1289 | pass |
|
1309 | 1290 | |
|
1310 | 1291 | dcontains = dmap.__contains__ |
|
1311 | 1292 | dget = dmap.__getitem__ |
|
1312 | 1293 | ladd = lookup.append # aka "unsure" |
|
1313 | 1294 | madd = modified.append |
|
1314 | 1295 | aadd = added.append |
|
1315 | 1296 | uadd = unknown.append if listunknown else noop |
|
1316 | 1297 | iadd = ignored.append if listignored else noop |
|
1317 | 1298 | radd = removed.append |
|
1318 | 1299 | dadd = deleted.append |
|
1319 | 1300 | cadd = clean.append if listclean else noop |
|
1320 | 1301 | mexact = match.exact |
|
1321 | 1302 | dirignore = self._dirignore |
|
1322 | 1303 | checkexec = self._checkexec |
|
1323 | 1304 | checklink = self._checklink |
|
1324 | 1305 | copymap = self._map.copymap |
|
1325 | 1306 | |
|
1326 | 1307 | # We need to do full walks when either |
|
1327 | 1308 | # - we're listing all clean files, or |
|
1328 | 1309 | # - match.traversedir does something, because match.traversedir should |
|
1329 | 1310 | # be called for every dir in the working dir |
|
1330 | 1311 | full = listclean or match.traversedir is not None |
|
1331 | 1312 | for fn, st in pycompat.iteritems( |
|
1332 | 1313 | self.walk(match, subrepos, listunknown, listignored, full=full) |
|
1333 | 1314 | ): |
|
1334 | 1315 | if not dcontains(fn): |
|
1335 | 1316 | if (listignored or mexact(fn)) and dirignore(fn): |
|
1336 | 1317 | if listignored: |
|
1337 | 1318 | iadd(fn) |
|
1338 | 1319 | else: |
|
1339 | 1320 | uadd(fn) |
|
1340 | 1321 | continue |
|
1341 | 1322 | |
|
1342 | 1323 | t = dget(fn) |
|
1343 | 1324 | mode = t.mode |
|
1344 | 1325 | size = t.size |
|
1345 | 1326 | |
|
1346 | 1327 | if not st and t.tracked: |
|
1347 | 1328 | dadd(fn) |
|
1348 | 1329 | elif t.p2_info: |
|
1349 | 1330 | madd(fn) |
|
1350 | 1331 | elif t.added: |
|
1351 | 1332 | aadd(fn) |
|
1352 | 1333 | elif t.removed: |
|
1353 | 1334 | radd(fn) |
|
1354 | 1335 | elif t.tracked: |
|
1355 | 1336 | if not checklink and t.has_fallback_symlink: |
|
1356 | 1337 | # If the file system does not support symlink, the mode |
|
1357 | 1338 | # might not be correctly stored in the dirstate, so do not |
|
1358 | 1339 | # trust it. |
|
1359 | 1340 | ladd(fn) |
|
1360 | 1341 | elif not checkexec and t.has_fallback_exec: |
|
1361 | 1342 | # If the file system does not support exec bits, the mode |
|
1362 | 1343 | # might not be correctly stored in the dirstate, so do not |
|
1363 | 1344 | # trust it. |
|
1364 | 1345 | ladd(fn) |
|
1365 | 1346 | elif ( |
|
1366 | 1347 | size >= 0 |
|
1367 | 1348 | and ( |
|
1368 | 1349 | (size != st.st_size and size != st.st_size & _rangemask) |
|
1369 | 1350 | or ((mode ^ st.st_mode) & 0o100 and checkexec) |
|
1370 | 1351 | ) |
|
1371 | 1352 | or fn in copymap |
|
1372 | 1353 | ): |
|
1373 | 1354 | if stat.S_ISLNK(st.st_mode) and size != st.st_size: |
|
1374 | 1355 | # issue6456: Size returned may be longer due to |
|
1375 | 1356 | # encryption on EXT-4 fscrypt, undecided. |
|
1376 | 1357 | ladd(fn) |
|
1377 | 1358 | else: |
|
1378 | 1359 | madd(fn) |
|
1379 | 1360 | elif not t.mtime_likely_equal_to(timestamp.mtime_of(st)): |
|
1380 | 1361 | # There might be a change in the future if for example the |
|
1381 | 1362 | # internal clock is off, but this is a case where the issues |
|
1382 | 1363 | # the user would face would be a lot worse and there is |
|
1383 | 1364 | # nothing we can really do. |
|
1384 | 1365 | ladd(fn) |
|
1385 | 1366 | elif listclean: |
|
1386 | 1367 | cadd(fn) |
|
1387 | 1368 | status = scmutil.status( |
|
1388 | 1369 | modified, added, removed, deleted, unknown, ignored, clean |
|
1389 | 1370 | ) |
|
1390 | 1371 | return (lookup, status, mtime_boundary) |
|
1391 | 1372 | |
|
1392 | 1373 | def matches(self, match): |
|
1393 | 1374 | """ |
|
1394 | 1375 | return files in the dirstate (in whatever state) filtered by match |
|
1395 | 1376 | """ |
|
1396 | 1377 | dmap = self._map |
|
1397 | 1378 | if rustmod is not None: |
|
1398 | 1379 | dmap = self._map._map |
|
1399 | 1380 | |
|
1400 | 1381 | if match.always(): |
|
1401 | 1382 | return dmap.keys() |
|
1402 | 1383 | files = match.files() |
|
1403 | 1384 | if match.isexact(): |
|
1404 | 1385 | # fast path -- filter the other way around, since typically files is |
|
1405 | 1386 | # much smaller than dmap |
|
1406 | 1387 | return [f for f in files if f in dmap] |
|
1407 | 1388 | if match.prefix() and all(fn in dmap for fn in files): |
|
1408 | 1389 | # fast path -- all the values are known to be files, so just return |
|
1409 | 1390 | # that |
|
1410 | 1391 | return list(files) |
|
1411 | 1392 | return [f for f in dmap if match(f)] |
|
1412 | 1393 | |
|
1413 | 1394 | def _actualfilename(self, tr): |
|
1414 | 1395 | if tr: |
|
1415 | 1396 | return self._pendingfilename |
|
1416 | 1397 | else: |
|
1417 | 1398 | return self._filename |
|
1418 | 1399 | |
|
1419 | 1400 | def savebackup(self, tr, backupname): |
|
1420 | 1401 | '''Save current dirstate into backup file''' |
|
1421 | 1402 | filename = self._actualfilename(tr) |
|
1422 | 1403 | assert backupname != filename |
|
1423 | 1404 | |
|
1424 | 1405 | # use '_writedirstate' instead of 'write' to ensure changes are written, |

1425 | 1406 | # because the latter skips writing while a transaction is running. |

1426 | 1407 | # The output file will be used to create a backup of the dirstate here. |
|
1427 | 1408 | if self._dirty or not self._opener.exists(filename): |
|
1428 | 1409 | self._writedirstate( |
|
1429 | 1410 | tr, |
|
1430 | 1411 | self._opener(filename, b"w", atomictemp=True, checkambig=True), |
|
1431 | 1412 | ) |
|
1432 | 1413 | |
|
1433 | 1414 | if tr: |
|
1434 | 1415 | # ensure that subsequent tr.writepending returns True for |
|
1435 | 1416 | # changes written out above, even if dirstate is never |
|
1436 | 1417 | # changed after this |
|
1437 | 1418 | tr.addfilegenerator( |
|
1438 | 1419 | b'dirstate', |
|
1439 | 1420 | (self._filename,), |
|
1440 | 1421 | lambda f: self._writedirstate(tr, f), |
|
1441 | 1422 | location=b'plain', |
|
1442 | 1423 | ) |
|
1443 | 1424 | |
|
1444 | 1425 | # ensure that pending file written above is unlinked at |
|
1445 | 1426 | # failure, even if tr.writepending isn't invoked until the |
|
1446 | 1427 | # end of this transaction |
|
1447 | 1428 | tr.registertmp(filename, location=b'plain') |
|
1448 | 1429 | |
|
1449 | 1430 | self._opener.tryunlink(backupname) |
|
1450 | 1431 | # hardlink backup is okay because _writedirstate is always called |
|
1451 | 1432 | # with an "atomictemp=True" file. |
|
1452 | 1433 | util.copyfile( |
|
1453 | 1434 | self._opener.join(filename), |
|
1454 | 1435 | self._opener.join(backupname), |
|
1455 | 1436 | hardlink=True, |
|
1456 | 1437 | ) |
|
1457 | 1438 | |
|
1458 | 1439 | def restorebackup(self, tr, backupname): |
|
1459 | 1440 | '''Restore dirstate from a backup file''' |
|
1460 | 1441 | # this "invalidate()" prevents "wlock.release()" from writing |
|
1461 | 1442 | # changes of dirstate out after restoring from backup file |
|
1462 | 1443 | self.invalidate() |
|
1463 | 1444 | filename = self._actualfilename(tr) |
|
1464 | 1445 | o = self._opener |
|
1465 | 1446 | if util.samefile(o.join(backupname), o.join(filename)): |
|
1466 | 1447 | o.unlink(backupname) |
|
1467 | 1448 | else: |
|
1468 | 1449 | o.rename(backupname, filename, checkambig=True) |
|
1469 | 1450 | |
|
1470 | 1451 | def clearbackup(self, tr, backupname): |
|
1471 | 1452 | '''Clear backup file''' |
|
1472 | 1453 | self._opener.unlink(backupname) |
|
1473 | 1454 | |
|
1474 | 1455 | def verify(self, m1, m2): |
|
1475 | 1456 | """check the dirstate content again the parent manifest and yield errors""" |
|
1476 | 1457 | missing_from_p1 = b"%s in state %s, but not in manifest1\n" |
|
1477 | 1458 | unexpected_in_p1 = b"%s in state %s, but also in manifest1\n" |
|
1478 | 1459 | missing_from_ps = b"%s in state %s, but not in either manifest\n" |
|
1479 | 1460 | missing_from_ds = b"%s in manifest1, but listed as state %s\n" |
|
1480 | 1461 | for f, entry in self.items(): |
|
1481 | 1462 | state = entry.state |
|
1482 | 1463 | if state in b"nr" and f not in m1: |
|
1483 | 1464 | yield (missing_from_p1, f, state) |
|
1484 | 1465 | if state in b"a" and f in m1: |
|
1485 | 1466 | yield (unexpected_in_p1, f, state) |
|
1486 | 1467 | if state in b"m" and f not in m1 and f not in m2: |
|
1487 | 1468 | yield (missing_from_ps, f, state) |
|
1488 | 1469 | for f in m1: |
|
1489 | 1470 | state = self.get_entry(f).state |
|
1490 | 1471 | if state not in b"nrm": |
|
1491 | 1472 | yield (missing_from_ds, f, state) |
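A hedged sketch of how a caller might consume verify() (assumes `ds` is a dirstate instance and `m1`, `m2` its parent manifests; each yielded template is a bytes format string taking the filename and state):

    import sys

    for template, f, state in ds.verify(m1, m2):
        sys.stderr.buffer.write(template % (f, state))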
@@ -1,732 +1,732 b'' | |||
|
1 | 1 | # dirstatemap.py |
|
2 | 2 | # |
|
3 | 3 | # This software may be used and distributed according to the terms of the |
|
4 | 4 | # GNU General Public License version 2 or any later version. |
|
5 | 5 | |
|
6 | 6 | from __future__ import absolute_import |
|
7 | 7 | |
|
8 | 8 | import errno |
|
9 | 9 | |
|
10 | 10 | from .i18n import _ |
|
11 | 11 | |
|
12 | 12 | from . import ( |
|
13 | 13 | error, |
|
14 | 14 | pathutil, |
|
15 | 15 | policy, |
|
16 | 16 | pycompat, |
|
17 | 17 | txnutil, |
|
18 | 18 | util, |
|
19 | 19 | ) |
|
20 | 20 | |
|
21 | 21 | from .dirstateutils import ( |
|
22 | 22 | docket as docketmod, |
|
23 | 23 | v2, |
|
24 | 24 | ) |
|
25 | 25 | |
|
26 | 26 | parsers = policy.importmod('parsers') |
|
27 | 27 | rustmod = policy.importrust('dirstate') |
|
28 | 28 | |
|
29 | 29 | propertycache = util.propertycache |
|
30 | 30 | |
|
31 | 31 | if rustmod is None: |
|
32 | 32 | DirstateItem = parsers.DirstateItem |
|
33 | 33 | else: |
|
34 | 34 | DirstateItem = rustmod.DirstateItem |
|
35 | 35 | |
|
36 | 36 | rangemask = 0x7FFFFFFF |
|
37 | 37 | |
|
38 | 38 | |
|
39 | 39 | class _dirstatemapcommon(object): |
|
40 | 40 | """ |
|
41 | 41 | Methods that are identical for both implementations of the dirstatemap |
|
42 | 42 | class, with and without Rust extensions enabled. |
|
43 | 43 | """ |
|
44 | 44 | |
|
45 | 45 | # please pytype |
|
46 | 46 | |
|
47 | 47 | _map = None |
|
48 | 48 | copymap = None |
|
49 | 49 | |
|
50 | 50 | def __init__(self, ui, opener, root, nodeconstants, use_dirstate_v2): |
|
51 | 51 | self._use_dirstate_v2 = use_dirstate_v2 |
|
52 | 52 | self._nodeconstants = nodeconstants |
|
53 | 53 | self._ui = ui |
|
54 | 54 | self._opener = opener |
|
55 | 55 | self._root = root |
|
56 | 56 | self._filename = b'dirstate' |
|
57 | 57 | self._nodelen = 20 # Also update Rust code when changing this! |
|
58 | 58 | self._parents = None |
|
59 | 59 | self._dirtyparents = False |
|
60 | 60 | self._docket = None |
|
61 | 61 | |
|
62 | 62 | # for consistent view between _pl() and _read() invocations |
|
63 | 63 | self._pendingmode = None |
|
64 | 64 | |
|
65 | 65 | def preload(self): |
|
66 | 66 | """Loads the underlying data, if it's not already loaded""" |
|
67 | 67 | self._map |
|
68 | 68 | |
|
69 | 69 | def get(self, key, default=None): |
|
70 | 70 | return self._map.get(key, default) |
|
71 | 71 | |
|
72 | 72 | def __len__(self): |
|
73 | 73 | return len(self._map) |
|
74 | 74 | |
|
75 | 75 | def __iter__(self): |
|
76 | 76 | return iter(self._map) |
|
77 | 77 | |
|
78 | 78 | def __contains__(self, key): |
|
79 | 79 | return key in self._map |
|
80 | 80 | |
|
81 | 81 | def __getitem__(self, item): |
|
82 | 82 | return self._map[item] |
|
83 | 83 | |
|
84 | 84 | ### sub-class utility method |
|
85 | 85 | # |
|
86 | 86 | # Used to allow a generic implementation of some methods while still |

87 | 87 | # coping with minor differences between implementations. |
|
88 | 88 | |
|
89 | 89 | def _dirs_incr(self, filename, old_entry=None): |
|
90 | 90 | """incremente the dirstate counter if applicable |
|
91 | 91 | |
|
92 | 92 | This might be a no-op for some subclass who deal with directory |
|
93 | 93 | tracking in a different way. |
|
94 | 94 | """ |
|
95 | 95 | |
|
96 | 96 | def _dirs_decr(self, filename, old_entry=None, remove_variant=False): |
|
97 | 97 | """decremente the dirstate counter if applicable |
|
98 | 98 | |
|
99 | 99 | This might be a no-op for some subclass who deal with directory |
|
100 | 100 | tracking in a different way. |
|
101 | 101 | """ |
|
102 | 102 | |
|
103 | 103 | def _refresh_entry(self, f, entry): |
|
104 | 104 | """record updated state of an entry""" |
|
105 | 105 | |
|
106 | 106 | def _insert_entry(self, f, entry): |
|
107 | 107 | """add a new dirstate entry (or replace an unrelated one) |
|
108 | 108 | |
|
109 | 109 | The fact that it is actually new is the responsibility of the caller |
|
110 | 110 | """ |
|
111 | 111 | |
|
112 | 112 | def _drop_entry(self, f): |
|
113 | 113 | """remove any entry for file f |
|
114 | 114 | |
|
115 | 115 | This should also drop associated copy information |
|
116 | 116 | |
|
117 | 117 | The fact that we actually need to drop it is the responsibility of the caller""" |
|
118 | 118 | |
|
119 | 119 | ### method to manipulate the entries |
|
120 | 120 | |
|
121 | 121 | def set_possibly_dirty(self, filename): |
|
122 | 122 | """record that the current state of the file on disk is unknown""" |
|
123 | 123 | entry = self[filename] |
|
124 | 124 | entry.set_possibly_dirty() |
|
125 | 125 | self._refresh_entry(filename, entry) |
|
126 | 126 | |
|
127 | 127 | def set_clean(self, filename, mode, size, mtime): |
|
128 | 128 | """mark a file as back to a clean state""" |
|
129 | 129 | entry = self[filename] |
|
130 | 130 | size = size & rangemask |
|
131 | 131 | entry.set_clean(mode, size, mtime) |
|
132 | 132 | self._refresh_entry(filename, entry) |
|
133 | 133 | self.copymap.pop(filename, None) |
|
134 | 134 | |
|
135 | 135 | def set_tracked(self, filename): |
|
136 | 136 | new = False |
|
137 | 137 | entry = self.get(filename) |
|
138 | 138 | if entry is None: |
|
139 | 139 | self._dirs_incr(filename) |
|
140 | 140 | entry = DirstateItem( |
|
141 | 141 | wc_tracked=True, |
|
142 | 142 | ) |
|
143 | 143 | |
|
144 | 144 | self._insert_entry(filename, entry) |
|
145 | 145 | new = True |
|
146 | 146 | elif not entry.tracked: |
|
147 | 147 | self._dirs_incr(filename, entry) |
|
148 | 148 | entry.set_tracked() |
|
149 | 149 | self._refresh_entry(filename, entry) |
|
150 | 150 | new = True |
|
151 | 151 | else: |
|
152 | 152 | # XXX This is probably overkill for most cases, but we need it to |

153 | 153 | # fully replace the `normallookup` call with the `set_tracked` one. |
|
154 | 154 | # Consider smoothing this in the future. |
|
155 | 155 | entry.set_possibly_dirty() |
|
156 | 156 | self._refresh_entry(filename, entry) |
|
157 | 157 | return new |
|
158 | 158 | |
|
159 | 159 | def set_untracked(self, f): |
|
160 | 160 | """Mark a file as no longer tracked in the dirstate map""" |
|
161 | 161 | entry = self.get(f) |
|
162 | 162 | if entry is None: |
|
163 | 163 | return False |
|
164 | 164 | else: |
|
165 | 165 | self._dirs_decr(f, old_entry=entry, remove_variant=not entry.added) |
|
166 | 166 | if not entry.p2_info: |
|
167 | 167 | self.copymap.pop(f, None) |
|
168 | 168 | entry.set_untracked() |
|
169 | 169 | self._refresh_entry(f, entry) |
|
170 | 170 | return True |
|
171 | 171 | |
|
172 | 172 | def reset_state( |
|
173 | 173 | self, |
|
174 | 174 | filename, |
|
175 | 175 | wc_tracked=False, |
|
176 | 176 | p1_tracked=False, |
|
177 | 177 | p2_info=False, |
|
178 | 178 | has_meaningful_mtime=True, |
|
179 | 179 | has_meaningful_data=True, |
|
180 | 180 | parentfiledata=None, |
|
181 | 181 | ): |
|
182 | 182 | """Set a entry to a given state, diregarding all previous state |
|
183 | 183 | |
|
184 | 184 | This is to be used by the part of the dirstate API dedicated to |
|
185 | 185 | adjusting the dirstate after a update/merge. |
|
186 | 186 | |
|
187 | 187 | note: calling this might result to no entry existing at all if the |
|
188 | 188 | dirstate map does not see any point at having one for this file |
|
189 | 189 | anymore. |
|
190 | 190 | """ |
|
191 | 191 | # copy information is now outdated |

192 | 192 | # (maybe new information should be passed directly to this function) |
|
193 | 193 | self.copymap.pop(filename, None) |
|
194 | 194 | |
|
195 | 195 | if not (p1_tracked or p2_info or wc_tracked): |
|
196 | 196 | old_entry = self._map.get(filename) |
|
197 | 197 | self._drop_entry(filename) |
|
198 | 198 | self._dirs_decr(filename, old_entry=old_entry) |
|
199 | 199 | return |
|
200 | 200 | |
|
201 | 201 | old_entry = self._map.get(filename) |
|
202 | 202 | self._dirs_incr(filename, old_entry) |
|
203 | 203 | entry = DirstateItem( |
|
204 | 204 | wc_tracked=wc_tracked, |
|
205 | 205 | p1_tracked=p1_tracked, |
|
206 | 206 | p2_info=p2_info, |
|
207 | 207 | has_meaningful_mtime=has_meaningful_mtime, |
|
208 | 208 | parentfiledata=parentfiledata, |
|
209 | 209 | ) |
|
210 | 210 | self._insert_entry(filename, entry) |
|
211 | 211 | |
|
212 | 212 | ### disk interaction |
|
213 | 213 | |
|
214 | 214 | def _opendirstatefile(self): |
|
215 | 215 | fp, mode = txnutil.trypending(self._root, self._opener, self._filename) |
|
216 | 216 | if self._pendingmode is not None and self._pendingmode != mode: |
|
217 | 217 | fp.close() |
|
218 | 218 | raise error.Abort( |
|
219 | 219 | _(b'working directory state may be changed in parallel') |
|
220 | 220 | ) |
|
221 | 221 | self._pendingmode = mode |
|
222 | 222 | return fp |
|
223 | 223 | |
|
224 | 224 | def _readdirstatefile(self, size=-1): |
|
225 | 225 | try: |
|
226 | 226 | with self._opendirstatefile() as fp: |
|
227 | 227 | return fp.read(size) |
|
228 | 228 | except IOError as err: |
|
229 | 229 | if err.errno != errno.ENOENT: |
|
230 | 230 | raise |
|
231 | 231 | # File doesn't exist, so the current state is empty |
|
232 | 232 | return b'' |
|
233 | 233 | |
|
234 | 234 | @property |
|
235 | 235 | def docket(self): |
|
236 | 236 | if not self._docket: |
|
237 | 237 | if not self._use_dirstate_v2: |
|
238 | 238 | raise error.ProgrammingError( |
|
239 | 239 | b'dirstate only has a docket in v2 format' |
|
240 | 240 | ) |
|
241 | 241 | self._docket = docketmod.DirstateDocket.parse( |
|
242 | 242 | self._readdirstatefile(), self._nodeconstants |
|
243 | 243 | ) |
|
244 | 244 | return self._docket |
|
245 | 245 | |
|
246 | 246 | def write_v2_no_append(self, tr, st, meta, packed): |
|
247 | 247 | old_docket = self.docket |
|
248 | 248 | new_docket = docketmod.DirstateDocket.with_new_uuid( |
|
249 | 249 | self.parents(), len(packed), meta |
|
250 | 250 | ) |
|
251 | 251 | data_filename = new_docket.data_filename() |
|
252 | 252 | if tr: |
|
253 | 253 | tr.add(data_filename, 0) |
|
254 | 254 | self._opener.write(data_filename, packed) |
|
255 | 255 | # Write the new docket after the new data file has been |
|
256 | 256 | # written. Because `st` was opened with `atomictemp=True`, |
|
257 | 257 | # the actual `.hg/dirstate` file is only affected on close. |
|
258 | 258 | st.write(new_docket.serialize()) |
|
259 | 259 | st.close() |
|
260 | 260 | # Remove the old data file after the new docket pointing to |
|
261 | 261 | # the new data file was written. |
|
262 | 262 | if old_docket.uuid: |
|
263 | 263 | data_filename = old_docket.data_filename() |
|
264 | 264 | unlink = lambda _tr=None: self._opener.unlink(data_filename) |
|
265 | 265 | if tr: |
|
266 | 266 | category = b"dirstate-v2-clean-" + old_docket.uuid |
|
267 | 267 | tr.addpostclose(category, unlink) |
|
268 | 268 | else: |
|
269 | 269 | unlink() |
|
270 | 270 | self._docket = new_docket |
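The ordering in write_v2_no_append (new data file first, docket second, old data file last) is what makes the rewrite crash-safe: a failure at any point leaves a docket that still points at a complete data file. A minimal standalone sketch of the same pattern, with hypothetical names rather than Mercurial's vfs API:

    import os
    import tempfile

    def swap_data_file(root, new_data_name, packed, docket_bytes, old_data_name=None):
        # 1. Write the new data file; nothing references it yet.
        with open(os.path.join(root, new_data_name), 'wb') as f:
            f.write(packed)
        # 2. Atomically replace the docket so readers switch over in one step
        #    (the real code uses an atomictemp file for the same effect).
        fd, tmp = tempfile.mkstemp(dir=root)
        with os.fdopen(fd, 'wb') as f:
            f.write(docket_bytes)
        os.replace(tmp, os.path.join(root, 'dirstate'))
        # 3. Only now is the old data file unreachable and safe to unlink.
        if old_data_name is not None:
            os.unlink(os.path.join(root, old_data_name))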
|
271 | 271 | |
|
272 | 272 | ### reading/setting parents |
|
273 | 273 | |
|
274 | 274 | def parents(self): |
|
275 | 275 | if not self._parents: |
|
276 | 276 | if self._use_dirstate_v2: |
|
277 | 277 | self._parents = self.docket.parents |
|
278 | 278 | else: |
|
279 | 279 | read_len = self._nodelen * 2 |
|
280 | 280 | st = self._readdirstatefile(read_len) |
|
281 | 281 | l = len(st) |
|
282 | 282 | if l == read_len: |
|
283 | 283 | self._parents = ( |
|
284 | 284 | st[: self._nodelen], |
|
285 | 285 | st[self._nodelen : 2 * self._nodelen], |
|
286 | 286 | ) |
|
287 | 287 | elif l == 0: |
|
288 | 288 | self._parents = ( |
|
289 | 289 | self._nodeconstants.nullid, |
|
290 | 290 | self._nodeconstants.nullid, |
|
291 | 291 | ) |
|
292 | 292 | else: |
|
293 | 293 | raise error.Abort( |
|
294 | 294 | _(b'working directory state appears damaged!') |
|
295 | 295 | ) |
|
296 | 296 | |
|
297 | 297 | return self._parents |
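The v1 layout this relies on is simply two 20-byte parent nodes at the very start of the file; a hedged standalone equivalent of the branch above:

    # Read the working copy parents from a v1 dirstate (assumes 20-byte
    # hashes, matching self._nodelen).
    with open('.hg/dirstate', 'rb') as f:
        header = f.read(40)
    if len(header) == 40:
        p1, p2 = header[:20], header[20:]
    elif len(header) == 0:
        p1 = p2 = b'\x00' * 20  # null parents: empty dirstate
    else:
        raise ValueError('working directory state appears damaged')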
|
298 | 298 | |
|
299 | 299 | |
|
300 | 300 | class dirstatemap(_dirstatemapcommon): |
|
301 | 301 | """Map encapsulating the dirstate's contents. |
|
302 | 302 | |
|
303 | 303 | The dirstate contains the following state: |
|
304 | 304 | |
|
305 | 305 | - `identity` is the identity of the dirstate file, which can be used to |
|
306 | 306 | detect when changes have occurred to the dirstate file. |
|
307 | 307 | |
|
308 | 308 | - `parents` is a pair containing the parents of the working copy. The |
|
309 | 309 | parents are updated by calling `setparents`. |
|
310 | 310 | |
|
311 | 311 | - the state map maps filenames to tuples of (state, mode, size, mtime), |
|
312 | 312 | where state is a single character representing 'normal', 'added', |
|
313 | 313 | 'removed', or 'merged'. It is read by treating the dirstate as a |
|
314 | 314 | dict. File state is updated by calling various methods (see each |
|
315 | 315 | documentation for details): |
|
316 | 316 | |
|
317 | 317 | - `reset_state`, |
|
318 | 318 | - `set_tracked` |
|
319 | 319 | - `set_untracked` |
|
320 | 320 | - `set_clean` |
|
321 | 321 | - `set_possibly_dirty` |
|
322 | 322 | |
|
323 | 323 | - `copymap` maps destination filenames to their source filename. |
|
324 | 324 | |
|
325 | 325 | The dirstate also provides the following views onto the state: |
|
326 | 326 | |
|
327 | 327 | - `filefoldmap` is a dict mapping normalized filenames to the denormalized |
|
328 | 328 | form that they appear as in the dirstate. |
|
329 | 329 | |
|
330 | 330 | - `dirfoldmap` is a dict mapping normalized directory names to the |
|
331 | 331 | denormalized form that they appear as in the dirstate. |
|
332 | 332 | """ |
|
333 | 333 | |
|
334 | 334 | ### Core data storage and access |
|
335 | 335 | |
|
336 | 336 | @propertycache |
|
337 | 337 | def _map(self): |
|
338 | 338 | self._map = {} |
|
339 | 339 | self.read() |
|
340 | 340 | return self._map |
|
341 | 341 | |
|
342 | 342 | @propertycache |
|
343 | 343 | def copymap(self): |
|
344 | 344 | self.copymap = {} |
|
345 | 345 | self._map |
|
346 | 346 | return self.copymap |
|
347 | 347 | |
|
348 | 348 | def clear(self): |
|
349 | 349 | self._map.clear() |
|
350 | 350 | self.copymap.clear() |
|
351 | 351 | self.setparents(self._nodeconstants.nullid, self._nodeconstants.nullid) |
|
352 | 352 | util.clearcachedproperty(self, b"_dirs") |
|
353 | 353 | util.clearcachedproperty(self, b"_alldirs") |
|
354 | 354 | util.clearcachedproperty(self, b"filefoldmap") |
|
355 | 355 | util.clearcachedproperty(self, b"dirfoldmap") |
|
356 | 356 | |
|
357 | 357 | def items(self): |
|
358 | 358 | return pycompat.iteritems(self._map) |
|
359 | 359 | |
|
360 | 360 | # forward for python2,3 compat |
|
361 | 361 | iteritems = items |
|
362 | 362 | |
|
363 | 363 | def debug_iter(self, all): |
|
364 | 364 | """ |
|
365 | 365 | Return an iterator of (filename, state, mode, size, mtime) tuples |
|
366 | 366 | |
|
367 | 367 | `all` is unused when Rust is not enabled |
|
368 | 368 | """ |
|
369 | 369 | for (filename, item) in self.items(): |
|
370 | 370 | yield (filename, item.state, item.mode, item.size, item.mtime) |
|
371 | 371 | |
|
372 | 372 | def keys(self): |
|
373 | 373 | return self._map.keys() |
|
374 | 374 | |
|
375 | 375 | ### reading/setting parents |
|
376 | 376 | |
|
377 | 377 | def setparents(self, p1, p2, fold_p2=False): |
|
378 | 378 | self._parents = (p1, p2) |
|
379 | 379 | self._dirtyparents = True |
|
380 | 380 | copies = {} |
|
381 | 381 | if fold_p2: |
|
382 | 382 | for f, s in pycompat.iteritems(self._map): |
|
383 | 383 | # Discard "merged" markers when moving away from a merge state |
|
384 | 384 | if s.p2_info: |
|
385 | 385 | source = self.copymap.pop(f, None) |
|
386 | 386 | if source: |
|
387 | 387 | copies[f] = source |
|
388 | 388 | s.drop_merge_data() |
|
389 | 389 | return copies |
|
390 | 390 | |
|
391 | 391 | ### disk interaction |
|
392 | 392 | |
|
393 | 393 | def read(self): |
|
394 | 394 | # ignore HG_PENDING because identity is used only for writing |
|
395 | 395 | self.identity = util.filestat.frompath( |
|
396 | 396 | self._opener.join(self._filename) |
|
397 | 397 | ) |
|
398 | 398 | |
|
399 | 399 | if self._use_dirstate_v2: |
|
400 | 400 | if not self.docket.uuid: |
|
401 | 401 | return |
|
402 | 402 | st = self._opener.read(self.docket.data_filename()) |
|
403 | 403 | else: |
|
404 | 404 | st = self._readdirstatefile() |
|
405 | 405 | |
|
406 | 406 | if not st: |
|
407 | 407 | return |
|
408 | 408 | |
|
409 | 409 | # TODO: adjust this estimate for dirstate-v2 |
|
410 | 410 | if util.safehasattr(parsers, b'dict_new_presized'): |
|
411 | 411 | # Make an estimate of the number of files in the dirstate based on |
|
412 | 412 | # its size. This trades wasting some memory for avoiding costly |
|
413 | 413 | # resizes. Each entry has a prefix of 17 bytes followed by one or |

414 | 414 | # two path names. Studies on various large-scale real-world repositories |

415 | 415 | # found 54 bytes to be a reasonable upper limit for the average path name. |
|
416 | 416 | # Copy entries are ignored for the sake of this estimate. |
|
417 | 417 | self._map = parsers.dict_new_presized(len(st) // 71) |
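As a quick sanity check of the divisor: the 17-byte fixed prefix plus the assumed 54-byte average path gives 71 bytes per entry, so a ~7.1 MB v1 dirstate file would presize the dict for roughly 100,000 files.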
|
418 | 418 | |
|
419 | 419 | # Python's garbage collector triggers a GC each time a certain number |
|
420 | 420 | # of container objects (the number being defined by |
|
421 | 421 | # gc.get_threshold()) are allocated. parse_dirstate creates a tuple |
|
422 | 422 | # for each file in the dirstate. The C version then immediately marks |
|
423 | 423 | # them as not to be tracked by the collector. However, this has no |
|
424 | 424 | # effect on when GCs are triggered, only on what objects the GC looks |
|
425 | 425 | # into. This means that O(number of files) GCs are unavoidable. |
|
426 | 426 | # Depending on when in the process's lifetime the dirstate is parsed, |
|
427 | 427 | # this can get very expensive. As a workaround, disable GC while |
|
428 | 428 | # parsing the dirstate. |
|
429 | 429 | # |
|
430 | 430 | # (we cannot decorate the function directly since it is in a C module) |
|
431 | 431 | if self._use_dirstate_v2: |
|
432 | 432 | p = self.docket.parents |
|
433 | 433 | meta = self.docket.tree_metadata |
|
434 | 434 | parse_dirstate = util.nogc(v2.parse_dirstate) |
|
435 | 435 | parse_dirstate(self._map, self.copymap, st, meta) |
|
436 | 436 | else: |
|
437 | 437 | parse_dirstate = util.nogc(parsers.parse_dirstate) |
|
438 | 438 | p = parse_dirstate(self._map, self.copymap, st) |
|
439 | 439 | if not self._dirtyparents: |
|
440 | 440 | self.setparents(*p) |
|
441 | 441 | |
|
442 | 442 | # Avoid excess attribute lookups by fast pathing certain checks |
|
443 | 443 | self.__contains__ = self._map.__contains__ |
|
444 | 444 | self.__getitem__ = self._map.__getitem__ |
|
445 | 445 | self.get = self._map.get |
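For reference, the `util.nogc` wrapper used above amounts to pausing the cyclic collector around one call. A hedged standalone sketch (the real helper lives in mercurial/util.py and differs in detail):

    import functools
    import gc

    def nogc(func):
        """Call func with the cyclic garbage collector paused (sketch)."""
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            was_enabled = gc.isenabled()
            gc.disable()
            try:
                return func(*args, **kwargs)
            finally:
                if was_enabled:
                    gc.enable()
        return wrapper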
|
446 | 446 | |
|
447 | 447 | def write(self, tr, st, now): |
|
448 | 448 | if self._use_dirstate_v2: |
|
449 | packed, meta = v2.pack_dirstate(self._map, self.copymap, now) | |

449 | packed, meta = v2.pack_dirstate(self._map, self.copymap) | |
|
450 | 450 | self.write_v2_no_append(tr, st, meta, packed) |
|
451 | 451 | else: |
|
452 | 452 | packed = parsers.pack_dirstate( |
|
453 | self._map, self.copymap, self.parents(), now | |

453 | self._map, self.copymap, self.parents() | |
|
454 | 454 | ) |
|
455 | 455 | st.write(packed) |
|
456 | 456 | st.close() |
|
457 | 457 | self._dirtyparents = False |
|
458 | 458 | |
|
459 | 459 | @propertycache |
|
460 | 460 | def identity(self): |
|
461 | 461 | self._map |
|
462 | 462 | return self.identity |
|
463 | 463 | |
|
464 | 464 | ### code related to maintaining and accessing "extra" property |
|
465 | 465 | # (e.g. "has_dir") |
|
466 | 466 | |
|
467 | 467 | def _dirs_incr(self, filename, old_entry=None): |
|
468 | 468 | """incremente the dirstate counter if applicable""" |
|
469 | 469 | if ( |
|
470 | 470 | old_entry is None or old_entry.removed |
|
471 | 471 | ) and "_dirs" in self.__dict__: |
|
472 | 472 | self._dirs.addpath(filename) |
|
473 | 473 | if old_entry is None and "_alldirs" in self.__dict__: |
|
474 | 474 | self._alldirs.addpath(filename) |
|
475 | 475 | |
|
476 | 476 | def _dirs_decr(self, filename, old_entry=None, remove_variant=False): |
|
477 | 477 | """decremente the dirstate counter if applicable""" |
|
478 | 478 | if old_entry is not None: |
|
479 | 479 | if "_dirs" in self.__dict__ and not old_entry.removed: |
|
480 | 480 | self._dirs.delpath(filename) |
|
481 | 481 | if "_alldirs" in self.__dict__ and not remove_variant: |
|
482 | 482 | self._alldirs.delpath(filename) |
|
483 | 483 | elif remove_variant and "_alldirs" in self.__dict__: |
|
484 | 484 | self._alldirs.addpath(filename) |
|
485 | 485 | if "filefoldmap" in self.__dict__: |
|
486 | 486 | normed = util.normcase(filename) |
|
487 | 487 | self.filefoldmap.pop(normed, None) |
|
488 | 488 | |
|
489 | 489 | @propertycache |
|
490 | 490 | def filefoldmap(self): |
|
491 | 491 | """Returns a dictionary mapping normalized case paths to their |
|
492 | 492 | non-normalized versions. |
|
493 | 493 | """ |
|
494 | 494 | try: |
|
495 | 495 | makefilefoldmap = parsers.make_file_foldmap |
|
496 | 496 | except AttributeError: |
|
497 | 497 | pass |
|
498 | 498 | else: |
|
499 | 499 | return makefilefoldmap( |
|
500 | 500 | self._map, util.normcasespec, util.normcasefallback |
|
501 | 501 | ) |
|
502 | 502 | |
|
503 | 503 | f = {} |
|
504 | 504 | normcase = util.normcase |
|
505 | 505 | for name, s in pycompat.iteritems(self._map): |
|
506 | 506 | if not s.removed: |
|
507 | 507 | f[normcase(name)] = name |
|
508 | 508 | f[b'.'] = b'.' # prevents useless util.fspath() invocation |
|
509 | 509 | return f |
|
510 | 510 | |
|
511 | 511 | @propertycache |
|
512 | 512 | def dirfoldmap(self): |
|
513 | 513 | f = {} |
|
514 | 514 | normcase = util.normcase |
|
515 | 515 | for name in self._dirs: |
|
516 | 516 | f[normcase(name)] = name |
|
517 | 517 | return f |
|
518 | 518 | |
|
519 | 519 | def hastrackeddir(self, d): |
|
520 | 520 | """ |
|
521 | 521 | Returns True if the dirstate contains a tracked (not removed) file |
|
522 | 522 | in this directory. |
|
523 | 523 | """ |
|
524 | 524 | return d in self._dirs |
|
525 | 525 | |
|
526 | 526 | def hasdir(self, d): |
|
527 | 527 | """ |
|
528 | 528 | Returns True if the dirstate contains a file (tracked or removed) |
|
529 | 529 | in this directory. |
|
530 | 530 | """ |
|
531 | 531 | return d in self._alldirs |
|
532 | 532 | |
|
533 | 533 | @propertycache |
|
534 | 534 | def _dirs(self): |
|
535 | 535 | return pathutil.dirs(self._map, only_tracked=True) |
|
536 | 536 | |
|
537 | 537 | @propertycache |
|
538 | 538 | def _alldirs(self): |
|
539 | 539 | return pathutil.dirs(self._map) |
|
540 | 540 | |
|
541 | 541 | ### code related to manipulation of entries and copy-sources |
|
542 | 542 | |
|
543 | 543 | def _refresh_entry(self, f, entry): |
|
544 | 544 | if not entry.any_tracked: |
|
545 | 545 | self._map.pop(f, None) |
|
546 | 546 | |
|
547 | 547 | def _insert_entry(self, f, entry): |
|
548 | 548 | self._map[f] = entry |
|
549 | 549 | |
|
550 | 550 | def _drop_entry(self, f): |
|
551 | 551 | self._map.pop(f, None) |
|
552 | 552 | self.copymap.pop(f, None) |
|
553 | 553 | |
|
554 | 554 | |
|
555 | 555 | if rustmod is not None: |
|
556 | 556 | |
|
557 | 557 | class dirstatemap(_dirstatemapcommon): |
|
558 | 558 | |
|
559 | 559 | ### Core data storage and access |
|
560 | 560 | |
|
561 | 561 | @propertycache |
|
562 | 562 | def _map(self): |
|
563 | 563 | """ |
|
564 | 564 | Fills the Dirstatemap when called. |
|
565 | 565 | """ |
|
566 | 566 | # ignore HG_PENDING because identity is used only for writing |
|
567 | 567 | self.identity = util.filestat.frompath( |
|
568 | 568 | self._opener.join(self._filename) |
|
569 | 569 | ) |
|
570 | 570 | |
|
571 | 571 | if self._use_dirstate_v2: |
|
572 | 572 | if self.docket.uuid: |
|
573 | 573 | # TODO: use mmap when possible |
|
574 | 574 | data = self._opener.read(self.docket.data_filename()) |
|
575 | 575 | else: |
|
576 | 576 | data = b'' |
|
577 | 577 | self._map = rustmod.DirstateMap.new_v2( |
|
578 | 578 | data, self.docket.data_size, self.docket.tree_metadata |
|
579 | 579 | ) |
|
580 | 580 | parents = self.docket.parents |
|
581 | 581 | else: |
|
582 | 582 | self._map, parents = rustmod.DirstateMap.new_v1( |
|
583 | 583 | self._readdirstatefile() |
|
584 | 584 | ) |
|
585 | 585 | |
|
586 | 586 | if parents and not self._dirtyparents: |
|
587 | 587 | self.setparents(*parents) |
|
588 | 588 | |
|
589 | 589 | self.__contains__ = self._map.__contains__ |
|
590 | 590 | self.__getitem__ = self._map.__getitem__ |
|
591 | 591 | self.get = self._map.get |
|
592 | 592 | return self._map |
|
593 | 593 | |
|
594 | 594 | @property |
|
595 | 595 | def copymap(self): |
|
596 | 596 | return self._map.copymap() |
|
597 | 597 | |
|
598 | 598 | def debug_iter(self, all): |
|
599 | 599 | """ |
|
600 | 600 | Return an iterator of (filename, state, mode, size, mtime) tuples |
|
601 | 601 | |
|
602 | 602 | `all`: also include dirstate tree nodes (with `state == b' '`) that |

603 | 603 | don't have an associated `DirstateItem`. |
|
604 | 604 | |
|
605 | 605 | """ |
|
606 | 606 | return self._map.debug_iter(all) |
|
607 | 607 | |
|
608 | 608 | def clear(self): |
|
609 | 609 | self._map.clear() |
|
610 | 610 | self.setparents( |
|
611 | 611 | self._nodeconstants.nullid, self._nodeconstants.nullid |
|
612 | 612 | ) |
|
613 | 613 | util.clearcachedproperty(self, b"_dirs") |
|
614 | 614 | util.clearcachedproperty(self, b"_alldirs") |
|
615 | 615 | util.clearcachedproperty(self, b"dirfoldmap") |
|
616 | 616 | |
|
617 | 617 | def items(self): |
|
618 | 618 | return self._map.items() |
|
619 | 619 | |
|
620 | 620 | # forward for python2,3 compat |
|
621 | 621 | iteritems = items |
|
622 | 622 | |
|
623 | 623 | def keys(self): |
|
624 | 624 | return iter(self._map) |
|
625 | 625 | |
|
626 | 626 | ### reading/setting parents |
|
627 | 627 | |
|
628 | 628 | def setparents(self, p1, p2, fold_p2=False): |
|
629 | 629 | self._parents = (p1, p2) |
|
630 | 630 | self._dirtyparents = True |
|
631 | 631 | copies = {} |
|
632 | 632 | if fold_p2: |
|
633 | 633 | # Collect into an intermediate list to avoid a `RuntimeError` |
|
634 | 634 | # exception due to mutation during iteration. |
|
635 | 635 | # TODO: move this whole loop to Rust, where `iter_mut` |
|
636 | 636 | # enables in-place mutation of elements of a collection while |
|
637 | 637 | # iterating it, without mutating the collection itself. |
|
638 | 638 | files_with_p2_info = [ |
|
639 | 639 | f for f, s in self._map.items() if s.p2_info |
|
640 | 640 | ] |
|
641 | 641 | rust_map = self._map |
|
642 | 642 | for f in files_with_p2_info: |
|
643 | 643 | e = rust_map.get(f) |
|
644 | 644 | source = self.copymap.pop(f, None) |
|
645 | 645 | if source: |
|
646 | 646 | copies[f] = source |
|
647 | 647 | e.drop_merge_data() |
|
648 | 648 | rust_map.set_dirstate_item(f, e) |
|
649 | 649 | return copies |
|
650 | 650 | |
|
651 | 651 | ### disk interaction |
|
652 | 652 | |
|
653 | 653 | @propertycache |
|
654 | 654 | def identity(self): |
|
655 | 655 | self._map |
|
656 | 656 | return self.identity |
|
657 | 657 | |
|
658 | 658 | def write(self, tr, st, now): |
|
659 | 659 | if not self._use_dirstate_v2: |
|
660 | 660 | p1, p2 = self.parents() |
|
661 | packed = self._map.write_v1(p1, p2, now) | |

661 | packed = self._map.write_v1(p1, p2) | |
|
662 | 662 | st.write(packed) |
|
663 | 663 | st.close() |
|
664 | 664 | self._dirtyparents = False |
|
665 | 665 | return |
|
666 | 666 | |
|
667 | 667 | # We can only append to an existing data file if there is one |
|
668 | 668 | can_append = self.docket.uuid is not None |
|
669 | packed, meta, append = self._map.write_v2(now, can_append) | |

669 | packed, meta, append = self._map.write_v2(can_append) | |
|
670 | 670 | if append: |
|
671 | 671 | docket = self.docket |
|
672 | 672 | data_filename = docket.data_filename() |
|
673 | 673 | if tr: |
|
674 | 674 | tr.add(data_filename, docket.data_size) |
|
675 | 675 | with self._opener(data_filename, b'r+b') as fp: |
|
676 | 676 | fp.seek(docket.data_size) |
|
677 | 677 | assert fp.tell() == docket.data_size |
|
678 | 678 | written = fp.write(packed) |
|
679 | 679 | if written is not None: # py2 may return None |
|
680 | 680 | assert written == len(packed), (written, len(packed)) |
|
681 | 681 | docket.data_size += len(packed) |
|
682 | 682 | docket.parents = self.parents() |
|
683 | 683 | docket.tree_metadata = meta |
|
684 | 684 | st.write(docket.serialize()) |
|
685 | 685 | st.close() |
|
686 | 686 | else: |
|
687 | 687 | self.write_v2_no_append(tr, st, meta, packed) |
|
688 | 688 | # Reload from the newly-written file |
|
689 | 689 | util.clearcachedproperty(self, b"_map") |
|
690 | 690 | self._dirtyparents = False |
|
691 | 691 | |
|
692 | 692 | ### code related to maintaining and accessing "extra" property |
|
693 | 693 | # (e.g. "has_dir") |
|
694 | 694 | |
|
695 | 695 | @propertycache |
|
696 | 696 | def filefoldmap(self): |
|
697 | 697 | """Returns a dictionary mapping normalized case paths to their |
|
698 | 698 | non-normalized versions. |
|
699 | 699 | """ |
|
700 | 700 | return self._map.filefoldmapasdict() |
|
701 | 701 | |
|
702 | 702 | def hastrackeddir(self, d): |
|
703 | 703 | return self._map.hastrackeddir(d) |
|
704 | 704 | |
|
705 | 705 | def hasdir(self, d): |
|
706 | 706 | return self._map.hasdir(d) |
|
707 | 707 | |
|
708 | 708 | @propertycache |
|
709 | 709 | def dirfoldmap(self): |
|
710 | 710 | f = {} |
|
711 | 711 | normcase = util.normcase |
|
712 | 712 | for name in self._map.tracked_dirs(): |
|
713 | 713 | f[normcase(name)] = name |
|
714 | 714 | return f |
|
715 | 715 | |
|
716 | 716 | ### code related to manipulation of entries and copy-sources |
|
717 | 717 | |
|
718 | 718 | def _refresh_entry(self, f, entry): |
|
719 | 719 | if not entry.any_tracked: |
|
720 | 720 | self._map.drop_item_and_copy_source(f) |
|
721 | 721 | else: |
|
722 | 722 | self._map.addfile(f, entry) |
|
723 | 723 | |
|
724 | 724 | def _insert_entry(self, f, entry): |
|
725 | 725 | self._map.addfile(f, entry) |
|
726 | 726 | |
|
727 | 727 | def _drop_entry(self, f): |
|
728 | 728 | self._map.drop_item_and_copy_source(f) |
|
729 | 729 | |
|
730 | 730 | def __setitem__(self, key, value): |
|
731 | 731 | assert isinstance(value, DirstateItem) |
|
732 | 732 | self._map.set_dirstate_item(key, value) |
@@ -1,414 +1,401 b'' | |||
|
1 | 1 | # v2.py - Pure-Python implementation of the dirstate-v2 file format |
|
2 | 2 | # |
|
3 | 3 | # Copyright Mercurial Contributors |
|
4 | 4 | # |
|
5 | 5 | # This software may be used and distributed according to the terms of the |
|
6 | 6 | # GNU General Public License version 2 or any later version. |
|
7 | 7 | |
|
8 | 8 | from __future__ import absolute_import |
|
9 | 9 | |
|
10 | 10 | import struct |
|
11 | 11 | |
|
12 | 12 | from ..thirdparty import attr |
|
13 | 13 | from .. import error, policy |
|
14 | 14 | |
|
15 | 15 | parsers = policy.importmod('parsers') |
|
16 | 16 | |
|
17 | 17 | |
|
18 | 18 | # Must match the constant of the same name in |
|
19 | 19 | # `rust/hg-core/src/dirstate_tree/on_disk.rs` |
|
20 | 20 | TREE_METADATA_SIZE = 44 |
|
21 | 21 | NODE_SIZE = 44 |
|
22 | 22 | |
|
23 | 23 | |
|
24 | 24 | # Must match the `TreeMetadata` Rust struct in |
|
25 | 25 | # `rust/hg-core/src/dirstate_tree/on_disk.rs`. See doc-comments there. |
|
26 | 26 | # |
|
27 | 27 | # * 4 bytes: start offset of root nodes |
|
28 | 28 | # * 4 bytes: number of root nodes |
|
29 | 29 | # * 4 bytes: total number of nodes in the tree that have an entry |
|
30 | 30 | # * 4 bytes: total number of nodes in the tree that have a copy source |
|
31 | 31 | # * 4 bytes: number of bytes in the data file that are not used anymore |
|
32 | 32 | # * 4 bytes: unused |
|
33 | 33 | # * 20 bytes: SHA-1 hash of ignore patterns |
|
34 | 34 | TREE_METADATA = struct.Struct('>LLLLL4s20s') |
|
35 | 35 | |
|
36 | 36 | |
|
37 | 37 | # Must match the `Node` Rust struct in |
|
38 | 38 | # `rust/hg-core/src/dirstate_tree/on_disk.rs`. See doc-comments there. |
|
39 | 39 | # |
|
40 | 40 | # * 4 bytes: start offset of full path |
|
41 | 41 | # * 2 bytes: length of the full path |
|
42 | 42 | # * 2 bytes: length within the full path before its "base name" |
|
43 | 43 | # * 4 bytes: start offset of the copy source if any, or zero for no copy source |
|
44 | 44 | # * 2 bytes: length of the copy source if any, or unused |
|
45 | 45 | # * 4 bytes: start offset of child nodes |
|
46 | 46 | # * 4 bytes: number of child nodes |
|
47 | 47 | # * 4 bytes: number of descendant nodes that have an entry |
|
48 | 48 | # * 4 bytes: number of descendant nodes that have a "tracked" state |
|
49 | 49 | # * 1 byte: flags |
|
50 | 50 | # * 4 bytes: expected size |
|
51 | 51 | # * 4 bytes: mtime seconds |
|
52 | 52 | # * 4 bytes: mtime nanoseconds |
|
53 | 53 | NODE = struct.Struct('>LHHLHLLLLHlll') |
|
54 | 54 | |
|
55 | 55 | |
|
56 | 56 | assert TREE_METADATA_SIZE == TREE_METADATA.size |
|
57 | 57 | assert NODE_SIZE == NODE.size |
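Both layouts can be exercised directly with the struct module; a small round-trip of one node record (field values arbitrary but in range):

    import struct

    NODE = struct.Struct('>LHHLHLLLLHlll')
    assert NODE.size == 44  # NODE_SIZE

    fields = (64, 5, 0, 0, 0, 0, 0, 0, 0, 1 << 3, 1234, 1650000000, 500)
    assert NODE.unpack(NODE.pack(*fields)) == fields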
|
58 | 58 | |
|
59 | 59 | # match constant in mercurial/pure/parsers.py |
|
60 | 60 | DIRSTATE_V2_DIRECTORY = 1 << 5 |
|
61 | 61 | |
|
62 | 62 | |
|
63 | 63 | def parse_dirstate(map, copy_map, data, tree_metadata): |
|
64 | 64 | """parse a full v2-dirstate from a binary data into dictionnaries: |
|
65 | 65 | |
|
66 | 66 | - map: a {path: entry} mapping that will be filled |
|
67 | 67 | - copy_map: a {path: copy-source} mapping that will be filled |
|
68 | 68 | - data: a binary blob containing v2 node data |

69 | 69 | - tree_metadata: a binary blob of the top-level node (from the docket) |
|
70 | 70 | """ |
|
71 | 71 | ( |
|
72 | 72 | root_nodes_start, |
|
73 | 73 | root_nodes_len, |
|
74 | 74 | _nodes_with_entry_count, |
|
75 | 75 | _nodes_with_copy_source_count, |
|
76 | 76 | _unreachable_bytes, |
|
77 | 77 | _unused, |
|
78 | 78 | _ignore_patterns_hash, |
|
79 | 79 | ) = TREE_METADATA.unpack(tree_metadata) |
|
80 | 80 | parse_nodes(map, copy_map, data, root_nodes_start, root_nodes_len) |
|
81 | 81 | |
|
82 | 82 | |
|
83 | 83 | def parse_nodes(map, copy_map, data, start, len): |
|
84 | 84 | """parse <len> nodes from <data> starting at offset <start> |
|
85 | 85 | |
|
86 | 86 | This is used by parse_dirstate to recursively fill `map` and `copy_map`. |
|
87 | 87 | |
|
88 | 88 | All directory-specific information is ignored and does not need any |
|
89 | 89 | processing (DIRECTORY, ALL_UNKNOWN_RECORDED, ALL_IGNORED_RECORDED) |
|
90 | 90 | """ |
|
91 | 91 | for i in range(len): |
|
92 | 92 | node_start = start + NODE_SIZE * i |
|
93 | 93 | node_bytes = slice_with_len(data, node_start, NODE_SIZE) |
|
94 | 94 | ( |
|
95 | 95 | path_start, |
|
96 | 96 | path_len, |
|
97 | 97 | _basename_start, |
|
98 | 98 | copy_source_start, |
|
99 | 99 | copy_source_len, |
|
100 | 100 | children_start, |
|
101 | 101 | children_count, |
|
102 | 102 | _descendants_with_entry_count, |
|
103 | 103 | _tracked_descendants_count, |
|
104 | 104 | flags, |
|
105 | 105 | size, |
|
106 | 106 | mtime_s, |
|
107 | 107 | mtime_ns, |
|
108 | 108 | ) = NODE.unpack(node_bytes) |
|
109 | 109 | |
|
110 | 110 | # Parse child nodes of this node recursively |
|
111 | 111 | parse_nodes(map, copy_map, data, children_start, children_count) |
|
112 | 112 | |
|
113 | 113 | item = parsers.DirstateItem.from_v2_data(flags, size, mtime_s, mtime_ns) |
|
114 | 114 | if not item.any_tracked: |
|
115 | 115 | continue |
|
116 | 116 | path = slice_with_len(data, path_start, path_len) |
|
117 | 117 | map[path] = item |
|
118 | 118 | if copy_source_start: |
|
119 | 119 | copy_map[path] = slice_with_len( |
|
120 | 120 | data, copy_source_start, copy_source_len |
|
121 | 121 | ) |
|
122 | 122 | |
|
123 | 123 | |
|
124 | 124 | def slice_with_len(data, start, len): |
|
125 | 125 | return data[start : start + len] |
|
126 | 126 | |
|
127 | 127 | |
|
128 | 128 | @attr.s |
|
129 | 129 | class Node(object): |
|
130 | 130 | path = attr.ib() |
|
131 | 131 | entry = attr.ib() |
|
132 | 132 | parent = attr.ib(default=None) |
|
133 | 133 | children_count = attr.ib(default=0) |
|
134 | 134 | children_offset = attr.ib(default=0) |
|
135 | 135 | descendants_with_entry = attr.ib(default=0) |
|
136 | 136 | tracked_descendants = attr.ib(default=0) |
|
137 | 137 | |
|
138 | 138 | def pack(self, copy_map, paths_offset): |
|
139 | 139 | path = self.path |
|
140 | 140 | copy = copy_map.get(path) |
|
141 | 141 | entry = self.entry |
|
142 | 142 | |
|
143 | 143 | path_start = paths_offset |
|
144 | 144 | path_len = len(path) |
|
145 | 145 | basename_start = path.rfind(b'/') + 1 # 0 if rfind returns -1 |
|
146 | 146 | if copy is not None: |
|
147 | 147 | copy_source_start = paths_offset + len(path) |
|
148 | 148 | copy_source_len = len(copy) |
|
149 | 149 | else: |
|
150 | 150 | copy_source_start = 0 |
|
151 | 151 | copy_source_len = 0 |
|
152 | 152 | if entry is not None: |
|
153 | 153 | flags, size, mtime_s, mtime_ns = entry.v2_data() |
|
154 | 154 | else: |
|
155 | 155 | # There are no mtime-cached directories in the Python implementation |
|
156 | 156 | flags = DIRSTATE_V2_DIRECTORY |
|
157 | 157 | size = 0 |
|
158 | 158 | mtime_s = 0 |
|
159 | 159 | mtime_ns = 0 |
|
160 | 160 | return NODE.pack( |
|
161 | 161 | path_start, |
|
162 | 162 | path_len, |
|
163 | 163 | basename_start, |
|
164 | 164 | copy_source_start, |
|
165 | 165 | copy_source_len, |
|
166 | 166 | self.children_offset, |
|
167 | 167 | self.children_count, |
|
168 | 168 | self.descendants_with_entry, |
|
169 | 169 | self.tracked_descendants, |
|
170 | 170 | flags, |
|
171 | 171 | size, |
|
172 | 172 | mtime_s, |
|
173 | 173 | mtime_ns, |
|
174 | 174 | ) |
|
175 | 175 | |
|
176 | 176 | |
|
177 | def pack_dirstate(map, copy_map, now): | |

177 | def pack_dirstate(map, copy_map): | |
|
178 | 178 | """ |
|
179 | 179 | Pack `map` and `copy_map` into the dirstate v2 binary format and return |
|
180 | 180 | the bytearray. |
|
181 | `now` is a timestamp of the current filesystem time used to detect race | |
|
182 | conditions in writing the dirstate to disk, see inline comment. | |
|
183 | 181 | |
|
184 | 182 | The on-disk format expects a tree-like structure where the leaves are |
|
185 | 183 | written first (and sorted per-directory), going up levels until the root |
|
186 | 184 | node and writing that one to the docket. See more details on the on-disk |
|
187 | 185 | format in `mercurial/helptext/internals/dirstate-v2`. |
|
188 | 186 | |
|
189 | 187 | Since both `map` and `copy_map` are flat dicts we need to figure out the |
|
190 | 188 | hierarchy. This algorithm does so without having to build the entire tree |
|
191 | 189 | in-memory: it only keeps the minimum number of nodes around to satisfy the |
|
192 | 190 | format. |
|
193 | 191 | |
|
194 | 192 | # Algorithm explanation |
|
195 | 193 | |
|
196 | 194 | This explanation does not talk about the different counters for tracked |
|
197 | 195 | descendants and storing the copies, but that work is pretty simple once this |
|
198 | 196 | algorithm is in place. |
|
199 | 197 | |
|
200 | 198 | ## Building a subtree |
|
201 | 199 | |
|
202 | 200 | First, sort `map`: this makes it so the leaves of the tree are contiguous |
|
203 | 201 | per directory (i.e. a/b/c and a/b/d will be next to each other in the list), |
|
204 | 202 | and enables us to use the ordering of folders to have a "cursor" of the |
|
205 | 203 | current folder we're in without ever going twice in the same branch of the |
|
206 | 204 | tree. The cursor is a node that remembers its parent and any information |
|
207 | 205 | relevant to the format (see the `Node` class), building the relevant part |
|
208 | 206 | of the tree lazily. |
|
209 | 207 | Then, for each file in `map`, move the cursor into the tree to the |
|
210 | 208 | corresponding folder of the file: for example, if the very first file |
|
211 | 209 | is "a/b/c", we start from `Node[""]`, create `Node["a"]` which points to |
|
212 | 210 | its parent `Node[""]`, then create `Node["a/b"]`, which points to its parent |
|
213 | 211 | `Node["a"]`. These nodes are kept around in a stack. |
|
214 | 212 | If the next file in `map` is in the same subtree ("a/b/d" or "a/b/e/f"), we |
|
215 | 213 | add it to the stack and keep looping with the same logic of creating the |
|
216 | 214 | tree nodes as needed. If however the next file in `map` is *not* in the same |
|
217 | 215 | subtree ("a/other", if we're still in the "a/b" folder), then we know that |
|
218 | 216 | the subtree we're in is complete. |
|
219 | 217 | |
|
220 | 218 | ## Writing the subtree |
|
221 | 219 | |
|
222 | 220 | We have the entire subtree in the stack, so we start writing it to disk |
|
223 | 221 | folder by folder. The way we write a folder is to pop the stack into a list |
|
224 | 222 | until the folder changes, then reverse this list of direct children (to satisfy |
|
225 | 223 | the format requirement that children be sorted). This process repeats until |
|
226 | 224 | we hit the "other" subtree. |
|
227 | 225 | |
|
228 | 226 | An example: |
|
229 | 227 | a |
|
230 | 228 | dir1/b |
|
231 | 229 | dir1/c |
|
232 | 230 | dir2/dir3/d |
|
233 | 231 | dir2/dir3/e |
|
234 | 232 | dir2/f |
|
235 | 233 | |
|
236 | 234 | Would have us: |
|
237 | 235 | - add to the stack until "dir2/dir3/e" |
|
238 | 236 | - realize that "dir2/f" is in a different subtree |
|
239 | 237 | - pop "dir2/dir3/e", "dir2/dir3/d", reverse them so they're sorted and |
|
240 | 238 | pack them since the next entry is "dir2/dir3" |
|
241 | 239 | - go back up to "dir2" |
|
242 | 240 | - add "dir2/f" to the stack |
|
243 | 241 | - realize we're done with the map |
|
244 | 242 | - pop "dir2/f", "dir2/dir3" from the stack, reverse and pack them |
|
245 | 243 | - go up to the root node, do the same to write "a", "dir1" and "dir2" in |
|
246 | 244 | that order |
|
247 | 245 | |
|
248 | 246 | ## Special case for the root node |
|
249 | 247 | |
|
250 | 248 | The root node is not serialized in the format, but its information is |
|
251 | 249 | written to the docket. Again, see more details on the on-disk format in |
|
252 | 250 | `mercurial/helptext/internals/dirstate-v2`. |
|
253 | 251 | """ |
|
254 | 252 | data = bytearray() |
|
255 | 253 | root_nodes_start = 0 |
|
256 | 254 | root_nodes_len = 0 |
|
257 | 255 | nodes_with_entry_count = 0 |
|
258 | 256 | nodes_with_copy_source_count = 0 |
|
259 | 257 | # Will always be 0 since this implementation always re-writes everything |
|
260 | 258 | # to disk |
|
261 | 259 | unreachable_bytes = 0 |
|
262 | 260 | unused = b'\x00' * 4 |
|
263 | 261 | # This is an optimization that's only useful for the Rust implementation |
|
264 | 262 | ignore_patterns_hash = b'\x00' * 20 |
|
265 | 263 | |
|
266 | 264 | if len(map) == 0: |
|
267 | 265 | tree_metadata = TREE_METADATA.pack( |
|
268 | 266 | root_nodes_start, |
|
269 | 267 | root_nodes_len, |
|
270 | 268 | nodes_with_entry_count, |
|
271 | 269 | nodes_with_copy_source_count, |
|
272 | 270 | unreachable_bytes, |
|
273 | 271 | unused, |
|
274 | 272 | ignore_patterns_hash, |
|
275 | 273 | ) |
|
276 | 274 | return data, tree_metadata |
|
277 | 275 | |
|
278 | 276 | sorted_map = sorted(map.items(), key=lambda x: x[0]) |
|
279 | 277 | |
|
280 | 278 | # Use a stack to not have to only remember the nodes we currently need |
|
281 | 279 | # instead of building the entire tree in memory |
|
282 | 280 | stack = [] |
|
283 | 281 | current_node = Node(b"", None) |
|
284 | 282 | stack.append(current_node) |
|
285 | 283 | |
|
286 | 284 | for index, (path, entry) in enumerate(sorted_map, 1): |
|
287 | if entry.need_delay(now): | |
|
288 | # The file was last modified "simultaneously" with the current | |
|
289 | # write to dirstate (i.e. within the same second for file- | |
|
290 | # systems with a granularity of 1 sec). This commonly happens | |
|
291 | # for at least a couple of files on 'update'. | |
|
292 | # The user could change the file without changing its size | |
|
293 | # within the same second. Invalidate the file's mtime in | |
|
294 | # dirstate, forcing future 'status' calls to compare the | |
|
295 | # contents of the file if the size is the same. This prevents | |
|
296 | # mistakenly treating such files as clean. | |
|
297 | entry.set_possibly_dirty() | |
|
298 | 285 | nodes_with_entry_count += 1 |
|
299 | 286 | if path in copy_map: |
|
300 | 287 | nodes_with_copy_source_count += 1 |
|
301 | 288 | current_folder = get_folder(path) |
|
302 | 289 | current_node = move_to_correct_node_in_tree( |
|
303 | 290 | current_folder, current_node, stack |
|
304 | 291 | ) |
|
305 | 292 | |
|
306 | 293 | current_node.children_count += 1 |
|
307 | 294 | # Entries from `map` are never `None` |
|
308 | 295 | if entry.tracked: |
|
309 | 296 | current_node.tracked_descendants += 1 |
|
310 | 297 | current_node.descendants_with_entry += 1 |
|
311 | 298 | stack.append(Node(path, entry, current_node)) |
|
312 | 299 | |
|
313 | 300 | should_pack = True |
|
314 | 301 | next_path = None |
|
315 | 302 | if index < len(sorted_map): |
|
316 | 303 | # Determine if the next entry is in the same sub-tree, if so don't |
|
317 | 304 | # pack yet |
|
318 | 305 | next_path = sorted_map[index][0] |
|
319 | 306 | should_pack = not get_folder(next_path).startswith(current_folder) |
|
320 | 307 | if should_pack: |
|
321 | 308 | pack_directory_children(current_node, copy_map, data, stack) |
|
322 | 309 | while stack and current_node.path != b"": |
|
323 | 310 | # Go up the tree and write until we reach the folder of the next |
|
324 | 311 | # entry (if any, otherwise the root) |
|
325 | 312 | parent = current_node.parent |
|
326 | 313 | in_parent_folder_of_next_entry = next_path is not None and ( |
|
327 | 314 | get_folder(next_path).startswith(get_folder(stack[-1].path)) |
|
328 | 315 | ) |
|
329 | 316 | if parent is None or in_parent_folder_of_next_entry: |
|
330 | 317 | break |
|
331 | 318 | pack_directory_children(parent, copy_map, data, stack) |
|
332 | 319 | current_node = parent |
|
333 | 320 | |
|
334 | 321 | # Special case for the root node since we don't write it to disk, only its |
|
335 | 322 | # children to the docket |
|
336 | 323 | current_node = stack.pop() |
|
337 | 324 | assert current_node.path == b"", current_node.path |
|
338 | 325 | assert len(stack) == 0, len(stack) |
|
339 | 326 | |
|
340 | 327 | tree_metadata = TREE_METADATA.pack( |
|
341 | 328 | current_node.children_offset, |
|
342 | 329 | current_node.children_count, |
|
343 | 330 | nodes_with_entry_count, |
|
344 | 331 | nodes_with_copy_source_count, |
|
345 | 332 | unreachable_bytes, |
|
346 | 333 | unused, |
|
347 | 334 | ignore_patterns_hash, |
|
348 | 335 | ) |
|
349 | 336 | |
|
350 | 337 | return data, tree_metadata |
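A hedged usage sketch of the function above, assuming the pure-Python DirstateItem and the standard module layout (paths are bytes throughout):

    from mercurial.dirstateutils import v2
    from mercurial.pure.parsers import DirstateItem

    item = DirstateItem(wc_tracked=True, p1_tracked=True)
    dmap = {b'a': item, b'dir1/b': item, b'dir1/c': item}
    data, meta = v2.pack_dirstate(dmap, {})
    # `data` holds the node table plus path bytes; `meta` is the 44-byte
    # tree metadata destined for the docket.
    assert len(meta) == v2.TREE_METADATA_SIZE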
|
351 | 338 | |
|
352 | 339 | |
|
353 | 340 | def get_folder(path): |
|
354 | 341 | """ |
|
355 | 342 | Return the folder of the path that's given, an empty string for root paths. |
|
356 | 343 | """ |
|
357 | 344 | return path.rsplit(b'/', 1)[0] if b'/' in path else b'' |
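For illustration, get_folder on paths from the docstring example above:

    assert get_folder(b'dir2/dir3/d') == b'dir2/dir3'
    assert get_folder(b'dir1/b') == b'dir1'
    assert get_folder(b'a') == b''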
|
358 | 345 | |
|
359 | 346 | |
|
360 | 347 | def move_to_correct_node_in_tree(target_folder, current_node, stack): |
|
361 | 348 | """ |
|
362 | 349 | Move inside the dirstate node tree to the node corresponding to |
|
363 | 350 | `target_folder`, creating the missing nodes along the way if needed. |
|
364 | 351 | """ |
|
365 | 352 | while target_folder != current_node.path: |
|
366 | 353 | if target_folder.startswith(current_node.path): |
|
367 | 354 | # We need to go down a folder |
|
368 | 355 | prefix = target_folder[len(current_node.path) :].lstrip(b'/') |
|
369 | 356 | subfolder_name = prefix.split(b'/', 1)[0] |
|
370 | 357 | if current_node.path: |
|
371 | 358 | subfolder_path = current_node.path + b'/' + subfolder_name |
|
372 | 359 | else: |
|
373 | 360 | subfolder_path = subfolder_name |
|
374 | 361 | next_node = stack[-1] |
|
375 | 362 | if next_node.path == target_folder: |
|
376 | 363 | # This folder is now a file and only contains removed entries; |

377 | 364 | # merge with the last node |
|
378 | 365 | current_node = next_node |
|
379 | 366 | else: |
|
380 | 367 | current_node.children_count += 1 |
|
381 | 368 | current_node = Node(subfolder_path, None, current_node) |
|
382 | 369 | stack.append(current_node) |
|
383 | 370 | else: |
|
384 | 371 | # We need to go up a folder |
|
385 | 372 | current_node = current_node.parent |
|
386 | 373 | return current_node |
|
387 | 374 | |
|
388 | 375 | |
|
389 | 376 | def pack_directory_children(node, copy_map, data, stack): |
|
390 | 377 | """ |
|
391 | 378 | Write the binary representation of the direct sorted children of `node` to |
|
392 | 379 | `data` |
|
393 | 380 | """ |
|
394 | 381 | direct_children = [] |
|
395 | 382 | |
|
396 | 383 | while stack[-1].path != b"" and get_folder(stack[-1].path) == node.path: |
|
397 | 384 | direct_children.append(stack.pop()) |
|
398 | 385 | if not direct_children: |
|
399 | 386 | raise error.ProgrammingError(b"no direct children for %r" % node.path) |
|
400 | 387 | |
|
401 | 388 | # Reverse the stack to get the correct sorted order |
|
402 | 389 | direct_children.reverse() |
|
403 | 390 | packed_children = bytearray() |
|
404 | 391 | # Write the paths to `data`. Pack child nodes but don't write them yet |
|
405 | 392 | for child in direct_children: |
|
406 | 393 | packed = child.pack(copy_map=copy_map, paths_offset=len(data)) |
|
407 | 394 | packed_children.extend(packed) |
|
408 | 395 | data.extend(child.path) |
|
409 | 396 | data.extend(copy_map.get(child.path, b"")) |
|
410 | 397 | node.tracked_descendants += child.tracked_descendants |
|
411 | 398 | node.descendants_with_entry += child.descendants_with_entry |
|
412 | 399 | # Write the fixed-size child nodes all together |
|
413 | 400 | node.children_offset = len(data) |
|
414 | 401 | data.extend(packed_children) |
@@ -1,937 +1,921 b'' | |||
|
1 | 1 | # parsers.py - Python implementation of parsers.c |
|
2 | 2 | # |
|
3 | 3 | # Copyright 2009 Olivia Mackall <olivia@selenic.com> and others |
|
4 | 4 | # |
|
5 | 5 | # This software may be used and distributed according to the terms of the |
|
6 | 6 | # GNU General Public License version 2 or any later version. |
|
7 | 7 | |
|
8 | 8 | from __future__ import absolute_import |
|
9 | 9 | |
|
10 | 10 | import stat |
|
11 | 11 | import struct |
|
12 | 12 | import zlib |
|
13 | 13 | |
|
14 | 14 | from ..node import ( |
|
15 | 15 | nullrev, |
|
16 | 16 | sha1nodeconstants, |
|
17 | 17 | ) |
|
18 | 18 | from ..thirdparty import attr |
|
19 | 19 | from .. import ( |
|
20 | 20 | error, |
|
21 | 21 | pycompat, |
|
22 | 22 | revlogutils, |
|
23 | 23 | util, |
|
24 | 24 | ) |
|
25 | 25 | |
|
26 | 26 | from ..revlogutils import nodemap as nodemaputil |
|
27 | 27 | from ..revlogutils import constants as revlog_constants |
|
28 | 28 | |
|
29 | 29 | stringio = pycompat.bytesio |
|
30 | 30 | |
|
31 | 31 | |
|
32 | 32 | _pack = struct.pack |
|
33 | 33 | _unpack = struct.unpack |
|
34 | 34 | _compress = zlib.compress |
|
35 | 35 | _decompress = zlib.decompress |
|
36 | 36 | |
|
37 | 37 | |
|
38 | 38 | # a special value used internally for `size` if the file comes from the other parent |
|
39 | 39 | FROM_P2 = -2 |
|
40 | 40 | |
|
41 | 41 | # a special value used internally for `size` if the file is modified/merged/added |
|
42 | 42 | NONNORMAL = -1 |
|
43 | 43 | |
|
44 | 44 | # a special value used internally for `time` if the time is ambiguous |
|
45 | 45 | AMBIGUOUS_TIME = -1 |
|
46 | 46 | |
|
47 | 47 | # Bits of the `flags` byte inside a node in the file format |
|
48 | 48 | DIRSTATE_V2_WDIR_TRACKED = 1 << 0 |
|
49 | 49 | DIRSTATE_V2_P1_TRACKED = 1 << 1 |
|
50 | 50 | DIRSTATE_V2_P2_INFO = 1 << 2 |
|
51 | 51 | DIRSTATE_V2_MODE_EXEC_PERM = 1 << 3 |
|
52 | 52 | DIRSTATE_V2_MODE_IS_SYMLINK = 1 << 4 |
|
53 | 53 | DIRSTATE_V2_HAS_FALLBACK_EXEC = 1 << 5 |
|
54 | 54 | DIRSTATE_V2_FALLBACK_EXEC = 1 << 6 |
|
55 | 55 | DIRSTATE_V2_HAS_FALLBACK_SYMLINK = 1 << 7 |
|
56 | 56 | DIRSTATE_V2_FALLBACK_SYMLINK = 1 << 8 |
|
57 | 57 | DIRSTATE_V2_EXPECTED_STATE_IS_MODIFIED = 1 << 9 |
|
58 | 58 | DIRSTATE_V2_HAS_MODE_AND_SIZE = 1 << 10 |
|
59 | 59 | DIRSTATE_V2_HAS_MTIME = 1 << 11 |
|
60 | 60 | DIRSTATE_V2_MTIME_SECOND_AMBIGUOUS = 1 << 12 |
|
61 | 61 | DIRSTATE_V2_DIRECTORY = 1 << 13 |
|
62 | 62 | DIRSTATE_V2_ALL_UNKNOWN_RECORDED = 1 << 14 |
|
63 | 63 | DIRSTATE_V2_ALL_IGNORED_RECORDED = 1 << 15 |
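As a small illustration of how these bits combine: a file tracked in the working copy and in p1, with a cached mode/size and mtime, would carry (values as defined above):

    flags = (
        DIRSTATE_V2_WDIR_TRACKED
        | DIRSTATE_V2_P1_TRACKED
        | DIRSTATE_V2_HAS_MODE_AND_SIZE
        | DIRSTATE_V2_HAS_MTIME
    )
    assert flags == 0b110000000011  # bits 0, 1, 10 and 11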
|
64 | 64 | |
|
65 | 65 | |
|
66 | 66 | @attr.s(slots=True, init=False) |
|
67 | 67 | class DirstateItem(object): |
|
68 | 68 | """represent a dirstate entry |
|
69 | 69 | |
|
70 | 70 | It holds multiple attributes |
|
71 | 71 | |
|
72 | 72 | # about file tracking |
|
73 | 73 | - wc_tracked: is the file tracked by the working copy |
|
74 | 74 | - p1_tracked: is the file tracked in the working copy's first parent |
|
75 | 75 | - p2_info: the file has been involved in some merge operation. Either |
|
76 | 76 | because it was actually merged, or because the p2 version was |
|
77 | 77 | ahead, or because some rename moved it there. In either case |
|
78 | 78 | `hg status` will want it displayed as modified. |
|
79 | 79 | |
|
80 | 80 | # about the file state expected from p1 manifest: |
|
81 | 81 | - mode: the file mode in p1 |
|
82 | 82 | - size: the file size in p1 |
|
83 | 83 | |
|
84 | 84 | These values can be set to None, which means we don't have a meaningful value

85 | 85 | to compare with. Either because we don't really care about them as their

86 | 86 | `status` is known without having to look at the disk, or because we don't
|
87 | 87 | know these right now and a full comparison will be needed to find out if |
|
88 | 88 | the file is clean. |
|
89 | 89 | |
|
90 | 90 | # about the file state on disk last time we saw it: |
|
91 | 91 | - mtime: the last known clean mtime for the file. |
|
92 | 92 | |
|
93 | 93 | This value can be set to None if no cacheable state exists. Either because we
|
94 | 94 | do not care (see previous section) or because we could not cache something |
|
95 | 95 | yet. |
|
96 | 96 | """ |
|
97 | 97 | |
|
98 | 98 | _wc_tracked = attr.ib() |
|
99 | 99 | _p1_tracked = attr.ib() |
|
100 | 100 | _p2_info = attr.ib() |
|
101 | 101 | _mode = attr.ib() |
|
102 | 102 | _size = attr.ib() |
|
103 | 103 | _mtime_s = attr.ib() |
|
104 | 104 | _mtime_ns = attr.ib() |
|
105 | 105 | _fallback_exec = attr.ib() |
|
106 | 106 | _fallback_symlink = attr.ib() |
|
107 | 107 | |
|
108 | 108 | def __init__( |
|
109 | 109 | self, |
|
110 | 110 | wc_tracked=False, |
|
111 | 111 | p1_tracked=False, |
|
112 | 112 | p2_info=False, |
|
113 | 113 | has_meaningful_data=True, |
|
114 | 114 | has_meaningful_mtime=True, |
|
115 | 115 | parentfiledata=None, |
|
116 | 116 | fallback_exec=None, |
|
117 | 117 | fallback_symlink=None, |
|
118 | 118 | ): |
|
119 | 119 | self._wc_tracked = wc_tracked |
|
120 | 120 | self._p1_tracked = p1_tracked |
|
121 | 121 | self._p2_info = p2_info |
|
122 | 122 | |
|
123 | 123 | self._fallback_exec = fallback_exec |
|
124 | 124 | self._fallback_symlink = fallback_symlink |
|
125 | 125 | |
|
126 | 126 | self._mode = None |
|
127 | 127 | self._size = None |
|
128 | 128 | self._mtime_s = None |
|
129 | 129 | self._mtime_ns = None |
|
130 | 130 | if parentfiledata is None: |
|
131 | 131 | has_meaningful_mtime = False |
|
132 | 132 | has_meaningful_data = False |
|
133 | 133 | elif parentfiledata[2] is None: |
|
134 | 134 | has_meaningful_mtime = False |
|
135 | 135 | if has_meaningful_data: |
|
136 | 136 | self._mode = parentfiledata[0] |
|
137 | 137 | self._size = parentfiledata[1] |
|
138 | 138 | if has_meaningful_mtime: |
|
139 | 139 | self._mtime_s, self._mtime_ns = parentfiledata[2] |
|
140 | 140 | |
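
A hedged usage sketch for this constructor (assuming a Mercurial checkout or install providing mercurial.pure.parsers): a clean file known from p1, with cached mode, size, and mtime:

    from mercurial.pure.parsers import DirstateItem

    item = DirstateItem(
        wc_tracked=True,
        p1_tracked=True,
        parentfiledata=(0o644, 12, (1700000000, 0)),
    )
    assert item.maybe_clean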
|
141 | 141 | @classmethod |
|
142 | 142 | def from_v2_data(cls, flags, size, mtime_s, mtime_ns): |
|
143 | 143 | """Build a new DirstateItem object from V2 data""" |
|
144 | 144 | has_mode_size = bool(flags & DIRSTATE_V2_HAS_MODE_AND_SIZE) |
|
145 | 145 | has_meaningful_mtime = bool(flags & DIRSTATE_V2_HAS_MTIME) |
|
146 | 146 | if flags & DIRSTATE_V2_MTIME_SECOND_AMBIGUOUS: |
|
147 | 147 | # The current code is not able to do the more subtle comparison that the

148 | 148 | # MTIME_SECOND_AMBIGUOUS flag requires, so we ignore the mtime.
|
149 | 149 | has_meaningful_mtime = False |
|
150 | 150 | mode = None |
|
151 | 151 | |
|
152 | 152 | if flags & DIRSTATE_V2_EXPECTED_STATE_IS_MODIFIED:
|
153 | 153 | # we do not have support for this flag in the code yet, |
|
154 | 154 | # force a lookup for this file. |
|
155 | 155 | has_mode_size = False |
|
156 | 156 | has_meaningful_mtime = False |
|
157 | 157 | |
|
158 | 158 | fallback_exec = None |
|
159 | 159 | if flags & DIRSTATE_V2_HAS_FALLBACK_EXEC: |
|
160 | 160 | fallback_exec = flags & DIRSTATE_V2_FALLBACK_EXEC |
|
161 | 161 | |
|
162 | 162 | fallback_symlink = None |
|
163 | 163 | if flags & DIRSTATE_V2_HAS_FALLBACK_SYMLINK: |
|
164 | 164 | fallback_symlink = flags & DIRSTATE_V2_FALLBACK_SYMLINK |
|
165 | 165 | |
|
166 | 166 | if has_mode_size: |
|
167 | 167 | assert stat.S_IXUSR == 0o100 |
|
168 | 168 | if flags & DIRSTATE_V2_MODE_EXEC_PERM: |
|
169 | 169 | mode = 0o755 |
|
170 | 170 | else: |
|
171 | 171 | mode = 0o644 |
|
172 | 172 | if flags & DIRSTATE_V2_MODE_IS_SYMLINK: |
|
173 | 173 | mode |= stat.S_IFLNK |
|
174 | 174 | else: |
|
175 | 175 | mode |= stat.S_IFREG |
|
176 | 176 | return cls( |
|
177 | 177 | wc_tracked=bool(flags & DIRSTATE_V2_WDIR_TRACKED), |
|
178 | 178 | p1_tracked=bool(flags & DIRSTATE_V2_P1_TRACKED), |
|
179 | 179 | p2_info=bool(flags & DIRSTATE_V2_P2_INFO), |
|
180 | 180 | has_meaningful_data=has_mode_size, |
|
181 | 181 | has_meaningful_mtime=has_meaningful_mtime, |
|
182 | 182 | parentfiledata=(mode, size, (mtime_s, mtime_ns)), |
|
183 | 183 | fallback_exec=fallback_exec, |
|
184 | 184 | fallback_symlink=fallback_symlink, |
|
185 | 185 | ) |
|
186 | 186 | |
|
187 | 187 | @classmethod |
|
188 | 188 | def from_v1_data(cls, state, mode, size, mtime): |
|
189 | 189 | """Build a new DirstateItem object from V1 data |
|
190 | 190 | |
|
191 | 191 | Since the dirstate-v1 format is frozen, the signature of this function |
|
192 | 192 | is not expected to change, unlike the __init__ one. |
|
193 | 193 | """ |
|
194 | 194 | if state == b'm': |
|
195 | 195 | return cls(wc_tracked=True, p1_tracked=True, p2_info=True) |
|
196 | 196 | elif state == b'a': |
|
197 | 197 | return cls(wc_tracked=True) |
|
198 | 198 | elif state == b'r': |
|
199 | 199 | if size == NONNORMAL: |
|
200 | 200 | p1_tracked = True |
|
201 | 201 | p2_info = True |
|
202 | 202 | elif size == FROM_P2: |
|
203 | 203 | p1_tracked = False |
|
204 | 204 | p2_info = True |
|
205 | 205 | else: |
|
206 | 206 | p1_tracked = True |
|
207 | 207 | p2_info = False |
|
208 | 208 | return cls(p1_tracked=p1_tracked, p2_info=p2_info) |
|
209 | 209 | elif state == b'n': |
|
210 | 210 | if size == FROM_P2: |
|
211 | 211 | return cls(wc_tracked=True, p2_info=True) |
|
212 | 212 | elif size == NONNORMAL: |
|
213 | 213 | return cls(wc_tracked=True, p1_tracked=True) |
|
214 | 214 | elif mtime == AMBIGUOUS_TIME: |
|
215 | 215 | return cls( |
|
216 | 216 | wc_tracked=True, |
|
217 | 217 | p1_tracked=True, |
|
218 | 218 | has_meaningful_mtime=False, |
|
219 | 219 | parentfiledata=(mode, size, (42, 0)), |
|
220 | 220 | ) |
|
221 | 221 | else: |
|
222 | 222 | return cls( |
|
223 | 223 | wc_tracked=True, |
|
224 | 224 | p1_tracked=True, |
|
225 | 225 | parentfiledata=(mode, size, (mtime, 0)), |
|
226 | 226 | ) |
|
227 | 227 | else: |
|
228 | 228 | raise RuntimeError(b'unknown state: %s' % state) |
|
229 | 229 | |
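
To summarize the branches above, a rough hand-written mapping (a sketch, not from the source) of the v1 state byte onto the semantic attributes; the 'r' and 'n' cases are further refined by the special size and mtime values:

    # Sketch only; 'r' and 'n' also inspect size/mtime for merge markers.
    V1_STATE_TO_SEMANTICS = {
        b'n': dict(wc_tracked=True, p1_tracked=True, p2_info=False),
        b'a': dict(wc_tracked=True, p1_tracked=False, p2_info=False),
        b'r': dict(wc_tracked=False),  # p1_tracked/p2_info depend on size
        b'm': dict(wc_tracked=True, p1_tracked=True, p2_info=True),
    }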
|
230 | 230 | def set_possibly_dirty(self): |
|
231 | 231 | """Mark a file as "possibly dirty" |
|
232 | 232 | |
|
233 | 233 | This means the next status call will have to actually check its content |
|
234 | 234 | to make sure it is correct. |
|
235 | 235 | """ |
|
236 | 236 | self._mtime_s = None |
|
237 | 237 | self._mtime_ns = None |
|
238 | 238 | |
|
239 | 239 | def set_clean(self, mode, size, mtime): |
|
240 | 240 | """mark a file as "clean", cancelling a potential "possibly dirty" call

241 | 241 | 

242 | 242 | Note: this function is a descendant of `dirstate.normal` and is

243 | 243 | currently expected to be called on "normal" entries only. There is no

244 | 244 | reason for this not to change in the future as long as the code is

245 | 245 | updated to preserve the proper state of non-normal files.
|
246 | 246 | """ |
|
247 | 247 | self._wc_tracked = True |
|
248 | 248 | self._p1_tracked = True |
|
249 | 249 | self._mode = mode |
|
250 | 250 | self._size = size |
|
251 | 251 | self._mtime_s, self._mtime_ns = mtime |
|
252 | 252 | |
|
253 | 253 | def set_tracked(self): |
|
254 | 254 | """mark a file as tracked in the working copy |
|
255 | 255 | |
|
256 | 256 | This will ultimately be called by commands like `hg add`.
|
257 | 257 | """ |
|
258 | 258 | self._wc_tracked = True |
|
259 | 259 | # `set_tracked` replaces various `normallookup` calls. So we mark
|
260 | 260 | # the files as needing lookup |
|
261 | 261 | # |
|
262 | 262 | # Consider dropping this in the future in favor of something less broad. |
|
263 | 263 | self._mtime_s = None |
|
264 | 264 | self._mtime_ns = None |
|
265 | 265 | |
|
266 | 266 | def set_untracked(self): |
|
267 | 267 | """mark a file as untracked in the working copy |
|
268 | 268 | |
|
269 | 269 | This will ultimately be called by commands like `hg remove`.
|
270 | 270 | """ |
|
271 | 271 | self._wc_tracked = False |
|
272 | 272 | self._mode = None |
|
273 | 273 | self._size = None |
|
274 | 274 | self._mtime_s = None |
|
275 | 275 | self._mtime_ns = None |
|
276 | 276 | |
|
277 | 277 | def drop_merge_data(self): |
|
278 | 278 | """remove all "merge-only" information from a DirstateItem

279 | 279 | 

280 | 280 | This is to be called by the dirstatemap code when the second parent is dropped
|
281 | 281 | """ |
|
282 | 282 | if self._p2_info: |
|
283 | 283 | self._p2_info = False |
|
284 | 284 | self._mode = None |
|
285 | 285 | self._size = None |
|
286 | 286 | self._mtime_s = None |
|
287 | 287 | self._mtime_ns = None |
|
288 | 288 | |
|
289 | 289 | @property |
|
290 | 290 | def mode(self): |
|
291 | 291 | return self.v1_mode() |
|
292 | 292 | |
|
293 | 293 | @property |
|
294 | 294 | def size(self): |
|
295 | 295 | return self.v1_size() |
|
296 | 296 | |
|
297 | 297 | @property |
|
298 | 298 | def mtime(self): |
|
299 | 299 | return self.v1_mtime() |
|
300 | 300 | |
|
301 | 301 | def mtime_likely_equal_to(self, other_mtime): |
|
302 | 302 | self_sec = self._mtime_s |
|
303 | 303 | if self_sec is None: |
|
304 | 304 | return False |
|
305 | 305 | self_ns = self._mtime_ns |
|
306 | 306 | other_sec, other_ns = other_mtime |
|
307 | 307 | return self_sec == other_sec and ( |
|
308 | 308 | self_ns == other_ns or self_ns == 0 or other_ns == 0 |
|
309 | 309 | ) |
|
310 | 310 | |
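
The comparison can be restated in isolation: the seconds must match exactly, while sub-second parts match unless either side lost its precision (recorded as 0). A standalone sketch:

    def mtime_likely_equal(a, b):
        # (seconds, nanoseconds) pairs; 0 nanoseconds means "precision lost"
        (a_s, a_ns), (b_s, b_ns) = a, b
        return a_s == b_s and (a_ns == b_ns or a_ns == 0 or b_ns == 0)

    assert mtime_likely_equal((10, 500), (10, 500))
    assert mtime_likely_equal((10, 0), (10, 500))      # one side truncated
    assert not mtime_likely_equal((10, 1), (10, 2))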
|
311 | 311 | @property |
|
312 | 312 | def state(self): |
|
313 | 313 | """ |
|
314 | 314 | States are: |
|
315 | 315 | n normal |
|
316 | 316 | m needs merging |
|
317 | 317 | r marked for removal |
|
318 | 318 | a marked for addition |
|
319 | 319 | |
|
320 | 320 | XXX This "state" is a bit obscure and mostly a direct expression of the |
|
321 | 321 | dirstatev1 format. It would make sense to ultimately deprecate it in |
|
322 | 322 | favor of the more "semantic" attributes. |
|
323 | 323 | """ |
|
324 | 324 | if not self.any_tracked: |
|
325 | 325 | return b'?' |
|
326 | 326 | return self.v1_state() |
|
327 | 327 | |
|
328 | 328 | @property |
|
329 | 329 | def has_fallback_exec(self): |
|
330 | 330 | """True if "fallback" information is available for the "exec" bit
|
331 | 331 | |
|
332 | 332 | Fallback information can be stored in the dirstate to keep track of |
|
333 | 333 | filesystem attributes tracked by Mercurial when the underlying file

334 | 334 | system or operating system does not support that property (e.g.
|
335 | 335 | Windows). |
|
336 | 336 | |
|
337 | 337 | Not all versions of the dirstate on-disk storage support preserving this
|
338 | 338 | information. |
|
339 | 339 | """ |
|
340 | 340 | return self._fallback_exec is not None |
|
341 | 341 | |
|
342 | 342 | @property |
|
343 | 343 | def fallback_exec(self): |
|
344 | 344 | """ "fallback" information for the executable bit |
|
345 | 345 | |
|
346 | 346 | True if the file should be considered executable when we cannot get |
|
347 | 347 | this information from the file system. False if it should be
|
348 | 348 | considered non-executable. |
|
349 | 349 | |
|
350 | 350 | See has_fallback_exec for details.""" |
|
351 | 351 | return self._fallback_exec |
|
352 | 352 | |
|
353 | 353 | @fallback_exec.setter |
|
354 | 354 | def set_fallback_exec(self, value): |
|
355 | 355 | """control "fallback" executable bit |
|
356 | 356 | |
|
357 | 357 | Set to: |
|
358 | 358 | - True if the file should be considered executable, |
|
359 | 359 | - False if the file should be considered non-executable, |
|
360 | 360 | - None if we do not have valid fallback data. |
|
361 | 361 | |
|
362 | 362 | See has_fallback_exec for details.""" |
|
363 | 363 | if value is None: |
|
364 | 364 | self._fallback_exec = None |
|
365 | 365 | else: |
|
366 | 366 | self._fallback_exec = bool(value) |
|
367 | 367 | |
|
368 | 368 | @property |
|
369 | 369 | def has_fallback_symlink(self): |
|
370 | 370 | """True if "fallback" information is available for symlink status
|
371 | 371 | |
|
372 | 372 | Fallback information can be stored in the dirstate to keep track of |
|
373 | 373 | filesystem attributes tracked by Mercurial when the underlying file

374 | 374 | system or operating system does not support that property (e.g.
|
375 | 375 | Windows). |
|
376 | 376 | |
|
377 | 377 | Not all versions of the dirstate on-disk storage support preserving this
|
378 | 378 | information.""" |
|
379 | 379 | return self._fallback_symlink is not None |
|
380 | 380 | |
|
381 | 381 | @property |
|
382 | 382 | def fallback_symlink(self): |
|
383 | 383 | """ "fallback" information for symlink status |
|
384 | 384 | |
|
385 | 385 | True if the file should be considered a symlink when we cannot get

386 | 386 | this information from the file system. False if it should be

387 | 387 | considered a regular file.
|
388 | 388 | |
|
389 | 389 | See has_fallback_symlink for details."""
|
390 | 390 | return self._fallback_symlink |
|
391 | 391 | |
|
392 | 392 | @fallback_symlink.setter |
|
393 | 393 | def set_fallback_symlink(self, value): |
|
394 | 394 | """control "fallback" symlink status |
|
395 | 395 | |
|
396 | 396 | Set to: |
|
397 | 397 | - True if the file should be considered a symlink, |
|
398 | 398 | - False if the file should be considered not a symlink, |
|
399 | 399 | - None if we do not have valid fallback data. |
|
400 | 400 | |
|
401 | 401 | See has_fallback_symlink for details.""" |
|
402 | 402 | if value is None: |
|
403 | 403 | self._fallback_symlink = None |
|
404 | 404 | else: |
|
405 | 405 | self._fallback_symlink = bool(value) |
|
406 | 406 | |
|
407 | 407 | @property |
|
408 | 408 | def tracked(self): |
|
409 | 409 | """True if the file is tracked in the working copy"""
|
410 | 410 | return self._wc_tracked |
|
411 | 411 | |
|
412 | 412 | @property |
|
413 | 413 | def any_tracked(self): |
|
414 | 414 | """True if the file is tracked anywhere (wc or parents)"""
|
415 | 415 | return self._wc_tracked or self._p1_tracked or self._p2_info |
|
416 | 416 | |
|
417 | 417 | @property |
|
418 | 418 | def added(self): |
|
419 | 419 | """True if the file has been added""" |
|
420 | 420 | return self._wc_tracked and not (self._p1_tracked or self._p2_info) |
|
421 | 421 | |
|
422 | 422 | @property |
|
423 | 423 | def maybe_clean(self): |
|
424 | 424 | """True if the file has a chance to be in the "clean" state""" |
|
425 | 425 | if not self._wc_tracked: |
|
426 | 426 | return False |
|
427 | 427 | elif not self._p1_tracked: |
|
428 | 428 | return False |
|
429 | 429 | elif self._p2_info: |
|
430 | 430 | return False |
|
431 | 431 | return True |
|
432 | 432 | |
|
433 | 433 | @property |
|
434 | 434 | def p1_tracked(self): |
|
435 | 435 | """True if the file is tracked in the first parent manifest""" |
|
436 | 436 | return self._p1_tracked |
|
437 | 437 | |
|
438 | 438 | @property |
|
439 | 439 | def p2_info(self): |
|
440 | 440 | """True if the file needed to merge or apply any input from p2 |
|
441 | 441 | |
|
442 | 442 | See the class documentation for details. |
|
443 | 443 | """ |
|
444 | 444 | return self._wc_tracked and self._p2_info |
|
445 | 445 | |
|
446 | 446 | @property |
|
447 | 447 | def removed(self): |
|
448 | 448 | """True if the file has been removed""" |
|
449 | 449 | return not self._wc_tracked and (self._p1_tracked or self._p2_info) |
|
450 | 450 | |
|
451 | 451 | def v2_data(self): |
|
452 | 452 | """Returns (flags, mode, size, mtime) for v2 serialization""" |
|
453 | 453 | flags = 0 |
|
454 | 454 | if self._wc_tracked: |
|
455 | 455 | flags |= DIRSTATE_V2_WDIR_TRACKED |
|
456 | 456 | if self._p1_tracked: |
|
457 | 457 | flags |= DIRSTATE_V2_P1_TRACKED |
|
458 | 458 | if self._p2_info: |
|
459 | 459 | flags |= DIRSTATE_V2_P2_INFO |
|
460 | 460 | if self._mode is not None and self._size is not None: |
|
461 | 461 | flags |= DIRSTATE_V2_HAS_MODE_AND_SIZE |
|
462 | 462 | if self.mode & stat.S_IXUSR: |
|
463 | 463 | flags |= DIRSTATE_V2_MODE_EXEC_PERM |
|
464 | 464 | if stat.S_ISLNK(self.mode): |
|
465 | 465 | flags |= DIRSTATE_V2_MODE_IS_SYMLINK |
|
466 | 466 | if self._mtime_s is not None: |
|
467 | 467 | flags |= DIRSTATE_V2_HAS_MTIME |
|
468 | 468 | |
|
469 | 469 | if self._fallback_exec is not None: |
|
470 | 470 | flags |= DIRSTATE_V2_HAS_FALLBACK_EXEC |
|
471 | 471 | if self._fallback_exec: |
|
472 | 472 | flags |= DIRSTATE_V2_FALLBACK_EXEC |
|
473 | 473 | |
|
474 | 474 | if self._fallback_symlink is not None: |
|
475 | 475 | flags |= DIRSTATE_V2_HAS_FALLBACK_SYMLINK |
|
476 | 476 | if self._fallback_symlink: |
|
477 | 477 | flags |= DIRSTATE_V2_FALLBACK_SYMLINK |
|
478 | 478 | |
|
479 | 479 | # Note: we do not need to do anything regarding |
|
480 | 480 | # DIRSTATE_V2_ALL_UNKNOWN_RECORDED and DIRSTATE_V2_ALL_IGNORED_RECORDED |
|
481 | 481 | # since we never set _DIRSTATE_V2_HAS_DIRECTORY_MTIME
|
482 | 482 | return (flags, self._size or 0, self._mtime_s or 0, self._mtime_ns or 0) |
|
483 | 483 | |
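
As a standalone sketch of the mode handling above: v2 serialization keeps only two bits of the Unix mode, the owner-exec permission and the symlink file type:

    import stat

    def mode_to_v2_bits(mode):
        # mirrors the two flag tests above
        return bool(mode & stat.S_IXUSR), stat.S_ISLNK(mode)

    assert mode_to_v2_bits(0o755 | stat.S_IFREG) == (True, False)
    assert mode_to_v2_bits(0o644 | stat.S_IFLNK) == (False, True)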
|
484 | 484 | def v1_state(self): |
|
485 | 485 | """return a "state" suitable for v1 serialization""" |
|
486 | 486 | if not self.any_tracked: |
|
487 | 487 | # the object has no state to record, this is -currently- |
|
488 | 488 | # unsupported |
|
489 | 489 | raise RuntimeError('untracked item') |
|
490 | 490 | elif self.removed: |
|
491 | 491 | return b'r' |
|
492 | 492 | elif self._p1_tracked and self._p2_info: |
|
493 | 493 | return b'm' |
|
494 | 494 | elif self.added: |
|
495 | 495 | return b'a' |
|
496 | 496 | else: |
|
497 | 497 | return b'n' |
|
498 | 498 | |
|
499 | 499 | def v1_mode(self): |
|
500 | 500 | """return a "mode" suitable for v1 serialization""" |
|
501 | 501 | return self._mode if self._mode is not None else 0 |
|
502 | 502 | |
|
503 | 503 | def v1_size(self): |
|
504 | 504 | """return a "size" suitable for v1 serialization""" |
|
505 | 505 | if not self.any_tracked: |
|
506 | 506 | # the object has no state to record, this is -currently- |
|
507 | 507 | # unsupported |
|
508 | 508 | raise RuntimeError('untracked item') |
|
509 | 509 | elif self.removed and self._p1_tracked and self._p2_info: |
|
510 | 510 | return NONNORMAL |
|
511 | 511 | elif self._p2_info: |
|
512 | 512 | return FROM_P2 |
|
513 | 513 | elif self.removed: |
|
514 | 514 | return 0 |
|
515 | 515 | elif self.added: |
|
516 | 516 | return NONNORMAL |
|
517 | 517 | elif self._size is None: |
|
518 | 518 | return NONNORMAL |
|
519 | 519 | else: |
|
520 | 520 | return self._size |
|
521 | 521 | |
|
522 | 522 | def v1_mtime(self): |
|
523 | 523 | """return a "mtime" suitable for v1 serialization""" |
|
524 | 524 | if not self.any_tracked: |
|
525 | 525 | # the object has no state to record, this is -currently- |
|
526 | 526 | # unsupported |
|
527 | 527 | raise RuntimeError('untracked item') |
|
528 | 528 | elif self.removed: |
|
529 | 529 | return 0 |
|
530 | 530 | elif self._mtime_s is None: |
|
531 | 531 | return AMBIGUOUS_TIME |
|
532 | 532 | elif self._p2_info: |
|
533 | 533 | return AMBIGUOUS_TIME |
|
534 | 534 | elif not self._p1_tracked: |
|
535 | 535 | return AMBIGUOUS_TIME |
|
536 | 536 | else: |
|
537 | 537 | return self._mtime_s |
|
538 | 538 | |
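
Taken together, v1_state/v1_size/v1_mtime rely on a few in-band marker values; a standalone restatement of the convention:

    FROM_P2 = -2         # size marker: content comes from the second parent
    NONNORMAL = -1       # size marker: added/merged, real size unknown
    AMBIGUOUS_TIME = -1  # mtime marker: do not trust the cached mtime

    def needs_content_check(state, mtime):
        # a "normal" entry with an ambiguous mtime must be re-read
        return state == b'n' and mtime == AMBIGUOUS_TIME

    assert needs_content_check(b'n', AMBIGUOUS_TIME)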
|
539 | def need_delay(self, now): | |
|
540 | """True if the stored mtime would be ambiguous with the current time""" | |
|
541 | return self.v1_state() == b'n' and self._mtime_s == now[0] | |
|
542 | ||
|
543 | 539 | |
|
544 | 540 | def gettype(q): |
|
545 | 541 | return int(q & 0xFFFF) |
|
546 | 542 | |
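
gettype() extracts the low 16 bits of the first index field, which conventionally packs the data offset in the high bits and the revision type flags in the low 16 bits; a sketch of that packing (the real helper lives in revlogutils):

    def offset_type(offset, flags):
        # high bits: byte offset; low 16 bits: type flags
        return (offset << 16) | (flags & 0xFFFF)

    assert offset_type(5, 3) & 0xFFFF == 3  # what gettype() recovers
    assert offset_type(5, 3) >> 16 == 5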
|
547 | 543 | |
|
548 | 544 | class BaseIndexObject(object): |
|
549 | 545 | # Can I be passed to an algorithm implemented in Rust?
|
550 | 546 | rust_ext_compat = 0 |
|
551 | 547 | # Format of an index entry according to Python's `struct` language |
|
552 | 548 | index_format = revlog_constants.INDEX_ENTRY_V1 |
|
553 | 549 | # Size of a C unsigned long long int, platform independent |
|
554 | 550 | big_int_size = struct.calcsize(b'>Q') |
|
555 | 551 | # Size of a C long int, platform independent |
|
556 | 552 | int_size = struct.calcsize(b'>i') |
|
557 | 553 | # An empty index entry, used as a default value to be overridden, or nullrev |
|
558 | 554 | null_item = ( |
|
559 | 555 | 0, |
|
560 | 556 | 0, |
|
561 | 557 | 0, |
|
562 | 558 | -1, |
|
563 | 559 | -1, |
|
564 | 560 | -1, |
|
565 | 561 | -1, |
|
566 | 562 | sha1nodeconstants.nullid, |
|
567 | 563 | 0, |
|
568 | 564 | 0, |
|
569 | 565 | revlog_constants.COMP_MODE_INLINE, |
|
570 | 566 | revlog_constants.COMP_MODE_INLINE, |
|
571 | 567 | ) |
|
572 | 568 | |
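
The two sizes computed above are fixed by the big-endian struct codes, independent of the host platform; a quick check:

    import struct

    assert struct.calcsize(b'>Q') == 8  # big_int_size: unsigned 64-bit
    assert struct.calcsize(b'>i') == 4  # int_size: signed 32-bit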
|
573 | 569 | @util.propertycache |
|
574 | 570 | def entry_size(self): |
|
575 | 571 | return self.index_format.size |
|
576 | 572 | |
|
577 | 573 | @property |
|
578 | 574 | def nodemap(self): |
|
579 | 575 | msg = b"index.nodemap is deprecated, use index.[has_node|rev|get_rev]" |
|
580 | 576 | util.nouideprecwarn(msg, b'5.3', stacklevel=2) |
|
581 | 577 | return self._nodemap |
|
582 | 578 | |
|
583 | 579 | @util.propertycache |
|
584 | 580 | def _nodemap(self): |
|
585 | 581 | nodemap = nodemaputil.NodeMap({sha1nodeconstants.nullid: nullrev}) |
|
586 | 582 | for r in range(0, len(self)): |
|
587 | 583 | n = self[r][7] |
|
588 | 584 | nodemap[n] = r |
|
589 | 585 | return nodemap |
|
590 | 586 | |
|
591 | 587 | def has_node(self, node): |
|
592 | 588 | """return True if the node exist in the index""" |
|
593 | 589 | return node in self._nodemap |
|
594 | 590 | |
|
595 | 591 | def rev(self, node): |
|
596 | 592 | """return a revision for a node |
|
597 | 593 | |
|
598 | 594 | If the node is unknown, raise a RevlogError""" |
|
599 | 595 | return self._nodemap[node] |
|
600 | 596 | |
|
601 | 597 | def get_rev(self, node): |
|
602 | 598 | """return a revision for a node |
|
603 | 599 | |
|
604 | 600 | If the node is unknown, return None""" |
|
605 | 601 | return self._nodemap.get(node) |
|
606 | 602 | |
|
607 | 603 | def _stripnodes(self, start): |
|
608 | 604 | if '_nodemap' in vars(self): |
|
609 | 605 | for r in range(start, len(self)): |
|
610 | 606 | n = self[r][7] |
|
611 | 607 | del self._nodemap[n] |
|
612 | 608 | |
|
613 | 609 | def clearcaches(self): |
|
614 | 610 | self.__dict__.pop('_nodemap', None) |
|
615 | 611 | |
|
616 | 612 | def __len__(self): |
|
617 | 613 | return self._lgt + len(self._extra) |
|
618 | 614 | |
|
619 | 615 | def append(self, tup): |
|
620 | 616 | if '_nodemap' in vars(self): |
|
621 | 617 | self._nodemap[tup[7]] = len(self) |
|
622 | 618 | data = self._pack_entry(len(self), tup) |
|
623 | 619 | self._extra.append(data) |
|
624 | 620 | |
|
625 | 621 | def _pack_entry(self, rev, entry): |
|
626 | 622 | assert entry[8] == 0 |
|
627 | 623 | assert entry[9] == 0 |
|
628 | 624 | return self.index_format.pack(*entry[:8]) |
|
629 | 625 | |
|
630 | 626 | def _check_index(self, i): |
|
631 | 627 | if not isinstance(i, int): |
|
632 | 628 | raise TypeError(b"expecting int indexes") |
|
633 | 629 | if i < 0 or i >= len(self): |
|
634 | 630 | raise IndexError |
|
635 | 631 | |
|
636 | 632 | def __getitem__(self, i): |
|
637 | 633 | if i == -1: |
|
638 | 634 | return self.null_item |
|
639 | 635 | self._check_index(i) |
|
640 | 636 | if i >= self._lgt: |
|
641 | 637 | data = self._extra[i - self._lgt] |
|
642 | 638 | else: |
|
643 | 639 | index = self._calculate_index(i) |
|
644 | 640 | data = self._data[index : index + self.entry_size] |
|
645 | 641 | r = self._unpack_entry(i, data) |
|
646 | 642 | if self._lgt and i == 0: |
|
647 | 643 | offset = revlogutils.offset_type(0, gettype(r[0])) |
|
648 | 644 | r = (offset,) + r[1:] |
|
649 | 645 | return r |
|
650 | 646 | |
|
651 | 647 | def _unpack_entry(self, rev, data): |
|
652 | 648 | r = self.index_format.unpack(data) |
|
653 | 649 | r = r + ( |
|
654 | 650 | 0, |
|
655 | 651 | 0, |
|
656 | 652 | revlog_constants.COMP_MODE_INLINE, |
|
657 | 653 | revlog_constants.COMP_MODE_INLINE, |
|
658 | 654 | ) |
|
659 | 655 | return r |
|
660 | 656 | |
|
661 | 657 | def pack_header(self, header): |
|
662 | 658 | """pack header information as binary""" |
|
663 | 659 | v_fmt = revlog_constants.INDEX_HEADER |
|
664 | 660 | return v_fmt.pack(header) |
|
665 | 661 | |
|
666 | 662 | def entry_binary(self, rev): |
|
667 | 663 | """return the raw binary string representing a revision""" |
|
668 | 664 | entry = self[rev] |
|
669 | 665 | p = revlog_constants.INDEX_ENTRY_V1.pack(*entry[:8]) |
|
670 | 666 | if rev == 0: |
|
671 | 667 | p = p[revlog_constants.INDEX_HEADER.size :] |
|
672 | 668 | return p |
|
673 | 669 | |
|
674 | 670 | |
|
675 | 671 | class IndexObject(BaseIndexObject): |
|
676 | 672 | def __init__(self, data): |
|
677 | 673 | assert len(data) % self.entry_size == 0, ( |
|
678 | 674 | len(data), |
|
679 | 675 | self.entry_size, |
|
680 | 676 | len(data) % self.entry_size, |
|
681 | 677 | ) |
|
682 | 678 | self._data = data |
|
683 | 679 | self._lgt = len(data) // self.entry_size |
|
684 | 680 | self._extra = [] |
|
685 | 681 | |
|
686 | 682 | def _calculate_index(self, i): |
|
687 | 683 | return i * self.entry_size |
|
688 | 684 | |
|
689 | 685 | def __delitem__(self, i): |
|
690 | 686 | if not isinstance(i, slice) or not i.stop == -1 or i.step is not None: |
|
691 | 687 | raise ValueError(b"deleting slices only supports a:-1 with step 1") |
|
692 | 688 | i = i.start |
|
693 | 689 | self._check_index(i) |
|
694 | 690 | self._stripnodes(i) |
|
695 | 691 | if i < self._lgt: |
|
696 | 692 | self._data = self._data[: i * self.entry_size] |
|
697 | 693 | self._lgt = i |
|
698 | 694 | self._extra = [] |
|
699 | 695 | else: |
|
700 | 696 | self._extra = self._extra[: i - self._lgt] |
|
701 | 697 | |
|
702 | 698 | |
|
703 | 699 | class PersistentNodeMapIndexObject(IndexObject): |
|
704 | 700 | """a Debug oriented class to test persistent nodemap |
|
705 | 701 | |
|
706 | 702 | We need a simple python object to test API and higher level behavior. See |
|
707 | 703 | the Rust implementation for more serious usage. This should be used only |
|
708 | 704 | through the dedicated `devel.persistent-nodemap` config. |
|
709 | 705 | """ |
|
710 | 706 | |
|
711 | 707 | def nodemap_data_all(self): |
|
712 | 708 | """Return bytes containing a full serialization of a nodemap |
|
713 | 709 | |
|
714 | 710 | The nodemap should be valid for the full set of revisions in the |
|
715 | 711 | index.""" |
|
716 | 712 | return nodemaputil.persistent_data(self) |
|
717 | 713 | |
|
718 | 714 | def nodemap_data_incremental(self): |
|
719 | 715 | """Return bytes containing an incremental update to the persistent nodemap

720 | 716 | 

721 | 717 | This contains the data for an append-only update of the data provided
|
722 | 718 | in the last call to `update_nodemap_data`. |
|
723 | 719 | """ |
|
724 | 720 | if self._nm_root is None: |
|
725 | 721 | return None |
|
726 | 722 | docket = self._nm_docket |
|
727 | 723 | changed, data = nodemaputil.update_persistent_data( |
|
728 | 724 | self, self._nm_root, self._nm_max_idx, self._nm_docket.tip_rev |
|
729 | 725 | ) |
|
730 | 726 | |
|
731 | 727 | self._nm_root = self._nm_max_idx = self._nm_docket = None |
|
732 | 728 | return docket, changed, data |
|
733 | 729 | |
|
734 | 730 | def update_nodemap_data(self, docket, nm_data): |
|
735 | 731 | """provide full block of persisted binary data for a nodemap |
|
736 | 732 | |
|
737 | 733 | The data are expected to come from disk. See `nodemap_data_all` for a |
|
738 | 734 | producer of such data."""
|
739 | 735 | if nm_data is not None: |
|
740 | 736 | self._nm_root, self._nm_max_idx = nodemaputil.parse_data(nm_data) |
|
741 | 737 | if self._nm_root: |
|
742 | 738 | self._nm_docket = docket |
|
743 | 739 | else: |
|
744 | 740 | self._nm_root = self._nm_max_idx = self._nm_docket = None |
|
745 | 741 | |
|
746 | 742 | |
|
747 | 743 | class InlinedIndexObject(BaseIndexObject): |
|
748 | 744 | def __init__(self, data, inline=0): |
|
749 | 745 | self._data = data |
|
750 | 746 | self._lgt = self._inline_scan(None) |
|
751 | 747 | self._inline_scan(self._lgt) |
|
752 | 748 | self._extra = [] |
|
753 | 749 | |
|
754 | 750 | def _inline_scan(self, lgt): |
|
755 | 751 | off = 0 |
|
756 | 752 | if lgt is not None: |
|
757 | 753 | self._offsets = [0] * lgt |
|
758 | 754 | count = 0 |
|
759 | 755 | while off <= len(self._data) - self.entry_size: |
|
760 | 756 | start = off + self.big_int_size |
|
761 | 757 | (s,) = struct.unpack( |
|
762 | 758 | b'>i', |
|
763 | 759 | self._data[start : start + self.int_size], |
|
764 | 760 | ) |
|
765 | 761 | if lgt is not None: |
|
766 | 762 | self._offsets[count] = off |
|
767 | 763 | count += 1 |
|
768 | 764 | off += self.entry_size + s |
|
769 | 765 | if off != len(self._data): |
|
770 | 766 | raise ValueError(b"corrupted data") |
|
771 | 767 | return count |
|
772 | 768 | |
|
773 | 769 | def __delitem__(self, i): |
|
774 | 770 | if not isinstance(i, slice) or not i.stop == -1 or i.step is not None: |
|
775 | 771 | raise ValueError(b"deleting slices only supports a:-1 with step 1") |
|
776 | 772 | i = i.start |
|
777 | 773 | self._check_index(i) |
|
778 | 774 | self._stripnodes(i) |
|
779 | 775 | if i < self._lgt: |
|
780 | 776 | self._offsets = self._offsets[:i] |
|
781 | 777 | self._lgt = i |
|
782 | 778 | self._extra = [] |
|
783 | 779 | else: |
|
784 | 780 | self._extra = self._extra[: i - self._lgt] |
|
785 | 781 | |
|
786 | 782 | def _calculate_index(self, i): |
|
787 | 783 | return self._offsets[i] |
|
788 | 784 | |
|
789 | 785 | |
|
790 | 786 | def parse_index2(data, inline, revlogv2=False): |
|
791 | 787 | if not inline: |
|
792 | 788 | cls = IndexObject2 if revlogv2 else IndexObject |
|
793 | 789 | return cls(data), None |
|
794 | 790 | cls = InlinedIndexObject |
|
795 | 791 | return cls(data, inline), (0, data) |
|
796 | 792 | |
|
797 | 793 | |
|
798 | 794 | def parse_index_cl_v2(data): |
|
799 | 795 | return IndexChangelogV2(data), None |
|
800 | 796 | |
|
801 | 797 | |
|
802 | 798 | class IndexObject2(IndexObject): |
|
803 | 799 | index_format = revlog_constants.INDEX_ENTRY_V2 |
|
804 | 800 | |
|
805 | 801 | def replace_sidedata_info( |
|
806 | 802 | self, |
|
807 | 803 | rev, |
|
808 | 804 | sidedata_offset, |
|
809 | 805 | sidedata_length, |
|
810 | 806 | offset_flags, |
|
811 | 807 | compression_mode, |
|
812 | 808 | ): |
|
813 | 809 | """ |
|
814 | 810 | Replace an existing index entry's sidedata offset and length with new |
|
815 | 811 | ones. |
|
816 | 812 | This cannot be used outside of the context of sidedata rewriting, |
|
817 | 813 | inside the transaction that creates the revision `rev`. |
|
818 | 814 | """ |
|
819 | 815 | if rev < 0: |
|
820 | 816 | raise KeyError |
|
821 | 817 | self._check_index(rev) |
|
822 | 818 | if rev < self._lgt: |
|
823 | 819 | msg = b"cannot rewrite entries outside of this transaction" |
|
824 | 820 | raise KeyError(msg) |
|
825 | 821 | else: |
|
826 | 822 | entry = list(self[rev]) |
|
827 | 823 | entry[0] = offset_flags |
|
828 | 824 | entry[8] = sidedata_offset |
|
829 | 825 | entry[9] = sidedata_length |
|
830 | 826 | entry[11] = compression_mode |
|
831 | 827 | entry = tuple(entry) |
|
832 | 828 | new = self._pack_entry(rev, entry) |
|
833 | 829 | self._extra[rev - self._lgt] = new |
|
834 | 830 | |
|
835 | 831 | def _unpack_entry(self, rev, data): |
|
836 | 832 | data = self.index_format.unpack(data) |
|
837 | 833 | entry = data[:10] |
|
838 | 834 | data_comp = data[10] & 3 |
|
839 | 835 | sidedata_comp = (data[10] & (3 << 2)) >> 2 |
|
840 | 836 | return entry + (data_comp, sidedata_comp) |
|
841 | 837 | |
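
The two compression modes share a single struct field: the data mode sits in the low two bits and the sidedata mode in the next two. A standalone round-trip sketch:

    def pack_comp_modes(data_comp, sidedata_comp):
        return (data_comp & 3) | ((sidedata_comp & 3) << 2)

    def unpack_comp_modes(packed):
        return packed & 3, (packed >> 2) & 3

    assert unpack_comp_modes(pack_comp_modes(1, 2)) == (1, 2)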
|
842 | 838 | def _pack_entry(self, rev, entry): |
|
843 | 839 | data = entry[:10] |
|
844 | 840 | data_comp = entry[10] & 3 |
|
845 | 841 | sidedata_comp = (entry[11] & 3) << 2 |
|
846 | 842 | data += (data_comp | sidedata_comp,) |
|
847 | 843 | |
|
848 | 844 | return self.index_format.pack(*data) |
|
849 | 845 | |
|
850 | 846 | def entry_binary(self, rev): |
|
851 | 847 | """return the raw binary string representing a revision""" |
|
852 | 848 | entry = self[rev] |
|
853 | 849 | return self._pack_entry(rev, entry) |
|
854 | 850 | |
|
855 | 851 | def pack_header(self, header): |
|
856 | 852 | """pack header information as binary""" |
|
857 | 853 | msg = 'version header should go in the docket, not the index: %d' |
|
858 | 854 | msg %= header |
|
859 | 855 | raise error.ProgrammingError(msg) |
|
860 | 856 | |
|
861 | 857 | |
|
862 | 858 | class IndexChangelogV2(IndexObject2): |
|
863 | 859 | index_format = revlog_constants.INDEX_ENTRY_CL_V2 |
|
864 | 860 | |
|
865 | 861 | def _unpack_entry(self, rev, data, r=True): |
|
866 | 862 | items = self.index_format.unpack(data) |
|
867 | 863 | entry = items[:3] + (rev, rev) + items[3:8] |
|
868 | 864 | data_comp = items[8] & 3 |
|
869 | 865 | sidedata_comp = (items[8] >> 2) & 3 |
|
870 | 866 | return entry + (data_comp, sidedata_comp) |
|
871 | 867 | |
|
872 | 868 | def _pack_entry(self, rev, entry): |
|
873 | 869 | assert entry[3] == rev, entry[3] |
|
874 | 870 | assert entry[4] == rev, entry[4] |
|
875 | 871 | data = entry[:3] + entry[5:10] |
|
876 | 872 | data_comp = entry[10] & 3 |
|
877 | 873 | sidedata_comp = (entry[11] & 3) << 2 |
|
878 | 874 | data += (data_comp | sidedata_comp,) |
|
879 | 875 | return self.index_format.pack(*data) |
|
880 | 876 | |
|
881 | 877 | |
|
882 | 878 | def parse_index_devel_nodemap(data, inline): |
|
883 | 879 | """like parse_index2, but always returns a PersistentNodeMapIndexObject"""
|
884 | 880 | return PersistentNodeMapIndexObject(data), None |
|
885 | 881 | |
|
886 | 882 | |
|
887 | 883 | def parse_dirstate(dmap, copymap, st): |
|
888 | 884 | parents = [st[:20], st[20:40]] |
|
889 | 885 | # dereference fields so they will be local in loop |
|
890 | 886 | format = b">cllll" |
|
891 | 887 | e_size = struct.calcsize(format) |
|
892 | 888 | pos1 = 40 |
|
893 | 889 | l = len(st) |
|
894 | 890 | |
|
895 | 891 | # the inner loop |
|
896 | 892 | while pos1 < l: |
|
897 | 893 | pos2 = pos1 + e_size |
|
898 | 894 | e = _unpack(b">cllll", st[pos1:pos2]) # a literal here is faster |
|
899 | 895 | pos1 = pos2 + e[4] |
|
900 | 896 | f = st[pos2:pos1] |
|
901 | 897 | if b'\0' in f: |
|
902 | 898 | f, c = f.split(b'\0') |
|
903 | 899 | copymap[f] = c |
|
904 | 900 | dmap[f] = DirstateItem.from_v1_data(*e[:4]) |
|
905 | 901 | return parents |
|
906 | 902 | |
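
Each dirstate-v1 record parsed above is a fixed 17-byte header (a state byte plus four big-endian 32-bit ints) followed by the filename, with an optional "\0copysource" suffix; a standalone sketch:

    import struct

    header = struct.pack(b">cllll", b'n', 0o644, 12, 1700000000, len(b"foo"))
    record = header + b"foo"

    state, mode, size, mtime, flen = struct.unpack(b">cllll", record[:17])
    assert record[17 : 17 + flen] == b"foo"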
|
907 | 903 | |
|
908 | def pack_dirstate(dmap, copymap, pl, now): | 

904 | def pack_dirstate(dmap, copymap, pl): | |
|
909 | 905 | cs = stringio() |
|
910 | 906 | write = cs.write |
|
911 | 907 | write(b"".join(pl)) |
|
912 | 908 | for f, e in pycompat.iteritems(dmap): |
|
913 | if e.need_delay(now): | |
|
914 | # The file was last modified "simultaneously" with the current | |
|
915 | # write to dirstate (i.e. within the same second for file- | |
|
916 | # systems with a granularity of 1 sec). This commonly happens | |
|
917 | # for at least a couple of files on 'update'. | |
|
918 | # The user could change the file without changing its size | |
|
919 | # within the same second. Invalidate the file's mtime in | |
|
920 | # dirstate, forcing future 'status' calls to compare the | |
|
921 | # contents of the file if the size is the same. This prevents | |
|
922 | # mistakenly treating such files as clean. | |
|
923 | e.set_possibly_dirty() | |
|
924 | ||
|
925 | 909 | if f in copymap: |
|
926 | 910 | f = b"%s\0%s" % (f, copymap[f]) |
|
927 | 911 | e = _pack( |
|
928 | 912 | b">cllll", |
|
929 | 913 | e.v1_state(), |
|
930 | 914 | e.v1_mode(), |
|
931 | 915 | e.v1_size(), |
|
932 | 916 | e.v1_mtime(), |
|
933 | 917 | len(f), |
|
934 | 918 | ) |
|
935 | 919 | write(e) |
|
936 | 920 | write(f) |
|
937 | 921 | return cs.getvalue() |
@@ -1,649 +1,639 b'' | |||
|
1 | 1 | use crate::dirstate_tree::on_disk::DirstateV2ParseError; |
|
2 | 2 | use crate::errors::HgError; |
|
3 | 3 | use bitflags::bitflags; |
|
4 | 4 | use std::convert::{TryFrom, TryInto}; |
|
5 | 5 | use std::fs; |
|
6 | 6 | use std::io; |
|
7 | 7 | use std::time::{SystemTime, UNIX_EPOCH}; |
|
8 | 8 | |
|
9 | 9 | #[derive(Copy, Clone, Debug, Eq, PartialEq)] |
|
10 | 10 | pub enum EntryState { |
|
11 | 11 | Normal, |
|
12 | 12 | Added, |
|
13 | 13 | Removed, |
|
14 | 14 | Merged, |
|
15 | 15 | } |
|
16 | 16 | |
|
17 | 17 | /// `size` and `mtime.seconds` are truncated to 31 bits. |
|
18 | 18 | /// |
|
19 | 19 | /// TODO: double-check status algorithm correctness for files |
|
20 | 20 | /// larger than 2 GiB or modified after 2038. |
|
21 | 21 | #[derive(Debug, Copy, Clone)] |
|
22 | 22 | pub struct DirstateEntry { |
|
23 | 23 | pub(crate) flags: Flags, |
|
24 | 24 | mode_size: Option<(u32, u32)>, |
|
25 | 25 | mtime: Option<TruncatedTimestamp>, |
|
26 | 26 | } |
|
27 | 27 | |
|
28 | 28 | bitflags! { |
|
29 | 29 | pub(crate) struct Flags: u8 { |
|
30 | 30 | const WDIR_TRACKED = 1 << 0; |
|
31 | 31 | const P1_TRACKED = 1 << 1; |
|
32 | 32 | const P2_INFO = 1 << 2; |
|
33 | 33 | const HAS_FALLBACK_EXEC = 1 << 3; |
|
34 | 34 | const FALLBACK_EXEC = 1 << 4; |
|
35 | 35 | const HAS_FALLBACK_SYMLINK = 1 << 5; |
|
36 | 36 | const FALLBACK_SYMLINK = 1 << 6; |
|
37 | 37 | } |
|
38 | 38 | } |
|
39 | 39 | |
|
40 | 40 | /// A Unix timestamp with nanoseconds precision |
|
41 | 41 | #[derive(Debug, Copy, Clone)] |
|
42 | 42 | pub struct TruncatedTimestamp { |
|
43 | 43 | truncated_seconds: u32, |
|
44 | 44 | /// Always in the `0 .. 1_000_000_000` range. |
|
45 | 45 | nanoseconds: u32, |
|
46 | 46 | } |
|
47 | 47 | |
|
48 | 48 | impl TruncatedTimestamp { |
|
49 | 49 | /// Constructs from a timestamp potentially outside of the supported range, |
|
50 | 50 | /// and truncates the seconds component to its lower 31 bits.
|
51 | 51 | /// |
|
52 | 52 | /// Panics if the nanoseconds component is not in the expected range.
|
53 | 53 | pub fn new_truncate(seconds: i64, nanoseconds: u32) -> Self { |
|
54 | 54 | assert!(nanoseconds < NSEC_PER_SEC); |
|
55 | 55 | Self { |
|
56 | 56 | truncated_seconds: seconds as u32 & RANGE_MASK_31BIT, |
|
57 | 57 | nanoseconds, |
|
58 | 58 | } |
|
59 | 59 | } |
|
60 | 60 | |
|
61 | 61 | /// Constructs from components. Returns an error if they are not in the

62 | 62 | /// expected range.
|
63 | 63 | pub fn from_already_truncated( |
|
64 | 64 | truncated_seconds: u32, |
|
65 | 65 | nanoseconds: u32, |
|
66 | 66 | ) -> Result<Self, DirstateV2ParseError> { |
|
67 | 67 | if truncated_seconds & !RANGE_MASK_31BIT == 0 |
|
68 | 68 | && nanoseconds < NSEC_PER_SEC |
|
69 | 69 | { |
|
70 | 70 | Ok(Self { |
|
71 | 71 | truncated_seconds, |
|
72 | 72 | nanoseconds, |
|
73 | 73 | }) |
|
74 | 74 | } else { |
|
75 | 75 | Err(DirstateV2ParseError) |
|
76 | 76 | } |
|
77 | 77 | } |
|
78 | 78 | |
|
79 | 79 | pub fn for_mtime_of(metadata: &fs::Metadata) -> io::Result<Self> { |
|
80 | 80 | #[cfg(unix)] |
|
81 | 81 | { |
|
82 | 82 | use std::os::unix::fs::MetadataExt; |
|
83 | 83 | let seconds = metadata.mtime(); |
|
84 | 84 | // i64 -> u32 with value always in the `0 .. NSEC_PER_SEC` range |
|
85 | 85 | let nanoseconds = metadata.mtime_nsec().try_into().unwrap(); |
|
86 | 86 | Ok(Self::new_truncate(seconds, nanoseconds)) |
|
87 | 87 | } |
|
88 | 88 | #[cfg(not(unix))] |
|
89 | 89 | { |
|
90 | 90 | metadata.modified().map(Self::from) |
|
91 | 91 | } |
|
92 | 92 | } |
|
93 | 93 | |
|
94 | 94 | /// The lower 31 bits of the number of seconds since the epoch. |
|
95 | 95 | pub fn truncated_seconds(&self) -> u32 { |
|
96 | 96 | self.truncated_seconds |
|
97 | 97 | } |
|
98 | 98 | |
|
99 | 99 | /// The sub-second component of this timestamp, in nanoseconds. |
|
100 | 100 | /// Always in the `0 .. 1_000_000_000` range. |
|
101 | 101 | /// |
|
102 | 102 | /// This timestamp is after `(seconds, 0)` by this many nanoseconds. |
|
103 | 103 | pub fn nanoseconds(&self) -> u32 { |
|
104 | 104 | self.nanoseconds |
|
105 | 105 | } |
|
106 | 106 | |
|
107 | 107 | /// Returns whether two timestamps are equal modulo 2**31 seconds. |
|
108 | 108 | /// |
|
109 | 109 | /// If this returns `true`, the original values converted from `SystemTime` |
|
110 | 110 | /// or given to `new_truncate` were very likely equal. A false positive is |
|
111 | 111 | /// possible if they were exactly a multiple of 2**31 seconds apart (around |
|
112 | 112 | /// 68 years). This is deemed very unlikely to happen by chance, especially |
|
113 | 113 | /// on filesystems that support sub-second precision. |
|
114 | 114 | /// |
|
115 | 115 | /// If someone is manipulating the modification times of some files to |
|
116 | 116 | /// intentionally make `hg status` return incorrect results, not truncating |
|
117 | 117 | /// wouldn’t help much since they can set exactly the expected timestamp. |
|
118 | 118 | /// |
|
119 | 119 | /// Sub-second precision is ignored if it is zero in either value. |
|
120 | 120 | /// Some APIs simply return zero when more precision is not available. |
|
121 | 121 | /// When comparing values from different sources, if only one is truncated |
|
122 | 122 | /// in that way, doing a simple comparison would cause many false |
|
123 | 123 | /// negatives. |
|
124 | 124 | pub fn likely_equal(self, other: Self) -> bool { |
|
125 | 125 | self.truncated_seconds == other.truncated_seconds |
|
126 | 126 | && (self.nanoseconds == other.nanoseconds |
|
127 | 127 | || self.nanoseconds == 0 |
|
128 | 128 | || other.nanoseconds == 0) |
|
129 | 129 | } |
|
130 | 130 | |
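
Restated in Python for brevity: the truncation keeps the low 31 bits of the seconds, so two timestamps compare equal modulo 2**31 seconds (roughly 68 years):

    RANGE_MASK_31BIT = 0x7FFF_FFFF

    def truncate_seconds(seconds):
        return seconds & RANGE_MASK_31BIT

    # a false positive needs the originals to be exactly 2**31 s apart
    assert truncate_seconds(5) == truncate_seconds(5 + 2 ** 31)
    assert 2 ** 31 / (365.25 * 24 * 3600) > 68  # ~68 years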
|
131 | 131 | pub fn likely_equal_to_mtime_of( |
|
132 | 132 | self, |
|
133 | 133 | metadata: &fs::Metadata, |
|
134 | 134 | ) -> io::Result<bool> { |
|
135 | 135 | Ok(self.likely_equal(Self::for_mtime_of(metadata)?)) |
|
136 | 136 | } |
|
137 | 137 | } |
|
138 | 138 | |
|
139 | 139 | impl From<SystemTime> for TruncatedTimestamp { |
|
140 | 140 | fn from(system_time: SystemTime) -> Self { |
|
141 | 141 | // On Unix, `SystemTime` is a wrapper for the `timespec` C struct: |
|
142 | 142 | // https://www.gnu.org/software/libc/manual/html_node/Time-Types.html#index-struct-timespec |
|
143 | 143 | // We want to effectively access its fields, but the Rust standard |
|
144 | 144 | // library does not expose them. The best we can do is: |
|
145 | 145 | let seconds; |
|
146 | 146 | let nanoseconds; |
|
147 | 147 | match system_time.duration_since(UNIX_EPOCH) { |
|
148 | 148 | Ok(duration) => { |
|
149 | 149 | seconds = duration.as_secs() as i64; |
|
150 | 150 | nanoseconds = duration.subsec_nanos(); |
|
151 | 151 | } |
|
152 | 152 | Err(error) => { |
|
153 | 153 | // `system_time` is before `UNIX_EPOCH`. |
|
154 | 154 | // We need to undo this algorithm: |
|
155 | 155 | // https://github.com/rust-lang/rust/blob/6bed1f0bc3cc50c10aab26d5f94b16a00776b8a5/library/std/src/sys/unix/time.rs#L40-L41 |
|
156 | 156 | let negative = error.duration(); |
|
157 | 157 | let negative_secs = negative.as_secs() as i64; |
|
158 | 158 | let negative_nanos = negative.subsec_nanos(); |
|
159 | 159 | if negative_nanos == 0 { |
|
160 | 160 | seconds = -negative_secs; |
|
161 | 161 | nanoseconds = 0; |
|
162 | 162 | } else { |
|
163 | 163 | // For example if `system_time` was 4.3 seconds before |
|
164 | 164 | // the Unix epoch we get a Duration that represents |
|
165 | 165 | // `(-4, -0.3)` but we want `(-5, +0.7)`: |
|
166 | 166 | seconds = -1 - negative_secs; |
|
167 | 167 | nanoseconds = NSEC_PER_SEC - negative_nanos; |
|
168 | 168 | } |
|
169 | 169 | } |
|
170 | 170 | }; |
|
171 | 171 | Self::new_truncate(seconds, nanoseconds) |
|
172 | 172 | } |
|
173 | 173 | } |
|
174 | 174 | |
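
The pre-epoch branch above can be checked with a small Python model: a time 4.3 s before the epoch becomes (-5 whole seconds, +0.7 s of nanoseconds), keeping the nanosecond part non-negative:

    NSEC_PER_SEC = 1_000_000_000

    def normalize(negative_secs, negative_nanos):
        # undo Duration's (secs, subsec_nanos) for a pre-epoch time
        if negative_nanos == 0:
            return -negative_secs, 0
        return -1 - negative_secs, NSEC_PER_SEC - negative_nanos

    assert normalize(4, 300_000_000) == (-5, 700_000_000)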
|
175 | 175 | const NSEC_PER_SEC: u32 = 1_000_000_000; |
|
176 | 176 | const RANGE_MASK_31BIT: u32 = 0x7FFF_FFFF; |
|
177 | 177 | |
|
178 | 178 | pub const MTIME_UNSET: i32 = -1; |
|
179 | 179 | |
|
180 | 180 | /// A `DirstateEntry` with a size of `-2` means that it was merged from the |
|
181 | 181 | /// other parent. This allows revert to pick the right status back during a |
|
182 | 182 | /// merge. |
|
183 | 183 | pub const SIZE_FROM_OTHER_PARENT: i32 = -2; |
|
184 | 184 | /// A special value used for internal representation of special case in |
|
185 | 185 | /// dirstate v1 format. |
|
186 | 186 | pub const SIZE_NON_NORMAL: i32 = -1; |
|
187 | 187 | |
|
188 | 188 | impl DirstateEntry { |
|
189 | 189 | pub fn from_v2_data( |
|
190 | 190 | wdir_tracked: bool, |
|
191 | 191 | p1_tracked: bool, |
|
192 | 192 | p2_info: bool, |
|
193 | 193 | mode_size: Option<(u32, u32)>, |
|
194 | 194 | mtime: Option<TruncatedTimestamp>, |
|
195 | 195 | fallback_exec: Option<bool>, |
|
196 | 196 | fallback_symlink: Option<bool>, |
|
197 | 197 | ) -> Self { |
|
198 | 198 | if let Some((mode, size)) = mode_size { |
|
199 | 199 | // TODO: return an error for out of range values? |
|
200 | 200 | assert!(mode & !RANGE_MASK_31BIT == 0); |
|
201 | 201 | assert!(size & !RANGE_MASK_31BIT == 0); |
|
202 | 202 | } |
|
203 | 203 | let mut flags = Flags::empty(); |
|
204 | 204 | flags.set(Flags::WDIR_TRACKED, wdir_tracked); |
|
205 | 205 | flags.set(Flags::P1_TRACKED, p1_tracked); |
|
206 | 206 | flags.set(Flags::P2_INFO, p2_info); |
|
207 | 207 | if let Some(exec) = fallback_exec { |
|
208 | 208 | flags.insert(Flags::HAS_FALLBACK_EXEC); |
|
209 | 209 | if exec { |
|
210 | 210 | flags.insert(Flags::FALLBACK_EXEC); |
|
211 | 211 | } |
|
212 | 212 | } |
|
213 | 213 | if let Some(exec) = fallback_symlink { |
|
214 | 214 | flags.insert(Flags::HAS_FALLBACK_SYMLINK); |
|
215 | 215 | if exec { |
|
216 | 216 | flags.insert(Flags::FALLBACK_SYMLINK); |
|
217 | 217 | } |
|
218 | 218 | } |
|
219 | 219 | Self { |
|
220 | 220 | flags, |
|
221 | 221 | mode_size, |
|
222 | 222 | mtime, |
|
223 | 223 | } |
|
224 | 224 | } |
|
225 | 225 | |
|
226 | 226 | pub fn from_v1_data( |
|
227 | 227 | state: EntryState, |
|
228 | 228 | mode: i32, |
|
229 | 229 | size: i32, |
|
230 | 230 | mtime: i32, |
|
231 | 231 | ) -> Self { |
|
232 | 232 | match state { |
|
233 | 233 | EntryState::Normal => { |
|
234 | 234 | if size == SIZE_FROM_OTHER_PARENT { |
|
235 | 235 | Self { |
|
236 | 236 | // might be missing P1_TRACKED |
|
237 | 237 | flags: Flags::WDIR_TRACKED | Flags::P2_INFO, |
|
238 | 238 | mode_size: None, |
|
239 | 239 | mtime: None, |
|
240 | 240 | } |
|
241 | 241 | } else if size == SIZE_NON_NORMAL { |
|
242 | 242 | Self { |
|
243 | 243 | flags: Flags::WDIR_TRACKED | Flags::P1_TRACKED, |
|
244 | 244 | mode_size: None, |
|
245 | 245 | mtime: None, |
|
246 | 246 | } |
|
247 | 247 | } else if mtime == MTIME_UNSET { |
|
248 | 248 | // TODO: return an error for negative values? |
|
249 | 249 | let mode = u32::try_from(mode).unwrap(); |
|
250 | 250 | let size = u32::try_from(size).unwrap(); |
|
251 | 251 | Self { |
|
252 | 252 | flags: Flags::WDIR_TRACKED | Flags::P1_TRACKED, |
|
253 | 253 | mode_size: Some((mode, size)), |
|
254 | 254 | mtime: None, |
|
255 | 255 | } |
|
256 | 256 | } else { |
|
257 | 257 | // TODO: return an error for negative values? |
|
258 | 258 | let mode = u32::try_from(mode).unwrap(); |
|
259 | 259 | let size = u32::try_from(size).unwrap(); |
|
260 | 260 | let mtime = u32::try_from(mtime).unwrap(); |
|
261 | 261 | let mtime = |
|
262 | 262 | TruncatedTimestamp::from_already_truncated(mtime, 0) |
|
263 | 263 | .unwrap(); |
|
264 | 264 | Self { |
|
265 | 265 | flags: Flags::WDIR_TRACKED | Flags::P1_TRACKED, |
|
266 | 266 | mode_size: Some((mode, size)), |
|
267 | 267 | mtime: Some(mtime), |
|
268 | 268 | } |
|
269 | 269 | } |
|
270 | 270 | } |
|
271 | 271 | EntryState::Added => Self { |
|
272 | 272 | flags: Flags::WDIR_TRACKED, |
|
273 | 273 | mode_size: None, |
|
274 | 274 | mtime: None, |
|
275 | 275 | }, |
|
276 | 276 | EntryState::Removed => Self { |
|
277 | 277 | flags: if size == SIZE_NON_NORMAL { |
|
278 | 278 | Flags::P1_TRACKED | Flags::P2_INFO |
|
279 | 279 | } else if size == SIZE_FROM_OTHER_PARENT { |
|
280 | 280 | // We don’t know if P1_TRACKED should be set (file history) |
|
281 | 281 | Flags::P2_INFO |
|
282 | 282 | } else { |
|
283 | 283 | Flags::P1_TRACKED |
|
284 | 284 | }, |
|
285 | 285 | mode_size: None, |
|
286 | 286 | mtime: None, |
|
287 | 287 | }, |
|
288 | 288 | EntryState::Merged => Self { |
|
289 | 289 | flags: Flags::WDIR_TRACKED |
|
290 | 290 | | Flags::P1_TRACKED // might not be true because of rename?

291 | 291 | | Flags::P2_INFO, // might not be true because of rename?
|
292 | 292 | mode_size: None, |
|
293 | 293 | mtime: None, |
|
294 | 294 | }, |
|
295 | 295 | } |
|
296 | 296 | } |
|
297 | 297 | |
|
298 | 298 | /// Creates a new entry in "removed" state. |
|
299 | 299 | /// |
|
300 | 300 | /// `size` is expected to be zero, `SIZE_NON_NORMAL`, or |
|
301 | 301 | /// `SIZE_FROM_OTHER_PARENT` |
|
302 | 302 | pub fn new_removed(size: i32) -> Self { |
|
303 | 303 | Self::from_v1_data(EntryState::Removed, 0, size, 0) |
|
304 | 304 | } |
|
305 | 305 | |
|
306 | 306 | pub fn tracked(&self) -> bool { |
|
307 | 307 | self.flags.contains(Flags::WDIR_TRACKED) |
|
308 | 308 | } |
|
309 | 309 | |
|
310 | 310 | pub fn p1_tracked(&self) -> bool { |
|
311 | 311 | self.flags.contains(Flags::P1_TRACKED) |
|
312 | 312 | } |
|
313 | 313 | |
|
314 | 314 | fn in_either_parent(&self) -> bool { |
|
315 | 315 | self.flags.intersects(Flags::P1_TRACKED | Flags::P2_INFO) |
|
316 | 316 | } |
|
317 | 317 | |
|
318 | 318 | pub fn removed(&self) -> bool { |
|
319 | 319 | self.in_either_parent() && !self.flags.contains(Flags::WDIR_TRACKED) |
|
320 | 320 | } |
|
321 | 321 | |
|
322 | 322 | pub fn p2_info(&self) -> bool { |
|
323 | 323 | self.flags.contains(Flags::WDIR_TRACKED | Flags::P2_INFO) |
|
324 | 324 | } |
|
325 | 325 | |
|
326 | 326 | pub fn added(&self) -> bool { |
|
327 | 327 | self.flags.contains(Flags::WDIR_TRACKED) && !self.in_either_parent() |
|
328 | 328 | } |
|
329 | 329 | |
|
330 | 330 | pub fn maybe_clean(&self) -> bool { |
|
331 | 331 | if !self.flags.contains(Flags::WDIR_TRACKED) { |
|
332 | 332 | false |
|
333 | 333 | } else if !self.flags.contains(Flags::P1_TRACKED) { |
|
334 | 334 | false |
|
335 | 335 | } else if self.flags.contains(Flags::P2_INFO) { |
|
336 | 336 | false |
|
337 | 337 | } else { |
|
338 | 338 | true |
|
339 | 339 | } |
|
340 | 340 | } |
|
341 | 341 | |
|
342 | 342 | pub fn any_tracked(&self) -> bool { |
|
343 | 343 | self.flags.intersects( |
|
344 | 344 | Flags::WDIR_TRACKED | Flags::P1_TRACKED | Flags::P2_INFO, |
|
345 | 345 | ) |
|
346 | 346 | } |
|
347 | 347 | |
|
348 | 348 | /// Returns `(wdir_tracked, p1_tracked, p2_info, mode_size, mtime)` |
|
349 | 349 | pub(crate) fn v2_data( |
|
350 | 350 | &self, |
|
351 | 351 | ) -> ( |
|
352 | 352 | bool, |
|
353 | 353 | bool, |
|
354 | 354 | bool, |
|
355 | 355 | Option<(u32, u32)>, |
|
356 | 356 | Option<TruncatedTimestamp>, |
|
357 | 357 | Option<bool>, |
|
358 | 358 | Option<bool>, |
|
359 | 359 | ) { |
|
360 | 360 | if !self.any_tracked() { |
|
361 | 361 | // TODO: return an Option instead? |
|
362 | 362 | panic!("Accessing v1_state of an untracked DirstateEntry") |
|
363 | 363 | } |
|
364 | 364 | let wdir_tracked = self.flags.contains(Flags::WDIR_TRACKED); |
|
365 | 365 | let p1_tracked = self.flags.contains(Flags::P1_TRACKED); |
|
366 | 366 | let p2_info = self.flags.contains(Flags::P2_INFO); |
|
367 | 367 | let mode_size = self.mode_size; |
|
368 | 368 | let mtime = self.mtime; |
|
369 | 369 | ( |
|
370 | 370 | wdir_tracked, |
|
371 | 371 | p1_tracked, |
|
372 | 372 | p2_info, |
|
373 | 373 | mode_size, |
|
374 | 374 | mtime, |
|
375 | 375 | self.get_fallback_exec(), |
|
376 | 376 | self.get_fallback_symlink(), |
|
377 | 377 | ) |
|
378 | 378 | } |
|
379 | 379 | |
|
380 | 380 | fn v1_state(&self) -> EntryState { |
|
381 | 381 | if !self.any_tracked() { |
|
382 | 382 | // TODO: return an Option instead? |
|
383 | 383 | panic!("Accessing v1_state of an untracked DirstateEntry") |
|
384 | 384 | } |
|
385 | 385 | if self.removed() { |
|
386 | 386 | EntryState::Removed |
|
387 | 387 | } else if self |
|
388 | 388 | .flags |
|
389 | 389 | .contains(Flags::WDIR_TRACKED | Flags::P1_TRACKED | Flags::P2_INFO) |
|
390 | 390 | { |
|
391 | 391 | EntryState::Merged |
|
392 | 392 | } else if self.added() { |
|
393 | 393 | EntryState::Added |
|
394 | 394 | } else { |
|
395 | 395 | EntryState::Normal |
|
396 | 396 | } |
|
397 | 397 | } |
|
398 | 398 | |
|
399 | 399 | fn v1_mode(&self) -> i32 { |
|
400 | 400 | if let Some((mode, _size)) = self.mode_size { |
|
401 | 401 | i32::try_from(mode).unwrap() |
|
402 | 402 | } else { |
|
403 | 403 | 0 |
|
404 | 404 | } |
|
405 | 405 | } |
|
406 | 406 | |
|
407 | 407 | fn v1_size(&self) -> i32 { |
|
408 | 408 | if !self.any_tracked() { |
|
409 | 409 | // TODO: return an Option instead? |
|
410 | 410 | panic!("Accessing v1_size of an untracked DirstateEntry") |
|
411 | 411 | } |
|
412 | 412 | if self.removed() |
|
413 | 413 | && self.flags.contains(Flags::P1_TRACKED | Flags::P2_INFO) |
|
414 | 414 | { |
|
415 | 415 | SIZE_NON_NORMAL |
|
416 | 416 | } else if self.flags.contains(Flags::P2_INFO) { |
|
417 | 417 | SIZE_FROM_OTHER_PARENT |
|
418 | 418 | } else if self.removed() { |
|
419 | 419 | 0 |
|
420 | 420 | } else if self.added() { |
|
421 | 421 | SIZE_NON_NORMAL |
|
422 | 422 | } else if let Some((_mode, size)) = self.mode_size { |
|
423 | 423 | i32::try_from(size).unwrap() |
|
424 | 424 | } else { |
|
425 | 425 | SIZE_NON_NORMAL |
|
426 | 426 | } |
|
427 | 427 | } |
|
428 | 428 | |
|
429 | 429 | fn v1_mtime(&self) -> i32 { |
|
430 | 430 | if !self.any_tracked() { |
|
431 | 431 | // TODO: return an Option instead? |
|
432 | 432 | panic!("Accessing v1_mtime of an untracked DirstateEntry") |
|
433 | 433 | } |
|
434 | 434 | if self.removed() { |
|
435 | 435 | 0 |
|
436 | 436 | } else if self.flags.contains(Flags::P2_INFO) { |
|
437 | 437 | MTIME_UNSET |
|
438 | 438 | } else if !self.flags.contains(Flags::P1_TRACKED) { |
|
439 | 439 | MTIME_UNSET |
|
440 | 440 | } else if let Some(mtime) = self.mtime { |
|
441 | 441 | i32::try_from(mtime.truncated_seconds()).unwrap() |
|
442 | 442 | } else { |
|
443 | 443 | MTIME_UNSET |
|
444 | 444 | } |
|
445 | 445 | } |
|
446 | 446 | |
|
447 | 447 | // TODO: return `Option<EntryState>`? None when `!self.any_tracked` |
|
448 | 448 | pub fn state(&self) -> EntryState { |
|
449 | 449 | self.v1_state() |
|
450 | 450 | } |
|
451 | 451 | |
|
452 | 452 | // TODO: return Option? |
|
453 | 453 | pub fn mode(&self) -> i32 { |
|
454 | 454 | self.v1_mode() |
|
455 | 455 | } |
|
456 | 456 | |
|
457 | 457 | // TODO: return Option? |
|
458 | 458 | pub fn size(&self) -> i32 { |
|
459 | 459 | self.v1_size() |
|
460 | 460 | } |
|
461 | 461 | |
|
462 | 462 | // TODO: return Option? |
|
463 | 463 | pub fn mtime(&self) -> i32 { |
|
464 | 464 | self.v1_mtime() |
|
465 | 465 | } |
|
466 | 466 | |
|
467 | 467 | pub fn get_fallback_exec(&self) -> Option<bool> { |
|
468 | 468 | if self.flags.contains(Flags::HAS_FALLBACK_EXEC) { |
|
469 | 469 | Some(self.flags.contains(Flags::FALLBACK_EXEC)) |
|
470 | 470 | } else { |
|
471 | 471 | None |
|
472 | 472 | } |
|
473 | 473 | } |
|
474 | 474 | |
|
475 | 475 | pub fn set_fallback_exec(&mut self, value: Option<bool>) { |
|
476 | 476 | match value { |
|
477 | 477 | None => { |
|
478 | 478 | self.flags.remove(Flags::HAS_FALLBACK_EXEC); |
|
479 | 479 | self.flags.remove(Flags::FALLBACK_EXEC); |
|
480 | 480 | } |
|
481 | 481 | Some(exec) => { |
|
482 | 482 | self.flags.insert(Flags::HAS_FALLBACK_EXEC); |
|
483 | 483 | if exec { |
|
484 | 484 | self.flags.insert(Flags::FALLBACK_EXEC); |
|
485 | 485 | } |
|
486 | 486 | } |
|
487 | 487 | } |
|
488 | 488 | } |
|
489 | 489 | |
|
490 | 490 | pub fn get_fallback_symlink(&self) -> Option<bool> { |
|
491 | 491 | if self.flags.contains(Flags::HAS_FALLBACK_SYMLINK) { |
|
492 | 492 | Some(self.flags.contains(Flags::FALLBACK_SYMLINK)) |
|
493 | 493 | } else { |
|
494 | 494 | None |
|
495 | 495 | } |
|
496 | 496 | } |
|
497 | 497 | |
|
498 | 498 | pub fn set_fallback_symlink(&mut self, value: Option<bool>) { |
|
499 | 499 | match value { |
|
500 | 500 | None => { |
|
501 | 501 | self.flags.remove(Flags::HAS_FALLBACK_SYMLINK); |
|
502 | 502 | self.flags.remove(Flags::FALLBACK_SYMLINK); |
|
503 | 503 | } |
|
504 | 504 | Some(symlink) => { |
|
505 | 505 | self.flags.insert(Flags::HAS_FALLBACK_SYMLINK); |
|
506 | 506 | if symlink { |
|
507 | 507 | self.flags.insert(Flags::FALLBACK_SYMLINK); |
|
508 | 508 | } |
|
509 | 509 | } |
|
510 | 510 | } |
|
511 | 511 | } |
|
512 | 512 | |
|
513 | 513 | pub fn truncated_mtime(&self) -> Option<TruncatedTimestamp> { |
|
514 | 514 | self.mtime |
|
515 | 515 | } |
|
516 | 516 | |
|
517 | 517 | pub fn drop_merge_data(&mut self) { |
|
518 | 518 | if self.flags.contains(Flags::P2_INFO) { |
|
519 | 519 | self.flags.remove(Flags::P2_INFO); |
|
520 | 520 | self.mode_size = None; |
|
521 | 521 | self.mtime = None; |
|
522 | 522 | } |
|
523 | 523 | } |
|
524 | 524 | |
|
525 | 525 | pub fn set_possibly_dirty(&mut self) { |
|
526 | 526 | self.mtime = None |
|
527 | 527 | } |
|
528 | 528 | |
|
529 | 529 | pub fn set_clean( |
|
530 | 530 | &mut self, |
|
531 | 531 | mode: u32, |
|
532 | 532 | size: u32, |
|
533 | 533 | mtime: TruncatedTimestamp, |
|
534 | 534 | ) { |
|
535 | 535 | let size = size & RANGE_MASK_31BIT; |
|
536 | 536 | self.flags.insert(Flags::WDIR_TRACKED | Flags::P1_TRACKED); |
|
537 | 537 | self.mode_size = Some((mode, size)); |
|
538 | 538 | self.mtime = Some(mtime); |
|
539 | 539 | } |
|
540 | 540 | |
|
541 | 541 | pub fn set_tracked(&mut self) { |
|
542 | 542 | self.flags.insert(Flags::WDIR_TRACKED); |
|
543 | 543 | // `set_tracked` replaces various `normallookup` calls, so we mark

544 | 544 | // the file as needing a lookup
|
545 | 545 | // |
|
546 | 546 | // Consider dropping this in the future in favor of something less |
|
547 | 547 | // broad. |
|
548 | 548 | self.mtime = None; |
|
549 | 549 | } |
|
550 | 550 | |
|
551 | 551 | pub fn set_untracked(&mut self) { |
|
552 | 552 | self.flags.remove(Flags::WDIR_TRACKED); |
|
553 | 553 | self.mode_size = None; |
|
554 | 554 | self.mtime = None; |
|
555 | 555 | } |
|
556 | 556 | |
|
557 | 557 | /// Returns `(state, mode, size, mtime)` for the purpose of serialization
|
558 | 558 | /// in the dirstate-v1 format. |
|
559 | 559 | /// |
|
560 | 560 | /// This includes marker values such as `mtime == -1`. In the future we may |
|
561 | 561 | /// want to stop representing these cases that way in memory, but serialization
|
562 | 562 | /// will need to keep the same format. |
|
563 | 563 | pub fn v1_data(&self) -> (u8, i32, i32, i32) { |
|
564 | 564 | ( |
|
565 | 565 | self.v1_state().into(), |
|
566 | 566 | self.v1_mode(), |
|
567 | 567 | self.v1_size(), |
|
568 | 568 | self.v1_mtime(), |
|
569 | 569 | ) |
|
570 | 570 | } |
|
571 | 571 | |
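For context, the tuple returned by `v1_data` maps onto dirstate-v1's fixed-width entry header: one state byte followed by big-endian 32-bit mode, size, mtime, and path-length fields. A sketch of that layout (an assumed summary of the v1 format; `pack_v1_header` is an illustrative helper, not the actual `pack_entry`):

    fn pack_v1_header(state: u8, mode: i32, size: i32, mtime: i32, path_len: i32) -> Vec<u8> {
        // 1 state byte + 4 big-endian i32 fields = 17 bytes per entry header
        let mut packed = Vec::with_capacity(17);
        packed.push(state);
        packed.extend_from_slice(&mode.to_be_bytes());
        packed.extend_from_slice(&size.to_be_bytes());
        packed.extend_from_slice(&mtime.to_be_bytes());
        packed.extend_from_slice(&path_len.to_be_bytes());
        packed
    }

    fn main() {
        // A normal ('n') entry whose mtime is the -1 marker value:
        let header = pack_v1_header(b'n', 0o644, 1234, -1, 7);
        assert_eq!(header.len(), 17);
        assert_eq!(&header[9..13], &(-1i32).to_be_bytes());
    }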
|
572 | 572 | pub(crate) fn is_from_other_parent(&self) -> bool { |
|
573 | 573 | self.state() == EntryState::Normal |
|
574 | 574 | && self.size() == SIZE_FROM_OTHER_PARENT |
|
575 | 575 | } |
|
576 | 576 | |
|
577 | 577 | // TODO: other platforms |
|
578 | 578 | #[cfg(unix)] |
|
579 | 579 | pub fn mode_changed( |
|
580 | 580 | &self, |
|
581 | 581 | filesystem_metadata: &std::fs::Metadata, |
|
582 | 582 | ) -> bool { |
|
583 | 583 | let dirstate_exec_bit = (self.mode() as u32 & EXEC_BIT_MASK) != 0; |
|
584 | 584 | let fs_exec_bit = has_exec_bit(filesystem_metadata); |
|
585 | 585 | dirstate_exec_bit != fs_exec_bit |
|
586 | 586 | } |
|
587 | 587 | |
|
588 | 588 | /// Returns a `(state, mode, size, mtime)` tuple as for |
|
589 | 589 | /// `DirstateMapMethods::debug_iter`. |
|
590 | 590 | pub fn debug_tuple(&self) -> (u8, i32, i32, i32) { |
|
591 | 591 | (self.state().into(), self.mode(), self.size(), self.mtime()) |
|
592 | 592 | } |
|
593 | ||
|
594 | /// True if the stored mtime would be ambiguous with the current time |

595 | pub fn need_delay(&self, now: TruncatedTimestamp) -> bool { |

596 | if let Some(mtime) = self.mtime { |

597 | self.state() == EntryState::Normal |

598 | && mtime.truncated_seconds() == now.truncated_seconds() |

599 | } else { |

600 | false |

601 | } |

602 | } |
|
603 | 593 | } |
|
604 | 594 | |
|
605 | 595 | impl EntryState { |
|
606 | 596 | pub fn is_tracked(self) -> bool { |
|
607 | 597 | use EntryState::*; |
|
608 | 598 | match self { |
|
609 | 599 | Normal | Added | Merged => true, |
|
610 | 600 | Removed => false, |
|
611 | 601 | } |
|
612 | 602 | } |
|
613 | 603 | } |
|
614 | 604 | |
|
615 | 605 | impl TryFrom<u8> for EntryState { |
|
616 | 606 | type Error = HgError; |
|
617 | 607 | |
|
618 | 608 | fn try_from(value: u8) -> Result<Self, Self::Error> { |
|
619 | 609 | match value { |
|
620 | 610 | b'n' => Ok(EntryState::Normal), |
|
621 | 611 | b'a' => Ok(EntryState::Added), |
|
622 | 612 | b'r' => Ok(EntryState::Removed), |
|
623 | 613 | b'm' => Ok(EntryState::Merged), |
|
624 | 614 | _ => Err(HgError::CorruptedRepository(format!( |
|
625 | 615 | "Incorrect dirstate entry state {}", |
|
626 | 616 | value |
|
627 | 617 | ))), |
|
628 | 618 | } |
|
629 | 619 | } |
|
630 | 620 | } |
|
631 | 621 | |
|
632 | 622 | impl Into<u8> for EntryState { |
|
633 | 623 | fn into(self) -> u8 { |
|
634 | 624 | match self { |
|
635 | 625 | EntryState::Normal => b'n', |
|
636 | 626 | EntryState::Added => b'a', |
|
637 | 627 | EntryState::Removed => b'r', |
|
638 | 628 | EntryState::Merged => b'm', |
|
639 | 629 | } |
|
640 | 630 | } |
|
641 | 631 | } |
|
642 | 632 | |
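A round-trip through the two conversions above, assuming they are in scope. (An `impl From<EntryState> for u8` would be the more idiomatic direction and would provide `Into` for free; the `Into` impl is shown as written to match the source.)

    use std::convert::TryFrom;

    // Round-trip between the on-disk state byte and EntryState:
    let state = EntryState::try_from(b'm').unwrap();
    assert!(matches!(state, EntryState::Merged));
    let byte: u8 = state.into();
    assert_eq!(byte, b'm');
    // Unknown state bytes are rejected as repository corruption:
    assert!(EntryState::try_from(b'x').is_err());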
|
643 | 633 | const EXEC_BIT_MASK: u32 = 0o100; |
|
644 | 634 | |
|
645 | 635 | pub fn has_exec_bit(metadata: &std::fs::Metadata) -> bool { |
|
646 | 636 | // TODO: How to handle executable permissions on Windows? |
|
647 | 637 | use std::os::unix::fs::MetadataExt; |
|
648 | 638 | (metadata.mode() & EXEC_BIT_MASK) != 0 |
|
649 | 639 | } |
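A usage sketch for `has_exec_bit` (Unix-only; the path is illustrative):

    use std::os::unix::fs::MetadataExt;

    const EXEC_BIT_MASK: u32 = 0o100; // owner-execute bit, as above

    fn main() -> std::io::Result<()> {
        let metadata = std::fs::metadata("src/lib.rs")?;
        // Only the owner-execute bit is consulted, matching has_exec_bit.
        let executable = (metadata.mode() & EXEC_BIT_MASK) != 0;
        println!("executable: {}", executable);
        Ok(())
    }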
@@ -1,1184 +1,1139 b'' | |||
|
1 | 1 | use bytes_cast::BytesCast; |
|
2 | 2 | use micro_timer::timed; |
|
3 | 3 | use std::borrow::Cow; |
|
4 | 4 | use std::path::PathBuf; |
|
5 | 5 | |
|
6 | 6 | use super::on_disk; |
|
7 | 7 | use super::on_disk::DirstateV2ParseError; |
|
8 | 8 | use super::owning::OwningDirstateMap; |
|
9 | 9 | use super::path_with_basename::WithBasename; |
|
10 | 10 | use crate::dirstate::parsers::pack_entry; |
|
11 | 11 | use crate::dirstate::parsers::packed_entry_size; |
|
12 | 12 | use crate::dirstate::parsers::parse_dirstate_entries; |
|
13 | 13 | use crate::dirstate::CopyMapIter; |
|
14 | 14 | use crate::dirstate::StateMapIter; |
|
15 | 15 | use crate::dirstate::TruncatedTimestamp; |
|
16 | 16 | use crate::dirstate::SIZE_FROM_OTHER_PARENT; |
|
17 | 17 | use crate::dirstate::SIZE_NON_NORMAL; |
|
18 | 18 | use crate::matchers::Matcher; |
|
19 | 19 | use crate::utils::hg_path::{HgPath, HgPathBuf}; |
|
20 | 20 | use crate::DirstateEntry; |
|
21 | 21 | use crate::DirstateError; |
|
22 | 22 | use crate::DirstateParents; |
|
23 | 23 | use crate::DirstateStatus; |
|
24 | 24 | use crate::EntryState; |
|
25 | 25 | use crate::FastHashMap; |
|
26 | 26 | use crate::PatternFileWarning; |
|
27 | 27 | use crate::StatusError; |
|
28 | 28 | use crate::StatusOptions; |
|
29 | 29 | |
|
30 | 30 | /// Append to an existing data file if the amount of unreachable data (not used |
|
31 | 31 | /// anymore) is less than this fraction of the total amount of existing data. |
|
32 | 32 | const ACCEPTABLE_UNREACHABLE_BYTES_RATIO: f32 = 0.5; |
|
33 | 33 | |
|
34 | 34 | pub struct DirstateMap<'on_disk> { |
|
35 | 35 | /// Contents of the `.hg/dirstate` file |
|
36 | 36 | pub(super) on_disk: &'on_disk [u8], |
|
37 | 37 | |
|
38 | 38 | pub(super) root: ChildNodes<'on_disk>, |
|
39 | 39 | |
|
40 | 40 | /// Number of nodes anywhere in the tree that have `.entry.is_some()`. |
|
41 | 41 | pub(super) nodes_with_entry_count: u32, |
|
42 | 42 | |
|
43 | 43 | /// Number of nodes anywhere in the tree that have |
|
44 | 44 | /// `.copy_source.is_some()`. |
|
45 | 45 | pub(super) nodes_with_copy_source_count: u32, |
|
46 | 46 | |
|
47 | 47 | /// See on_disk::Header |
|
48 | 48 | pub(super) ignore_patterns_hash: on_disk::IgnorePatternsHash, |
|
49 | 49 | |
|
50 | 50 | /// How many bytes of `on_disk` are not used anymore |
|
51 | 51 | pub(super) unreachable_bytes: u32, |
|
52 | 52 | } |
|
53 | 53 | |
|
54 | 54 | /// Using a plain `HgPathBuf` of the full path from the repository root as a |
|
55 | 55 | /// map key would also work: all paths in a given map have the same parent |
|
56 | 56 | /// path, so comparing full paths gives the same result as comparing base |
|
57 | 57 | /// names. However `HashMap` would waste time always re-hashing the same |
|
58 | 58 | /// string prefix. |
|
59 | 59 | pub(super) type NodeKey<'on_disk> = WithBasename<Cow<'on_disk, HgPath>>; |
|
60 | 60 | |
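A standalone sketch of the idea behind `WithBasename` keys (illustrative types, not the hg-core implementation): keep the full path around, but make equality and hashing consider only the base name, so the parent-directory prefix shared by every key in one map is never re-hashed.

    use std::collections::HashMap;
    use std::hash::{Hash, Hasher};

    struct WithBasename {
        full_path: String,
        base_name_start: usize, // byte offset of the base name in full_path
    }

    impl WithBasename {
        fn base_name(&self) -> &str {
            &self.full_path[self.base_name_start..]
        }
    }

    impl PartialEq for WithBasename {
        fn eq(&self, other: &Self) -> bool {
            self.base_name() == other.base_name()
        }
    }

    impl Eq for WithBasename {}

    impl Hash for WithBasename {
        fn hash<H: Hasher>(&self, state: &mut H) {
            // Hash only "file.rs", never the shared "dir/sub/" prefix.
            self.base_name().hash(state)
        }
    }

    fn main() {
        let mut children: HashMap<WithBasename, u32> = HashMap::new();
        let key = WithBasename {
            full_path: "dir/sub/file.rs".to_string(),
            base_name_start: "dir/sub/".len(),
        };
        children.insert(key, 1);
        assert_eq!(children.len(), 1);
    }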
|
61 | 61 | /// Similar to `&'tree Cow<'on_disk, HgPath>`, but can also be returned |
|
62 | 62 | /// for on-disk nodes that don’t actually have a `Cow` to borrow. |
|
63 | 63 | pub(super) enum BorrowedPath<'tree, 'on_disk> { |
|
64 | 64 | InMemory(&'tree HgPathBuf), |
|
65 | 65 | OnDisk(&'on_disk HgPath), |
|
66 | 66 | } |
|
67 | 67 | |
|
68 | 68 | pub(super) enum ChildNodes<'on_disk> { |
|
69 | 69 | InMemory(FastHashMap<NodeKey<'on_disk>, Node<'on_disk>>), |
|
70 | 70 | OnDisk(&'on_disk [on_disk::Node]), |
|
71 | 71 | } |
|
72 | 72 | |
|
73 | 73 | pub(super) enum ChildNodesRef<'tree, 'on_disk> { |
|
74 | 74 | InMemory(&'tree FastHashMap<NodeKey<'on_disk>, Node<'on_disk>>), |
|
75 | 75 | OnDisk(&'on_disk [on_disk::Node]), |
|
76 | 76 | } |
|
77 | 77 | |
|
78 | 78 | pub(super) enum NodeRef<'tree, 'on_disk> { |
|
79 | 79 | InMemory(&'tree NodeKey<'on_disk>, &'tree Node<'on_disk>), |
|
80 | 80 | OnDisk(&'on_disk on_disk::Node), |
|
81 | 81 | } |
|
82 | 82 | |
|
83 | 83 | impl<'tree, 'on_disk> BorrowedPath<'tree, 'on_disk> { |
|
84 | 84 | pub fn detach_from_tree(&self) -> Cow<'on_disk, HgPath> { |
|
85 | 85 | match *self { |
|
86 | 86 | BorrowedPath::InMemory(in_memory) => Cow::Owned(in_memory.clone()), |
|
87 | 87 | BorrowedPath::OnDisk(on_disk) => Cow::Borrowed(on_disk), |
|
88 | 88 | } |
|
89 | 89 | } |
|
90 | 90 | } |
|
91 | 91 | |
|
92 | 92 | impl<'tree, 'on_disk> std::ops::Deref for BorrowedPath<'tree, 'on_disk> { |
|
93 | 93 | type Target = HgPath; |
|
94 | 94 | |
|
95 | 95 | fn deref(&self) -> &HgPath { |
|
96 | 96 | match *self { |
|
97 | 97 | BorrowedPath::InMemory(in_memory) => in_memory, |
|
98 | 98 | BorrowedPath::OnDisk(on_disk) => on_disk, |
|
99 | 99 | } |
|
100 | 100 | } |
|
101 | 101 | } |
|
102 | 102 | |
|
103 | 103 | impl Default for ChildNodes<'_> { |
|
104 | 104 | fn default() -> Self { |
|
105 | 105 | ChildNodes::InMemory(Default::default()) |
|
106 | 106 | } |
|
107 | 107 | } |
|
108 | 108 | |
|
109 | 109 | impl<'on_disk> ChildNodes<'on_disk> { |
|
110 | 110 | pub(super) fn as_ref<'tree>( |
|
111 | 111 | &'tree self, |
|
112 | 112 | ) -> ChildNodesRef<'tree, 'on_disk> { |
|
113 | 113 | match self { |
|
114 | 114 | ChildNodes::InMemory(nodes) => ChildNodesRef::InMemory(nodes), |
|
115 | 115 | ChildNodes::OnDisk(nodes) => ChildNodesRef::OnDisk(nodes), |
|
116 | 116 | } |
|
117 | 117 | } |
|
118 | 118 | |
|
119 | 119 | pub(super) fn is_empty(&self) -> bool { |
|
120 | 120 | match self { |
|
121 | 121 | ChildNodes::InMemory(nodes) => nodes.is_empty(), |
|
122 | 122 | ChildNodes::OnDisk(nodes) => nodes.is_empty(), |
|
123 | 123 | } |
|
124 | 124 | } |
|
125 | 125 | |
|
126 | 126 | fn make_mut( |
|
127 | 127 | &mut self, |
|
128 | 128 | on_disk: &'on_disk [u8], |
|
129 | 129 | unreachable_bytes: &mut u32, |
|
130 | 130 | ) -> Result< |
|
131 | 131 | &mut FastHashMap<NodeKey<'on_disk>, Node<'on_disk>>, |
|
132 | 132 | DirstateV2ParseError, |
|
133 | 133 | > { |
|
134 | 134 | match self { |
|
135 | 135 | ChildNodes::InMemory(nodes) => Ok(nodes), |
|
136 | 136 | ChildNodes::OnDisk(nodes) => { |
|
137 | 137 | *unreachable_bytes += |
|
138 | 138 | std::mem::size_of_val::<[on_disk::Node]>(nodes) as u32; |
|
139 | 139 | let nodes = nodes |
|
140 | 140 | .iter() |
|
141 | 141 | .map(|node| { |
|
142 | 142 | Ok(( |
|
143 | 143 | node.path(on_disk)?, |
|
144 | 144 | node.to_in_memory_node(on_disk)?, |
|
145 | 145 | )) |
|
146 | 146 | }) |
|
147 | 147 | .collect::<Result<_, _>>()?; |
|
148 | 148 | *self = ChildNodes::InMemory(nodes); |
|
149 | 149 | match self { |
|
150 | 150 | ChildNodes::InMemory(nodes) => Ok(nodes), |
|
151 | 151 | ChildNodes::OnDisk(_) => unreachable!(), |
|
152 | 152 | } |
|
153 | 153 | } |
|
154 | 154 | } |
|
155 | 155 | } |
|
156 | 156 | } |
|
157 | 157 | |
|
158 | 158 | impl<'tree, 'on_disk> ChildNodesRef<'tree, 'on_disk> { |
|
159 | 159 | pub(super) fn get( |
|
160 | 160 | &self, |
|
161 | 161 | base_name: &HgPath, |
|
162 | 162 | on_disk: &'on_disk [u8], |
|
163 | 163 | ) -> Result<Option<NodeRef<'tree, 'on_disk>>, DirstateV2ParseError> { |
|
164 | 164 | match self { |
|
165 | 165 | ChildNodesRef::InMemory(nodes) => Ok(nodes |
|
166 | 166 | .get_key_value(base_name) |
|
167 | 167 | .map(|(k, v)| NodeRef::InMemory(k, v))), |
|
168 | 168 | ChildNodesRef::OnDisk(nodes) => { |
|
169 | 169 | let mut parse_result = Ok(()); |
|
170 | 170 | let search_result = nodes.binary_search_by(|node| { |
|
171 | 171 | match node.base_name(on_disk) { |
|
172 | 172 | Ok(node_base_name) => node_base_name.cmp(base_name), |
|
173 | 173 | Err(e) => { |
|
174 | 174 | parse_result = Err(e); |
|
175 | 175 | // Dummy comparison result, `search_result` won’t |
|
176 | 176 | // be used since `parse_result` is an error |
|
177 | 177 | std::cmp::Ordering::Equal |
|
178 | 178 | } |
|
179 | 179 | } |
|
180 | 180 | }); |
|
181 | 181 | parse_result.map(|()| { |
|
182 | 182 | search_result.ok().map(|i| NodeRef::OnDisk(&nodes[i])) |
|
183 | 183 | }) |
|
184 | 184 | } |
|
185 | 185 | } |
|
186 | 186 | } |
|
187 | 187 | |
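The on-disk branch of `get` above has to report parse errors out of `binary_search_by`, whose comparator can only return an `Ordering`; the error is therefore stashed in a captured local and checked after the search. A toy version of the same pattern, searching sorted numeric strings:

    fn find(
        nodes: &[&str],
        target: u32,
    ) -> Result<Option<usize>, std::num::ParseIntError> {
        let mut parse_result = Ok(());
        let search_result = nodes.binary_search_by(|s| match s.parse::<u32>() {
            Ok(n) => n.cmp(&target),
            Err(e) => {
                parse_result = Err(e);
                // Dummy ordering; the search result is discarded on error.
                std::cmp::Ordering::Equal
            }
        });
        parse_result.map(|()| search_result.ok())
    }

    fn main() {
        assert_eq!(find(&["1", "3", "5"], 3), Ok(Some(1)));
        assert_eq!(find(&["1", "3", "5"], 4), Ok(None));
        assert!(find(&["1", "x", "5"], 3).is_err());
    }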
|
188 | 188 | /// Iterate in undefined order |
|
189 | 189 | pub(super) fn iter( |
|
190 | 190 | &self, |
|
191 | 191 | ) -> impl Iterator<Item = NodeRef<'tree, 'on_disk>> { |
|
192 | 192 | match self { |
|
193 | 193 | ChildNodesRef::InMemory(nodes) => itertools::Either::Left( |
|
194 | 194 | nodes.iter().map(|(k, v)| NodeRef::InMemory(k, v)), |
|
195 | 195 | ), |
|
196 | 196 | ChildNodesRef::OnDisk(nodes) => { |
|
197 | 197 | itertools::Either::Right(nodes.iter().map(NodeRef::OnDisk)) |
|
198 | 198 | } |
|
199 | 199 | } |
|
200 | 200 | } |
|
201 | 201 | |
|
202 | 202 | /// Iterate in parallel in undefined order |
|
203 | 203 | pub(super) fn par_iter( |
|
204 | 204 | &self, |
|
205 | 205 | ) -> impl rayon::iter::ParallelIterator<Item = NodeRef<'tree, 'on_disk>> |
|
206 | 206 | { |
|
207 | 207 | use rayon::prelude::*; |
|
208 | 208 | match self { |
|
209 | 209 | ChildNodesRef::InMemory(nodes) => rayon::iter::Either::Left( |
|
210 | 210 | nodes.par_iter().map(|(k, v)| NodeRef::InMemory(k, v)), |
|
211 | 211 | ), |
|
212 | 212 | ChildNodesRef::OnDisk(nodes) => rayon::iter::Either::Right( |
|
213 | 213 | nodes.par_iter().map(NodeRef::OnDisk), |
|
214 | 214 | ), |
|
215 | 215 | } |
|
216 | 216 | } |
|
217 | 217 | |
|
218 | 218 | pub(super) fn sorted(&self) -> Vec<NodeRef<'tree, 'on_disk>> { |
|
219 | 219 | match self { |
|
220 | 220 | ChildNodesRef::InMemory(nodes) => { |
|
221 | 221 | let mut vec: Vec<_> = nodes |
|
222 | 222 | .iter() |
|
223 | 223 | .map(|(k, v)| NodeRef::InMemory(k, v)) |
|
224 | 224 | .collect(); |
|
225 | 225 | fn sort_key<'a>(node: &'a NodeRef) -> &'a HgPath { |
|
226 | 226 | match node { |
|
227 | 227 | NodeRef::InMemory(path, _node) => path.base_name(), |
|
228 | 228 | NodeRef::OnDisk(_) => unreachable!(), |
|
229 | 229 | } |
|
230 | 230 | } |
|
231 | 231 | // `sort_unstable_by_key` doesn’t allow keys borrowing from the |
|
232 | 232 | // value: https://github.com/rust-lang/rust/issues/34162 |
|
233 | 233 | vec.sort_unstable_by(|a, b| sort_key(a).cmp(sort_key(b))); |
|
234 | 234 | vec |
|
235 | 235 | } |
|
236 | 236 | ChildNodesRef::OnDisk(nodes) => { |
|
237 | 237 | // Nodes on disk are already sorted |
|
238 | 238 | nodes.iter().map(NodeRef::OnDisk).collect() |
|
239 | 239 | } |
|
240 | 240 | } |
|
241 | 241 | } |
|
242 | 242 | } |
|
243 | 243 | |
|
244 | 244 | impl<'tree, 'on_disk> NodeRef<'tree, 'on_disk> { |
|
245 | 245 | pub(super) fn full_path( |
|
246 | 246 | &self, |
|
247 | 247 | on_disk: &'on_disk [u8], |
|
248 | 248 | ) -> Result<&'tree HgPath, DirstateV2ParseError> { |
|
249 | 249 | match self { |
|
250 | 250 | NodeRef::InMemory(path, _node) => Ok(path.full_path()), |
|
251 | 251 | NodeRef::OnDisk(node) => node.full_path(on_disk), |
|
252 | 252 | } |
|
253 | 253 | } |
|
254 | 254 | |
|
255 | 255 | /// Returns a `BorrowedPath`, which can be turned into a `Cow<'on_disk, |
|
256 | 256 | /// HgPath>` detached from `'tree` |
|
257 | 257 | pub(super) fn full_path_borrowed( |
|
258 | 258 | &self, |
|
259 | 259 | on_disk: &'on_disk [u8], |
|
260 | 260 | ) -> Result<BorrowedPath<'tree, 'on_disk>, DirstateV2ParseError> { |
|
261 | 261 | match self { |
|
262 | 262 | NodeRef::InMemory(path, _node) => match path.full_path() { |
|
263 | 263 | Cow::Borrowed(on_disk) => Ok(BorrowedPath::OnDisk(on_disk)), |
|
264 | 264 | Cow::Owned(in_memory) => Ok(BorrowedPath::InMemory(in_memory)), |
|
265 | 265 | }, |
|
266 | 266 | NodeRef::OnDisk(node) => { |
|
267 | 267 | Ok(BorrowedPath::OnDisk(node.full_path(on_disk)?)) |
|
268 | 268 | } |
|
269 | 269 | } |
|
270 | 270 | } |
|
271 | 271 | |
|
272 | 272 | pub(super) fn base_name( |
|
273 | 273 | &self, |
|
274 | 274 | on_disk: &'on_disk [u8], |
|
275 | 275 | ) -> Result<&'tree HgPath, DirstateV2ParseError> { |
|
276 | 276 | match self { |
|
277 | 277 | NodeRef::InMemory(path, _node) => Ok(path.base_name()), |
|
278 | 278 | NodeRef::OnDisk(node) => node.base_name(on_disk), |
|
279 | 279 | } |
|
280 | 280 | } |
|
281 | 281 | |
|
282 | 282 | pub(super) fn children( |
|
283 | 283 | &self, |
|
284 | 284 | on_disk: &'on_disk [u8], |
|
285 | 285 | ) -> Result<ChildNodesRef<'tree, 'on_disk>, DirstateV2ParseError> { |
|
286 | 286 | match self { |
|
287 | 287 | NodeRef::InMemory(_path, node) => Ok(node.children.as_ref()), |
|
288 | 288 | NodeRef::OnDisk(node) => { |
|
289 | 289 | Ok(ChildNodesRef::OnDisk(node.children(on_disk)?)) |
|
290 | 290 | } |
|
291 | 291 | } |
|
292 | 292 | } |
|
293 | 293 | |
|
294 | 294 | pub(super) fn has_copy_source(&self) -> bool { |
|
295 | 295 | match self { |
|
296 | 296 | NodeRef::InMemory(_path, node) => node.copy_source.is_some(), |
|
297 | 297 | NodeRef::OnDisk(node) => node.has_copy_source(), |
|
298 | 298 | } |
|
299 | 299 | } |
|
300 | 300 | |
|
301 | 301 | pub(super) fn copy_source( |
|
302 | 302 | &self, |
|
303 | 303 | on_disk: &'on_disk [u8], |
|
304 | 304 | ) -> Result<Option<&'tree HgPath>, DirstateV2ParseError> { |
|
305 | 305 | match self { |
|
306 | 306 | NodeRef::InMemory(_path, node) => { |
|
307 | 307 | Ok(node.copy_source.as_ref().map(|s| &**s)) |
|
308 | 308 | } |
|
309 | 309 | NodeRef::OnDisk(node) => node.copy_source(on_disk), |
|
310 | 310 | } |
|
311 | 311 | } |
|
312 | 312 | |
|
313 | 313 | pub(super) fn entry( |
|
314 | 314 | &self, |
|
315 | 315 | ) -> Result<Option<DirstateEntry>, DirstateV2ParseError> { |
|
316 | 316 | match self { |
|
317 | 317 | NodeRef::InMemory(_path, node) => { |
|
318 | 318 | Ok(node.data.as_entry().copied()) |
|
319 | 319 | } |
|
320 | 320 | NodeRef::OnDisk(node) => node.entry(), |
|
321 | 321 | } |
|
322 | 322 | } |
|
323 | 323 | |
|
324 | 324 | pub(super) fn state( |
|
325 | 325 | &self, |
|
326 | 326 | ) -> Result<Option<EntryState>, DirstateV2ParseError> { |
|
327 | 327 | Ok(self.entry()?.map(|e| e.state())) |
|
328 | 328 | } |
|
329 | 329 | |
|
330 | 330 | pub(super) fn cached_directory_mtime( |
|
331 | 331 | &self, |
|
332 | 332 | ) -> Result<Option<TruncatedTimestamp>, DirstateV2ParseError> { |
|
333 | 333 | match self { |
|
334 | 334 | NodeRef::InMemory(_path, node) => Ok(match node.data { |
|
335 | 335 | NodeData::CachedDirectory { mtime } => Some(mtime), |
|
336 | 336 | _ => None, |
|
337 | 337 | }), |
|
338 | 338 | NodeRef::OnDisk(node) => node.cached_directory_mtime(), |
|
339 | 339 | } |
|
340 | 340 | } |
|
341 | 341 | |
|
342 | 342 | pub(super) fn descendants_with_entry_count(&self) -> u32 { |
|
343 | 343 | match self { |
|
344 | 344 | NodeRef::InMemory(_path, node) => { |
|
345 | 345 | node.descendants_with_entry_count |
|
346 | 346 | } |
|
347 | 347 | NodeRef::OnDisk(node) => node.descendants_with_entry_count.get(), |
|
348 | 348 | } |
|
349 | 349 | } |
|
350 | 350 | |
|
351 | 351 | pub(super) fn tracked_descendants_count(&self) -> u32 { |
|
352 | 352 | match self { |
|
353 | 353 | NodeRef::InMemory(_path, node) => node.tracked_descendants_count, |
|
354 | 354 | NodeRef::OnDisk(node) => node.tracked_descendants_count.get(), |
|
355 | 355 | } |
|
356 | 356 | } |
|
357 | 357 | } |
|
358 | 358 | |
|
359 | 359 | /// Represents a file or a directory |
|
360 | 360 | #[derive(Default)] |
|
361 | 361 | pub(super) struct Node<'on_disk> { |
|
362 | 362 | pub(super) data: NodeData, |
|
363 | 363 | |
|
364 | 364 | pub(super) copy_source: Option<Cow<'on_disk, HgPath>>, |
|
365 | 365 | |
|
366 | 366 | pub(super) children: ChildNodes<'on_disk>, |
|
367 | 367 | |
|
368 | 368 | /// How many (non-inclusive) descendants of this node have an entry. |
|
369 | 369 | pub(super) descendants_with_entry_count: u32, |
|
370 | 370 | |
|
371 | 371 | /// How many (non-inclusive) descendants of this node have an entry whose |
|
372 | 372 | /// state is "tracked". |
|
373 | 373 | pub(super) tracked_descendants_count: u32, |
|
374 | 374 | } |
|
375 | 375 | |
|
376 | 376 | pub(super) enum NodeData { |
|
377 | 377 | Entry(DirstateEntry), |
|
378 | 378 | CachedDirectory { mtime: TruncatedTimestamp }, |
|
379 | 379 | None, |
|
380 | 380 | } |
|
381 | 381 | |
|
382 | 382 | impl Default for NodeData { |
|
383 | 383 | fn default() -> Self { |
|
384 | 384 | NodeData::None |
|
385 | 385 | } |
|
386 | 386 | } |
|
387 | 387 | |
|
388 | 388 | impl NodeData { |
|
389 | 389 | fn has_entry(&self) -> bool { |
|
390 | 390 | match self { |
|
391 | 391 | NodeData::Entry(_) => true, |
|
392 | 392 | _ => false, |
|
393 | 393 | } |
|
394 | 394 | } |
|
395 | 395 | |
|
396 | 396 | fn as_entry(&self) -> Option<&DirstateEntry> { |
|
397 | 397 | match self { |
|
398 | 398 | NodeData::Entry(entry) => Some(entry), |
|
399 | 399 | _ => None, |
|
400 | 400 | } |
|
401 | 401 | } |
|
402 | 402 | } |
|
403 | 403 | |
|
404 | 404 | impl<'on_disk> DirstateMap<'on_disk> { |
|
405 | 405 | pub(super) fn empty(on_disk: &'on_disk [u8]) -> Self { |
|
406 | 406 | Self { |
|
407 | 407 | on_disk, |
|
408 | 408 | root: ChildNodes::default(), |
|
409 | 409 | nodes_with_entry_count: 0, |
|
410 | 410 | nodes_with_copy_source_count: 0, |
|
411 | 411 | ignore_patterns_hash: [0; on_disk::IGNORE_PATTERNS_HASH_LEN], |
|
412 | 412 | unreachable_bytes: 0, |
|
413 | 413 | } |
|
414 | 414 | } |
|
415 | 415 | |
|
416 | 416 | #[timed] |
|
417 | 417 | pub fn new_v2( |
|
418 | 418 | on_disk: &'on_disk [u8], |
|
419 | 419 | data_size: usize, |
|
420 | 420 | metadata: &[u8], |
|
421 | 421 | ) -> Result<Self, DirstateError> { |
|
422 | 422 | if let Some(data) = on_disk.get(..data_size) { |
|
423 | 423 | Ok(on_disk::read(data, metadata)?) |
|
424 | 424 | } else { |
|
425 | 425 | Err(DirstateV2ParseError.into()) |
|
426 | 426 | } |
|
427 | 427 | } |
|
428 | 428 | |
|
429 | 429 | #[timed] |
|
430 | 430 | pub fn new_v1( |
|
431 | 431 | on_disk: &'on_disk [u8], |
|
432 | 432 | ) -> Result<(Self, Option<DirstateParents>), DirstateError> { |
|
433 | 433 | let mut map = Self::empty(on_disk); |
|
434 | 434 | if map.on_disk.is_empty() { |
|
435 | 435 | return Ok((map, None)); |
|
436 | 436 | } |
|
437 | 437 | |
|
438 | 438 | let parents = parse_dirstate_entries( |
|
439 | 439 | map.on_disk, |
|
440 | 440 | |path, entry, copy_source| { |
|
441 | 441 | let tracked = entry.state().is_tracked(); |
|
442 | 442 | let node = Self::get_or_insert_node( |
|
443 | 443 | map.on_disk, |
|
444 | 444 | &mut map.unreachable_bytes, |
|
445 | 445 | &mut map.root, |
|
446 | 446 | path, |
|
447 | 447 | WithBasename::to_cow_borrowed, |
|
448 | 448 | |ancestor| { |
|
449 | 449 | if tracked { |
|
450 | 450 | ancestor.tracked_descendants_count += 1 |
|
451 | 451 | } |
|
452 | 452 | ancestor.descendants_with_entry_count += 1 |
|
453 | 453 | }, |
|
454 | 454 | )?; |
|
455 | 455 | assert!( |
|
456 | 456 | !node.data.has_entry(), |
|
457 | 457 | "duplicate dirstate entry in read" |
|
458 | 458 | ); |
|
459 | 459 | assert!( |
|
460 | 460 | node.copy_source.is_none(), |
|
461 | 461 | "duplicate dirstate entry in read" |
|
462 | 462 | ); |
|
463 | 463 | node.data = NodeData::Entry(*entry); |
|
464 | 464 | node.copy_source = copy_source.map(Cow::Borrowed); |
|
465 | 465 | map.nodes_with_entry_count += 1; |
|
466 | 466 | if copy_source.is_some() { |
|
467 | 467 | map.nodes_with_copy_source_count += 1 |
|
468 | 468 | } |
|
469 | 469 | Ok(()) |
|
470 | 470 | }, |
|
471 | 471 | )?; |
|
472 | 472 | let parents = Some(parents.clone()); |
|
473 | 473 | |
|
474 | 474 | Ok((map, parents)) |
|
475 | 475 | } |
|
476 | 476 | |
|
477 | 477 | /// Assuming dirstate-v2 format, returns whether the next write should |
|
478 | 478 | /// append to the existing data file that contains `self.on_disk` (true), |
|
479 | 479 | /// or create a new data file from scratch (false). |
|
480 | 480 | pub(super) fn write_should_append(&self) -> bool { |
|
481 | 481 | let ratio = self.unreachable_bytes as f32 / self.on_disk.len() as f32; |
|
482 | 482 | ratio < ACCEPTABLE_UNREACHABLE_BYTES_RATIO |
|
483 | 483 | } |
|
484 | 484 | |
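As a worked example of this threshold: a 100-byte data file with 30 unreachable bytes gives a ratio of 0.3, below ACCEPTABLE_UNREACHABLE_BYTES_RATIO (0.5), so the next write appends; with 60 unreachable bytes (0.6) it rewrites from scratch. A minimal sketch of the same arithmetic:

    const ACCEPTABLE_UNREACHABLE_BYTES_RATIO: f32 = 0.5;

    fn write_should_append(unreachable_bytes: u32, on_disk_len: usize) -> bool {
        let ratio = unreachable_bytes as f32 / on_disk_len as f32;
        ratio < ACCEPTABLE_UNREACHABLE_BYTES_RATIO
    }

    fn main() {
        assert!(write_should_append(30, 100)); // 0.3 < 0.5: append
        assert!(!write_should_append(60, 100)); // 0.6 >= 0.5: rewrite
    }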
|
485 | 485 | fn get_node<'tree>( |
|
486 | 486 | &'tree self, |
|
487 | 487 | path: &HgPath, |
|
488 | 488 | ) -> Result<Option<NodeRef<'tree, 'on_disk>>, DirstateV2ParseError> { |
|
489 | 489 | let mut children = self.root.as_ref(); |
|
490 | 490 | let mut components = path.components(); |
|
491 | 491 | let mut component = |
|
492 | 492 | components.next().expect("expected at least one component");
|
493 | 493 | loop { |
|
494 | 494 | if let Some(child) = children.get(component, self.on_disk)? { |
|
495 | 495 | if let Some(next_component) = components.next() { |
|
496 | 496 | component = next_component; |
|
497 | 497 | children = child.children(self.on_disk)?; |
|
498 | 498 | } else { |
|
499 | 499 | return Ok(Some(child)); |
|
500 | 500 | } |
|
501 | 501 | } else { |
|
502 | 502 | return Ok(None); |
|
503 | 503 | } |
|
504 | 504 | } |
|
505 | 505 | } |
|
506 | 506 | |
|
507 | 507 | /// Returns a mutable reference to the node at `path` if it exists |
|
508 | 508 | /// |
|
509 | 509 | /// This takes `root` instead of `&mut self` so that callers can mutate |
|
510 | 510 | /// other fields while the returned borrow is still valid |
|
511 | 511 | fn get_node_mut<'tree>( |
|
512 | 512 | on_disk: &'on_disk [u8], |
|
513 | 513 | unreachable_bytes: &mut u32, |
|
514 | 514 | root: &'tree mut ChildNodes<'on_disk>, |
|
515 | 515 | path: &HgPath, |
|
516 | 516 | ) -> Result<Option<&'tree mut Node<'on_disk>>, DirstateV2ParseError> { |
|
517 | 517 | let mut children = root; |
|
518 | 518 | let mut components = path.components(); |
|
519 | 519 | let mut component = |
|
520 | 520 | components.next().expect("expected at least one component");
|
521 | 521 | loop { |
|
522 | 522 | if let Some(child) = children |
|
523 | 523 | .make_mut(on_disk, unreachable_bytes)? |
|
524 | 524 | .get_mut(component) |
|
525 | 525 | { |
|
526 | 526 | if let Some(next_component) = components.next() { |
|
527 | 527 | component = next_component; |
|
528 | 528 | children = &mut child.children; |
|
529 | 529 | } else { |
|
530 | 530 | return Ok(Some(child)); |
|
531 | 531 | } |
|
532 | 532 | } else { |
|
533 | 533 | return Ok(None); |
|
534 | 534 | } |
|
535 | 535 | } |
|
536 | 536 | } |
|
537 | 537 | |
|
538 | 538 | pub(super) fn get_or_insert<'tree, 'path>( |
|
539 | 539 | &'tree mut self, |
|
540 | 540 | path: &HgPath, |
|
541 | 541 | ) -> Result<&'tree mut Node<'on_disk>, DirstateV2ParseError> { |
|
542 | 542 | Self::get_or_insert_node( |
|
543 | 543 | self.on_disk, |
|
544 | 544 | &mut self.unreachable_bytes, |
|
545 | 545 | &mut self.root, |
|
546 | 546 | path, |
|
547 | 547 | WithBasename::to_cow_owned, |
|
548 | 548 | |_| {}, |
|
549 | 549 | ) |
|
550 | 550 | } |
|
551 | 551 | |
|
552 | 552 | fn get_or_insert_node<'tree, 'path>( |
|
553 | 553 | on_disk: &'on_disk [u8], |
|
554 | 554 | unreachable_bytes: &mut u32, |
|
555 | 555 | root: &'tree mut ChildNodes<'on_disk>, |
|
556 | 556 | path: &'path HgPath, |
|
557 | 557 | to_cow: impl Fn( |
|
558 | 558 | WithBasename<&'path HgPath>, |
|
559 | 559 | ) -> WithBasename<Cow<'on_disk, HgPath>>, |
|
560 | 560 | mut each_ancestor: impl FnMut(&mut Node), |
|
561 | 561 | ) -> Result<&'tree mut Node<'on_disk>, DirstateV2ParseError> { |
|
562 | 562 | let mut child_nodes = root; |
|
563 | 563 | let mut inclusive_ancestor_paths = |
|
564 | 564 | WithBasename::inclusive_ancestors_of(path); |
|
565 | 565 | let mut ancestor_path = inclusive_ancestor_paths |
|
566 | 566 | .next() |
|
567 | 567 | .expect("expected at least one inclusive ancestor"); |
|
568 | 568 | loop { |
|
569 | 569 | // TODO: can we avoid allocating an owned key in cases where the |
|
570 | 570 | // map already contains that key, without introducing double |
|
571 | 571 | // lookup? |
|
572 | 572 | let child_node = child_nodes |
|
573 | 573 | .make_mut(on_disk, unreachable_bytes)? |
|
574 | 574 | .entry(to_cow(ancestor_path)) |
|
575 | 575 | .or_default(); |
|
576 | 576 | if let Some(next) = inclusive_ancestor_paths.next() { |
|
577 | 577 | each_ancestor(child_node); |
|
578 | 578 | ancestor_path = next; |
|
579 | 579 | child_nodes = &mut child_node.children; |
|
580 | 580 | } else { |
|
581 | 581 | return Ok(child_node); |
|
582 | 582 | } |
|
583 | 583 | } |
|
584 | 584 | } |
|
585 | 585 | |
|
586 | 586 | fn add_or_remove_file( |
|
587 | 587 | &mut self, |
|
588 | 588 | path: &HgPath, |
|
589 | 589 | old_state: Option<EntryState>, |
|
590 | 590 | new_entry: DirstateEntry, |
|
591 | 591 | ) -> Result<(), DirstateV2ParseError> { |
|
592 | 592 | let had_entry = old_state.is_some(); |
|
593 | 593 | let was_tracked = old_state.map_or(false, |s| s.is_tracked()); |
|
594 | 594 | let tracked_count_increment = |
|
595 | 595 | match (was_tracked, new_entry.state().is_tracked()) { |
|
596 | 596 | (false, true) => 1, |
|
597 | 597 | (true, false) => -1, |
|
598 | 598 | _ => 0, |
|
599 | 599 | }; |
|
600 | 600 | |
|
601 | 601 | let node = Self::get_or_insert_node( |
|
602 | 602 | self.on_disk, |
|
603 | 603 | &mut self.unreachable_bytes, |
|
604 | 604 | &mut self.root, |
|
605 | 605 | path, |
|
606 | 606 | WithBasename::to_cow_owned, |
|
607 | 607 | |ancestor| { |
|
608 | 608 | if !had_entry { |
|
609 | 609 | ancestor.descendants_with_entry_count += 1; |
|
610 | 610 | } |
|
611 | 611 | |
|
612 | 612 | // We can’t use `+= increment` because the counter is unsigned, |
|
613 | 613 | // and we want debug builds to detect accidental underflow |
|
614 | 614 | // through zero |
|
615 | 615 | match tracked_count_increment { |
|
616 | 616 | 1 => ancestor.tracked_descendants_count += 1, |
|
617 | 617 | -1 => ancestor.tracked_descendants_count -= 1, |
|
618 | 618 | _ => {} |
|
619 | 619 | } |
|
620 | 620 | }, |
|
621 | 621 | )?; |
|
622 | 622 | if !had_entry { |
|
623 | 623 | self.nodes_with_entry_count += 1 |
|
624 | 624 | } |
|
625 | 625 | node.data = NodeData::Entry(new_entry); |
|
626 | 626 | Ok(()) |
|
627 | 627 | } |
|
628 | 628 | |
|
629 | 629 | fn iter_nodes<'tree>( |
|
630 | 630 | &'tree self, |
|
631 | 631 | ) -> impl Iterator< |
|
632 | 632 | Item = Result<NodeRef<'tree, 'on_disk>, DirstateV2ParseError>, |
|
633 | 633 | > + 'tree { |
|
634 | 634 | // Depth first tree traversal. |
|
635 | 635 | // |
|
636 | 636 | // If we could afford internal iteration and recursion, |
|
637 | 637 | // this would look like: |
|
638 | 638 | // |
|
639 | 639 | // ``` |
|
640 | 640 | // fn traverse_children( |
|
641 | 641 | // children: &ChildNodes, |
|
642 | 642 | // each: &mut impl FnMut(&Node), |
|
643 | 643 | // ) { |
|
644 | 644 | // for child in children.values() { |
|
645 | 645 | // traverse_children(&child.children, each); |
|
646 | 646 | // each(child); |
|
647 | 647 | // } |
|
648 | 648 | // } |
|
649 | 649 | // ``` |
|
650 | 650 | // |
|
651 | 651 | // However we want an external iterator and therefore can’t use the |
|
652 | 652 | // call stack. Use an explicit stack instead: |
|
653 | 653 | let mut stack = Vec::new(); |
|
654 | 654 | let mut iter = self.root.as_ref().iter(); |
|
655 | 655 | std::iter::from_fn(move || { |
|
656 | 656 | while let Some(child_node) = iter.next() { |
|
657 | 657 | let children = match child_node.children(self.on_disk) { |
|
658 | 658 | Ok(children) => children, |
|
659 | 659 | Err(error) => return Some(Err(error)), |
|
660 | 660 | }; |
|
661 | 661 | // Pseudo-recursion |
|
662 | 662 | let new_iter = children.iter(); |
|
663 | 663 | let old_iter = std::mem::replace(&mut iter, new_iter); |
|
664 | 664 | stack.push((child_node, old_iter)); |
|
665 | 665 | } |
|
666 | 666 | // Found the end of a `children.iter()` iterator. |
|
667 | 667 | if let Some((child_node, next_iter)) = stack.pop() { |
|
668 | 668 | // "Return" from pseudo-recursion by restoring state from the |
|
669 | 669 | // explicit stack |
|
670 | 670 | iter = next_iter; |
|
671 | 671 | |
|
672 | 672 | Some(Ok(child_node)) |
|
673 | 673 | } else { |
|
674 | 674 | // Reached the bottom of the stack, we’re done |
|
675 | 675 | None |
|
676 | 676 | } |
|
677 | 677 | }) |
|
678 | 678 | } |
|
679 | 679 | |
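The `iter_nodes` implementation above simulates recursion with an explicit `Vec` so that it can hand out an external iterator. A standalone sketch of the same trick over a toy tree (error handling omitted; the names are illustrative):

    struct Tree {
        value: u32,
        children: Vec<Tree>,
    }

    // Depth-first external iterator: children are yielded before parents,
    // and, like iter_nodes, the root itself is never yielded.
    fn iter_post_order(root: &Tree) -> impl Iterator<Item = u32> + '_ {
        let mut stack = Vec::new();
        let mut iter = root.children.iter();
        std::iter::from_fn(move || {
            while let Some(child) = iter.next() {
                // "Recurse": save the current iterator, descend into child.
                let old_iter =
                    std::mem::replace(&mut iter, child.children.iter());
                stack.push((child, old_iter));
            }
            // "Return": restore the parent's iterator and yield the child.
            let (child, parent_iter) = stack.pop()?;
            iter = parent_iter;
            Some(child.value)
        })
    }

    fn main() {
        let tree = Tree {
            value: 0,
            children: vec![
                Tree { value: 1, children: vec![Tree { value: 2, children: vec![] }] },
                Tree { value: 3, children: vec![] },
            ],
        };
        let values: Vec<u32> = iter_post_order(&tree).collect();
        assert_eq!(values, vec![2, 1, 3]);
    }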
|
680 | fn clear_known_ambiguous_mtimes( | |
|
681 | &mut self, | |
|
682 | paths: &[impl AsRef<HgPath>], | |
|
683 | ) -> Result<(), DirstateV2ParseError> { | |
|
684 | for path in paths { | |
|
685 | if let Some(node) = Self::get_node_mut( | |
|
686 | self.on_disk, | |
|
687 | &mut self.unreachable_bytes, | |
|
688 | &mut self.root, | |
|
689 | path.as_ref(), | |
|
690 | )? { | |
|
691 | if let NodeData::Entry(entry) = &mut node.data { | |
|
692 | entry.set_possibly_dirty(); | |
|
693 | } | |
|
694 | } | |
|
695 | } | |
|
696 | Ok(()) | |
|
697 | } | |
|
698 | ||
|
699 | 680 | fn count_dropped_path(unreachable_bytes: &mut u32, path: &Cow<HgPath>) { |
|
700 | 681 | if let Cow::Borrowed(path) = path { |
|
701 | 682 | *unreachable_bytes += path.len() as u32 |
|
702 | 683 | } |
|
703 | 684 | } |
|
704 | 685 | } |
|
705 | 686 | |
|
706 | 687 | /// Like `Iterator::filter_map`, but over a fallible iterator of `Result`s. |
|
707 | 688 | /// |
|
708 | 689 | /// The callback is only called for incoming `Ok` values. Errors are passed |
|
709 | 690 | /// through as-is. In order to let it use the `?` operator the callback is |
|
710 | 691 | /// expected to return a `Result` of `Option`, instead of an `Option` of |
|
711 | 692 | /// `Result`. |
|
712 | 693 | fn filter_map_results<'a, I, F, A, B, E>( |
|
713 | 694 | iter: I, |
|
714 | 695 | f: F, |
|
715 | 696 | ) -> impl Iterator<Item = Result<B, E>> + 'a |
|
716 | 697 | where |
|
717 | 698 | I: Iterator<Item = Result<A, E>> + 'a, |
|
718 | 699 | F: Fn(A) -> Result<Option<B>, E> + 'a, |
|
719 | 700 | { |
|
720 | 701 | iter.filter_map(move |result| match result { |
|
721 | 702 | Ok(node) => f(node).transpose(), |
|
722 | 703 | Err(e) => Some(Err(e)), |
|
723 | 704 | }) |
|
724 | 705 | } |
|
725 | 706 | |
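A usage sketch for `filter_map_results`, assuming the function defined above is in scope: keep only even numbers and pass errors through untouched.

    fn main() {
        let input: Vec<Result<u32, String>> =
            vec![Ok(1), Ok(2), Err("bad".to_string()), Ok(4)];
        let output: Vec<Result<u32, String>> =
            filter_map_results(input.into_iter(), |n| {
                // The callback returns Result<Option<_>, _>, so `?` works here.
                Ok(if n % 2 == 0 { Some(n) } else { None })
            })
            .collect();
        assert_eq!(output, vec![Ok(2), Err("bad".to_string()), Ok(4)]);
    }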
|
726 | 707 | impl OwningDirstateMap { |
|
727 | 708 | pub fn clear(&mut self) { |
|
728 | 709 | let map = self.get_map_mut(); |
|
729 | 710 | map.root = Default::default(); |
|
730 | 711 | map.nodes_with_entry_count = 0; |
|
731 | 712 | map.nodes_with_copy_source_count = 0; |
|
732 | 713 | } |
|
733 | 714 | |
|
734 | 715 | pub fn set_entry( |
|
735 | 716 | &mut self, |
|
736 | 717 | filename: &HgPath, |
|
737 | 718 | entry: DirstateEntry, |
|
738 | 719 | ) -> Result<(), DirstateV2ParseError> { |
|
739 | 720 | let map = self.get_map_mut(); |
|
740 | 721 | map.get_or_insert(&filename)?.data = NodeData::Entry(entry); |
|
741 | 722 | Ok(()) |
|
742 | 723 | } |
|
743 | 724 | |
|
744 | 725 | pub fn add_file( |
|
745 | 726 | &mut self, |
|
746 | 727 | filename: &HgPath, |
|
747 | 728 | entry: DirstateEntry, |
|
748 | 729 | ) -> Result<(), DirstateError> { |
|
749 | 730 | let old_state = self.get(filename)?.map(|e| e.state()); |
|
750 | 731 | let map = self.get_map_mut(); |
|
751 | 732 | Ok(map.add_or_remove_file(filename, old_state, entry)?) |
|
752 | 733 | } |
|
753 | 734 | |
|
754 | 735 | pub fn remove_file( |
|
755 | 736 | &mut self, |
|
756 | 737 | filename: &HgPath, |
|
757 | 738 | in_merge: bool, |
|
758 | 739 | ) -> Result<(), DirstateError> { |
|
759 | 740 | let old_entry_opt = self.get(filename)?; |
|
760 | 741 | let old_state = old_entry_opt.map(|e| e.state()); |
|
761 | 742 | let mut size = 0; |
|
762 | 743 | if in_merge { |
|
763 | 744 | // XXX we should not be able to have 'm' state and 'FROM_P2' if not

764 | 745 | // during a merge, so I (marmoute) am not sure we need the

765 | 746 | // conditional at all. Double-checking this with an assert

766 | 747 | // would be nice.
|
767 | 748 | if let Some(old_entry) = old_entry_opt { |
|
768 | 749 | // back up the previous state
|
769 | 750 | if old_entry.state() == EntryState::Merged { |
|
770 | 751 | size = SIZE_NON_NORMAL; |
|
771 | 752 | } else if old_entry.state() == EntryState::Normal |
|
772 | 753 | && old_entry.size() == SIZE_FROM_OTHER_PARENT |
|
773 | 754 | { |
|
774 | 755 | // other parent |
|
775 | 756 | size = SIZE_FROM_OTHER_PARENT; |
|
776 | 757 | } |
|
777 | 758 | } |
|
778 | 759 | } |
|
779 | 760 | if size == 0 { |
|
780 | 761 | self.copy_map_remove(filename)?; |
|
781 | 762 | } |
|
782 | 763 | let map = self.get_map_mut(); |
|
783 | 764 | let entry = DirstateEntry::new_removed(size); |
|
784 | 765 | Ok(map.add_or_remove_file(filename, old_state, entry)?) |
|
785 | 766 | } |
|
786 | 767 | |
|
787 | 768 | pub fn drop_entry_and_copy_source( |
|
788 | 769 | &mut self, |
|
789 | 770 | filename: &HgPath, |
|
790 | 771 | ) -> Result<(), DirstateError> { |
|
791 | 772 | let was_tracked = self |
|
792 | 773 | .get(filename)? |
|
793 | 774 | .map_or(false, |e| e.state().is_tracked()); |
|
794 | 775 | let map = self.get_map_mut(); |
|
795 | 776 | struct Dropped { |
|
796 | 777 | was_tracked: bool, |
|
797 | 778 | had_entry: bool, |
|
798 | 779 | had_copy_source: bool, |
|
799 | 780 | } |
|
800 | 781 | |
|
801 | 782 | /// If this returns `Ok(Some((dropped, removed)))`, then |
|
802 | 783 | /// |
|
803 | 784 | /// * `dropped` is about the leaf node that was at `filename` |
|
804 | 785 | /// * `removed` is whether this particular level of recursion just |
|
805 | 786 | /// removed a node in `nodes`. |
|
806 | 787 | fn recur<'on_disk>( |
|
807 | 788 | on_disk: &'on_disk [u8], |
|
808 | 789 | unreachable_bytes: &mut u32, |
|
809 | 790 | nodes: &mut ChildNodes<'on_disk>, |
|
810 | 791 | path: &HgPath, |
|
811 | 792 | ) -> Result<Option<(Dropped, bool)>, DirstateV2ParseError> { |
|
812 | 793 | let (first_path_component, rest_of_path) = |
|
813 | 794 | path.split_first_component(); |
|
814 | 795 | let nodes = nodes.make_mut(on_disk, unreachable_bytes)?; |
|
815 | 796 | let node = if let Some(node) = nodes.get_mut(first_path_component) |
|
816 | 797 | { |
|
817 | 798 | node |
|
818 | 799 | } else { |
|
819 | 800 | return Ok(None); |
|
820 | 801 | }; |
|
821 | 802 | let dropped; |
|
822 | 803 | if let Some(rest) = rest_of_path { |
|
823 | 804 | if let Some((d, removed)) = recur( |
|
824 | 805 | on_disk, |
|
825 | 806 | unreachable_bytes, |
|
826 | 807 | &mut node.children, |
|
827 | 808 | rest, |
|
828 | 809 | )? { |
|
829 | 810 | dropped = d; |
|
830 | 811 | if dropped.had_entry { |
|
831 | 812 | node.descendants_with_entry_count -= 1; |
|
832 | 813 | } |
|
833 | 814 | if dropped.was_tracked { |
|
834 | 815 | node.tracked_descendants_count -= 1; |
|
835 | 816 | } |
|
836 | 817 | |
|
837 | 818 | // Directory caches must be invalidated when removing a |
|
838 | 819 | // child node |
|
839 | 820 | if removed { |
|
840 | 821 | if let NodeData::CachedDirectory { .. } = &node.data { |
|
841 | 822 | node.data = NodeData::None |
|
842 | 823 | } |
|
843 | 824 | } |
|
844 | 825 | } else { |
|
845 | 826 | return Ok(None); |
|
846 | 827 | } |
|
847 | 828 | } else { |
|
848 | 829 | let had_entry = node.data.has_entry(); |
|
849 | 830 | if had_entry { |
|
850 | 831 | node.data = NodeData::None |
|
851 | 832 | } |
|
852 | 833 | if let Some(source) = &node.copy_source { |
|
853 | 834 | DirstateMap::count_dropped_path(unreachable_bytes, source); |
|
854 | 835 | node.copy_source = None |
|
855 | 836 | } |
|
856 | 837 | dropped = Dropped { |
|
857 | 838 | was_tracked: node |
|
858 | 839 | .data |
|
859 | 840 | .as_entry() |
|
860 | 841 | .map_or(false, |entry| entry.state().is_tracked()), |
|
861 | 842 | had_entry, |
|
862 | 843 | had_copy_source: node.copy_source.take().is_some(), |
|
863 | 844 | }; |
|
864 | 845 | } |
|
865 | 846 | // After recursion, for both leaf (rest_of_path is None) nodes and |
|
866 | 847 | // parent nodes, remove a node if it just became empty. |
|
867 | 848 | let remove = !node.data.has_entry() |
|
868 | 849 | && node.copy_source.is_none() |
|
869 | 850 | && node.children.is_empty(); |
|
870 | 851 | if remove { |
|
871 | 852 | let (key, _) = |
|
872 | 853 | nodes.remove_entry(first_path_component).unwrap(); |
|
873 | 854 | DirstateMap::count_dropped_path( |
|
874 | 855 | unreachable_bytes, |
|
875 | 856 | key.full_path(), |
|
876 | 857 | ) |
|
877 | 858 | } |
|
878 | 859 | Ok(Some((dropped, remove))) |
|
879 | 860 | } |
|
880 | 861 | |
|
881 | 862 | if let Some((dropped, _removed)) = recur( |
|
882 | 863 | map.on_disk, |
|
883 | 864 | &mut map.unreachable_bytes, |
|
884 | 865 | &mut map.root, |
|
885 | 866 | filename, |
|
886 | 867 | )? { |
|
887 | 868 | if dropped.had_entry { |
|
888 | 869 | map.nodes_with_entry_count -= 1 |
|
889 | 870 | } |
|
890 | 871 | if dropped.had_copy_source { |
|
891 | 872 | map.nodes_with_copy_source_count -= 1 |
|
892 | 873 | } |
|
893 | 874 | } else { |
|
894 | 875 | debug_assert!(!was_tracked); |
|
895 | 876 | } |
|
896 | 877 | Ok(()) |
|
897 | 878 | } |
|
898 | 879 | |
|
899 | 880 | pub fn has_tracked_dir( |
|
900 | 881 | &mut self, |
|
901 | 882 | directory: &HgPath, |
|
902 | 883 | ) -> Result<bool, DirstateError> { |
|
903 | 884 | let map = self.get_map_mut(); |
|
904 | 885 | if let Some(node) = map.get_node(directory)? { |
|
905 | 886 | // A node without a `DirstateEntry` was created to hold child |
|
906 | 887 | // nodes, and is therefore a directory. |
|
907 | 888 | let state = node.state()?; |
|
908 | 889 | Ok(state.is_none() && node.tracked_descendants_count() > 0) |
|
909 | 890 | } else { |
|
910 | 891 | Ok(false) |
|
911 | 892 | } |
|
912 | 893 | } |
|
913 | 894 | |
|
914 | 895 | pub fn has_dir( |
|
915 | 896 | &mut self, |
|
916 | 897 | directory: &HgPath, |
|
917 | 898 | ) -> Result<bool, DirstateError> { |
|
918 | 899 | let map = self.get_map_mut(); |
|
919 | 900 | if let Some(node) = map.get_node(directory)? { |
|
920 | 901 | // A node without a `DirstateEntry` was created to hold child |
|
921 | 902 | // nodes, and is therefore a directory. |
|
922 | 903 | let state = node.state()?; |
|
923 | 904 | Ok(state.is_none() && node.descendants_with_entry_count() > 0) |
|
924 | 905 | } else { |
|
925 | 906 | Ok(false) |
|
926 | 907 | } |
|
927 | 908 | } |
|
928 | 909 | |
|
929 | 910 | #[timed] |
|
930 | 911 | pub fn pack_v1( |
|
931 | 912 | &mut self, |
|
932 | 913 | parents: DirstateParents, |
|
933 | now: TruncatedTimestamp, | |
|
934 | 914 | ) -> Result<Vec<u8>, DirstateError> { |
|
935 | 915 | let map = self.get_map_mut(); |
|
936 | let mut ambiguous_mtimes = Vec::new(); | |
|
937 | 916 | // Optimization (to be measured?): pre-compute size to avoid `Vec`
|
938 | 917 | // reallocations |
|
939 | 918 | let mut size = parents.as_bytes().len(); |
|
940 | 919 | for node in map.iter_nodes() { |
|
941 | 920 | let node = node?; |
|
942 | if let Some(entry) = node.entry()? { |

921 | if node.entry()?.is_some() { |
|
943 | 922 | size += packed_entry_size( |
|
944 | 923 | node.full_path(map.on_disk)?, |
|
945 | 924 | node.copy_source(map.on_disk)?, |
|
946 | 925 | ); |
|
947 | if entry.need_delay(now) { | |
|
948 | ambiguous_mtimes.push( | |
|
949 | node.full_path_borrowed(map.on_disk)? | |
|
950 | .detach_from_tree(), | |
|
951 | ) | |
|
952 | } | |
|
953 | 926 | } |
|
954 | 927 | } |
|
955 | map.clear_known_ambiguous_mtimes(&ambiguous_mtimes)?; | |
|
956 | 928 | |
|
957 | 929 | let mut packed = Vec::with_capacity(size); |
|
958 | 930 | packed.extend(parents.as_bytes()); |
|
959 | 931 | |
|
960 | 932 | for node in map.iter_nodes() { |
|
961 | 933 | let node = node?; |
|
962 | 934 | if let Some(entry) = node.entry()? { |
|
963 | 935 | pack_entry( |
|
964 | 936 | node.full_path(map.on_disk)?, |
|
965 | 937 | &entry, |
|
966 | 938 | node.copy_source(map.on_disk)?, |
|
967 | 939 | &mut packed, |
|
968 | 940 | ); |
|
969 | 941 | } |
|
970 | 942 | } |
|
971 | 943 | Ok(packed) |
|
972 | 944 | } |
|
973 | 945 | |
|
974 | 946 | /// Returns new data and metadata together with whether that data should be |
|
975 | 947 | /// appended to the existing data file whose content is at |
|
976 | 948 | /// `map.on_disk` (true), instead of written to a new data file |
|
977 | 949 | /// (false). |
|
978 | 950 | #[timed] |
|
979 | 951 | pub fn pack_v2( |
|
980 | 952 | &mut self, |
|
981 | now: TruncatedTimestamp, | |
|
982 | 953 | can_append: bool, |
|
983 | 954 | ) -> Result<(Vec<u8>, Vec<u8>, bool), DirstateError> { |
|
984 | 955 | let map = self.get_map_mut(); |
|
985 | let mut paths = Vec::new(); | |
|
986 | for node in map.iter_nodes() { | |
|
987 | let node = node?; | |
|
988 | if let Some(entry) = node.entry()? { | |
|
989 | if entry.need_delay(now) { | |
|
990 | paths.push( | |
|
991 | node.full_path_borrowed(map.on_disk)? | |
|
992 | .detach_from_tree(), | |
|
993 | ) | |
|
994 | } | |
|
995 | } | |
|
996 | } | |
|
997 | // Borrow of `self` ends here since we collect cloned paths | |
|
998 | ||
|
999 | map.clear_known_ambiguous_mtimes(&paths)?; | |
|
1000 | ||
|
1001 | 956 | on_disk::write(map, can_append) |
|
1002 | 957 | } |
|
1003 | 958 | |
|
1004 | 959 | pub fn status<'a>( |
|
1005 | 960 | &'a mut self, |
|
1006 | 961 | matcher: &'a (dyn Matcher + Sync), |
|
1007 | 962 | root_dir: PathBuf, |
|
1008 | 963 | ignore_files: Vec<PathBuf>, |
|
1009 | 964 | options: StatusOptions, |
|
1010 | 965 | ) -> Result<(DirstateStatus<'a>, Vec<PatternFileWarning>), StatusError> |
|
1011 | 966 | { |
|
1012 | 967 | let map = self.get_map_mut(); |
|
1013 | 968 | super::status::status(map, matcher, root_dir, ignore_files, options) |
|
1014 | 969 | } |
|
1015 | 970 | |
|
1016 | 971 | pub fn copy_map_len(&self) -> usize { |
|
1017 | 972 | let map = self.get_map(); |
|
1018 | 973 | map.nodes_with_copy_source_count as usize |
|
1019 | 974 | } |
|
1020 | 975 | |
|
1021 | 976 | pub fn copy_map_iter(&self) -> CopyMapIter<'_> { |
|
1022 | 977 | let map = self.get_map(); |
|
1023 | 978 | Box::new(filter_map_results(map.iter_nodes(), move |node| { |
|
1024 | 979 | Ok(if let Some(source) = node.copy_source(map.on_disk)? { |
|
1025 | 980 | Some((node.full_path(map.on_disk)?, source)) |
|
1026 | 981 | } else { |
|
1027 | 982 | None |
|
1028 | 983 | }) |
|
1029 | 984 | })) |
|
1030 | 985 | } |
|
1031 | 986 | |
|
1032 | 987 | pub fn copy_map_contains_key( |
|
1033 | 988 | &self, |
|
1034 | 989 | key: &HgPath, |
|
1035 | 990 | ) -> Result<bool, DirstateV2ParseError> { |
|
1036 | 991 | let map = self.get_map(); |
|
1037 | 992 | Ok(if let Some(node) = map.get_node(key)? { |
|
1038 | 993 | node.has_copy_source() |
|
1039 | 994 | } else { |
|
1040 | 995 | false |
|
1041 | 996 | }) |
|
1042 | 997 | } |
|
1043 | 998 | |
|
1044 | 999 | pub fn copy_map_get( |
|
1045 | 1000 | &self, |
|
1046 | 1001 | key: &HgPath, |
|
1047 | 1002 | ) -> Result<Option<&HgPath>, DirstateV2ParseError> { |
|
1048 | 1003 | let map = self.get_map(); |
|
1049 | 1004 | if let Some(node) = map.get_node(key)? { |
|
1050 | 1005 | if let Some(source) = node.copy_source(map.on_disk)? { |
|
1051 | 1006 | return Ok(Some(source)); |
|
1052 | 1007 | } |
|
1053 | 1008 | } |
|
1054 | 1009 | Ok(None) |
|
1055 | 1010 | } |
|
1056 | 1011 | |
|
1057 | 1012 | pub fn copy_map_remove( |
|
1058 | 1013 | &mut self, |
|
1059 | 1014 | key: &HgPath, |
|
1060 | 1015 | ) -> Result<Option<HgPathBuf>, DirstateV2ParseError> { |
|
1061 | 1016 | let map = self.get_map_mut(); |
|
1062 | 1017 | let count = &mut map.nodes_with_copy_source_count; |
|
1063 | 1018 | let unreachable_bytes = &mut map.unreachable_bytes; |
|
1064 | 1019 | Ok(DirstateMap::get_node_mut( |
|
1065 | 1020 | map.on_disk, |
|
1066 | 1021 | unreachable_bytes, |
|
1067 | 1022 | &mut map.root, |
|
1068 | 1023 | key, |
|
1069 | 1024 | )? |
|
1070 | 1025 | .and_then(|node| { |
|
1071 | 1026 | if let Some(source) = &node.copy_source { |
|
1072 | 1027 | *count -= 1; |
|
1073 | 1028 | DirstateMap::count_dropped_path(unreachable_bytes, source); |
|
1074 | 1029 | } |
|
1075 | 1030 | node.copy_source.take().map(Cow::into_owned) |
|
1076 | 1031 | })) |
|
1077 | 1032 | } |
|
1078 | 1033 | |
|
1079 | 1034 | pub fn copy_map_insert( |
|
1080 | 1035 | &mut self, |
|
1081 | 1036 | key: HgPathBuf, |
|
1082 | 1037 | value: HgPathBuf, |
|
1083 | 1038 | ) -> Result<Option<HgPathBuf>, DirstateV2ParseError> { |
|
1084 | 1039 | let map = self.get_map_mut(); |
|
1085 | 1040 | let node = DirstateMap::get_or_insert_node( |
|
1086 | 1041 | map.on_disk, |
|
1087 | 1042 | &mut map.unreachable_bytes, |
|
1088 | 1043 | &mut map.root, |
|
1089 | 1044 | &key, |
|
1090 | 1045 | WithBasename::to_cow_owned, |
|
1091 | 1046 | |_ancestor| {}, |
|
1092 | 1047 | )?; |
|
1093 | 1048 | if node.copy_source.is_none() { |
|
1094 | 1049 | map.nodes_with_copy_source_count += 1 |
|
1095 | 1050 | } |
|
1096 | 1051 | Ok(node.copy_source.replace(value.into()).map(Cow::into_owned)) |
|
1097 | 1052 | } |
|
1098 | 1053 | |
|
1099 | 1054 | pub fn len(&self) -> usize { |
|
1100 | 1055 | let map = self.get_map(); |
|
1101 | 1056 | map.nodes_with_entry_count as usize |
|
1102 | 1057 | } |
|
1103 | 1058 | |
|
1104 | 1059 | pub fn contains_key( |
|
1105 | 1060 | &self, |
|
1106 | 1061 | key: &HgPath, |
|
1107 | 1062 | ) -> Result<bool, DirstateV2ParseError> { |
|
1108 | 1063 | Ok(self.get(key)?.is_some()) |
|
1109 | 1064 | } |
|
1110 | 1065 | |
|
1111 | 1066 | pub fn get( |
|
1112 | 1067 | &self, |
|
1113 | 1068 | key: &HgPath, |
|
1114 | 1069 | ) -> Result<Option<DirstateEntry>, DirstateV2ParseError> { |
|
1115 | 1070 | let map = self.get_map(); |
|
1116 | 1071 | Ok(if let Some(node) = map.get_node(key)? { |
|
1117 | 1072 | node.entry()? |
|
1118 | 1073 | } else { |
|
1119 | 1074 | None |
|
1120 | 1075 | }) |
|
1121 | 1076 | } |
|
1122 | 1077 | |
|
1123 | 1078 | pub fn iter(&self) -> StateMapIter<'_> { |
|
1124 | 1079 | let map = self.get_map(); |
|
1125 | 1080 | Box::new(filter_map_results(map.iter_nodes(), move |node| { |
|
1126 | 1081 | Ok(if let Some(entry) = node.entry()? { |
|
1127 | 1082 | Some((node.full_path(map.on_disk)?, entry)) |
|
1128 | 1083 | } else { |
|
1129 | 1084 | None |
|
1130 | 1085 | }) |
|
1131 | 1086 | })) |
|
1132 | 1087 | } |
|
1133 | 1088 | |
|
1134 | 1089 | pub fn iter_tracked_dirs( |
|
1135 | 1090 | &mut self, |
|
1136 | 1091 | ) -> Result< |
|
1137 | 1092 | Box< |
|
1138 | 1093 | dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> |
|
1139 | 1094 | + Send |
|
1140 | 1095 | + '_, |
|
1141 | 1096 | >, |
|
1142 | 1097 | DirstateError, |
|
1143 | 1098 | > { |
|
1144 | 1099 | let map = self.get_map_mut(); |
|
1145 | 1100 | let on_disk = map.on_disk; |
|
1146 | 1101 | Ok(Box::new(filter_map_results( |
|
1147 | 1102 | map.iter_nodes(), |
|
1148 | 1103 | move |node| { |
|
1149 | 1104 | Ok(if node.tracked_descendants_count() > 0 { |
|
1150 | 1105 | Some(node.full_path(on_disk)?) |
|
1151 | 1106 | } else { |
|
1152 | 1107 | None |
|
1153 | 1108 | }) |
|
1154 | 1109 | }, |
|
1155 | 1110 | ))) |
|
1156 | 1111 | } |
|
1157 | 1112 | |
|
1158 | 1113 | pub fn debug_iter( |
|
1159 | 1114 | &self, |
|
1160 | 1115 | all: bool, |
|
1161 | 1116 | ) -> Box< |
|
1162 | 1117 | dyn Iterator< |
|
1163 | 1118 | Item = Result< |
|
1164 | 1119 | (&HgPath, (u8, i32, i32, i32)), |
|
1165 | 1120 | DirstateV2ParseError, |
|
1166 | 1121 | >, |
|
1167 | 1122 | > + Send |
|
1168 | 1123 | + '_, |
|
1169 | 1124 | > { |
|
1170 | 1125 | let map = self.get_map(); |
|
1171 | 1126 | Box::new(filter_map_results(map.iter_nodes(), move |node| { |
|
1172 | 1127 | let debug_tuple = if let Some(entry) = node.entry()? { |
|
1173 | 1128 | entry.debug_tuple() |
|
1174 | 1129 | } else if !all { |
|
1175 | 1130 | return Ok(None); |
|
1176 | 1131 | } else if let Some(mtime) = node.cached_directory_mtime()? { |
|
1177 | 1132 | (b' ', 0, -1, mtime.truncated_seconds() as i32) |
|
1178 | 1133 | } else { |
|
1179 | 1134 | (b' ', 0, -1, -1) |
|
1180 | 1135 | }; |
|
1181 | 1136 | Ok(Some((node.full_path(map.on_disk)?, debug_tuple))) |
|
1182 | 1137 | })) |
|
1183 | 1138 | } |
|
1184 | 1139 | } |
@@ -1,505 +1,499 b'' | |||
|
1 | 1 | // dirstate_map.rs |
|
2 | 2 | // |
|
3 | 3 | // Copyright 2019 Raphaël Gomès <rgomes@octobus.net> |
|
4 | 4 | // |
|
5 | 5 | // This software may be used and distributed according to the terms of the |
|
6 | 6 | // GNU General Public License version 2 or any later version. |
|
7 | 7 | |
|
8 | 8 | //! Bindings for the `hg::dirstate::dirstate_map` file provided by the |
|
9 | 9 | //! `hg-core` package. |
|
10 | 10 | |
|
11 | 11 | use std::cell::{RefCell, RefMut}; |
|
12 | 12 | use std::convert::TryInto; |
|
13 | 13 | |
|
14 | 14 | use cpython::{ |
|
15 | 15 | exc, PyBool, PyBytes, PyClone, PyDict, PyErr, PyList, PyNone, PyObject, |
|
16 | 16 | PyResult, Python, PythonObject, ToPyObject, UnsafePyLeaked, |
|
17 | 17 | }; |
|
18 | 18 | |
|
19 | 19 | use crate::{ |
|
20 | 20 | dirstate::copymap::{CopyMap, CopyMapItemsIterator, CopyMapKeysIterator}, |
|
21 | dirstate::item::{timestamp, DirstateItem}, |

21 | dirstate::item::DirstateItem, |
|
22 | 22 | pybytes_deref::PyBytesDeref, |
|
23 | 23 | }; |
|
24 | 24 | use hg::{ |
|
25 | 25 | dirstate::StateMapIter, |
|
26 | 26 | dirstate_tree::dirstate_map::DirstateMap as TreeDirstateMap, |
|
27 | 27 | dirstate_tree::on_disk::DirstateV2ParseError, |
|
28 | 28 | dirstate_tree::owning::OwningDirstateMap, |
|
29 | 29 | revlog::Node, |
|
30 | 30 | utils::files::normalize_case, |
|
31 | 31 | utils::hg_path::{HgPath, HgPathBuf}, |
|
32 | 32 | DirstateEntry, DirstateError, DirstateParents, EntryState, |
|
33 | 33 | }; |
|
34 | 34 | |
|
35 | 35 | // TODO |
|
36 | 36 | // This object needs to share references to multiple members of its Rust |
|
37 | 37 | // inner struct, namely `copy_map`, `dirs` and `all_dirs`. |
|
38 | 38 | // Right now `CopyMap` is done, but it needs to have an explicit reference |
|
39 | 39 | // to `RustDirstateMap` which itself needs to have an encapsulation for |
|
40 | 40 | // every method in `CopyMap` (copymapcopy, etc.). |
|
41 | 41 | // This is ugly and hard to maintain. |
|
42 | 42 | // The same logic applies to `dirs` and `all_dirs`, however the `Dirs` |
|
43 | 43 | // `py_class!` is already implemented and does not mention |
|
44 | 44 | // `RustDirstateMap`, rightfully so. |
|
45 | 45 | // All attributes also have to have a separate refcount data attribute for |
|
46 | 46 | // leaks, with all methods that go along for reference sharing. |
|
47 | 47 | py_class!(pub class DirstateMap |py| { |
|
48 | 48 | @shared data inner: OwningDirstateMap; |
|
49 | 49 | |
|
50 | 50 | /// Returns a `(dirstate_map, parents)` tuple |
|
51 | 51 | @staticmethod |
|
52 | 52 | def new_v1( |
|
53 | 53 | on_disk: PyBytes, |
|
54 | 54 | ) -> PyResult<PyObject> { |
|
55 | 55 | let on_disk = PyBytesDeref::new(py, on_disk); |
|
56 | 56 | let mut map = OwningDirstateMap::new_empty(on_disk); |
|
57 | 57 | let (on_disk, map_placeholder) = map.get_pair_mut(); |
|
58 | 58 | |
|
59 | 59 | let (actual_map, parents) = TreeDirstateMap::new_v1(on_disk) |
|
60 | 60 | .map_err(|e| dirstate_error(py, e))?; |
|
61 | 61 | *map_placeholder = actual_map; |
|
62 | 62 | let map = Self::create_instance(py, map)?; |
|
63 | 63 | let parents = parents.map(|p| { |
|
64 | 64 | let p1 = PyBytes::new(py, p.p1.as_bytes()); |
|
65 | 65 | let p2 = PyBytes::new(py, p.p2.as_bytes()); |
|
66 | 66 | (p1, p2) |
|
67 | 67 | }); |
|
68 | 68 | Ok((map, parents).to_py_object(py).into_object()) |
|
69 | 69 | } |
|
70 | 70 | |
|
71 | 71 | /// Returns a DirstateMap |
|
72 | 72 | @staticmethod |
|
73 | 73 | def new_v2( |
|
74 | 74 | on_disk: PyBytes, |
|
75 | 75 | data_size: usize, |
|
76 | 76 | tree_metadata: PyBytes, |
|
77 | 77 | ) -> PyResult<PyObject> { |
|
78 | 78 | let dirstate_error = |e: DirstateError| { |
|
79 | 79 | PyErr::new::<exc::OSError, _>(py, format!("Dirstate error: {:?}", e)) |
|
80 | 80 | }; |
|
81 | 81 | let on_disk = PyBytesDeref::new(py, on_disk); |
|
82 | 82 | let mut map = OwningDirstateMap::new_empty(on_disk); |
|
83 | 83 | let (on_disk, map_placeholder) = map.get_pair_mut(); |
|
84 | 84 | *map_placeholder = TreeDirstateMap::new_v2( |
|
85 | 85 | on_disk, data_size, tree_metadata.data(py), |
|
86 | 86 | ).map_err(dirstate_error)?; |
|
87 | 87 | let map = Self::create_instance(py, map)?; |
|
88 | 88 | Ok(map.into_object()) |
|
89 | 89 | } |
|
90 | 90 | |
|
91 | 91 | def clear(&self) -> PyResult<PyObject> { |
|
92 | 92 | self.inner(py).borrow_mut().clear(); |
|
93 | 93 | Ok(py.None()) |
|
94 | 94 | } |
|
95 | 95 | |
|
96 | 96 | def get( |
|
97 | 97 | &self, |
|
98 | 98 | key: PyObject, |
|
99 | 99 | default: Option<PyObject> = None |
|
100 | 100 | ) -> PyResult<Option<PyObject>> { |
|
101 | 101 | let key = key.extract::<PyBytes>(py)?; |
|
102 | 102 | match self |
|
103 | 103 | .inner(py) |
|
104 | 104 | .borrow() |
|
105 | 105 | .get(HgPath::new(key.data(py))) |
|
106 | 106 | .map_err(|e| v2_error(py, e))? |
|
107 | 107 | { |
|
108 | 108 | Some(entry) => { |
|
109 | 109 | Ok(Some(DirstateItem::new_as_pyobject(py, entry)?)) |
|
110 | 110 | }, |
|
111 | 111 | None => Ok(default) |
|
112 | 112 | } |
|
113 | 113 | } |
|
114 | 114 | |
|
115 | 115 | def set_dirstate_item( |
|
116 | 116 | &self, |
|
117 | 117 | path: PyObject, |
|
118 | 118 | item: DirstateItem |
|
119 | 119 | ) -> PyResult<PyObject> { |
|
120 | 120 | let f = path.extract::<PyBytes>(py)?; |
|
121 | 121 | let filename = HgPath::new(f.data(py)); |
|
122 | 122 | self.inner(py) |
|
123 | 123 | .borrow_mut() |
|
124 | 124 | .set_entry(filename, item.get_entry(py)) |
|
125 | 125 | .map_err(|e| v2_error(py, e))?; |
|
126 | 126 | Ok(py.None()) |
|
127 | 127 | } |
|
128 | 128 | |
|
129 | 129 | def addfile( |
|
130 | 130 | &self, |
|
131 | 131 | f: PyBytes, |
|
132 | 132 | item: DirstateItem, |
|
133 | 133 | ) -> PyResult<PyNone> { |
|
134 | 134 | let filename = HgPath::new(f.data(py)); |
|
135 | 135 | let entry = item.get_entry(py); |
|
136 | 136 | self.inner(py) |
|
137 | 137 | .borrow_mut() |
|
138 | 138 | .add_file(filename, entry) |
|
139 | 139 | .map_err(|e| dirstate_error(py, e))?; |
|
140 | 140 | Ok(PyNone) |
|
141 | 141 | } |
|
142 | 142 | |
|
143 | 143 | def removefile( |
|
144 | 144 | &self, |
|
145 | 145 | f: PyObject, |
|
146 | 146 | in_merge: PyObject |
|
147 | 147 | ) -> PyResult<PyObject> { |
|
148 | 148 | self.inner(py).borrow_mut() |
|
149 | 149 | .remove_file( |
|
150 | 150 | HgPath::new(f.extract::<PyBytes>(py)?.data(py)), |
|
151 | 151 | in_merge.extract::<PyBool>(py)?.is_true(), |
|
152 | 152 | ) |
|
153 | 153 | .or_else(|_| { |
|
154 | 154 | Err(PyErr::new::<exc::OSError, _>( |
|
155 | 155 | py, |
|
156 | 156 | "Dirstate error".to_string(), |
|
157 | 157 | )) |
|
158 | 158 | })?; |
|
159 | 159 | Ok(py.None()) |
|
160 | 160 | } |
|
161 | 161 | |
|
162 | 162 | def drop_item_and_copy_source( |
|
163 | 163 | &self, |
|
164 | 164 | f: PyBytes, |
|
165 | 165 | ) -> PyResult<PyNone> { |
|
166 | 166 | self.inner(py) |
|
167 | 167 | .borrow_mut() |
|
168 | 168 | .drop_entry_and_copy_source(HgPath::new(f.data(py))) |
|
169 | 169 | .map_err(|e| dirstate_error(py, e))?; |
|
170 | 170 | Ok(PyNone) |
|
171 | 171 | } |
|
172 | 172 | |
|
173 | 173 | def hastrackeddir(&self, d: PyObject) -> PyResult<PyBool> { |
|
174 | 174 | let d = d.extract::<PyBytes>(py)?; |
|
175 | 175 | Ok(self.inner(py).borrow_mut() |
|
176 | 176 | .has_tracked_dir(HgPath::new(d.data(py))) |
|
177 | 177 | .map_err(|e| { |
|
178 | 178 | PyErr::new::<exc::ValueError, _>(py, e.to_string()) |
|
179 | 179 | })? |
|
180 | 180 | .to_py_object(py)) |
|
181 | 181 | } |
|
182 | 182 | |
|
183 | 183 | def hasdir(&self, d: PyObject) -> PyResult<PyBool> { |
|
184 | 184 | let d = d.extract::<PyBytes>(py)?; |
|
185 | 185 | Ok(self.inner(py).borrow_mut() |
|
186 | 186 | .has_dir(HgPath::new(d.data(py))) |
|
187 | 187 | .map_err(|e| { |
|
188 | 188 | PyErr::new::<exc::ValueError, _>(py, e.to_string()) |
|
189 | 189 | })? |
|
190 | 190 | .to_py_object(py)) |
|
191 | 191 | } |
|
192 | 192 | |
|
193 | 193 | def write_v1( |
|
194 | 194 | &self, |
|
195 | 195 | p1: PyObject, |
|
196 | 196 | p2: PyObject, |
|
197 | now: (u32, u32) | |
|
198 | 197 | ) -> PyResult<PyBytes> { |
|
199 | let now = timestamp(py, now)?; | |
|
200 | ||
|
201 | 198 | let mut inner = self.inner(py).borrow_mut(); |
|
202 | 199 | let parents = DirstateParents { |
|
203 | 200 | p1: extract_node_id(py, &p1)?, |
|
204 | 201 | p2: extract_node_id(py, &p2)?, |
|
205 | 202 | }; |
|
206 | let result = inner.pack_v1(parents, now); |

203 | let result = inner.pack_v1(parents); |
|
207 | 204 | match result { |
|
208 | 205 | Ok(packed) => Ok(PyBytes::new(py, &packed)), |
|
209 | 206 | Err(_) => Err(PyErr::new::<exc::OSError, _>( |
|
210 | 207 | py, |
|
211 | 208 | "Dirstate error".to_string(), |
|
212 | 209 | )), |
|
213 | 210 | } |
|
214 | 211 | } |
|
215 | 212 | |
|
216 | 213 | /// Returns new data together with whether that data should be appended to |
|
217 | 214 | /// the existing data file whose content is at `self.on_disk` (True), |
|
218 | 215 | /// instead of written to a new data file (False). |
|
219 | 216 | def write_v2( |
|
220 | 217 | &self, |
|
221 | now: (u32, u32), | |
|
222 | 218 | can_append: bool, |
|
223 | 219 | ) -> PyResult<PyObject> { |
|
224 | let now = timestamp(py, now)?; | |
|
225 | ||
|
226 | 220 | let mut inner = self.inner(py).borrow_mut(); |
|
227 | let result = inner.pack_v2(now, can_append); |

221 | let result = inner.pack_v2(can_append); |
|
228 | 222 | match result { |
|
229 | 223 | Ok((packed, tree_metadata, append)) => { |
|
230 | 224 | let packed = PyBytes::new(py, &packed); |
|
231 | 225 | let tree_metadata = PyBytes::new(py, &tree_metadata); |
|
232 | 226 | let tuple = (packed, tree_metadata, append); |
|
233 | 227 | Ok(tuple.to_py_object(py).into_object()) |
|
234 | 228 | }, |
|
235 | 229 | Err(_) => Err(PyErr::new::<exc::OSError, _>( |
|
236 | 230 | py, |
|
237 | 231 | "Dirstate error".to_string(), |
|
238 | 232 | )), |
|
239 | 233 | } |
|
240 | 234 | } |
|
241 | 235 | |
|
242 | 236 | def filefoldmapasdict(&self) -> PyResult<PyDict> { |
|
243 | 237 | let dict = PyDict::new(py); |
|
244 | 238 | for item in self.inner(py).borrow_mut().iter() { |
|
245 | 239 | let (path, entry) = item.map_err(|e| v2_error(py, e))?; |
|
246 | 240 | if entry.state() != EntryState::Removed { |
|
247 | 241 | let key = normalize_case(path); |
|
248 | 242 | let value = path; |
|
249 | 243 | dict.set_item( |
|
250 | 244 | py, |
|
251 | 245 | PyBytes::new(py, key.as_bytes()).into_object(), |
|
252 | 246 | PyBytes::new(py, value.as_bytes()).into_object(), |
|
253 | 247 | )?; |
|
254 | 248 | } |
|
255 | 249 | } |
|
256 | 250 | Ok(dict) |
|
257 | 251 | } |
|
258 | 252 | |
|
259 | 253 | def __len__(&self) -> PyResult<usize> { |
|
260 | 254 | Ok(self.inner(py).borrow().len()) |
|
261 | 255 | } |
|
262 | 256 | |
|
263 | 257 | def __contains__(&self, key: PyObject) -> PyResult<bool> { |
|
264 | 258 | let key = key.extract::<PyBytes>(py)?; |
|
265 | 259 | self.inner(py) |
|
266 | 260 | .borrow() |
|
267 | 261 | .contains_key(HgPath::new(key.data(py))) |
|
268 | 262 | .map_err(|e| v2_error(py, e)) |
|
269 | 263 | } |
|
270 | 264 | |
|
271 | 265 | def __getitem__(&self, key: PyObject) -> PyResult<PyObject> { |
|
272 | 266 | let key = key.extract::<PyBytes>(py)?; |
|
273 | 267 | let key = HgPath::new(key.data(py)); |
|
274 | 268 | match self |
|
275 | 269 | .inner(py) |
|
276 | 270 | .borrow() |
|
277 | 271 | .get(key) |
|
278 | 272 | .map_err(|e| v2_error(py, e))? |
|
279 | 273 | { |
|
280 | 274 | Some(entry) => { |
|
281 | 275 | Ok(DirstateItem::new_as_pyobject(py, entry)?) |
|
282 | 276 | }, |
|
283 | 277 | None => Err(PyErr::new::<exc::KeyError, _>( |
|
284 | 278 | py, |
|
285 | 279 | String::from_utf8_lossy(key.as_bytes()), |
|
286 | 280 | )), |
|
287 | 281 | } |
|
288 | 282 | } |
|
289 | 283 | |
|
290 | 284 | def keys(&self) -> PyResult<DirstateMapKeysIterator> { |
|
291 | 285 | let leaked_ref = self.inner(py).leak_immutable(); |
|
292 | 286 | DirstateMapKeysIterator::from_inner( |
|
293 | 287 | py, |
|
294 | 288 | unsafe { leaked_ref.map(py, |o| o.iter()) }, |
|
295 | 289 | ) |
|
296 | 290 | } |
|
297 | 291 | |
|
298 | 292 | def items(&self) -> PyResult<DirstateMapItemsIterator> { |
|
299 | 293 | let leaked_ref = self.inner(py).leak_immutable(); |
|
300 | 294 | DirstateMapItemsIterator::from_inner( |
|
301 | 295 | py, |
|
302 | 296 | unsafe { leaked_ref.map(py, |o| o.iter()) }, |
|
303 | 297 | ) |
|
304 | 298 | } |
|
305 | 299 | |
|
306 | 300 | def __iter__(&self) -> PyResult<DirstateMapKeysIterator> { |
|
307 | 301 | let leaked_ref = self.inner(py).leak_immutable(); |
|
308 | 302 | DirstateMapKeysIterator::from_inner( |
|
309 | 303 | py, |
|
310 | 304 | unsafe { leaked_ref.map(py, |o| o.iter()) }, |
|
311 | 305 | ) |
|
312 | 306 | } |
|
313 | 307 | |
|
314 | 308 | // TODO all copymap* methods, see docstring above |
|
315 | 309 | def copymapcopy(&self) -> PyResult<PyDict> { |
|
316 | 310 | let dict = PyDict::new(py); |
|
317 | 311 | for item in self.inner(py).borrow().copy_map_iter() { |
|
318 | 312 | let (key, value) = item.map_err(|e| v2_error(py, e))?; |
|
319 | 313 | dict.set_item( |
|
320 | 314 | py, |
|
321 | 315 | PyBytes::new(py, key.as_bytes()), |
|
322 | 316 | PyBytes::new(py, value.as_bytes()), |
|
323 | 317 | )?; |
|
324 | 318 | } |
|
325 | 319 | Ok(dict) |
|
326 | 320 | } |
|
327 | 321 | |
|
328 | 322 | def copymapgetitem(&self, key: PyObject) -> PyResult<PyBytes> { |
|
329 | 323 | let key = key.extract::<PyBytes>(py)?; |
|
330 | 324 | match self |
|
331 | 325 | .inner(py) |
|
332 | 326 | .borrow() |
|
333 | 327 | .copy_map_get(HgPath::new(key.data(py))) |
|
334 | 328 | .map_err(|e| v2_error(py, e))? |
|
335 | 329 | { |
|
336 | 330 | Some(copy) => Ok(PyBytes::new(py, copy.as_bytes())), |
|
337 | 331 | None => Err(PyErr::new::<exc::KeyError, _>( |
|
338 | 332 | py, |
|
339 | 333 | String::from_utf8_lossy(key.data(py)), |
|
340 | 334 | )), |
|
341 | 335 | } |
|
342 | 336 | } |
|
343 | 337 | def copymap(&self) -> PyResult<CopyMap> { |
|
344 | 338 | CopyMap::from_inner(py, self.clone_ref(py)) |
|
345 | 339 | } |
|
346 | 340 | |
|
347 | 341 | def copymaplen(&self) -> PyResult<usize> { |
|
348 | 342 | Ok(self.inner(py).borrow().copy_map_len()) |
|
349 | 343 | } |
|
350 | 344 | def copymapcontains(&self, key: PyObject) -> PyResult<bool> { |
|
351 | 345 | let key = key.extract::<PyBytes>(py)?; |
|
352 | 346 | self.inner(py) |
|
353 | 347 | .borrow() |
|
354 | 348 | .copy_map_contains_key(HgPath::new(key.data(py))) |
|
355 | 349 | .map_err(|e| v2_error(py, e)) |
|
356 | 350 | } |
|
357 | 351 | def copymapget( |
|
358 | 352 | &self, |
|
359 | 353 | key: PyObject, |
|
360 | 354 | default: Option<PyObject> |
|
361 | 355 | ) -> PyResult<Option<PyObject>> { |
|
362 | 356 | let key = key.extract::<PyBytes>(py)?; |
|
363 | 357 | match self |
|
364 | 358 | .inner(py) |
|
365 | 359 | .borrow() |
|
366 | 360 | .copy_map_get(HgPath::new(key.data(py))) |
|
367 | 361 | .map_err(|e| v2_error(py, e))? |
|
368 | 362 | { |
|
369 | 363 | Some(copy) => Ok(Some( |
|
370 | 364 | PyBytes::new(py, copy.as_bytes()).into_object(), |
|
371 | 365 | )), |
|
372 | 366 | None => Ok(default), |
|
373 | 367 | } |
|
374 | 368 | } |
|
375 | 369 | def copymapsetitem( |
|
376 | 370 | &self, |
|
377 | 371 | key: PyObject, |
|
378 | 372 | value: PyObject |
|
379 | 373 | ) -> PyResult<PyObject> { |
|
380 | 374 | let key = key.extract::<PyBytes>(py)?; |
|
381 | 375 | let value = value.extract::<PyBytes>(py)?; |
|
382 | 376 | self.inner(py) |
|
383 | 377 | .borrow_mut() |
|
384 | 378 | .copy_map_insert( |
|
385 | 379 | HgPathBuf::from_bytes(key.data(py)), |
|
386 | 380 | HgPathBuf::from_bytes(value.data(py)), |
|
387 | 381 | ) |
|
388 | 382 | .map_err(|e| v2_error(py, e))?; |
|
389 | 383 | Ok(py.None()) |
|
390 | 384 | } |
|
391 | 385 | def copymappop( |
|
392 | 386 | &self, |
|
393 | 387 | key: PyObject, |
|
394 | 388 | default: Option<PyObject> |
|
395 | 389 | ) -> PyResult<Option<PyObject>> { |
|
396 | 390 | let key = key.extract::<PyBytes>(py)?; |
|
397 | 391 | match self |
|
398 | 392 | .inner(py) |
|
399 | 393 | .borrow_mut() |
|
400 | 394 | .copy_map_remove(HgPath::new(key.data(py))) |
|
401 | 395 | .map_err(|e| v2_error(py, e))? |
|
402 | 396 | { |
|
403 | 397 | Some(copy) => Ok(Some( |
|
404 | 398 | PyBytes::new(py, copy.as_bytes()).into_object(), |
|
405 | 399 | )), |
|
406 | 400 | None => Ok(default), |
|
407 | 401 | } |
|
408 | 402 | } |
|
409 | 403 | |
|
410 | 404 | def copymapiter(&self) -> PyResult<CopyMapKeysIterator> { |
|
411 | 405 | let leaked_ref = self.inner(py).leak_immutable(); |
|
412 | 406 | CopyMapKeysIterator::from_inner( |
|
413 | 407 | py, |
|
414 | 408 | unsafe { leaked_ref.map(py, |o| o.copy_map_iter()) }, |
|
415 | 409 | ) |
|
416 | 410 | } |
|
417 | 411 | |
|
418 | 412 | def copymapitemsiter(&self) -> PyResult<CopyMapItemsIterator> { |
|
419 | 413 | let leaked_ref = self.inner(py).leak_immutable(); |
|
420 | 414 | CopyMapItemsIterator::from_inner( |
|
421 | 415 | py, |
|
422 | 416 | unsafe { leaked_ref.map(py, |o| o.copy_map_iter()) }, |
|
423 | 417 | ) |
|
424 | 418 | } |
|
425 | 419 | |
|
426 | 420 | def tracked_dirs(&self) -> PyResult<PyList> { |
|
427 | 421 | let dirs = PyList::new(py, &[]); |
|
428 | 422 | for path in self.inner(py).borrow_mut().iter_tracked_dirs() |
|
429 | 423 | .map_err(|e| dirstate_error(py, e))? |
|
430 | 424 | { |
|
431 | 425 | let path = path.map_err(|e| v2_error(py, e))?; |
|
432 | 426 | let path = PyBytes::new(py, path.as_bytes()); |
|
433 | 427 | dirs.append(py, path.into_object()) |
|
434 | 428 | } |
|
435 | 429 | Ok(dirs) |
|
436 | 430 | } |
|
437 | 431 | |
|
438 | 432 | def debug_iter(&self, all: bool) -> PyResult<PyList> { |
|
439 | 433 | let dirs = PyList::new(py, &[]); |
|
440 | 434 | for item in self.inner(py).borrow().debug_iter(all) { |
|
441 | 435 | let (path, (state, mode, size, mtime)) = |
|
442 | 436 | item.map_err(|e| v2_error(py, e))?; |
|
443 | 437 | let path = PyBytes::new(py, path.as_bytes()); |
|
444 | 438 | let item = (path, state, mode, size, mtime); |
|
445 | 439 | dirs.append(py, item.to_py_object(py).into_object()) |
|
446 | 440 | } |
|
447 | 441 | Ok(dirs) |
|
448 | 442 | } |
|
449 | 443 | }); |
|
450 | 444 | |
|
451 | 445 | impl DirstateMap { |
|
452 | 446 | pub fn get_inner_mut<'a>( |
|
453 | 447 | &'a self, |
|
454 | 448 | py: Python<'a>, |
|
455 | 449 | ) -> RefMut<'a, OwningDirstateMap> { |
|
456 | 450 | self.inner(py).borrow_mut() |
|
457 | 451 | } |
|
458 | 452 | fn translate_key( |
|
459 | 453 | py: Python, |
|
460 | 454 | res: Result<(&HgPath, DirstateEntry), DirstateV2ParseError>, |
|
461 | 455 | ) -> PyResult<Option<PyBytes>> { |
|
462 | 456 | let (f, _entry) = res.map_err(|e| v2_error(py, e))?; |
|
463 | 457 | Ok(Some(PyBytes::new(py, f.as_bytes()))) |
|
464 | 458 | } |
|
465 | 459 | fn translate_key_value( |
|
466 | 460 | py: Python, |
|
467 | 461 | res: Result<(&HgPath, DirstateEntry), DirstateV2ParseError>, |
|
468 | 462 | ) -> PyResult<Option<(PyBytes, PyObject)>> { |
|
469 | 463 | let (f, entry) = res.map_err(|e| v2_error(py, e))?; |
|
470 | 464 | Ok(Some(( |
|
471 | 465 | PyBytes::new(py, f.as_bytes()), |
|
472 | 466 | DirstateItem::new_as_pyobject(py, entry)?, |
|
473 | 467 | ))) |
|
474 | 468 | } |
|
475 | 469 | } |
|
476 | 470 | |
|
477 | 471 | py_shared_iterator!( |
|
478 | 472 | DirstateMapKeysIterator, |
|
479 | 473 | UnsafePyLeaked<StateMapIter<'static>>, |
|
480 | 474 | DirstateMap::translate_key, |
|
481 | 475 | Option<PyBytes> |
|
482 | 476 | ); |
|
483 | 477 | |
|
484 | 478 | py_shared_iterator!( |
|
485 | 479 | DirstateMapItemsIterator, |
|
486 | 480 | UnsafePyLeaked<StateMapIter<'static>>, |
|
487 | 481 | DirstateMap::translate_key_value, |
|
488 | 482 | Option<(PyBytes, PyObject)> |
|
489 | 483 | ); |
|
490 | 484 | |
|
491 | 485 | fn extract_node_id(py: Python, obj: &PyObject) -> PyResult<Node> { |
|
492 | 486 | let bytes = obj.extract::<PyBytes>(py)?; |
|
493 | 487 | match bytes.data(py).try_into() { |
|
494 | 488 | Ok(s) => Ok(s), |
|
495 | 489 | Err(e) => Err(PyErr::new::<exc::ValueError, _>(py, e.to_string())), |
|
496 | 490 | } |
|
497 | 491 | } |
|
498 | 492 | |
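
`extract_node_id` above leans on `TryInto` to turn a byte slice into the fixed-size array behind `Node` (20 bytes, the width of a SHA-1 node id). A standalone sketch of that conversion and its failure mode:

    use std::convert::TryInto;

    // `try_into` from `&[u8]` to `[u8; 20]` succeeds only when the slice
    // length matches exactly; anything else becomes a descriptive error.
    fn to_node(bytes: &[u8]) -> Result<[u8; 20], String> {
        bytes
            .try_into()
            .map_err(|e: std::array::TryFromSliceError| e.to_string())
    }

    fn main() {
        assert!(to_node(&[0u8; 20]).is_ok());
        assert!(to_node(&[0u8; 19]).is_err()); // wrong length is rejected
    }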
|
499 | 493 | pub(super) fn v2_error(py: Python<'_>, _: DirstateV2ParseError) -> PyErr { |
|
500 | 494 | PyErr::new::<exc::ValueError, _>(py, "corrupted dirstate-v2") |
|
501 | 495 | } |
|
502 | 496 | |
|
503 | 497 | fn dirstate_error(py: Python<'_>, e: DirstateError) -> PyErr { |
|
504 | 498 | PyErr::new::<exc::OSError, _>(py, format!("Dirstate error: {:?}", e)) |
|
505 | 499 | } |
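
The two helpers above fix the error convention for the whole binding: parse failures surface as `ValueError`, dirstate-level failures as `OSError`, and every fallible core call is funneled through `map_err(...)?`. A minimal plain-Rust sketch of that funneling, with toy error types standing in for `PyErr`:

    #[derive(Debug)]
    struct DirstateV2ParseError;

    // Stand-in for `PyErr`: just a labelled message.
    #[derive(Debug)]
    struct PyStyleErr(String);

    fn v2_error(_: DirstateV2ParseError) -> PyStyleErr {
        PyStyleErr("ValueError: corrupted dirstate-v2".to_string())
    }

    fn lookup(key: &str) -> Result<u32, DirstateV2ParseError> {
        if key == "ok" { Ok(42) } else { Err(DirstateV2ParseError) }
    }

    // The `map_err(...)?` pattern used by nearly every method above.
    fn get(key: &str) -> Result<u32, PyStyleErr> {
        let value = lookup(key).map_err(v2_error)?;
        Ok(value)
    }

    fn main() {
        assert!(get("ok").is_ok());
        assert_eq!(
            get("bad").unwrap_err().0,
            "ValueError: corrupted dirstate-v2"
        );
    }
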
@@ -1,288 +1,283 b'' | |||
|
1 | 1 | use cpython::exc; |
|
2 | 2 | use cpython::ObjectProtocol; |
|
3 | 3 | use cpython::PyBytes; |
|
4 | 4 | use cpython::PyErr; |
|
5 | 5 | use cpython::PyNone; |
|
6 | 6 | use cpython::PyObject; |
|
7 | 7 | use cpython::PyResult; |
|
8 | 8 | use cpython::Python; |
|
9 | 9 | use cpython::PythonObject; |
|
10 | 10 | use hg::dirstate::DirstateEntry; |
|
11 | 11 | use hg::dirstate::EntryState; |
|
12 | 12 | use hg::dirstate::TruncatedTimestamp; |
|
13 | 13 | use std::cell::Cell; |
|
14 | 14 | use std::convert::TryFrom; |
|
15 | 15 | |
|
16 | 16 | py_class!(pub class DirstateItem |py| { |
|
17 | 17 | data entry: Cell<DirstateEntry>; |
|
18 | 18 | |
|
19 | 19 | def __new__( |
|
20 | 20 | _cls, |
|
21 | 21 | wc_tracked: bool = false, |
|
22 | 22 | p1_tracked: bool = false, |
|
23 | 23 | p2_info: bool = false, |
|
24 | 24 | has_meaningful_data: bool = true, |
|
25 | 25 | has_meaningful_mtime: bool = true, |
|
26 | 26 | parentfiledata: Option<(u32, u32, Option<(u32, u32)>)> = None, |
|
27 | 27 | fallback_exec: Option<bool> = None, |
|
28 | 28 | fallback_symlink: Option<bool> = None, |
|
29 | 29 | |
|
30 | 30 | ) -> PyResult<DirstateItem> { |
|
31 | 31 | let mut mode_size_opt = None; |
|
32 | 32 | let mut mtime_opt = None; |
|
33 | 33 | if let Some((mode, size, mtime)) = parentfiledata { |
|
34 | 34 | if has_meaningful_data { |
|
35 | 35 | mode_size_opt = Some((mode, size)) |
|
36 | 36 | } |
|
37 | 37 | if has_meaningful_mtime { |
|
38 | 38 | if let Some(m) = mtime { |
|
39 | 39 | mtime_opt = Some(timestamp(py, m)?); |
|
40 | 40 | } |
|
41 | 41 | } |
|
42 | 42 | } |
|
43 | 43 | let entry = DirstateEntry::from_v2_data( |
|
44 | 44 | wc_tracked, |
|
45 | 45 | p1_tracked, |
|
46 | 46 | p2_info, |
|
47 | 47 | mode_size_opt, |
|
48 | 48 | mtime_opt, |
|
49 | 49 | fallback_exec, |
|
50 | 50 | fallback_symlink, |
|
51 | 51 | ); |
|
52 | 52 | DirstateItem::create_instance(py, Cell::new(entry)) |
|
53 | 53 | } |
|
54 | 54 | |
|
55 | 55 | @property |
|
56 | 56 | def state(&self) -> PyResult<PyBytes> { |
|
57 | 57 | let state_byte: u8 = self.entry(py).get().state().into(); |
|
58 | 58 | Ok(PyBytes::new(py, &[state_byte])) |
|
59 | 59 | } |
|
60 | 60 | |
|
61 | 61 | @property |
|
62 | 62 | def mode(&self) -> PyResult<i32> { |
|
63 | 63 | Ok(self.entry(py).get().mode()) |
|
64 | 64 | } |
|
65 | 65 | |
|
66 | 66 | @property |
|
67 | 67 | def size(&self) -> PyResult<i32> { |
|
68 | 68 | Ok(self.entry(py).get().size()) |
|
69 | 69 | } |
|
70 | 70 | |
|
71 | 71 | @property |
|
72 | 72 | def mtime(&self) -> PyResult<i32> { |
|
73 | 73 | Ok(self.entry(py).get().mtime()) |
|
74 | 74 | } |
|
75 | 75 | |
|
76 | 76 | @property |
|
77 | 77 | def has_fallback_exec(&self) -> PyResult<bool> { |
|
78 | 78 | match self.entry(py).get().get_fallback_exec() { |
|
79 | 79 | Some(_) => Ok(true), |
|
80 | 80 | None => Ok(false), |
|
81 | 81 | } |
|
82 | 82 | } |
|
83 | 83 | |
|
84 | 84 | @property |
|
85 | 85 | def fallback_exec(&self) -> PyResult<Option<bool>> { |
|
86 | 86 | match self.entry(py).get().get_fallback_exec() { |
|
87 | 87 | Some(exec) => Ok(Some(exec)), |
|
88 | 88 | None => Ok(None), |
|
89 | 89 | } |
|
90 | 90 | } |
|
91 | 91 | |
|
92 | 92 | @fallback_exec.setter |
|
93 | 93 | def set_fallback_exec(&self, value: Option<PyObject>) -> PyResult<()> { |
|
94 | 94 | match value { |
|
95 | 95 | None => {self.entry(py).get().set_fallback_exec(None);}, |
|
96 | 96 | Some(value) => { |
|
97 | 97 | if value.is_none(py) { |
|
98 | 98 | self.entry(py).get().set_fallback_exec(None); |
|
99 | 99 | } else { |
|
100 | 100 | self.entry(py).get().set_fallback_exec( |
|
101 | 101 | Some(value.is_true(py)?) |
|
102 | 102 | ); |
|
103 | 103 | }}, |
|
104 | 104 | } |
|
105 | 105 | Ok(()) |
|
106 | 106 | } |
|
107 | 107 | |
|
108 | 108 | @property |
|
109 | 109 | def has_fallback_symlink(&self) -> PyResult<bool> { |
|
110 | 110 | match self.entry(py).get().get_fallback_symlink() { |
|
111 | 111 | Some(_) => Ok(true), |
|
112 | 112 | None => Ok(false), |
|
113 | 113 | } |
|
114 | 114 | } |
|
115 | 115 | |
|
116 | 116 | @property |
|
117 | 117 | def fallback_symlink(&self) -> PyResult<Option<bool>> { |
|
118 | 118 | match self.entry(py).get().get_fallback_symlink() { |
|
119 | 119 | Some(symlink) => Ok(Some(symlink)), |
|
120 | 120 | None => Ok(None), |
|
121 | 121 | } |
|
122 | 122 | } |
|
123 | 123 | |
|
124 | 124 | @fallback_symlink.setter |
|
125 | 125 | def set_fallback_symlink(&self, value: Option<PyObject>) -> PyResult<()> { |
|
126 | 126 | match value { |
|
127 | 127 | None => {self.entry(py).get().set_fallback_symlink(None);}, |
|
128 | 128 | Some(value) => { |
|
129 | 129 | if value.is_none(py) { |
|
130 | 130 | self.entry(py).get().set_fallback_symlink(None); |
|
131 | 131 | } else { |
|
132 | 132 | self.entry(py).get().set_fallback_symlink( |
|
133 | 133 | Some(value.is_true(py)?) |
|
134 | 134 | ); |
|
135 | 135 | }}, |
|
136 | 136 | } |
|
137 | 137 | Ok(()) |
|
138 | 138 | } |
|
139 | 139 | |
|
140 | 140 | @property |
|
141 | 141 | def tracked(&self) -> PyResult<bool> { |
|
142 | 142 | Ok(self.entry(py).get().tracked()) |
|
143 | 143 | } |
|
144 | 144 | |
|
145 | 145 | @property |
|
146 | 146 | def p1_tracked(&self) -> PyResult<bool> { |
|
147 | 147 | Ok(self.entry(py).get().p1_tracked()) |
|
148 | 148 | } |
|
149 | 149 | |
|
150 | 150 | @property |
|
151 | 151 | def added(&self) -> PyResult<bool> { |
|
152 | 152 | Ok(self.entry(py).get().added()) |
|
153 | 153 | } |
|
154 | 154 | |
|
155 | 155 | |
|
156 | 156 | @property |
|
157 | 157 | def p2_info(&self) -> PyResult<bool> { |
|
158 | 158 | Ok(self.entry(py).get().p2_info()) |
|
159 | 159 | } |
|
160 | 160 | |
|
161 | 161 | @property |
|
162 | 162 | def removed(&self) -> PyResult<bool> { |
|
163 | 163 | Ok(self.entry(py).get().removed()) |
|
164 | 164 | } |
|
165 | 165 | |
|
166 | 166 | @property |
|
167 | 167 | def maybe_clean(&self) -> PyResult<bool> { |
|
168 | 168 | Ok(self.entry(py).get().maybe_clean()) |
|
169 | 169 | } |
|
170 | 170 | |
|
171 | 171 | @property |
|
172 | 172 | def any_tracked(&self) -> PyResult<bool> { |
|
173 | 173 | Ok(self.entry(py).get().any_tracked()) |
|
174 | 174 | } |
|
175 | 175 | |
|
176 | 176 | def v1_state(&self) -> PyResult<PyBytes> { |
|
177 | 177 | let (state, _mode, _size, _mtime) = self.entry(py).get().v1_data(); |
|
178 | 178 | let state_byte: u8 = state.into(); |
|
179 | 179 | Ok(PyBytes::new(py, &[state_byte])) |
|
180 | 180 | } |
|
181 | 181 | |
|
182 | 182 | def v1_mode(&self) -> PyResult<i32> { |
|
183 | 183 | let (_state, mode, _size, _mtime) = self.entry(py).get().v1_data(); |
|
184 | 184 | Ok(mode) |
|
185 | 185 | } |
|
186 | 186 | |
|
187 | 187 | def v1_size(&self) -> PyResult<i32> { |
|
188 | 188 | let (_state, _mode, size, _mtime) = self.entry(py).get().v1_data(); |
|
189 | 189 | Ok(size) |
|
190 | 190 | } |
|
191 | 191 | |
|
192 | 192 | def v1_mtime(&self) -> PyResult<i32> { |
|
193 | 193 | let (_state, _mode, _size, mtime) = self.entry(py).get().v1_data(); |
|
194 | 194 | Ok(mtime) |
|
195 | 195 | } |
|
196 | 196 | |
|
197 | def need_delay(&self, now: (u32, u32)) -> PyResult<bool> { | |
|
198 | let now = timestamp(py, now)?; | |
|
199 | Ok(self.entry(py).get().need_delay(now)) | |
|
200 | } | |
|
201 | ||
|
202 | 197 | def mtime_likely_equal_to(&self, other: (u32, u32)) -> PyResult<bool> { |
|
203 | 198 | if let Some(mtime) = self.entry(py).get().truncated_mtime() { |
|
204 | 199 | Ok(mtime.likely_equal(timestamp(py, other)?)) |
|
205 | 200 | } else { |
|
206 | 201 | Ok(false) |
|
207 | 202 | } |
|
208 | 203 | } |
|
209 | 204 | |
|
210 | 205 | @classmethod |
|
211 | 206 | def from_v1_data( |
|
212 | 207 | _cls, |
|
213 | 208 | state: PyBytes, |
|
214 | 209 | mode: i32, |
|
215 | 210 | size: i32, |
|
216 | 211 | mtime: i32, |
|
217 | 212 | ) -> PyResult<Self> { |
|
218 | 213 | let state = <[u8; 1]>::try_from(state.data(py)) |
|
219 | 214 | .ok() |
|
220 | 215 | .and_then(|state| EntryState::try_from(state[0]).ok()) |
|
221 | 216 | .ok_or_else(|| PyErr::new::<exc::ValueError, _>(py, "invalid state"))?; |
|
222 | 217 | let entry = DirstateEntry::from_v1_data(state, mode, size, mtime); |
|
223 | 218 | DirstateItem::create_instance(py, Cell::new(entry)) |
|
224 | 219 | } |
|
225 | 220 | |
|
226 | 221 | def drop_merge_data(&self) -> PyResult<PyNone> { |
|
227 | 222 | self.update(py, |entry| entry.drop_merge_data()); |
|
228 | 223 | Ok(PyNone) |
|
229 | 224 | } |
|
230 | 225 | |
|
231 | 226 | def set_clean( |
|
232 | 227 | &self, |
|
233 | 228 | mode: u32, |
|
234 | 229 | size: u32, |
|
235 | 230 | mtime: (u32, u32), |
|
236 | 231 | ) -> PyResult<PyNone> { |
|
237 | 232 | let mtime = timestamp(py, mtime)?; |
|
238 | 233 | self.update(py, |entry| entry.set_clean(mode, size, mtime)); |
|
239 | 234 | Ok(PyNone) |
|
240 | 235 | } |
|
241 | 236 | |
|
242 | 237 | def set_possibly_dirty(&self) -> PyResult<PyNone> { |
|
243 | 238 | self.update(py, |entry| entry.set_possibly_dirty()); |
|
244 | 239 | Ok(PyNone) |
|
245 | 240 | } |
|
246 | 241 | |
|
247 | 242 | def set_tracked(&self) -> PyResult<PyNone> { |
|
248 | 243 | self.update(py, |entry| entry.set_tracked()); |
|
249 | 244 | Ok(PyNone) |
|
250 | 245 | } |
|
251 | 246 | |
|
252 | 247 | def set_untracked(&self) -> PyResult<PyNone> { |
|
253 | 248 | self.update(py, |entry| entry.set_untracked()); |
|
254 | 249 | Ok(PyNone) |
|
255 | 250 | } |
|
256 | 251 | }); |
|
257 | 252 | |
|
258 | 253 | impl DirstateItem { |
|
259 | 254 | pub fn new_as_pyobject( |
|
260 | 255 | py: Python<'_>, |
|
261 | 256 | entry: DirstateEntry, |
|
262 | 257 | ) -> PyResult<PyObject> { |
|
263 | 258 | Ok(DirstateItem::create_instance(py, Cell::new(entry))?.into_object()) |
|
264 | 259 | } |
|
265 | 260 | |
|
266 | 261 | pub fn get_entry(&self, py: Python<'_>) -> DirstateEntry { |
|
267 | 262 | self.entry(py).get() |
|
268 | 263 | } |
|
269 | 264 | |
|
270 | 265 | // TODO: Use https://doc.rust-lang.org/std/cell/struct.Cell.html#method.update instead when it’s stable |
|
271 | 266 | pub fn update(&self, py: Python<'_>, f: impl FnOnce(&mut DirstateEntry)) { |
|
272 | 267 | let mut entry = self.entry(py).get(); |
|
273 | 268 | f(&mut entry); |
|
274 | 269 | self.entry(py).set(entry) |
|
275 | 270 | } |
|
276 | 271 | } |
|
277 | 272 | |
|
278 | 273 | pub(crate) fn timestamp( |
|
279 | 274 | py: Python<'_>, |
|
280 | 275 | (s, ns): (u32, u32), |
|
281 | 276 | ) -> PyResult<TruncatedTimestamp> { |
|
282 | 277 | TruncatedTimestamp::from_already_truncated(s, ns).map_err(|_| { |
|
283 | 278 | PyErr::new::<exc::ValueError, _>( |
|
284 | 279 | py, |
|
285 | 280 | "expected mtime truncated to 31 bits", |
|
286 | 281 | ) |
|
287 | 282 | }) |
|
288 | 283 | } |
@@ -1,106 +1,100 b'' | |||
|
1 | 1 | # extension to emulate invoking 'dirstate.write()' at the time |
|
2 | 2 | # specified by '[fakedirstatewritetime] fakenow', only when |
|
3 | 3 | # 'dirstate.write()' is invoked via functions below: |
|
4 | 4 | # |
|
5 | 5 | # - 'workingctx._poststatusfixup()' (= 'repo.status()') |
|
6 | 6 | # - 'committablectx.markcommitted()' |
|
7 | 7 | |
|
8 | 8 | from __future__ import absolute_import |
|
9 | 9 | |
|
10 | 10 | from mercurial import ( |
|
11 | 11 | context, |
|
12 | 12 | dirstatemap as dirstatemapmod, |
|
13 | 13 | extensions, |
|
14 | 14 | policy, |
|
15 | 15 | registrar, |
|
16 | 16 | ) |
|
17 | 17 | from mercurial.dirstateutils import timestamp |
|
18 | 18 | from mercurial.utils import dateutil |
|
19 | 19 | |
|
20 | 20 | try: |
|
21 | 21 | from mercurial import rustext |
|
22 | 22 | |
|
23 | 23 | rustext.__name__ # force actual import (see hgdemandimport) |
|
24 | 24 | except ImportError: |
|
25 | 25 | rustext = None |
|
26 | 26 | |
|
27 | 27 | configtable = {} |
|
28 | 28 | configitem = registrar.configitem(configtable) |
|
29 | 29 | |
|
30 | 30 | configitem( |
|
31 | 31 | b'fakedirstatewritetime', |
|
32 | 32 | b'fakenow', |
|
33 | 33 | default=None, |
|
34 | 34 | ) |
|
35 | 35 | |
|
36 | 36 | parsers = policy.importmod('parsers') |
|
37 | 37 | has_rust_dirstate = policy.importrust('dirstate') is not None |
|
38 | 38 | |
|
39 | 39 | |
|
40 | def pack_dirstate(fakenow, orig, dmap, copymap, pl, now): |

41 | # execute what original parsers.pack_dirstate should do actually | |
|
42 | # for consistency | |
|
43 | for f, e in dmap.items(): | |
|
44 | if e.need_delay(now): | |
|
45 | e.set_possibly_dirty() | |
|
46 | ||
|
47 | return orig(dmap, copymap, pl, fakenow) | |
|
40 | def pack_dirstate(orig, dmap, copymap, pl): | |
|
41 | return orig(dmap, copymap, pl) | |
|
48 | 42 | |
|
49 | 43 | |
|
50 | 44 | def fakewrite(ui, func): |
|
51 | 45 | # fake "now" of 'pack_dirstate' only if it is invoked while 'func' runs |
|
52 | 46 | |
|
53 | 47 | fakenow = ui.config(b'fakedirstatewritetime', b'fakenow') |
|
54 | 48 | if not fakenow: |
|
55 | 49 | # Execute original one, if fakenow isn't configured. This is |
|
56 | 50 | # useful to prevent subrepos from executing replaced one, |
|
57 | 51 | # because replacing 'parsers.pack_dirstate' is also effective |
|
58 | 52 | # in subrepos. |
|
59 | 53 | return func() |
|
60 | 54 | |
|
61 | 55 | # parsing 'fakenow' in YYYYmmddHHMM format makes comparison between |
|
62 | 56 | # 'fakenow' value and 'touch -t YYYYmmddHHMM' argument easy |
|
63 | 57 | fakenow = dateutil.parsedate(fakenow, [b'%Y%m%d%H%M'])[0] |
|
64 | 58 | fakenow = timestamp.timestamp((fakenow, 0)) |
|
65 | 59 | |
|
66 | 60 | if has_rust_dirstate: |
|
67 | 61 | # The Rust implementation does not use public parse/pack dirstate |
|
68 | 62 | # to prevent conversion round-trips |
|
69 | 63 | orig_dirstatemap_write = dirstatemapmod.dirstatemap.write |
|
70 | wrapper = lambda self, tr, st: orig_dirstatemap_write( |

71 | self, tr, st, fakenow | |
|
72 | ) | |
|
64 | wrapper = lambda self, tr, st: orig_dirstatemap_write(self, tr, st) | |
|
73 | 65 | dirstatemapmod.dirstatemap.write = wrapper |
|
74 | 66 | |
|
75 | 67 | orig_get_fs_now = timestamp.get_fs_now |
|
76 | wrapper = lambda *args: pack_dirstate(fakenow, orig_pack_dirstate, *args) |

68 | wrapper = lambda *args: pack_dirstate(orig_pack_dirstate, *args) | |
|
77 | 69 | |
|
78 | 70 | orig_module = parsers |
|
79 | 71 | orig_pack_dirstate = parsers.pack_dirstate |
|
80 | 72 | |
|
81 | 73 | orig_module.pack_dirstate = wrapper |
|
82 | timestamp.get_fs_now = lambda *args: fakenow |

74 | timestamp.get_fs_now = ( | |
|
75 | lambda *args: fakenow | |
|
76 | ) # XXX useless for this purpose now | |
|
83 | 77 | try: |
|
84 | 78 | return func() |
|
85 | 79 | finally: |
|
86 | 80 | orig_module.pack_dirstate = orig_pack_dirstate |
|
87 | 81 | timestamp.get_fs_now = orig_get_fs_now |
|
88 | 82 | if has_rust_dirstate: |
|
89 | 83 | dirstatemapmod.dirstatemap.write = orig_dirstatemap_write |
|
90 | 84 | |
|
91 | 85 | |
|
92 | 86 | def _poststatusfixup(orig, workingctx, status, fixup): |
|
93 | 87 | ui = workingctx.repo().ui |
|
94 | 88 | return fakewrite(ui, lambda: orig(workingctx, status, fixup)) |
|
95 | 89 | |
|
96 | 90 | |
|
97 | 91 | def markcommitted(orig, committablectx, node): |
|
98 | 92 | ui = committablectx.repo().ui |
|
99 | 93 | return fakewrite(ui, lambda: orig(committablectx, node)) |
|
100 | 94 | |
|
101 | 95 | |
|
102 | 96 | def extsetup(ui): |
|
103 | 97 | extensions.wrapfunction( |
|
104 | 98 | context.workingctx, '_poststatusfixup', _poststatusfixup |
|
105 | 99 | ) |
|
106 | 100 | extensions.wrapfunction(context.workingctx, 'markcommitted', markcommitted) |
@@ -1,439 +1,426 b'' | |||
|
1 | 1 | $ cat <<EOF > merge |
|
2 | 2 | > from __future__ import print_function |
|
3 | 3 | > import sys, os |
|
4 | 4 | > |
|
5 | 5 | > try: |
|
6 | 6 | > import msvcrt |
|
7 | 7 | > msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY) |
|
8 | 8 | > msvcrt.setmode(sys.stderr.fileno(), os.O_BINARY) |
|
9 | 9 | > except ImportError: |
|
10 | 10 | > pass |
|
11 | 11 | > |
|
12 | 12 | > print("merging for", os.path.basename(sys.argv[1])) |
|
13 | 13 | > EOF |
|
14 | 14 | $ HGMERGE="\"$PYTHON\" ../merge"; export HGMERGE |
|
15 | 15 | |
|
16 | 16 | $ hg init t |
|
17 | 17 | $ cd t |
|
18 | 18 | $ echo This is file a1 > a |
|
19 | 19 | $ hg add a |
|
20 | 20 | $ hg commit -m "commit #0" |
|
21 | 21 | $ echo This is file b1 > b |
|
22 | 22 | $ hg add b |
|
23 | 23 | $ hg commit -m "commit #1" |
|
24 | 24 | |
|
25 | 25 | $ hg update 0 |
|
26 | 26 | 0 files updated, 0 files merged, 1 files removed, 0 files unresolved |
|
27 | 27 | |
|
28 | 28 | Test interrupted updates by having a non-empty dir with the same name as one |
|
29 | 29 | of the files in a commit we're updating to |
|
30 | 30 | |
|
31 | 31 | $ mkdir b && touch b/nonempty |
|
32 | 32 | $ hg up |
|
33 | 33 | abort: Unlinking directory not permitted: *$TESTTMP/t/b* (glob) (windows !) |
|
34 | 34 | abort: Directory not empty: '?\$TESTTMP/t/b'? (re) (no-windows !) |
|
35 | 35 | [255] |
|
36 | 36 | $ hg ci |
|
37 | 37 | abort: last update was interrupted |
|
38 | 38 | (use 'hg update' to get a consistent checkout) |
|
39 | 39 | [20] |
|
40 | 40 | $ hg sum |
|
41 | 41 | parent: 0:538afb845929 |
|
42 | 42 | commit #0 |
|
43 | 43 | branch: default |
|
44 | 44 | commit: 1 unknown (interrupted update) |
|
45 | 45 | update: 1 new changesets (update) |
|
46 | 46 | phases: 2 draft |
|
47 | 47 | Detect interrupted update by hg status --verbose |
|
48 | 48 | $ hg status -v |
|
49 | 49 | ? b/nonempty |
|
50 | 50 | # The repository is in an unfinished *update* state. |
|
51 | 51 | |
|
52 | 52 | # To continue: hg update . |
|
53 | 53 | |
|
54 | 54 | |
|
55 | 55 | $ rm b/nonempty |
|
56 | 56 | |
|
57 | 57 | $ hg up |
|
58 | 58 | 1 files updated, 0 files merged, 0 files removed, 0 files unresolved |
|
59 | 59 | $ hg sum |
|
60 | 60 | parent: 1:b8bb4a988f25 tip |
|
61 | 61 | commit #1 |
|
62 | 62 | branch: default |
|
63 | 63 | commit: (clean) |
|
64 | 64 | update: (current) |
|
65 | 65 | phases: 2 draft |
|
66 | 66 | |
|
67 | 67 | Prepare a basic merge |
|
68 | 68 | |
|
69 | 69 | $ hg up 0 |
|
70 | 70 | 0 files updated, 0 files merged, 1 files removed, 0 files unresolved |
|
71 | 71 | $ echo This is file c1 > c |
|
72 | 72 | $ hg add c |
|
73 | 73 | $ hg commit -m "commit #2" |
|
74 | 74 | created new head |
|
75 | 75 | $ echo This is file b1 > b |
|
76 | 76 | no merges expected |
|
77 | 77 | $ hg merge -P 1 |
|
78 | 78 | changeset: 1:b8bb4a988f25 |
|
79 | 79 | user: test |
|
80 | 80 | date: Thu Jan 01 00:00:00 1970 +0000 |
|
81 | 81 | summary: commit #1 |
|
82 | 82 | |
|
83 | 83 | $ hg merge 1 |
|
84 | 84 | 1 files updated, 0 files merged, 0 files removed, 0 files unresolved |
|
85 | 85 | (branch merge, don't forget to commit) |
|
86 | 86 | $ hg diff --nodates |
|
87 | 87 | diff -r 49035e18a8e6 b |
|
88 | 88 | --- /dev/null |
|
89 | 89 | +++ b/b |
|
90 | 90 | @@ -0,0 +1,1 @@ |
|
91 | 91 | +This is file b1 |
|
92 | 92 | $ hg status |
|
93 | 93 | M b |
|
94 | 94 | $ cd ..; rm -r t |
|
95 | 95 | |
|
96 | 96 | $ hg init t |
|
97 | 97 | $ cd t |
|
98 | 98 | $ echo This is file a1 > a |
|
99 | 99 | $ hg add a |
|
100 | 100 | $ hg commit -m "commit #0" |
|
101 | 101 | $ echo This is file b1 > b |
|
102 | 102 | $ hg add b |
|
103 | 103 | $ hg commit -m "commit #1" |
|
104 | 104 | |
|
105 | 105 | $ hg update 0 |
|
106 | 106 | 0 files updated, 0 files merged, 1 files removed, 0 files unresolved |
|
107 | 107 | $ echo This is file c1 > c |
|
108 | 108 | $ hg add c |
|
109 | 109 | $ hg commit -m "commit #2" |
|
110 | 110 | created new head |
|
111 | 111 | $ echo This is file b2 > b |
|
112 | 112 | merge should fail |
|
113 | 113 | $ hg merge 1 |
|
114 | 114 | b: untracked file differs |
|
115 | 115 | abort: untracked files in working directory differ from files in requested revision |
|
116 | 116 | [20] |
|
117 | 117 | |
|
118 | 118 | #if symlink |
|
119 | 119 | symlinks to directories should be treated as regular files (issue5027) |
|
120 | 120 | $ rm b |
|
121 | 121 | $ ln -s 'This is file b2' b |
|
122 | 122 | $ hg merge 1 |
|
123 | 123 | b: untracked file differs |
|
124 | 124 | abort: untracked files in working directory differ from files in requested revision |
|
125 | 125 | [20] |
|
126 | 126 | symlinks shouldn't be followed |
|
127 | 127 | $ rm b |
|
128 | 128 | $ echo This is file b1 > .hg/b |
|
129 | 129 | $ ln -s .hg/b b |
|
130 | 130 | $ hg merge 1 |
|
131 | 131 | b: untracked file differs |
|
132 | 132 | abort: untracked files in working directory differ from files in requested revision |
|
133 | 133 | [20] |
|
134 | 134 | |
|
135 | 135 | $ rm b |
|
136 | 136 | $ echo This is file b2 > b |
|
137 | 137 | #endif |
|
138 | 138 | |
|
139 | 139 | bad config |
|
140 | 140 | $ hg merge 1 --config merge.checkunknown=x |
|
141 | 141 | config error: merge.checkunknown not valid ('x' is none of 'abort', 'ignore', 'warn') |
|
142 | 142 | [30] |
|
143 | 143 | this merge should fail |
|
144 | 144 | $ hg merge 1 --config merge.checkunknown=abort |
|
145 | 145 | b: untracked file differs |
|
146 | 146 | abort: untracked files in working directory differ from files in requested revision |
|
147 | 147 | [20] |
|
148 | 148 | |
|
149 | 149 | this merge should warn |
|
150 | 150 | $ hg merge 1 --config merge.checkunknown=warn |
|
151 | 151 | b: replacing untracked file |
|
152 | 152 | 1 files updated, 0 files merged, 0 files removed, 0 files unresolved |
|
153 | 153 | (branch merge, don't forget to commit) |
|
154 | 154 | $ cat b.orig |
|
155 | 155 | This is file b2 |
|
156 | 156 | $ hg up --clean 2 |
|
157 | 157 | 0 files updated, 0 files merged, 1 files removed, 0 files unresolved |
|
158 | 158 | $ mv b.orig b |
|
159 | 159 | |
|
160 | 160 | this merge should silently ignore |
|
161 | 161 | $ cat b |
|
162 | 162 | This is file b2 |
|
163 | 163 | $ hg merge 1 --config merge.checkunknown=ignore |
|
164 | 164 | 1 files updated, 0 files merged, 0 files removed, 0 files unresolved |
|
165 | 165 | (branch merge, don't forget to commit) |
|
166 | 166 | |
|
167 | 167 | merge.checkignored |
|
168 | 168 | $ hg up --clean 1 |
|
169 | 169 | 1 files updated, 0 files merged, 1 files removed, 0 files unresolved |
|
170 | 170 | $ cat >> .hgignore << EOF |
|
171 | 171 | > remoteignored |
|
172 | 172 | > EOF |
|
173 | 173 | $ echo This is file localignored3 > localignored |
|
174 | 174 | $ echo This is file remoteignored3 > remoteignored |
|
175 | 175 | $ hg add .hgignore localignored remoteignored |
|
176 | 176 | $ hg commit -m "commit #3" |
|
177 | 177 | |
|
178 | 178 | $ hg up 2 |
|
179 | 179 | 1 files updated, 0 files merged, 4 files removed, 0 files unresolved |
|
180 | 180 | $ cat >> .hgignore << EOF |
|
181 | 181 | > localignored |
|
182 | 182 | > EOF |
|
183 | 183 | $ hg add .hgignore |
|
184 | 184 | $ hg commit -m "commit #4" |
|
185 | 185 | |
|
186 | 186 | remote .hgignore shouldn't be used for determining whether a file is ignored |
|
187 | 187 | $ echo This is file remoteignored4 > remoteignored |
|
188 | 188 | $ hg merge 3 --config merge.checkignored=ignore --config merge.checkunknown=abort |
|
189 | 189 | remoteignored: untracked file differs |
|
190 | 190 | abort: untracked files in working directory differ from files in requested revision |
|
191 | 191 | [20] |
|
192 | 192 | $ hg merge 3 --config merge.checkignored=abort --config merge.checkunknown=ignore |
|
193 | 193 | merging .hgignore |
|
194 | 194 | merging for .hgignore |
|
195 | 195 | 3 files updated, 1 files merged, 0 files removed, 0 files unresolved |
|
196 | 196 | (branch merge, don't forget to commit) |
|
197 | 197 | $ cat remoteignored |
|
198 | 198 | This is file remoteignored3 |
|
199 | 199 | $ cat remoteignored.orig |
|
200 | 200 | This is file remoteignored4 |
|
201 | 201 | $ rm remoteignored.orig |
|
202 | 202 | |
|
203 | 203 | local .hgignore should be used for that |
|
204 | 204 | $ hg up --clean 4 |
|
205 | 205 | 1 files updated, 0 files merged, 3 files removed, 0 files unresolved |
|
206 | 206 | $ echo This is file localignored4 > localignored |
|
207 | 207 | also test other conflicting files to see we output the full set of warnings |
|
208 | 208 | $ echo This is file b2 > b |
|
209 | 209 | $ hg merge 3 --config merge.checkignored=abort --config merge.checkunknown=abort |
|
210 | 210 | b: untracked file differs |
|
211 | 211 | localignored: untracked file differs |
|
212 | 212 | abort: untracked files in working directory differ from files in requested revision |
|
213 | 213 | [20] |
|
214 | 214 | $ hg merge 3 --config merge.checkignored=abort --config merge.checkunknown=ignore |
|
215 | 215 | localignored: untracked file differs |
|
216 | 216 | abort: untracked files in working directory differ from files in requested revision |
|
217 | 217 | [20] |
|
218 | 218 | $ hg merge 3 --config merge.checkignored=warn --config merge.checkunknown=abort |
|
219 | 219 | b: untracked file differs |
|
220 | 220 | abort: untracked files in working directory differ from files in requested revision |
|
221 | 221 | [20] |
|
222 | 222 | $ hg merge 3 --config merge.checkignored=warn --config merge.checkunknown=warn |
|
223 | 223 | b: replacing untracked file |
|
224 | 224 | localignored: replacing untracked file |
|
225 | 225 | merging .hgignore |
|
226 | 226 | merging for .hgignore |
|
227 | 227 | 3 files updated, 1 files merged, 0 files removed, 0 files unresolved |
|
228 | 228 | (branch merge, don't forget to commit) |
|
229 | 229 | $ cat localignored |
|
230 | 230 | This is file localignored3 |
|
231 | 231 | $ cat localignored.orig |
|
232 | 232 | This is file localignored4 |
|
233 | 233 | $ rm localignored.orig |
|
234 | 234 | |
|
235 | 235 | $ cat b.orig |
|
236 | 236 | This is file b2 |
|
237 | 237 | $ hg up --clean 2 |
|
238 | 238 | 0 files updated, 0 files merged, 4 files removed, 0 files unresolved |
|
239 | 239 | $ mv b.orig b |
|
240 | 240 | |
|
241 | 241 | this merge of b should work |
|
242 | 242 | $ cat b |
|
243 | 243 | This is file b2 |
|
244 | 244 | $ hg merge -f 1 |
|
245 | 245 | merging b |
|
246 | 246 | merging for b |
|
247 | 247 | 0 files updated, 1 files merged, 0 files removed, 0 files unresolved |
|
248 | 248 | (branch merge, don't forget to commit) |
|
249 | 249 | $ hg diff --nodates |
|
250 | 250 | diff -r 49035e18a8e6 b |
|
251 | 251 | --- /dev/null |
|
252 | 252 | +++ b/b |
|
253 | 253 | @@ -0,0 +1,1 @@ |
|
254 | 254 | +This is file b2 |
|
255 | 255 | $ hg status |
|
256 | 256 | M b |
|
257 | 257 | $ cd ..; rm -r t |
|
258 | 258 | |
|
259 | 259 | $ hg init t |
|
260 | 260 | $ cd t |
|
261 | 261 | $ echo This is file a1 > a |
|
262 | 262 | $ hg add a |
|
263 | 263 | $ hg commit -m "commit #0" |
|
264 | 264 | $ echo This is file b1 > b |
|
265 | 265 | $ hg add b |
|
266 | 266 | $ hg commit -m "commit #1" |
|
267 | 267 | $ echo This is file b22 > b |
|
268 | 268 | $ hg commit -m "commit #2" |
|
269 | 269 | $ hg update 1 |
|
270 | 270 | 1 files updated, 0 files merged, 0 files removed, 0 files unresolved |
|
271 | 271 | $ echo This is file c1 > c |
|
272 | 272 | $ hg add c |
|
273 | 273 | $ hg commit -m "commit #3" |
|
274 | 274 | created new head |
|
275 | 275 | |
|
276 | 276 | Contents of b should be "this is file b1" |
|
277 | 277 | $ cat b |
|
278 | 278 | This is file b1 |
|
279 | 279 | |
|
280 | 280 | $ echo This is file b22 > b |
|
281 | 281 | merge fails |
|
282 | 282 | $ hg merge 2 |
|
283 | 283 | abort: uncommitted changes |
|
284 | 284 | (use 'hg status' to list changes) |
|
285 | 285 | [20] |
|
286 | 286 | merge expected! |
|
287 | 287 | $ hg merge -f 2 |
|
288 | 288 | 1 files updated, 0 files merged, 0 files removed, 0 files unresolved |
|
289 | 289 | (branch merge, don't forget to commit) |
|
290 | 290 | $ hg diff --nodates |
|
291 | 291 | diff -r 85de557015a8 b |
|
292 | 292 | --- a/b |
|
293 | 293 | +++ b/b |
|
294 | 294 | @@ -1,1 +1,1 @@ |
|
295 | 295 | -This is file b1 |
|
296 | 296 | +This is file b22 |
|
297 | 297 | $ hg status |
|
298 | 298 | M b |
|
299 | 299 | $ cd ..; rm -r t |
|
300 | 300 | |
|
301 | 301 | $ hg init t |
|
302 | 302 | $ cd t |
|
303 | 303 | $ echo This is file a1 > a |
|
304 | 304 | $ hg add a |
|
305 | 305 | $ hg commit -m "commit #0" |
|
306 | 306 | $ echo This is file b1 > b |
|
307 | 307 | $ hg add b |
|
308 | 308 | $ hg commit -m "commit #1" |
|
309 | 309 | $ echo This is file b22 > b |
|
310 | 310 | $ hg commit -m "commit #2" |
|
311 | 311 | $ hg update 1 |
|
312 | 312 | 1 files updated, 0 files merged, 0 files removed, 0 files unresolved |
|
313 | 313 | $ echo This is file c1 > c |
|
314 | 314 | $ hg add c |
|
315 | 315 | $ hg commit -m "commit #3" |
|
316 | 316 | created new head |
|
317 | 317 | $ echo This is file b33 > b |
|
318 | 318 | merge of b should fail |
|
319 | 319 | $ hg merge 2 |
|
320 | 320 | abort: uncommitted changes |
|
321 | 321 | (use 'hg status' to list changes) |
|
322 | 322 | [20] |
|
323 | 323 | merge of b expected |
|
324 | 324 | $ hg merge -f 2 |
|
325 | 325 | merging b |
|
326 | 326 | merging for b |
|
327 | 327 | 0 files updated, 1 files merged, 0 files removed, 0 files unresolved |
|
328 | 328 | (branch merge, don't forget to commit) |
|
329 | 329 | $ hg diff --nodates |
|
330 | 330 | diff -r 85de557015a8 b |
|
331 | 331 | --- a/b |
|
332 | 332 | +++ b/b |
|
333 | 333 | @@ -1,1 +1,1 @@ |
|
334 | 334 | -This is file b1 |
|
335 | 335 | +This is file b33 |
|
336 | 336 | $ hg status |
|
337 | 337 | M b |
|
338 | 338 | |
|
339 | 339 | Test for issue2364 |
|
340 | 340 | |
|
341 | 341 | $ hg up -qC . |
|
342 | 342 | $ hg rm b |
|
343 | 343 | $ hg ci -md |
|
344 | 344 | $ hg revert -r -2 b |
|
345 | 345 | $ hg up -q -- -2 |
|
346 | 346 | |
|
347 | 347 | Test that updated files are treated as "modified", when |
|
348 | 348 | 'merge.update()' is aborted before 'merge.recordupdates()' (= parents |
|
349 | 349 | aren't changed), even if none of mode, size and timestamp of them |
|
350 | 350 | isn't changed on the filesystem (see also issue4583). |
|
351 | 351 | |
|
352 | This test is now "best effort": as the mechanisms to prevent such races get |

353 | better, it becomes more complicated to construct a specific scenario that |

354 | would trigger one. If you see flakiness here, there is a race. |
|
355 | ||
|
352 | 356 | $ cat > $TESTTMP/abort.py <<EOF |
|
353 | 357 | > from __future__ import absolute_import |
|
354 | 358 | > # emulate aborting before "recordupdates()". in this case, files |
|
355 | 359 | > # are changed without updating dirstate |
|
356 | 360 | > from mercurial import ( |
|
357 | 361 | > error, |
|
358 | 362 | > extensions, |
|
359 | 363 | > merge, |
|
360 | 364 | > ) |
|
361 | 365 | > def applyupdates(orig, *args, **kwargs): |
|
362 | 366 | > orig(*args, **kwargs) |
|
363 | 367 | > raise error.Abort(b'intentional aborting') |
|
364 | 368 | > def extsetup(ui): |
|
365 | 369 | > extensions.wrapfunction(merge, "applyupdates", applyupdates) |
|
366 | 370 | > EOF |
|
367 | 371 | |
|
368 | $ cat >> .hg/hgrc <<EOF | |
|
369 | > [fakedirstatewritetime] | |
|
370 | > # emulate invoking dirstate.write() via repo.status() | |
|
371 | > # at 2000-01-01 00:00 | |
|
372 | > fakenow = 200001010000 | |
|
373 | > EOF | |
|
374 | ||
|
375 | 372 | (file gotten from other revision) |
|
376 | 373 | |
|
377 | 374 | $ hg update -q -C 2 |
|
378 | 375 | $ echo 'THIS IS FILE B5' > b |
|
379 | 376 | $ hg commit -m 'commit #5' |
|
380 | 377 | |
|
381 | 378 | $ hg update -q -C 3 |
|
382 | 379 | $ cat b |
|
383 | 380 | This is file b1 |
|
384 | $ touch -t 200001010000 b | |
|
385 | $ hg debugrebuildstate | |
|
386 | ||
|
387 | 381 | $ cat >> .hg/hgrc <<EOF |
|
388 | 382 | > [extensions] |
|
389 | > fakedirstatewritetime = $TESTDIR/fakedirstatewritetime.py | |
|
390 | 383 | > abort = $TESTTMP/abort.py |
|
391 | 384 | > EOF |
|
392 | 385 | $ hg merge 5 |
|
393 | 386 | abort: intentional aborting |
|
394 | 387 | [255] |
|
395 | 388 | $ cat >> .hg/hgrc <<EOF |
|
396 | 389 | > [extensions] |
|
397 | > fakedirstatewritetime = ! | |
|
398 | 390 | > abort = ! |
|
399 | 391 | > EOF |
|
400 | 392 | |
|
401 | 393 | $ cat b |
|
402 | 394 | THIS IS FILE B5 |
|
403 | $ touch -t 200001010000 b | |
|
404 | 395 | $ hg status -A b |
|
405 | 396 | M b |
|
406 | 397 | |
|
407 | 398 | (file merged from other revision) |
|
408 | 399 | |
|
409 | 400 | $ hg update -q -C 3 |
|
410 | 401 | $ echo 'this is file b6' > b |
|
411 | 402 | $ hg commit -m 'commit #6' |
|
412 | 403 | created new head |
|
413 | 404 | |
|
414 | 405 | $ cat b |
|
415 | 406 | this is file b6 |
|
416 | $ touch -t 200001010000 b | |
|
417 | $ hg debugrebuildstate | |
|
407 | $ hg status | |
|
418 | 408 | |
|
419 | 409 | $ cat >> .hg/hgrc <<EOF |
|
420 | 410 | > [extensions] |
|
421 | > fakedirstatewritetime = $TESTDIR/fakedirstatewritetime.py | |
|
422 | 411 | > abort = $TESTTMP/abort.py |
|
423 | 412 | > EOF |
|
424 | 413 | $ hg merge --tool internal:other 5 |
|
425 | 414 | abort: intentional aborting |
|
426 | 415 | [255] |
|
427 | 416 | $ cat >> .hg/hgrc <<EOF |
|
428 | 417 | > [extensions] |
|
429 | > fakedirstatewritetime = ! | |
|
430 | 418 | > abort = ! |
|
431 | 419 | > EOF |
|
432 | 420 | |
|
433 | 421 | $ cat b |
|
434 | 422 | THIS IS FILE B5 |
|
435 | $ touch -t 200001010000 b | |
|
436 | 423 | $ hg status -A b |
|
437 | 424 | M b |
|
438 | 425 | |
|
439 | 426 | $ cd .. |
@@ -1,2035 +1,2019 b'' | |||
|
1 | 1 | Let commit recurse into subrepos by default to match pre-2.0 behavior: |
|
2 | 2 | |
|
3 | 3 | $ echo "[ui]" >> $HGRCPATH |
|
4 | 4 | $ echo "commitsubrepos = Yes" >> $HGRCPATH |
|
5 | 5 | |
|
6 | 6 | $ hg init t |
|
7 | 7 | $ cd t |
|
8 | 8 | |
|
9 | 9 | first revision, no sub |
|
10 | 10 | |
|
11 | 11 | $ echo a > a |
|
12 | 12 | $ hg ci -Am0 |
|
13 | 13 | adding a |
|
14 | 14 | |
|
15 | 15 | add first sub |
|
16 | 16 | |
|
17 | 17 | $ echo s = s > .hgsub |
|
18 | 18 | $ hg add .hgsub |
|
19 | 19 | $ hg init s |
|
20 | 20 | $ echo a > s/a |
|
21 | 21 | |
|
22 | 22 | Issue2232: committing a subrepo without .hgsub |
|
23 | 23 | |
|
24 | 24 | $ hg ci -mbad s |
|
25 | 25 | abort: can't commit subrepos without .hgsub |
|
26 | 26 | [255] |
|
27 | 27 | |
|
28 | 28 | $ hg -R s add s/a |
|
29 | 29 | $ hg files -S |
|
30 | 30 | .hgsub |
|
31 | 31 | a |
|
32 | 32 | s/a |
|
33 | 33 | |
|
34 | 34 | `hg files` respects ui.relative-paths |
|
35 | 35 | BROKEN: shows subrepo paths relative to the subrepo |
|
36 | 36 | $ hg files -S --config ui.relative-paths=no |
|
37 | 37 | .hgsub |
|
38 | 38 | a |
|
39 | 39 | s/a |
|
40 | 40 | |
|
41 | 41 | $ hg -R s ci -Ams0 |
|
42 | 42 | $ hg sum |
|
43 | 43 | parent: 0:f7b1eb17ad24 tip |
|
44 | 44 | 0 |
|
45 | 45 | branch: default |
|
46 | 46 | commit: 1 added, 1 subrepos |
|
47 | 47 | update: (current) |
|
48 | 48 | phases: 1 draft |
|
49 | 49 | $ hg ci -m1 |
|
50 | 50 | |
|
51 | 51 | test handling .hgsubstate "added" explicitly. |
|
52 | 52 | |
|
53 | 53 | $ hg parents --template '{node}\n{files}\n' |
|
54 | 54 | 7cf8cfea66e410e8e3336508dfeec07b3192de51 |
|
55 | 55 | .hgsub .hgsubstate |
|
56 | 56 | $ hg rollback -q |
|
57 | 57 | $ hg add .hgsubstate |
|
58 | 58 | $ hg ci -m1 |
|
59 | 59 | $ hg parents --template '{node}\n{files}\n' |
|
60 | 60 | 7cf8cfea66e410e8e3336508dfeec07b3192de51 |
|
61 | 61 | .hgsub .hgsubstate |
|
62 | 62 | |
|
63 | 63 | A subrepo path which overlaps with a file path does not change warnings in remove() |
|
64 | 64 | |
|
65 | 65 | $ mkdir snot |
|
66 | 66 | $ touch snot/file |
|
67 | 67 | $ hg remove -S snot/file |
|
68 | 68 | not removing snot/file: file is untracked |
|
69 | 69 | [1] |
|
70 | 70 | $ hg cat snot/filenot |
|
71 | 71 | snot/filenot: no such file in rev 7cf8cfea66e4 |
|
72 | 72 | [1] |
|
73 | 73 | $ rm -r snot |
|
74 | 74 | |
|
75 | 75 | Revert subrepo and test subrepo fileset keyword: |
|
76 | 76 | |
|
77 | 77 | $ echo b > s/a |
|
78 | 78 | $ hg revert --dry-run "set:subrepo('glob:s*')" |
|
79 | 79 | reverting subrepo s |
|
80 | 80 | reverting s/a |
|
81 | 81 | $ cat s/a |
|
82 | 82 | b |
|
83 | 83 | $ hg revert "set:subrepo('glob:s*')" |
|
84 | 84 | reverting subrepo s |
|
85 | 85 | reverting s/a |
|
86 | 86 | $ cat s/a |
|
87 | 87 | a |
|
88 | 88 | $ rm s/a.orig |
|
89 | 89 | |
|
90 | 90 | Revert subrepo with no backup. The "reverting s/a" line is gone since |
|
91 | 91 | we're really running 'hg update' in the subrepo: |
|
92 | 92 | |
|
93 | 93 | $ echo b > s/a |
|
94 | 94 | $ hg revert --no-backup s |
|
95 | 95 | reverting subrepo s |
|
96 | 96 | |
|
97 | 97 | Issue2022: update -C |
|
98 | 98 | |
|
99 | 99 | $ echo b > s/a |
|
100 | 100 | $ hg sum |
|
101 | 101 | parent: 1:7cf8cfea66e4 tip |
|
102 | 102 | 1 |
|
103 | 103 | branch: default |
|
104 | 104 | commit: 1 subrepos |
|
105 | 105 | update: (current) |
|
106 | 106 | phases: 2 draft |
|
107 | 107 | $ hg co -C 1 |
|
108 | 108 | 1 files updated, 0 files merged, 0 files removed, 0 files unresolved |
|
109 | 109 | $ hg sum |
|
110 | 110 | parent: 1:7cf8cfea66e4 tip |
|
111 | 111 | 1 |
|
112 | 112 | branch: default |
|
113 | 113 | commit: (clean) |
|
114 | 114 | update: (current) |
|
115 | 115 | phases: 2 draft |
|
116 | 116 | |
|
117 | 117 | commands that require a clean repo should respect subrepos |
|
118 | 118 | |
|
119 | 119 | $ echo b >> s/a |
|
120 | 120 | $ hg backout tip |
|
121 | 121 | abort: uncommitted changes in subrepository "s" |
|
122 | 122 | [255] |
|
123 | 123 | $ hg revert -C -R s s/a |
|
124 | 124 | |
|
125 | 125 | add sub sub |
|
126 | 126 | |
|
127 | 127 | $ echo ss = ss > s/.hgsub |
|
128 | 128 | $ hg init s/ss |
|
129 | 129 | $ echo a > s/ss/a |
|
130 | 130 | $ hg -R s add s/.hgsub |
|
131 | 131 | $ hg -R s/ss add s/ss/a |
|
132 | 132 | $ hg sum |
|
133 | 133 | parent: 1:7cf8cfea66e4 tip |
|
134 | 134 | 1 |
|
135 | 135 | branch: default |
|
136 | 136 | commit: 1 subrepos |
|
137 | 137 | update: (current) |
|
138 | 138 | phases: 2 draft |
|
139 | 139 | $ hg ci -m2 |
|
140 | 140 | committing subrepository s |
|
141 | 141 | committing subrepository s/ss |
|
142 | 142 | $ hg sum |
|
143 | 143 | parent: 2:df30734270ae tip |
|
144 | 144 | 2 |
|
145 | 145 | branch: default |
|
146 | 146 | commit: (clean) |
|
147 | 147 | update: (current) |
|
148 | 148 | phases: 3 draft |
|
149 | 149 | |
|
150 | 150 | test handling .hgsubstate "modified" explicitly. |
|
151 | 151 | |
|
152 | 152 | $ hg parents --template '{node}\n{files}\n' |
|
153 | 153 | df30734270ae757feb35e643b7018e818e78a9aa |
|
154 | 154 | .hgsubstate |
|
155 | 155 | $ hg rollback -q |
|
156 | 156 | $ hg status -A .hgsubstate |
|
157 | 157 | M .hgsubstate |
|
158 | 158 | $ hg ci -m2 |
|
159 | 159 | $ hg parents --template '{node}\n{files}\n' |
|
160 | 160 | df30734270ae757feb35e643b7018e818e78a9aa |
|
161 | 161 | .hgsubstate |
|
162 | 162 | |
|
163 | 163 | bump sub rev (and check it is ignored by ui.commitsubrepos) |
|
164 | 164 | |
|
165 | 165 | $ echo b > s/a |
|
166 | 166 | $ hg -R s ci -ms1 |
|
167 | 167 | $ hg --config ui.commitsubrepos=no ci -m3 |
|
168 | 168 | |
|
169 | 169 | leave sub dirty (and check ui.commitsubrepos=no aborts the commit) |
|
170 | 170 | |
|
171 | 171 | $ echo c > s/a |
|
172 | 172 | $ hg --config ui.commitsubrepos=no ci -m4 |
|
173 | 173 | abort: uncommitted changes in subrepository "s" |
|
174 | 174 | (use --subrepos for recursive commit) |
|
175 | 175 | [255] |
|
176 | 176 | $ hg id |
|
177 | 177 | f6affe3fbfaa+ tip |
|
178 | 178 | $ hg -R s ci -mc |
|
179 | 179 | $ hg id |
|
180 | 180 | f6affe3fbfaa+ tip |
|
181 | 181 | $ echo d > s/a |
|
182 | 182 | $ hg ci -m4 |
|
183 | 183 | committing subrepository s |
|
184 | 184 | $ hg tip -R s |
|
185 | 185 | changeset: 4:02dcf1d70411 |
|
186 | 186 | tag: tip |
|
187 | 187 | user: test |
|
188 | 188 | date: Thu Jan 01 00:00:00 1970 +0000 |
|
189 | 189 | summary: 4 |
|
190 | 190 | |
|
191 | 191 | |
|
192 | 192 | check caching |
|
193 | 193 | |
|
194 | 194 | $ hg co 0 |
|
195 | 195 | 0 files updated, 0 files merged, 2 files removed, 0 files unresolved |
|
196 | 196 | $ hg debugsub |
|
197 | 197 | |
|
198 | 198 | restore |
|
199 | 199 | |
|
200 | 200 | $ hg co |
|
201 | 201 | 2 files updated, 0 files merged, 0 files removed, 0 files unresolved |
|
202 | 202 | $ hg debugsub |
|
203 | 203 | path s |
|
204 | 204 | source s |
|
205 | 205 | revision 02dcf1d704118aee3ee306ccfa1910850d5b05ef |
|
206 | 206 | |
|
207 | 207 | new branch for merge tests |
|
208 | 208 | |
|
209 | 209 | $ hg co 1 |
|
210 | 210 | 1 files updated, 0 files merged, 0 files removed, 0 files unresolved |
|
211 | 211 | $ echo t = t >> .hgsub |
|
212 | 212 | $ hg init t |
|
213 | 213 | $ echo t > t/t |
|
214 | 214 | $ hg -R t add t |
|
215 | 215 | adding t/t |
|
216 | 216 | |
|
217 | 217 | 5 |
|
218 | 218 | |
|
219 | 219 | $ hg ci -m5 # add sub |
|
220 | 220 | committing subrepository t |
|
221 | 221 | created new head |
|
222 | 222 | $ echo t2 > t/t |
|
223 | 223 | |
|
224 | 224 | 6 |
|
225 | 225 | |
|
226 | 226 | $ hg st -R s |
|
227 | 227 | $ hg ci -m6 # change sub |
|
228 | 228 | committing subrepository t |
|
229 | 229 | $ hg debugsub |
|
230 | 230 | path s |
|
231 | 231 | source s |
|
232 | 232 | revision e4ece1bf43360ddc8f6a96432201a37b7cd27ae4 |
|
233 | 233 | path t |
|
234 | 234 | source t |
|
235 | 235 | revision 6747d179aa9a688023c4b0cad32e4c92bb7f34ad |
|
236 | 236 | $ echo t3 > t/t |
|
237 | 237 | |
|
238 | 238 | 7 |
|
239 | 239 | |
|
240 | 240 | $ hg ci -m7 # change sub again for conflict test |
|
241 | 241 | committing subrepository t |
|
242 | 242 | $ hg rm .hgsub |
|
243 | 243 | |
|
244 | 244 | 8 |
|
245 | 245 | |
|
246 | 246 | $ hg ci -m8 # remove sub |
|
247 | 247 | |
|
248 | 248 | test handling .hgsubstate "removed" explicitly. |
|
249 | 249 | |
|
250 | 250 | $ hg parents --template '{node}\n{files}\n' |
|
251 | 251 | 96615c1dad2dc8e3796d7332c77ce69156f7b78e |
|
252 | 252 | .hgsub .hgsubstate |
|
253 | 253 | $ hg rollback -q |
|
254 | 254 | $ hg remove .hgsubstate |
|
255 | 255 | $ hg ci -m8 |
|
256 | 256 | $ hg parents --template '{node}\n{files}\n' |
|
257 | 257 | 96615c1dad2dc8e3796d7332c77ce69156f7b78e |
|
258 | 258 | .hgsub .hgsubstate |
|
259 | 259 | |
|
260 | 260 | merge tests |
|
261 | 261 | |
|
262 | 262 | $ hg co -C 3 |
|
263 | 263 | 2 files updated, 0 files merged, 0 files removed, 0 files unresolved |
|
264 | 264 | $ hg merge 5 # test adding |
|
265 | 265 | 1 files updated, 0 files merged, 0 files removed, 0 files unresolved |
|
266 | 266 | (branch merge, don't forget to commit) |
|
267 | 267 | $ hg debugsub |
|
268 | 268 | path s |
|
269 | 269 | source s |
|
270 | 270 | revision fc627a69481fcbe5f1135069e8a3881c023e4cf5 |
|
271 | 271 | path t |
|
272 | 272 | source t |
|
273 | 273 | revision 60ca1237c19474e7a3978b0dc1ca4e6f36d51382 |
|
274 | 274 | $ hg ci -m9 |
|
275 | 275 | created new head |
|
276 | 276 | $ hg merge 6 --debug # test change |
|
277 | 277 | resolving manifests |
|
278 | 278 | branchmerge: True, force: False, partial: False |
|
279 | 279 | ancestor: 1f14a2e2d3ec, local: f0d2028bf86d+, remote: 1831e14459c4 |
|
280 | 280 | starting 4 threads for background file closing (?) |
|
281 | 281 | .hgsubstate: versions differ -> m (premerge) |
|
282 | 282 | subrepo merge f0d2028bf86d+ 1831e14459c4 1f14a2e2d3ec |
|
283 | 283 | subrepo t: other changed, get t:6747d179aa9a688023c4b0cad32e4c92bb7f34ad:hg |
|
284 | 284 | getting subrepo t |
|
285 | 285 | resolving manifests |
|
286 | 286 | branchmerge: False, force: False, partial: False |
|
287 | 287 | ancestor: 60ca1237c194, local: 60ca1237c194+, remote: 6747d179aa9a |
|
288 | 288 | t: remote is newer -> g |
|
289 | 289 | getting t |
|
290 | 290 | 0 files updated, 0 files merged, 0 files removed, 0 files unresolved |
|
291 | 291 | (branch merge, don't forget to commit) |
|
292 | 292 | $ hg debugsub |
|
293 | 293 | path s |
|
294 | 294 | source s |
|
295 | 295 | revision fc627a69481fcbe5f1135069e8a3881c023e4cf5 |
|
296 | 296 | path t |
|
297 | 297 | source t |
|
298 | 298 | revision 6747d179aa9a688023c4b0cad32e4c92bb7f34ad |
|
299 | 299 | $ echo conflict > t/t |
|
300 | 300 | $ hg ci -m10 |
|
301 | 301 | committing subrepository t |
|
302 | 302 | $ HGMERGE=internal:merge hg merge --debug 7 # test conflict |
|
303 | 303 | resolving manifests |
|
304 | 304 | branchmerge: True, force: False, partial: False |
|
305 | 305 | ancestor: 1831e14459c4, local: e45c8b14af55+, remote: f94576341bcf |
|
306 | 306 | starting 4 threads for background file closing (?) |
|
307 | 307 | .hgsubstate: versions differ -> m (premerge) |
|
308 | 308 | subrepo merge e45c8b14af55+ f94576341bcf 1831e14459c4 |
|
309 | 309 | subrepo t: both sides changed |
|
310 | 310 | subrepository t diverged (local revision: 20a0db6fbf6c, remote revision: 7af322bc1198) |
|
311 | 311 | starting 4 threads for background file closing (?) |
|
312 | 312 | you can (m)erge, keep (l)ocal [working copy] or keep (r)emote [merge rev]. |
|
313 | 313 | what do you want to do? m |
|
314 | 314 | merging subrepository "t" |
|
315 | 315 | resolving manifests |
|
316 | 316 | branchmerge: True, force: False, partial: False |
|
317 | 317 | ancestor: 6747d179aa9a, local: 20a0db6fbf6c+, remote: 7af322bc1198 |
|
318 | 318 | starting 4 threads for background file closing (?) |
|
319 | 319 | preserving t for resolve of t |
|
320 | 320 | t: versions differ -> m (premerge) |
|
321 | 321 | picked tool ':merge' for t (binary False symlink False changedelete False) |
|
322 | 322 | merging t |
|
323 | 323 | my t@20a0db6fbf6c+ other t@7af322bc1198 ancestor t@6747d179aa9a |
|
324 | 324 | t: versions differ -> m (merge) |
|
325 | 325 | picked tool ':merge' for t (binary False symlink False changedelete False) |
|
326 | 326 | my t@20a0db6fbf6c+ other t@7af322bc1198 ancestor t@6747d179aa9a |
|
327 | 327 | warning: conflicts while merging t! (edit, then use 'hg resolve --mark') |
|
328 | 328 | 0 files updated, 0 files merged, 0 files removed, 1 files unresolved |
|
329 | 329 | use 'hg resolve' to retry unresolved file merges or 'hg merge --abort' to abandon |
|
330 | 330 | subrepo t: merge with t:7af322bc1198a32402fe903e0b7ebcfc5c9bf8f4:hg |
|
331 | 331 | 0 files updated, 0 files merged, 0 files removed, 0 files unresolved |
|
332 | 332 | (branch merge, don't forget to commit) |
|
333 | 333 | |
|
334 | 334 | should conflict |
|
335 | 335 | |
|
336 | 336 | $ cat t/t |
|
337 | 337 | <<<<<<< local: 20a0db6fbf6c - test: 10 |
|
338 | 338 | conflict |
|
339 | 339 | ======= |
|
340 | 340 | t3 |
|
341 | 341 | >>>>>>> other: 7af322bc1198 - test: 7 |
|
342 | 342 | |
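(the conflict markers live inside subrepo t itself: merging two .hgsubstate
files that both changed triggers a recursive merge of the subrepo between the
two recorded revisions, with the revision from the merge ancestor as the
base, as the --debug trace above shows)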
|
343 | 343 | 11: remove subrepo t |
|
344 | 344 | |
|
345 | 345 | $ hg co -C 5 |
|
346 | 346 | 1 files updated, 0 files merged, 0 files removed, 0 files unresolved |
|
347 | 347 | $ hg revert -r 4 .hgsub # remove t |
|
348 | 348 | $ hg ci -m11 |
|
349 | 349 | created new head |
|
350 | 350 | $ hg debugsub |
|
351 | 351 | path s |
|
352 | 352 | source s |
|
353 | 353 | revision e4ece1bf43360ddc8f6a96432201a37b7cd27ae4 |
|
354 | 354 | |
|
355 | 355 | local removed, remote changed, keep changed |
|
356 | 356 | |
|
357 | 357 | $ hg merge 6 |
|
358 | 358 | remote [merge rev] changed subrepository t which local [working copy] removed |
|
359 | 359 | use (c)hanged version or (d)elete? c |
|
360 | 360 | 0 files updated, 0 files merged, 0 files removed, 0 files unresolved |
|
361 | 361 | (branch merge, don't forget to commit) |
|
362 | 362 | BROKEN: should include subrepo t |
|
363 | 363 | $ hg debugsub |
|
364 | 364 | path s |
|
365 | 365 | source s |
|
366 | 366 | revision e4ece1bf43360ddc8f6a96432201a37b7cd27ae4 |
|
367 | 367 | $ cat .hgsubstate |
|
368 | 368 | e4ece1bf43360ddc8f6a96432201a37b7cd27ae4 s |
|
369 | 369 | 6747d179aa9a688023c4b0cad32e4c92bb7f34ad t |
|
370 | 370 | $ hg ci -m 'local removed, remote changed, keep changed' |
|
371 | 371 | BROKEN: should include subrepo t |
|
372 | 372 | $ hg debugsub |
|
373 | 373 | path s |
|
374 | 374 | source s |
|
375 | 375 | revision e4ece1bf43360ddc8f6a96432201a37b7cd27ae4 |
|
376 | 376 | BROKEN: should include subrepo t |
|
377 | 377 | $ cat .hgsubstate |
|
378 | 378 | e4ece1bf43360ddc8f6a96432201a37b7cd27ae4 s |
|
379 | 379 | $ cat t/t |
|
380 | 380 | t2 |
|
381 | 381 | |
|
382 | 382 | local removed, remote changed, keep removed |
|
383 | 383 | |
|
384 | 384 | $ hg co -C 11 |
|
385 | 385 | 1 files updated, 0 files merged, 0 files removed, 0 files unresolved |
|
386 | 386 | $ hg merge --config ui.interactive=true 6 <<EOF |
|
387 | 387 | > d |
|
388 | 388 | > EOF |
|
389 | 389 | remote [merge rev] changed subrepository t which local [working copy] removed |
|
390 | 390 | use (c)hanged version or (d)elete? d |
|
391 | 391 | 0 files updated, 0 files merged, 0 files removed, 0 files unresolved |
|
392 | 392 | (branch merge, don't forget to commit) |
|
393 | 393 | $ hg debugsub |
|
394 | 394 | path s |
|
395 | 395 | source s |
|
396 | 396 | revision e4ece1bf43360ddc8f6a96432201a37b7cd27ae4 |
|
397 | 397 | $ cat .hgsubstate |
|
398 | 398 | e4ece1bf43360ddc8f6a96432201a37b7cd27ae4 s |
|
399 | 399 | $ hg ci -m 'local removed, remote changed, keep removed' |
|
400 | 400 | created new head |
|
401 | 401 | $ hg debugsub |
|
402 | 402 | path s |
|
403 | 403 | source s |
|
404 | 404 | revision e4ece1bf43360ddc8f6a96432201a37b7cd27ae4 |
|
405 | 405 | $ cat .hgsubstate |
|
406 | 406 | e4ece1bf43360ddc8f6a96432201a37b7cd27ae4 s |
|
407 | 407 | |
|
408 | 408 | local changed, remote removed, keep changed |
|
409 | 409 | |
|
410 | 410 | $ hg co -C 6 |
|
411 | 411 | 2 files updated, 0 files merged, 0 files removed, 0 files unresolved |
|
412 | 412 | $ hg merge 11 |
|
413 | 413 | local [working copy] changed subrepository t which remote [merge rev] removed |
|
414 | 414 | use (c)hanged version or (d)elete? c |
|
415 | 415 | 1 files updated, 0 files merged, 0 files removed, 0 files unresolved |
|
416 | 416 | (branch merge, don't forget to commit) |
|
417 | 417 | BROKEN: should include subrepo t |
|
418 | 418 | $ hg debugsub |
|
419 | 419 | path s |
|
420 | 420 | source s |
|
421 | 421 | revision e4ece1bf43360ddc8f6a96432201a37b7cd27ae4 |
|
422 | 422 | BROKEN: should include subrepo t |
|
423 | 423 | $ cat .hgsubstate |
|
424 | 424 | e4ece1bf43360ddc8f6a96432201a37b7cd27ae4 s |
|
425 | 425 | $ hg ci -m 'local changed, remote removed, keep changed' |
|
426 | 426 | created new head |
|
427 | 427 | BROKEN: should include subrepo t |
|
428 | 428 | $ hg debugsub |
|
429 | 429 | path s |
|
430 | 430 | source s |
|
431 | 431 | revision e4ece1bf43360ddc8f6a96432201a37b7cd27ae4 |
|
432 | 432 | BROKEN: should include subrepo t |
|
433 | 433 | $ cat .hgsubstate |
|
434 | 434 | e4ece1bf43360ddc8f6a96432201a37b7cd27ae4 s |
|
435 | 435 | $ cat t/t |
|
436 | 436 | t2 |
|
437 | 437 | |
|
438 | 438 | local changed, remote removed, keep removed |
|
439 | 439 | |
|
440 | 440 | $ hg co -C 6 |
|
441 | 441 | 2 files updated, 0 files merged, 0 files removed, 0 files unresolved |
|
442 | 442 | $ hg merge --config ui.interactive=true 11 <<EOF |
|
443 | 443 | > d |
|
444 | 444 | > EOF |
|
445 | 445 | local [working copy] changed subrepository t which remote [merge rev] removed |
|
446 | 446 | use (c)hanged version or (d)elete? d |
|
447 | 447 | 1 files updated, 0 files merged, 0 files removed, 0 files unresolved |
|
448 | 448 | (branch merge, don't forget to commit) |
|
449 | 449 | $ hg debugsub |
|
450 | 450 | path s |
|
451 | 451 | source s |
|
452 | 452 | revision e4ece1bf43360ddc8f6a96432201a37b7cd27ae4 |
|
453 | 453 | $ cat .hgsubstate |
|
454 | 454 | e4ece1bf43360ddc8f6a96432201a37b7cd27ae4 s |
|
455 | 455 | $ hg ci -m 'local changed, remote removed, keep removed' |
|
456 | 456 | created new head |
|
457 | 457 | $ hg debugsub |
|
458 | 458 | path s |
|
459 | 459 | source s |
|
460 | 460 | revision e4ece1bf43360ddc8f6a96432201a37b7cd27ae4 |
|
461 | 461 | $ cat .hgsubstate |
|
462 | 462 | e4ece1bf43360ddc8f6a96432201a37b7cd27ae4 s |
|
463 | 463 | |
|
464 | 464 | clean up to avoid having to fix up the tests below |
|
465 | 465 | |
|
466 | 466 | $ hg co -C 10 |
|
467 | 467 | 2 files updated, 0 files merged, 0 files removed, 0 files unresolved |
|
468 | 468 | $ cat >> $HGRCPATH <<EOF |
|
469 | 469 | > [extensions] |
|
470 | 470 | > strip= |
|
471 | 471 | > EOF |
|
472 | 472 | $ hg strip -r 11:15 |
|
473 | 473 | saved backup bundle to $TESTTMP/t/.hg/strip-backup/*-backup.hg (glob) |
|
474 | 474 | |
|
475 | 475 | clone |
|
476 | 476 | |
|
477 | 477 | $ cd .. |
|
478 | 478 | $ hg clone t tc |
|
479 | 479 | updating to branch default |
|
480 | 480 | cloning subrepo s from $TESTTMP/t/s |
|
481 | 481 | cloning subrepo s/ss from $TESTTMP/t/s/ss |
|
482 | 482 | cloning subrepo t from $TESTTMP/t/t |
|
483 | 483 | 3 files updated, 0 files merged, 0 files removed, 0 files unresolved |
|
484 | 484 | $ cd tc |
|
485 | 485 | $ hg debugsub |
|
486 | 486 | path s |
|
487 | 487 | source s |
|
488 | 488 | revision fc627a69481fcbe5f1135069e8a3881c023e4cf5 |
|
489 | 489 | path t |
|
490 | 490 | source t |
|
491 | 491 | revision 20a0db6fbf6c3d2836e6519a642ae929bfc67c0e |
|
492 | 492 | $ cd .. |
|
493 | 493 | |
|
494 | 494 | clone with subrepo disabled (update should fail) |
|
495 | 495 | |
|
496 | 496 | $ hg clone t -U tc2 --config subrepos.allowed=false |
|
497 | 497 | $ hg update -R tc2 --config subrepos.allowed=false |
|
498 | 498 | abort: subrepos not enabled |
|
499 | 499 | (see 'hg help config.subrepos' for details) |
|
500 | 500 | [255] |
|
501 | 501 | $ ls -A tc2 |
|
502 | 502 | .hg |
|
503 | 503 | .hgsub |
|
504 | 504 | .hgsubstate |
|
505 | 505 | a |
|
506 | 506 | |
|
507 | 507 | $ hg clone t tc3 --config subrepos.allowed=false |
|
508 | 508 | updating to branch default |
|
509 | 509 | abort: subrepos not enabled |
|
510 | 510 | (see 'hg help config.subrepos' for details) |
|
511 | 511 | [255] |
|
512 | 512 | $ ls -A tc3 |
|
513 | 513 | .hg |
|
514 | 514 | .hgsub |
|
515 | 515 | .hgsubstate |
|
516 | 516 | a |
|
517 | 517 | |
|
518 | 518 | And again with just the hg type disabled |
|
519 | 519 | |
|
520 | 520 | $ hg clone t -U tc4 --config subrepos.hg:allowed=false |
|
521 | 521 | $ hg update -R tc4 --config subrepos.hg:allowed=false |
|
522 | 522 | abort: hg subrepos not allowed |
|
523 | 523 | (see 'hg help config.subrepos' for details) |
|
524 | 524 | [255] |
|
525 | 525 | $ ls -A tc4 |
|
526 | 526 | .hg |
|
527 | 527 | .hgsub |
|
528 | 528 | .hgsubstate |
|
529 | 529 | a |
|
530 | 530 | |
|
531 | 531 | $ hg clone t tc5 --config subrepos.hg:allowed=false |
|
532 | 532 | updating to branch default |
|
533 | 533 | abort: hg subrepos not allowed |
|
534 | 534 | (see 'hg help config.subrepos' for details) |
|
535 | 535 | [255] |
|
536 | 536 | $ ls -A tc5 |
|
537 | 537 | .hg |
|
538 | 538 | .hgsub |
|
539 | 539 | .hgsubstate |
|
540 | 540 | a |
|
541 | 541 | |
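(subrepos.allowed gates subrepo operations as a whole, and
subrepos.hg:allowed just the hg type; in all four cases above the clone of
the top repository itself succeeds, and only the working-copy update that
would touch the subrepos aborts)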
|
542 | 542 | push |
|
543 | 543 | |
|
544 | 544 | $ cd tc |
|
545 | 545 | $ echo bah > t/t |
|
546 | 546 | $ hg ci -m11 |
|
547 | 547 | committing subrepository t |
|
548 | 548 | $ hg push |
|
549 | 549 | pushing to $TESTTMP/t |
|
550 | 550 | no changes made to subrepo s/ss since last push to $TESTTMP/t/s/ss |
|
551 | 551 | no changes made to subrepo s since last push to $TESTTMP/t/s |
|
552 | 552 | pushing subrepo t to $TESTTMP/t/t |
|
553 | 553 | searching for changes |
|
554 | 554 | adding changesets |
|
555 | 555 | adding manifests |
|
556 | 556 | adding file changes |
|
557 | 557 | added 1 changesets with 1 changes to 1 files |
|
558 | 558 | searching for changes |
|
559 | 559 | adding changesets |
|
560 | 560 | adding manifests |
|
561 | 561 | adding file changes |
|
562 | 562 | added 1 changesets with 1 changes to 1 files |
|
563 | 563 | |
|
564 | 564 | push -f |
|
565 | 565 | |
|
566 | 566 | $ echo bah > s/a |
|
567 | 567 | $ hg ci -m12 |
|
568 | 568 | committing subrepository s |
|
569 | 569 | $ hg push |
|
570 | 570 | pushing to $TESTTMP/t |
|
571 | 571 | no changes made to subrepo s/ss since last push to $TESTTMP/t/s/ss |
|
572 | 572 | pushing subrepo s to $TESTTMP/t/s |
|
573 | 573 | searching for changes |
|
574 | 574 | abort: push creates new remote head 12a213df6fa9 (in subrepository "s") |
|
575 | 575 | (merge or see 'hg help push' for details about pushing new heads) |
|
576 | 576 | [255] |
|
577 | 577 | $ hg push -f |
|
578 | 578 | pushing to $TESTTMP/t |
|
579 | 579 | pushing subrepo s/ss to $TESTTMP/t/s/ss |
|
580 | 580 | searching for changes |
|
581 | 581 | no changes found |
|
582 | 582 | pushing subrepo s to $TESTTMP/t/s |
|
583 | 583 | searching for changes |
|
584 | 584 | adding changesets |
|
585 | 585 | adding manifests |
|
586 | 586 | adding file changes |
|
587 | 587 | added 1 changesets with 1 changes to 1 files (+1 heads) |
|
588 | 588 | pushing subrepo t to $TESTTMP/t/t |
|
589 | 589 | searching for changes |
|
590 | 590 | no changes found |
|
591 | 591 | searching for changes |
|
592 | 592 | adding changesets |
|
593 | 593 | adding manifests |
|
594 | 594 | adding file changes |
|
595 | 595 | added 1 changesets with 1 changes to 1 files |
|
596 | 596 | |
|
597 | 597 | check that unmodified subrepos are not pushed |
|
598 | 598 | |
|
599 | 599 | $ hg clone . ../tcc |
|
600 | 600 | updating to branch default |
|
601 | 601 | cloning subrepo s from $TESTTMP/tc/s |
|
602 | 602 | cloning subrepo s/ss from $TESTTMP/tc/s/ss |
|
603 | 603 | cloning subrepo t from $TESTTMP/tc/t |
|
604 | 604 | 3 files updated, 0 files merged, 0 files removed, 0 files unresolved |
|
605 | 605 | |
|
606 | 606 | the subrepos on the new clone have nothing to push to their source
|
607 | 607 | |
|
608 | 608 | $ hg push -R ../tcc . |
|
609 | 609 | pushing to . |
|
610 | 610 | no changes made to subrepo s/ss since last push to s/ss |
|
611 | 611 | no changes made to subrepo s since last push to s |
|
612 | 612 | no changes made to subrepo t since last push to t |
|
613 | 613 | searching for changes |
|
614 | 614 | no changes found |
|
615 | 615 | [1] |
|
616 | 616 | |
|
617 | 617 | the subrepos on the source do not have a clean store versus the clone target

618 | 618 | because they were never explicitly pushed to that target
|
619 | 619 | |
|
620 | 620 | $ hg push ../tcc |
|
621 | 621 | pushing to ../tcc |
|
622 | 622 | pushing subrepo s/ss to ../tcc/s/ss |
|
623 | 623 | searching for changes |
|
624 | 624 | no changes found |
|
625 | 625 | pushing subrepo s to ../tcc/s |
|
626 | 626 | searching for changes |
|
627 | 627 | no changes found |
|
628 | 628 | pushing subrepo t to ../tcc/t |
|
629 | 629 | searching for changes |
|
630 | 630 | no changes found |
|
631 | 631 | searching for changes |
|
632 | 632 | no changes found |
|
633 | 633 | [1] |
|
634 | 634 | |
|
635 | 635 | after push their stores become clean |
|
636 | 636 | |
|
637 | 637 | $ hg push ../tcc |
|
638 | 638 | pushing to ../tcc |
|
639 | 639 | no changes made to subrepo s/ss since last push to ../tcc/s/ss |
|
640 | 640 | no changes made to subrepo s since last push to ../tcc/s |
|
641 | 641 | no changes made to subrepo t since last push to ../tcc/t |
|
642 | 642 | searching for changes |
|
643 | 643 | no changes found |
|
644 | 644 | [1] |
|
645 | 645 | |
|
646 | 646 | updating a subrepo to a different revision or changing |
|
647 | 647 | its working directory does not make its store dirty |
|
648 | 648 | |
|
649 | 649 | $ hg -R s update '.^' |
|
650 | 650 | 1 files updated, 0 files merged, 0 files removed, 0 files unresolved |
|
651 | 651 | $ hg push |
|
652 | 652 | pushing to $TESTTMP/t |
|
653 | 653 | no changes made to subrepo s/ss since last push to $TESTTMP/t/s/ss |
|
654 | 654 | no changes made to subrepo s since last push to $TESTTMP/t/s |
|
655 | 655 | no changes made to subrepo t since last push to $TESTTMP/t/t |
|
656 | 656 | searching for changes |
|
657 | 657 | no changes found |
|
658 | 658 | [1] |
|
659 | 659 | $ echo foo >> s/a |
|
660 | 660 | $ hg push |
|
661 | 661 | pushing to $TESTTMP/t |
|
662 | 662 | no changes made to subrepo s/ss since last push to $TESTTMP/t/s/ss |
|
663 | 663 | no changes made to subrepo s since last push to $TESTTMP/t/s |
|
664 | 664 | no changes made to subrepo t since last push to $TESTTMP/t/t |
|
665 | 665 | searching for changes |
|
666 | 666 | no changes found |
|
667 | 667 | [1] |
|
668 | 668 | $ hg -R s update -C tip |
|
669 | 669 | 1 files updated, 0 files merged, 0 files removed, 0 files unresolved |
|
670 | 670 | |
|
671 | 671 | committing into a subrepo makes its store (but not its parent's store) dirty |
|
672 | 672 | |
|
673 | 673 | $ echo foo >> s/ss/a |
|
674 | 674 | $ hg -R s/ss commit -m 'test dirty store detection' |
|
675 | 675 | |
|
676 | 676 | $ hg out -S -r `hg log -r tip -T "{node|short}"` |
|
677 | 677 | comparing with $TESTTMP/t |
|
678 | 678 | searching for changes |
|
679 | 679 | no changes found |
|
680 | 680 | comparing with $TESTTMP/t/s |
|
681 | 681 | searching for changes |
|
682 | 682 | no changes found |
|
683 | 683 | comparing with $TESTTMP/t/s/ss |
|
684 | 684 | searching for changes |
|
685 | 685 | changeset: 1:79ea5566a333 |
|
686 | 686 | tag: tip |
|
687 | 687 | user: test |
|
688 | 688 | date: Thu Jan 01 00:00:00 1970 +0000 |
|
689 | 689 | summary: test dirty store detection |
|
690 | 690 | |
|
691 | 691 | comparing with $TESTTMP/t/t |
|
692 | 692 | searching for changes |
|
693 | 693 | no changes found |
|
694 | 694 | |
|
695 | 695 | $ hg push |
|
696 | 696 | pushing to $TESTTMP/t |
|
697 | 697 | pushing subrepo s/ss to $TESTTMP/t/s/ss |
|
698 | 698 | searching for changes |
|
699 | 699 | adding changesets |
|
700 | 700 | adding manifests |
|
701 | 701 | adding file changes |
|
702 | 702 | added 1 changesets with 1 changes to 1 files |
|
703 | 703 | no changes made to subrepo s since last push to $TESTTMP/t/s |
|
704 | 704 | no changes made to subrepo t since last push to $TESTTMP/t/t |
|
705 | 705 | searching for changes |
|
706 | 706 | no changes found |
|
707 | 707 | [1] |
|
708 | 708 | |
|
709 | 709 | a subrepo store may be clean versus one repo but not versus another |
|
710 | 710 | |
|
711 | 711 | $ hg push |
|
712 | 712 | pushing to $TESTTMP/t |
|
713 | 713 | no changes made to subrepo s/ss since last push to $TESTTMP/t/s/ss |
|
714 | 714 | no changes made to subrepo s since last push to $TESTTMP/t/s |
|
715 | 715 | no changes made to subrepo t since last push to $TESTTMP/t/t |
|
716 | 716 | searching for changes |
|
717 | 717 | no changes found |
|
718 | 718 | [1] |
|
719 | 719 | $ hg push ../tcc |
|
720 | 720 | pushing to ../tcc |
|
721 | 721 | pushing subrepo s/ss to ../tcc/s/ss |
|
722 | 722 | searching for changes |
|
723 | 723 | adding changesets |
|
724 | 724 | adding manifests |
|
725 | 725 | adding file changes |
|
726 | 726 | added 1 changesets with 1 changes to 1 files |
|
727 | 727 | no changes made to subrepo s since last push to ../tcc/s |
|
728 | 728 | no changes made to subrepo t since last push to ../tcc/t |
|
729 | 729 | searching for changes |
|
730 | 730 | no changes found |
|
731 | 731 | [1] |
|
732 | 732 | |
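(a subrepo's "clean since last push" state is apparently tracked per
destination: a hash of the store is cached under .hg/cache/storehash for
each remote pushed to, so the same store can be clean against $TESTTMP/t yet
dirty against ../tcc; those cache files appear in the 'find' listing further
down and are removed near the end of this file to force a re-push)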
|
733 | 733 | update |
|
734 | 734 | |
|
735 | 735 | $ cd ../t |
|
736 | 736 | $ hg up -C # discard our earlier merge |
|
737 | 737 | 1 files updated, 0 files merged, 0 files removed, 0 files unresolved |
|
738 | 738 | updated to "c373c8102e68: 12" |
|
739 | 739 | 2 other heads for branch "default" |
|
740 | 740 | $ echo blah > t/t |
|
741 | 741 | $ hg ci -m13 |
|
742 | 742 | committing subrepository t |
|
743 | 743 | |
|
744 | 744 | backout calls revert internally with minimal opts, which should not raise |
|
745 | 745 | KeyError |
|
746 | 746 | |
|
747 | 747 | $ hg backout ".^" --no-commit |
|
748 | 748 | 0 files updated, 0 files merged, 0 files removed, 0 files unresolved |
|
749 | 749 | changeset c373c8102e68 backed out, don't forget to commit. |
|
750 | 750 | |
|
751 | 751 | $ hg up -C # discard changes |
|
752 | 752 | 1 files updated, 0 files merged, 0 files removed, 0 files unresolved |
|
753 | 753 | updated to "925c17564ef8: 13" |
|
754 | 754 | 2 other heads for branch "default" |
|
755 | 755 | |
|
756 | 756 | pull |
|
757 | 757 | |
|
758 | 758 | $ cd ../tc |
|
759 | 759 | $ hg pull |
|
760 | 760 | pulling from $TESTTMP/t |
|
761 | 761 | searching for changes |
|
762 | 762 | adding changesets |
|
763 | 763 | adding manifests |
|
764 | 764 | adding file changes |
|
765 | 765 | added 1 changesets with 1 changes to 1 files |
|
766 | 766 | new changesets 925c17564ef8 |
|
767 | 767 | (run 'hg update' to get a working copy) |
|
768 | 768 | |
|
769 | 769 | should pull t |
|
770 | 770 | |
|
771 | 771 | $ hg incoming -S -r `hg log -r tip -T "{node|short}"` |
|
772 | 772 | comparing with $TESTTMP/t |
|
773 | 773 | no changes found |
|
774 | 774 | comparing with $TESTTMP/t/s |
|
775 | 775 | searching for changes |
|
776 | 776 | no changes found |
|
777 | 777 | comparing with $TESTTMP/t/s/ss |
|
778 | 778 | searching for changes |
|
779 | 779 | no changes found |
|
780 | 780 | comparing with $TESTTMP/t/t |
|
781 | 781 | searching for changes |
|
782 | 782 | changeset: 5:52c0adc0515a |
|
783 | 783 | tag: tip |
|
784 | 784 | user: test |
|
785 | 785 | date: Thu Jan 01 00:00:00 1970 +0000 |
|
786 | 786 | summary: 13 |
|
787 | 787 | |
|
788 | 788 | |
|
789 | 789 | $ hg up |
|
790 | 790 | pulling subrepo t from $TESTTMP/t/t |
|
791 | 791 | searching for changes |
|
792 | 792 | adding changesets |
|
793 | 793 | adding manifests |
|
794 | 794 | adding file changes |
|
795 | 795 | added 1 changesets with 1 changes to 1 files |
|
796 | 796 | new changesets 52c0adc0515a |
|
797 | 797 | 0 files updated, 0 files merged, 0 files removed, 0 files unresolved |
|
798 | 798 | updated to "925c17564ef8: 13" |
|
799 | 799 | 2 other heads for branch "default" |
|
800 | 800 | $ cat t/t |
|
801 | 801 | blah |
|
802 | 802 | |
|
803 | 803 | bogus subrepo path aborts |
|
804 | 804 | |
|
805 | 805 | $ echo 'bogus=[boguspath' >> .hgsub |
|
806 | 806 | $ hg ci -m 'bogus subrepo path' |
|
807 | 807 | abort: missing ] in subrepository source |
|
808 | 808 | [255] |
|
809 | 809 | |
|
810 | 810 | Issue1986: merge aborts when trying to merge a subrepo that |
|
811 | 811 | shouldn't need merging |
|
812 | 812 | |
|
813 | 813 | # subrepo layout |
|
814 | 814 | # |
|
815 | 815 | # o 5 br |
|
816 | 816 | # /| |
|
817 | 817 | # o | 4 default |
|
818 | 818 | # | | |
|
819 | 819 | # | o 3 br |
|
820 | 820 | # |/| |
|
821 | 821 | # o | 2 default |
|
822 | 822 | # | | |
|
823 | 823 | # | o 1 br |
|
824 | 824 | # |/ |
|
825 | 825 | # o 0 default |
|
826 | 826 | |
|
827 | 827 | $ cd .. |
|
828 | 828 | $ rm -rf sub |
|
829 | 829 | $ hg init main |
|
830 | 830 | $ cd main |
|
831 | 831 | $ hg init s |
|
832 | 832 | $ cd s |
|
833 | 833 | $ echo a > a |
|
834 | 834 | $ hg ci -Am1 |
|
835 | 835 | adding a |
|
836 | 836 | $ hg branch br |
|
837 | 837 | marked working directory as branch br |
|
838 | 838 | (branches are permanent and global, did you want a bookmark?) |
|
839 | 839 | $ echo a >> a |
|
840 | 840 | $ hg ci -m1 |
|
841 | 841 | $ hg up default |
|
842 | 842 | 1 files updated, 0 files merged, 0 files removed, 0 files unresolved |
|
843 | 843 | $ echo b > b |
|
844 | 844 | $ hg ci -Am1 |
|
845 | 845 | adding b |
|
846 | 846 | $ hg up br |
|
847 | 847 | 1 files updated, 0 files merged, 1 files removed, 0 files unresolved |
|
848 | 848 | $ hg merge tip |
|
849 | 849 | 1 files updated, 0 files merged, 0 files removed, 0 files unresolved |
|
850 | 850 | (branch merge, don't forget to commit) |
|
851 | 851 | $ hg ci -m1 |
|
852 | 852 | $ hg up 2 |
|
853 | 853 | 1 files updated, 0 files merged, 0 files removed, 0 files unresolved |
|
854 | 854 | $ echo c > c |
|
855 | 855 | $ hg ci -Am1 |
|
856 | 856 | adding c |
|
857 | 857 | $ hg up 3 |
|
858 | 858 | 1 files updated, 0 files merged, 1 files removed, 0 files unresolved |
|
859 | 859 | $ hg merge 4 |
|
860 | 860 | 1 files updated, 0 files merged, 0 files removed, 0 files unresolved |
|
861 | 861 | (branch merge, don't forget to commit) |
|
862 | 862 | $ hg ci -m1 |
|
863 | 863 | |
|
864 | 864 | # main repo layout: |
|
865 | 865 | # |
|
866 | 866 | # * <-- try to merge default into br again |
|
867 | 867 | # .`| |
|
868 | 868 | # . o 5 br --> substate = 5 |
|
869 | 869 | # . | |
|
870 | 870 | # o | 4 default --> substate = 4 |
|
871 | 871 | # | | |
|
872 | 872 | # | o 3 br --> substate = 2 |
|
873 | 873 | # |/| |
|
874 | 874 | # o | 2 default --> substate = 2 |
|
875 | 875 | # | | |
|
876 | 876 | # | o 1 br --> substate = 3 |
|
877 | 877 | # |/ |
|
878 | 878 | # o 0 default --> substate = 2 |
|
879 | 879 | |
|
880 | 880 | $ cd .. |
|
881 | 881 | $ echo 's = s' > .hgsub |
|
882 | 882 | $ hg -R s up 2 |
|
883 | 883 | 1 files updated, 0 files merged, 1 files removed, 0 files unresolved |
|
884 | 884 | $ hg ci -Am1 |
|
885 | 885 | adding .hgsub |
|
886 | 886 | $ hg branch br |
|
887 | 887 | marked working directory as branch br |
|
888 | 888 | (branches are permanent and global, did you want a bookmark?) |
|
889 | 889 | $ echo b > b |
|
890 | 890 | $ hg -R s up 3 |
|
891 | 891 | 1 files updated, 0 files merged, 0 files removed, 0 files unresolved |
|
892 | 892 | $ hg ci -Am1 |
|
893 | 893 | adding b |
|
894 | 894 | $ hg up default |
|
895 | 895 | 1 files updated, 0 files merged, 1 files removed, 0 files unresolved |
|
896 | 896 | $ echo c > c |
|
897 | 897 | $ hg ci -Am1 |
|
898 | 898 | adding c |
|
899 | 899 | $ hg up 1 |
|
900 | 900 | 2 files updated, 0 files merged, 1 files removed, 0 files unresolved |
|
901 | 901 | $ hg merge 2 |
|
902 | 902 | 1 files updated, 0 files merged, 0 files removed, 0 files unresolved |
|
903 | 903 | (branch merge, don't forget to commit) |
|
904 | 904 | $ hg ci -m1 |
|
905 | 905 | $ hg up 2 |
|
906 | 906 | 1 files updated, 0 files merged, 1 files removed, 0 files unresolved |
|
907 | 907 | $ hg -R s up 4 |
|
908 | 908 | 1 files updated, 0 files merged, 0 files removed, 0 files unresolved |
|
909 | 909 | $ echo d > d |
|
910 | 910 | $ hg ci -Am1 |
|
911 | 911 | adding d |
|
912 | 912 | $ hg up 3 |
|
913 | 913 | 2 files updated, 0 files merged, 1 files removed, 0 files unresolved |
|
914 | 914 | $ hg -R s up 5 |
|
915 | 915 | 1 files updated, 0 files merged, 0 files removed, 0 files unresolved |
|
916 | 916 | $ echo e > e |
|
917 | 917 | $ hg ci -Am1 |
|
918 | 918 | adding e |
|
919 | 919 | |
|
920 | 920 | $ hg up 5 |
|
921 | 921 | 0 files updated, 0 files merged, 0 files removed, 0 files unresolved |
|
922 | 922 | $ hg merge 4 # try to merge default into br again |
|
923 | 923 | subrepository s diverged (local revision: f8f13b33206e, remote revision: a3f9062a4f88) |
|
924 | 924 | you can (m)erge, keep (l)ocal [working copy] or keep (r)emote [merge rev]. |
|
925 | 925 | what do you want to do? m |
|
926 | 926 | 1 files updated, 0 files merged, 0 files removed, 0 files unresolved |
|
927 | 927 | (branch merge, don't forget to commit) |
|
928 | 928 | $ cd .. |
|
929 | 929 | |
|
930 | 930 | test subrepo delete from .hgsubstate |
|
931 | 931 | |
|
932 | 932 | $ hg init testdelete |
|
933 | 933 | $ mkdir testdelete/nested testdelete/nested2 |
|
934 | 934 | $ hg init testdelete/nested |
|
935 | 935 | $ hg init testdelete/nested2 |
|
936 | 936 | $ echo test > testdelete/nested/foo |
|
937 | 937 | $ echo test > testdelete/nested2/foo |
|
938 | 938 | $ hg -R testdelete/nested add |
|
939 | 939 | adding testdelete/nested/foo |
|
940 | 940 | $ hg -R testdelete/nested2 add |
|
941 | 941 | adding testdelete/nested2/foo |
|
942 | 942 | $ hg -R testdelete/nested ci -m test |
|
943 | 943 | $ hg -R testdelete/nested2 ci -m test |
|
944 | 944 | $ echo nested = nested > testdelete/.hgsub |
|
945 | 945 | $ echo nested2 = nested2 >> testdelete/.hgsub |
|
946 | 946 | $ hg -R testdelete add |
|
947 | 947 | adding testdelete/.hgsub |
|
948 | 948 | $ hg -R testdelete ci -m "nested 1 & 2 added" |
|
949 | 949 | $ echo nested = nested > testdelete/.hgsub |
|
950 | 950 | $ hg -R testdelete ci -m "nested 2 deleted" |
|
951 | 951 | $ cat testdelete/.hgsubstate |
|
952 | 952 | bdf5c9a3103743d900b12ae0db3ffdcfd7b0d878 nested |
|
953 | 953 | $ hg -R testdelete remove testdelete/.hgsub |
|
954 | 954 | $ hg -R testdelete ci -m ".hgsub deleted" |
|
955 | 955 | $ cat testdelete/.hgsubstate |
|
956 | 956 | bdf5c9a3103743d900b12ae0db3ffdcfd7b0d878 nested |
|
957 | 957 | |
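(dropping the nested2 line from .hgsub removes that entry from the committed
.hgsubstate, while removing .hgsub altogether stops subrepo handling but
leaves the last .hgsubstate content on disk, which is what the final cat
shows)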
|
958 | 958 | test repository cloning |
|
959 | 959 | |
|
960 | 960 | $ mkdir mercurial mercurial2 |
|
961 | 961 | $ hg init nested_absolute |
|
962 | 962 | $ echo test > nested_absolute/foo |
|
963 | 963 | $ hg -R nested_absolute add |
|
964 | 964 | adding nested_absolute/foo |
|
965 | 965 | $ hg -R nested_absolute ci -mtest |
|
966 | 966 | $ cd mercurial |
|
967 | 967 | $ hg init nested_relative |
|
968 | 968 | $ echo test2 > nested_relative/foo2 |
|
969 | 969 | $ hg -R nested_relative add |
|
970 | 970 | adding nested_relative/foo2 |
|
971 | 971 | $ hg -R nested_relative ci -mtest2 |
|
972 | 972 | $ hg init main |
|
973 | 973 | $ echo "nested_relative = ../nested_relative" > main/.hgsub |
|
974 | 974 | $ echo "nested_absolute = `pwd`/nested_absolute" >> main/.hgsub |
|
975 | 975 | $ hg -R main add |
|
976 | 976 | adding main/.hgsub |
|
977 | 977 | $ hg -R main ci -m "add subrepos" |
|
978 | 978 | $ cd .. |
|
979 | 979 | $ hg clone mercurial/main mercurial2/main |
|
980 | 980 | updating to branch default |
|
981 | 981 | 2 files updated, 0 files merged, 0 files removed, 0 files unresolved |
|
982 | 982 | $ cat mercurial2/main/nested_absolute/.hg/hgrc \ |
|
983 | 983 | > mercurial2/main/nested_relative/.hg/hgrc |
|
984 | 984 | [paths] |
|
985 | 985 | default = $TESTTMP/mercurial/nested_absolute |
|
986 | 986 | [paths] |
|
987 | 987 | default = $TESTTMP/mercurial/nested_relative |
|
988 | 988 | $ rm -rf mercurial mercurial2 |
|
989 | 989 | |
|
990 | 990 | Issue1977: multirepo push should fail if subrepo push fails |
|
991 | 991 | |
|
992 | 992 | $ hg init repo |
|
993 | 993 | $ hg init repo/s |
|
994 | 994 | $ echo a > repo/s/a |
|
995 | 995 | $ hg -R repo/s ci -Am0 |
|
996 | 996 | adding a |
|
997 | 997 | $ echo s = s > repo/.hgsub |
|
998 | 998 | $ hg -R repo ci -Am1 |
|
999 | 999 | adding .hgsub |
|
1000 | 1000 | $ hg clone repo repo2 |
|
1001 | 1001 | updating to branch default |
|
1002 | 1002 | cloning subrepo s from $TESTTMP/repo/s |
|
1003 | 1003 | 2 files updated, 0 files merged, 0 files removed, 0 files unresolved |
|
1004 | 1004 | $ hg -q -R repo2 pull -u |
|
1005 | 1005 | $ echo 1 > repo2/s/a |
|
1006 | 1006 | $ hg -R repo2/s ci -m2 |
|
1007 | 1007 | $ hg -q -R repo2/s push |
|
1008 | 1008 | $ hg -R repo2/s up -C 0 |
|
1009 | 1009 | 1 files updated, 0 files merged, 0 files removed, 0 files unresolved |
|
1010 | 1010 | $ echo 2 > repo2/s/b |
|
1011 | 1011 | $ hg -R repo2/s ci -m3 -A |
|
1012 | 1012 | adding b |
|
1013 | 1013 | created new head |
|
1014 | 1014 | $ hg -R repo2 ci -m3 |
|
1015 | 1015 | $ hg -q -R repo2 push |
|
1016 | 1016 | abort: push creates new remote head cc505f09a8b2 (in subrepository "s") |
|
1017 | 1017 | (merge or see 'hg help push' for details about pushing new heads) |
|
1018 | 1018 | [255] |
|
1019 | 1019 | $ hg -R repo update |
|
1020 | 1020 | 0 files updated, 0 files merged, 0 files removed, 0 files unresolved |
|
1021 | 1021 | |
|
1022 | 1022 | test if untracked file is not overwritten |
|
1023 | 1023 | |
|
1024 | (this also tests that updated .hgsubstate is treated as "modified", | |
|
1025 | when 'merge.update()' is aborted before 'merge.recordupdates()', even | |
|
1026 | if none of mode, size and timestamp of it isn't changed on the | |
|
1027 | filesystem (see also issue4583)) | |
|
1024 | (this test also updates .hgsubstate and merges it within the same second. | |

1025 | The file should be marked as modified even if none of its mode, size or | |

1026 | timestamp changed on the filesystem (see also issue4583)) | |
|
1028 | 1027 | |
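(the underlying problem is mtime granularity: within a single second, mode,
size and timestamp cannot distinguish a rewritten .hgsubstate from an
untouched one, so status must treat such an entry as modified rather than
trust the cached dirstate; this ambiguity is what issue4583 is about)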
|
1029 | 1028 | $ echo issue3276_ok > repo/s/b |
|
1030 | 1029 | $ hg -R repo2 push -f -q |
|
1031 | $ touch -t 200001010000 repo/.hgsubstate | |
|
1032 | 1030 | |
|
1033 | $ cat >> repo/.hg/hgrc <<EOF | |
|
1034 | > [fakedirstatewritetime] | |
|
1035 | > # emulate invoking dirstate.write() via repo.status() | |
|
1036 | > # at 2000-01-01 00:00 | |
|
1037 | > fakenow = 200001010000 | |
|
1038 | > | |
|
1039 | > [extensions] | |
|
1040 | > fakedirstatewritetime = $TESTDIR/fakedirstatewritetime.py | |
|
1041 | > EOF | |
|
1042 | 1031 | $ hg -R repo update |
|
1043 | 1032 | b: untracked file differs |
|
1044 | 1033 | abort: untracked files in working directory differ from files in requested revision (in subrepository "s") |
|
1045 | 1034 | [255] |
|
1046 | $ cat >> repo/.hg/hgrc <<EOF | |
|
1047 | > [extensions] | |
|
1048 | > fakedirstatewritetime = ! | |
|
1049 | > EOF | |
|
1050 | 1035 | |
|
1051 | 1036 | $ cat repo/s/b |
|
1052 | 1037 | issue3276_ok |
|
1053 | 1038 | $ rm repo/s/b |
|
1054 | $ touch -t 200001010000 repo/.hgsubstate | |
|
1055 | 1039 | $ hg -R repo revert --all |
|
1056 | 1040 | reverting repo/.hgsubstate |
|
1057 | 1041 | reverting subrepo s |
|
1058 | 1042 | $ hg -R repo update |
|
1059 | 1043 | 1 files updated, 0 files merged, 0 files removed, 0 files unresolved |
|
1060 | 1044 | $ cat repo/s/b |
|
1061 | 1045 | 2 |
|
1062 | 1046 | $ rm -rf repo2 repo |
|
1063 | 1047 | |
|
1064 | 1048 | |
|
1065 | 1049 | Issue1852 subrepos with relative paths always push/pull relative to default |
|
1066 | 1050 | |
|
1067 | 1051 | Prepare a repo with subrepo |
|
1068 | 1052 | |
|
1069 | 1053 | $ hg init issue1852a |
|
1070 | 1054 | $ cd issue1852a |
|
1071 | 1055 | $ hg init sub/repo |
|
1072 | 1056 | $ echo test > sub/repo/foo |
|
1073 | 1057 | $ hg -R sub/repo add sub/repo/foo |
|
1074 | 1058 | $ echo sub/repo = sub/repo > .hgsub |
|
1075 | 1059 | $ hg add .hgsub |
|
1076 | 1060 | $ hg ci -mtest |
|
1077 | 1061 | committing subrepository sub/repo |
|
1078 | 1062 | $ echo test >> sub/repo/foo |
|
1079 | 1063 | $ hg ci -mtest |
|
1080 | 1064 | committing subrepository sub/repo |
|
1081 | 1065 | $ hg cat sub/repo/foo |
|
1082 | 1066 | test |
|
1083 | 1067 | test |
|
1084 | 1068 | $ hg cat sub/repo/foo -Tjson | sed 's|\\\\|/|g' |
|
1085 | 1069 | [ |
|
1086 | 1070 | { |
|
1087 | 1071 | "data": "test\ntest\n", |
|
1088 | 1072 | "path": "foo" |
|
1089 | 1073 | } |
|
1090 | 1074 | ] |
|
1091 | 1075 | |
|
1092 | 1076 | non-exact match: |
|
1093 | 1077 | |
|
1094 | 1078 | $ hg cat -T '{path|relpath}\n' 'glob:**' |
|
1095 | 1079 | .hgsub |
|
1096 | 1080 | .hgsubstate |
|
1097 | 1081 | sub/repo/foo |
|
1098 | 1082 | $ hg cat -T '{path|relpath}\n' 're:^sub' |
|
1099 | 1083 | sub/repo/foo |
|
1100 | 1084 | |
|
1101 | 1085 | missing subrepos in working directory: |
|
1102 | 1086 | |
|
1103 | 1087 | $ mkdir -p tmp/sub/repo |
|
1104 | 1088 | $ hg cat -r 0 --output tmp/%p_p sub/repo/foo |
|
1105 | 1089 | $ cat tmp/sub/repo/foo_p |
|
1106 | 1090 | test |
|
1107 | 1091 | $ mv sub/repo sub_ |
|
1108 | 1092 | $ hg cat sub/repo/baz |
|
1109 | 1093 | skipping missing subrepository: sub/repo |
|
1110 | 1094 | [1] |
|
1111 | 1095 | $ rm -rf sub/repo |
|
1112 | 1096 | $ mv sub_ sub/repo |
|
1113 | 1097 | $ cd .. |
|
1114 | 1098 | |
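(a relative source such as "sub/repo = sub/repo" in .hgsub is resolved
against the parent repository's default path, so a repository without a
default path cannot locate the subrepo source at update time, as the next
test demonstrates)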
|
1115 | 1099 | Create repo without default path, pull top repo, and see what happens on update |
|
1116 | 1100 | |
|
1117 | 1101 | $ hg init issue1852b |
|
1118 | 1102 | $ hg -R issue1852b pull issue1852a |
|
1119 | 1103 | pulling from issue1852a |
|
1120 | 1104 | requesting all changes |
|
1121 | 1105 | adding changesets |
|
1122 | 1106 | adding manifests |
|
1123 | 1107 | adding file changes |
|
1124 | 1108 | added 2 changesets with 3 changes to 2 files |
|
1125 | 1109 | new changesets 19487b456929:be5eb94e7215 |
|
1126 | 1110 | (run 'hg update' to get a working copy) |
|
1127 | 1111 | $ hg -R issue1852b update |
|
1128 | 1112 | abort: default path for subrepository not found (in subrepository "sub/repo") |
|
1129 | 1113 | [255] |
|
1130 | 1114 | |
|
1131 | 1115 | Ensure a full traceback, not just the SubrepoAbort part |
|
1132 | 1116 | |
|
1133 | 1117 | $ hg -R issue1852b update --traceback 2>&1 | grep 'raise error\.Abort' |
|
1134 | 1118 | raise error.Abort(_(b"default path for subrepository not found")) |
|
1135 | 1119 | |
|
1136 | 1120 | Pull -u now doesn't help |
|
1137 | 1121 | |
|
1138 | 1122 | $ hg -R issue1852b pull -u issue1852a |
|
1139 | 1123 | pulling from issue1852a |
|
1140 | 1124 | searching for changes |
|
1141 | 1125 | no changes found |
|
1142 | 1126 | |
|
1143 | 1127 | Try the same, but with pull -u |
|
1144 | 1128 | |
|
1145 | 1129 | $ hg init issue1852c |
|
1146 | 1130 | $ hg -R issue1852c pull -r0 -u issue1852a |
|
1147 | 1131 | pulling from issue1852a |
|
1148 | 1132 | adding changesets |
|
1149 | 1133 | adding manifests |
|
1150 | 1134 | adding file changes |
|
1151 | 1135 | added 1 changesets with 2 changes to 2 files |
|
1152 | 1136 | new changesets 19487b456929 |
|
1153 | 1137 | cloning subrepo sub/repo from issue1852a/sub/repo |
|
1154 | 1138 | 2 files updated, 0 files merged, 0 files removed, 0 files unresolved |
|
1155 | 1139 | |
|
1156 | 1140 | Try to push from the other side |
|
1157 | 1141 | |
|
1158 | 1142 | $ hg -R issue1852a push `pwd`/issue1852c |
|
1159 | 1143 | pushing to $TESTTMP/issue1852c |
|
1160 | 1144 | pushing subrepo sub/repo to $TESTTMP/issue1852c/sub/repo |
|
1161 | 1145 | searching for changes |
|
1162 | 1146 | no changes found |
|
1163 | 1147 | searching for changes |
|
1164 | 1148 | adding changesets |
|
1165 | 1149 | adding manifests |
|
1166 | 1150 | adding file changes |
|
1167 | 1151 | added 1 changesets with 1 changes to 1 files |
|
1168 | 1152 | |
|
1169 | 1153 | Incoming and outgoing should not use the default path: |
|
1170 | 1154 | |
|
1171 | 1155 | $ hg clone -q issue1852a issue1852d |
|
1172 | 1156 | $ hg -R issue1852d outgoing --subrepos issue1852c |
|
1173 | 1157 | comparing with issue1852c |
|
1174 | 1158 | searching for changes |
|
1175 | 1159 | no changes found |
|
1176 | 1160 | comparing with issue1852c/sub/repo |
|
1177 | 1161 | searching for changes |
|
1178 | 1162 | no changes found |
|
1179 | 1163 | [1] |
|
1180 | 1164 | $ hg -R issue1852d incoming --subrepos issue1852c |
|
1181 | 1165 | comparing with issue1852c |
|
1182 | 1166 | searching for changes |
|
1183 | 1167 | no changes found |
|
1184 | 1168 | comparing with issue1852c/sub/repo |
|
1185 | 1169 | searching for changes |
|
1186 | 1170 | no changes found |
|
1187 | 1171 | [1] |
|
1188 | 1172 | |
|
1189 | 1173 | Check that merge of a new subrepo doesn't write the uncommitted state to |
|
1190 | 1174 | .hgsubstate (issue4622) |
|
1191 | 1175 | |
|
1192 | 1176 | $ hg init issue1852a/addedsub |
|
1193 | 1177 | $ echo zzz > issue1852a/addedsub/zz.txt |
|
1194 | 1178 | $ hg -R issue1852a/addedsub ci -Aqm "initial ZZ" |
|
1195 | 1179 | |
|
1196 | 1180 | $ hg clone issue1852a/addedsub issue1852d/addedsub |
|
1197 | 1181 | updating to branch default |
|
1198 | 1182 | 1 files updated, 0 files merged, 0 files removed, 0 files unresolved |
|
1199 | 1183 | |
|
1200 | 1184 | $ echo def > issue1852a/sub/repo/foo |
|
1201 | 1185 | $ hg -R issue1852a ci -SAm 'tweaked subrepo' |
|
1202 | 1186 | adding tmp/sub/repo/foo_p |
|
1203 | 1187 | committing subrepository sub/repo |
|
1204 | 1188 | |
|
1205 | 1189 | $ echo 'addedsub = addedsub' >> issue1852d/.hgsub |
|
1206 | 1190 | $ echo xyz > issue1852d/sub/repo/foo |
|
1207 | 1191 | $ hg -R issue1852d pull -u |
|
1208 | 1192 | pulling from $TESTTMP/issue1852a |
|
1209 | 1193 | searching for changes |
|
1210 | 1194 | adding changesets |
|
1211 | 1195 | adding manifests |
|
1212 | 1196 | adding file changes |
|
1213 | 1197 | added 1 changesets with 2 changes to 2 files |
|
1214 | 1198 | new changesets c82b79fdcc5b |
|
1215 | 1199 | subrepository sub/repo diverged (local revision: f42d5c7504a8, remote revision: 46cd4aac504c) |
|
1216 | 1200 | you can (m)erge, keep (l)ocal [working copy] or keep (r)emote [destination]. |
|
1217 | 1201 | what do you want to do? m |
|
1218 | 1202 | pulling subrepo sub/repo from $TESTTMP/issue1852a/sub/repo |
|
1219 | 1203 | searching for changes |
|
1220 | 1204 | adding changesets |
|
1221 | 1205 | adding manifests |
|
1222 | 1206 | adding file changes |
|
1223 | 1207 | added 1 changesets with 1 changes to 1 files |
|
1224 | 1208 | new changesets 46cd4aac504c |
|
1225 | 1209 | subrepository sources for sub/repo differ |
|
1226 | 1210 | you can use (l)ocal source (f42d5c7504a8) or (r)emote source (46cd4aac504c). |
|
1227 | 1211 | what do you want to do? l |
|
1228 | 1212 | 1 files updated, 0 files merged, 0 files removed, 0 files unresolved |
|
1229 | 1213 | $ cat issue1852d/.hgsubstate |
|
1230 | 1214 | f42d5c7504a811dda50f5cf3e5e16c3330b87172 sub/repo |
|
1231 | 1215 | |
|
1232 | 1216 | Check status of files when none of them belong to the first |
|
1233 | 1217 | subrepository: |
|
1234 | 1218 | |
|
1235 | 1219 | $ hg init subrepo-status |
|
1236 | 1220 | $ cd subrepo-status |
|
1237 | 1221 | $ hg init subrepo-1 |
|
1238 | 1222 | $ hg init subrepo-2 |
|
1239 | 1223 | $ cd subrepo-2 |
|
1240 | 1224 | $ touch file |
|
1241 | 1225 | $ hg add file |
|
1242 | 1226 | $ cd .. |
|
1243 | 1227 | $ echo subrepo-1 = subrepo-1 > .hgsub |
|
1244 | 1228 | $ echo subrepo-2 = subrepo-2 >> .hgsub |
|
1245 | 1229 | $ hg add .hgsub |
|
1246 | 1230 | $ hg ci -m 'Added subrepos' |
|
1247 | 1231 | committing subrepository subrepo-2 |
|
1248 | 1232 | $ hg st subrepo-2/file |
|
1249 | 1233 | |
|
1250 | 1234 | Check that share works with subrepo |
|
1251 | 1235 | $ hg --config extensions.share= share . ../shared |
|
1252 | 1236 | updating working directory |
|
1253 | 1237 | sharing subrepo subrepo-1 from $TESTTMP/subrepo-status/subrepo-1 |
|
1254 | 1238 | sharing subrepo subrepo-2 from $TESTTMP/subrepo-status/subrepo-2 |
|
1255 | 1239 | 2 files updated, 0 files merged, 0 files removed, 0 files unresolved |
|
1256 | 1240 | $ find ../shared/* | sort |
|
1257 | 1241 | ../shared/subrepo-1 |
|
1258 | 1242 | ../shared/subrepo-1/.hg |
|
1259 | 1243 | ../shared/subrepo-1/.hg/cache |
|
1260 | 1244 | ../shared/subrepo-1/.hg/cache/storehash |
|
1261 | 1245 | ../shared/subrepo-1/.hg/cache/storehash/* (glob) |
|
1262 | 1246 | ../shared/subrepo-1/.hg/hgrc |
|
1263 | 1247 | ../shared/subrepo-1/.hg/requires |
|
1264 | 1248 | ../shared/subrepo-1/.hg/sharedpath |
|
1265 | 1249 | ../shared/subrepo-1/.hg/wcache |
|
1266 | 1250 | ../shared/subrepo-2 |
|
1267 | 1251 | ../shared/subrepo-2/.hg |
|
1268 | 1252 | ../shared/subrepo-2/.hg/branch |
|
1269 | 1253 | ../shared/subrepo-2/.hg/cache |
|
1270 | 1254 | ../shared/subrepo-2/.hg/cache/storehash |
|
1271 | 1255 | ../shared/subrepo-2/.hg/cache/storehash/* (glob) |
|
1272 | 1256 | ../shared/subrepo-2/.hg/dirstate |
|
1273 | 1257 | ../shared/subrepo-2/.hg/hgrc |
|
1274 | 1258 | ../shared/subrepo-2/.hg/requires |
|
1275 | 1259 | ../shared/subrepo-2/.hg/sharedpath |
|
1276 | 1260 | ../shared/subrepo-2/.hg/wcache |
|
1277 | 1261 | ../shared/subrepo-2/.hg/wcache/checkisexec (execbit !) |
|
1278 | 1262 | ../shared/subrepo-2/.hg/wcache/checklink (symlink no-rust !) |
|
1279 | 1263 | ../shared/subrepo-2/.hg/wcache/checklink-target (symlink no-rust !) |
|
1280 | 1264 | ../shared/subrepo-2/.hg/wcache/manifestfulltextcache (reporevlogstore !) |
|
1281 | 1265 | ../shared/subrepo-2/file |
|
1282 | 1266 | $ hg -R ../shared in |
|
1283 | 1267 | abort: repository default not found |
|
1284 | 1268 | [255] |
|
1285 | 1269 | $ hg -R ../shared/subrepo-2 showconfig paths |
|
1286 | 1270 | paths.default=$TESTTMP/subrepo-status/subrepo-2 |
|
1287 | 1271 | $ hg -R ../shared/subrepo-1 sum --remote |
|
1288 | 1272 | parent: -1:000000000000 tip (empty repository) |
|
1289 | 1273 | branch: default |
|
1290 | 1274 | commit: (clean) |
|
1291 | 1275 | update: (current) |
|
1292 | 1276 | remote: (synced) |
|
1293 | 1277 | |
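(a shared subrepo still gets its own .hg directory, containing a sharedpath
file that points back at the source store plus a local hgrc carrying the
default path, which is what the listing and showconfig above report)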
|
1294 | 1278 | Check hg update --clean |
|
1295 | 1279 | $ cd $TESTTMP/t |
|
1296 | 1280 | $ rm -r t/t.orig |
|
1297 | 1281 | $ hg status -S --all |
|
1298 | 1282 | C .hgsub |
|
1299 | 1283 | C .hgsubstate |
|
1300 | 1284 | C a |
|
1301 | 1285 | C s/.hgsub |
|
1302 | 1286 | C s/.hgsubstate |
|
1303 | 1287 | C s/a |
|
1304 | 1288 | C s/ss/a |
|
1305 | 1289 | C t/t |
|
1306 | 1290 | $ echo c1 > s/a |
|
1307 | 1291 | $ cd s |
|
1308 | 1292 | $ echo c1 > b |
|
1309 | 1293 | $ echo c1 > c |
|
1310 | 1294 | $ hg add b |
|
1311 | 1295 | $ cd .. |
|
1312 | 1296 | $ hg status -S |
|
1313 | 1297 | M s/a |
|
1314 | 1298 | A s/b |
|
1315 | 1299 | ? s/c |
|
1316 | 1300 | $ hg update -C |
|
1317 | 1301 | 1 files updated, 0 files merged, 0 files removed, 0 files unresolved |
|
1318 | 1302 | updated to "925c17564ef8: 13" |
|
1319 | 1303 | 2 other heads for branch "default" |
|
1320 | 1304 | $ hg status -S |
|
1321 | 1305 | ? s/b |
|
1322 | 1306 | ? s/c |
|
1323 | 1307 | |
|
1324 | 1308 | Sticky subrepositories, no changes |
|
1325 | 1309 | $ cd $TESTTMP/t |
|
1326 | 1310 | $ hg id |
|
1327 | 1311 | 925c17564ef8 tip |
|
1328 | 1312 | $ hg -R s id |
|
1329 | 1313 | 12a213df6fa9 tip |
|
1330 | 1314 | $ hg -R t id |
|
1331 | 1315 | 52c0adc0515a tip |
|
1332 | 1316 | $ hg update 11 |
|
1333 | 1317 | 1 files updated, 0 files merged, 0 files removed, 0 files unresolved |
|
1334 | 1318 | $ hg id |
|
1335 | 1319 | 365661e5936a |
|
1336 | 1320 | $ hg -R s id |
|
1337 | 1321 | fc627a69481f |
|
1338 | 1322 | $ hg -R t id |
|
1339 | 1323 | e95bcfa18a35 |
|
1340 | 1324 | |
|
1341 | 1325 | Sticky subrepositories, file changes |
|
1342 | 1326 | $ touch s/f1 |
|
1343 | 1327 | $ touch t/f1 |
|
1344 | 1328 | $ hg add -S s/f1 |
|
1345 | 1329 | $ hg add -S t/f1 |
|
1346 | 1330 | $ hg id |
|
1347 | 1331 | 365661e5936a+ |
|
1348 | 1332 | $ hg -R s id |
|
1349 | 1333 | fc627a69481f+ |
|
1350 | 1334 | $ hg -R t id |
|
1351 | 1335 | e95bcfa18a35+ |
|
1352 | 1336 | $ hg update tip |
|
1353 | 1337 | subrepository s diverged (local revision: fc627a69481f, remote revision: 12a213df6fa9) |
|
1354 | 1338 | you can (m)erge, keep (l)ocal [working copy] or keep (r)emote [destination]. |
|
1355 | 1339 | what do you want to do? m |
|
1356 | 1340 | subrepository sources for s differ |
|
1357 | 1341 | you can use (l)ocal source (fc627a69481f) or (r)emote source (12a213df6fa9). |
|
1358 | 1342 | what do you want to do? l |
|
1359 | 1343 | subrepository t diverged (local revision: e95bcfa18a35, remote revision: 52c0adc0515a) |
|
1360 | 1344 | you can (m)erge, keep (l)ocal [working copy] or keep (r)emote [destination]. |
|
1361 | 1345 | what do you want to do? m |
|
1362 | 1346 | subrepository sources for t differ |
|
1363 | 1347 | you can use (l)ocal source (e95bcfa18a35) or (r)emote source (52c0adc0515a). |
|
1364 | 1348 | what do you want to do? l |
|
1365 | 1349 | 0 files updated, 0 files merged, 0 files removed, 0 files unresolved |
|
1366 | 1350 | $ hg id |
|
1367 | 1351 | 925c17564ef8+ tip |
|
1368 | 1352 | $ hg -R s id |
|
1369 | 1353 | fc627a69481f+ |
|
1370 | 1354 | $ hg -R t id |
|
1371 | 1355 | e95bcfa18a35+ |
|
1372 | 1356 | $ hg update --clean tip |
|
1373 | 1357 | 1 files updated, 0 files merged, 0 files removed, 0 files unresolved |
|
1374 | 1358 | |
|
1375 | 1359 | Sticky subrepository, revision updates |
|
1376 | 1360 | $ hg id |
|
1377 | 1361 | 925c17564ef8 tip |
|
1378 | 1362 | $ hg -R s id |
|
1379 | 1363 | 12a213df6fa9 tip |
|
1380 | 1364 | $ hg -R t id |
|
1381 | 1365 | 52c0adc0515a tip |
|
1382 | 1366 | $ cd s |
|
1383 | 1367 | $ hg update -r -2 |
|
1384 | 1368 | 1 files updated, 0 files merged, 0 files removed, 0 files unresolved |
|
1385 | 1369 | $ cd ../t |
|
1386 | 1370 | $ hg update -r 2 |
|
1387 | 1371 | 1 files updated, 0 files merged, 0 files removed, 0 files unresolved |
|
1388 | 1372 | $ cd .. |
|
1389 | 1373 | $ hg update 10 |
|
1390 | 1374 | subrepository s diverged (local revision: 12a213df6fa9, remote revision: fc627a69481f) |
|
1391 | 1375 | you can (m)erge, keep (l)ocal [working copy] or keep (r)emote [destination]. |
|
1392 | 1376 | what do you want to do? m |
|
1393 | 1377 | subrepository t diverged (local revision: 52c0adc0515a, remote revision: 20a0db6fbf6c) |
|
1394 | 1378 | you can (m)erge, keep (l)ocal [working copy] or keep (r)emote [destination]. |
|
1395 | 1379 | what do you want to do? m |
|
1396 | 1380 | subrepository sources for t differ (in checked out version) |
|
1397 | 1381 | you can use (l)ocal source (7af322bc1198) or (r)emote source (20a0db6fbf6c). |
|
1398 | 1382 | what do you want to do? l |
|
1399 | 1383 | 0 files updated, 0 files merged, 0 files removed, 0 files unresolved |
|
1400 | 1384 | $ hg id |
|
1401 | 1385 | e45c8b14af55+ |
|
1402 | 1386 | $ hg -R s id |
|
1403 | 1387 | 02dcf1d70411 |
|
1404 | 1388 | $ hg -R t id |
|
1405 | 1389 | 7af322bc1198 |
|
1406 | 1390 | |
|
1407 | 1391 | Sticky subrepository, file changes and revision updates |
|
1408 | 1392 | $ touch s/f1 |
|
1409 | 1393 | $ touch t/f1 |
|
1410 | 1394 | $ hg add -S s/f1 |
|
1411 | 1395 | $ hg add -S t/f1 |
|
1412 | 1396 | $ hg id |
|
1413 | 1397 | e45c8b14af55+ |
|
1414 | 1398 | $ hg -R s id |
|
1415 | 1399 | 02dcf1d70411+ |
|
1416 | 1400 | $ hg -R t id |
|
1417 | 1401 | 7af322bc1198+ |
|
1418 | 1402 | $ hg update tip |
|
1419 | 1403 | subrepository s diverged (local revision: 12a213df6fa9, remote revision: 12a213df6fa9) |
|
1420 | 1404 | you can (m)erge, keep (l)ocal [working copy] or keep (r)emote [destination]. |
|
1421 | 1405 | what do you want to do? m |
|
1422 | 1406 | subrepository sources for s differ |
|
1423 | 1407 | you can use (l)ocal source (02dcf1d70411) or (r)emote source (12a213df6fa9). |
|
1424 | 1408 | what do you want to do? l |
|
1425 | 1409 | subrepository t diverged (local revision: 52c0adc0515a, remote revision: 52c0adc0515a) |
|
1426 | 1410 | you can (m)erge, keep (l)ocal [working copy] or keep (r)emote [destination]. |
|
1427 | 1411 | what do you want to do? m |
|
1428 | 1412 | subrepository sources for t differ |
|
1429 | 1413 | you can use (l)ocal source (7af322bc1198) or (r)emote source (52c0adc0515a). |
|
1430 | 1414 | what do you want to do? l |
|
1431 | 1415 | 0 files updated, 0 files merged, 0 files removed, 0 files unresolved |
|
1432 | 1416 | $ hg id |
|
1433 | 1417 | 925c17564ef8+ tip |
|
1434 | 1418 | $ hg -R s id |
|
1435 | 1419 | 02dcf1d70411+ |
|
1436 | 1420 | $ hg -R t id |
|
1437 | 1421 | 7af322bc1198+ |
|
1438 | 1422 | |
|
1439 | 1423 | Sticky subrepository, update --clean
|
1440 | 1424 | $ hg update --clean tip |
|
1441 | 1425 | 1 files updated, 0 files merged, 0 files removed, 0 files unresolved |
|
1442 | 1426 | $ hg id |
|
1443 | 1427 | 925c17564ef8 tip |
|
1444 | 1428 | $ hg -R s id |
|
1445 | 1429 | 12a213df6fa9 tip |
|
1446 | 1430 | $ hg -R t id |
|
1447 | 1431 | 52c0adc0515a tip |
|
1448 | 1432 | |
|
1449 | 1433 | Test subrepo already at intended revision: |
|
1450 | 1434 | $ cd s |
|
1451 | 1435 | $ hg update fc627a69481f |
|
1452 | 1436 | 1 files updated, 0 files merged, 0 files removed, 0 files unresolved |
|
1453 | 1437 | $ cd .. |
|
1454 | 1438 | $ hg update 11 |
|
1455 | 1439 | subrepository s diverged (local revision: 12a213df6fa9, remote revision: fc627a69481f) |
|
1456 | 1440 | you can (m)erge, keep (l)ocal [working copy] or keep (r)emote [destination]. |
|
1457 | 1441 | what do you want to do? m |
|
1458 | 1442 | 0 files updated, 0 files merged, 0 files removed, 0 files unresolved |
|
1459 | 1443 | 0 files updated, 0 files merged, 0 files removed, 0 files unresolved |
|
1460 | 1444 | $ hg id -n |
|
1461 | 1445 | 11+ |
|
1462 | 1446 | $ hg -R s id |
|
1463 | 1447 | fc627a69481f |
|
1464 | 1448 | $ hg -R t id |
|
1465 | 1449 | e95bcfa18a35 |
|
1466 | 1450 | |
|
1467 | 1451 | Test that removing .hgsubstate doesn't break anything: |
|
1468 | 1452 | |
|
1469 | 1453 | $ hg rm -f .hgsubstate |
|
1470 | 1454 | $ hg ci -mrm |
|
1471 | 1455 | nothing changed |
|
1472 | 1456 | [1] |
|
1473 | 1457 | $ hg log -vr tip |
|
1474 | 1458 | changeset: 13:925c17564ef8 |
|
1475 | 1459 | tag: tip |
|
1476 | 1460 | user: test |
|
1477 | 1461 | date: Thu Jan 01 00:00:00 1970 +0000 |
|
1478 | 1462 | files: .hgsubstate |
|
1479 | 1463 | description: |
|
1480 | 1464 | 13 |
|
1481 | 1465 | |
|
1482 | 1466 | |
|
1483 | 1467 | |
|
1484 | 1468 | Test that removing .hgsub removes .hgsubstate: |
|
1485 | 1469 | |
|
1486 | 1470 | $ hg rm .hgsub |
|
1487 | 1471 | $ hg ci -mrm2 |
|
1488 | 1472 | created new head |
|
1489 | 1473 | $ hg log -vr tip |
|
1490 | 1474 | changeset: 14:2400bccd50af |
|
1491 | 1475 | tag: tip |
|
1492 | 1476 | parent: 11:365661e5936a |
|
1493 | 1477 | user: test |
|
1494 | 1478 | date: Thu Jan 01 00:00:00 1970 +0000 |
|
1495 | 1479 | files: .hgsub .hgsubstate |
|
1496 | 1480 | description: |
|
1497 | 1481 | rm2 |
|
1498 | 1482 | |
|
1499 | 1483 | |
|
1500 | 1484 | Test issue3153: diff -S with deleted subrepos |
|
1501 | 1485 | |
|
1502 | 1486 | $ hg diff --nodates -S -c . |
|
1503 | 1487 | diff -r 365661e5936a -r 2400bccd50af .hgsub |
|
1504 | 1488 | --- a/.hgsub |
|
1505 | 1489 | +++ /dev/null |
|
1506 | 1490 | @@ -1,2 +0,0 @@ |
|
1507 | 1491 | -s = s |
|
1508 | 1492 | -t = t |
|
1509 | 1493 | diff -r 365661e5936a -r 2400bccd50af .hgsubstate |
|
1510 | 1494 | --- a/.hgsubstate |
|
1511 | 1495 | +++ /dev/null |
|
1512 | 1496 | @@ -1,2 +0,0 @@ |
|
1513 | 1497 | -fc627a69481fcbe5f1135069e8a3881c023e4cf5 s |
|
1514 | 1498 | -e95bcfa18a358dc4936da981ebf4147b4cad1362 t |
|
1515 | 1499 | |
|
1516 | 1500 | Test behavior of add for explicit path in subrepo: |
|
1517 | 1501 | $ cd .. |
|
1518 | 1502 | $ hg init explicit |
|
1519 | 1503 | $ cd explicit |
|
1520 | 1504 | $ echo s = s > .hgsub |
|
1521 | 1505 | $ hg add .hgsub |
|
1522 | 1506 | $ hg init s |
|
1523 | 1507 | $ hg ci -m0 |
|
1524 | 1508 | Adding with an explicit path in a subrepo adds the file |
|
1525 | 1509 | $ echo c1 > f1 |
|
1526 | 1510 | $ echo c2 > s/f2 |
|
1527 | 1511 | $ hg st -S |
|
1528 | 1512 | ? f1 |
|
1529 | 1513 | ? s/f2 |
|
1530 | 1514 | $ hg add s/f2 |
|
1531 | 1515 | $ hg st -S |
|
1532 | 1516 | A s/f2 |
|
1533 | 1517 | ? f1 |
|
1534 | 1518 | $ hg ci -R s -m0 |
|
1535 | 1519 | $ hg ci -Am1 |
|
1536 | 1520 | adding f1 |
|
1537 | 1521 | Adding with an explicit path in a subrepo with -S has the same behavior |
|
1538 | 1522 | $ echo c3 > f3 |
|
1539 | 1523 | $ echo c4 > s/f4 |
|
1540 | 1524 | $ hg st -S |
|
1541 | 1525 | ? f3 |
|
1542 | 1526 | ? s/f4 |
|
1543 | 1527 | $ hg add -S s/f4 |
|
1544 | 1528 | $ hg st -S |
|
1545 | 1529 | A s/f4 |
|
1546 | 1530 | ? f3 |
|
1547 | 1531 | $ hg ci -R s -m1 |
|
1548 | 1532 | $ hg ci -Ama2 |
|
1549 | 1533 | adding f3 |
|
1550 | 1534 | Adding without a path or pattern silently ignores subrepos |
|
1551 | 1535 | $ echo c5 > f5 |
|
1552 | 1536 | $ echo c6 > s/f6 |
|
1553 | 1537 | $ echo c7 > s/f7 |
|
1554 | 1538 | $ hg st -S |
|
1555 | 1539 | ? f5 |
|
1556 | 1540 | ? s/f6 |
|
1557 | 1541 | ? s/f7 |
|
1558 | 1542 | $ hg add |
|
1559 | 1543 | adding f5 |
|
1560 | 1544 | $ hg st -S |
|
1561 | 1545 | A f5 |
|
1562 | 1546 | ? s/f6 |
|
1563 | 1547 | ? s/f7 |
|
1564 | 1548 | $ hg ci -R s -Am2 |
|
1565 | 1549 | adding f6 |
|
1566 | 1550 | adding f7 |
|
1567 | 1551 | $ hg ci -m3 |
|
1568 | 1552 | Adding without a path or pattern with -S also adds files in subrepos |
|
1569 | 1553 | $ echo c8 > f8 |
|
1570 | 1554 | $ echo c9 > s/f9 |
|
1571 | 1555 | $ echo c10 > s/f10 |
|
1572 | 1556 | $ hg st -S |
|
1573 | 1557 | ? f8 |
|
1574 | 1558 | ? s/f10 |
|
1575 | 1559 | ? s/f9 |
|
1576 | 1560 | $ hg add -S |
|
1577 | 1561 | adding f8 |
|
1578 | 1562 | adding s/f10 |
|
1579 | 1563 | adding s/f9 |
|
1580 | 1564 | $ hg st -S |
|
1581 | 1565 | A f8 |
|
1582 | 1566 | A s/f10 |
|
1583 | 1567 | A s/f9 |
|
1584 | 1568 | $ hg ci -R s -m3 |
|
1585 | 1569 | $ hg ci -m4 |
|
1586 | 1570 | Adding with a pattern silently ignores subrepos |
|
1587 | 1571 | $ echo c11 > fm11 |
|
1588 | 1572 | $ echo c12 > fn12 |
|
1589 | 1573 | $ echo c13 > s/fm13 |
|
1590 | 1574 | $ echo c14 > s/fn14 |
|
1591 | 1575 | $ hg st -S |
|
1592 | 1576 | ? fm11 |
|
1593 | 1577 | ? fn12 |
|
1594 | 1578 | ? s/fm13 |
|
1595 | 1579 | ? s/fn14 |
|
1596 | 1580 | $ hg add 'glob:**fm*' |
|
1597 | 1581 | adding fm11 |
|
1598 | 1582 | $ hg st -S |
|
1599 | 1583 | A fm11 |
|
1600 | 1584 | ? fn12 |
|
1601 | 1585 | ? s/fm13 |
|
1602 | 1586 | ? s/fn14 |
|
1603 | 1587 | $ hg ci -R s -Am4 |
|
1604 | 1588 | adding fm13 |
|
1605 | 1589 | adding fn14 |
|
1606 | 1590 | $ hg ci -Am5 |
|
1607 | 1591 | adding fn12 |
|
1608 | 1592 | Adding with a pattern with -S also adds matches in subrepos |
|
1609 | 1593 | $ echo c15 > fm15 |
|
1610 | 1594 | $ echo c16 > fn16 |
|
1611 | 1595 | $ echo c17 > s/fm17 |
|
1612 | 1596 | $ echo c18 > s/fn18 |
|
1613 | 1597 | $ hg st -S |
|
1614 | 1598 | ? fm15 |
|
1615 | 1599 | ? fn16 |
|
1616 | 1600 | ? s/fm17 |
|
1617 | 1601 | ? s/fn18 |
|
1618 | 1602 | $ hg add -S 'glob:**fm*' |
|
1619 | 1603 | adding fm15 |
|
1620 | 1604 | adding s/fm17 |
|
1621 | 1605 | $ hg st -S |
|
1622 | 1606 | A fm15 |
|
1623 | 1607 | A s/fm17 |
|
1624 | 1608 | ? fn16 |
|
1625 | 1609 | ? s/fn18 |
|
1626 | 1610 | $ hg ci -R s -Am5 |
|
1627 | 1611 | adding fn18 |
|
1628 | 1612 | $ hg ci -Am6 |
|
1629 | 1613 | adding fn16 |
|
1630 | 1614 | |
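The pattern running through all of these cases: "hg add" only enters a subrepo
when asked to, either by an explicit path that lies inside it or by -S; a bare
"hg add" or a pattern without -S stays in the parent repository. A sketch of
that decision rule, as a reading of the cases above rather than Mercurial's
actual implementation ("should_recurse_into" is a hypothetical helper):

    def should_recurse_into(subpath, explicit_paths, with_subrepos):
        if with_subrepos:                      # hg add -S: always recurse
            return True
        # an explicit path inside the subrepo recurses even without -S
        return any(p == subpath or p.startswith(subpath + '/')
                   for p in explicit_paths)

    # "hg add s/f2" -> recurse into "s"; bare "hg add" and
    # "hg add 'glob:**fm*'" pass no explicit paths -> "s" is skipped.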
|
1631 | 1615 | Test behavior of forget for explicit path in subrepo: |
|
1632 | 1616 | Forgetting an explicit path in a subrepo untracks the file |
|
1633 | 1617 | $ echo c19 > s/f19 |
|
1634 | 1618 | $ hg add s/f19 |
|
1635 | 1619 | $ hg st -S |
|
1636 | 1620 | A s/f19 |
|
1637 | 1621 | $ hg forget s/f19 |
|
1638 | 1622 | $ hg st -S |
|
1639 | 1623 | ? s/f19 |
|
1640 | 1624 | $ rm s/f19 |
|
1641 | 1625 | $ cd .. |
|
1642 | 1626 | |
|
1643 | 1627 | A courtesy phase synchronisation to a publishing server does not block the push
|
1644 | 1628 | (issue3781) |
|
1645 | 1629 | |
|
1646 | 1630 | $ cp -R main issue3781 |
|
1647 | 1631 | $ cp -R main issue3781-dest |
|
1648 | 1632 | $ cd issue3781-dest/s |
|
1649 | 1633 | $ hg phase tip # show we have draft changeset |
|
1650 | 1634 | 5: draft |
|
1651 | 1635 | $ chmod a-w .hg/store/phaseroots # prevent phase push |
|
1652 | 1636 | $ cd ../../issue3781 |
|
1653 | 1637 | $ cat >> .hg/hgrc << EOF |
|
1654 | 1638 | > [paths] |
|
1655 | 1639 | > default=../issue3781-dest/ |
|
1656 | 1640 | > EOF |
|
1657 | 1641 | $ hg push --config devel.legacy.exchange=bundle1 |
|
1658 | 1642 | pushing to $TESTTMP/issue3781-dest |
|
1659 | 1643 | pushing subrepo s to $TESTTMP/issue3781-dest/s |
|
1660 | 1644 | searching for changes |
|
1661 | 1645 | no changes found |
|
1662 | 1646 | searching for changes |
|
1663 | 1647 | no changes found |
|
1664 | 1648 | [1] |
|
1665 | 1649 | # clean the push cache |
|
1666 | 1650 | $ rm s/.hg/cache/storehash/* |
|
1667 | 1651 | $ hg push # bundle2+ |
|
1668 | 1652 | pushing to $TESTTMP/issue3781-dest |
|
1669 | 1653 | pushing subrepo s to $TESTTMP/issue3781-dest/s |
|
1670 | 1654 | searching for changes |
|
1671 | 1655 | no changes found |
|
1672 | 1656 | searching for changes |
|
1673 | 1657 | no changes found |
|
1674 | 1658 | [1] |
|
1675 | 1659 | $ cd .. |
|
1676 | 1660 | |
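Why the test clears s/.hg/cache/storehash: before pushing a subrepo again,
Mercurial compares a cached hash of the subrepo's store against its current
state and skips the push when nothing can have changed; deleting the cache
forces the second (bundle2) push to re-examine the subrepo. A toy version of
the idea (the hashed file list here is illustrative, not the real one):

    import hashlib
    import os

    def store_hash(repo_path):
        # hash a few store files whose contents change when history does
        h = hashlib.sha1()
        for name in ('store/00changelog.i', 'store/phaseroots'):
            f = os.path.join(repo_path, '.hg', name)
            if os.path.exists(f):
                with open(f, 'rb') as fp:
                    h.update(fp.read())
        return h.hexdigest()

    # push logic, roughly: if store_hash(sub) matches the hash recorded after
    # the last push to this destination, the subrepo push is skipped.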
|
1677 | 1661 | Test phase choice for newly created commits with the "phases.checksubrepos"
|
1678 | 1662 | configuration |
|
1679 | 1663 | |
|
1680 | 1664 | $ cd t |
|
1681 | 1665 | $ hg update -q -r 12 |
|
1682 | 1666 | |
|
1683 | 1667 | $ cat >> s/ss/.hg/hgrc <<EOF |
|
1684 | 1668 | > [phases] |
|
1685 | 1669 | > new-commit = secret |
|
1686 | 1670 | > EOF |
|
1687 | 1671 | $ cat >> s/.hg/hgrc <<EOF |
|
1688 | 1672 | > [phases] |
|
1689 | 1673 | > new-commit = draft |
|
1690 | 1674 | > EOF |
|
1691 | 1675 | $ echo phasecheck1 >> s/ss/a |
|
1692 | 1676 | $ hg -R s commit -S --config phases.checksubrepos=abort -m phasecheck1 |
|
1693 | 1677 | committing subrepository ss |
|
1694 | 1678 | transaction abort! |
|
1695 | 1679 | rollback completed |
|
1696 | 1680 | abort: can't commit in draft phase conflicting secret from subrepository ss |
|
1697 | 1681 | [255] |
|
1698 | 1682 | $ echo phasecheck2 >> s/ss/a |
|
1699 | 1683 | $ hg -R s commit -S --config phases.checksubrepos=ignore -m phasecheck2 |
|
1700 | 1684 | committing subrepository ss |
|
1701 | 1685 | $ hg -R s/ss phase tip |
|
1702 | 1686 | 3: secret |
|
1703 | 1687 | $ hg -R s phase tip |
|
1704 | 1688 | 6: draft |
|
1705 | 1689 | $ echo phasecheck3 >> s/ss/a |
|
1706 | 1690 | $ hg -R s commit -S -m phasecheck3 |
|
1707 | 1691 | committing subrepository ss |
|
1708 | 1692 | warning: changes are committed in secret phase from subrepository ss |
|
1709 | 1693 | $ hg -R s/ss phase tip |
|
1710 | 1694 | 4: secret |
|
1711 | 1695 | $ hg -R s phase tip |
|
1712 | 1696 | 7: secret |
|
1713 | 1697 | |
|
1714 | 1698 | $ cat >> t/.hg/hgrc <<EOF |
|
1715 | 1699 | > [phases] |
|
1716 | 1700 | > new-commit = draft |
|
1717 | 1701 | > EOF |
|
1718 | 1702 | $ cat >> .hg/hgrc <<EOF |
|
1719 | 1703 | > [phases] |
|
1720 | 1704 | > new-commit = public |
|
1721 | 1705 | > EOF |
|
1722 | 1706 | $ echo phasecheck4 >> s/ss/a |
|
1723 | 1707 | $ echo phasecheck4 >> t/t |
|
1724 | 1708 | $ hg commit -S -m phasecheck4 |
|
1725 | 1709 | committing subrepository s |
|
1726 | 1710 | committing subrepository s/ss |
|
1727 | 1711 | warning: changes are committed in secret phase from subrepository ss |
|
1728 | 1712 | committing subrepository t |
|
1729 | 1713 | warning: changes are committed in secret phase from subrepository s |
|
1730 | 1714 | created new head |
|
1731 | 1715 | $ hg -R s/ss phase tip |
|
1732 | 1716 | 5: secret |
|
1733 | 1717 | $ hg -R s phase tip |
|
1734 | 1718 | 8: secret |
|
1735 | 1719 | $ hg -R t phase tip |
|
1736 | 1720 | 6: draft |
|
1737 | 1721 | $ hg phase tip |
|
1738 | 1722 | 15: secret |
|
1739 | 1723 | |
|
1740 | 1724 | $ cd .. |
|
1741 | 1725 | |
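What the three runs demonstrate: when a commit in the parent would receive a
lower (more public) phase than a commit just created in one of its subrepos,
"phases.checksubrepos" decides the outcome: "abort" refuses, "ignore" keeps
the configured phase, and the default ("follow", used in the unconfigured run)
raises the parent commit to the highest subrepo phase and warns. A sketch of
that rule as read from the outputs above, not the exact implementation:

    PUBLIC, DRAFT, SECRET = 0, 1, 2            # hg phase ordering

    def new_commit_phase(configured, subrepo_phases, check='follow'):
        highest = max(subrepo_phases, default=configured)
        if highest <= configured:
            return configured                  # no conflict
        if check == 'abort':
            raise ValueError("can't commit in a lower phase than a subrepo")
        if check == 'ignore':
            return configured
        return highest                         # 'follow': inherit, with warning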
|
1742 | 1726 | |
|
1743 | 1727 | Test that commit --secret works on both repo and subrepo (issue4182) |
|
1744 | 1728 | |
|
1745 | 1729 | $ cd main |
|
1746 | 1730 | $ echo secret >> b |
|
1747 | 1731 | $ echo secret >> s/b |
|
1748 | 1732 | $ hg commit --secret --subrepo -m "secret" |
|
1749 | 1733 | committing subrepository s |
|
1750 | 1734 | $ hg phase -r . |
|
1751 | 1735 | 6: secret |
|
1752 | 1736 | $ cd s |
|
1753 | 1737 | $ hg phase -r . |
|
1754 | 1738 | 6: secret |
|
1755 | 1739 | $ cd ../../ |
|
1756 | 1740 | |
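Note that --secret is applied to the subrepo commit as well, so both commits
land in the secret phase. It acts like a one-shot override of
phases.new-commit; the following invocation (not run by this test) should be
equivalent:

    $ hg commit --subrepo -m "secret" --config phases.new-commit=secret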
|
1757 | 1741 | Test "subrepos" template keyword |
|
1758 | 1742 | |
|
1759 | 1743 | $ cd t |
|
1760 | 1744 | $ hg update -q 15 |
|
1761 | 1745 | $ cat > .hgsub <<EOF |
|
1762 | 1746 | > s = s |
|
1763 | 1747 | > EOF |
|
1764 | 1748 | $ hg commit -m "16" |
|
1765 | 1749 | warning: changes are committed in secret phase from subrepository s |
|
1766 | 1750 | |
|
1767 | 1751 | (addition of ".hgsub" itself) |
|
1768 | 1752 | |
|
1769 | 1753 | $ hg diff --nodates -c 1 .hgsubstate |
|
1770 | 1754 | diff -r f7b1eb17ad24 -r 7cf8cfea66e4 .hgsubstate |
|
1771 | 1755 | --- /dev/null |
|
1772 | 1756 | +++ b/.hgsubstate |
|
1773 | 1757 | @@ -0,0 +1,1 @@ |
|
1774 | 1758 | +e4ece1bf43360ddc8f6a96432201a37b7cd27ae4 s |
|
1775 | 1759 | $ hg log -r 1 --template "{p1node|short} {p2node|short}\n{subrepos % '{subrepo}\n'}" |
|
1776 | 1760 | f7b1eb17ad24 000000000000 |
|
1777 | 1761 | s |
|
1778 | 1762 | |
|
1779 | 1763 | (modification of existing entry) |
|
1780 | 1764 | |
|
1781 | 1765 | $ hg diff --nodates -c 2 .hgsubstate |
|
1782 | 1766 | diff -r 7cf8cfea66e4 -r df30734270ae .hgsubstate |
|
1783 | 1767 | --- a/.hgsubstate |
|
1784 | 1768 | +++ b/.hgsubstate |
|
1785 | 1769 | @@ -1,1 +1,1 @@ |
|
1786 | 1770 | -e4ece1bf43360ddc8f6a96432201a37b7cd27ae4 s |
|
1787 | 1771 | +dc73e2e6d2675eb2e41e33c205f4bdab4ea5111d s |
|
1788 | 1772 | $ hg log -r 2 --template "{p1node|short} {p2node|short}\n{subrepos % '{subrepo}\n'}" |
|
1789 | 1773 | 7cf8cfea66e4 000000000000 |
|
1790 | 1774 | s |
|
1791 | 1775 | |
|
1792 | 1776 | (addition of entry) |
|
1793 | 1777 | |
|
1794 | 1778 | $ hg diff --nodates -c 5 .hgsubstate |
|
1795 | 1779 | diff -r 7cf8cfea66e4 -r 1f14a2e2d3ec .hgsubstate |
|
1796 | 1780 | --- a/.hgsubstate |
|
1797 | 1781 | +++ b/.hgsubstate |
|
1798 | 1782 | @@ -1,1 +1,2 @@ |
|
1799 | 1783 | e4ece1bf43360ddc8f6a96432201a37b7cd27ae4 s |
|
1800 | 1784 | +60ca1237c19474e7a3978b0dc1ca4e6f36d51382 t |
|
1801 | 1785 | $ hg log -r 5 --template "{p1node|short} {p2node|short}\n{subrepos % '{subrepo}\n'}" |
|
1802 | 1786 | 7cf8cfea66e4 000000000000 |
|
1803 | 1787 | t |
|
1804 | 1788 | |
|
1805 | 1789 | (removal of existing entry) |
|
1806 | 1790 | |
|
1807 | 1791 | $ hg diff --nodates -c 16 .hgsubstate |
|
1808 | 1792 | diff -r 8bec38d2bd0b -r f2f70bc3d3c9 .hgsubstate |
|
1809 | 1793 | --- a/.hgsubstate |
|
1810 | 1794 | +++ b/.hgsubstate |
|
1811 | 1795 | @@ -1,2 +1,1 @@ |
|
1812 | 1796 | 0731af8ca9423976d3743119d0865097c07bdc1b s |
|
1813 | 1797 | -e202dc79b04c88a636ea8913d9182a1346d9b3dc t |
|
1814 | 1798 | $ hg log -r 16 --template "{p1node|short} {p2node|short}\n{subrepos % '{subrepo}\n'}" |
|
1815 | 1799 | 8bec38d2bd0b 000000000000 |
|
1816 | 1800 | t |
|
1817 | 1801 | |
|
1818 | 1802 | (merging) |
|
1819 | 1803 | |
|
1820 | 1804 | $ hg diff --nodates -c 9 .hgsubstate |
|
1821 | 1805 | diff -r f6affe3fbfaa -r f0d2028bf86d .hgsubstate |
|
1822 | 1806 | --- a/.hgsubstate |
|
1823 | 1807 | +++ b/.hgsubstate |
|
1824 | 1808 | @@ -1,1 +1,2 @@ |
|
1825 | 1809 | fc627a69481fcbe5f1135069e8a3881c023e4cf5 s |
|
1826 | 1810 | +60ca1237c19474e7a3978b0dc1ca4e6f36d51382 t |
|
1827 | 1811 | $ hg log -r 9 --template "{p1node|short} {p2node|short}\n{subrepos % '{subrepo}\n'}" |
|
1828 | 1812 | f6affe3fbfaa 1f14a2e2d3ec |
|
1829 | 1813 | t |
|
1830 | 1814 | |
|
1831 | 1815 | (removal of ".hgsub" itself) |
|
1832 | 1816 | |
|
1833 | 1817 | $ hg diff --nodates -c 8 .hgsubstate |
|
1834 | 1818 | diff -r f94576341bcf -r 96615c1dad2d .hgsubstate |
|
1835 | 1819 | --- a/.hgsubstate |
|
1836 | 1820 | +++ /dev/null |
|
1837 | 1821 | @@ -1,2 +0,0 @@ |
|
1838 | 1822 | -e4ece1bf43360ddc8f6a96432201a37b7cd27ae4 s |
|
1839 | 1823 | -7af322bc1198a32402fe903e0b7ebcfc5c9bf8f4 t |
|
1840 | 1824 | $ hg log -r 8 --template "{p1node|short} {p2node|short}\n{subrepos % '{subrepo}\n'}" |
|
1841 | 1825 | f94576341bcf 000000000000 |
|
1842 | 1826 | |
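Taken together, these cases pin down what {subrepos} yields: subrepos whose
pinned node was added or changed relative to the first parent's .hgsubstate,
plus subrepos removed from it, and nothing at all once the changeset has no
substate left (the ".hgsub" removal case, revision 8). A reconstruction of
that comparison from the outputs above:

    def changed_subrepos(substate, p1_substate):
        # substate maps subrepo path -> pinned node, as in .hgsubstate
        if not substate:
            return []                          # .hgsub removed: empty result
        changed = [s for s in sorted(substate)
                   if substate.get(s) != p1_substate.get(s)]
        removed = [s for s in sorted(p1_substate) if s not in substate]
        return changed + removed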
|
1843 | 1827 | Test that '[paths]' is configured correctly at subrepo creation |
|
1844 | 1828 | |
|
1845 | 1829 | $ cd $TESTTMP/tc |
|
1846 | 1830 | $ cat > .hgsub <<EOF |
|
1847 | 1831 | > # to clear bogus subrepo path 'bogus=[boguspath' |
|
1848 | 1832 | > s = s |
|
1849 | 1833 | > t = t |
|
1850 | 1834 | > EOF |
|
1851 | 1835 | $ hg update -q --clean null |
|
1852 | 1836 | $ rm -rf s t |
|
1853 | 1837 | $ cat >> .hg/hgrc <<EOF |
|
1854 | 1838 | > [paths] |
|
1855 | 1839 | > default-push = /foo/bar |
|
1856 | 1840 | > EOF |
|
1857 | 1841 | $ hg update -q |
|
1858 | 1842 | $ cat s/.hg/hgrc |
|
1859 | 1843 | [paths] |
|
1860 | 1844 | default = $TESTTMP/t/s |
|
1861 | 1845 | default-push = /foo/bar/s |
|
1862 | 1846 | $ cat s/ss/.hg/hgrc |
|
1863 | 1847 | [paths] |
|
1864 | 1848 | default = $TESTTMP/t/s/ss |
|
1865 | 1849 | default-push = /foo/bar/s/ss |
|
1866 | 1850 | $ cat t/.hg/hgrc |
|
1867 | 1851 | [paths] |
|
1868 | 1852 | default = $TESTTMP/t/t |
|
1869 | 1853 | default-push = /foo/bar/t |
|
1870 | 1854 | |
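The rule being verified: when a subrepo is first created on update, every
[paths] entry of the parent repository (here "default" and "default-push") is
written into the subrepo's hgrc with the subrepo's relative path appended,
and this applies recursively ("ss" below "s"). As a small sketch:

    def subrepo_paths(parent_paths, relpath):
        # derive a subrepo's [paths] section from its parent's
        return {name: base.rstrip('/') + '/' + relpath
                for name, base in parent_paths.items()}

    # subrepo_paths({'default': '$TESTTMP/t', 'default-push': '/foo/bar'}, 's')
    # -> {'default': '$TESTTMP/t/s', 'default-push': '/foo/bar/s'}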
|
1871 | 1855 | $ cd $TESTTMP/t |
|
1872 | 1856 | $ hg up -qC 0 |
|
1873 | 1857 | $ echo 'bar' > bar.txt |
|
1874 | 1858 | $ hg ci -Am 'branch before subrepo add' |
|
1875 | 1859 | adding bar.txt |
|
1876 | 1860 | created new head |
|
1877 | 1861 | $ hg merge -r "first(subrepo('s'))" |
|
1878 | 1862 | 2 files updated, 0 files merged, 0 files removed, 0 files unresolved |
|
1879 | 1863 | (branch merge, don't forget to commit) |
|
1880 | 1864 | $ hg status -S -X '.hgsub*' |
|
1881 | 1865 | A s/a |
|
1882 | 1866 | ? s/b |
|
1883 | 1867 | ? s/c |
|
1884 | 1868 | ? s/f1 |
|
1885 | 1869 | $ hg status -S --rev 'p2()' |
|
1886 | 1870 | A bar.txt |
|
1887 | 1871 | ? s/b |
|
1888 | 1872 | ? s/c |
|
1889 | 1873 | ? s/f1 |
|
1890 | 1874 | $ hg diff -S -X '.hgsub*' --nodates |
|
1891 | 1875 | diff -r 000000000000 s/a |
|
1892 | 1876 | --- /dev/null |
|
1893 | 1877 | +++ b/s/a |
|
1894 | 1878 | @@ -0,0 +1,1 @@ |
|
1895 | 1879 | +a |
|
1896 | 1880 | $ hg diff -S --rev 'p2()' --nodates |
|
1897 | 1881 | diff -r 7cf8cfea66e4 bar.txt |
|
1898 | 1882 | --- /dev/null |
|
1899 | 1883 | +++ b/bar.txt |
|
1900 | 1884 | @@ -0,0 +1,1 @@ |
|
1901 | 1885 | +bar |
|
1902 | 1886 | |
|
1903 | 1887 | $ hg diff -X '.hgsub*' --nodates s |
|
1904 | 1888 | diff -r 000000000000 s/a |
|
1905 | 1889 | --- /dev/null |
|
1906 | 1890 | +++ b/s/a |
|
1907 | 1891 | @@ -0,0 +1,1 @@ |
|
1908 | 1892 | +a |
|
1909 | 1893 | $ hg diff -X '.hgsub*' --nodates s/a |
|
1910 | 1894 | diff -r 000000000000 s/a |
|
1911 | 1895 | --- /dev/null |
|
1912 | 1896 | +++ b/s/a |
|
1913 | 1897 | @@ -0,0 +1,1 @@ |
|
1914 | 1898 | +a |
|
1915 | 1899 | |
|
1916 | 1900 | $ cd .. |
|
1917 | 1901 | |
|
1918 | 1902 | Test for the ssh exploit of 2017-07-25:
|
1919 | 1903 | |
|
1920 | 1904 | $ cat >> $HGRCPATH << EOF |
|
1921 | 1905 | > [ui] |
|
1922 | 1906 | > ssh = sh -c "read l; read l; read l" |
|
1923 | 1907 | > EOF |
|
1924 | 1908 | |
|
1925 | 1909 | $ hg init malicious-proxycommand |
|
1926 | 1910 | $ cd malicious-proxycommand |
|
1927 | 1911 | $ echo 's = [hg]ssh://-oProxyCommand=touch${IFS}owned/path' > .hgsub |
|
1928 | 1912 | $ hg init s |
|
1929 | 1913 | $ cd s |
|
1930 | 1914 | $ echo init > init |
|
1931 | 1915 | $ hg add |
|
1932 | 1916 | adding init |
|
1933 | 1917 | $ hg commit -m init |
|
1934 | 1918 | $ cd .. |
|
1935 | 1919 | $ hg add .hgsub |
|
1936 | 1920 | $ hg ci -m 'add subrepo' |
|
1937 | 1921 | $ cd .. |
|
1938 | 1922 | $ hg clone malicious-proxycommand malicious-proxycommand-clone |
|
1939 | 1923 | updating to branch default |
|
1940 | 1924 | cloning subrepo s from ssh://-oProxyCommand%3Dtouch%24%7BIFS%7Downed/path |
|
1941 | 1925 | abort: potentially unsafe url: 'ssh://-oProxyCommand=touch${IFS}owned/path' (in subrepository "s") |
|
1942 | 1926 | [255] |
|
1943 | 1927 | |
|
1944 | 1928 | also check that a percent-encoded '-' (%2D) doesn't work
|
1945 | 1929 | |
|
1946 | 1930 | $ cd malicious-proxycommand |
|
1947 | 1931 | $ echo 's = [hg]ssh://%2DoProxyCommand=touch${IFS}owned/path' > .hgsub |
|
1948 | 1932 | $ hg ci -m 'change url to percent encoded' |
|
1949 | 1933 | $ cd .. |
|
1950 | 1934 | $ rm -r malicious-proxycommand-clone |
|
1951 | 1935 | $ hg clone malicious-proxycommand malicious-proxycommand-clone |
|
1952 | 1936 | updating to branch default |
|
1953 | 1937 | cloning subrepo s from ssh://-oProxyCommand%3Dtouch%24%7BIFS%7Downed/path |
|
1954 | 1938 | abort: potentially unsafe url: 'ssh://-oProxyCommand=touch${IFS}owned/path' (in subrepository "s") |
|
1955 | 1939 | [255] |
|
1956 | 1940 | |
|
1957 | 1941 | also check for a pipe |
|
1958 | 1942 | |
|
1959 | 1943 | $ cd malicious-proxycommand |
|
1960 | 1944 | $ echo 's = [hg]ssh://fakehost|touch${IFS}owned/path' > .hgsub |
|
1961 | 1945 | $ hg ci -m 'change url to pipe' |
|
1962 | 1946 | $ cd .. |
|
1963 | 1947 | $ rm -r malicious-proxycommand-clone |
|
1964 | 1948 | $ hg clone malicious-proxycommand malicious-proxycommand-clone |
|
1965 | 1949 | updating to branch default |
|
1966 | 1950 | cloning subrepo s from ssh://fakehost%7Ctouch%24%7BIFS%7Downed/path |
|
1967 | 1951 | abort: no suitable response from remote hg |
|
1968 | 1952 | [255] |
|
1969 | 1953 | $ [ ! -f owned ] || echo 'you got owned' |
|
1970 | 1954 | |
|
1971 | 1955 | also check that a percent-encoded '|' (%7C) doesn't work
|
1972 | 1956 | |
|
1973 | 1957 | $ cd malicious-proxycommand |
|
1974 | 1958 | $ echo 's = [hg]ssh://fakehost%7Ctouch%20owned/path' > .hgsub |
|
1975 | 1959 | $ hg ci -m 'change url to percent encoded pipe' |
|
1976 | 1960 | $ cd .. |
|
1977 | 1961 | $ rm -r malicious-proxycommand-clone |
|
1978 | 1962 | $ hg clone malicious-proxycommand malicious-proxycommand-clone |
|
1979 | 1963 | updating to branch default |
|
1980 | 1964 | cloning subrepo s from ssh://fakehost%7Ctouch%20owned/path |
|
1981 | 1965 | abort: no suitable response from remote hg |
|
1982 | 1966 | [255] |
|
1983 | 1967 | $ [ ! -f owned ] || echo 'you got owned' |
|
1984 | 1968 | |
|
1985 | 1969 | and bad usernames: |
|
1986 | 1970 | $ cd malicious-proxycommand |
|
1987 | 1971 | $ echo 's = [hg]ssh://-oProxyCommand=touch owned@example.com/path' > .hgsub |
|
1988 | 1972 | $ hg ci -m 'owned username' |
|
1989 | 1973 | $ cd .. |
|
1990 | 1974 | $ rm -r malicious-proxycommand-clone |
|
1991 | 1975 | $ hg clone malicious-proxycommand malicious-proxycommand-clone |
|
1992 | 1976 | updating to branch default |
|
1993 | 1977 | cloning subrepo s from ssh://-oProxyCommand%3Dtouch%20owned@example.com/path |
|
1994 | 1978 | abort: potentially unsafe url: 'ssh://-oProxyCommand=touch owned@example.com/path' (in subrepository "s") |
|
1995 | 1979 | [255] |
|
1996 | 1980 | |
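The common thread in these four cases: Mercurial refuses, before ever invoking
ssh, any ssh URL whose username or host begins with "-", because ssh would
parse it as an option ("-oProxyCommand=..." runs an arbitrary command). The
pipe variants fail later and harmlessly because the host is handed to ssh
still percent-encoded. A simplified illustration of the guard (Mercurial's
real check lives in its URL handling and is stricter than this):

    def check_ssh_components(user, host):
        # reject components ssh could mistake for command-line options
        for part in (user, host):
            if part and part.startswith('-'):
                raise ValueError('potentially unsafe url component: %r' % part)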
|
1997 | 1981 | Test converting subrepositories, including a merge (issue5526):
|
1998 | 1982 | |
|
1999 | 1983 | $ hg init tconv |
|
2000 | 1984 | $ hg convert --config extensions.convert= -q t/s tconv/s |
|
2001 | 1985 | $ hg convert --config extensions.convert= -q t/s/ss tconv/s/ss |
|
2002 | 1986 | $ hg convert --config extensions.convert= -q t/t tconv/t |
|
2003 | 1987 | |
|
2004 | 1988 | convert shouldn't fail because of a pseudo filenode:
|
2005 | 1989 | |
|
2006 | 1990 | $ hg convert --config extensions.convert= t tconv |
|
2007 | 1991 | scanning source... |
|
2008 | 1992 | sorting... |
|
2009 | 1993 | converting... |
|
2010 | 1994 | 17 0 |
|
2011 | 1995 | 16 1 |
|
2012 | 1996 | 15 2 |
|
2013 | 1997 | 14 3 |
|
2014 | 1998 | 13 4 |
|
2015 | 1999 | 12 5 |
|
2016 | 2000 | 11 6 |
|
2017 | 2001 | 10 7 |
|
2018 | 2002 | 9 8 |
|
2019 | 2003 | 8 9 |
|
2020 | 2004 | 7 10 |
|
2021 | 2005 | 6 11 |
|
2022 | 2006 | 5 12 |
|
2023 | 2007 | 4 13 |
|
2024 | 2008 | 3 rm2 |
|
2025 | 2009 | 2 phasecheck4 |
|
2026 | 2010 | 1 16 |
|
2027 | 2011 | 0 branch before subrepo add |
|
2028 | 2012 | |
|
2029 | 2013 | converted .hgsubstate should point to valid nodes: |
|
2030 | 2014 | |
|
2031 | 2015 | $ hg up -R tconv 9 |
|
2032 | 2016 | 3 files updated, 0 files merged, 0 files removed, 0 files unresolved |
|
2033 | 2017 | $ cat tconv/.hgsubstate |
|
2034 | 2018 | fc627a69481fcbe5f1135069e8a3881c023e4cf5 s |
|
2035 | 2019 | 60ca1237c19474e7a3978b0dc1ca4e6f36d51382 t |
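For the converted .hgsubstate to point at valid nodes, the pinned node of each
entry has to be rewritten through the old-to-new node mapping of the matching
converted subrepo; the convert extension records that mapping in the
destination's .hg/shamap ("<oldnode> <newnode>" per line). A sketch of the
rewrite step, assuming each subrepo was converted first as done above
("shamaps" maps a subrepo path to its old->new node dict):

    def rewrite_substate(substate_text, shamaps):
        out = []
        for line in substate_text.splitlines():
            node, path = line.split(' ', 1)
            out.append('%s %s' % (shamaps[path].get(node, node), path))
        return '\n'.join(out)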